On Thu, Jun 27, 2019 at 8:17 AM Jakub Jelinek <ja...@redhat.com> wrote:
>
> Hi!
>
> In the last two alternatives of avx_vec_concat<mode>, we can allow memory
> source, which optimizes the following testcases from weird
>         vmovaps (%rdi), %xmm0
>         vmovaps %xmm0, %xmm0
> and similar to just the first instruction.  I went through all the
> gen_avx_vec_concat* users and all of them ensure the middle operand is a
> register by force_reg or constraint, rather than by testing the predicate
> of the instruction, so I believe the additional condition on the instruction
> is fine and we don't need some expander to fix up the case when middle
> operand is a MEM and last operand is not zero.
>
> Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?
>
> 2019-06-27  Jakub Jelinek  <ja...@redhat.com>
>
>         PR target/90991
>         * config/i386/sse.md (avx_vec_concat<mode>): Use nonimmediate_operand
>         instead of register_operand for operands[1], add m to its constraints
>         if operands[2] uses "C" constraint.  Ensure in condition that if
>         operands[2] is not 0, then operands[1] is not a MEM.  For last two
>         alternatives, use unaligned loads instead of aligned if operands[1] is
>         misaligned_operand.
>
>         * gcc.target/i386/avx2-pr90991-1.c: New test.
>         * gcc.target/i386/avx512dq-pr90991-2.c: New test.

OK with a nit below.

Thanks,
Uros.

>
> --- gcc/config/i386/sse.md.jj   2019-06-26 09:22:40.506567515 +0200
> +++ gcc/config/i386/sse.md      2019-06-26 09:59:15.271571330 +0200
> @@ -20743,9 +20743,10 @@ (define_insn "<avx2_avx512>_<shift_insn>
>  (define_insn "avx_vec_concat<mode>"
>    [(set (match_operand:V_256_512 0 "register_operand" "=x,v,x,Yv")
>         (vec_concat:V_256_512
> -         (match_operand:<ssehalfvecmode> 1 "register_operand" "x,v,x,v")
> +         (match_operand:<ssehalfvecmode> 1 "nonimmediate_operand" "x,v,xm,vm")
>           (match_operand:<ssehalfvecmode> 2 "nonimm_or_0_operand" "xm,vm,C,C")))]
> -  "TARGET_AVX"
> +  "TARGET_AVX && (operands[2] == CONST0_RTX (<ssehalfvecmode>mode)
> +                 || !MEM_P (operands[1]))"

Please put "&& (operands[2] ..." in a separate line.

>  {
>    switch (which_alternative)
>      {
> @@ -20771,27 +20772,63 @@ (define_insn "avx_vec_concat<mode>"
>        switch (get_attr_mode (insn))
>         {
>         case MODE_V16SF:
> -         return "vmovaps\t{%1, %t0|%t0, %1}";
> +         if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
> +           return "vmovups\t{%1, %t0|%t0, %1}";
> +         else
> +           return "vmovaps\t{%1, %t0|%t0, %1}";
>         case MODE_V8DF:
> -         return "vmovapd\t{%1, %t0|%t0, %1}";
> +         if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
> +           return "vmovupd\t{%1, %t0|%t0, %1}";
> +         else
> +           return "vmovapd\t{%1, %t0|%t0, %1}";
>         case MODE_V8SF:
> -         return "vmovaps\t{%1, %x0|%x0, %1}";
> +         if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
> +           return "vmovups\t{%1, %x0|%x0, %1}";
> +         else
> +           return "vmovaps\t{%1, %x0|%x0, %1}";
>         case MODE_V4DF:
> -         return "vmovapd\t{%1, %x0|%x0, %1}";
> +         if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
> +           return "vmovupd\t{%1, %x0|%x0, %1}";
> +         else
> +           return "vmovapd\t{%1, %x0|%x0, %1}";
>         case MODE_XI:
> -         if (which_alternative == 2)
> -           return "vmovdqa\t{%1, %t0|%t0, %1}";
> -         else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
> -           return "vmovdqa64\t{%1, %t0|%t0, %1}";
> +         if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
> +           {
> +             if (which_alternative == 2)
> +               return "vmovdqu\t{%1, %t0|%t0, %1}";
> +             else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
> +               return "vmovdqu64\t{%1, %t0|%t0, %1}";
> +             else
> +               return "vmovdqu32\t{%1, %t0|%t0, %1}";
> +           }
>           else
> -           return "vmovdqa32\t{%1, %t0|%t0, %1}";
> +           {
> +             if (which_alternative == 2)
> +               return "vmovdqa\t{%1, %t0|%t0, %1}";
> +             else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
> +               return "vmovdqa64\t{%1, %t0|%t0, %1}";
> +             else
> +               return "vmovdqa32\t{%1, %t0|%t0, %1}";
> +           }
>         case MODE_OI:
> -         if (which_alternative == 2)
> -           return "vmovdqa\t{%1, %x0|%x0, %1}";
> -         else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
> -           return "vmovdqa64\t{%1, %x0|%x0, %1}";
> +         if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
> +           {
> +             if (which_alternative == 2)
> +               return "vmovdqu\t{%1, %x0|%x0, %1}";
> +             else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
> +               return "vmovdqu64\t{%1, %x0|%x0, %1}";
> +             else
> +               return "vmovdqu32\t{%1, %x0|%x0, %1}";
> +           }
>           else
> -           return "vmovdqa32\t{%1, %x0|%x0, %1}";
> +           {
> +             if (which_alternative == 2)
> +               return "vmovdqa\t{%1, %x0|%x0, %1}";
> +             else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
> +               return "vmovdqa64\t{%1, %x0|%x0, %1}";
> +             else
> +               return "vmovdqa32\t{%1, %x0|%x0, %1}";
> +           }
>         default:
>           gcc_unreachable ();
>         }
> --- gcc/testsuite/gcc.target/i386/avx2-pr90991-1.c.jj   2019-06-26 09:37:46.348539065 +0200
> +++ gcc/testsuite/gcc.target/i386/avx2-pr90991-1.c      2019-06-26 09:42:57.343721166 +0200
> @@ -0,0 +1,50 @@
> +/* PR target/90991 */
> +/* { dg-do compile } */
> +/* { dg-options "-O2 -mavx2 -masm=att" } */
> +/* { dg-final { scan-assembler-times "vmovaps\[ \t]\+\\(\[^\n\r]*\\), %xmm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovapd\[ \t]\+\\(\[^\n\r]*\\), %xmm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovdqa\[ \t]\+\\(\[^\n\r]*\\), %xmm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovups\[ \t]\+\\(\[^\n\r]*\\), %xmm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovupd\[ \t]\+\\(\[^\n\r]*\\), %xmm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovdqu\[ \t]\+\\(\[^\n\r]*\\), %xmm0" 1 } } */
> +/* { dg-final { scan-assembler-not "vmovaps\[^\n\r]*xmm0\[^\n\r]*xmm0" } } */
> +/* { dg-final { scan-assembler-not "vmovapd\[^\n\r]*xmm0\[^\n\r]*xmm0" } } */
> +/* { dg-final { scan-assembler-not "vmovdqa\[^\n\r]*xmm0\[^\n\r]*xmm0" } } */
> +
> +#include <x86intrin.h>
> +
> +__m256
> +f1 (void *a)
> +{
> +  return _mm256_insertf128_ps (_mm256_set1_ps (0.0f), _mm_load_ps (a), 0);
> +}
> +
> +__m256d
> +f2 (void *a)
> +{
> +  return _mm256_insertf128_pd (_mm256_set1_pd (0.0), _mm_load_pd (a), 0);
> +}
> +
> +__m256i
> +f3 (void *a)
> +{
> +  return _mm256_insertf128_si256 (_mm256_set1_epi32 (0), _mm_load_si128 (a), 0);
> +}
> +
> +__m256
> +f4 (void *a)
> +{
> +  return _mm256_insertf128_ps (_mm256_set1_ps (0.0f), _mm_loadu_ps (a), 0);
> +}
> +
> +__m256d
> +f5 (void *a)
> +{
> +  return _mm256_insertf128_pd (_mm256_set1_pd (0.0), _mm_loadu_pd (a), 0);
> +}
> +
> +__m256i
> +f6 (void *a)
> +{
> +  return _mm256_insertf128_si256 (_mm256_set1_epi32 (0), _mm_loadu_si128 (a), 0);
> +}
> --- gcc/testsuite/gcc.target/i386/avx512dq-pr90991-2.c.jj       2019-06-26 09:33:25.581578833 +0200
> +++ gcc/testsuite/gcc.target/i386/avx512dq-pr90991-2.c  2019-06-26 10:02:31.511531237 +0200
> @@ -0,0 +1,47 @@
> +/* PR target/90991 */
> +/* { dg-do compile } */
> +/* { dg-options "-O2 -mavx512dq -masm=att -mtune=intel" } */
> +/* { dg-final { scan-assembler-times "vmovaps\[ \t]\+\\(\[^\n\r]*\\), %ymm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovapd\[ \t]\+\\(\[^\n\r]*\\), %ymm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovdqa\[ \t]\+\\(\[^\n\r]*\\), %ymm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovups\[ \t]\+\\(\[^\n\r]*\\), %ymm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovupd\[ \t]\+\\(\[^\n\r]*\\), %ymm0" 1 } } */
> +/* { dg-final { scan-assembler-times "vmovdqu\[ \t]\+\\(\[^\n\r]*\\), %ymm0" 1 } } */
> +
> +#include <x86intrin.h>
> +
> +__m512
> +f1 (void *a)
> +{
> +  return _mm512_insertf32x8 (_mm512_set1_ps (0.0f), _mm256_load_ps (a), 0);
> +}
> +
> +__m512d
> +f2 (void *a)
> +{
> +  return _mm512_insertf64x4 (_mm512_set1_pd (0.0), _mm256_load_pd (a), 0);
> +}
> +
> +__m512i
> +f3 (void *a)
> +{
> +  return _mm512_inserti32x8 (_mm512_set1_epi32 (0), _mm256_load_si256 (a), 0);
> +}
> +
> +__m512
> +f4 (void *a)
> +{
> +  return _mm512_insertf32x8 (_mm512_set1_ps (0.0f), _mm256_loadu_ps (a), 0);
> +}
> +
> +__m512d
> +f5 (void *a)
> +{
> +  return _mm512_insertf64x4 (_mm512_set1_pd (0.0), _mm256_loadu_pd (a), 0);
> +}
> +
> +__m512i
> +f6 (void *a)
> +{
> +  return _mm512_inserti32x8 (_mm512_set1_epi32 (0), _mm256_loadu_si256 (a), 0);
> +}
>
>         Jakub

Reply via email to