On Mon, Nov 13, 2023 at 4:45 PM Jakub Jelinek <ja...@redhat.com> wrote: > > On Mon, Nov 13, 2023 at 02:27:35PM +0800, Hongtao Liu wrote: > > > 1) if it isn't better to use separate alternative instead of > > > x86_evex_reg_mentioned_p, like in the patch below > > vblendps doesn't support gpr32 which is checked by x86_evex_reg_mentioned_p. > > we need to use xjm for operands[1], (I think we don't need to set > > attribute addr to gpr16 for alternative 0 since the alternative 1 is > > always available and recog will match alternative 1 when gpr32 is used) > > Ok, so like this then? I've incorporated the other two tests into the patch > as well. LGTM. > > 2023-11-13 Jakub Jelinek <ja...@redhat.com> > Hu, Lin1 <lin1...@intel.com> > > PR target/112435 > * config/i386/sse.md (avx512vl_shuf_<shuffletype>32x4_1<mask_name>, > <mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>): Add > alternative with just x instead of v constraints and xjm instead of > vm and use vblendps as optimization only with that alternative. > > * gcc.target/i386/avx512vl-pr112435-1.c: New test. > * gcc.target/i386/avx512vl-pr112435-2.c: New test. > * gcc.target/i386/avx512vl-pr112435-3.c: New test. 
> > --- gcc/config/i386/sse.md.jj 2023-11-11 08:52:20.377845673 +0100 > +++ gcc/config/i386/sse.md 2023-11-13 09:31:08.568935535 +0100 > @@ -19235,11 +19235,11 @@ (define_expand "avx512dq_shuf_<shufflety > }) > > (define_insn "<mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>" > - [(set (match_operand:VI8F_256 0 "register_operand" "=v") > + [(set (match_operand:VI8F_256 0 "register_operand" "=x,v") > (vec_select:VI8F_256 > (vec_concat:<ssedoublemode> > - (match_operand:VI8F_256 1 "register_operand" "v") > - (match_operand:VI8F_256 2 "nonimmediate_operand" "vm")) > + (match_operand:VI8F_256 1 "register_operand" "x,v") > + (match_operand:VI8F_256 2 "nonimmediate_operand" "xjm,vm")) > (parallel [(match_operand 3 "const_0_to_3_operand") > (match_operand 4 "const_0_to_3_operand") > (match_operand 5 "const_4_to_7_operand") > @@ -19254,7 +19254,7 @@ (define_insn "<mask_codefor>avx512dq_shu > mask = INTVAL (operands[3]) / 2; > mask |= (INTVAL (operands[5]) - 4) / 2 << 1; > operands[3] = GEN_INT (mask); > - if (INTVAL (operands[3]) == 2 && !<mask_applied>) > + if (INTVAL (operands[3]) == 2 && !<mask_applied> && which_alternative == 0) > return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}"; > return "vshuf<shuffletype>64x2\t{%3, %2, %1, > %0<mask_operand7>|%0<mask_operand7>, %1, %2, %3}"; > } > @@ -19386,11 +19386,11 @@ (define_expand "avx512vl_shuf_<shufflety > }) > > (define_insn "avx512vl_shuf_<shuffletype>32x4_1<mask_name>" > - [(set (match_operand:VI4F_256 0 "register_operand" "=v") > + [(set (match_operand:VI4F_256 0 "register_operand" "=x,v") > (vec_select:VI4F_256 > (vec_concat:<ssedoublemode> > - (match_operand:VI4F_256 1 "register_operand" "v") > - (match_operand:VI4F_256 2 "nonimmediate_operand" "vm")) > + (match_operand:VI4F_256 1 "register_operand" "x,v") > + (match_operand:VI4F_256 2 "nonimmediate_operand" "xjm,vm")) > (parallel [(match_operand 3 "const_0_to_7_operand") > (match_operand 4 "const_0_to_7_operand") > (match_operand 5 
"const_0_to_7_operand") > @@ -19414,7 +19414,7 @@ (define_insn "avx512vl_shuf_<shuffletype > mask |= (INTVAL (operands[7]) - 8) / 4 << 1; > operands[3] = GEN_INT (mask); > > - if (INTVAL (operands[3]) == 2 && !<mask_applied>) > + if (INTVAL (operands[3]) == 2 && !<mask_applied> && which_alternative == 0) > return "vblendps\t{$240, %2, %1, %0|%0, %1, %2, 240}"; > > return "vshuf<shuffletype>32x4\t{%3, %2, %1, > %0<mask_operand11>|%0<mask_operand11>, %1, %2, %3}"; > --- gcc/testsuite/gcc.target/i386/avx512vl-pr112435-1.c.jj 2023-11-13 > 09:20:53.330643098 +0100 > +++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435-1.c 2023-11-13 > 09:20:53.330643098 +0100 > @@ -0,0 +1,13 @@ > +/* PR target/112435 */ > +/* { dg-do assemble { target { avx512vl && { ! ia32 } } } } */ > +/* { dg-options "-mavx512vl -O2" } */ > + > +#include <x86intrin.h> > + > +__m256i > +foo (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm16") = a; > + asm ("" : "+v" (c)); > + return _mm256_shuffle_i32x4 (c, b, 2); > +} > --- gcc/testsuite/gcc.target/i386/avx512vl-pr112435-2.c.jj 2023-11-13 > 09:23:04.361788598 +0100 > +++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435-2.c 2023-11-13 > 09:34:57.186699876 +0100 > @@ -0,0 +1,63 @@ > +/* PR target/112435 */ > +/* { dg-do assemble { target { avx512vl && { ! 
ia32 } } } } */ > +/* { dg-options "-mavx512vl -O2" } */ > + > +#include <x86intrin.h> > + > +/* vpermi128/vpermf128 */ > +__m256i > +perm0 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_permute2x128_si256 (c, b, 50); > +} > + > +__m256i > +perm1 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_permute2x128_si256 (c, b, 18); > +} > + > +__m256i > +perm2 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_permute2x128_si256 (c, b, 48); > +} > + > +/* vshuf{i,f}{32x4,64x2} ymm .*/ > +__m256i > +shuff0 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_shuffle_i32x4 (c, b, 2); > +} > + > +__m256 > +shuff1 (__m256 a, __m256 b) > +{ > + register __m256 c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_shuffle_f32x4 (c, b, 2); > +} > + > +__m256i > +shuff2 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_shuffle_i64x2 (c, b, 2); > +} > + > +__m256d > +shuff3 (__m256d a, __m256d b) > +{ > + register __m256d c __asm__("ymm17") = a; > + asm ("":"+v" (c)); > + return _mm256_shuffle_f64x2 (c, b, 2); > +} > --- gcc/testsuite/gcc.target/i386/avx512vl-pr112435-3.c.jj 2023-11-13 > 09:24:52.518257838 +0100 > +++ gcc/testsuite/gcc.target/i386/avx512vl-pr112435-3.c 2023-11-13 > 09:26:20.761008930 +0100 > @@ -0,0 +1,78 @@ > +/* PR target/112435 */ > +/* { dg-do assemble { target { avx512vl && { ! 
ia32 } } } } */ > +/* { dg-options "-mavx512vl -O2" } */ > + > +#include <x86intrin.h> > + > +/* vpermf128 */ > +__m256 > +perm0 (__m256 a, __m256 b) > +{ > + register __m256 c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_ps (c, b, 50); > +} > + > +__m256 > +perm1 (__m256 a, __m256 b) > +{ > + register __m256 c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_ps (c, b, 18); > +} > + > +__m256 > +perm2 (__m256 a, __m256 b) > +{ > + register __m256 c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_ps (c, b, 48); > +} > + > +__m256i > +perm3 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_si256 (c, b, 50); > +} > + > +__m256i > +perm4 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_si256 (c, b, 18); > +} > + > +__m256i > +perm5 (__m256i a, __m256i b) > +{ > + register __m256i c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_si256 (c, b, 48); > +} > + > +__m256d > +perm6 (__m256d a, __m256d b) > +{ > + register __m256d c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_pd (c, b, 50); > +} > + > +__m256d > +perm7 (__m256d a, __m256d b) > +{ > + register __m256d c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_pd (c, b, 18); > +} > + > +__m256d > +perm8 (__m256d a, __m256d b) > +{ > + register __m256d c __asm__("ymm17") =a; > + asm ("":"+v" (c)); > + return _mm256_permute2f128_pd (c, b, 48); > +} > > Jakub >
-- BR, Hongtao