Hi,

The ashl/lshr/ashr expanders call ix86_expand_binary_operator, and they
can also be reached from post-reload splitters.  TARGET_APX_NDD must be
passed in these calls to avoid forcing operands to memory at the
post-reload stage.
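For context, a sketch of how the flag is consumed (the exact prototype is
my reading of the APX NDD series and should be checked against
i386-protos.h): the new parameter is defaulted, so call sites that do not
pass it still compile but take the non-NDD path:

  /* Assumed declaration, per the APX NDD series; because use_ndd
     defaults to false, an unconverted caller silently behaves as
     "no NDD" rather than failing to build.  */
  extern void ix86_expand_binary_operator (enum rtx_code code,
					   machine_mode mode,
					   rtx operands[],
					   bool use_ndd = false);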
Bootstrapped/regtested on x86-64-pc-linux-gnu{-m32,}.

Ok for master?

gcc/ChangeLog:

	PR target/112943
	* config/i386/i386.md (ashl<mode>3): Add TARGET_APX_NDD to
	ix86_expand_binary_operator call.
	(<insn><mode>3): Likewise for rshift.
	(<insn>di3): Likewise for DImode rotate.
	(<insn><mode>3): Likewise for SWI124 rotate.

gcc/testsuite/ChangeLog:

	PR target/112943
	* gcc.target/i386/pr112943.c: New test.
---
 gcc/config/i386/i386.md                  | 12 +++--
 gcc/testsuite/gcc.target/i386/pr112943.c | 63 ++++++++++++++++++++++++
 2 files changed, 71 insertions(+), 4 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/i386/pr112943.c

diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index b4db50f61cd..f83064ec335 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -14308,7 +14308,8 @@ (define_expand "ashl<mode>3"
 	(ashift:SDWIM (match_operand:SDWIM 1 "<ashl_input_operand>")
 		      (match_operand:QI 2 "nonmemory_operand")))]
   ""
-  "ix86_expand_binary_operator (ASHIFT, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (ASHIFT, <MODE>mode, operands,
+				TARGET_APX_NDD); DONE;")
 
 (define_insn_and_split "*ashl<dwi>3_doubleword_mask"
   [(set (match_operand:<DWI> 0 "register_operand")
@@ -15564,7 +15565,8 @@ (define_expand "<insn><mode>3"
 	(any_shiftrt:SDWIM (match_operand:SDWIM 1 "<shift_operand>")
 			   (match_operand:QI 2 "nonmemory_operand")))]
   ""
-  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands,
+				TARGET_APX_NDD); DONE;")
 
 ;; Avoid useless masking of count operand.
 (define_insn_and_split "*<insn><mode>3_mask"
@@ -16791,7 +16793,8 @@ (define_expand "<insn>di3"
   ""
 {
   if (TARGET_64BIT)
-    ix86_expand_binary_operator (<CODE>, DImode, operands);
+    ix86_expand_binary_operator (<CODE>, DImode, operands,
+				 TARGET_APX_NDD);
   else if (const_1_to_31_operand (operands[2], VOIDmode))
     emit_insn (gen_ix86_<insn>di3_doubleword
	       (operands[0], operands[1], operands[2]));
@@ -16811,7 +16814,8 @@ (define_expand "<insn><mode>3"
 	(any_rotate:SWIM124 (match_operand:SWIM124 1 "nonimmediate_operand")
			    (match_operand:QI 2 "nonmemory_operand")))]
   ""
-  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands,
+				TARGET_APX_NDD); DONE;")
 
 ;; Avoid useless masking of count operand.
 (define_insn_and_split "*<insn><mode>3_mask"
diff --git a/gcc/testsuite/gcc.target/i386/pr112943.c b/gcc/testsuite/gcc.target/i386/pr112943.c
new file mode 100644
index 00000000000..45da6cce5b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112943.c
@@ -0,0 +1,63 @@
+/* PR target/112943 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -march=westmere -mapxf" } */
+
+typedef unsigned char __attribute__((__vector_size__(1))) v8u8;
+typedef char __attribute__((__vector_size__(2))) v16u8;
+typedef char __attribute__((__vector_size__(4))) v32u8;
+typedef char __attribute__((__vector_size__(8))) v64u8;
+typedef char __attribute__((__vector_size__(16))) v128u8;
+typedef _Float16 __attribute__((__vector_size__(2))) v16f16;
+typedef _Float16 __attribute__((__vector_size__(16))) v128f16;
+typedef _Float64x __attribute__((__vector_size__(16))) v128f128;
+typedef _Decimal64 d64;
+char foo0_u8_0;
+v8u8 foo0_v8u8_0;
+__attribute__((__vector_size__(sizeof(char)))) char foo0_v8s8_0;
+__attribute__((__vector_size__(sizeof(long)))) unsigned long v64u64_0;
+_Float16 foo0_f16_0;
+v128f16 foo0_v128f16_0;
+double foo0_f64_0;
+int foo0_f128_0, foo0_v32d32_0, foo0__0;
+d64 foo0_d64_0;
+v8u8 *foo0_ret;
+unsigned __int128 foo0_u128_3;
+v8u8 d;
+void foo0() {
+  v64u64_0 -= foo0_u8_0;
+  v8u8 v8u8_1 = foo0_v8u8_0 % d;
+  v128f128 v128f128_1 = __builtin_convertvector(v64u64_0, v128f128);
+  __int128 u128_2 = (9223372036854775808 << 4) * foo0_u8_0; /* { dg-warning "integer constant is so large that it is unsigned" "so large" } */
+  __int128 u128_r = u128_2 + foo0_u128_3 + foo0_f128_0 + (__int128)foo0_d64_0;
+  v16f16 v16f16_1 = __builtin_convertvector(foo0_v8s8_0, v16f16);
+  v128f16 v128f16_1 = 0 > foo0_v128f16_0;
+  v128u8 v128u8_r = (v128u8)v128f16_1 + (v128u8)v128f128_1;
+  v64u8 v64u8_r = ((union {
+                    v128u8 a;
+                    v64u8 b;
+                  })v128u8_r)
+                      .b +
+                  (v64u8)v64u64_0;
+  v32u8 v32u8_r = ((union {
+                    v64u8 a;
+                    v32u8 b;
+                  })v64u8_r)
+                      .b +
+                  (v32u8)foo0_v32d32_0;
+  v16u8 v16u8_r = ((union {
+                    v32u8 a;
+                    v16u8 b;
+                  })v32u8_r)
+                      .b +
+                  (v16u8)v16f16_1;
+  v8u8 v8u8_r = ((union {
+                   v16u8 a;
+                   v8u8 b;
+                 })v16u8_r)
+                    .b +
+                foo0_v8u8_0 + v8u8_1 + foo0_v8s8_0;
+  long u64_r = u128_r + foo0_f64_0 + (unsigned long)foo0__0;
+  short u16_r = u64_r + foo0_f16_0;
+  char u8_r = u16_r + foo0_u8_0;
+  *foo0_ret = v8u8_r + u8_r;
+}
-- 
2.31.1