Jonathan Wright <jonathan.wri...@arm.com> writes:
> Hi,
>
> As subject, this patch adds tests to confirm that a *2 (write to high-half)
> Neon instruction is generated from vcombine* of a narrowing intrinsic
> sequence.
>
> Ok for master?
OK, thanks.

Richard

> Thanks,
> Jonathan
>
> ---
>
> gcc/testsuite/ChangeLog:
>
> 2021-05-14  Jonathan Wright  <jonathan.wri...@arm.com>
>
> 	* gcc.target/aarch64/narrow_high_combine.c: New test.
>
> diff --git a/gcc/testsuite/gcc.target/aarch64/narrow_high_combine.c b/gcc/testsuite/gcc.target/aarch64/narrow_high_combine.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..cf649bda28d4d648c9392d202fcc5660107a11d7
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/narrow_high_combine.c
> @@ -0,0 +1,125 @@
> +/* { dg-skip-if "" { arm*-*-* } } */
> +/* { dg-do compile } */
> +/* { dg-options "-O3" } */
> +
> +#include <arm_neon.h>
> +
> +#define TEST_ARITH(name, rettype, rmwtype, intype, fs, rs) \
> +  rettype test_ ## name ## _ ## fs ## _high_combine \
> +	(rmwtype a, intype b, intype c) \
> +  { \
> +    return vcombine_ ## rs (a, name ## _ ## fs (b, c)); \
> +  }
> +
> +TEST_ARITH (vaddhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_ARITH (vaddhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_ARITH (vaddhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_ARITH (vaddhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_ARITH (vaddhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_ARITH (vaddhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_ARITH (vraddhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_ARITH (vraddhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_ARITH (vraddhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_ARITH (vraddhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_ARITH (vraddhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_ARITH (vraddhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_ARITH (vsubhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_ARITH (vsubhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_ARITH (vsubhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_ARITH (vsubhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_ARITH (vsubhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_ARITH (vsubhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_ARITH (vrsubhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_ARITH (vrsubhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_ARITH (vrsubhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_ARITH (vrsubhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_ARITH (vrsubhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_ARITH (vrsubhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +#define TEST_SHIFT(name, rettype, rmwtype, intype, fs, rs) \
> +  rettype test_ ## name ## _ ## fs ## _high_combine \
> +	(rmwtype a, intype b) \
> +  { \
> +    return vcombine_ ## rs (a, name ## _ ## fs (b, 4)); \
> +  }
> +
> +TEST_SHIFT (vshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_SHIFT (vshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_SHIFT (vshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_SHIFT (vshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_SHIFT (vshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_SHIFT (vshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_SHIFT (vrshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_SHIFT (vrshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_SHIFT (vrshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_SHIFT (vrshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_SHIFT (vrshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_SHIFT (vrshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_SHIFT (vqshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_SHIFT (vqshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_SHIFT (vqshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_SHIFT (vqshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_SHIFT (vqshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_SHIFT (vqshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_SHIFT (vqrshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_SHIFT (vqrshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_SHIFT (vqrshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_SHIFT (vqrshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_SHIFT (vqrshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_SHIFT (vqrshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_SHIFT (vqshrun_n, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
> +TEST_SHIFT (vqshrun_n, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
> +TEST_SHIFT (vqshrun_n, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)
> +
> +TEST_SHIFT (vqrshrun_n, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
> +TEST_SHIFT (vqrshrun_n, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
> +TEST_SHIFT (vqrshrun_n, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)
> +
> +#define TEST_UNARY(name, rettype, rmwtype, intype, fs, rs) \
> +  rettype test_ ## name ## _ ## fs ## _high_combine \
> +	(rmwtype a, intype b) \
> +  { \
> +    return vcombine_ ## rs (a, name ## _ ## fs (b)); \
> +  }
> +
> +TEST_UNARY (vmovn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_UNARY (vmovn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_UNARY (vmovn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_UNARY (vmovn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_UNARY (vmovn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_UNARY (vmovn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_UNARY (vqmovn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
> +TEST_UNARY (vqmovn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
> +TEST_UNARY (vqmovn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
> +TEST_UNARY (vqmovn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
> +TEST_UNARY (vqmovn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
> +TEST_UNARY (vqmovn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
> +
> +TEST_UNARY (vqmovun, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
> +TEST_UNARY (vqmovun, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
> +TEST_UNARY (vqmovun, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)
> +
> +/* { dg-final { scan-assembler-times "\\taddhn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\tsubhn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\trsubhn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\traddhn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\trshrn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\tshrn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\tsqshrun2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tsqrshrun2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tsqshrn2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tuqshrn2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tsqrshrn2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tuqrshrn2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\txtn2\\tv" 6} } */
> +/* { dg-final { scan-assembler-times "\\tuqxtn2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tsqxtn2\\tv" 3} } */
> +/* { dg-final { scan-assembler-times "\\tsqxtun2\\tv" 3} } */