On Tue, Oct 18, 2022 at 5:18 PM Haochen Jiang via Gcc-patches <gcc-patches@gcc.gnu.org> wrote:
>
> Hi all,
>
> We would like to add one more patch to enhance the codegen with avxvnniint8.
> It also renames two awkwardly named mode_attrs to make them more consistent
> with the others.
>
> Regtested on x86_64-pc-linux-gnu. Ok for trunk?

The patch LGTM, but please commit it after [1] is checked in.

[1] https://gcc.gnu.org/pipermail/gcc-patches/2022-October/603782.html
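For readers who have not looked at AVX-VNNI-INT8 yet, here is a rough scalar
sketch of what one 128-bit vpdpbssd computes, based on the published
instruction semantics; it is only an illustration, not part of the patch, and
the function name vpdpbssd_model is made up for this note. vpdpbuud is the
same operation with unsigned bytes, which is exactly the reduction pattern the
new sdot_prod/udot_prod expanders expose to the vectorizer:

  #include <stdint.h>

  /* Sketch only: each of the four dword lanes accumulates the sum of four
     signed-byte products; vpdpbuud does the same with uint8_t sources.  */
  static void
  vpdpbssd_model (int32_t acc[4], const int8_t a[16], const int8_t b[16])
  {
    int i, j;
    for (i = 0; i < 4; i++)
      for (j = 0; j < 4; j++)
        acc[i] += (int32_t) a[4 * i + j] * (int32_t) b[4 * i + j];
  }

The testcases below exercise the same dot-product reduction through the
autovectorizer.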
>
> BRs,
> Haochen
>
> gcc/ChangeLog:
>
>         * config/i386/sse.md (ssedvecmode): Rename from VI1SI.
>         (ssedvecmodelower): Rename from vi1si.
>         (sdot_prod<mode>): New define_expand.
>         (udot_prod<mode>): Ditto.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/i386/vnniint8-auto-vectorize-1.c: New test.
>         * gcc.target/i386/vnniint8-auto-vectorize-2.c: Ditto.
> ---
>  gcc/config/i386/sse.md                        | 61 ++++++++++++---
>  .../i386/vnniint8-auto-vectorize-1.c          | 28 +++++++
>  .../i386/vnniint8-auto-vectorize-2.c          | 75 +++++++++++++++++++
>  3 files changed, 153 insertions(+), 11 deletions(-)
>  create mode 100644 gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-1.c
>  create mode 100644 gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-2.c
>
> diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
> index 29cf6fa090b..fc17b5193dc 100644
> --- a/gcc/config/i386/sse.md
> +++ b/gcc/config/i386/sse.md
> @@ -1043,6 +1043,13 @@
>      (V16HI "v16hi") (V8HI "v8hi")
>      (V32QI "v32qi") (V16QI "v16qi")])
>
> +;; Mapping of vector modes to a V*SImode of the same size
> +(define_mode_attr ssedvecmode
> +  [(V64QI "V16SI") (V32QI "V8SI") (V16QI "V4SI")])
> +
> +(define_mode_attr ssedvecmodelower
> +  [(V64QI "v16si") (V32QI "v8si") (V16QI "v4si")])
> +
>  ;; Mapping of vector modes to a vector mode of double size
>  (define_mode_attr ssedoublevecmode
>    [(V64QI "V128QI") (V32HI "V64HI") (V16SI "V32SI") (V8DI "V16DI")
> @@ -28523,29 +28530,23 @@
>    [(set_attr ("prefix") ("evex"))
>     (set_attr "mode" "<sseinsnmode>")])
>
> -(define_mode_attr VI1SI
> -  [(V64QI "V16SI") (V32QI "V8SI") (V16QI "V4SI")])
> -
> -(define_mode_attr vi1si
> -  [(V64QI "v16si") (V32QI "v8si") (V16QI "v4si")])
> -
>  (define_expand "usdot_prod<mode>"
> -  [(match_operand:<VI1SI> 0 "register_operand")
> +  [(match_operand:<ssedvecmode> 0 "register_operand")
>     (match_operand:VI1_AVX512VNNI 1 "register_operand")
>     (match_operand:VI1_AVX512VNNI 2 "register_operand")
> -   (match_operand:<VI1SI> 3 "register_operand")]
> +   (match_operand:<ssedvecmode> 3 "register_operand")]
>    "(<MODE_SIZE> == 64
>      ||((TARGET_AVX512VNNI && TARGET_AVX512VL)
>      || TARGET_AVXVNNI))"
>  {
> -  operands[1] = lowpart_subreg (<VI1SI>mode,
> +  operands[1] = lowpart_subreg (<ssedvecmode>mode,
>                                  force_reg (<MODE>mode, operands[1]),
>                                  <MODE>mode);
> -  operands[2] = lowpart_subreg (<VI1SI>mode,
> +  operands[2] = lowpart_subreg (<ssedvecmode>mode,
>                                  force_reg (<MODE>mode, operands[2]),
>                                  <MODE>mode);
>    emit_insn (gen_rtx_SET (operands[0], operands[3]));
> -  emit_insn (gen_vpdpbusd_<vi1si> (operands[0], operands[3],
> +  emit_insn (gen_vpdpbusd_<ssedvecmodelower> (operands[0], operands[3],
>                                     operands[1], operands[2]));
>    DONE;
>  })
> @@ -29358,6 +29359,44 @@
>     (UNSPEC_VPDPBSUD "bsud") (UNSPEC_VPDPBSUDS "bsuds")
>     (UNSPEC_VPDPBUUD "buud") (UNSPEC_VPDPBUUDS "buuds")])
>
> +(define_expand "sdot_prod<mode>"
> +  [(match_operand:<ssedvecmode> 0 "register_operand")
> +   (match_operand:VI1 1 "register_operand")
> +   (match_operand:VI1 2 "register_operand")
> +   (match_operand:<ssedvecmode> 3 "register_operand")]
> +  "TARGET_AVXVNNIINT8"
> +{
> +  operands[1] = lowpart_subreg (<ssedvecmode>mode,
> +                                force_reg (<MODE>mode, operands[1]),
> +                                <MODE>mode);
> +  operands[2] = lowpart_subreg (<ssedvecmode>mode,
> +                                force_reg (<MODE>mode, operands[2]),
> +                                <MODE>mode);
> +  emit_insn (gen_rtx_SET (operands[0], operands[3]));
> +  emit_insn (gen_vpdpbssd_<ssedvecmodelower> (operands[0], operands[3],
> +                                              operands[1], operands[2]));
> +  DONE;
> +})
> +
+(define_expand "udot_prod<mode>" > + [(match_operand:<ssedvecmode> 0 "register_operand") > + (match_operand:VI1 1 "register_operand") > + (match_operand:VI1 2 "register_operand") > + (match_operand:<ssedvecmode> 3 "register_operand")] > + "TARGET_AVXVNNIINT8" > +{ > + operands[1] = lowpart_subreg (<ssedvecmode>mode, > + force_reg (<MODE>mode, operands[1]), > + <MODE>mode); > + operands[2] = lowpart_subreg (<ssedvecmode>mode, > + force_reg (<MODE>mode, operands[2]), > + <MODE>mode); > + emit_insn (gen_rtx_SET (operands[0], operands[3])); > + emit_insn (gen_vpdpbuud_<ssedvecmodelower> (operands[0], operands[3], > + operands[1], operands[2])); > + DONE; > +}) > + > (define_insn "vpdp<vpdotprodtype>_<mode>" > [(set (match_operand:VI4_AVX 0 "register_operand" "=x") > (unspec:VI4_AVX > diff --git a/gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-1.c > b/gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-1.c > new file mode 100644 > index 00000000000..9cadab6a845 > --- /dev/null > +++ b/gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-1.c > @@ -0,0 +1,28 @@ > +/* { dg-do compile } */ > +/* { dg-options "-mavxvnniint8 -O2" } */ > +/* { dg-final { scan-assembler "vpdpbssd\t" } } */ > +/* { dg-final { scan-assembler "vpdpbuud\t" } } */ > + > +int __attribute__((noinline, noclone, optimize("tree-vectorize"))) > +sdot_prod_qi (char * restrict a, char * restrict b, > + int c, int n) > +{ > + int i; > + for (i = 0; i < n; i++) > + { > + c += ((int) a[i] * (int) b[i]); > + } > + return c; > +} > + > +int __attribute__((noinline, noclone, optimize("tree-vectorize"))) > +udot_prod_qi (unsigned char * restrict a, unsigned char *restrict b, > + int c, int n) > +{ > + int i; > + for (i = 0; i < n; i++) > + { > + c += ((int) a[i] * (int) b[i]); > + } > + return c; > +} > diff --git a/gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-2.c > b/gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-2.c > new file mode 100644 > index 00000000000..99853e6c3b7 > --- /dev/null > +++ b/gcc/testsuite/gcc.target/i386/vnniint8-auto-vectorize-2.c > @@ -0,0 +1,75 @@ > +/* { dg-do run } */ > +/* { dg-options "-O2 -mavxvnniint8" } */ > +/* { dg-require-effective-target avxvnniint8 } */ > + > +#define AVXVNNIINT8 > +#ifndef CHECK > +#define CHECK "avx-check.h" > +#endif > + > +#ifndef TEST > +#define TEST avx_test > +#endif > + > +#include CHECK > +#include "vnniint8-auto-vectorize-1.c" > + > +#define N 256 > +char a_i8[N], b_i8[N]; > +unsigned char c_u8[N], d_u8[N]; > +int i8_exp, i8_ref; > + > +int __attribute__((noipa, optimize("no-tree-vectorize"))) > +sdot_prod_qi_scalar (char * restrict a, char * restrict b, > + int c, int n) > +{ > + int i; > + for (i = 0; i < n; i++) > + { > + c += ((int) a[i] * (int) b[i]); > + } > + return c; > +} > + > +int __attribute__((noipa, optimize("no-tree-vectorize"))) > +udot_prod_qi_scalar (unsigned char * restrict a, unsigned char *restrict b, > + int c, int n) > +{ > + int i; > + for (i = 0; i < n; i++) > + { > + c += ((int) a[i] * (int) b[i]); > + } > + return c; > +} > + > +void init () > +{ > + int i; > + > + i8_exp = i8_ref = 127; > + > + for (i = 0; i < N; i++) > + { > + a_i8[i] = (-i + 4) % 128; > + b_i8[i] = (i + 1) % 128; > + c_u8[i] = (i + 3) % 256; > + d_u8[i] = (i + 5) % 256; > + } > +} > + > +void > +TEST (void) > +{ > + init (); > + i8_exp = sdot_prod_qi (a_i8, b_i8, i8_exp, N); > + i8_ref = sdot_prod_qi_scalar (a_i8, b_i8, i8_ref, N); > + if (i8_exp != i8_ref) > + abort (); > + > + init (); > + i8_exp = udot_prod_qi (c_u8, d_u8, i8_exp, N); > + i8_ref = 
udot_prod_qi_scalar (c_u8, d_u8, i8_ref, N); > + if (i8_exp != i8_ref) > + abort (); > +} > -- > 2.18.1 > -- BR, Hongtao