In [1], I proposed a patch to generate vmovdqu for all vlddqu intrinsics
when AVX2 is available; it was rejected as
> The instruction is reachable only as __builtin_ia32_lddqu* (aka
> _mm_lddqu_si*), so it was chosen by the programmer for a reason. I
> think that in this case, the compiler should not be too smart and
> change the instruction behind the programmer's back. The caveats are
> also explained at length in the ISA manual.

So this patch is more conservative and only optimizes vlddqu + vinserti128
to vbroadcasti128.
vlddqu + vinserti128 uses a shuffle port in addition to the load port,
compared to vbroadcasti128.  From a latency perspective, vbroadcasti128 is
no worse than vlddqu + vinserti128.

[1] https://gcc.gnu.org/pipermail/gcc-patches/2023-July/625122.html

Bootstrapped and regtested on x86_64-linux-gnu{-m32,}.
Ok for trunk?

gcc/ChangeLog:

        * config/i386/sse.md (*avx2_lddqu_inserti_to_bcasti): New
        pre_reload define_insn_and_split.

gcc/testsuite/ChangeLog:

        * gcc.target/i386/vlddqu_vinserti128.c: New test.
---
 gcc/config/i386/sse.md                         | 18 ++++++++++++++++++
 .../gcc.target/i386/vlddqu_vinserti128.c       | 11 +++++++++++
 2 files changed, 29 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/vlddqu_vinserti128.c

diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 2d81347c7b6..4bdd2b43ba7 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -26600,6 +26600,24 @@ (define_insn "avx2_vbroadcasti128_<mode>"
    (set_attr "prefix" "vex,evex,evex")
    (set_attr "mode" "OI")])
 
+;; Optimize vlddqu + vinserti128 to vbroadcasti128: the former uses an
+;; extra shuffle port in addition to the load port compared to the
+;; latter.  From a latency perspective, vbroadcasti128 is no worse.
+(define_insn_and_split "*avx2_lddqu_inserti_to_bcasti"
+  [(set (match_operand:V4DI 0 "register_operand" "=x,v,v")
+       (vec_concat:V4DI
+         (subreg:V2DI
+           (unspec:V16QI [(match_operand:V16QI 1 "memory_operand")]
+                         UNSPEC_LDDQU) 0)
+         (subreg:V2DI (unspec:V16QI [(match_dup 1)]
+                         UNSPEC_LDDQU) 0)))]
+  "TARGET_AVX2 && ix86_pre_reload_split ()"
+  "#"
+  "&& 1"
+  [(set (match_dup 0)
+       (vec_concat:V4DI (match_dup 1) (match_dup 1)))]
+  "operands[1] = adjust_address (operands[1], V2DImode, 0);")
+
 ;; Modes handled by AVX vec_dup patterns.
 (define_mode_iterator AVX_VEC_DUP_MODE
   [V8SI V8SF V4DI V4DF])
diff --git a/gcc/testsuite/gcc.target/i386/vlddqu_vinserti128.c 
b/gcc/testsuite/gcc.target/i386/vlddqu_vinserti128.c
new file mode 100644
index 00000000000..29699a5fa7f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vlddqu_vinserti128.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx2 -O2" } */
+/* { dg-final { scan-assembler-times "vbroadcasti128" 1 } } */
+/* { dg-final { scan-assembler-not {(?n)vlddqu.*xmm} } } */
+
+#include <immintrin.h>
+__m256i foo(void *data) {
+    __m128i X1 = _mm_lddqu_si128((__m128i*)data);  /* vlddqu load */
+    __m256i V1 = _mm256_broadcastsi128_si256 (X1); /* vinserti128 dup; combined pair should emit vbroadcasti128 */
+    return V1;
+}
-- 
2.39.1.388.g2fc9e9ca3c

Reply via email to