https://gcc.gnu.org/g:372415181e4c6ab5bd1e32d60e7c2c96824e0cc8

commit r13-9504-g372415181e4c6ab5bd1e32d60e7c2c96824e0cc8
Author: Jin Ma <ji...@linux.alibaba.com>
Date:   Wed Apr 2 13:37:07 2025 -0600

    [PATCH v2] RISC-V: Fixbug for slli + addw + zext.w into sh[123]add + zext.w
    
    Assuming we have the following variables:
    
    unsigned long long a0, a1;
    unsigned int a2;
    
    For the expression:
    
    a0 = (a0 << 50) >> 49;  // slli a0, a0, 50 + srli a0, a0, 49
a2 = a1 + a0;           // addw a2, a1, a0 + slli a2, a2, 32 + srli a2, a2, 32
    
    In the optimization process of ZBA (combine pass), it would be optimized to:
    
    a2 = a0 << 1 + a1;      // sh1add a2, a0, a1 + zext.w a2, a2
    
This is clearly incorrect, as it overlooks the fact that a0=a0&0x7ffe, meaning
that the bits a0[32:14] are set to zero.
    
    gcc/ChangeLog:
    
            * config/riscv/bitmanip.md: The optimization can only be applied if
            the high bit of operands[3] is set to 1.
    
    gcc/testsuite/ChangeLog:
    
            * gcc.target/riscv/zba-shNadd-09.c: New test.
            * gcc.target/riscv/zba-shNadd-10.c: New test.
    
    (cherry picked from commit dd6ebc0a3473a830115995bdcaf8f797ebd085a3)

Diff:
---
 gcc/config/riscv/bitmanip.md                   |  4 +++-
 gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c | 12 ++++++++++++
 gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c | 21 +++++++++++++++++++++
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/gcc/config/riscv/bitmanip.md b/gcc/config/riscv/bitmanip.md
index 7aa591689ba8..92f4261ba1a3 100644
--- a/gcc/config/riscv/bitmanip.md
+++ b/gcc/config/riscv/bitmanip.md
@@ -80,7 +80,9 @@
                                                    (match_operand:DI 3 "consecutive_bits_operand")) 0)
                                 (subreg:SI (match_operand:DI 4 "register_operand") 0))))]
   "TARGET_64BIT && TARGET_ZBA
-   && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))"
+   && riscv_shamt_matches_mask_p (INTVAL (operands[2]), INTVAL (operands[3]))
+   /* Ensure the mask includes all the bits in SImode.  */
+   && ((INTVAL (operands[3]) & (HOST_WIDE_INT_1U << 31)) != 0)"
  [(set (match_dup 0) (plus:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 4)))
    (set (match_dup 0) (zero_extend:DI (subreg:SI (match_dup 0) 0)))])
 
diff --git a/gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c b/gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c
new file mode 100644
index 000000000000..303f3cbb8630
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zba-shNadd-09.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zba -mabi=lp64" } */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" } } */
+
+long long sub (unsigned long long a, unsigned long long b)
+{
+  b = (b << 50) >> 49;
+  unsigned int x = a + b;
+  return x;
+}
+
+/* { dg-final { scan-assembler-not {\msh1add} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c b/gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c
new file mode 100644
index 000000000000..883cce271ca1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/zba-shNadd-10.c
@@ -0,0 +1,21 @@
+/* { dg-do run { target { rv64 } } } */
+/* { dg-options "-march=rv64gc_zba -mabi=lp64d -O2" } */
+
+struct {
+  unsigned a : 14;
+  unsigned b : 3;
+} c;
+
+unsigned long long d;
+void e (unsigned long long *f, long p2) { *f = p2; }
+signed g;
+long i;
+
+int main () {
+  c.b = 4;
+  i = -(-c.a - (3023282U + c.a + g));
+  e (&d, i);
+  if (d != 3023282)
+    __builtin_abort ();
+  __builtin_exit (0);
+}

Reply via email to