https://gcc.gnu.org/g:6c7c25c282642565b23da21cab7e313e78744ab5

commit r14-11472-g6c7c25c282642565b23da21cab7e313e78744ab5
Author: Jakub Jelinek <ja...@redhat.com>
Date:   Wed Mar 26 14:03:50 2025 +0100

    widening_mul: Fix up further r14-8680 widening mul issues [PR119417]
    
    The following testcase has been miscompiled since the r14-8680 changes
    for PR113560.  I've already tried to fix some of the issues caused by
    that change in r14-8823 for PR113759, but apparently didn't get it
    right.
    
    The problem is that the r14-8680 changes sometimes set *type_out to
    a narrower type than *new_rhs_out actually has (because they handle
    patterns like _1 = rhs1 & 0xffff; and imply an HImode type_out from
    the mask, even though _1 itself keeps its wider type).
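    
    For illustration (my own sketch, not code from the commit; the
    function name is made up), source of roughly this shape produces the
    pattern in question:
    
      unsigned long long
      widen_from_mask (unsigned int rhs1, unsigned long long acc)
      {
        /* The & 0xffff mask makes the recognizer imply an HImode
           type_out, even though t's actual type is 32-bit.  */
        unsigned int t = rhs1 & 0xffff;
        /* Widening-multiply candidate: 16-bit value times constant,
           accumulated into a 64-bit sum.  */
        return acc + (unsigned long long) t * 2;
      }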
    
    Now, if in convert_mult_to_widen or convert_plusminus_to_widen we
    actually get an optab for the modes we've asked for (i.e. with
    from_mode and to_mode), everything works fine: if the operands don't
    have the expected types, they are converted to them (for INTEGER_CSTs
    with fold_convert, otherwise with build_and_insert_cast).
    On the following testcase on aarch64 that is not the case: we ask
    for from_mode HImode and to_mode DImode, but get actual_mode SImode.
    The mult_rhs1 operand already has SImode and we change type1 to
    unsigned int, so no cast is actually done at all, which means the
    & 0xffff masking is silently lost.
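    
    A worked instance with the testcase's v = 0x10000 (again my own
    sketch, not part of the commit) shows what losing the mask costs:
    
      #include <assert.h>
    
      int
      main (void)
      {
        unsigned v = 0x10000;
        /* Correct: the & 0xFFFF mask zeroes v before the multiply.  */
        unsigned long long correct = (unsigned long long) (v & 0xFFFF) * 2;
        /* Miscompiled: the mask is dropped, all 32 bits get widened,
           so the value foo receives is nonzero and it aborts.  */
        unsigned long long miscompiled = (unsigned long long) v * 2;
        assert (correct == 0);
        assert (miscompiled == 0x20000);
        return 0;
      }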
    
    The following patch ensures that if we change typeN because of a wider
    actual_mode (or because of a sign change), we first cast to the old
    typeN (if the r14-8680 code was encountered; otherwise the operand
    would have the same precision anyway), only then change it, and then
    perhaps cast again.
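    
    Reduced to plain C, the idea behind the cast ordering looks like the
    following (a hypothetical illustration of why the order matters, not
    the patch itself): casting through the old narrow type first performs
    the truncation the mask implied, while casting straight to the wider
    type silently keeps the high bits.
    
      #include <assert.h>
      #include <stdint.h>
    
      int
      main (void)
      {
        uint32_t rhs1 = 0x10000;  /* value with bits above the mask */
        /* Pre-patch: typeN is widened first, so the operand is simply
           zero-extended and the implied & 0xffff truncation is lost.  */
        uint64_t widened_directly = (uint64_t) rhs1;
        /* Post-patch: cast to the old, narrower typeN first, then
           widen; the truncation happens before the extension.  */
        uint64_t widened_via_old_type = (uint64_t) (uint16_t) rhs1;
        assert (widened_directly == 0x10000);
        assert (widened_via_old_type == 0);
        return 0;
      }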
    
    On the testcase on aarch64-linux the patch results in the expected
    -       add     x19, x19, w0, uxtw 1
    +       add     x19, x19, w0, uxth 1
    difference: uxth zero-extends only the low 16 bits of w0, preserving
    the & 0xffff truncation, whereas uxtw zero-extended all 32 bits.
    
    2025-03-26  Jakub Jelinek  <ja...@redhat.com>
    
            PR tree-optimization/119417
            * tree-ssa-math-opts.cc (convert_mult_to_widen): Before changing
            typeN because actual_precision/from_unsignedN differs, cast rhsN
            to typeN if it has a different type.
            (convert_plusminus_to_widen): Before changing typeN because
            actual_precision/from_unsignedN differs, cast mult_rhsN to typeN
            if it has a different type.
    
            * gcc.dg/torture/pr119417.c: New test.
    
    (cherry picked from commit 02132faf4e2fb604758aa86f0b097e6871be595a)

Diff:
---
 gcc/testsuite/gcc.dg/torture/pr119417.c | 24 +++++++++++++++++
 gcc/tree-ssa-math-opts.cc               | 48 ++++++++++++++++++++++++++++++---
 2 files changed, 68 insertions(+), 4 deletions(-)

diff --git a/gcc/testsuite/gcc.dg/torture/pr119417.c b/gcc/testsuite/gcc.dg/torture/pr119417.c
new file mode 100644
index 000000000000..d0b5378ceb42
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr119417.c
@@ -0,0 +1,24 @@
+/* PR tree-optimization/119417 */
+/* { dg-do run { target int32 } } */
+
+__attribute__((noipa)) void
+foo (unsigned long long x)
+{
+  if (x != 0)
+    __builtin_abort ();
+}
+
+unsigned v = 0x10000;
+
+int
+main ()
+{
+  unsigned long long a = 0;
+  while (1)
+    {
+      a = a + ((v & 0xFFFF) * 2);
+      foo (a);
+      if (v)
+       break;
+    }
+}
diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc
index 705f4a4695ac..60cc0c2a3eb2 100644
--- a/gcc/tree-ssa-math-opts.cc
+++ b/gcc/tree-ssa-math-opts.cc
@@ -2866,7 +2866,17 @@ convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
     return false;
   if (actual_precision != TYPE_PRECISION (type1)
       || from_unsigned1 != TYPE_UNSIGNED (type1))
-    type1 = build_nonstandard_integer_type (actual_precision, from_unsigned1);
+    {
+      if (!useless_type_conversion_p (type1, TREE_TYPE (rhs1)))
+       {
+         if (TREE_CODE (rhs1) == INTEGER_CST)
+           rhs1 = fold_convert (type1, rhs1);
+         else
+           rhs1 = build_and_insert_cast (gsi, loc, type1, rhs1);
+       }
+      type1 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned1);
+    }
   if (!useless_type_conversion_p (type1, TREE_TYPE (rhs1)))
     {
       if (TREE_CODE (rhs1) == INTEGER_CST)
@@ -2876,7 +2886,17 @@ convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
     }
   if (actual_precision != TYPE_PRECISION (type2)
       || from_unsigned2 != TYPE_UNSIGNED (type2))
-    type2 = build_nonstandard_integer_type (actual_precision, from_unsigned2);
+    {
+      if (!useless_type_conversion_p (type2, TREE_TYPE (rhs2)))
+       {
+         if (TREE_CODE (rhs2) == INTEGER_CST)
+           rhs2 = fold_convert (type2, rhs2);
+         else
+           rhs2 = build_and_insert_cast (gsi, loc, type2, rhs2);
+       }
+      type2 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned2);
+    }
   if (!useless_type_conversion_p (type2, TREE_TYPE (rhs2)))
     {
       if (TREE_CODE (rhs2) == INTEGER_CST)
@@ -3087,7 +3107,17 @@ convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
   actual_precision = GET_MODE_PRECISION (actual_mode);
   if (actual_precision != TYPE_PRECISION (type1)
       || from_unsigned1 != TYPE_UNSIGNED (type1))
-    type1 = build_nonstandard_integer_type (actual_precision, from_unsigned1);
+    {
+      if (!useless_type_conversion_p (type1, TREE_TYPE (mult_rhs1)))
+       {
+         if (TREE_CODE (mult_rhs1) == INTEGER_CST)
+           mult_rhs1 = fold_convert (type1, mult_rhs1);
+         else
+           mult_rhs1 = build_and_insert_cast (gsi, loc, type1, mult_rhs1);
+       }
+      type1 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned1);
+    }
   if (!useless_type_conversion_p (type1, TREE_TYPE (mult_rhs1)))
     {
       if (TREE_CODE (mult_rhs1) == INTEGER_CST)
@@ -3097,7 +3127,17 @@ convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
     }
   if (actual_precision != TYPE_PRECISION (type2)
       || from_unsigned2 != TYPE_UNSIGNED (type2))
-    type2 = build_nonstandard_integer_type (actual_precision, from_unsigned2);
+    {
+      if (!useless_type_conversion_p (type2, TREE_TYPE (mult_rhs2)))
+       {
+         if (TREE_CODE (mult_rhs2) == INTEGER_CST)
+           mult_rhs2 = fold_convert (type2, mult_rhs2);
+         else
+           mult_rhs2 = build_and_insert_cast (gsi, loc, type2, mult_rhs2);
+       }
+      type2 = build_nonstandard_integer_type (actual_precision,
+                                             from_unsigned2);
+    }
   if (!useless_type_conversion_p (type2, TREE_TYPE (mult_rhs2)))
     {
       if (TREE_CODE (mult_rhs2) == INTEGER_CST)
