commit:     7b1b42ebd4b59fadb62692e1821d4a14123c85e7
Author:     Sam James <sam <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 16 23:09:29 2025 +0000
Commit:     Sam James <sam <AT> gentoo <DOT> org>
CommitDate: Thu Jan 16 23:10:59 2025 +0000
URL:        https://gitweb.gentoo.org/proj/gcc-patches.git/commit/?id=7b1b42eb

15.0.0: drop patches merged upstream

This reverts commit e42dbea5f6e2a59e7a2aafe1f406ddf697a2658e.

Signed-off-by: Sam James <sam <AT> gentoo.org>

 15.0.0/gentoo/82_all_PR118409-ifcombine.patch      | 154 ---------
 ...xtend-constants-to-compare-with-bitfields.patch | 214 -------------
 ...PR118456-robustify-decode_field_reference.patch | 354 ---------------------
 ...-Fix-build-for-STORE_FLAG_VALUE-0-targets.patch |  87 -----
 15.0.0/gentoo/README.history                       |   6 +
 5 files changed, 6 insertions(+), 809 deletions(-)

diff --git a/15.0.0/gentoo/82_all_PR118409-ifcombine.patch b/15.0.0/gentoo/82_all_PR118409-ifcombine.patch
deleted file mode 100644
index 6362cd1..0000000
--- a/15.0.0/gentoo/82_all_PR118409-ifcombine.patch
+++ /dev/null
@@ -1,154 +0,0 @@
-https://inbox.sourceware.org/gcc-patches/ored17gz9x....@lxoliva.fsfla.org/
-
-From bfb791ad941348a0bb854b770f2294424528bc40 Mon Sep 17 00:00:00 2001
-Message-ID: <bfb791ad941348a0bb854b770f2294424528bc40.1736739564.git....@gentoo.org>
-From: Alexandre Oliva <ol...@adacore.com>
-Date: Mon, 13 Jan 2025 00:37:14 -0300
-Subject: [PATCH] propagate signbit mask to XOR right-hand operand
-
-If a single-bit bitfield takes up the sign bit of a storage unit,
-comparing the corresponding bitfield between two objects loads the
-storage units, XORs them, converts the result to signed char, and
-compares it with zero: ((signed char)(a.<byte> ^ c.<byte>) >= 0).
-
-fold_truth_andor_for_ifcombine recognizes the compare with zero as a
-sign bit test, then it decomposes the XOR into an equality test.
-
-The problem is that, after this decomposition, that figures out the
-width of the accessed fields, we apply the sign bit mask to the
-left-hand operand of the compare, but we failed to also apply it to
-the right-hand operand when both were taken from the same XOR.
-
-This patch fixes that.
-
-Regstrapped on x86_64-linux-gnu.  Ok to install?
-
-for  gcc/ChangeLog
-
-       PR tree-optimization/118409
-       * gimple-fold.cc (fold_truth_andor_for_ifcombine): Apply the
-       signbit mask to the right-hand XOR operand too.
-
-for  gcc/testsuite/ChangeLog
-
-       PR tree-optimization/118409
-       * gcc.dg/field-merge-20.c: New.
----
- gcc/gimple-fold.cc                    | 20 +++++++++
- gcc/testsuite/gcc.dg/field-merge-20.c | 64 +++++++++++++++++++++++++++
- 2 files changed, 84 insertions(+)
- create mode 100644 gcc/testsuite/gcc.dg/field-merge-20.c
-
-diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
-index a3987c4590ae..93ed8b3abb05 100644
---- a/gcc/gimple-fold.cc
-+++ b/gcc/gimple-fold.cc
-@@ -8270,6 +8270,16 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-       ll_and_mask = sign;
-       else
-       ll_and_mask &= sign;
-+      if (l_xor)
-+      {
-+        if (!lr_and_mask.get_precision ())
-+          lr_and_mask = sign;
-+        else
-+          lr_and_mask &= sign;
-+        if (l_const.get_precision ())
-+          l_const &= wide_int::from (lr_and_mask,
-+                                     l_const.get_precision (), UNSIGNED);
-+      }
-     }
- 
-   if (rsignbit)
-@@ -8279,6 +8289,16 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-       rl_and_mask = sign;
-       else
-       rl_and_mask &= sign;
-+      if (r_xor)
-+      {
-+        if (!rr_and_mask.get_precision ())
-+          rr_and_mask = sign;
-+        else
-+          rr_and_mask &= sign;
-+        if (r_const.get_precision ())
-+          r_const &= wide_int::from (rr_and_mask,
-+                                     r_const.get_precision (), UNSIGNED);
-+      }
-     }
- 
-   /* If either comparison code is not correct for our logical operation,
-diff --git a/gcc/testsuite/gcc.dg/field-merge-20.c b/gcc/testsuite/gcc.dg/field-merge-20.c
-new file mode 100644
-index 000000000000..44ac7fae50dc
---- /dev/null
-+++ b/gcc/testsuite/gcc.dg/field-merge-20.c
-@@ -0,0 +1,64 @@
-+/* { dg-do run } */
-+/* { dg-options "-O1" } */
-+
-+/* tree-optimization/118409 */
-+
-+/* Check that tests involving a sign bit of a storage unit are handled
-+   correctly.  The compares are turned into xor tests by earlier passes, and
-+   ifcombine has to propagate the sign bit mask to the right hand of the
-+   compare extracted from the xor, otherwise we'll retain unwanted bits for the
-+   compare.  */
-+
-+typedef struct {
-+    int p : __CHAR_BIT__;
-+    int d : 1;
-+    int b : __CHAR_BIT__ - 2;
-+    int e : 1;
-+} g;
-+
-+g a = {.d = 1, .e = 1}, c = {.b = 1, .d = 1, .e = 1};
-+
-+__attribute__((noipa))
-+int f1 ()
-+{
-+  if (a.d == c.d
-+      && a.e == c.e)
-+    return 0;
-+  return -1;
-+}
-+
-+__attribute__((noipa))
-+int f2 ()
-+{
-+  if (a.d != c.d
-+      || a.e != c.e)
-+    return -1;
-+  return 0;
-+}
-+
-+__attribute__((noipa))
-+int f3 ()
-+{
-+  if (c.d == a.d
-+      && c.e == a.e)
-+    return 0;
-+  return -1;
-+}
-+
-+__attribute__((noipa))
-+int f4 ()
-+{
-+  if (c.d != a.d
-+      || c.e != a.e)
-+    return -1;
-+  return 0;
-+}
-+
-+int main() {
-+  if (f1 () < 0
-+      || f2 () < 0
-+      || f3 () < 0
-+      || f4 () < 0)
-+    __builtin_abort();
-+  return 0;
-+}
-
-base-commit: 0e05b793fba2a9bea9f0fbb1f068679f5dadf514
--- 
-2.48.0
-
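
The dropped PR118409 fix concerns compares of one-bit fields that occupy the sign bit of their storage unit. A minimal sketch of that shape, modelled on the field-merge-20.c test above (struct layout and names are illustrative, not taken from the patch):

    /* A one-bit field sitting on the sign bit of its byte.  The equality
       test below is lowered to a sign-bit check on an XOR of the storage
       units, roughly ((signed char)(x.<byte> ^ y.<byte>) >= 0), and the
       sign-bit mask has to reach both XOR operands.  */
    struct sbit { int pad : __CHAR_BIT__ - 1; int d : 1; };

    static int same_d (struct sbit x, struct sbit y)
    {
      return x.d == y.d;
    }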

diff --git a/15.0.0/gentoo/84_all_PR118456-check-and-extend-constants-to-compare-with-bitfields.patch b/15.0.0/gentoo/84_all_PR118456-check-and-extend-constants-to-compare-with-bitfields.patch
deleted file mode 100644
index e005c02..0000000
--- a/15.0.0/gentoo/84_all_PR118456-check-and-extend-constants-to-compare-with-bitfields.patch
+++ /dev/null
@@ -1,214 +0,0 @@
-https://inbox.sourceware.org/gcc-patches/ora5bugmmi....@lxoliva.fsfla.org/
-
-From 4e794a3a5de8e8fa0fcaf98e5ea298d4a3c71192 Mon Sep 17 00:00:00 2001
-Message-ID: <4e794a3a5de8e8fa0fcaf98e5ea298d4a3c71192.1736844127.git....@gentoo.org>
-From: Alexandre Oliva <ol...@adacore.com>
-Date: Mon, 13 Jan 2025 23:22:45 -0300
-Subject: [PATCH 1/2] check and extend constants to compare with bitfields
- [PR118456]
-
-Add logic to check and extend constants compared with bitfields, so
-that fields are only compared with constants they could actually
-equal.  This involves making sure the signedness doesn't change
-between loads and conversions before shifts: we'd need to carry a lot
-more data to deal with all the possibilities.
-
-Regstrapped on x86_64-linux-gnu.  Ok to install?
-
-for  gcc/ChangeLog
-
-       PR tree-optimization/118456
-       * gimple-fold.cc (decode_field_reference): Punt if shifting
-       after changing signedness.
-       (fold_truth_andor_for_ifcombine): Check extension bits in
-       constants before clipping.
-
-for  gcc/testsuite/ChangeLog
-
-       PR tree-optimization/118456
-       * gcc.dg/field-merge-21.c: New.
-       * gcc.dg/field-merge-22.c: New.
----
- gcc/gimple-fold.cc                    | 40 +++++++++++++++++++-
- gcc/testsuite/gcc.dg/field-merge-21.c | 53 +++++++++++++++++++++++++++
- gcc/testsuite/gcc.dg/field-merge-22.c | 31 ++++++++++++++++
- 3 files changed, 122 insertions(+), 2 deletions(-)
- create mode 100644 gcc/testsuite/gcc.dg/field-merge-21.c
- create mode 100644 gcc/testsuite/gcc.dg/field-merge-22.c
-
-diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
-index 93ed8b3abb05..5b1fbe6db1df 100644
---- a/gcc/gimple-fold.cc
-+++ b/gcc/gimple-fold.cc
-@@ -7712,6 +7712,18 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
- 
-   if (shiftrt)
-     {
-+      /* Punt if we're shifting by more than the loaded bitfield (after
-+       adjustment), or if there's a shift after a change of signedness, punt.
-+       When comparing this field with a constant, we'll check that the
-+       constant is a proper sign- or zero-extension (depending on signedness)
-+       of a value that would fit in the selected portion of the bitfield.  A
-+       shift after a change of signedness would make the extension
-+       non-uniform, and we can't deal with that (yet ???).  See
-+       gcc.dg/field-merge-22.c for a test that would go wrong.  */
-+      if (*pbitsize <= shiftrt
-+        || (convert_before_shift
-+            && outer_type && unsignedp != TYPE_UNSIGNED (outer_type)))
-+      return NULL_TREE;
-       if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-       *pbitpos += shiftrt;
-       *pbitsize -= shiftrt;
-@@ -8512,13 +8524,25 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-      and bit position.  */
-   if (l_const.get_precision ())
-     {
-+      /* Before clipping upper bits of the right-hand operand of the compare,
-+       check that they're sign or zero extensions, depending on how the
-+       left-hand operand would be extended.  */
-+      bool l_non_ext_bits = false;
-+      if (ll_bitsize < lr_bitsize)
-+      {
-+        wide_int zext = wi::zext (l_const, ll_bitsize);
-+        if ((ll_unsignedp ? zext : wi::sext (l_const, ll_bitsize)) == l_const)
-+          l_const = zext;
-+        else
-+          l_non_ext_bits = true;
-+      }
-       /* We're doing bitwise equality tests, so don't bother with sign
-        extensions.  */
-       l_const = wide_int::from (l_const, lnprec, UNSIGNED);
-       if (ll_and_mask.get_precision ())
-       l_const &= wide_int::from (ll_and_mask, lnprec, UNSIGNED);
-       l_const <<= xll_bitpos;
--      if ((l_const & ~ll_mask) != 0)
-+      if (l_non_ext_bits || (l_const & ~ll_mask) != 0)
-       {
-         warning_at (lloc, OPT_Wtautological_compare,
-                     "comparison is always %d", wanted_code == NE_EXPR);
-@@ -8530,11 +8554,23 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-        again.  */
-       gcc_checking_assert (r_const.get_precision ());
- 
-+      /* Before clipping upper bits of the right-hand operand of the compare,
-+       check that they're sign or zero extensions, depending on how the
-+       left-hand operand would be extended.  */
-+      bool r_non_ext_bits = false;
-+      if (rl_bitsize < rr_bitsize)
-+      {
-+        wide_int zext = wi::zext (r_const, rl_bitsize);
-+        if ((rl_unsignedp ? zext : wi::sext (r_const, rl_bitsize)) == r_const)
-+          r_const = zext;
-+        else
-+          r_non_ext_bits = true;
-+      }
-       r_const = wide_int::from (r_const, lnprec, UNSIGNED);
-       if (rl_and_mask.get_precision ())
-       r_const &= wide_int::from (rl_and_mask, lnprec, UNSIGNED);
-       r_const <<= xrl_bitpos;
--      if ((r_const & ~rl_mask) != 0)
-+      if (r_non_ext_bits || (r_const & ~rl_mask) != 0)
-       {
-         warning_at (rloc, OPT_Wtautological_compare,
-                     "comparison is always %d", wanted_code == NE_EXPR);
-diff --git a/gcc/testsuite/gcc.dg/field-merge-21.c b/gcc/testsuite/gcc.dg/field-merge-21.c
-new file mode 100644
-index 000000000000..042b2123eb63
---- /dev/null
-+++ b/gcc/testsuite/gcc.dg/field-merge-21.c
-@@ -0,0 +1,53 @@
-+/* { dg-do run } */
-+/* { dg-options "-O2" } */
-+
-+/* PR tree-optimization/118456 */
-+/* Check that shifted fields compared with a constants compare correctly even
-+   if the constant contains sign-extension bits not present in the bit
-+   range.  */
-+
-+struct S { unsigned long long o; unsigned short a, b; } s;
-+
-+__attribute__((noipa)) int
-+foo (void)
-+{
-+  return ((unsigned char) s.a) >> 3 == 17 && ((signed char) s.b) >> 2 == -27;
-+}
-+
-+__attribute__((noipa)) int
-+bar (void)
-+{
-+  return ((unsigned char) s.a) >> 3 == 17 && ((signed char) s.b) >> 2 == -91;
-+}
-+
-+__attribute__((noipa)) int
-+bars (void)
-+{
-+  return ((unsigned char) s.a) >> 3 == 17 && ((signed char) s.b) >> 2 == 37;
-+}
-+
-+__attribute__((noipa)) int
-+baz (void)
-+{
-+  return ((unsigned char) s.a) >> 3 == 49 && ((signed char) s.b) >> 2 == -27;
-+}
-+
-+__attribute__((noipa)) int
-+bazs (void)
-+{
-+  return ((unsigned char) s.a) >> 3 == (unsigned char) -15 && ((signed char) s.b) >> 2 == -27;
-+}
-+
-+int
-+main ()
-+{
-+  s.a = 17 << 3;
-+  s.b = (unsigned short)(-27u << 2);
-+  if (foo () != 1
-+      || bar () != 0
-+      || bars () != 0
-+      || baz () != 0
-+      || bazs () != 0)
-+    __builtin_abort ();
-+  return 0;
-+}
-diff --git a/gcc/testsuite/gcc.dg/field-merge-22.c b/gcc/testsuite/gcc.dg/field-merge-22.c
-new file mode 100644
-index 000000000000..45b29c0bccaf
---- /dev/null
-+++ b/gcc/testsuite/gcc.dg/field-merge-22.c
-@@ -0,0 +1,31 @@
-+/* { dg-do run } */
-+/* { dg-options "-O2" } */
-+
-+/* PR tree-optimization/118456 */
-+/* Check that compares with constants take into account sign/zero extension of
-+   both the bitfield and of the shifting type.  */
-+
-+#define shift (__CHAR_BIT__ - 4)
-+
-+struct S {
-+  signed char a : shift + 2;
-+  signed char b : shift + 2;
-+  short ignore[0];
-+} s;
-+
-+__attribute__((noipa)) int
-+foo (void)
-+{
-+  return ((unsigned char) s.a) >> shift == 15
-+    && ((unsigned char) s.b) >> shift == 0;
-+}
-+
-+int
-+main ()
-+{
-+  s.a = -1;
-+  s.b = 1;
-+  if (foo () != 1)
-+    __builtin_abort ();
-+  return 0;
-+}
-
-base-commit: 31c3c1a83fd885b4687c9f6f7acd68af76d758d3
--- 
-2.48.0
-
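
The dropped PR118456 fix deals with constants compared against casted and shifted bitfields: the constant may carry sign-extension bits outside the bits actually loaded, and those bits must be checked rather than silently clipped. A minimal sketch in the spirit of the field-merge-21.c test above (field widths and values are illustrative):

    struct pr118456 { unsigned short a, b; };

    /* -27 matches the shifted signed char taken from s.b only as a sign
       extension; the folded compare must verify those upper constant bits
       before discarding them.  */
    static int check (struct pr118456 s)
    {
      return ((unsigned char) s.a) >> 3 == 17
             && ((signed char) s.b) >> 2 == -27;
    }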

diff --git a/15.0.0/gentoo/85_all_PR118456-robustify-decode_field_reference.patch b/15.0.0/gentoo/85_all_PR118456-robustify-decode_field_reference.patch
deleted file mode 100644
index 065c958..0000000
--- a/15.0.0/gentoo/85_all_PR118456-robustify-decode_field_reference.patch
+++ /dev/null
@@ -1,354 +0,0 @@
-https://inbox.sourceware.org/gcc-patches/or1px6gf6r....@lxoliva.fsfla.org/
-
-From e3a5a707fd88522a73d05841970fa2465e991eaa Mon Sep 17 00:00:00 2001
-Message-ID: <e3a5a707fd88522a73d05841970fa2465e991eaa.1736844127.git....@gentoo.org>
-In-Reply-To: <4e794a3a5de8e8fa0fcaf98e5ea298d4a3c71192.1736844127.git....@gentoo.org>
-References: <4e794a3a5de8e8fa0fcaf98e5ea298d4a3c71192.1736844127.git....@gentoo.org>
-From: Alexandre Oliva <ol...@adacore.com>
-Date: Tue, 14 Jan 2025 02:03:24 -0300
-Subject: [PATCH 2/2] robustify decode_field_reference
-
-Arrange for decode_field_reference to use local variables throughout,
-to modify the out parms only when we're about to return non-NULL, and
-to drop the unused case of NULL pand_mask, that had a latent failure
-to detect signbit masking.
-
-Regstrapped on x86_64-linux-gnu along with the PR118456 patch.
-Ok to install?
-
-for  gcc/ChangeLog
-
-       * gimple-fold.cc (decode_field_reference): Robustify to set
-       out parms only when returning non-NULL.
-       (fold_truth_andor_for_ifcombine): Bail if
-       decode_field_reference returns NULL.  Add complementary assert
-       on r_const's not being set when l_const isn't.
----
- gcc/gimple-fold.cc | 155 +++++++++++++++++++++++----------------------
- 1 file changed, 80 insertions(+), 75 deletions(-)
-
-diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
-index 5b1fbe6db1df..3c971a29ef04 100644
---- a/gcc/gimple-fold.cc
-+++ b/gcc/gimple-fold.cc
-@@ -7510,18 +7510,17 @@ gimple_binop_def_p (enum tree_code code, tree t, tree op[2])
-    *PREVERSEP is set to the storage order of the field.
- 
-    *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.  If
--   PAND_MASK *is NULL, BIT_AND_EXPR is not recognized.  If *PAND_MASK
--   is initially set to a mask with nonzero precision, that mask is
-+   *PAND_MASK is initially set to a mask with nonzero precision, that mask is
-    combined with the found mask, or adjusted in precision to match.
- 
-    *PSIGNBIT is set to TRUE if, before clipping to *PBITSIZE, the mask
-    encompassed bits that corresponded to extensions of the sign bit.
- 
--   *XOR_P is to be FALSE if EXP might be a XOR used in a compare, in which
--   case, if XOR_CMP_OP is a zero constant, it will be overridden with *PEXP,
--   *XOR_P will be set to TRUE, *XOR_PAND_MASK will be copied from *PAND_MASK,
--   and the left-hand operand of the XOR will be decoded.  If *XOR_P is TRUE,
--   XOR_CMP_OP and XOR_PAND_MASK are supposed to be NULL, and then the
-+   *PXORP is to be FALSE if EXP might be a XOR used in a compare, in which
-+   case, if PXOR_CMP_OP is a zero constant, it will be overridden with *PEXP,
-+   *PXORP will be set to TRUE, *PXOR_AND_MASK will be copied from *PAND_MASK,
-+   and the left-hand operand of the XOR will be decoded.  If *PXORP is TRUE,
-+   PXOR_CMP_OP and PXOR_AND_MASK are supposed to be NULL, and then the
-    right-hand operand of the XOR will be decoded.
- 
-    *LOAD is set to the load stmt of the innermost reference, if any,
-@@ -7538,8 +7537,8 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-                       HOST_WIDE_INT *pbitpos,
-                       bool *punsignedp, bool *preversep, bool *pvolatilep,
-                       wide_int *pand_mask, bool *psignbit,
--                      bool *xor_p, tree *xor_cmp_op, wide_int *xor_pand_mask,
--                      gimple **load, location_t loc[4])
-+                      bool *pxorp, tree *pxor_cmp_op, wide_int *pxor_and_mask,
-+                      gimple **pload, location_t loc[4])
- {
-   tree exp = *pexp;
-   tree outer_type = 0;
-@@ -7549,9 +7548,11 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-   tree res_ops[2];
-   machine_mode mode;
-   bool convert_before_shift = false;
--
--  *load = NULL;
--  *psignbit = false;
-+  bool signbit = false;
-+  bool xorp = false;
-+  tree xor_cmp_op;
-+  wide_int xor_and_mask;
-+  gimple *load = NULL;
- 
-   /* All the optimizations using this function assume integer fields.
-      There are problems with FP fields since the type_for_size call
-@@ -7576,7 +7577,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
- 
-   /* Recognize and save a masking operation.  Combine it with an
-      incoming mask.  */
--  if (pand_mask && gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
-+  if (gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
-       && TREE_CODE (res_ops[1]) == INTEGER_CST)
-     {
-       loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
-@@ -7596,29 +7597,29 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-           and_mask &= wide_int::from (*pand_mask, prec_op, UNSIGNED);
-       }
-     }
--  else if (pand_mask)
-+  else
-     and_mask = *pand_mask;
- 
-   /* Turn (a ^ b) [!]= 0 into a [!]= b.  */
--  if (xor_p && gimple_binop_def_p (BIT_XOR_EXPR, exp, res_ops))
-+  if (pxorp && gimple_binop_def_p (BIT_XOR_EXPR, exp, res_ops))
-     {
-       /* No location recorded for this one, it's entirely subsumed by the
-        compare.  */
--      if (*xor_p)
-+      if (*pxorp)
-       {
-         exp = res_ops[1];
--        gcc_checking_assert (!xor_cmp_op && !xor_pand_mask);
-+        gcc_checking_assert (!pxor_cmp_op && !pxor_and_mask);
-       }
--      else if (!xor_cmp_op)
-+      else if (!pxor_cmp_op)
-       /* Not much we can do when xor appears in the right-hand compare
-          operand.  */
-       return NULL_TREE;
--      else if (integer_zerop (*xor_cmp_op))
-+      else if (integer_zerop (*pxor_cmp_op))
-       {
--        *xor_p = true;
-+        xorp = true;
-         exp = res_ops[0];
--        *xor_cmp_op = *pexp;
--        *xor_pand_mask = *pand_mask;
-+        xor_cmp_op = *pexp;
-+        xor_and_mask = *pand_mask;
-       }
-     }
- 
-@@ -7646,12 +7647,12 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-   /* Yet another chance to drop conversions.  This one is allowed to
-      match a converting load, subsuming the load identification block
-      below.  */
--  if (!outer_type && gimple_convert_def_p (exp, res_ops, load))
-+  if (!outer_type && gimple_convert_def_p (exp, res_ops, &load))
-     {
-       outer_type = TREE_TYPE (exp);
-       loc[0] = gimple_location (SSA_NAME_DEF_STMT (exp));
--      if (*load)
--      loc[3] = gimple_location (*load);
-+      if (load)
-+      loc[3] = gimple_location (load);
-       exp = res_ops[0];
-       /* This looks backwards, but we're going back the def chain, so if we
-        find the conversion here, after finding a shift, that's because the
-@@ -7662,14 +7663,13 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-     }
- 
-   /* Identify the load, if there is one.  */
--  if (!(*load) && TREE_CODE (exp) == SSA_NAME
--      && !SSA_NAME_IS_DEFAULT_DEF (exp))
-+  if (!load && TREE_CODE (exp) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (exp))
-     {
-       gimple *def = SSA_NAME_DEF_STMT (exp);
-       if (gimple_assign_load_p (def))
-       {
-         loc[3] = gimple_location (def);
--        *load = def;
-+        load = def;
-         exp = gimple_assign_rhs1 (def);
-       }
-     }
-@@ -7694,20 +7694,14 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-         && !type_has_mode_precision_p (TREE_TYPE (inner))))
-     return NULL_TREE;
- 
--  *pbitsize = bs;
--  *pbitpos = bp;
--  *punsignedp = unsignedp;
--  *preversep = reversep;
--  *pvolatilep = volatilep;
--
-   /* Adjust shifts...  */
-   if (convert_before_shift
--      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
-+      && outer_type && bs > TYPE_PRECISION (outer_type))
-     {
--      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
--      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
--      *pbitpos += excess;
--      *pbitsize -= excess;
-+      HOST_WIDE_INT excess = bs - TYPE_PRECISION (outer_type);
-+      if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-+      bp += excess;
-+      bs -= excess;
-     }
- 
-   if (shiftrt)
-@@ -7720,49 +7714,57 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-        shift after a change of signedness would make the extension
-        non-uniform, and we can't deal with that (yet ???).  See
-        gcc.dg/field-merge-22.c for a test that would go wrong.  */
--      if (*pbitsize <= shiftrt
-+      if (bs <= shiftrt
-         || (convert_before_shift
-             && outer_type && unsignedp != TYPE_UNSIGNED (outer_type)))
-       return NULL_TREE;
--      if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
--      *pbitpos += shiftrt;
--      *pbitsize -= shiftrt;
-+      if (!reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-+      bp += shiftrt;
-+      bs -= shiftrt;
-     }
- 
-   /* ... and bit position.  */
-   if (!convert_before_shift
--      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
-+      && outer_type && bs > TYPE_PRECISION (outer_type))
-     {
--      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
--      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
--      *pbitpos += excess;
--      *pbitsize -= excess;
-+      HOST_WIDE_INT excess = bs - TYPE_PRECISION (outer_type);
-+      if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
-+      bp += excess;
-+      bs -= excess;
-     }
- 
--  *pexp = exp;
--
-   /* If the number of bits in the reference is the same as the bitsize of
-      the outer type, then the outer type gives the signedness. Otherwise
-      (in case of a small bitfield) the signedness is unchanged.  */
--  if (outer_type && *pbitsize == TYPE_PRECISION (outer_type))
--    *punsignedp = TYPE_UNSIGNED (outer_type);
-+  if (outer_type && bs == TYPE_PRECISION (outer_type))
-+    unsignedp = TYPE_UNSIGNED (outer_type);
- 
--  if (pand_mask)
-+  /* Make the mask the expected width.  */
-+  if (and_mask.get_precision () != 0)
-     {
--      /* Make the mask the expected width.  */
--      if (and_mask.get_precision () != 0)
--      {
--        /* If the AND_MASK encompasses bits that would be extensions of
--           the sign bit, set *PSIGNBIT.  */
--        if (!unsignedp
--            && and_mask.get_precision () > *pbitsize
--            && (and_mask
--                & wi::mask (*pbitsize, true, and_mask.get_precision ())) != 0)
--          *psignbit = true;
--        and_mask = wide_int::from (and_mask, *pbitsize, UNSIGNED);
--      }
-+      /* If the AND_MASK encompasses bits that would be extensions of
-+       the sign bit, set SIGNBIT.  */
-+      if (!unsignedp
-+        && and_mask.get_precision () > bs
-+        && (and_mask & wi::mask (bs, true, and_mask.get_precision ())) != 0)
-+      signbit = true;
-+      and_mask = wide_int::from (and_mask, bs, UNSIGNED);
-+    }
- 
--      *pand_mask = and_mask;
-+  *pexp = exp;
-+  *pload = load;
-+  *pbitsize = bs;
-+  *pbitpos = bp;
-+  *punsignedp = unsignedp;
-+  *preversep = reversep;
-+  *pvolatilep = volatilep;
-+  *psignbit = signbit;
-+  *pand_mask = and_mask;
-+  if (xorp)
-+    {
-+      *pxorp = xorp;
-+      *pxor_cmp_op = xor_cmp_op;
-+      *pxor_and_mask = xor_and_mask;
-     }
- 
-   return inner;
-@@ -8168,19 +8170,27 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-                                    &ll_and_mask, &ll_signbit,
-                                    &l_xor, &lr_arg, &lr_and_mask,
-                                    &ll_load, ll_loc);
-+  if (!ll_inner)
-+    return 0;
-   lr_inner = decode_field_reference (&lr_arg, &lr_bitsize, &lr_bitpos,
-                                    &lr_unsignedp, &lr_reversep, &volatilep,
-                                    &lr_and_mask, &lr_signbit, &l_xor, 0, 0,
-                                    &lr_load, lr_loc);
-+  if (!lr_inner)
-+    return 0;
-   rl_inner = decode_field_reference (&rl_arg, &rl_bitsize, &rl_bitpos,
-                                    &rl_unsignedp, &rl_reversep, &volatilep,
-                                    &rl_and_mask, &rl_signbit,
-                                    &r_xor, &rr_arg, &rr_and_mask,
-                                    &rl_load, rl_loc);
-+  if (!rl_inner)
-+    return 0;
-   rr_inner = decode_field_reference (&rr_arg, &rr_bitsize, &rr_bitpos,
-                                    &rr_unsignedp, &rr_reversep, &volatilep,
-                                    &rr_and_mask, &rr_signbit, &r_xor, 0, 0,
-                                    &rr_load, rr_loc);
-+  if (!rr_inner)
-+    return 0;
- 
-   /* It must be true that the inner operation on the lhs of each
-      comparison must be the same if we are to be able to do anything.
-@@ -8188,16 +8198,13 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-      the rhs's.  If one is a load and the other isn't, we have to be
-      conservative and avoid the optimization, otherwise we could get
-      SRAed fields wrong.  */
--  if (volatilep
--      || ll_reversep != rl_reversep
--      || ll_inner == 0 || rl_inner == 0)
-+  if (volatilep || ll_reversep != rl_reversep)
-     return 0;
- 
-   if (! operand_equal_p (ll_inner, rl_inner, 0))
-     {
-       /* Try swapping the operands.  */
-       if (ll_reversep != rr_reversep
--        || !rr_inner
-         || !operand_equal_p (ll_inner, rr_inner, 0))
-       return 0;
- 
-@@ -8266,7 +8273,6 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-       lr_reversep = ll_reversep;
-     }
-   else if (lr_reversep != rr_reversep
--         || lr_inner == 0 || rr_inner == 0
-          || ! operand_equal_p (lr_inner, rr_inner, 0)
-          || ((lr_load && rr_load)
-              ? gimple_vuse (lr_load) != gimple_vuse (rr_load)
-@@ -8520,6 +8526,9 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-   else
-     rl_mask = wi::shifted_mask (xrl_bitpos, rl_bitsize, false, lnprec);
- 
-+  /* When we set l_const, we also set r_const.  */
-+  gcc_checking_assert (!l_const.get_precision () == !r_const.get_precision ());
-+
-   /* Adjust right-hand constants in both original comparisons to match width
-      and bit position.  */
-   if (l_const.get_precision ())
-@@ -8550,10 +8559,6 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
-         return constant_boolean_node (wanted_code == NE_EXPR, truth_type);
-       }
- 
--      /* When we set l_const, we also set r_const, so we need not test it
--       again.  */
--      gcc_checking_assert (r_const.get_precision ());
--
-       /* Before clipping upper bits of the right-hand operand of the compare,
-        check that they're sign or zero extensions, depending on how the
-        left-hand operand would be extended.  */
--- 
-2.48.0
-
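
Besides the added NULL-return checks, the heart of the dropped decode_field_reference rework is the usual compute-into-locals idiom: out parameters are written only once the function is committed to returning non-NULL, so callers never observe half-written results on early-return paths. A generic sketch of that idiom (names and types are hypothetical, not GCC code):

    /* Work in locals; commit the out parameters only on the success path.  */
    static const char *decode_sketch (unsigned input, unsigned *pbits, unsigned *ppos)
    {
      unsigned bits = input & 0xffu;
      unsigned pos = input >> 8;

      if (bits == 0)            /* bail out without touching *pbits / *ppos */
        return 0;

      *pbits = bits;
      *ppos = pos;
      return "ok";
    }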

diff --git a/15.0.0/gentoo/86_all_PR118418-Fix-build-for-STORE_FLAG_VALUE-0-targets.patch b/15.0.0/gentoo/86_all_PR118418-Fix-build-for-STORE_FLAG_VALUE-0-targets.patch
deleted file mode 100644
index ce0b338..0000000
--- a/15.0.0/gentoo/86_all_PR118418-Fix-build-for-STORE_FLAG_VALUE-0-targets.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From a1a14ce3c39c25fecf052ffde063fc0ecfc2ffa3 Mon Sep 17 00:00:00 2001
-Message-ID: <a1a14ce3c39c25fecf052ffde063fc0ecfc2ffa3.1736867096.git....@gentoo.org>
-From: Richard Sandiford <richard.sandif...@arm.com>
-Date: Mon, 13 Jan 2025 19:37:12 +0000
-Subject: [PATCH] Fix build for STORE_FLAG_VALUE<0 targets [PR118418]
-
-In g:06c4cf398947b53b4bfc65752f9f879bb2d07924 I mishandled signed
-comparisons of comparison results on STORE_FLAG_VALUE < 0 targets
-(despite specifically referencing STORE_FLAG_VALUE in the commit
-message).  There, (lt TRUE FALSE) is true, although (ltu FALSE TRUE)
-still holds.
-
-Things get messy with vector modes, and since those weren't the focus
-of the original commit, it seemed better to punt on them for now.
-However, punting means that this optimisation no longer feels like
-a natural tail-call operation.  The patch therefore converts
-"return simplify..." to the usual call-and-conditional-return pattern.
-
-gcc/
-       PR target/118418
-       * simplify-rtx.cc (simplify_context::simplify_relational_operation_1):
-       Take STORE_FLAG_VALUE into account when handling signed comparisons
-       of comparison results.
----
- gcc/simplify-rtx.cc | 39 ++++++++++++++++++++++++---------------
- 1 file changed, 24 insertions(+), 15 deletions(-)
-
-diff --git a/gcc/simplify-rtx.cc b/gcc/simplify-rtx.cc
-index 71c5d3c1b1b8..dda8fc689e79 100644
---- a/gcc/simplify-rtx.cc
-+++ b/gcc/simplify-rtx.cc
-@@ -6434,7 +6434,7 @@ simplify_context::simplify_relational_operation_1 (rtx_code code,
-       return simplify_gen_binary (AND, mode, XEXP (tmp, 0), const1_rtx);
-     }
- 
--  /* For two booleans A and B:
-+  /* For two unsigned booleans A and B:
- 
-      A >  B == ~B & A
-      A >= B == ~B | A
-@@ -6443,20 +6443,29 @@ simplify_context::simplify_relational_operation_1 (rtx_code code,
-      A == B == ~A ^ B (== ~B ^ A)
-      A != B ==  A ^ B
- 
--     simplify_logical_relational_operation checks whether A and B
--     are booleans.  */
--  if (code == GTU || code == GT)
--    return simplify_logical_relational_operation (AND, mode, op1, op0, true);
--  if (code == GEU || code == GE)
--    return simplify_logical_relational_operation (IOR, mode, op1, op0, true);
--  if (code == LTU || code == LT)
--    return simplify_logical_relational_operation (AND, mode, op0, op1, true);
--  if (code == LEU || code == LE)
--    return simplify_logical_relational_operation (IOR, mode, op0, op1, true);
--  if (code == EQ)
--    return simplify_logical_relational_operation (XOR, mode, op0, op1, true);
--  if (code == NE)
--    return simplify_logical_relational_operation (XOR, mode, op0, op1);
-+     For signed comparisons, we have to take STORE_FLAG_VALUE into account,
-+     with the rules above applying for positive STORE_FLAG_VALUE and with
-+     the relations reversed for negative STORE_FLAG_VALUE.  */
-+  if (is_a<scalar_int_mode> (cmp_mode)
-+      && COMPARISON_P (op0)
-+      && COMPARISON_P (op1))
-+    {
-+      rtx t = NULL_RTX;
-+      if (code == GTU || code == (STORE_FLAG_VALUE > 0 ? GT : LT))
-+      t = simplify_logical_relational_operation (AND, mode, op1, op0, true);
-+      else if (code == GEU || code == (STORE_FLAG_VALUE > 0 ? GE : LE))
-+      t = simplify_logical_relational_operation (IOR, mode, op1, op0, true);
-+      else if (code == LTU || code == (STORE_FLAG_VALUE > 0 ? LT : GT))
-+      t = simplify_logical_relational_operation (AND, mode, op0, op1, true);
-+      else if (code == LEU || code == (STORE_FLAG_VALUE > 0 ? LE : GE))
-+      t = simplify_logical_relational_operation (IOR, mode, op0, op1, true);
-+      else if (code == EQ)
-+      t = simplify_logical_relational_operation (XOR, mode, op0, op1, true);
-+      else if (code == NE)
-+      t = simplify_logical_relational_operation (XOR, mode, op0, op1);
-+      if (t)
-+      return t;
-+    }
- 
-   return NULL_RTX;
- }
--- 
-2.48.0
-
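
The dropped PR118418 fix rests on the unsigned-boolean identities quoted in the simplify-rtx.cc comment above; targets with a negative STORE_FLAG_VALUE represent true as -1, so the signed relations flip, which is exactly what the patch handles. For plain 0/1 booleans the identities can be checked directly (illustrative only):

    /* Returns 1 for all 0/1 inputs: A > B == ~B & A, A >= B == ~B | A,
       A == B == ~A ^ B and A != B == A ^ B, all masked to the low bit.  */
    static int boolean_identities_hold (unsigned a, unsigned b)
    {
      a &= 1; b &= 1;
      return (a > b)  == ((~b & a) & 1)
          && (a >= b) == ((~b | a) & 1)
          && (a == b) == ((~a ^ b) & 1)
          && (a != b) == ((a ^ b) & 1);
    }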

diff --git a/15.0.0/gentoo/README.history b/15.0.0/gentoo/README.history
index 4650c20..1baa80e 100644
--- a/15.0.0/gentoo/README.history
+++ b/15.0.0/gentoo/README.history
@@ -1,3 +1,9 @@
+42     ????
+
+       - 82_all_PR118409-ifcombine.patch
+       - 84_all_PR118456-check-and-extend-constants-to-compare-with-bitfields.patch
+       - 85_all_PR118456-robustify-decode_field_reference.patch
+
 41     16 January 2025
 
        + 82_all_PR118409-ifcombine.patch
