On 2/19/24 02:55, Jakub Jelinek wrote:
On Fri, Feb 16, 2024 at 01:51:54PM +0000, Jonathan Wakely wrote:
Ah, although __atomic_compare_exchange only takes pointers, the
compiler replaces that with a call to __atomic_compare_exchange_n
which takes the newval by value, which presumably uses an 80-bit FP
register and so the padding bits become indeterminate again.
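In libstdc++ terms the same issue can be seen with something along these
lines (a hypothetical reproducer, not part of the patch below):

  #include <atomic>

  // Hypothetical reproducer (not from the patch): compare_exchange_strong
  // ends up passing the desired long double by value through the builtin,
  // so with the old lowering the 6 padding bytes of the 16-byte object
  // that is compared and stored can be indeterminate.
  bool
  upd (std::atomic<long double> &a, long double oldv, long double newv)
  {
    return a.compare_exchange_strong (oldv, newv, std::memory_order_relaxed);
  }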

The problem is that the __atomic_{,compare_}exchange lowering, if it has
a supported atomic size of 1/2/4/8/16 bytes, emits code like:
   _3 = *p2;
   _4 = VIEW_CONVERT_EXPR<I_type> (_3);

Hmm, I find that surprising; I thought of VIEW_CONVERT_EXPR as working on lvalues, not rvalues, based on the documentation describing it as roughly *(type2 *)&X.

Now I see that gimplify_expr does what you describe, and I wonder what the advantage of that is. That seems to go back to richi's r206420 for PR59471. r270579 for PR limited it to cases where the caller wants an rvalue; perhaps it should also be avoided when the operand is an INDIRECT_REF?

so if long double or some small struct etc. has some carefully filled
padding bits, those bits can be lost on the assignment.  The library call
for __atomic_{,compare_}exchange would actually work, because it would
load the value from memory using an integral type or memcpy.
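Roughly what such an out-of-line fallback does (an illustrative sketch
only, not the actual libatomic sources, and with the locking that makes
it atomic left out):

  #include <stdbool.h>
  #include <string.h>

  /* Illustrative sketch, not the real libatomic code: the generic
     fallback works on raw bytes, so padding bits are compared and
     copied along with the value.  */
  static bool
  generic_compare_exchange (size_t n, void *ptr, void *expected,
                            const void *desired)
  {
    if (memcmp (ptr, expected, n) == 0)
      {
        memcpy (ptr, desired, n);      /* whole-object byte copy */
        return true;
      }
    memcpy (expected, ptr, n);         /* report back the actual value */
    return false;
  }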
E.g. on
void
foo (long double *a, long double *b, long double *c)
{
  __atomic_compare_exchange (a, b, c, false, __ATOMIC_RELAXED,
                              __ATOMIC_RELAXED);
}
we end up at -O0 with:
        fldt    (%rax)
        fstpt   -48(%rbp)
        movq    -48(%rbp), %rax
        movq    -40(%rbp), %rdx
i.e. load *c from memory into a 387 register, store it back to an
uninitialized stack slot (so the padding bits in there are now random) and
then load it as a __uint128_t (a pair of GPR regs).  The problem is that we
first load the value using whatever type the pointer points to and then
VIEW_CONVERT_EXPR that value:
   p2 = build_indirect_ref (loc, p2, RO_UNARY_STAR);
   p2 = build1 (VIEW_CONVERT_EXPR, I_type, p2);
The following patch fixes that by creating a MEM_REF instead, with the
I_type type, but with the original pointer type on the second argument for
aliasing purposes, so we actually preserve the padding bits that way.
I've done that for types which may have padding and also for
non-integral/pointer types, because I fear that even for floating point
types like double or float, which don't have padding bits, copying through
a floating point register could misbehave in the presence of sNaNs or
unsupported bit combinations.
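In source terms the padding-preserving effect of that MEM_REF load is
roughly the following (an illustrative sketch only; the lowering builds
the MEM_REF tree described above, not this code):

  /* Illustrative only: the MEM_REF of I_type reads all 16 bytes of the
     long double slot as an integer, so padding bits survive; the second
     MEM_REF operand keeps the long double * type for aliasing purposes.  */
  static unsigned __int128
  load_desired_bits (const long double *c)
  {
    unsigned __int128 bits;
    __builtin_memcpy (&bits, c, sizeof bits);   /* copies padding too */
    return bits;
  }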
With this patch, instead of the above assembly, we emit
        movq    8(%rax), %rdx
        movq    (%rax), %rax
I had to add support for MEM_REF in pt.cc, though with the assumption
that it was originally created with non-dependent types/operands (which
is the case here for the __atomic*exchange lowering).

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

2024-02-19  Jakub Jelinek  <ja...@redhat.com>

gcc/c-family/
        * c-common.cc (resolve_overloaded_atomic_exchange): For
        non-integral/pointer types or types which may have padding,
        instead of setting p1 to VIEW_CONVERT_EXPR<I_type> (*p1), set it to
        MEM_REF with p1 and (typeof (p1)) 0 operands and I_type type.
        (resolve_overloaded_atomic_compare_exchange): Similarly for p2.
gcc/cp/
        * pt.cc (tsubst_expr): Handle MEM_REF.
gcc/testsuite/
        * g++.dg/ext/atomic-5.C: New test.

--- gcc/c-family/c-common.cc.jj 2024-02-16 17:33:43.995739790 +0100
+++ gcc/c-family/c-common.cc    2024-02-17 11:11:34.029474214 +0100
@@ -7794,8 +7794,23 @@ resolve_overloaded_atomic_exchange (loca
    p0 = build1 (VIEW_CONVERT_EXPR, I_type_ptr, p0);
    (*params)[0] = p0;
    /* Convert new value to required type, and dereference it.  */
-  p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
-  p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
+  if ((!INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (p1)))
+       && !POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (p1))))
+      || clear_padding_type_may_have_padding_p (TREE_TYPE (TREE_TYPE (p1))))
+    {
+      /* If *p1 type can have padding or may involve floating point which
+        could e.g. be promoted to wider precision and demoted afterwards,
+        state of padding bits might not be preserved.  */
+      build_indirect_ref (loc, p1, RO_UNARY_STAR);
+      p1 = build2_loc (loc, MEM_REF, I_type,
+                      build1 (VIEW_CONVERT_EXPR, I_type_ptr, p1),
+                      build_zero_cst (TREE_TYPE (p1)));
+    }
+  else
+    {
+      p1 = build_indirect_ref (loc, p1, RO_UNARY_STAR);
+      p1 = build1 (VIEW_CONVERT_EXPR, I_type, p1);
+    }
    (*params)[1] = p1;
   /* Move memory model to the 3rd position, and end param list.  */
@@ -7874,8 +7889,23 @@ resolve_overloaded_atomic_compare_exchan
    (*params)[1] = p1;
   /* Convert desired value to required type, and dereference it.  */
-  p2 = build_indirect_ref (loc, p2, RO_UNARY_STAR);
-  p2 = build1 (VIEW_CONVERT_EXPR, I_type, p2);
+  if ((!INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (p2)))
+       && !POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (p2))))
+      || clear_padding_type_may_have_padding_p (TREE_TYPE (TREE_TYPE (p2))))
+    {
+      /* If *p2 type can have padding or may involve floating point which
+        could e.g. be promoted to wider precision and demoted afterwards,
+        state of padding bits might not be preserved.  */
+      build_indirect_ref (loc, p2, RO_UNARY_STAR);
+      p2 = build2_loc (loc, MEM_REF, I_type,
+                      build1 (VIEW_CONVERT_EXPR, I_type_ptr, p2),
+                      build_zero_cst (TREE_TYPE (p2)));
+    }
+  else
+    {
+      p2 = build_indirect_ref (loc, p2, RO_UNARY_STAR);
+      p2 = build1 (VIEW_CONVERT_EXPR, I_type, p2);
+    }
    (*params)[2] = p2;
   /* The rest of the parameters are fine. NULL means no special return value
--- gcc/cp/pt.cc.jj     2024-02-14 14:26:19.695811596 +0100
+++ gcc/cp/pt.cc        2024-02-17 11:05:47.988237152 +0100
@@ -20088,6 +20088,14 @@ tsubst_expr (tree t, tree args, tsubst_f
        RETURN (r);
        }
+ case MEM_REF:
+      {
+       tree op0 = RECUR (TREE_OPERAND (t, 0));
+       tree op1 = RECUR (TREE_OPERAND (t, 1));
+       tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
+       RETURN (build2_loc (EXPR_LOCATION (t), MEM_REF, new_type, op0, op1));
+      }
+
      case NOP_EXPR:
        {
        tree type = tsubst (TREE_TYPE (t), args, complain, in_decl);
--- gcc/testsuite/g++.dg/ext/atomic-5.C.jj      2024-02-17 11:13:37.824770288 +0100
+++ gcc/testsuite/g++.dg/ext/atomic-5.C 2024-02-17 11:14:54.077720732 +0100
@@ -0,0 +1,42 @@
+// { dg-do compile { target c++14 } }
+
+template <int N>
+void
+foo (long double *ptr, long double *val, long double *ret)
+{
+  __atomic_exchange (ptr, val, ret, __ATOMIC_RELAXED);
+}
+
+template <int N>
+bool
+bar (long double *ptr, long double *exp, long double *des)
+{
+  return __atomic_compare_exchange (ptr, exp, des, false,
+                                   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+bool
+baz (long double *p, long double *q, long double *r)
+{
+  foo<0> (p, q, r);
+  foo<1> (p + 1, q + 1, r + 1);
+  return bar<0> (p + 2, q + 2, r + 2) || bar<1> (p + 3, q + 3, r + 3);
+}
+
+constexpr int
+qux (long double *ptr, long double *val, long double *ret)
+{
+  __atomic_exchange (ptr, val, ret, __ATOMIC_RELAXED);
+  return 0;
+}
+
+constexpr bool
+corge (long double *ptr, long double *exp, long double *des)
+{
+  return __atomic_compare_exchange (ptr, exp, des, false,
+                                   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+long double a[6];
+const int b = qux (a, a + 1, a + 2);
+const bool c = corge (a + 3, a + 4, a + 5);

        Jakub

