Hi!

libgcrypt fails to build on aarch64-linux with
*** stack smashing detected ***: terminated
when gcc is compiled with -D_FORTIFY_SOURCE=2.  The problem occurs when
fold_array_ctor_reference is called with a size equal to or very close to
MAX_BITSIZE_MODE_ANY_MODE bits and a non-zero inner_offset.
The first native_encode_expr call is made with that inner_offset and bufoff 0,
the subsequent ones with an offset of 0 and bufoff elt_size - inner_offset,
2 * elt_size - inner_offset, etc.  So, e.g. in the testcase, where we start
with inner_offset 1, a size of e.g. 256 bytes and elt_size 4 bytes, we end up
calling native_encode_expr at bufoff 251 and then at bufoff 255, and that last
call writes 3 bytes beyond the end of the buf array.
The following patch fixes that.  In addition, it avoids calling
elt_size.to_uhwi () in every iteration and punts if elt_sz would be too large.
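
To make the arithmetic concrete, here is a minimal standalone sketch (purely
illustrative, not GCC code; encode_elt and BUF_SIZE are made-up stand-ins for
native_encode_expr and MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT) showing how
the write offsets 0, 3, 7, ..., 251, 255 arise and how clamping the length of
the final, partial element keeps every write inside the buffer:

#include <stdio.h>
#include <string.h>

/* Stand-in for MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT.  */
#define BUF_SIZE 256

/* Stand-in for native_encode_expr: write at most LEN bytes of a single
   ELT_SIZE-byte element, starting at byte OFF within the element, and
   return how many bytes were written.  */
static int
encode_elt (unsigned char *p, size_t len, size_t off, size_t elt_size)
{
  size_t n = elt_size - off < len ? elt_size - off : len;
  memset (p, 0xab, n);
  return (int) n;
}

int
main (void)
{
  unsigned char buf[BUF_SIZE];
  size_t elt_size = 4, inner_offset = 1, bufoff = 0;

  while (bufoff < BUF_SIZE)
    {
      size_t elt_sz = elt_size;
      /* The fix: clamp the length passed to the encoder so that the
         final, partial element cannot write past the end of buf.
         Without this clamp, the write at bufoff 255 would store 4
         bytes, i.e. 3 bytes beyond the buffer.  */
      if (bufoff + elt_sz > BUF_SIZE)
        elt_sz = BUF_SIZE - bufoff;
      int len = encode_elt (buf + bufoff, elt_sz, inner_offset, elt_size);
      inner_offset = 0;
      bufoff += len;
    }
  printf ("final bufoff = %zu, buffer size = %d\n", bufoff, BUF_SIZE);
  return 0;
}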

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

2020-01-28  Jakub Jelinek  <ja...@redhat.com>

        PR tree-optimization/93454
        * gimple-fold.c (fold_array_ctor_reference): Perform
        elt_size.to_uhwi () just once, instead of calling it in every
        iteration.  Punt if that value is above the size of the temporary
        buffer.  Decrease the third native_encode_expr argument when
        bufoff + elt_sz is above the size of buf.

        * gcc.dg/pr93454.c: New test.

--- gcc/gimple-fold.c.jj        2020-01-12 11:54:36.000000000 +0100
+++ gcc/gimple-fold.c   2020-01-27 15:54:51.188830178 +0100
@@ -6665,12 +6665,14 @@ fold_array_ctor_reference (tree type, tr
   /* And offset within the access.  */
   inner_offset = offset % (elt_size.to_uhwi () * BITS_PER_UNIT);
 
-  if (size > elt_size.to_uhwi () * BITS_PER_UNIT)
+  unsigned HOST_WIDE_INT elt_sz = elt_size.to_uhwi ();
+  if (size > elt_sz * BITS_PER_UNIT)
     {
       /* native_encode_expr constraints.  */
       if (size > MAX_BITSIZE_MODE_ANY_MODE
          || size % BITS_PER_UNIT != 0
-         || inner_offset % BITS_PER_UNIT != 0)
+         || inner_offset % BITS_PER_UNIT != 0
+         || elt_sz > MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT)
        return NULL_TREE;
 
       unsigned ctor_idx;
@@ -6701,10 +6703,11 @@ fold_array_ctor_reference (tree type, tr
       index = wi::umax (index, access_index);
       do
        {
-         int len = native_encode_expr (val, buf + bufoff,
-                                       elt_size.to_uhwi (),
+         if (bufoff + elt_sz > sizeof (buf))
+           elt_sz = sizeof (buf) - bufoff;
+         int len = native_encode_expr (val, buf + bufoff, elt_sz,
                                        inner_offset / BITS_PER_UNIT);
-         if (len != elt_size - inner_offset / BITS_PER_UNIT)
+         if (len != (int) elt_sz - inner_offset / BITS_PER_UNIT)
            return NULL_TREE;
          inner_offset = 0;
          bufoff += len;
--- gcc/testsuite/gcc.dg/pr93454.c.jj   2020-01-27 16:04:22.420430555 +0100
+++ gcc/testsuite/gcc.dg/pr93454.c      2020-01-27 16:03:24.734278795 +0100
@@ -0,0 +1,25 @@
+/* PR tree-optimization/93454 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -g" } */
+
+#if __SIZEOF_INT__ == 4 && __CHAR_BIT__ == 8
+#define A(n) n, n + 0x01010101, n + 0x02020202, n + 0x03030303
+#define B(n) A (n), A (n + 0x04040404), A (n + 0x08080808), A (n + 0x0c0c0c0c)
+#define C(n) B (n), B (n + 0x10101010), B (n + 0x20202020), B (n + 0x30303030)
+#define D(n) C (n), C (n + 0x40404040), C (n + 0x80808080U), C (n + 0xc0c0c0c0U)
+const unsigned int a[64] = { C (0) };
+const unsigned int b[256] = { D (0) };
+const unsigned int c[32] = { B (0), B (0x10101010) };
+const unsigned int d[16] = { B (0) };
+const unsigned int e[8] = { A (0), A (0x04040404) };
+
+void
+foo (void)
+{
+  const unsigned char *s = ((const unsigned char *) a) + 1;
+  const unsigned char *t = ((const unsigned char *) b) + 1;
+  const unsigned char *u = ((const unsigned char *) c) + 1;
+  const unsigned char *v = ((const unsigned char *) d) + 1;
+  const unsigned char *w = ((const unsigned char *) e) + 1;
+}
+#endif

        Jakub
