Hi,
this is a regression present on the mainline and 4.8 branch. emit_group_store
happily writes past the end of a packed structure, thus accessing a distinct
variable stored there. Then the scheduler swaps a couple of writes, leading
to wrong code. Fixed by preventing emit_group_store from writing past the end
of the structure.
Tested on x86_64-suse-linux, OK for the mainline and 4.8 branch?
2013-11-22 Eric Botcazou <ebotca...@adacore.com>
PR middle-end/59138
* expr.c (emit_group_store): Do not write past the end of the structure.
(expand_assignment): Fix formatting of the store_bit_field call.
2013-11-22 Eric Botcazou <ebotca...@adacore.com>
* gcc.c-torture/execute/20131122-1.c: New test.
--
Eric Botcazou
/* PR middle-end/59138 */
/* Testcase by John Regehr <reg...@cs.utah.edu> */
/* Declare abort directly so the torture test needs no headers.  */
extern void abort (void);
/* Pack the struct so sizeof (struct S0) == 14 (4+4+4+2), i.e. not a
   multiple of the word size: a word-granular block store of an S0 value
   then overruns the object by 2 bytes, which is the bug under test.  */
#pragma pack(1)
struct S0 {
int f0;
int f1;
int f2;
short f3;
};
/* NOTE(review): the test presumes 'a' is placed adjacent to 'b' in
   memory, so the out-of-bounds store into 'b' clobbers 'a' — this is
   link-order dependent, not guaranteed by the C source itself.
   'c' has no initializer, so fn1's return value is all zeros.  */
short a = 1;
struct S0 b = { 1 }, c, d, e;
struct S0 fn1() { return c; }
/* Trigger for the wrong-code bug: with the unfixed emit_group_store the
   store of fn1's result into 'b' writes past the end of the packed
   struct (clobbering the adjacent object), and the scheduler may then
   reorder that overrunning write after 'a = 0', leaving a != 0.  */
void fn2 (void)
{
b = fn1 ();
a = 0;
d = e;
}
/* After fn2, 'a' must be 0.  If the out-of-bounds trailing-fragment
   store of 'b' was scheduled after 'a = 0', the stale value leaks
   through and we abort to fail the test.  */
int main (void)
{
fn2 ();
if (a != 0)
abort ();
return 0;
}
Index: expr.c
===================================================================
--- expr.c (revision 205244)
+++ expr.c (working copy)
@@ -2056,12 +2056,14 @@ emit_group_store (rtx orig_dst, rtx src,
HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
enum machine_mode mode = GET_MODE (tmps[i]);
unsigned int bytelen = GET_MODE_SIZE (mode);
- unsigned int adj_bytelen = bytelen;
+ unsigned int adj_bytelen;
rtx dest = dst;
/* Handle trailing fragments that run over the size of the struct. */
if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
adj_bytelen = ssize - bytepos;
+ else
+ adj_bytelen = bytelen;
if (GET_CODE (dst) == CONCAT)
{
@@ -2102,6 +2104,7 @@ emit_group_store (rtx orig_dst, rtx src,
}
}
+ /* Handle trailing fragments that run over the size of the struct. */
if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
{
/* store_bit_field always takes its value from the lsb.
@@ -2119,16 +2122,22 @@ emit_group_store (rtx orig_dst, rtx src,
tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
shift, tmps[i], 0);
}
- bytelen = adj_bytelen;
+
+ /* Make sure not to write past the end of the struct. */
+ store_bit_field (dest,
+ adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
+ bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT,
+ VOIDmode, tmps[i]);
}
/* Optimize the access just a bit. */
- if (MEM_P (dest)
- && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
- || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
- && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
- && bytelen == GET_MODE_SIZE (mode))
+ else if (MEM_P (dest)
+ && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
+ || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
+ && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
+ && bytelen == GET_MODE_SIZE (mode))
emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
+
else
store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
0, 0, mode, tmps[i]);
@@ -4771,8 +4780,7 @@ expand_assignment (tree to, tree from, b
expand_insn (icode, 2, ops);
}
else
- store_bit_field (mem, GET_MODE_BITSIZE (mode),
- 0, 0, 0, mode, reg);
+ store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg);
return;
}