This patch fixes several -fstrict-volatile-bitfields wrong-code bugs, including cases where bit-field values were being silently truncated and cases where the access was performed with the wrong width. The code changes are identical to those in the previous version of this patch series (originally posted in June and re-pinged many times since then), but for this version I have cleaned up the test cases to remove their dependencies on header files, as Bernd requested. A short example of the enforced semantics appears after the ChangeLog below.
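
To illustrate the truncation failure mode, here is the essence of the
new pr48784-1.c test included below; g.b straddles an aligned 32-bit
boundary, so a single aligned 32-bit access cannot fetch all of it:

  #pragma pack(1)
  volatile struct S0 {
     signed a : 7;
     unsigned b : 28;
  } g = {0,0xfffffff};

The test aborts at run time if reading g.b yields anything other than
0xfffffff.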

-Sandra

2013-09-28  Sandra Loosemore  <san...@codesourcery.com>

	PR middle-end/23623
	PR middle-end/48784
	PR middle-end/56341
	PR middle-end/56997

	gcc/
	* expmed.c (strict_volatile_bitfield_p): New function.
	(store_bit_field_1): Don't special-case strict volatile
	bitfields here.
	(store_bit_field): Handle strict volatile bitfields here instead.
	(store_fixed_bit_field): Don't special-case strict volatile
	bitfields here.
	(extract_bit_field_1): Don't special-case strict volatile
	bitfields here.
	(extract_bit_field): Handle strict volatile bitfields here instead.
	(extract_fixed_bit_field): Don't special-case strict volatile
	bitfields here.  Simplify surrounding code to resemble that in
	store_fixed_bit_field.
	* doc/invoke.texi (Code Gen Options): Update
	-fstrict-volatile-bitfields description.

	gcc/testsuite/
	* gcc.dg/pr23623.c: New test.
	* gcc.dg/pr48784-1.c: New test.
	* gcc.dg/pr48784-2.c: New test.
	* gcc.dg/pr56341-1.c: New test.
	* gcc.dg/pr56341-2.c: New test.
	* gcc.dg/pr56997-1.c: New test.
	* gcc.dg/pr56997-2.c: New test.
	* gcc.dg/pr56997-3.c: New test.
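
As an example of the semantics being enforced, here is a fragment of the
new pr23623.c test; with -fstrict-volatile-bitfields, the volatile
accesses to bf2.b must be done with unsigned int reads and writes:

  extern volatile struct
  {
    unsigned int b : 1;
  } bf2;

  void writeb (void)
  {
    bf2.b = 1;	/* volatile unsigned int read + write */
  }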
Index: gcc/expmed.c
===================================================================
--- gcc/expmed.c	(revision 203003)
+++ gcc/expmed.c	(working copy)
@@ -415,6 +415,42 @@ lowpart_bit_field_p (unsigned HOST_WIDE_
     return bitnum % BITS_PER_WORD == 0;
 }
 
+/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
+   containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.  */
+
+static bool
+strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
+			    unsigned HOST_WIDE_INT bitnum,
+			    enum machine_mode fieldmode)
+{
+  unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
+
+  /* -fstrict-volatile-bitfields must be enabled and we must have a
+     volatile MEM.  */
+  if (!MEM_P (op0)
+      || !MEM_VOLATILE_P (op0)
+      || flag_strict_volatile_bitfields <= 0)
+    return false;
+
+  /* Non-integral modes likely only happen with packed structures.
+     Punt.  */
+  if (!SCALAR_INT_MODE_P (fieldmode))
+    return false;
+
+  /* The bit size must not be larger than the field mode, and
+     the field mode must not be larger than a word.  */
+  if (bitsize > modesize || modesize > BITS_PER_WORD)
+    return false;
+
+  /* Check for cases of unaligned fields that must be split.  */
+  if (bitnum % BITS_PER_UNIT + bitsize > modesize
+      || (STRICT_ALIGNMENT
+	  && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
+    return false;
+
+  return true;
+}
+
 /* Return true if OP is a memory and if a bitfield of size BITSIZE at
    bit number BITNUM can be treated as a simple value of mode MODE.  */
 
@@ -813,12 +849,8 @@ store_bit_field_1 (rtx str_rtx, unsigned
      cheap register alternative is available.  */
   if (MEM_P (op0))
     {
-      /* Do not use unaligned memory insvs for volatile bitfields when
-	 -fstrict-volatile-bitfields is in effect.  */
-      if (!(MEM_VOLATILE_P (op0)
-	    && flag_strict_volatile_bitfields > 0)
-	  && get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
-					   fieldmode)
+      if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
+					fieldmode)
 	  && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
 	return true;
 
@@ -871,6 +903,27 @@ store_bit_field (rtx str_rtx, unsigned H
 		 enum machine_mode fieldmode,
 		 rtx value)
 {
+  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+    {
+
+      /* Storing any naturally aligned field can be done with a simple
+	 store.  For targets that support fast unaligned memory, any
+	 naturally sized, unit aligned field can be done directly.  */
+      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, fieldmode))
+	{
+	  str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
+					     bitnum / BITS_PER_UNIT);
+	  emit_move_insn (str_rtx, value);
+	}
+      else
+	/* Explicitly override the C/C++ memory model; ignore the
+	   bit range so that we can do the access in the mode mandated
+	   by -fstrict-volatile-bitfields instead.  */
+	store_fixed_bit_field (str_rtx, bitsize, bitnum, 0, 0, value);
+      return;
+    }
+
   /* Under the C++0x memory model, we must not touch bits outside the
      bit region.  Adjust the address to start at the beginning of the
      bit region.  */
@@ -923,29 +976,12 @@ store_fixed_bit_field (rtx op0, unsigned
 
   if (MEM_P (op0))
     {
-      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
-
-      if (bitregion_end)
-	maxbits = bitregion_end - bitregion_start + 1;
-
-      /* Get the proper mode to use for this field.  We want a mode that
-	 includes the entire field.  If such a mode would be larger than
-	 a word, we won't be doing the extraction the normal way.
-	 We don't want a mode bigger than the destination.  */
-
       mode = GET_MODE (op0);
       if (GET_MODE_BITSIZE (mode) == 0
 	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
 	mode = word_mode;
-
-      if (MEM_VOLATILE_P (op0)
-          && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
-	  && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
-	  && flag_strict_volatile_bitfields > 0)
-	mode = GET_MODE (op0);
-      else
-	mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
-			      MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
+      mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
+			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	{
@@ -1429,19 +1465,8 @@ extract_bit_field_1 (rtx str_rtx, unsign
      If that's wrong, the solution is to test for it and set TARGET to 0
      if needed.  */
 
-  /* If the bitfield is volatile, we need to make sure the access
-     remains on a type-aligned boundary.  */
-  if (GET_CODE (op0) == MEM
-      && MEM_VOLATILE_P (op0)
-      && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
-      && flag_strict_volatile_bitfields > 0)
-    goto no_subreg_mode_swap;
-
-  /* Only scalar integer modes can be converted via subregs.  There is an
-     additional problem for FP modes here in that they can have a precision
-     which is different from the size.  mode_for_size uses precision, but
-     we want a mode based on the size, so we must avoid calling it for FP
-     modes.  */
+  /* Get the mode of the field to use for atomic access or subreg
+     conversion.  */
   mode1 = mode;
   if (SCALAR_INT_MODE_P (tmode))
     {
@@ -1474,8 +1499,6 @@ extract_bit_field_1 (rtx str_rtx, unsign
       return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
     }
 
- no_subreg_mode_swap:
-
   /* Handle fields bigger than a word.  */
 
   if (bitsize > BITS_PER_WORD)
@@ -1595,11 +1618,8 @@ extract_bit_field_1 (rtx str_rtx, unsign
      cheap register alternative is available.  */
   if (MEM_P (op0))
     {
-      /* Do not use extv/extzv for volatile bitfields when
-         -fstrict-volatile-bitfields is in effect.  */
-      if (!(MEM_VOLATILE_P (op0) && flag_strict_volatile_bitfields > 0)
-	  && get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
-					   tmode))
+      if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
+					tmode))
 	{
 	  rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
 						     bitnum, unsignedp,
@@ -1665,6 +1685,31 @@ extract_bit_field (rtx str_rtx, unsigned
 		   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
 		   enum machine_mode mode, enum machine_mode tmode)
 {
+  enum machine_mode mode1;
+
+  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
+  if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
+    mode1 = GET_MODE (str_rtx);
+  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
+    mode1 = GET_MODE (target);
+  else
+    mode1 = tmode;
+
+  if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+    {
+      rtx result;
+
+      /* Extraction of a full MODE1 value can be done with a load as long as
+	 the field is on a byte boundary and is sufficiently aligned.  */
+      if (simple_mem_bitfield_p (str_rtx, bitsize, bitnum, mode1))
+	result = adjust_bitfield_address (str_rtx, mode1,
+					  bitnum / BITS_PER_UNIT);
+      else
+	result = extract_fixed_bit_field (mode, str_rtx, bitsize, bitnum,
+					  target, unsignedp);
+      return convert_extracted_bit_field (result, mode, tmode, unsignedp);
+    }
+
   return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
 			      target, mode, tmode, true);
 }
@@ -1691,45 +1736,19 @@ extract_fixed_bit_field (enum machine_mo
 	 includes the entire field.  If such a mode would be larger than
 	 a word, we won't be doing the extraction the normal way.  */
 
-      if (MEM_VOLATILE_P (op0)
-	  && flag_strict_volatile_bitfields > 0)
-	{
-	  if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
-	    mode = GET_MODE (op0);
-	  else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
-	    mode = GET_MODE (target);
-	  else
-	    mode = tmode;
-	}
-      else
-	mode = get_best_mode (bitsize, bitnum, 0, 0,
-			      MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
+      mode = GET_MODE (op0);
+      if (GET_MODE_BITSIZE (mode) == 0
+	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
+	mode = word_mode;
+      mode = get_best_mode (bitsize, bitnum, 0, 0,
+			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
 
       if (mode == VOIDmode)
 	/* The only way this should occur is if the field spans word
 	   boundaries.  */
 	return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
 
-      unsigned int total_bits = GET_MODE_BITSIZE (mode);
-      HOST_WIDE_INT bit_offset = bitnum - bitnum % total_bits;
-
-      /* If we're accessing a volatile MEM, we can't apply BIT_OFFSET
-	 if it results in a multi-word access where we otherwise wouldn't
-	 have one.  So, check for that case here.  */
-      if (MEM_P (op0)
-	  && MEM_VOLATILE_P (op0)
-	  && flag_strict_volatile_bitfields > 0
-	  && bitnum % BITS_PER_UNIT + bitsize <= total_bits
-	  && bitnum % GET_MODE_BITSIZE (mode) + bitsize > total_bits)
-	{
-	  /* If the target doesn't support unaligned access, give up and
-	     split the access into two.  */
-	  if (STRICT_ALIGNMENT)
-	    return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
-	  bit_offset = bitnum - bitnum % BITS_PER_UNIT;
-	}
-      op0 = adjust_bitfield_address (op0, mode, bit_offset / BITS_PER_UNIT);
-      bitnum -= bit_offset;
+      op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
     }
 
   mode = GET_MODE (op0);
Index: gcc/doc/invoke.texi
===================================================================
--- gcc/doc/invoke.texi	(revision 203003)
+++ gcc/doc/invoke.texi	(working copy)
@@ -21169,6 +21169,12 @@ instruction, even though that accesses b
 any portion of the bit-field, or memory-mapped registers unrelated to
 the one being updated.
 
+In some cases, such as when the @code{packed} attribute is applied to a
+structure field, it may not be possible to access the field with a single
+read or write that is correctly aligned for the target machine.  In this
+case GCC falls back to generating multiple accesses rather than code that
+will fault or truncate the result at run time.
+
 The default value of this option is determined by the application binary
 interface for the target processor.
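
As a concrete instance of the fallback described in the new documentation
text, the pr56997-1.c test below declares (in essence, after macro
expansion) a packed structure whose Type field is not naturally aligned:

  typedef struct s {
   unsigned char Prefix;
   unsigned short Type;
  } __attribute((__packed__)) ss;

  volatile ss v;

Volatile accesses to v.Type are performed with multiple narrower reads
or writes where a single correctly aligned access is impossible, rather
than with code that would fault or truncate the result at run time.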
 
Index: gcc/testsuite/gcc.dg/pr23623.c
===================================================================
--- gcc/testsuite/gcc.dg/pr23623.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr23623.c	(revision 0)
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-options "-fstrict-volatile-bitfields -fdump-rtl-final" } */
+
+/* With -fstrict-volatile-bitfields, the volatile accesses to bf2.b
+   and bf3.b must do unsigned int reads/writes.  The non-volatile
+   accesses to bf1.b are not so constrained.  */
+
+extern struct
+{
+  unsigned int b : 1;
+} bf1;
+
+extern volatile struct
+{
+  unsigned int b : 1;
+} bf2;
+
+extern struct
+{
+  volatile unsigned int b : 1;
+} bf3;
+
+void writeb(void)
+{
+  bf1.b = 1;
+  bf2.b = 1;	/* volatile read + volatile write */
+  bf3.b = 1;	/* volatile read + volatile write */
+}
+
+extern unsigned int x1, x2, x3;
+
+void readb(void)
+{
+  x1 = bf1.b;
+  x2 = bf2.b;   /* volatile write */
+  x3 = bf3.b;   /* volatile write */
+}
+
+/* There should be 6 volatile MEMs total, but scan-rtl-dump-times counts
+   the number of match variables and not the number of matches.  Since
+   the parenthesized subexpression in the regexp introduces an extra match
+   variable, we need to give a count of 12 instead of 6 here.  */
+/* { dg-final { scan-rtl-dump-times "mem/v(/.)*:SI" 12 "final" } } */
+/* { dg-final { cleanup-rtl-dump "final" } } */
+
Index: gcc/testsuite/gcc.dg/pr48784-1.c
===================================================================
--- gcc/testsuite/gcc.dg/pr48784-1.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr48784-1.c	(revision 0)
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#pragma pack(1)
+volatile struct S0 {
+   signed a : 7;
+   unsigned b : 28;  /* b can't be fetched with an aligned 32-bit access, */
+                     /* but it certainly can be fetched with an unaligned access */
+} g = {0,0xfffffff};
+
+int main() {
+  unsigned b = g.b;
+  if (b != 0xfffffff)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr48784-2.c
===================================================================
--- gcc/testsuite/gcc.dg/pr48784-2.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr48784-2.c	(revision 0)
@@ -0,0 +1,18 @@
+/* { dg-do run } */
+/* { dg-options "-fno-strict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#pragma pack(1)
+volatile struct S0 {
+   signed a : 7;
+   unsigned b : 28;  /* b can't be fetched with an aligned 32-bit access, */
+                     /* but it certainly can be fetched with an unaligned access */
+} g = {0,0xfffffff};
+
+int main() {
+  unsigned b = g.b;
+  if (b != 0xfffffff)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56341-1.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56341-1.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56341-1.c	(revision 0)
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+struct test0
+{
+  unsigned char b1[2];
+} __attribute__((packed, aligned(2)));
+
+struct test1
+{
+  volatile unsigned long a1;
+  unsigned char b1[4];
+} __attribute__((packed, aligned(2)));
+
+struct test2
+{
+  struct test0 t0;
+  struct test1 t1;
+  struct test0 t2;
+} __attribute__((packed, aligned(2)));
+
+struct test2 xx;
+struct test2 *x1 = &xx;
+
+#define MAGIC 0x12345678
+
+void test0 (struct test2* x1)
+{
+  x1->t1.a1 = MAGIC;
+}
+
+int main()
+{
+  test0 (x1);
+  if (xx.t1.a1 != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56341-2.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56341-2.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56341-2.c	(revision 0)
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-fno-strict-volatile-bitfields" } */
+
+extern void abort (void);
+
+struct test0
+{
+  unsigned char b1[2];
+} __attribute__((packed, aligned(2)));
+
+struct test1
+{
+  volatile unsigned long a1;
+  unsigned char b1[4];
+} __attribute__((packed, aligned(2)));
+
+struct test2
+{
+  struct test0 t0;
+  struct test1 t1;
+  struct test0 t2;
+} __attribute__((packed, aligned(2)));
+
+struct test2 xx;
+struct test2 *x1 = &xx;
+
+#define MAGIC 0x12345678
+
+void test0 (struct test2* x1)
+{
+  x1->t1.a1 = MAGIC;
+}
+
+int main()
+{
+  test0 (x1);
+  if (xx.t1.a1 != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56997-1.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56997-1.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56997-1.c	(revision 0)
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field.  */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned short
+#define MAGIC (unsigned short)0x102u
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+  v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+  return v.Type;
+}
+
+int main()
+{
+  test_type temp;
+  foo(MAGIC);
+  __builtin_memcpy(&g, (void *)&v, sizeof(g));
+  if (g.Type != MAGIC)
+    abort ();
+
+  g.Type = MAGIC;
+  __builtin_memcpy((void *)&v, &g, sizeof(v));
+  temp = bar();
+  if (temp != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56997-2.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56997-2.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56997-2.c	(revision 0)
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field.  */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned int
+#define MAGIC 0x1020304u
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+  v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+  return v.Type;
+}
+
+int main()
+{
+  test_type temp;
+  foo(MAGIC);
+  __builtin_memcpy(&g, (void *)&v, sizeof(g));
+  if (g.Type != MAGIC)
+    abort ();
+
+  g.Type = MAGIC;
+  __builtin_memcpy((void *)&v, &g, sizeof(v));
+  temp = bar();
+  if (temp != MAGIC)
+    abort ();
+  return 0;
+}
Index: gcc/testsuite/gcc.dg/pr56997-3.c
===================================================================
--- gcc/testsuite/gcc.dg/pr56997-3.c	(revision 0)
+++ gcc/testsuite/gcc.dg/pr56997-3.c	(revision 0)
@@ -0,0 +1,44 @@
+/* Test volatile access to unaligned field.  */
+/* { dg-do run } */
+/* { dg-options "-fstrict-volatile-bitfields" } */
+
+extern void abort (void);
+
+#define test_type unsigned long long
+#define MAGIC 0x102030405060708ull
+
+typedef struct s{
+ unsigned char Prefix;
+ test_type Type;
+}__attribute((__packed__)) ss;
+
+volatile ss v;
+ss g;
+
+void __attribute__((noinline))
+foo (test_type u)
+{
+  v.Type = u;
+}
+
+test_type __attribute__((noinline))
+bar (void)
+{
+  return v.Type;
+}
+
+int main()
+{
+  test_type temp;
+  foo(MAGIC);
+  __builtin_memcpy(&g, (void *)&v, sizeof(g));
+  if (g.Type != MAGIC)
+    abort ();
+
+  g.Type = MAGIC;
+  __builtin_memcpy((void *)&v, &g, sizeof(v));
+  temp = bar();
+  if (temp != MAGIC)
+    abort ();
+  return 0;
+}
