From: kelefth <[email protected]>

This patch enables the avoid-store-forwarding pass by default at -O2
and higher.

The assembly patterns in `bitfield-bitint-abi-align16.c` and
`bitfield-bitint-abi-align8.c` have been updated to account for
the asf transformations.
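
For reference, the kind of pattern the pass rewrites is the one
exercised by the new test case (see avoid-store-forwarding-6.c below),
where a narrow store is immediately followed by a wider load from the
same location:

    typedef union {
        char arr_8[8];
        long long_value;
    } DataUnion;

    long ssll_1 (DataUnion *data, char x)
    {
      data->arr_8[0] = x;       /* Narrow (byte) store...  */
      return data->long_value;  /* ...followed by a wider load that would
                                   otherwise hit a store-forwarding stall.  */
    }

With the pass enabled, the load is emitted before the byte store and
the loaded value is fixed up with a bfi, which is what the new
scan-assembler pattern checks for.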

Co-Authored-By: Christoph Müllner <[email protected]>

gcc/ChangeLog:

        * doc/invoke.texi: Document -favoid-store-forwarding as enabled
        at -O2.
        * opts.cc: Enable -favoid-store-forwarding at -O2.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/bitfield-bitint-abi-align16.c:
        Modify testcases to account for the asf transformations.
        * gcc.target/aarch64/bitfield-bitint-abi-align8.c: Likewise.
        * gcc.target/aarch64/avoid-store-forwarding-6.c: New test.

Signed-off-by: Konstantinos Eleftheriou <[email protected]>
---

(no changes since v1)

 gcc/doc/invoke.texi                           |  3 +-
 gcc/opts.cc                                   |  1 +
 .../aarch64/avoid-store-forwarding-6.c        | 29 +++++++++++++++++++
 .../aarch64/bitfield-bitint-abi-align16.c     | 25 +++++++++-------
 .../aarch64/bitfield-bitint-abi-align8.c      | 25 +++++++++-------
 5 files changed, 60 insertions(+), 23 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/avoid-store-forwarding-6.c

diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 32b9c48f155c..cdd10884d57d 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -13144,6 +13144,7 @@ also turns on the following optimization flags:
 @c Please keep the following list alphabetized!
 @gccoptlist{-falign-functions  -falign-jumps
 -falign-labels  -falign-loops
+-favoid-store-forwarding
 -fcaller-saves
 -fcode-hoisting
 -fcrossjumping
@@ -13313,7 +13314,7 @@ Many CPUs will stall for many cycles when a load partially depends on previous
 smaller stores.  This pass tries to detect such cases and avoid the penalty by
 changing the order of the load and store and then fixing up the loaded value.
 
-Disabled by default.
+Enabled by default at @option{-O2} and higher.
 
 @opindex ffp-contract
 @item -ffp-contract=@var{style}
diff --git a/gcc/opts.cc b/gcc/opts.cc
index ceb1e0f445b1..2eafc3f3d58e 100644
--- a/gcc/opts.cc
+++ b/gcc/opts.cc
@@ -636,6 +636,7 @@ static const struct default_options default_options_table[] =
     { OPT_LEVELS_1_PLUS_NOT_DEBUG, OPT_ftree_sra, NULL, 1 },
 
     /* -O2 and -Os optimizations.  */
+    { OPT_LEVELS_2_PLUS, OPT_favoid_store_forwarding, NULL, 1 },
     { OPT_LEVELS_2_PLUS, OPT_fcaller_saves, NULL, 1 },
     { OPT_LEVELS_2_PLUS, OPT_fcode_hoisting, NULL, 1 },
     { OPT_LEVELS_2_PLUS, OPT_fcrossjumping, NULL, 1 },
diff --git a/gcc/testsuite/gcc.target/aarch64/avoid-store-forwarding-6.c b/gcc/testsuite/gcc.target/aarch64/avoid-store-forwarding-6.c
new file mode 100644
index 000000000000..320fa5e101e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/avoid-store-forwarding-6.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+/* Same as avoid-store-forwarding-1.c but without -favoid-store-forwarding.  */
+
+typedef union {
+    char arr_8[8];
+    long long_value;
+} DataUnion;
+
+long ssll_1 (DataUnion *data, char x)
+{
+  data->arr_8[0] = x;
+  return data->long_value;
+}
+
+long ssll_2 (DataUnion *data, char x)
+{
+  data->arr_8[1] = x;
+  return data->long_value;
+}
+
+long ssll_3 (DataUnion *data, char x)
+{
+  data->arr_8[7] = x;
+  return data->long_value;
+}
+
+/* { dg-final { scan-assembler-times {ldr\tx[0-9]+, \[x[0-9]+\]\n\tstrb\tw[0-9]+, \[x[0-9]+(, \d+)?\]\n\tbfi\tx[0-9]+, x[0-9]+, \d+, \d+} 3 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
index c29a230a7713..34f3d7f9653c 100644
--- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
+++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
@@ -87,14 +87,15 @@
 **     sub     sp, sp, #16
 **     mov     (x[0-9]+), x0
 **     mov     w0, w1
+**     mov     x1, 0
 **     sbfx    x([0-9]+), \1, 0, 63
 **     mov     (w[0-9]+), 0
 **     bfi     \3, w\2, 0, 1
 **     and     x3, x\2, 9223372036854775807
-**     mov     x2, 0
-**     str     xzr, \[sp\]
+**     str     x1, \[sp\]
 **     strb    \3, \[sp\]
-**     ldr     x1, \[sp\]
+**     bfi     x1, x2, 0, 8
+**     mov     x2, 0
 **     add     sp, sp, 16
 **     b       fp
 */
@@ -183,19 +184,20 @@
 **     sxtw    (x[0-9]+), w1
 **     mov     x0, \2
 **     and     x7, \2, 9223372036854775807
+**     mov     x2, 0
 **     mov     (w[0-9]+), 0
 **     bfi     \3, w\1, 0, 1
 **     strb    wzr, \[sp, 16\]
 **     mov     x6, x7
 **     mov     x5, x7
 **     mov     x4, x7
+**     mov     x1, x7
+**     str     x2, \[sp, 48\]
+**     strb    \3, \[sp, 48\]
+**     bfi     x2, x3, 0, 8
+**     stp     x7, x2, \[sp\]
 **     mov     x3, x7
 **     mov     x2, x7
-**     str     xzr, \[sp, 48\]
-**     strb    \3, \[sp, 48\]
-**     ldr     (x[0-9]+), \[sp, 48\]
-**     stp     x7, \4, \[sp\]
-**     mov     x1, x7
 **     bl      fp_stack
 **     sbfx    x0, x0, 0, 63
 **...
@@ -341,12 +343,13 @@
 **...
 **     mov     x([0-9]+), x0
 **     mov     w0, w1
+**     mov     x1, 0
 **     mov     (w[0-9]+), 0
 **     bfi     \2, w\1, 0, 1
-**     mov     x2, 0
-**     str     xzr, \[sp\]
+**     str     x1, \[sp\]
 **     strb    \2, \[sp\]
-**     ldr     x1, \[sp\]
+**     bfi     x1, x2, 0, 8
+**     mov     x2, 0
 **...
 **     b       fp_stdarg
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
index 13ffbf416cab..d9cefbabb80c 100644
--- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
+++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
@@ -87,14 +87,15 @@
 **     sub     sp, sp, #16
 **     mov     (x[0-9]+), x0
 **     mov     w0, w1
+**     mov     x1, 0
 **     sbfx    x([0-9]+), \1, 0, 63
 **     mov     (w[0-9]+), 0
 **     bfi     \3, w\2, 0, 1
 **     and     x3, x\2, 9223372036854775807
-**     mov     x2, 0
-**     str     xzr, \[sp\]
+**     str     x1, \[sp\]
 **     strb    \3, \[sp\]
-**     ldr     x1, \[sp\]
+**     bfi     x1, x2, 0, 8
+**     mov     x2, 0
 **     add     sp, sp, 16
 **     b       fp
 */
@@ -183,19 +184,20 @@
 **     sxtw    (x[0-9]+), w1
 **     mov     x0, \2
 **     and     x7, \2, 9223372036854775807
+**     mov     x2, 0
 **     mov     (w[0-9]+), 0
 **     bfi     \3, w\1, 0, 1
 **     strb    wzr, \[sp, 16\]
 **     mov     x6, x7
 **     mov     x5, x7
 **     mov     x4, x7
+**     mov     x1, x7
+**     str     x2, \[sp, 48\]
+**     strb    \3, \[sp, 48\]
+**     bfi     x2, x3, 0, 8
+**     stp     x7, x2, \[sp\]
 **     mov     x3, x7
 **     mov     x2, x7
-**     str     xzr, \[sp, 48\]
-**     strb    \3, \[sp, 48\]
-**     ldr     (x[0-9]+), \[sp, 48\]
-**     stp     x7, \4, \[sp\]
-**     mov     x1, x7
 **     bl      fp_stack
 **     sbfx    x0, x0, 0, 63
 **...
@@ -343,12 +345,13 @@
 **...
 **     mov     x([0-9]+), x0
 **     mov     w0, w1
+**     mov     x1, 0
 **     mov     (w[0-9]+), 0
 **     bfi     \2, w\1, 0, 1
-**     mov     x2, 0
-**     str     xzr, \[sp\]
+**     str     x1, \[sp\]
 **     strb    \2, \[sp\]
-**     ldr     x1, \[sp\]
+**     bfi     x1, x2, 0, 8
+**     mov     x2, 0
 **...
 **     b       fp_stdarg
 */
-- 
2.50.1
