Manos Anagnostakis <manos.anagnosta...@vrull.eu> writes:
> This is an RTL pass that detects store forwarding from stores to larger loads (load pairs).
>
> This optimization is SPEC2017-driven and was found to be beneficial for some benchmarks, through testing on ampere1/ampere1a machines.
>
> For example, it can transform cases like
>
> str  d5, [sp, #320]
> fmul d5, d31, d29
> ldp  d31, d17, [sp, #312] # Large load from small store
>
> to
>
> str  d5, [sp, #320]
> fmul d5, d31, d29
> ldr  d31, [sp, #312]
> ldr  d17, [sp, #320]
>
> Currently, the pass is disabled by default on all architectures and enabled by a target-specific option.
>
> If deemed beneficial enough for a default, it will be enabled on ampere1/ampere1a, or other architectures as well, without needing to be turned on by this option.
>
> Bootstrapped and regtested on aarch64-linux.
>
> gcc/ChangeLog:
>
>         * config.gcc: Add aarch64-store-forwarding.o to extra_objs.
>         * config/aarch64/aarch64-passes.def (INSERT_PASS_AFTER): New pass.
>         * config/aarch64/aarch64-protos.h (make_pass_avoid_store_forwarding):
>         Declare.
>         * config/aarch64/aarch64.opt (mavoid-store-forwarding): New option.
>         (aarch64-store-forwarding-threshold): New param.
>         * config/aarch64/t-aarch64: Add aarch64-store-forwarding.o.
>         * doc/invoke.texi: Document new option and new param.
>         * config/aarch64/aarch64-store-forwarding.cc: New file.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/aarch64/ldp_ssll_no_overlap_address.c: New test.
>         * gcc.target/aarch64/ldp_ssll_no_overlap_offset.c: New test.
>         * gcc.target/aarch64/ldp_ssll_overlap.c: New test.
>
> Signed-off-by: Manos Anagnostakis <manos.anagnosta...@vrull.eu>
> Co-Authored-By: Manolis Tsamis <manolis.tsa...@vrull.eu>
> Co-Authored-By: Philipp Tomsich <philipp.toms...@vrull.eu>
> ---
> Changes in v2:
>       - Remove the usage of memrefs_conflict_p together with the helper
>       check_memory_reg_ovewrite, which actually prevented some cases
>       from being handled correctly.
>       - Code cleanup requested in v1.
>
>  gcc/config.gcc                                |   1 +
>  gcc/config/aarch64/aarch64-passes.def         |   1 +
>  gcc/config/aarch64/aarch64-protos.h           |   1 +
>  .../aarch64/aarch64-store-forwarding.cc       | 323 ++++++++++++++++++
>  gcc/config/aarch64/aarch64.opt                |   9 +
>  gcc/config/aarch64/t-aarch64                  |  10 +
>  gcc/doc/invoke.texi                           |  12 +-
>  .../aarch64/ldp_ssll_no_overlap_address.c     |  33 ++
>  .../aarch64/ldp_ssll_no_overlap_offset.c      |  33 ++
>  .../gcc.target/aarch64/ldp_ssll_overlap.c     |  33 ++
>  10 files changed, 455 insertions(+), 1 deletion(-)
>  create mode 100644 gcc/config/aarch64/aarch64-store-forwarding.cc
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_address.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_offset.c
>  create mode 100644 gcc/testsuite/gcc.target/aarch64/ldp_ssll_overlap.c
>
> diff --git a/gcc/config.gcc b/gcc/config.gcc
> index b88591b6fd8..4eb41584b94 100644
> --- a/gcc/config.gcc
> +++ b/gcc/config.gcc
> @@ -350,6 +350,7 @@ aarch64*-*-*)
>       cxx_target_objs="aarch64-c.o"
>       d_target_objs="aarch64-d.o"
>      extra_objs="aarch64-builtins.o aarch-common.o aarch64-sve-builtins.o aarch64-sve-builtins-shapes.o aarch64-sve-builtins-base.o aarch64-sve-builtins-sve2.o cortex-a57-fma-steering.o aarch64-speculation.o falkor-tag-collision-avoidance.o aarch-bti-insert.o aarch64-cc-fusion.o"
> +     extra_objs="${extra_objs} aarch64-store-forwarding.o"
>      target_gtfiles="\$(srcdir)/config/aarch64/aarch64-builtins.cc \$(srcdir)/config/aarch64/aarch64-sve-builtins.h \$(srcdir)/config/aarch64/aarch64-sve-builtins.cc"
>       target_has_targetm_common=yes
>       ;;
> diff --git a/gcc/config/aarch64/aarch64-passes.def b/gcc/config/aarch64/aarch64-passes.def
> index 6ace797b738..fa79e8adca8 100644
> --- a/gcc/config/aarch64/aarch64-passes.def
> +++ b/gcc/config/aarch64/aarch64-passes.def
> @@ -23,3 +23,4 @@ INSERT_PASS_BEFORE (pass_reorder_blocks, 1, pass_track_speculation);
>  INSERT_PASS_AFTER (pass_machine_reorg, 1, pass_tag_collision_avoidance);
>  INSERT_PASS_BEFORE (pass_shorten_branches, 1, pass_insert_bti);
>  INSERT_PASS_AFTER (pass_if_after_combine, 1, pass_cc_fusion);
> +INSERT_PASS_AFTER (pass_peephole2, 1, pass_avoid_store_forwarding);
> diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
> index 36d6c688bc8..aee074e58dd 100644
> --- a/gcc/config/aarch64/aarch64-protos.h
> +++ b/gcc/config/aarch64/aarch64-protos.h
> @@ -1051,6 +1051,7 @@ rtl_opt_pass *make_pass_track_speculation (gcc::context *);
>  rtl_opt_pass *make_pass_tag_collision_avoidance (gcc::context *);
>  rtl_opt_pass *make_pass_insert_bti (gcc::context *ctxt);
>  rtl_opt_pass *make_pass_cc_fusion (gcc::context *ctxt);
> +rtl_opt_pass *make_pass_avoid_store_forwarding (gcc::context *ctxt);
>
>  poly_uint64 aarch64_regmode_natural_size (machine_mode);
>
> diff --git a/gcc/config/aarch64/aarch64-store-forwarding.cc b/gcc/config/aarch64/aarch64-store-forwarding.cc
> new file mode 100644
> index 00000000000..50996874883
> --- /dev/null
> +++ b/gcc/config/aarch64/aarch64-store-forwarding.cc
> @@ -0,0 +1,323 @@
> +/* Avoid store forwarding optimization pass.
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +   Contributed by VRULL GmbH.
> +
> +   This file is part of GCC.
> +
> +   GCC is free software; you can redistribute it and/or modify it
> +   under the terms of the GNU General Public License as published by
> +   the Free Software Foundation; either version 3, or (at your option)
> +   any later version.
> +
> +   GCC is distributed in the hope that it will be useful, but
> +   WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   General Public License for more details.
> +
> +   You should have received a copy of the GNU General Public License
> +   along with GCC; see the file COPYING3.  If not see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#define IN_TARGET_CODE 1
> +
> +#include "config.h"
> +#define INCLUDE_VECTOR
> +#define INCLUDE_MAP
> +#include "system.h"
> +#include "coretypes.h"
> +#include "backend.h"
> +#include "rtl.h"
> +#include "alias.h"
> +#include "rtlanal.h"
> +#include "tree-pass.h"
> +#include "cselib.h"
> +
> +/* This is an RTL pass that detects store forwarding from stores to larger
> +   loads (load pairs). For example, it can transform cases like
> +
> +   str  d5, [sp, #320]
> +   fmul d5, d31, d29
> +   ldp  d31, d17, [sp, #312] # Large load from small store
> +
> +   to
> +
> +   str  d5, [sp, #320]
> +   fmul d5, d31, d29
> +   ldr  d31, [sp, #312]
> +   ldr  d17, [sp, #320]
> +
> +   Design: The pass follows a straightforward design.  It starts by
> +   initializing the alias analysis and the cselib.  Both of these are used to
> +   find stores and larger loads with overlapping addresses, which are
> +   candidates for store forwarding optimizations.  It then scans at basic
> +   block level to find stores that forward to larger loads and handles them
> +   accordingly as described in the above example.  Finally, the alias
> +   analysis and the cselib library are closed.  */
> +
> +typedef std::vector<std::pair<rtx_insn *, rtx>> vec_rtx_pair;
> +typedef std::map<rtx, uint32_t> map_rtx_dist;
> +
> +/* Statistics counters.  */
> +static unsigned int stats_store_count = 0;
> +static unsigned int stats_ldp_count = 0;
> +static unsigned int stats_ssll_count = 0;
> +static unsigned int stats_transformed_count = 0;
> +
> +/* Dummy default for the optional MEM operand arguments below.  */
> +static rtx dummy;
> +static bool is_store (rtx expr, rtx &op_0=dummy);
> +static bool is_load (rtx expr, rtx &op_1=dummy);
> +
> +/* Return true if SET expression EXPR is a store; otherwise false.  OP_0 will
> +   contain the MEM operand of the store.  */
> +
> +static bool
> +is_store (rtx expr, rtx &op_0)
> +{
> +  op_0 = SET_DEST (expr);
> +
> +  if (GET_CODE (op_0) == ZERO_EXTEND
> +      || GET_CODE (op_0) == SIGN_EXTEND)
> +    op_0 = XEXP (op_0, 0);

I missed this last time, but ZERO_EXTEND and SIGN_EXTEND can't occur in
SET_DESTs, so I think this is redundant.

ZERO_EXTRACT can occur in SET_DESTs, but I don't think AArch64 uses it
for memories.  It's probably best just to delete the "if".
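That would leave the helper as just (an untested sketch):

  static bool
  is_store (rtx expr, rtx &op_0)
  {
    op_0 = SET_DEST (expr);
    return MEM_P (op_0);
  }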

> +
> +  return MEM_P (op_0);
> +}
> +
> +/* Return true if SET expression EXPR is a load; otherwise false.  OP_1 will
> +   contain the MEM operand of the load.  */
> +
> +static bool
> +is_load (rtx expr, rtx &op_1)
> +{
> +  op_1 = SET_SRC (expr);
> +
> +  if (GET_CODE (op_1) == ZERO_EXTEND
> +      || GET_CODE (op_1) == SIGN_EXTEND)
> +    op_1 = XEXP (op_1, 0);
> +
> +  return MEM_P (op_1);
> +}
> +
> +/* Check the maximum instruction distance threshold and, if crossed, remove the
> +   store from STORE_EXPRS.  DISTANCES contains the instruction distances.  */
> +
> +void
> +update_store_load_distances (vec_rtx_pair &store_exprs, map_rtx_dist &distances)
> +{
> +  for (auto iter = store_exprs.begin (); iter < store_exprs.end ();)
> +    {
> +      (distances[(*iter).second])++;
> +      iter = distances[(*iter).second]
> +          > (uint32_t) aarch64_store_forwarding_threshold_param
> +          ? store_exprs.erase (iter) : iter + 1;
> +    }
> +}

It seems unnecessary to have both a vector and a map.  IIUC, the vector
effectively acts as a FIFO, in that the entries are in instruction order
and get removed when instructions become too distant.  If that's the
case, a list would be better than a vector, since removing from the
beginning of a vector is a linear operation.

The list entries could store:

- the instruction
- information about the memory (see below)
- an instruction counter

That should avoid the need for a separate map.

It would mean that the store expression datastructure can no longer
be a simple pair, but that's OK.  IMO it's more readable to have a
custom datastructure anyway.

Also, rather than starting each expression with a distance of zero:

+             store_cnt_byte_dist.insert ({ expr, 0 });

and increasing every recorded distance after every instruction:

+         update_store_load_distances (store_exprs, store_cnt_byte_dist);

could you instead maintain a block-local insn counter, use that to
initialise the store expressions, and then use the gap between the
current insn counter and the recorded insn counter as the distance?
That way it shouldn't be necessary to update after each instruction,
and it should be possible to break from the loop above once the
comparison is false.
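I was thinking of something like this (an untested sketch; the names
are just illustrative):

  /* One record per candidate store.  */
  struct store_info
  {
    rtx_insn *insn;	   /* The store instruction.  */
    rtx addr;		   /* cselib_subst_to_values form of its address.  */
    machine_mode mode;	   /* Mode of the stored memory.  */
    unsigned int insn_cnt; /* Block-local insn counter at the store.  */
  };

  std::list<store_info> store_exprs;

  /* Entries are in instruction order, so expiring distant stores is just:  */
  while (!store_exprs.empty ()
	 && (insn_cnt - store_exprs.front ().insn_cnt
	     > (unsigned int) aarch64_store_forwarding_threshold_param))
    store_exprs.pop_front ();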

> +
> +/* Return true if STORE_MEM is forwarding to LOAD_MEM; otherwise false.  */
> +
> +static bool
> +is_forwarding (rtx store_mem, rtx load_mem)
> +{
> +  gcc_checking_assert (MEM_P (store_mem) && MEM_P (load_mem));
> +
> +  rtx store_mem_addr = canon_rtx (get_addr (XEXP (store_mem, 0)));
> +  rtx load_mem_addr = canon_rtx (get_addr (XEXP (load_mem, 0)));
> +
> +  return rtx_equal_for_cselib_1 (store_mem_addr,
> +                              load_mem_addr,
> +                              GET_MODE (store_mem), 0);

For rtx_equal_for_cselib_1 to work well, I think we need to
call cselib_subst_to_values on the store address at the point that
the store is recorded.  That copes correctly with cases where the
register that was used in the store address is changed between
the store and the load.

If we do that, the important things about the store memory location
are its mode and the cselib_subst_to_values version of its address.
We probably don't need to store the MEM as well.
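That is, roughly (untested, just to sketch the idea):

  /* When the store is recorded (STORE_MEM is the MEM being stored to):  */
  machine_mode store_mode = GET_MODE (store_mem);
  rtx store_addr = cselib_subst_to_values (XEXP (store_mem, 0), store_mode);

  /* Later, for a candidate load with MEM operand LOAD_MEM:  */
  rtx load_addr = cselib_subst_to_values (XEXP (load_mem, 0),
					  GET_MODE (load_mem));
  if (rtx_equal_for_cselib_1 (store_addr, load_addr, store_mode, 0))
    ...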

It looks like the new version of the pass doesn't use alias analysis to
detect hazards (with the risk of false positives that that carried).
It instead relies on cselib only (which should generally find only
true positives).
That's OK of course, but then: does the canon_rtx help that much?
I suppose it can provide *some* global information, but it's pretty
limited after RA.

Could you try without the canon_rtxes and without the init_alias_analysis
to see if that makes a noticeable difference?

Sorry for raising new issues in the v2 review.

Thanks,
Richard

> +}
> +
> +/* Return true if INSN is a load pair, preceded by a store forwarding to it;
> +   otherwise false.  STORE_EXPRS contains the stores.  */
> +
> +static bool
> +is_small_store_to_large_load (vec_rtx_pair &store_exprs, rtx_insn *insn)
> +{
> +  unsigned int load_count = 0;
> +  bool forwarding = false;
> +  rtx expr = PATTERN (insn);
> +
> +  if (GET_CODE (expr) != PARALLEL
> +      || XVECLEN (expr, 0) != 2)
> +    return false;
> +
> +  for (int i = 0; i < XVECLEN (expr, 0); i++)
> +    {
> +      rtx op_1;
> +      rtx out_exp = XVECEXP (expr, 0, i);
> +
> +      if (GET_CODE (out_exp) != SET)
> +     continue;
> +
> +      if (!is_load (out_exp, op_1))
> +     continue;
> +
> +      load_count++;
> +
> +      for (std::pair<rtx_insn *, rtx> str : store_exprs)
> +     {
> +       if (!is_forwarding (XEXP (str.second, 0), op_1))
> +         continue;
> +
> +       if (dump_file)
> +         {
> +           fprintf (dump_file,
> +                    "Store forwarding to PARALLEL with loads:\n");
> +           fprintf (dump_file, "  From: ");
> +           print_rtl_single (dump_file, str.first);
> +           fprintf (dump_file, "  To: ");
> +           print_rtl_single (dump_file, insn);
> +         }
> +
> +       forwarding = true;
> +       }
> +    }
> +
> +  if (load_count == 2)
> +    stats_ldp_count++;
> +
> +  return load_count == 2 && forwarding;
> +}
> +
> +/* Break a load pair into its 2 distinct loads, except if the base source
> +   address to load from is overwritten in the first load.  INSN should be the
> +   PARALLEL of the load pair.  */
> +
> +static void
> +break_ldp (rtx_insn *insn)
> +{
> +  rtx expr = PATTERN (insn);
> +
> +  gcc_checking_assert (GET_CODE (expr) == PARALLEL && XVECLEN (expr, 0) == 2);
> +
> +  rtx load_0 = XVECEXP (expr, 0, 0);
> +  rtx load_1 = XVECEXP (expr, 0, 1);
> +
> +  gcc_checking_assert (is_load (load_0) && is_load (load_1));
> +
> +  /* The base address was overwritten in the first load.  */
> +  if (reg_mentioned_p (SET_DEST (load_0), SET_SRC (load_1)))
> +    return;
> +
> +  emit_insn_before (load_0, insn);
> +  emit_insn_before (load_1, insn);
> +  remove_insn (insn);
> +
> +  stats_transformed_count++;
> +}
> +
> +static void
> +scan_and_transform_bb_level ()
> +{
> +  rtx_insn *insn, *next;
> +  basic_block bb;
> +  FOR_EACH_BB_FN (bb, cfun)
> +    {
> +      vec_rtx_pair store_exprs;
> +      map_rtx_dist store_cnt_byte_dist;
> +      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
> +     {
> +       next = NEXT_INSN (insn);
> +
> +       /* If we cross a CALL_P insn, clear the tracked stores, because a
> +          small-store-to-large-load across a call is unlikely to cause a
> +          performance difference.  */
> +       if (CALL_P (insn))
> +         {
> +           store_exprs.clear ();
> +           store_cnt_byte_dist.clear ();
> +         }
> +
> +       if (!NONJUMP_INSN_P (insn))
> +         continue;
> +
> +       cselib_process_insn (insn);
> +
> +       rtx expr = single_set (insn);
> +
> +       /* If a store is encountered, push it to the store_exprs vector to
> +          check it later.  */
> +       if (expr && is_store (expr))
> +         {
> +           store_exprs.push_back ({insn, expr});
> +
> +           /* The store insn shouldn't have been encountered before.  */
> +           gcc_checking_assert (store_cnt_byte_dist.find (expr)
> +                                == store_cnt_byte_dist.end ());
> +           store_cnt_byte_dist.insert ({ expr, 0 });
> +           stats_store_count++;
> +         }
> +
> +       /* Check for small-store-to-large-load.  */
> +       if (is_small_store_to_large_load (store_exprs, insn))
> +         {
> +           stats_ssll_count++;
> +           break_ldp (insn);
> +         }
> +
> +       update_store_load_distances (store_exprs, store_cnt_byte_dist);
> +     }
> +    }
> +}
> +
> +static void
> +execute_avoid_store_forwarding ()
> +{
> +  init_alias_analysis ();
> +  cselib_init (0);
> +  scan_and_transform_bb_level ();
> +  end_alias_analysis ();
> +  cselib_finish ();
> +  statistics_counter_event (cfun, "Number of stores identified: ",
> +                         stats_store_count);
> +  statistics_counter_event (cfun, "Number of load pairs identified: ",
> +                         stats_ldp_count);
> +  statistics_counter_event (cfun,
> +                         "Number of forwarding cases identified: ",
> +                         stats_ssll_count);
> +  statistics_counter_event (cfun, "Number of trasformed cases: ",
> +                         stats_transformed_count);
> +}
> +
> +const pass_data pass_data_avoid_store_forwarding =
> +{
> +  RTL_PASS, /* type.  */
> +  "avoid_store_forwarding", /* name.  */
> +  OPTGROUP_NONE, /* optinfo_flags.  */
> +  TV_NONE, /* tv_id.  */
> +  0, /* properties_required.  */
> +  0, /* properties_provided.  */
> +  0, /* properties_destroyed.  */
> +  0, /* todo_flags_start.  */
> +  0 /* todo_flags_finish.  */
> +};
> +
> +class pass_avoid_store_forwarding : public rtl_opt_pass
> +{
> +public:
> +  pass_avoid_store_forwarding (gcc::context *ctxt)
> +    : rtl_opt_pass (pass_data_avoid_store_forwarding, ctxt)
> +  {}
> +
> +  /* opt_pass methods: */
> +  virtual bool gate (function *)
> +    {
> +      return aarch64_flag_avoid_store_forwarding && optimize >= 2;
> +    }
> +
> +  virtual unsigned int execute (function *)
> +    {
> +      execute_avoid_store_forwarding ();
> +      return 0;
> +    }
> +
> +}; // class pass_avoid_store_forwarding
> +
> +/* Create a new avoid store forwarding pass instance.  */
> +
> +rtl_opt_pass *
> +make_pass_avoid_store_forwarding (gcc::context *ctxt)
> +{
> +  return new pass_avoid_store_forwarding (ctxt);
> +}
> diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
> index f5a518202a1..a9e944a272f 100644
> --- a/gcc/config/aarch64/aarch64.opt
> +++ b/gcc/config/aarch64/aarch64.opt
> @@ -304,6 +304,10 @@ moutline-atomics
>  Target Var(aarch64_flag_outline_atomics) Init(2) Save
>  Generate local calls to out-of-line atomic operations.
>
> +mavoid-store-forwarding
> +Target Bool Var(aarch64_flag_avoid_store_forwarding) Init(0) Optimization
> +Avoid store forwarding to load pairs.
> +
>  -param=aarch64-sve-compare-costs=
>  Target Joined UInteger Var(aarch64_sve_compare_costs) Init(1) IntegerRange(0, 1) Param
>  When vectorizing for SVE, consider using unpacked vectors for smaller elements and use the cost model to pick the cheapest approach.  Also use the cost model to choose between SVE and Advanced SIMD vectorization.
> @@ -360,3 +364,8 @@ Enum(aarch64_ldp_stp_policy) String(never) Value(AARCH64_LDP_STP_POLICY_NEVER)
>
>  EnumValue
>  Enum(aarch64_ldp_stp_policy) String(aligned) Value(AARCH64_LDP_STP_POLICY_ALIGNED)
> +
> +-param=aarch64-store-forwarding-threshold=
> +Target Joined UInteger Var(aarch64_store_forwarding_threshold_param) Init(20) Param
> +Maximum instruction distance allowed between a store and a load pair for this
> +to be considered a candidate to avoid when using -mavoid-store-forwarding.
> diff --git a/gcc/config/aarch64/t-aarch64 b/gcc/config/aarch64/t-aarch64
> index a9a244ab6d6..7639b50358d 100644
> --- a/gcc/config/aarch64/t-aarch64
> +++ b/gcc/config/aarch64/t-aarch64
> @@ -176,6 +176,16 @@ aarch64-cc-fusion.o: $(srcdir)/config/aarch64/aarch64-cc-fusion.cc \
>       $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
>               $(srcdir)/config/aarch64/aarch64-cc-fusion.cc
>
> +aarch64-store-forwarding.o: \
> +    $(srcdir)/config/aarch64/aarch64-store-forwarding.cc \
> +    $(CONFIG_H) $(SYSTEM_H) $(TM_H) $(REGS_H) insn-config.h $(RTL_BASE_H) \
> +    dominance.h cfg.h cfganal.h $(BASIC_BLOCK_H) $(INSN_ATTR_H) $(RECOG_H) \
> +    output.h hash-map.h $(DF_H) $(OBSTACK_H) $(TARGET_H) $(RTL_H) \
> +    $(CONTEXT_H) $(TREE_PASS_H) regrename.h \
> +    $(srcdir)/config/aarch64/aarch64-protos.h
> +     $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
> +             $(srcdir)/config/aarch64/aarch64-store-forwarding.cc
> +
>  comma=,
>  MULTILIB_OPTIONS    = $(subst $(comma),/, $(patsubst %, mabi=%, $(subst $(comma),$(comma)mabi=,$(TM_MULTILIB_CONFIG))))
>  MULTILIB_DIRNAMES   = $(subst $(comma), ,$(TM_MULTILIB_CONFIG))
> diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
> index bc9f4f70914..275dde8d541 100644
> --- a/gcc/doc/invoke.texi
> +++ b/gcc/doc/invoke.texi
> @@ -796,7 +796,7 @@ Objective-C and Objective-C++ Dialects}.
>  -moverride=@var{string}  -mverbose-cost-dump
>  -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{sysreg}
>  -mstack-protector-guard-offset=@var{offset} -mtrack-speculation
> --moutline-atomics }
> +-moutline-atomics -mavoid-store-forwarding}
>
>  @emph{Adapteva Epiphany Options}
>  @gccoptlist{-mhalf-reg-file  -mprefer-short-insn-regs
> @@ -16636,6 +16636,11 @@ With @option{--param=aarch64-stp-policy=never}, do not emit stp.
>  With @option{--param=aarch64-stp-policy=aligned}, emit stp only if the
>  source pointer is aligned to at least double the alignment of the type.
>
> +@item aarch64-store-forwarding-threshold
> +Maximum allowed instruction distance between a store and a load pair for
> +this to be considered a candidate to avoid when using
> +@option{-mavoid-store-forwarding}.
> +
>  @item aarch64-loop-vect-issue-rate-niters
>  The tuning for some AArch64 CPUs tries to take both latencies and issue
>  rates into account when deciding whether a loop should be vectorized
> @@ -20628,6 +20633,11 @@ Generate code which uses only the general-purpose registers.  This will prevent
>  the compiler from using floating-point and Advanced SIMD registers but will not
>  impose any restrictions on the assembler.
>
> +@item -mavoid-store-forwarding
> +@itemx -mno-avoid-store-forwarding
> +Avoid store forwarding to load pairs.  This option only has an effect
> +at optimization levels @option{-O2} and higher.
> +
>  @opindex mlittle-endian
>  @item -mlittle-endian
> Generate little-endian code.  This is the default when GCC is configured for an
> diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_address.c b/gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_address.c
> new file mode 100644
> index 00000000000..b77de6c64b6
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_address.c
> @@ -0,0 +1,33 @@
> +/* { dg-options "-O2 -mcpu=generic -mavoid-store-forwarding" } */
> +
> +#include <stdint.h>
> +
> +typedef int v4si __attribute__ ((vector_size (16)));
> +
> +/* Different address, same offset, no overlap  */
> +
> +#define LDP_SSLL_NO_OVERLAP_ADDRESS(TYPE) \
> +TYPE ldp_ssll_no_overlap_address_##TYPE(TYPE *ld_arr, TYPE *st_arr, TYPE *st_arr_2, TYPE i, TYPE dummy){ \
> +     TYPE r, y; \
> +     st_arr[0] = i; \
> +     ld_arr[0] = dummy; \
> +     r = st_arr_2[0]; \
> +     y = st_arr_2[1]; \
> +     return r + y; \
> +}
> +
> +LDP_SSLL_NO_OVERLAP_ADDRESS(uint32_t)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(uint64_t)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(int32_t)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(int64_t)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(int)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(long)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(float)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(double)
> +LDP_SSLL_NO_OVERLAP_ADDRESS(v4si)
> +
> +/* { dg-final { scan-assembler-times "ldp\tw\[0-9\]+, w\[0-9\]" 3 } } */
> +/* { dg-final { scan-assembler-times "ldp\tx\[0-9\]+, x\[0-9\]" 3 } } */
> +/* { dg-final { scan-assembler-times "ldp\ts\[0-9\]+, s\[0-9\]" 1 } } */
> +/* { dg-final { scan-assembler-times "ldp\td\[0-9\]+, d\[0-9\]" 1 } } */
> +/* { dg-final { scan-assembler-times "ldp\tq\[0-9\]+, q\[0-9\]" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_offset.c b/gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_offset.c
> new file mode 100644
> index 00000000000..f1b3a66abfd
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/ldp_ssll_no_overlap_offset.c
> @@ -0,0 +1,33 @@
> +/* { dg-options "-O2 -mcpu=generic -mavoid-store-forwarding" } */
> +
> +#include <stdint.h>
> +
> +typedef int v4si __attribute__ ((vector_size (16)));
> +
> +/* Same address, different offset, no overlap  */
> +
> +#define LDP_SSLL_NO_OVERLAP_OFFSET(TYPE) \
> +TYPE ldp_ssll_no_overlap_offset_##TYPE(TYPE *ld_arr, TYPE *st_arr, TYPE i, TYPE dummy){ \
> +     TYPE r, y; \
> +     st_arr[0] = i; \
> +     ld_arr[0] = dummy; \
> +     r = st_arr[10]; \
> +     y = st_arr[11]; \
> +     return r + y; \
> +}
> +
> +LDP_SSLL_NO_OVERLAP_OFFSET(uint32_t)
> +LDP_SSLL_NO_OVERLAP_OFFSET(uint64_t)
> +LDP_SSLL_NO_OVERLAP_OFFSET(int32_t)
> +LDP_SSLL_NO_OVERLAP_OFFSET(int64_t)
> +LDP_SSLL_NO_OVERLAP_OFFSET(int)
> +LDP_SSLL_NO_OVERLAP_OFFSET(long)
> +LDP_SSLL_NO_OVERLAP_OFFSET(float)
> +LDP_SSLL_NO_OVERLAP_OFFSET(double)
> +LDP_SSLL_NO_OVERLAP_OFFSET(v4si)
> +
> +/* { dg-final { scan-assembler-times "ldp\tw\[0-9\]+, w\[0-9\]" 3 } } */
> +/* { dg-final { scan-assembler-times "ldp\tx\[0-9\]+, x\[0-9\]" 3 } } */
> +/* { dg-final { scan-assembler-times "ldp\ts\[0-9\]+, s\[0-9\]" 1 } } */
> +/* { dg-final { scan-assembler-times "ldp\td\[0-9\]+, d\[0-9\]" 1 } } */
> +/* { dg-final { scan-assembler-times "ldp\tq\[0-9\]+, q\[0-9\]" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/aarch64/ldp_ssll_overlap.c b/gcc/testsuite/gcc.target/aarch64/ldp_ssll_overlap.c
> new file mode 100644
> index 00000000000..8d5ce5cc87e
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/ldp_ssll_overlap.c
> @@ -0,0 +1,33 @@
> +/* { dg-options "-O2 -mcpu=generic -mavoid-store-forwarding" } */
> +
> +#include <stdint.h>
> +
> +typedef int v4si __attribute__ ((vector_size (16)));
> +
> +/* Same address, same offset, overlap  */
> +
> +#define LDP_SSLL_OVERLAP(TYPE) \
> +TYPE ldp_ssll_overlap_##TYPE(TYPE *ld_arr, TYPE *st_arr, TYPE i, TYPE dummy){ \
> +     TYPE r, y; \
> +     st_arr[0] = i; \
> +     ld_arr[0] = dummy; \
> +     r = st_arr[0]; \
> +     y = st_arr[1]; \
> +     return r + y; \
> +}
> +
> +LDP_SSLL_OVERLAP(uint32_t)
> +LDP_SSLL_OVERLAP(uint64_t)
> +LDP_SSLL_OVERLAP(int32_t)
> +LDP_SSLL_OVERLAP(int64_t)
> +LDP_SSLL_OVERLAP(int)
> +LDP_SSLL_OVERLAP(long)
> +LDP_SSLL_OVERLAP(float)
> +LDP_SSLL_OVERLAP(double)
> +LDP_SSLL_OVERLAP(v4si)
> +
> +/* { dg-final { scan-assembler-times "ldp\tw\[0-9\]+, w\[0-9\]" 0 } } */
> +/* { dg-final { scan-assembler-times "ldp\tx\[0-9\]+, x\[0-9\]" 0 } } */
> +/* { dg-final { scan-assembler-times "ldp\ts\[0-9\]+, s\[0-9\]" 0 } } */
> +/* { dg-final { scan-assembler-times "ldp\td\[0-9\]+, d\[0-9\]" 0 } } */
> +/* { dg-final { scan-assembler-times "ldp\tq\[0-9\]+, q\[0-9\]" 0 } } */
> --
> 2.41.0
