This pass is used to optimise assignments to the FPMR register in
aarch64.  I chose to implement this as a middle-end pass because it
mostly reuses the existing RTL PRE code within gcse.cc.

Compared to RTL PRE, the key difference in this new pass is that we
insert new writes directly to the destination hardreg, instead of
writing to a new pseudo-register and copying the result later.  This
requires changes to the analysis portion of the pass, because sets
cannot be moved before existing instructions that set, use or clobber
the hardreg, and the value becomes unavailable after any uses or
clobbers of the hardreg.

This patch would currently break any debug instructions that use the
value of fpmr in a region of code where that value is changed by this
pass.  I haven't worked out the best way to fix this, but I suspect the
issue is uncommon and tricky enough that it would be best to just drop
those debug instructions.

I've bootstrapped and regression tested this on aarch64, and it should be NFC
on other targets.  Aside from this, my testing so far has involved hacking in a
single FP8 intrinsic and testing various parameters and control flow
structures, and checking both the codegen and the LCM bitmaps.  I intend to
write better and more comprehensive tests once there are some real intrinsic
implementations available to use.


Is this approach good?  Apart from fixing the debug instructions and
adding tests, is there anything else I need to change?


gcc/ChangeLog:

        * config/aarch64/aarch64.h (HARDREG_PRE_REGNOS): New macro.
        * gcse.cc (doing_hardreg_pre_p): New global variable.
        (current_hardreg_regno): Ditto.
        (compute_local_properties): Unset transp for hardreg clobbers.
        (prune_hardreg_uses): New.
        (want_to_gcse_p): Always return true for hardreg PRE.
        (hash_scan_set): Add checks for hardreg uses/clobbers.
        (oprs_unchanged_p): Disable load motion for hardreg PRE pass.
        (record_last_mem_set_info): Ditto.
        (compute_hash_table_work): Record hardreg uses.
        (prune_expressions): Mark hardreg sets as call-clobbered.
        (compute_pre_data): Add call to prune_hardreg_uses.
        (pre_expr_reaches_here_p_work): Add comment.
        (insert_insn_start_basic_block): New functions.
        (pre_edge_insert): Don't add hardreg sets to predecessor block.
        (pre_delete): Use hardreg for the reaching reg.
        (pre_gcse): Don't insert copies for hardreg PRE.
        (one_pre_gcse_pass): Disable load motion for hardreg PRE pass.
        (execute_hardreg_pre): New.
        (class pass_hardreg_pre): New.
        (pass_hardreg_pre::gate): New.
        (make_pass_hardreg_pre): New.
        * passes.def (pass_hardreg_pre): New pass.
        * tree-pass.h (make_pass_hardreg_pre): New.


diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index 
593319fd4723626bf95f475e79c1c7b12238b2dd..860e29b3b24dfca656740c85ef0ac0445f9848cd
 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -1589,6 +1589,10 @@ enum class aarch64_tristate_mode : int { NO, YES, MAYBE 
};
   { int (aarch64_tristate_mode::MAYBE), \
     int (aarch64_local_sme_state::ANY) }
 
+/* Zero terminated list of regnos for which hardreg PRE should be
+   applied.  */
+#define HARDREG_PRE_REGNOS { FPM_REGNUM, 0 }
+
 #endif
 
 #endif /* GCC_AARCH64_H */
diff --git a/gcc/gcse.cc b/gcc/gcse.cc
index 
31b92f30fa1ba6c519429d4b7bc55547b2d71c01..ce4ebe420c02d78fcde3144eed595e22212aaa0b
 100644
--- a/gcc/gcse.cc
+++ b/gcc/gcse.cc
@@ -415,6 +415,11 @@ static int gcse_create_count;
 
 /* Doing code hoisting.  */
 static bool doing_code_hoisting_p = false;
+
+/* Doing hardreg_pre.  */
+static bool doing_hardreg_pre_p = false;
+
+static unsigned int current_hardreg_regno;
 
 /* For available exprs */
 static sbitmap *ae_kill;
@@ -693,10 +698,29 @@ compute_local_properties (sbitmap *transp, sbitmap *comp, 
sbitmap *antloc,
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
-           compute_transp (expr->expr, indx, transp,
-                           blocks_with_calls,
-                           modify_mem_list_set,
-                           canon_modify_mem_list);
+           {
+             compute_transp (expr->expr, indx, transp,
+                             blocks_with_calls,
+                             modify_mem_list_set,
+                             canon_modify_mem_list);
+
+             if (doing_hardreg_pre_p)
+               {
+                 /* We also need to check whether the destination hardreg is
+                    set or call-clobbered in each BB.  We'll check for hardreg
+                    uses later.  */
+                 df_ref def;
+                 for (def = DF_REG_DEF_CHAIN (current_hardreg_regno);
+                      def;
+                      def = DF_REF_NEXT_REG (def))
+                   bitmap_clear_bit (transp[DF_REF_BB (def)->index], indx);
+
+                 bitmap_iterator bi;
+                 unsigned bb_index;
+                 EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
+                   bitmap_clear_bit (transp[bb_index], indx);
+               }
+           }
 
          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to nonzero in ANTLOC.  */
@@ -728,6 +752,37 @@ compute_local_properties (sbitmap *transp, sbitmap *comp, 
sbitmap *antloc,
        }
     }
 }
+
+/* A hardreg set is not transparent in a block if there are any uses of that
+   hardreg.  This filters the results of compute_local_properties, after the
+   result of that function has been used to define the kills bitmap.
+
+   TRANSP is the destination sbitmap to be updated.
+
+   TABLE controls which hash table to look at.  */
+
+static void
+prune_hardreg_uses (sbitmap *transp, struct gcse_hash_table_d *table)
+{
+  unsigned int i;
+  gcc_assert (doing_hardreg_pre_p);
+
+  for (i = 0; i < table->size; i++)
+    {
+      struct gcse_expr *expr;
+
+      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
+       {
+         int indx = expr->bitmap_index;
+         df_ref use;
+
+         for (use = DF_REG_USE_CHAIN (current_hardreg_regno);
+              use;
+              use = DF_REF_NEXT_REG (use))
+           bitmap_clear_bit (transp[DF_REF_BB (use)->index], indx);
+       }
+    }
+}
 
 /* Hash table support.  */
 
@@ -739,6 +794,8 @@ struct reg_avail_info
 };
 
 static struct reg_avail_info *reg_avail_info;
+static basic_block hardreg_last_bb;
+static int hardreg_first_use;
 static basic_block current_bb;
 
 /* See whether X, the source of a set, is something we want to consider for
@@ -747,6 +804,9 @@ static basic_block current_bb;
 static bool
 want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
 {
+  if (doing_hardreg_pre_p)
+    return true;
+
 #ifdef STACK_REGS
   /* On register stack architectures, don't GCSE constants from the
      constant pool, as the benefits are often swamped by the overhead
@@ -911,7 +971,7 @@ oprs_unchanged_p (const_rtx x, const rtx_insn *insn, bool 
avail_p)
       }
 
     case MEM:
-      if (! flag_gcse_lm
+      if (! flag_gcse_lm || doing_hardreg_pre_p
          || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
                                     x, avail_p))
        return false;
@@ -1258,8 +1318,10 @@ hash_scan_set (rtx set, rtx_insn *insn, struct 
gcse_hash_table_d *table)
          && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
        src = XEXP (note, 0), set = gen_rtx_SET (dest, src);
 
-      /* Only record sets of pseudo-regs in the hash table.  */
-      if (regno >= FIRST_PSEUDO_REGISTER
+      /* Only record sets of pseudo-regs in the hash table, unless we're
+        currently doing hardreg switching.  */
+      if ((doing_hardreg_pre_p ? regno == current_hardreg_regno
+                                    : regno >= FIRST_PSEUDO_REGISTER)
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (dest))
          /* GCSE commonly inserts instruction after the insn.  We can't
@@ -1286,12 +1348,33 @@ hash_scan_set (rtx set, rtx_insn *insn, struct 
gcse_hash_table_d *table)
             able to handle code motion of insns with multiple sets.  */
          bool antic_p = (oprs_anticipatable_p (src, insn)
                          && !multiple_sets (insn));
+         if (doing_hardreg_pre_p)
+           {
+             /* A hardreg assignment is anticipatable only if the hardreg is
+                neither set nor used prior to this assignment.  */
+             auto info = reg_avail_info[current_hardreg_regno];
+             if ((info.last_bb == current_bb
+                  && info.first_set < DF_INSN_LUID (insn))
+                 || (hardreg_last_bb == current_bb
+                     && hardreg_first_use <= DF_INSN_LUID (insn)))
+               antic_p = false;
+           }
+
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          bool avail_p = (oprs_available_p (src, insn)
                          && ! JUMP_P (insn));
+         if (doing_hardreg_pre_p)
+           {
+             /* A hardreg assignment is only available if the hardreg is
+                not set later in the BB.  Uses of the hardreg are allowed.  */
+             auto info = reg_avail_info[current_hardreg_regno];
+             if (info.last_bb == current_bb
+                 && info.last_set > DF_INSN_LUID (insn))
+               avail_p = false;
+           }
 
          insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
                                max_distance, table);
@@ -1300,7 +1383,10 @@ hash_scan_set (rtx set, rtx_insn *insn, struct 
gcse_hash_table_d *table)
   /* In case of store we want to consider the memory value as available in
      the REG stored in that memory. This makes it possible to remove
      redundant loads from due to stores to the same location.  */
-  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
+  else if (flag_gcse_las
+          && !doing_hardreg_pre_p
+          && REG_P (src)
+          && MEM_P (dest))
     {
       unsigned int regno = REGNO (src);
       HOST_WIDE_INT max_distance = 0;
@@ -1460,7 +1546,7 @@ record_last_reg_set_info (rtx_insn *insn, int regno)
 static void
 record_last_mem_set_info (rtx_insn *insn)
 {
-  if (! flag_gcse_lm)
+  if (!flag_gcse_lm || doing_hardreg_pre_p)
     return;
 
   record_last_mem_set_info_common (insn, modify_mem_list,
@@ -1537,6 +1623,18 @@ compute_hash_table_work (struct gcse_hash_table_d *table)
              EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
                record_last_reg_set_info (insn, regno);
 
+             if (doing_hardreg_pre_p)
+               {
+                 /* This is covered by the above clobbers, but let's
+                    conservatively make this work as well for hardregs that
+                    are call-used but not call-clobbered.  */
+                 record_last_reg_set_info (insn, current_hardreg_regno);
+
+                 /* Mark this block as containing a call-clobber.  */
+                 bitmap_set_bit (blocks_with_calls,
+                                 BLOCK_FOR_INSN (insn)->index);
+               }
+
              if (! RTL_CONST_OR_PURE_CALL_P (insn)
                  || RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
                  || can_throw_external (insn))
@@ -1544,6 +1642,19 @@ compute_hash_table_work (struct gcse_hash_table_d *table)
            }
 
          note_stores (insn, record_last_set_info, insn);
+
+         if (doing_hardreg_pre_p && hardreg_last_bb != current_bb)
+           {
+             /* We need to record the first use of a hardreg to determine if a
+                set of that hardreg is anticipatable.  */
+             df_ref ref;
+             FOR_EACH_INSN_USE (ref, insn)
+               if (DF_REF_REGNO (ref) == current_hardreg_regno)
+                 {
+                   hardreg_last_bb = current_bb;
+                   hardreg_first_use = DF_INSN_LUID (insn);
+                 }
+           }
        }
 
       /* The next pass builds the hash table.  */
@@ -1714,6 +1825,19 @@ prune_expressions (bool pre_p)
     {
       for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
        {
+         /* For hardreg pre, we assume that all relevant hardregs are
+            call-clobbered, and set all bits in prune_exprs if the reg is call
+            clobbered.  If the hardreg were merely call-used, then we would
+            need to remove the expression from the anticipatable and
+            transparent bitmaps only (after using this to compute the kills
+            bitmap).  */
+
+         if (doing_hardreg_pre_p)
+           {
+             bitmap_set_bit (prune_exprs, expr->bitmap_index);
+             continue;
+           }
+
          /* Note potentially trapping expressions.  */
          if (may_trap_p (expr->expr))
            {
@@ -1884,6 +2008,9 @@ compute_pre_data (void)
       bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
     }
 
+  if (doing_hardreg_pre_p)
+    prune_hardreg_uses (transp, &expr_hash_table);
+
   edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
                            ae_kill, &pre_insert_map, &pre_delete_map);
   sbitmap_vector_free (antloc);
@@ -1938,7 +2065,10 @@ pre_expr_reaches_here_p_work (basic_block occr_bb, 
struct gcse_expr *expr,
 
          visited[pred_bb->index] = 1;
        }
-      /* Ignore this predecessor if it kills the expression.  */
+      /* Ignore this predecessor if it kills the expression.
+
+        If this were used for hardreg pre, then it would need to use the kills
+        bitmap.  */
       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
        visited[pred_bb->index] = 1;
 
@@ -2109,6 +2239,51 @@ insert_insn_end_basic_block (struct gcse_expr *expr, 
basic_block bb)
     }
 }
 
+/* Return the INSN which is added at the start of the block BB with
+   same instruction pattern with PAT.  */
+
+rtx_insn *
+insert_insn_start_basic_block (rtx_insn *pat, basic_block bb)
+{
+  rtx_insn *insn = BB_HEAD (bb);
+
+  gcc_assert (pat && INSN_P (pat));
+  rtx_insn *new_insn = emit_insn_before_noloc (pat, insn, bb);
+
+  while (pat != NULL_RTX)
+    {
+      if (INSN_P (pat))
+       add_label_notes (PATTERN (pat), new_insn);
+      pat = NEXT_INSN (pat);
+    }
+
+  return new_insn;
+}
+
+/* Add EXPR to the start of basic block BB.
+
+   This is used by hardreg PRE.  */
+
+static void
+insert_insn_start_basic_block (struct gcse_expr *expr, basic_block bb)
+{
+  rtx reg = expr->reaching_reg;
+  int regno = REGNO (reg);
+
+  rtx_insn *insn = process_insert_insn (expr);
+  rtx_insn *new_insn = insert_insn_start_basic_block (insn, bb);
+
+  gcse_create_count++;
+
+  if (dump_file)
+    {
+      fprintf (dump_file, "hardreg PRE: start of bb %d, insn %d, ",
+              bb->index, INSN_UID (new_insn));
+      fprintf (dump_file, "copying expression %d to reg %d\n",
+              expr->bitmap_index, regno);
+    }
+}
+
 /* Insert partially redundant expressions on edges in the CFG to make
    the expressions fully redundant.  */
 
@@ -2130,7 +2305,8 @@ pre_edge_insert (struct edge_list *edge_list, struct 
gcse_expr **index_map)
   for (e = 0; e < num_edges; e++)
     {
       int indx;
-      basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
+      basic_block pred_bb = INDEX_EDGE_PRED_BB (edge_list, e);
+      basic_block succ_bb = INDEX_EDGE_SUCC_BB (edge_list, e);
 
       for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
        {
@@ -2159,13 +2335,24 @@ pre_edge_insert (struct edge_list *edge_list, struct 
gcse_expr **index_map)
 
                        /* We can't insert anything on an abnormal and
                           critical edge, so we insert the insn at the end of
-                          the previous block. There are several alternatives
+                          the previous block.  There are several alternatives
                           detailed in Morgans book P277 (sec 10.5) for
                           handling this situation.  This one is easiest for
-                          now.  */
+                          now.
 
+                          For hardreg PRE, this would add an unwanted clobber
+                          of the hardreg, so we instead insert in the
+                          successor block, which may be partially redundant
+                          but is at least correct.  */
                        if (eg->flags & EDGE_ABNORMAL)
-                         insert_insn_end_basic_block (index_map[j], bb);
+                         {
+                           if (doing_hardreg_pre_p)
+                             insert_insn_start_basic_block (index_map[j],
+                                                            succ_bb);
+                           else
+                             insert_insn_end_basic_block (index_map[j],
+                                                          pred_bb);
+                         }
                        else
                          {
                            insn = process_insert_insn (index_map[j]);
@@ -2175,8 +2362,8 @@ pre_edge_insert (struct edge_list *edge_list, struct 
gcse_expr **index_map)
                        if (dump_file)
                          {
                            fprintf (dump_file, "PRE: edge (%d,%d), ",
-                                    bb->index,
-                                    INDEX_EDGE_SUCC_BB (edge_list, e)->index);
+                                    pred_bb->index,
+                                    succ_bb->index);
                            fprintf (dump_file, "copy expression %d\n",
                                     expr->bitmap_index);
                          }
@@ -2491,13 +2678,24 @@ pre_delete (void)
                && (set = single_set (insn)) != 0
                 && dbg_cnt (pre_insn))
              {
-               /* Create a pseudo-reg to store the result of reaching
-                  expressions into.  Get the mode for the new pseudo from
-                  the mode of the original destination pseudo.  */
+               rtx dest = SET_DEST (set);
                if (expr->reaching_reg == NULL)
-                 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
+                 {
+                   if (doing_hardreg_pre_p)
+                     /* Use the hardreg as the reaching register.  The
+                        deleted sets will be replaced with noop moves.
+
+                        FIXME: This may change the value of the hardreg in
+                        some debug instructions.  */
+                     expr->reaching_reg = dest;
+                   else
+                     /* Create a pseudo-reg to store the result of reaching
+                        expressions into.  Get the mode for the new pseudo from
+                        the mode of the original destination pseudo.  */
+                     expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST 
(set));
+                 }
 
-               gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
+               gcse_emit_move_after (dest, expr->reaching_reg, insn);
                delete_insn (insn);
                occr->deleted_p = 1;
                changed = true;
@@ -2561,10 +2759,12 @@ pre_gcse (struct edge_list *edge_list)
 
   changed = pre_delete ();
   did_insert = pre_edge_insert (edge_list, index_map);
-
   /* In other places with reaching expressions, copy the expression to the
-     specially allocated pseudo-reg that reaches the redundant expr.  */
-  pre_insert_copies ();
+     specially allocated pseudo-reg that reaches the redundant expr.  This
+     isn't needed for hardreg PRE.  */
+  if (!doing_hardreg_pre_p)
+    pre_insert_copies ();
+
   if (did_insert)
     {
       commit_edge_insertions ();
@@ -2601,11 +2801,11 @@ one_pre_gcse_pass (void)
 
   alloc_hash_table (&expr_hash_table);
   add_noreturn_fake_exit_edges ();
-  if (flag_gcse_lm)
+  if (flag_gcse_lm && !doing_hardreg_pre_p)
     compute_ld_motion_mems ();
 
   compute_hash_table (&expr_hash_table);
-  if (flag_gcse_lm)
+  if (flag_gcse_lm && !doing_hardreg_pre_p)
     trim_ld_motion_mems ();
   if (dump_file)
     dump_hash_table (dump_file, "Expression", &expr_hash_table);
@@ -2621,7 +2821,7 @@ one_pre_gcse_pass (void)
       free_pre_mem ();
     }
 
-  if (flag_gcse_lm)
+  if (flag_gcse_lm && !doing_hardreg_pre_p)
     free_ld_motion_mems ();
   remove_fake_exit_edges ();
   free_hash_table (&expr_hash_table);
@@ -4028,6 +4228,31 @@ execute_rtl_pre (void)
   return 0;
 }
 
+static unsigned int
+execute_hardreg_pre (void)
+{
+  doing_hardreg_pre_p = true;
+  unsigned int regnos[] = HARDREG_PRE_REGNOS;
+  /* It's possible to avoid this loop, but it isn't worth doing so until
+     hardreg PRE is used for multiple hardregs.  */
+  for (int i = 0; regnos[i] != 0; i++)
+    {
+      int changed;
+      current_hardreg_regno = regnos[i];
+      if (dump_file)
+       fprintf (dump_file, "Entering hardreg PRE for regno %d\n",
+                current_hardreg_regno);
+      delete_unreachable_blocks ();
+      df_analyze ();
+      changed = one_pre_gcse_pass ();
+      flag_rerun_cse_after_global_opts |= changed;
+      if (changed)
+       cleanup_cfg (0);
+    }
+  doing_hardreg_pre_p = false;
+  return 0;
+}
+
 static unsigned int
 execute_rtl_hoist (void)
 {
@@ -4096,6 +4321,56 @@ make_pass_rtl_pre (gcc::context *ctxt)
 
 namespace {
 
+const pass_data pass_data_hardreg_pre =
+{
+  RTL_PASS, /* type */
+  "hardreg_pre", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  TV_PRE, /* tv_id */
+  PROP_cfglayout, /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  TODO_df_finish, /* todo_flags_finish */
+};
+
+class pass_hardreg_pre : public rtl_opt_pass
+{
+public:
+  pass_hardreg_pre (gcc::context *ctxt)
+    : rtl_opt_pass (pass_data_hardreg_pre, ctxt)
+  {}
+
+  /* opt_pass methods: */
+  bool gate (function *) final override;
+  unsigned int execute (function *)  final override
+  {
+    return execute_hardreg_pre ();
+  }
+
+}; // class pass_hardreg_pre
+
+bool
+pass_hardreg_pre::gate (function *fun)
+{
+#ifdef HARDREG_PRE_REGNOS
+  return optimize > 0
+    && !fun->calls_setjmp;
+#else
+  return false;
+#endif
+}
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_hardreg_pre (gcc::context *ctxt)
+{
+  return new pass_hardreg_pre (ctxt);
+}
+
+namespace {
+
 const pass_data pass_data_rtl_hoist =
 {
   RTL_PASS, /* type */
diff --git a/gcc/passes.def b/gcc/passes.def
index 
7d01227eed1fcdda4e2db0b1b9dac80f21e221d9..374b2daf92c427355f93a69c028ddd794fc694c2
 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -462,6 +462,7 @@ along with GCC; see the file COPYING3.  If not see
       NEXT_PASS (pass_rtl_cprop);
       NEXT_PASS (pass_rtl_pre);
       NEXT_PASS (pass_rtl_hoist);
+      NEXT_PASS (pass_hardreg_pre);
       NEXT_PASS (pass_rtl_cprop);
       NEXT_PASS (pass_rtl_store_motion);
       NEXT_PASS (pass_cse_after_global_opts);
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 
a928cbe4557368ec483919a06cd3d29d733a7b66..d4cc85888d176ae603bc8c5aec1168749280511f
 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -572,6 +572,7 @@ extern rtl_opt_pass *make_pass_rtl_dse3 (gcc::context 
*ctxt);
 extern rtl_opt_pass *make_pass_rtl_cprop (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_rtl_pre (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_rtl_hoist (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_hardreg_pre (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_rtl_store_motion (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_cse_after_global_opts (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_rtl_ifcvt (gcc::context *ctxt);

Reply via email to