Howdy!
For some upcoming work I need some pass-local data that I don't want to
keep passing around as arguments.  We have enough of those in the
threader as it is.  So I moved the current pass-local data into its own
class and basically classified the entire pass, avoiding a lot of
arguments in the process.
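To give a feel for the change, here is one representative signature
before and after (taken from the hunks below); the path, visited_bbs
and speed_p arguments simply become class members:

  /* Before: shared state passed explicitly to every helper.  */
  static edge
  profitable_jump_thread_path (vec<basic_block> &path,
			       basic_block bbi, tree name, tree arg,
			       bool speed_p, bool *creates_irreducible_loop);

  /* After: the state lives in thread_jumps, so only the per-call
     bits remain.  */
  edge
  thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
					     tree arg,
					     bool *creates_irreducible_loop);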
In doing this, I also noticed that the prototype in the header file was
not only wrong (it declares find_jump_threads_backwards as taking an
edge, whereas the function takes a basic_block and a bool), but also
unused anywhere.  I have removed the now useless header.
The net result is fewer lines of code, even without counting the
deleted header file.
Oh yeah, we had no way of clearing a hash set. I've fixed this
oversight :).
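The new hash_set::empty just forwards to hash_table::empty on the
underlying table, so a set can now be reused across iterations instead
of being recreated each time, e.g.:

  hash_set<basic_block> visited_bbs;
  visited_bbs.add (bb);
  /* ... */
  visited_bbs.empty ();  /* Drop all entries but keep the set around.  */

which is exactly what find_jump_threads_backwards does with its
visited_bbs member below.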
Tested on x86-64 Linux.
OK for trunk?
gcc/
* hash-set.h (hash_set::empty): New.
* tree-ssa-threadbackward.h: Remove.
* tree-ssa-threadbackward.c (class thread_jumps): New.
Move max_threaded_paths into class.
(fsm_find_thread_path): Remove arguments that are now in class.
(profitable_jump_thread_path): Rename to...
(thread_jumps::profitable_jump_thread_path): ...this.
(convert_and_register_jump_thread_path): Rename to...
(thread_jumps::convert_and_register_current_path): ...this.
(check_subpath_and_update_thread_path): Rename to...
(thread_jumps::check_subpath_and_update_thread_path): ...this.
(register_jump_thread_path_if_profitable): Rename to...
(thread_jumps::register_jump_thread_path_if_profitable): ...this.
(handle_phi): Rename to...
(thread_jumps::handle_phi): ...this.
(handle_assignment): Rename to...
(thread_jumps::handle_assignment): ...this.
(fsm_find_control_statement_thread_paths): Rename to...
(thread_jumps::fsm_find_control_statement_thread_paths): ...this.
(find_jump_threads_backwards): Rename to...
(thread_jumps::find_jump_threads_backwards): ...this.
Initialize path local data.
(pass_thread_jumps::execute): Call find_jump_threads_backwards
from within thread_jumps class.
(pass_early_thread_jumps::execute): Same.
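With that, the pass entry points just instantiate the class on the
stack and call into it; for example, pass_thread_jumps::execute now
does (see the hunk below):

  thread_jumps pass;
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
	pass.find_jump_threads_backwards (bb, true);
    }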
diff --git a/gcc/hash-set.h b/gcc/hash-set.h
index d2247d39571..8ce796d1c48 100644
--- a/gcc/hash-set.h
+++ b/gcc/hash-set.h
@@ -80,6 +80,10 @@ public:
size_t elements () const { return m_table.elements (); }
+ /* Clear the hash table. */
+
+ void empty () { m_table.empty (); }
+
class iterator
{
public:
diff --git a/gcc/tree-ssa-threadbackward.c b/gcc/tree-ssa-threadbackward.c
index 12bc80350f5..a1454f31bec 100644
--- a/gcc/tree-ssa-threadbackward.c
+++ b/gcc/tree-ssa-threadbackward.c
@@ -38,7 +38,35 @@ along with GCC; see the file COPYING3. If not see
#include "tree-inline.h"
#include "tree-vectorizer.h"
-static int max_threaded_paths;
+class thread_jumps
+{
+ private:
+ /* The maximum number of BB we are allowed to thread. */
+ int max_threaded_paths;
+ /* Hash to keep track of seen bbs. */
+ hash_set<basic_block> visited_bbs;
+ /* The current path we're analyzing. */
+ auto_vec<basic_block> path;
+ /* Tracks if we have recursed through a loop PHI node. */
+ bool seen_loop_phi;
+ /* Indicate that we could increase code size to improve the
+ code path. */
+ bool speed_p;
+
+ edge profitable_jump_thread_path (basic_block bbi, tree name, tree arg,
+ bool *creates_irreducible_loop);
+ void convert_and_register_current_path (edge taken_edge);
+ void register_jump_thread_path_if_profitable (tree name, tree arg,
+ basic_block def_bb);
+ void handle_assignment (gimple *stmt, tree name, basic_block def_bb);
+ void handle_phi (gphi *phi, tree name, basic_block def_bb);
+ void fsm_find_control_statement_thread_paths (tree name);
+ bool check_subpath_and_update_thread_path (basic_block last_bb,
+ basic_block new_bb,
+ int *next_path_length);
+ public:
+ void find_jump_threads_backwards (basic_block bb, bool speed_p);
+};
/* Simple helper to get the last statement from BB, which is assumed
to be a control statement. Return NULL if the last statement is
@@ -61,14 +89,15 @@ get_gimple_control_stmt (basic_block bb)
/* Return true if the CFG contains at least one path from START_BB to
END_BB. When a path is found, record in PATH the blocks from
- END_BB to START_BB. VISITED_BBS is used to make sure we don't fall
- into an infinite loop. Bound the recursion to basic blocks
- belonging to LOOP. */
+ END_BB to START_BB. LOCAL_VISITED_BBS is used to make sure we
+ don't fall into an infinite loop. Bound the recursion to basic
+ blocks belonging to LOOP. */
static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
vec<basic_block> &path,
- hash_set<basic_block> &visited_bbs, loop_p loop)
+ hash_set<basic_block> &local_visited_bbs,
+ loop_p loop)
{
if (loop != start_bb->loop_father)
return false;
@@ -79,12 +108,13 @@ fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
return true;
}
- if (!visited_bbs.add (start_bb))
+ if (!local_visited_bbs.add (start_bb))
{
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, start_bb->succs)
- if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
+ if (fsm_find_thread_path (e->dest, end_bb, path, local_visited_bbs,
+ loop))
{
path.safe_push (start_bb);
return true;
@@ -102,16 +132,14 @@ fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
NAME is the SSA_NAME of the variable we found to have a constant
value on PATH. ARG is the constant value of NAME on that path.
- BBI will be appended to PATH when we have a profitable jump threading
- path. Callers are responsible for removing BBI from PATH in that case.
-
- SPEED_P indicate that we could increase code size to improve the
- code path. */
+ BBI will be appended to PATH when we have a profitable jump
+ threading path. Callers are responsible for removing BBI from PATH
+ in that case. */
-static edge
-profitable_jump_thread_path (vec<basic_block> &path,
- basic_block bbi, tree name, tree arg,
- bool speed_p, bool *creates_irreducible_loop)
+edge
+thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
+ tree arg,
+ bool *creates_irreducible_loop)
{
/* Note BBI is not in the path yet, hence the +1 in the test below
to make sure BBI is accounted for in the path length test. */
@@ -412,14 +440,14 @@ profitable_jump_thread_path (vec<basic_block> &path,
return taken_edge;
}
-/* PATH is vector of blocks forming a jump threading path in reverse
- order. TAKEN_EDGE is the edge taken from path[0].
+/* The current path PATH is a vector of blocks forming a jump threading
+ path in reverse order. TAKEN_EDGE is the edge taken from path[0].
- Convert that path into the form used by register_jump_thread and
- register the path. */
+ Convert the current path into the form used by register_jump_thread and
+ register it. */
-static void
-convert_and_register_jump_thread_path (vec<basic_block> &path, edge taken_edge)
+void
+thread_jumps::convert_and_register_current_path (edge taken_edge)
{
vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();
@@ -455,11 +483,10 @@ convert_and_register_jump_thread_path (vec<basic_block> &path, edge taken_edge)
Store the length of the subpath in NEXT_PATH_LENGTH. */
-static bool
-check_subpath_and_update_thread_path (basic_block last_bb, basic_block new_bb,
- hash_set<basic_block> &visited_bbs,
- vec<basic_block> &path,
- int *next_path_length)
+bool
+thread_jumps::check_subpath_and_update_thread_path (basic_block last_bb,
+ basic_block new_bb,
+ int *next_path_length)
{
edge e;
int e_count = 0;
@@ -468,10 +495,10 @@ check_subpath_and_update_thread_path (basic_block last_bb, basic_block new_bb,
FOR_EACH_EDGE (e, ei, last_bb->preds)
{
- hash_set<basic_block> visited_bbs;
+ hash_set<basic_block> local_visited_bbs;
- if (fsm_find_thread_path (new_bb, e->src, next_path, visited_bbs,
- e->src->loop_father))
+ if (fsm_find_thread_path (new_bb, e->src, next_path,
+ local_visited_bbs, e->src->loop_father))
++e_count;
/* If there is more than one path, stop. */
@@ -507,28 +534,21 @@ check_subpath_and_update_thread_path (basic_block last_bb, basic_block new_bb,
NAME is an SSA NAME with a possible constant value of ARG on PATH.
- DEF_BB is the basic block that ultimately defines the constant.
+ DEF_BB is the basic block that ultimately defines the constant. */
- SPEED_P indicate that we could increase code size to improve the
- code path.
-*/
-
-static void
-register_jump_thread_path_if_profitable (vec<basic_block> &path,
- tree name,
- tree arg,
- basic_block def_bb,
- bool speed_p)
+void
+thread_jumps::register_jump_thread_path_if_profitable (tree name, tree arg,
+ basic_block def_bb)
{
if (TREE_CODE_CLASS (TREE_CODE (arg)) != tcc_constant)
return;
bool irreducible = false;
- edge taken_edge = profitable_jump_thread_path (path, def_bb, name, arg,
- speed_p, &irreducible);
+ edge taken_edge = profitable_jump_thread_path (def_bb, name, arg,
+ &irreducible);
if (taken_edge)
{
- convert_and_register_jump_thread_path (path, taken_edge);
+ convert_and_register_current_path (taken_edge);
path.pop ();
if (irreducible)
@@ -536,29 +556,15 @@ register_jump_thread_path_if_profitable (vec<basic_block> &path,
}
}
-static void fsm_find_control_statement_thread_paths (tree,
- hash_set<basic_block> &,
- vec<basic_block> &,
- bool, bool);
-
/* Given PHI which defines NAME in block DEF_BB, recurse through the
PHI's arguments searching for paths where NAME will ultimately have
a constant value.
- VISITED_BBS tracks the blocks that have been encountered.
-
PATH contains the series of blocks to traverse that will result in
- NAME having a constant value.
-
- SEEN_LOOP_PHI tracks if we have recursed through a loop PHI node.
-
- SPEED_P indicates if we are optimizing for speed over space. */
+ NAME having a constant value. */
-static void
-handle_phi (gphi *phi, tree name, basic_block def_bb,
- hash_set<basic_block> &visited_bbs,
- vec<basic_block> &path,
- bool seen_loop_phi, bool speed_p)
+void
+thread_jumps::handle_phi (gphi *phi, tree name, basic_block def_bb)
{
/* Iterate over the arguments of PHI. */
for (unsigned int i = 0; i < gimple_phi_num_args (phi); i++)
@@ -575,14 +581,13 @@ handle_phi (gphi *phi, tree name, basic_block def_bb,
path.safe_push (bbi);
/* Recursively follow SSA_NAMEs looking for a constant
definition. */
- fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
- seen_loop_phi, speed_p);
+ fsm_find_control_statement_thread_paths (arg);
path.pop ();
continue;
}
- register_jump_thread_path_if_profitable (path, name, arg, bbi, speed_p);
+ register_jump_thread_path_if_profitable (name, arg, bbi);
}
}
@@ -620,26 +625,16 @@ handle_assignment_p (gimple *stmt)
PHI's arguments searching for paths where NAME will ultimately have
a constant value.
- VISITED_BBS tracks the blocks that have been encountered.
-
PATH contains the series of blocks to traverse that will result in
- NAME having a constant value.
-
- SEEN_LOOP_PHI tracks if we have recursed through a loop PHI node.
+ NAME having a constant value. */
- SPEED_P indicates if we are optimizing for speed over space. */
-
-static void
-handle_assignment (gimple *stmt, tree name, basic_block def_bb,
- hash_set<basic_block> &visited_bbs,
- vec<basic_block> &path,
- bool seen_loop_phi, bool speed_p)
+void
+thread_jumps::handle_assignment (gimple *stmt, tree name, basic_block def_bb)
{
tree arg = gimple_assign_rhs1 (stmt);
if (TREE_CODE (arg) == SSA_NAME)
- fsm_find_control_statement_thread_paths (arg, visited_bbs,
- path, seen_loop_phi, speed_p);
+ fsm_find_control_statement_thread_paths (arg);
else
{
@@ -648,8 +643,7 @@ handle_assignment (gimple *stmt, tree name, basic_block def_bb,
block at this point. So we can just pop it. */
path.pop ();
- register_jump_thread_path_if_profitable (path, name, arg, def_bb,
- speed_p);
+ register_jump_thread_path_if_profitable (name, arg, def_bb);
/* And put the current block back onto the path so that the
state of the stack is unchanged when we leave. */
@@ -659,16 +653,10 @@ handle_assignment (gimple *stmt, tree name, basic_block def_bb,
/* We trace the value of the SSA_NAME NAME back through any phi nodes
looking for places where it gets a constant value and save the
- path.
+ path. */
- SPEED_P indicate that we could increase code size to improve the
- code path. */
-
-static void
-fsm_find_control_statement_thread_paths (tree name,
- hash_set<basic_block> &visited_bbs,
- vec<basic_block> &path,
- bool seen_loop_phi, bool speed_p)
+void
+thread_jumps::fsm_find_control_statement_thread_paths (tree name)
{
/* If NAME appears in an abnormal PHI, then don't try to trace its
value back through PHI nodes. */
@@ -722,7 +710,6 @@ fsm_find_control_statement_thread_paths (tree name,
create bogus jump threading paths. */
visited_bbs.add (path[0]);
if (!check_subpath_and_update_thread_path (last_bb_in_path, def_bb,
- visited_bbs, path,
&next_path_length))
return;
}
@@ -730,11 +717,9 @@ fsm_find_control_statement_thread_paths (tree name,
gcc_assert (path.last () == def_bb);
if (gimple_code (def_stmt) == GIMPLE_PHI)
- handle_phi (as_a <gphi *> (def_stmt), name, def_bb,
- visited_bbs, path, seen_loop_phi, speed_p);
+ handle_phi (as_a <gphi *> (def_stmt), name, def_bb);
else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
- handle_assignment (def_stmt, name, def_bb,
- visited_bbs, path, seen_loop_phi, speed_p);
+ handle_assignment (def_stmt, name, def_bb);
/* Remove all the nodes that we added from NEXT_PATH. */
if (next_path_length)
@@ -749,8 +734,8 @@ fsm_find_control_statement_thread_paths (tree name,
SPEED_P indicate that we could increase code size to improve the
code path. */
-void
-find_jump_threads_backwards (basic_block bb, bool speed_p)
+void
+thread_jumps::find_jump_threads_backwards (basic_block bb, bool speed_p)
{
gimple *stmt = get_gimple_control_stmt (bb);
if (!stmt)
@@ -774,13 +759,15 @@ find_jump_threads_backwards (basic_block bb, bool speed_p)
if (!name || TREE_CODE (name) != SSA_NAME)
return;
- auto_vec<basic_block> bb_path;
- bb_path.safe_push (bb);
- hash_set<basic_block> visited_bbs;
+ /* Initialize the pass local data that changes at each iteration. */
+ path.truncate (0);
+ path.safe_push (bb);
+ visited_bbs.empty ();
+ seen_loop_phi = false;
+ this->speed_p = speed_p;
max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
- fsm_find_control_statement_thread_paths (name, visited_bbs, bb_path, false,
- speed_p);
+ fsm_find_control_statement_thread_paths (name);
}
namespace {
@@ -823,11 +810,12 @@ pass_thread_jumps::execute (function *fun)
loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
/* Try to thread each block with more than one successor. */
+ thread_jumps pass;
basic_block bb;
FOR_EACH_BB_FN (bb, fun)
{
if (EDGE_COUNT (bb->succs) > 1)
- find_jump_threads_backwards (bb, true);
+ pass.find_jump_threads_backwards (bb, true);
}
bool changed = thread_through_all_blocks (true);
@@ -883,11 +871,12 @@ pass_early_thread_jumps::execute (function *fun)
loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
/* Try to thread each block with more than one successor. */
+ thread_jumps pass;
basic_block bb;
FOR_EACH_BB_FN (bb, fun)
{
if (EDGE_COUNT (bb->succs) > 1)
- find_jump_threads_backwards (bb, false);
+ pass.find_jump_threads_backwards (bb, false);
}
thread_through_all_blocks (true);
diff --git a/gcc/tree-ssa-threadbackward.h b/gcc/tree-ssa-threadbackward.h
deleted file mode 100644
index f758ff1f1ad..00000000000
--- a/gcc/tree-ssa-threadbackward.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Header file for SSA dominator optimizations.
- Copyright (C) 2013-2017 Free Software Foundation, Inc.
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING3. If not see
-<http://www.gnu.org/licenses/>. */
-
-#ifndef GCC_TREE_SSA_THREADFSM_H
-#define GCC_TREE_SSA_THREADFSM_H
-
-extern void find_jump_threads_backwards (edge);
-
-#endif /* GCC_TREE_SSA_THREADFSM_H */