The following adds checks that when we search for a vector stmt
insert location we arrive at one where all required operand defs
dominate the insert location.  At the moment any such failure
only blows up during SSA verification.

There's the long-standing issue that we do not verify there
exists a valid schedule of the SLP graph from BB vectorization
into the existing CFG.  We do not have the ability to insert
vector stmts on the dominance frontier "end", nor to insert
LC PHIs that would be eventually required.

This should be done all differently, computing the schedule
during analysis and failing if we can't schedule.

Bootstrap/regtest running on x86_64-unknown-linux-gnu.

        PR tree-optimization/119960
        * tree-vect-slp.cc (vect_schedule_slp_node): Add sanity
        check that operand defs dominate the chosen insert location.
---
 gcc/tree-vect-slp.cc | 36 ++++++++++++++++++++++++++++--------
 1 file changed, 28 insertions(+), 8 deletions(-)

diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 5eca08be2ef..439b99cab0f 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -11214,9 +11214,14 @@ vect_schedule_slp_node (vec_info *vinfo,
                            == cycle_phi_info_type);
                gphi *phi = as_a <gphi *>
                              (vect_find_last_scalar_stmt_in_slp (child)->stmt);
-               if (!last_stmt
-                   || vect_stmt_dominates_stmt_p (last_stmt, phi))
+               if (!last_stmt)
                  last_stmt = phi;
+               else if (vect_stmt_dominates_stmt_p (last_stmt, phi))
+                 last_stmt = phi;
+               else if (vect_stmt_dominates_stmt_p (phi, last_stmt))
+                 ;
+               else
+                 gcc_unreachable ();
              }
            /* We are emitting all vectorized stmts in the same place and
               the last one is the last.
@@ -11227,9 +11232,14 @@ vect_schedule_slp_node (vec_info *vinfo,
            FOR_EACH_VEC_ELT (SLP_TREE_VEC_DEFS (child), j, vdef)
              {
                gimple *vstmt = SSA_NAME_DEF_STMT (vdef);
-               if (!last_stmt
-                   || vect_stmt_dominates_stmt_p (last_stmt, vstmt))
+               if (!last_stmt)
+                 last_stmt = vstmt;
+               else if (vect_stmt_dominates_stmt_p (last_stmt, vstmt))
                  last_stmt = vstmt;
+               else if (vect_stmt_dominates_stmt_p (vstmt, last_stmt))
+                 ;
+               else
+                 gcc_unreachable ();
              }
          }
        else if (!SLP_TREE_VECTYPE (child))
@@ -11242,9 +11252,14 @@ vect_schedule_slp_node (vec_info *vinfo,
                  && !SSA_NAME_IS_DEFAULT_DEF (def))
                {
                  gimple *stmt = SSA_NAME_DEF_STMT (def);
-                 if (!last_stmt
-                     || vect_stmt_dominates_stmt_p (last_stmt, stmt))
+                 if (!last_stmt)
+                   last_stmt = stmt;
+                 else if (vect_stmt_dominates_stmt_p (last_stmt, stmt))
                    last_stmt = stmt;
+                 else if (vect_stmt_dominates_stmt_p (stmt, last_stmt))
+                   ;
+                 else
+                   gcc_unreachable ();
                }
          }
        else
@@ -11265,9 +11280,14 @@ vect_schedule_slp_node (vec_info *vinfo,
                      && !SSA_NAME_IS_DEFAULT_DEF (vdef))
                    {
                      gimple *vstmt = SSA_NAME_DEF_STMT (vdef);
-                     if (!last_stmt
-                         || vect_stmt_dominates_stmt_p (last_stmt, vstmt))
+                     if (!last_stmt)
+                       last_stmt = vstmt;
+                     else if (vect_stmt_dominates_stmt_p (last_stmt, vstmt))
                        last_stmt = vstmt;
+                     else if (vect_stmt_dominates_stmt_p (vstmt, last_stmt))
+                       ;
+                     else
+                       gcc_unreachable ();
                    }
              }
          }
-- 
2.43.0

Reply via email to