We have a new overload of vect_get_num_copies that handles both
SLP and non-SLP.  Use it, and divide by group_size for SLP only
when using load-store lanes.
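
For context, a minimal sketch of what the overload must do at this
call site, inferred from the hunk below; the actual body in the GCC
sources may differ:

  /* Sketch only, not the real GCC implementation: for SLP, return
     the node's total number of vector stmts (which is why the
     load-store-lanes path below still divides by group_size);
     otherwise fall back to the VF-based count.  */
  unsigned int
  vect_get_num_copies (loop_vec_info loop_vinfo, slp_tree slp_node,
                       tree vectype)
  {
    if (slp_node)
      return SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
    return vect_get_num_copies (loop_vinfo, vectype);
  }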

        * tree-vect-stmts.cc (check_load_store_for_partial_vectors):
        Use the new vect_get_num_copies overload.  Only divide by
        group_size for SLP when using load-store lanes.
---
 gcc/tree-vect-stmts.cc | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 1b351c5c66e..a8031b4f6f5 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1507,19 +1507,15 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
   if (memory_access_type == VMAT_INVARIANT)
     return;
 
-  unsigned int nvectors;
-  if (slp_node)
-    /* ???  Incorrect for multi-lane lanes.  */
-    nvectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size;
-  else
-    nvectors = vect_get_num_copies (loop_vinfo, vectype);
-
+  unsigned int nvectors = vect_get_num_copies (loop_vinfo, slp_node, vectype);
   vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
   vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
   machine_mode vecmode = TYPE_MODE (vectype);
   bool is_load = (vls_type == VLS_LOAD);
   if (memory_access_type == VMAT_LOAD_STORE_LANES)
     {
+      if (slp_node)
+       nvectors /= group_size;
       internal_fn ifn
        = (is_load ? vect_load_lanes_supported (vectype, group_size, true)
                   : vect_store_lanes_supported (vectype, group_size, true));
-- 
2.43.0
