The testcases show that we fail to disregard alignment for invariant
loads.  The patch handles them like we handle gather and scatter.
Bootstrapped on x86_64-unknown-linux-gnu, testing in progress.

2021-01-15  Richard Biener  <rguent...@suse.de>

        PR tree-optimization/96376
        * tree-vect-stmts.c (get_load_store_type): Disregard alignment
        for VMAT_INVARIANT.
---
 gcc/tree-vect-stmts.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 4d72c4db2f7..f180ced3124 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -2378,19 +2378,26 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
   else
     {
       int cmp = compare_step_with_zero (vinfo, stmt_info);
-      if (cmp < 0)
-       *memory_access_type = get_negative_load_store_type
-         (vinfo, stmt_info, vectype, vls_type, ncopies);
-      else if (cmp == 0)
+      if (cmp == 0)
        {
          gcc_assert (vls_type == VLS_LOAD);
          *memory_access_type = VMAT_INVARIANT;
+         /* Invariant accesses perform only component accesses, alignment
+            is irrelevant for them.  */
+         *alignment_support_scheme = dr_unaligned_supported;
        }
       else
-       *memory_access_type = VMAT_CONTIGUOUS;
-      *alignment_support_scheme
-       = vect_supportable_dr_alignment (vinfo,
-                                        STMT_VINFO_DR_INFO (stmt_info), false);
+       {
+         if (cmp < 0)
+           *memory_access_type = get_negative_load_store_type
+              (vinfo, stmt_info, vectype, vls_type, ncopies);
+         else
+           *memory_access_type = VMAT_CONTIGUOUS;
+         *alignment_support_scheme
+           = vect_supportable_dr_alignment (vinfo,
+                                            STMT_VINFO_DR_INFO (stmt_info),
+                                            false);
+       }
     }
 
   if ((*memory_access_type == VMAT_ELEMENTWISE
-- 
2.26.2

Reply via email to