The following fixes a latent bug in vect dataref alignment analysis. Bootstrapped and tested on x86_64-unknown-linux-gnu, applied to trunk.
Richard.

2017-03-27  Richard Biener  <rguent...@suse.de>

	PR tree-optimization/80170
	* tree-vect-data-refs.c (vect_compute_data_ref_alignment): Make
	sure DR/SCEV didn't fold in constants we do not see when looking
	at the reference base alignment.

	* gcc.dg/pr80170.c: New testcase.

Index: gcc/tree-vect-data-refs.c
===================================================================
--- gcc/tree-vect-data-refs.c	(revision 246489)
+++ gcc/tree-vect-data-refs.c	(working copy)
@@ -779,7 +779,7 @@ vect_compute_data_ref_alignment (struct
   base = ref;
   while (handled_component_p (base))
     base = TREE_OPERAND (base, 0);
-  unsigned int base_alignment;
+  unsigned int base_alignment = 0;
   unsigned HOST_WIDE_INT base_bitpos;
   get_object_alignment_1 (base, &base_alignment, &base_bitpos);
   /* As data-ref analysis strips the MEM_REF down to its base operand
@@ -788,8 +788,17 @@ vect_compute_data_ref_alignment (struct
      DR_BASE_ADDRESS.  */
   if (TREE_CODE (base) == MEM_REF)
     {
-      base_bitpos -= mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
-      base_bitpos &= (base_alignment - 1);
+      /* Note all this only works if DR_BASE_ADDRESS is the same as
+         MEM_REF operand zero, otherwise DR/SCEV analysis might have factored
+         in other offsets.  We need to rework DR to compute the alignment
+         of DR_BASE_ADDRESS as long as all information is still available.  */
+      if (operand_equal_p (TREE_OPERAND (base, 0), base_addr, 0))
+	{
+	  base_bitpos -= mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
+	  base_bitpos &= (base_alignment - 1);
+	}
+      else
+	base_bitpos = BITS_PER_UNIT;
     }
   if (base_bitpos != 0)
     base_alignment = base_bitpos & -base_bitpos;
Index: gcc/testsuite/gcc.dg/pr80170.c
===================================================================
--- gcc/testsuite/gcc.dg/pr80170.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/pr80170.c	(working copy)
@@ -0,0 +1,42 @@
+/* { dg-do run } */
+/* { dg-options "-fgimple -O2 -ftree-slp-vectorize" } */
+
+struct A
+{
+  void * a;
+  void * b;
+};
+
+struct __attribute__((aligned(16))) B
+{
+  void * pad;
+  void * misaligned;
+  void * pad2;
+
+  struct A a;
+};
+
+__attribute__((noclone, noinline))
+void __GIMPLE (startwith("slp"))
+NullB (void * misalignedPtr)
+{
+  struct B * b;
+
+  bb_2:
+#if __SIZEOF_LONG__ == 8
+  b_2 = misalignedPtr_1(D) + 18446744073709551608ul;
+#else
+  b_2 = misalignedPtr_1(D) + 4294967292ul;
+#endif
+  __MEM <struct B> (b_2).a.a = _Literal (void *) 0;
+  __MEM <struct B> (b_2).a.b = _Literal (void *) 0;
+  return;
+
+}
+
+int main()
+{
+  struct B b;
+  NullB (&b.misaligned);
+  return 0;
+}
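
For reference, here is a standalone sketch (not part of the patch) of the alignment
reasoning involved.  It assumes an LP64 target with 8-byte pointers, and
align_from_offset is a hypothetical helper that mirrors the
"base_bitpos & -base_bitpos" computation in vect_compute_data_ref_alignment, just on
byte offsets instead of bit positions.  Member 'a' of the 16-byte aligned struct B
from the testcase sits at byte offset 24, so an address derived from it is only
provably 8-byte aligned, not the 16 bytes suggested by the type of struct B.

/* Standalone illustration, not part of the patch.  Offsets assume an
   LP64 target (8-byte pointers); the struct layout follows
   gcc.dg/pr80170.c.  */

#include <stddef.h>
#include <stdio.h>

struct A { void *a; void *b; };

struct __attribute__((aligned(16))) B
{
  void *pad;
  void *misaligned;
  void *pad2;
  struct A a;
};

/* Largest power-of-two alignment provable for an address that is OFF
   bytes past an OBJECT_ALIGN-aligned object; same idea as the
   "if (base_bitpos != 0) base_alignment = base_bitpos & -base_bitpos"
   adjustment in the patch.  */
static size_t
align_from_offset (size_t off, size_t object_align)
{
  if (off == 0)
    return object_align;       /* A zero offset keeps the full alignment.  */
  size_t a = off & -off;       /* Largest power of two dividing OFF.  */
  return a < object_align ? a : object_align;
}

int
main (void)
{
  size_t off = offsetof (struct B, a);            /* 24 on LP64.  */
  printf ("offsetof (struct B, a) = %zu\n", off);
  /* Prints 8, not 16, so vectorizing the two pointer stores in the
     testcase as a single aligned 16-byte store would be invalid.  */
  printf ("provable alignment of &b.a = %zu\n",
          align_from_offset (off, (size_t) __alignof__ (struct B)));
  return 0;
}

The new operand_equal_p check only performs the MEM_REF offset adjustment when the
MEM_REF operand zero really is DR_BASE_ADDRESS; otherwise base_bitpos is
conservatively set to BITS_PER_UNIT, which clamps the derived base alignment down to
byte alignment instead of trusting an alignment that DR/SCEV analysis may have
invalidated by folding in constants.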