The following reverts the previous bogus fixes and instead fixes strided store support to properly get at SLP operands. It also fixes a typo, exchanging the always-true i == i for the intended i == 0.
Bootstrapped on x86_64-unknown-linux-gnu, testing in progress. Richard. 2015-06-16 Richard Biener <rguent...@suse.de> PR tree-optimization/66251 * tree-vect-stmts.c (vectorizable_store): Fix gathering of vectorized stmts for SLP strided stores. * gfortran.fortran-torture/compile/pr66251-2.f90: New testcase. Revert 2015-05-22 Richard Biener <rguent...@suse.de> PR tree-optimization/66251 * tree-vect-stmts.c (vectorizable_conversion): Properly set STMT_VINFO_VEC_STMT even for the SLP case. 2015-05-26 Michael Matz <m...@suse.de> PR middle-end/66251 * tree-vect-stmts.c (vect_create_vectorized_demotion_stmts): Always set STMT_VINFO_VEC_STMT, also with SLP. Index: gcc/testsuite/gfortran.fortran-torture/compile/pr66251-2.f90 =================================================================== --- gcc/testsuite/gfortran.fortran-torture/compile/pr66251-2.f90 (revision 0) +++ gcc/testsuite/gfortran.fortran-torture/compile/pr66251-2.f90 (working copy) @@ -0,0 +1,23 @@ +subroutine mv(m,nc,irp,ja,val,x,ldx,y,ldy,acc) + use iso_fortran_env + implicit none + + integer, parameter :: ipk_ = int32 + integer, parameter :: spk_ = real32 + complex(spk_), parameter :: czero=(0.0_spk_,0.0_spk_) + + integer(ipk_), intent(in) :: m,ldx,ldy,nc,irp(*),ja(*) + complex(spk_), intent(in) :: x(ldx,*),val(*) + complex(spk_), intent(inout) :: y(ldy,*) + complex(spk_), intent(inout) :: acc(*) + integer(ipk_) :: i,j,k, ir, jc + + do i=1,m + acc(1:nc) = czero + do j=irp(i), irp(i+1)-1 + acc(1:nc) = acc(1:nc) + val(j) * x(ja(j),1:nc) + enddo + y(i,1:nc) = -acc(1:nc) + end do + +end subroutine mv Index: gcc/tree-vect-stmts.c =================================================================== --- gcc/tree-vect-stmts.c (revision 224514) +++ gcc/tree-vect-stmts.c (working copy) @@ -3370,13 +3370,15 @@ vect_create_vectorized_demotion_stmts (v (or in STMT_VINFO_RELATED_STMT chain). 
*/ if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); - - if (!*prev_stmt_info) - STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; else - STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt; + { + if (!*prev_stmt_info) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + else + STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt; - *prev_stmt_info = vinfo_for_stmt (new_stmt); + *prev_stmt_info = vinfo_for_stmt (new_stmt); + } } } @@ -3955,12 +3957,14 @@ vectorizable_conversion (gimple stmt, gi if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); - - if (!prev_stmt_info) - STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; else - STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; - prev_stmt_info = vinfo_for_stmt (new_stmt); + { + if (!prev_stmt_info) + STMT_VINFO_VEC_STMT (stmt_info) = new_stmt; + else + STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; + prev_stmt_info = vinfo_for_stmt (new_stmt); + } } } @@ -5327,9 +5331,23 @@ vectorizable_store (gimple stmt, gimple_ /* We've set op and dt above, from gimple_assign_rhs1(stmt), and first_stmt == stmt. */ if (j == 0) - vec_oprnd = vect_get_vec_def_for_operand (op, first_stmt, NULL); + { + if (slp) + { + vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL, + slp_node, -1); + vec_oprnd = vec_oprnds[0]; + } + else + vec_oprnd = vect_get_vec_def_for_operand (op, first_stmt, NULL); + } else - vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); + { + if (slp) + vec_oprnd = vec_oprnds[j]; + else + vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd); + } for (i = 0; i < nstores; i++) { @@ -5359,7 +5377,7 @@ vectorizable_store (gimple stmt, gimple_ vect_finish_stmt_generation (stmt, incr, gsi); running_off = newoff; - if (j == 0 && i == i) + if (j == 0 && i == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = assign; else STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;