The missing SLP discovery support for scatter stores was responsible for a number of SVE FAILs with --param vect-force-slp=1.
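
For context: the new operand maps follow the encoding already used by
vect_get_operand_map, where the first element is the number of SLP operands
and the remaining elements are the call argument indices that become SLP
children.  For IFN_MASK_SCATTER_STORE that picks up argument 1 (the offset),
3 (the stored value) and 4 (the mask), mirroring the existing gather maps.
A minimal stand-alone sketch of how such a map is read (illustration only,
not code from the patch):

  /* Illustration only: decode a map of the form
     { nops, arg-index-0, ..., arg-index-(nops-1) }.  */
  #include <cstdio>

  static const int arg1_arg3_arg4_map[] = { 3, 1, 3, 4 };

  int
  main ()
  {
    int nops = arg1_arg3_arg4_map[0];
    for (int i = 0; i < nops; ++i)
      /* For IFN_MASK_SCATTER_STORE: 1 = offset, 3 = stored value,
         4 = mask; the base pointer and scale are not SLP operands.  */
      printf ("SLP child %d <- call argument %d\n",
              i, arg1_arg3_arg4_map[1 + i]);
  }
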
Bootstrapped and tested on x86_64-unknown-linux-gnu.

	* tree-vect-slp.cc (arg1_arg3_map): New.
	(arg1_arg3_arg4_map): Likewise.
	(vect_get_operand_map): Handle IFN_SCATTER_STORE,
	IFN_MASK_SCATTER_STORE and IFN_MASK_LEN_SCATTER_STORE.
	(vect_build_slp_tree_1): Likewise.
	* tree-vect-stmts.cc (vectorizable_store): For SLP masked
	gather/scatter record the mask with proper number of copies.
	* tree-vect-loop.cc (vectorizable_recurr): Avoid costing the
	initial value construction in the prologue twice with SLP.
---
 gcc/tree-vect-loop.cc  |  9 ++++++---
 gcc/tree-vect-slp.cc   | 17 ++++++++++++++++-
 gcc/tree-vect-stmts.cc |  6 ++++--
 3 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index 3f2095da449..6cfce5aa7e1 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -9623,9 +9623,12 @@ vectorizable_recurr (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
     return false;
 
   /* The recurrence costs the initialization vector and one permute
-     for each copy.  */
-  unsigned prologue_cost = record_stmt_cost (cost_vec, 1, scalar_to_vec,
-                                             stmt_info, 0, vect_prologue);
+     for each copy.  With SLP the prologue value is explicitly
+     represented and costed separately.  */
+  unsigned prologue_cost = 0;
+  if (!slp_node)
+    prologue_cost = record_stmt_cost (cost_vec, 1, scalar_to_vec,
+                                      stmt_info, 0, vect_prologue);
   unsigned inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                            stmt_info, 0, vect_body);
   if (dump_enabled_p ())
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index d78ce036345..7ec2f278067 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -512,7 +512,9 @@ static const int no_arg_map[] = { 0 };
 static const int arg0_map[] = { 1, 0 };
 static const int arg1_map[] = { 1, 1 };
 static const int arg2_map[] = { 1, 2 };
+static const int arg1_arg3_map[] = { 2, 1, 3 };
 static const int arg1_arg4_map[] = { 2, 1, 4 };
+static const int arg1_arg3_arg4_map[] = { 3, 1, 3, 4 };
 static const int arg3_arg2_map[] = { 2, 3, 2 };
 static const int op1_op0_map[] = { 2, 1, 0 };
 static const int off_map[] = { 1, -3 };
@@ -573,6 +575,13 @@ vect_get_operand_map (const gimple *stmt, bool gather_scatter_p = false,
       case IFN_MASK_LEN_GATHER_LOAD:
 	return arg1_arg4_map;
 
+      case IFN_SCATTER_STORE:
+	return arg1_arg3_map;
+
+      case IFN_MASK_SCATTER_STORE:
+      case IFN_MASK_LEN_SCATTER_STORE:
+	return arg1_arg3_arg4_map;
+
       case IFN_MASK_STORE:
 	return gather_scatter_p ? off_arg3_arg2_map : arg3_arg2_map;
 
@@ -1187,7 +1196,10 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
       if (cfn == CFN_MASK_LOAD
 	  || cfn == CFN_GATHER_LOAD
 	  || cfn == CFN_MASK_GATHER_LOAD
-	  || cfn == CFN_MASK_LEN_GATHER_LOAD)
+	  || cfn == CFN_MASK_LEN_GATHER_LOAD
+	  || cfn == CFN_SCATTER_STORE
+	  || cfn == CFN_MASK_SCATTER_STORE
+	  || cfn == CFN_MASK_LEN_SCATTER_STORE)
 	ldst_p = true;
       else if (cfn == CFN_MASK_STORE)
 	{
@@ -1473,6 +1485,9 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
 	  && rhs_code != CFN_GATHER_LOAD
 	  && rhs_code != CFN_MASK_GATHER_LOAD
 	  && rhs_code != CFN_MASK_LEN_GATHER_LOAD
+	  && rhs_code != CFN_SCATTER_STORE
+	  && rhs_code != CFN_MASK_SCATTER_STORE
+	  && rhs_code != CFN_MASK_LEN_SCATTER_STORE
 	  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info)
 	  /* Not grouped loads are handled as externals for BB
 	     vectorization.  For loop vectorization we can handle
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index b281beb8d65..fa44e19f163 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -9162,7 +9162,8 @@ vectorizable_store (vec_info *vinfo,
 	    {
 	      if (loop_masks)
 		final_mask = vect_get_loop_mask (loop_vinfo, gsi,
-						 loop_masks, ncopies,
+						 loop_masks,
+						 ncopies * vec_num,
 						 vectype, j);
 	      if (vec_mask)
 		final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
@@ -9188,7 +9189,8 @@ vectorizable_store (vec_info *vinfo,
 		{
 		  if (loop_lens)
 		    final_len = vect_get_loop_len (loop_vinfo, gsi,
-						   loop_lens, ncopies,
+						   loop_lens,
+						   ncopies * vec_num,
 						   vectype, j, 1);
 		  else
 		    final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
-- 
2.43.0
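
One way to read the vectorizable_store hunks: with SLP a single unrolled
copy of the instance emits vec_num vector statements, so the loop
masks/lens have to be queried for ncopies * vec_num vectors rather than
ncopies, with j indexing into that larger set.  A tiny stand-alone
illustration of the counting (example numbers only, not from the patch):

  /* Illustration only: how many per-vector masks/lens an SLP store
     needs when the group is unrolled.  */
  #include <cstdio>

  int
  main ()
  {
    unsigned ncopies = 2;  /* unrolled copies of the SLP instance */
    unsigned vec_num = 4;  /* vector stmts per copy */
    unsigned nvectors = ncopies * vec_num;  /* value now passed to
                                               vect_get_loop_mask/len */
    for (unsigned j = 0; j < nvectors; ++j)
      printf ("mask/len %u of %u\n", j, nvectors);
  }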