Hi Richards,
This patch adds support for the V8QI->V8HI case of the widening vect patterns,
as discussed, to address PR98772.
Bootstrapped and regression tested on aarch64.
[aarch64][vect] Support V8QI->V8HI WIDEN_ patterns
In the case where 8 out of every 16 elements are widened using a
widening pattern and the next 8 are skipped, the patterns are not
recognized. This is because they are normally used in a pair, such as
VEC_WIDEN_MINUS_HI/LO, to achieve a v16qi->v16hi conversion, for example.
This patch adds support for V8QI->V8HI patterns.
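For reference, this is the sort of loop the patch targets (it is the wplus
function from the new test added below); the additions only need a single
V8QI->V8HI widening, so the usual hi/lo pair of widening patterns was
previously not recognized here:

  #include <stdint.h>

  void
  wplus (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2)
  {
    for (int y = 0; y < 4; y++)
      {
        for (int x = 0; x < 4; x++)
          d[x + y * 4] = pix1[x] + pix2[x];
        pix1 += 16;
        pix2 += 16;
      }
  }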
gcc/ChangeLog:
PR tree-optimization/98772
* optabs-tree.c (supportable_convert_operation): Add case for V8QI->V8HI.
* tree-vect-stmts.c (vect_create_vectorized_promotion_stmts): New
function to generate promotion stmts for V8QI->V8HI.
(vectorizable_conversion): Add case for V8QI->V8HI.
gcc/testsuite/ChangeLog:
PR tree-optimization/98772
* gcc.target/aarch64/pr98772.c: New test.
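As a rough sketch of what the new path produces (illustrative only; the
vectorizer emits the equivalent gimple, and the vector typedefs and the
widen_plus_v8qi name below are purely hypothetical): both V8QI operands are
first widened to V8HI with an ordinary conversion, and the plain,
non-widening form of the operation is then applied at the wide type, e.g.
for WIDEN_PLUS_EXPR:

  typedef unsigned char v8qi __attribute__ ((vector_size (8)));
  typedef unsigned short v8hi __attribute__ ((vector_size (16)));

  /* Widen each 8-byte input to 8 halfwords, then use the ordinary
     addition instead of a hi/lo pair of widening operations.  */
  static v8hi
  widen_plus_v8qi (v8qi a, v8qi b)
  {
    v8hi wa = __builtin_convertvector (a, v8hi);
    v8hi wb = __builtin_convertvector (b, v8hi);
    return wa + wb;
  }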
diff --git a/gcc/optabs-tree.c b/gcc/optabs-tree.c
index c94073e3ed98f8c4cab65891f65dedebdb1ec274..b91ce3af6f0d4b3a62110bdb38f68ecc53765cad 100644
--- a/gcc/optabs-tree.c
+++ b/gcc/optabs-tree.c
@@ -308,6 +308,40 @@ supportable_convert_operation (enum tree_code code,
if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
return false;
+ /* The case where a widening operation is not making use of the full width
+ of the input vector, but is using the full width of the output vector.
+ Return the non-widened code, which will be used after the inputs are
+ converted to the wide type. */
+ if ((code == WIDEN_MINUS_EXPR
+ || code == WIDEN_PLUS_EXPR
+ || code == WIDEN_MULT_EXPR
+ || code == WIDEN_LSHIFT_EXPR)
+ && known_eq (TYPE_VECTOR_SUBPARTS (vectype_in),
+ TYPE_VECTOR_SUBPARTS (vectype_out)))
+ {
+ switch (code)
+ {
+ case WIDEN_LSHIFT_EXPR:
+ *code1 = LSHIFT_EXPR;
+ return true;
+ break;
+ case WIDEN_MINUS_EXPR:
+ *code1 = MINUS_EXPR;
+ return true;
+ break;
+ case WIDEN_PLUS_EXPR:
+ *code1 = PLUS_EXPR;
+ return true;
+ break;
+ case WIDEN_MULT_EXPR:
+ *code1 = MULT_EXPR;
+ return true;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
/* First check if we can done conversion directly. */
if ((code == FIX_TRUNC_EXPR
&& can_fix_p (m1,m2,TYPE_UNSIGNED (vectype_out), &truncp)
diff --git a/gcc/testsuite/gcc.target/aarch64/pr98772.c b/gcc/testsuite/gcc.target/aarch64/pr98772.c
new file mode 100644
index 0000000000000000000000000000000000000000..35568a9f9d60c44aa01a6afc5f7e6a0935009aaf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pr98772.c
@@ -0,0 +1,155 @@
+/* { dg-do run } */
+/* { dg-options "-O3 -save-temps" } */
+#include <stdint.h>
+#include <string.h>
+
+#define DSIZE 16
+#define PIXSIZE 64
+
+extern void
+wplus (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2 )
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] + pix2[x];
+ pix1 += 16;
+ pix2 += 16;
+ }
+}
+extern void __attribute__((optimize (0)))
+wplus_no_opt (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2 )
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] + pix2[x];
+ pix1 += 16;
+ pix2 += 16;
+ }
+}
+
+extern void
+wminus (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2 )
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] - pix2[x];
+ pix1 += 16;
+ pix2 += 16;
+ }
+}
+extern void __attribute__((optimize (0)))
+wminus_no_opt (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2 )
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] - pix2[x];
+ pix1 += 16;
+ pix2 += 16;
+ }
+}
+
+extern void
+wmult (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2 )
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] * pix2[x];
+ pix1 += 16;
+ pix2 += 16;
+ }
+}
+extern void __attribute__((optimize (0)))
+wmult_no_opt (uint16_t *d, uint8_t *restrict pix1, uint8_t *restrict pix2 )
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] * pix2[x];
+ pix1 += 16;
+ pix2 += 16;
+ }
+}
+
+extern void
+wlshift (uint16_t *d, uint8_t *restrict pix1)
+
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] << 8;
+ pix1 += 16;
+ }
+}
+extern void __attribute__((optimize (0)))
+wlshift_no_opt (uint16_t *d, uint8_t *restrict pix1)
+
+{
+ for( int y = 0; y < 4; y++ )
+ {
+ for( int x = 0; x < 4; x++ )
+ d[x + y*4] = pix1[x] << 8;
+ pix1 += 16;
+ }
+}
+
+void __attribute__((optimize (0)))
+init_arrays(uint16_t *d_a, uint16_t *d_b, uint8_t *pix1, uint8_t *pix2)
+{
+ for(int i = 0; i < DSIZE; i++)
+ {
+ d_a[i] = (1074 * i)%17;
+ d_b[i] = (1074 * i)%17;
+ }
+ for(int i = 0; i < PIXSIZE; i++)
+ {
+ pix1[i] = (1024 * i)%17;
+ pix2[i] = (1024 * i)%17;
+ }
+}
+
+/* Don't optimize main so we don't get confused over where the vector
+ instructions are generated. */
+__attribute__((optimize (0)))
+int main()
+{
+ uint16_t d_a[DSIZE];
+ uint16_t d_b[DSIZE];
+ uint8_t pix1[PIXSIZE];
+ uint8_t pix2[PIXSIZE];
+
+ init_arrays (d_a, d_b, pix1, pix2);
+ wplus(d_a, pix1, pix2);
+ wplus_no_opt(d_b, pix1, pix2);
+ if (memcmp (d_a, d_b, DSIZE * sizeof (uint16_t)) != 0)
+ return 1;
+
+ init_arrays (d_a, d_b, pix1, pix2);
+ wminus(d_a, pix1, pix2);
+ wminus_no_opt(d_b, pix1, pix2);
+ if (memcmp (d_a, d_b, DSIZE * sizeof (uint16_t)) != 0)
+ return 2;
+
+ init_arrays (d_a, d_b, pix1, pix2);
+ wmult(d_a, pix1, pix2);
+ wmult_no_opt(d_b, pix1, pix2);
+ if (memcmp (d_a, d_b, DSIZE * sizeof (uint16_t)) != 0)
+ return 3;
+
+ init_arrays (d_a, d_b, pix1, pix2);
+ wlshift(d_a, pix1);
+ wlshift_no_opt(d_b, pix1);
+ if (memcmp (d_a, d_b, DSIZE * sizeof (uint16_t)) != 0)
+ return 4;
+
+}
+
+/* { dg-final { scan-assembler-times "uaddl\\tv" 2 } } */
+/* { dg-final { scan-assembler-times "usubl\\tv" 2 } } */
+/* { dg-final { scan-assembler-times "umull\\tv" 2 } } */
+/* { dg-final { scan-assembler-times "shl\\tv" 2 } } */
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index f180ced312443ba1e698932d5e8362208690b3fc..b34b00f67ea67943dee7023ab9bfd19c1be5ccbe 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -4545,6 +4545,72 @@ vect_create_vectorized_promotion_stmts (vec_info *vinfo,
*vec_oprnds0 = vec_tmp;
}
+/* Create vectorized promotion stmts for widening stmts using only half the
+ potential vector size for input. */
+static void
+vect_create_vectorized_promotion_stmts (vec_info *vinfo,
+ vec<tree> *vec_oprnds0,
+ vec<tree> *vec_oprnds1,
+ stmt_vec_info stmt_info, tree vec_dest,
+ gimple_stmt_iterator *gsi,
+ enum tree_code code1,
+ int op_type)
+{
+ int i;
+ tree vop0, vop1, new_tmp;
+ gimple *new_stmt1;
+ gimple *new_stmt2;
+ gimple *new_stmt3;
+ vec<tree> vec_tmp = vNULL;
+
+ vec_tmp.create (vec_oprnds0->length () * 2);
+ FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
+ {
+ tree new_tmp1, new_tmp2, new_tmp3, out_type;
+
+ gcc_assert (op_type == binary_op);
+ vop1 = (*vec_oprnds1)[i];
+
+ /* Widen the first vector input. */
+ out_type = TREE_TYPE (vec_dest);
+ new_tmp1 = make_ssa_name (out_type);
+ new_stmt1 = gimple_build_assign (new_tmp1, NOP_EXPR, vop0);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt1, gsi);
+ if (VECTOR_TYPE_P (TREE_TYPE (vop1)))
+ {
+ /* Widen the second vector input. */
+ new_tmp2 = make_ssa_name (out_type);
+ new_stmt2 = gimple_build_assign (new_tmp2, NOP_EXPR, vop1);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt2, gsi);
+ /* Perform the operation, with both vector inputs widened. */
+ new_stmt3 = gimple_build_assign (vec_dest, code1, new_tmp1, new_tmp2);
+ }
+ else
+ {
+ /* Perform the operation, with the single vector input widened. */
+ new_stmt3 = gimple_build_assign (vec_dest, code1, new_tmp1, vop1);
+ }
+
+ new_tmp3 = make_ssa_name (vec_dest, new_stmt3);
+ gimple_assign_set_lhs (new_stmt3, new_tmp3);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt3, gsi);
+ if (is_gimple_call (new_stmt3))
+ {
+ new_tmp = gimple_call_lhs (new_stmt3);
+ }
+ else
+ {
+ new_tmp = gimple_assign_lhs (new_stmt3);
+ }
+
+ /* Store the results for the next step. */
+ vec_tmp.quick_push (new_tmp);
+ }
+
+ vec_oprnds0->release ();
+ *vec_oprnds0 = vec_tmp;
+}
+
/* Check if STMT_INFO performs a conversion operation that can be vectorized.
If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
@@ -4697,7 +4763,13 @@ vectorizable_conversion (vec_info *vinfo,
nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
if (known_eq (nunits_out, nunits_in))
- modifier = NONE;
+ if (code == WIDEN_MINUS_EXPR
+ || code == WIDEN_PLUS_EXPR
+ || code == WIDEN_LSHIFT_EXPR
+ || code == WIDEN_MULT_EXPR)
+ modifier = WIDEN;
+ else
+ modifier = NONE;
else if (multiple_p (nunits_out, nunits_in))
modifier = NARROW;
else
@@ -4743,9 +4815,21 @@ vectorizable_conversion (vec_info *vinfo,
return false;
case WIDEN:
- if (supportable_widening_operation (vinfo, code, stmt_info, vectype_out,
- vectype_in, &code1, &code2,
- &multi_step_cvt, &interm_types))
+ if (known_eq (nunits_out, nunits_in)
+ && (code == WIDEN_MINUS_EXPR
+ || code == WIDEN_LSHIFT_EXPR
+ || code == WIDEN_PLUS_EXPR
+ || code == WIDEN_MULT_EXPR)
+ && supportable_convert_operation (code, vectype_out, vectype_in,
+ &code1))
+ {
+ gcc_assert (!(multi_step_cvt && op_type == binary_op));
+ break;
+ }
+ else if (supportable_widening_operation (vinfo, code, stmt_info,
+ vectype_out, vectype_in, &code1,
+ &code2, &multi_step_cvt,
+ &interm_types))
{
/* Binary widening operation can only be supported directly by the
architecture. */
@@ -4981,10 +5065,20 @@ vectorizable_conversion (vec_info *vinfo,
c1 = codecvt1;
c2 = codecvt2;
}
- vect_create_vectorized_promotion_stmts (vinfo, &vec_oprnds0,
- &vec_oprnds1, stmt_info,
- this_dest, gsi,
- c1, c2, op_type);
+ if ((code == WIDEN_MINUS_EXPR
+ || code == WIDEN_PLUS_EXPR
+ || code == WIDEN_LSHIFT_EXPR
+ || code == WIDEN_MULT_EXPR)
+ && known_eq (nunits_in, nunits_out))
+ vect_create_vectorized_promotion_stmts (vinfo, &vec_oprnds0,
+ &vec_oprnds1, stmt_info,
+ this_dest, gsi,
+ c1, op_type);
+ else
+ vect_create_vectorized_promotion_stmts (vinfo, &vec_oprnds0,
+ &vec_oprnds1, stmt_info,
+ this_dest, gsi,
+ c1, c2, op_type);
}
FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)