https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115659

--- Comment #4 from Kewen Lin <linkw at gcc dot gnu.org> ---
(In reply to Richard Biener from comment #3)
>    c = x CMP y 
>    r = c ? -1 : z  =>  r = c ? c : z
>    r = c ?  z : 0  =>  r = c ? z : c
> 
> this is probably best left for ISEL.  I agree the transforms eliminating
> the COND are useful in general and suitable also for match.pd.  Watch
> out for vectorizer patterns though which creates scalar COND_EXPRs for
> bool mask <-> bool value transforms.
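For a concrete picture, hypothetical reductions like the following (made up
here, not the PR's testcase) are vectorized into VEC_COND_EXPRs of exactly
these two shapes, where the comparison mask can be reused as the THEN or
ELSE operand once the modes match:

-----
/* With -O2 -ftree-vectorize on a suitable target, the loop body becomes
   roughly r = VEC_COND_EXPR <a < b, {-1,...}, z>, i.e. r = c ? -1 : z.  */
void
f1 (int *restrict r, int *restrict a, int *restrict b, int *restrict z, int n)
{
  for (int i = 0; i < n; i++)
    r[i] = a[i] < b[i] ? -1 : z[i];
}

/* Likewise roughly r = VEC_COND_EXPR <a < b, z, {0,...}>, i.e. r = c ? z : 0.  */
void
f2 (int *restrict r, int *restrict a, int *restrict b, int *restrict z, int n)
{
  for (int i = 0; i < n; i++)
    r[i] = a[i] < b[i] ? z[i] : 0;
}
-----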

Thanks for the suggestion! If we go with ISEL, the patch would look like:

-----
diff --git a/gcc/gimple-isel.cc b/gcc/gimple-isel.cc
index 54c1801038b..abb18932228 100644
--- a/gcc/gimple-isel.cc
+++ b/gcc/gimple-isel.cc
@@ -240,16 +240,34 @@ gimple_expand_vec_cond_expr (struct function *fun, gimple_stmt_iterator *gsi,
             can_compute_op0 = expand_vec_cmp_expr_p (op0a_type, op0_type,
                                                      tcode);

-          /* Try to fold x CMP y ? -1 : 0 to x CMP y.  */
          if (can_compute_op0
-             && integer_minus_onep (op1)
-             && integer_zerop (op2)
              && TYPE_MODE (TREE_TYPE (lhs)) == TYPE_MODE (TREE_TYPE (op0)))
            {
-             tree conv_op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), op0);
-             gassign *new_stmt = gimple_build_assign (lhs, conv_op);
-             gsi_replace (gsi, new_stmt, true);
-             return new_stmt;
+             bool op1_minus_onep = integer_minus_onep (op1);
+             bool op2_zerop = integer_zerop (op2);
+             /* Try to fold x CMP y ? -1 : 0 to x CMP y.  */
+             if (op1_minus_onep && op2_zerop)
+               {
+                 tree conv_op
+                   = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), op0);
+                 gassign *new_stmt = gimple_build_assign (lhs, conv_op);
+                 gsi_replace (gsi, new_stmt, true);
+                 return new_stmt;
+               }
+             /* Try to fold x CMP y ? -1 : z to x CMP y ? x CMP y : z,
+                or x CMP y ? z : 0 to x CMP y ? z : x CMP y.  */
+             if (op1_minus_onep || op2_zerop)
+               {
+                 tree conv_op
+                   = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), op0);
+                 tree new_op = make_ssa_name (TREE_TYPE (lhs));
+                 gassign *new_stmt = gimple_build_assign (new_op, conv_op);
+                 if (op1_minus_onep)
+                   op1 = new_op;
+                 else
+                   op2 = new_op;
+                 gsi_insert_seq_before (gsi, new_stmt, GSI_SAME_STMT);
+               }
            }

          /* When the compare has EH we do not want to forward it when

-----

But this doesn't help the failure exposed here, as it falls into the inverted case, e.g.:
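-----
/* Hypothetical reduction (not the PR's testcase): vectorizes roughly to
   r = VEC_COND_EXPR <a != b, {0,...}, {-1,...}>, i.e. r = c ? 0 : -1,
   where NE on integer vectors is computed as EQ plus an inversion.  */
void
f3 (int *restrict r, int *restrict a, int *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    r[i] = a[i] != b[i] ? 0 : -1;
}
-----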
If we further go with some hacks for the inversion:

-----
diff --git a/gcc/gimple-isel.cc b/gcc/gimple-isel.cc
index abb18932228..afc2c9f1386 100644
--- a/gcc/gimple-isel.cc
+++ b/gcc/gimple-isel.cc
@@ -240,6 +240,15 @@ gimple_expand_vec_cond_expr (struct function *fun, gimple_stmt_iterator *gsi,
            can_compute_op0 = expand_vec_cmp_expr_p (op0a_type, op0_type,
                                                     tcode);

+         auto need_inverted_p = [](tree_code c, machine_mode m) {
+           if (GET_MODE_CLASS (m) == MODE_VECTOR_INT)
+             return (c == NE_EXPR || c == GE_EXPR || c == LE_EXPR);
+           gcc_assert (GET_MODE_CLASS (m) == MODE_VECTOR_FLOAT);
+           return (c == NE_EXPR || c == UNLE_EXPR || c == UNLT_EXPR
+                   || c == UNGE_EXPR || c == UNGT_EXPR || c == UNORDERED_EXPR
+                   || c == UNEQ_EXPR);
+         };
+
          if (can_compute_op0
              && TYPE_MODE (TREE_TYPE (lhs)) == TYPE_MODE (TREE_TYPE (op0)))
            {
@@ -254,6 +263,23 @@ gimple_expand_vec_cond_expr (struct function *fun, gimple_stmt_iterator *gsi,
                  gsi_replace (gsi, new_stmt, true);
                  return new_stmt;
                }
+             bool inverted_p = need_inverted_p (tcode, TYPE_MODE (op0a_type));
+             bool op1_zerop = integer_zerop (op1);
+             bool op2_minus_onep = integer_minus_onep (op2);
+             /* Try to fold x CMP y ? 0 : -1 to ~(x CMP y), it can reuse
+                the comparison before the inversion.  */
+             if (inverted_p && op1_zerop && op2_minus_onep)
+               {
+                 tree inv_op0 = make_ssa_name (TREE_TYPE (op0));
+                 gassign *inv_stmt
+                   = gimple_build_assign (inv_op0, BIT_NOT_EXPR, op0);
+                 gsi_insert_seq_before (gsi, inv_stmt, GSI_SAME_STMT);
+                 tree conv_op
+                   = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), inv_op0);
+                 gassign *new_stmt = gimple_build_assign (lhs, conv_op);
+                 gsi_replace (gsi, new_stmt, true);
+                 return new_stmt;
+               }
              /* Try to fold x CMP y ? -1 : z to x CMP y ? x CMP y : z,
                 or x CMP y ? z : 0 to x CMP y ? z : x CMP y.  */
              if (op1_minus_onep || op2_zerop)
@@ -268,6 +294,25 @@ gimple_expand_vec_cond_expr (struct function *fun, gimple_stmt_iterator *gsi,
                    op2 = new_op;
                  gsi_insert_seq_before (gsi, new_stmt, GSI_SAME_STMT);
                }
+             /* Try to fold x CMP y ? z : -1 to x CMP y ? z : ~(x CMP y),
+                or x CMP y ? 0 : z to x CMP y ? ~(x CMP y) : z, expecting
+                that it can reuse the comparison before the inversion.  */
+             else if (inverted_p && (op1_zerop || op2_minus_onep))
+               {
+                 tree inv_op0 = make_ssa_name (TREE_TYPE (op0));
+                 gassign *inv_stmt
+                   = gimple_build_assign (inv_op0, BIT_NOT_EXPR, op0);
+                 gsi_insert_seq_before (gsi, inv_stmt, GSI_SAME_STMT);
+                 tree conv_op
+                   = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), inv_op0);
+                 tree new_op = make_ssa_name (TREE_TYPE (lhs));
+                 gassign *new_stmt = gimple_build_assign (new_op, conv_op);
+                 gsi_insert_seq_before (gsi, new_stmt, GSI_SAME_STMT);
+                 if (integer_minus_onep (op2))
+                   op2 = new_op;
+                 else
+                   op1 = new_op;
+               }
            }

          /* When the compare has EH we do not want to forward it when

-----
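Analogous hypothetical reductions for the partially inverted shapes handled
by the last hunk (again assuming NE needs EQ plus an inversion on the
target):

-----
/* r = VEC_COND_EXPR <a != b, z, {-1,...}>, i.e. r = c ? z : -1.  */
void
f4 (int *restrict r, int *restrict a, int *restrict b, int *restrict z, int n)
{
  for (int i = 0; i < n; i++)
    r[i] = a[i] != b[i] ? z[i] : -1;
}

/* r = VEC_COND_EXPR <a != b, {0,...}, z>, i.e. r = c ? 0 : z.  */
void
f5 (int *restrict r, int *restrict a, int *restrict b, int *restrict z, int n)
{
  for (int i = 0; i < n; i++)
    r[i] = a[i] != b[i] ? 0 : z[i];
}
-----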

As discussed in the ML thread, I think we will expand the target-unsupported
vector comparisons early; then this inversion becomes visible and we should
be able to canonicalize the latter form into the former one?
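For the integer case, the identity behind that canonicalization is easy to
check in isolation. A standalone scalar sketch (not GCC code; the names here
are made up, and float comparisons would additionally need the unordered
variants listed in need_inverted_p above):

-----
#include <assert.h>

/* Standalone illustration (not GCC code): inverting the comparison code
   turns the "latter" form x CMP y ? 0 : -1 into the "former" form
   x CMP' y ? -1 : 0, which ISEL can already fold to the bare mask.  */

enum cmp { EQ, NE, LT, GE, GT, LE };

static enum cmp
invert_cmp (enum cmp c)
{
  switch (c)
    {
    case EQ: return NE;
    case NE: return EQ;
    case LT: return GE;
    case GE: return LT;
    case GT: return LE;
    case LE: return GT;
    }
  return EQ;
}

static int
do_cmp (enum cmp c, int x, int y)
{
  switch (c)
    {
    case EQ: return x == y;
    case NE: return x != y;
    case LT: return x < y;
    case GE: return x >= y;
    case GT: return x > y;
    case LE: return x <= y;
    }
  return 0;
}

int
main (void)
{
  for (int c = EQ; c <= LE; c++)
    for (int x = -1; x <= 1; x++)
      for (int y = -1; y <= 1; y++)
        assert ((do_cmp ((enum cmp) c, x, y) ? 0 : -1)
                == (do_cmp (invert_cmp ((enum cmp) c), x, y) ? -1 : 0));
  return 0;
}
-----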
