While looking at PR118956, I noticed that we had some dead code
left over from the removal of the vcond patterns: the can_invert_p
path in aarch64_expand_sve_vec_cmp_float is no longer used.
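
To illustrate the interface change (a minimal sketch rather than the
exact GCC code; the caller-side inversion handling shown for the old
interface is only illustrative):

  /* Before: a caller could opt in to receiving an inverted predicate
     and had to check the return value.  */
  bool inverted = aarch64_expand_sve_vec_cmp_float (target, code, op0, op1,
                                                    /*can_invert_p=*/true);
  if (inverted)
    /* ... caller emits a NOT of TARGET ... */;

  /* After: the only remaining caller is the vec_cmp<mode><vpred>
     expander, which always passed false, so the function now simply
     emits the requested comparison and returns void.  */
  aarch64_expand_sve_vec_cmp_float (target, code, op0, op1);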

Tested on aarch64-linux-gnu & pushed.

Richard


gcc/
        * config/aarch64/aarch64-protos.h (aarch64_expand_sve_vec_cmp_float):
        Remove can_invert_p argument and change return type to void.
        * config/aarch64/aarch64.cc (aarch64_expand_sve_vec_cmp_float):
        Likewise.
        * config/aarch64/aarch64-sve.md (vec_cmp<mode><vpred>): Update call
        accordingly.
---
 gcc/config/aarch64/aarch64-protos.h |  2 +-
 gcc/config/aarch64/aarch64-sve.md   |  2 +-
 gcc/config/aarch64/aarch64.cc       | 36 ++++++++---------------------
 3 files changed, 11 insertions(+), 29 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 4235f4a0ca5..0291a8aa761 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -1098,7 +1098,7 @@ void aarch64_finish_ldpstp_peephole (rtx *, bool,
                                     enum rtx_code = (enum rtx_code)0);
 
 void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx);
-bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
+void aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx);
 
 bool aarch64_prepare_sve_int_fma (rtx *, rtx_code);
 bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code);
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index e975286a019..a93bc463a90 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -8495,7 +8495,7 @@ (define_expand "vec_cmp<mode><vpred>"
   "TARGET_SVE"
   {
     aarch64_expand_sve_vec_cmp_float (operands[0], GET_CODE (operands[1]),
-                                     operands[2], operands[3], false);
+                                     operands[2], operands[3]);
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index f5f23f6ff4b..fe76730b0a7 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -26877,14 +26877,10 @@ aarch64_emit_sve_invert_fp_cond (rtx target, rtx_code code, rtx pred,
 
 /* Expand an SVE floating-point comparison using the SVE equivalent of:
 
-     (set TARGET (CODE OP0 OP1))
-
-   If CAN_INVERT_P is true, the caller can also handle inverted results;
-   return true if the result is in fact inverted.  */
+     (set TARGET (CODE OP0 OP1)).  */
 
-bool
-aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
-                                 rtx op0, rtx op1, bool can_invert_p)
+void
+aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code, rtx op0, rtx op1)
 {
   machine_mode pred_mode = GET_MODE (target);
   machine_mode data_mode = GET_MODE (op0);
@@ -26902,16 +26898,14 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
     case GE:
     case EQ:
     case NE:
-      {
-       /* There is native support for the comparison.  */
-       aarch64_emit_sve_fp_cond (target, code, ptrue, true, op0, op1);
-       return false;
-      }
+      /* There is native support for the comparison.  */
+      aarch64_emit_sve_fp_cond (target, code, ptrue, true, op0, op1);
+      return;
 
     case LTGT:
       /* This is a trapping operation (LT or GT).  */
       aarch64_emit_sve_or_fp_conds (target, LT, GT, ptrue, true, op0, op1);
-      return false;
+      return;
 
     case UNEQ:
       if (!flag_trapping_math)
@@ -26920,7 +26914,7 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
          op1 = force_reg (data_mode, op1);
          aarch64_emit_sve_or_fp_conds (target, UNORDERED, EQ,
                                        ptrue, true, op0, op1);
-         return false;
+         return;
        }
       /* fall through */
     case UNLT:
@@ -26941,15 +26935,9 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
            code = NE;
          else
            code = reverse_condition_maybe_unordered (code);
-         if (can_invert_p)
-           {
-             aarch64_emit_sve_fp_cond (target, code,
-                                       ordered, false, op0, op1);
-             return true;
-           }
          aarch64_emit_sve_invert_fp_cond (target, code,
                                           ordered, false, op0, op1);
-         return false;
+         return;
        }
       break;
 
@@ -26964,13 +26952,7 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
 
   /* There is native support for the inverse comparison.  */
   code = reverse_condition_maybe_unordered (code);
-  if (can_invert_p)
-    {
-      aarch64_emit_sve_fp_cond (target, code, ptrue, true, op0, op1);
-      return true;
-    }
   aarch64_emit_sve_invert_fp_cond (target, code, ptrue, true, op0, op1);
-  return false;
 }
 
 /* Return true if:
-- 
2.25.1
