Hi all, this patch changes a number of explicit GET_CODE comparisons against RTL codes to the appropriate RTL code testing macros in the aarch64.c
backend, using a script. Would this be okay for trunk?


Also, this is my first contribution, and I do not yet have commit rights, so if everything is okay could someone commit this for me?

Thank you and I would appreciate any feedback/advice!


gcc/ChangeLog:


2021-07-21  Alistair Lee  <alistair....@arm.com>


     * rtl.h (CONST_VECTOR_P): New macro.
     (CONST_STRING_P): New macro.

     * config/aarch64/aarch64.c (aarch64_get_sve_pred_bits): Use RTL code
     testing macros.
     (aarch64_ptrue_all_mode): Likewise.
     (aarch64_expand_mov_immediate): Likewise.
     (aarch64_const_vec_all_in_range_p): Likewise.
     (aarch64_rtx_costs): Likewise.
     (aarch64_legitimate_constant_p): Likewise.
     (aarch64_simd_valid_immediate): Likewise.
     (aarch64_simd_make_constant): Likewise.
     (aarch64_convert_mult_to_shift): Likewise.
     (aarch64_expand_sve_vec_perm): Likewise.
     (aarch64_vec_fpconst_pow_of_2): Likewise.

diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 
2753c852abdfab96ec6016075aa386eee73ad85d..d1408a6ab371223cd7d042012a32a4b0a76d6885
 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -4035,7 +4035,7 @@ aarch64_force_temporary (machine_mode mode, rtx x, rtx 
value)
 static bool
 aarch64_get_sve_pred_bits (rtx_vector_builder &builder, rtx x)
 {
-  if (GET_CODE (x) != CONST_VECTOR)
+  if (!CONST_VECTOR_P (x))
     return false;
 
   unsigned int factor = vector_element_size (GET_MODE_NUNITS (VNx16BImode),
@@ -4091,7 +4091,7 @@ opt_machine_mode
 aarch64_ptrue_all_mode (rtx x)
 {
   gcc_assert (GET_MODE (x) == VNx16BImode);
-  if (GET_CODE (x) != CONST_VECTOR
+  if (!CONST_VECTOR_P (x)
       || !CONST_VECTOR_DUPLICATE_P (x)
       || !CONST_INT_P (CONST_VECTOR_ENCODED_ELT (x, 0))
       || INTVAL (CONST_VECTOR_ENCODED_ELT (x, 0)) == 0)
@@ -5791,7 +5791,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
          return;
        }
 
-      if (GET_CODE (imm) == CONST_VECTOR && aarch64_sve_data_mode_p (mode))
+      if (CONST_VECTOR_P (imm) && aarch64_sve_data_mode_p (mode))
        if (rtx res = aarch64_expand_sve_const_vector (dest, imm))
          {
            if (dest != res)
@@ -10495,7 +10495,7 @@ aarch64_const_vec_all_in_range_p (rtx vec,
                                  HOST_WIDE_INT minval,
                                  HOST_WIDE_INT maxval)
 {
-  if (GET_CODE (vec) != CONST_VECTOR
+  if (!CONST_VECTOR_P (vec)
       || GET_MODE_CLASS (GET_MODE (vec)) != MODE_VECTOR_INT)
     return false;
 
@@ -12595,7 +12595,7 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer 
ATTRIBUTE_UNUSED,
        case SIGN_EXTRACT:
          /* Bit-field insertion.  Strip any redundant widening of
             the RHS to meet the width of the target.  */
-         if (GET_CODE (op1) == SUBREG)
+         if (SUBREG_P (op1))
            op1 = SUBREG_REG (op1);
          if ((GET_CODE (op1) == ZERO_EXTEND
               || GET_CODE (op1) == SIGN_EXTEND)
@@ -12868,7 +12868,7 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer 
ATTRIBUTE_UNUSED,
              But the integer MINUS logic expects the shift/extend
              operation in op1.  */
           if (! (REG_P (op0)
-                 || (GET_CODE (op0) == SUBREG && REG_P (SUBREG_REG (op0)))))
+                 || (SUBREG_P (op0) && REG_P (SUBREG_REG (op0)))))
           {
             op0 = XEXP (x, 1);
             op1 = XEXP (x, 0);
@@ -17997,7 +17997,7 @@ aarch64_legitimate_constant_p (machine_mode mode, rtx x)
 
   /* Otherwise, accept any CONST_VECTOR that, if all else fails, can at
      least be forced to memory and loaded from there.  */
-  if (GET_CODE (x) == CONST_VECTOR)
+  if (CONST_VECTOR_P (x))
     return !targetm.cannot_force_const_mem (mode, x);
 
   /* Do not allow vector struct mode constants for Advanced SIMD.
@@ -19804,7 +19804,7 @@ aarch64_simd_valid_immediate (rtx op, 
simd_immediate_info *info,
   scalar_mode elt_mode = GET_MODE_INNER (mode);
   rtx base, step;
   unsigned int n_elts;
-  if (GET_CODE (op) == CONST_VECTOR
+  if (CONST_VECTOR_P (op)
       && CONST_VECTOR_DUPLICATE_P (op))
     n_elts = CONST_VECTOR_NPATTERNS (op);
   else if ((vec_flags & VEC_SVE_DATA)
@@ -19826,7 +19826,7 @@ aarch64_simd_valid_immediate (rtx op, 
simd_immediate_info *info,
        }
       return true;
     }
-  else if (GET_CODE (op) == CONST_VECTOR
+  else if (CONST_VECTOR_P (op)
           && CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
     /* N_ELTS set above.  */;
   else
@@ -20425,7 +20425,7 @@ aarch64_simd_make_constant (rtx vals)
   int n_const = 0;
   int i;
 
-  if (GET_CODE (vals) == CONST_VECTOR)
+  if (CONST_VECTOR_P (vals))
     const_vec = vals;
   else if (GET_CODE (vals) == PARALLEL)
     {
@@ -20966,7 +20966,7 @@ aarch64_sve_expand_vector_init (rtx target, rtx vals)
 static rtx
 aarch64_convert_mult_to_shift (rtx value, rtx_code &code)
 {
-  if (GET_CODE (value) != CONST_VECTOR)
+  if (!CONST_VECTOR_P (value))
     return NULL_RTX;
 
   rtx_vector_builder builder;
@@ -22130,7 +22130,7 @@ aarch64_expand_sve_vec_perm (rtx target, rtx op0, rtx 
op1, rtx sel)
   rtx sel_reg = force_reg (sel_mode, sel);
 
   /* Check if the sel only references the first values vector.  */
-  if (GET_CODE (sel) == CONST_VECTOR
+  if (CONST_VECTOR_P (sel)
       && aarch64_const_vec_all_in_range_p (sel, 0, nunits - 1))
     {
       emit_unspec2 (target, UNSPEC_TBL, op0, sel_reg);
@@ -22152,7 +22152,7 @@ aarch64_expand_sve_vec_perm (rtx target, rtx op0, rtx 
op1, rtx sel)
   rtx res0 = gen_reg_rtx (data_mode);
   rtx res1 = gen_reg_rtx (data_mode);
   rtx neg_num_elems = aarch64_simd_gen_const_vector_dup (sel_mode, -nunits);
-  if (GET_CODE (sel) != CONST_VECTOR
+  if (!CONST_VECTOR_P (sel)
       || !aarch64_const_vec_all_in_range_p (sel, 0, 2 * nunits - 1))
     {
       rtx max_sel = aarch64_simd_gen_const_vector_dup (sel_mode,
@@ -24676,7 +24676,7 @@ int
 aarch64_vec_fpconst_pow_of_2 (rtx x)
 {
   int nelts;
-  if (GET_CODE (x) != CONST_VECTOR
+  if (!CONST_VECTOR_P (x)
       || !CONST_VECTOR_NUNITS (x).is_constant (&nelts))
     return -1;
 
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 
5ed0d6dd6fa6356f283f1ca9c3b029b8d22aa4f7..de253304d48001aa99f0c8e5e41c3d557b4d4843
 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -830,6 +830,12 @@ struct GTY(()) rtvec_def {
 #define CONST_DOUBLE_AS_FLOAT_P(X) \
   (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) != VOIDmode)
 
+/* Predicate yielding nonzero iff X is an rtx for a constant vector.  */
+#define CONST_VECTOR_P(X) (GET_CODE (X) == CONST_VECTOR)
+
+/* Predicate yielding nonzero iff X is an rtx for a constant string.  */
+#define CONST_STRING_P(X) (GET_CODE (X) == CONST_STRING)
+
 /* Predicate yielding nonzero iff X is a label insn.  */
 #define LABEL_P(X) (GET_CODE (X) == CODE_LABEL)
 

Reply via email to