(This patch is the fourth of five: the first four do some clean-up and
the last fixes a bug with scalar MOVI.  The bug fix without the clean-up
was particularly ugly!)


I think the changelog says it all here.  Nothing major, just tidying up.


OK for trunk?


Cheers,
Ian


2013-06-03  Ian Bolton  <ian.bol...@arm.com>

        * config/aarch64/aarch64.c (simd_immediate_info): Remove
        element_char member.
        (sizetochar): Return signed char.
        (aarch64_simd_valid_immediate): Remove elchar and other
        unnecessary variables.
        (aarch64_output_simd_mov_immediate): Take rtx instead of &rtx.
        Calculate element_char as required.
        * config/aarch64/aarch64-protos.h: Update and move prototype
        for aarch64_output_simd_mov_immediate.
        * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<mode>):
        Update arguments.
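
For reference, the element-width letter is now computed on demand via
sizetochar instead of being cached in simd_immediate_info.  A minimal
sketch of the mapping sizetochar provides (its body is not part of this
patch; the suffix letters just follow the AdvSIMD element naming, e.g.
v0.4s):

    static char
    sizetochar (int size)
    {
      switch (size)
        {
        case 64: return 'd';  /* e.g. %0.2d  */
        case 32: return 's';  /* e.g. %0.4s  */
        case 16: return 'h';  /* e.g. %0.8h  */
        case 8:  return 'b';  /* e.g. %0.16b */
        default: gcc_unreachable ();
        }
    }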
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 083ce91..d21a2f5 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -148,6 +148,7 @@ bool aarch64_legitimate_pic_operand_p (rtx);
 bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
 bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context,
                            enum machine_mode);
+char *aarch64_output_simd_mov_immediate (rtx, enum machine_mode, unsigned);
 bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
 bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
 bool aarch64_regno_ok_for_base_p (int, bool);
@@ -258,6 +259,4 @@ extern void aarch64_split_combinev16qi (rtx operands[3]);
 extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
 extern bool
 aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
-
-char* aarch64_output_simd_mov_immediate (rtx *, enum machine_mode, unsigned);
 #endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 04fbdbd..e5990d4 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -409,7 +409,7 @@
      case 4: return "ins\t%0.d[0], %1";
      case 5: return "mov\t%0, %1";
      case 6:
-       return aarch64_output_simd_mov_immediate (&operands[1],
+       return aarch64_output_simd_mov_immediate (operands[1],
                                                  <MODE>mode, 64);
      default: gcc_unreachable ();
      }
@@ -440,7 +440,7 @@
     case 5:
        return "#";
     case 6:
-       return aarch64_output_simd_mov_immediate (&operands[1], <MODE>mode, 128);
+       return aarch64_output_simd_mov_immediate (operands[1], <MODE>mode, 128);
     default:
        gcc_unreachable ();
     }
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index d83e645..001f9c5 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -91,7 +91,6 @@ struct simd_immediate_info {
   rtx value;
   int shift;
   int element_width;
-  unsigned char element_char;
   bool mvn;
 };
 
@@ -6102,7 +6101,7 @@ aarch64_mangle_type (const_tree type)
 }
 
 /* Return the equivalent letter for size.  */
-static unsigned char
+static char
 sizetochar (int size)
 {
   switch (size)
@@ -6163,7 +6162,6 @@ aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
     {                                                  \
       immtype = (CLASS);                               \
       elsize = (ELSIZE);                               \
-      elchar = sizetochar (elsize);                    \
       eshift = (SHIFT);                                        \
       emvn = (NEG);                                    \
       break;                                           \
@@ -6172,25 +6170,20 @@ aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
   unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
   unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
   unsigned char bytes[16];
-  unsigned char elchar = 0;
   int immtype = -1, matches;
   unsigned int invmask = inverse ? 0xff : 0;
   int eshift, emvn;
 
   if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
     {
-      bool simd_imm_zero = aarch64_simd_imm_zero_p (op, mode);
-      int elem_width = GET_MODE_BITSIZE (GET_MODE (CONST_VECTOR_ELT (op, 0)));
-
-      if (!(simd_imm_zero
-           || aarch64_vect_float_const_representable_p (op)))
+      if (! (aarch64_simd_imm_zero_p (op, mode)
+            || aarch64_vect_float_const_representable_p (op)))
        return false;
 
       if (info)
        {
          info->value = CONST_VECTOR_ELT (op, 0);
-         info->element_width = elem_width;
-         info->element_char = sizetochar (elem_width);
+         info->element_width = GET_MODE_BITSIZE (GET_MODE (info->value));
          info->mvn = false;
          info->shift = 0;
        }
@@ -6298,7 +6291,6 @@ aarch64_simd_valid_immediate (rtx op, enum machine_mode mode, bool inverse,
   if (info)
     {
       info->element_width = elsize;
-      info->element_char = elchar;
       info->mvn = emvn != 0;
       info->shift = eshift;
 
@@ -7228,7 +7220,7 @@ aarch64_float_const_representable_p (rtx x)
 }
 
 char*
-aarch64_output_simd_mov_immediate (rtx *const_vector,
+aarch64_output_simd_mov_immediate (rtx const_vector,
                                   enum machine_mode mode,
                                   unsigned width)
 {
@@ -7236,16 +7228,17 @@ aarch64_output_simd_mov_immediate (rtx *const_vector,
   static char templ[40];
   const char *mnemonic;
   unsigned int lane_count = 0;
+  char element_char;
 
   struct simd_immediate_info info;
 
   /* This will return true to show const_vector is legal for use as either
      a AdvSIMD MOVI instruction (or, implicitly, MVNI) immediate.  It will
      also update INFO to show how the immediate should be generated.  */
-  is_valid = aarch64_simd_valid_immediate (*const_vector, mode, false, &info);
+  is_valid = aarch64_simd_valid_immediate (const_vector, mode, false, &info);
   gcc_assert (is_valid);
 
-  gcc_assert (info.element_width != 0);
+  element_char = sizetochar (info.element_width);
   lane_count = width / info.element_width;
 
   mode = GET_MODE_INNER (mode);
@@ -7267,7 +7260,7 @@ aarch64_output_simd_mov_immediate (rtx *const_vector,
            snprintf (templ, sizeof (templ), "fmov\t%%d0, %s", float_buf);
          else
            snprintf (templ, sizeof (templ), "fmov\t%%0.%d%c, %s",
-                     lane_count, info.element_char, float_buf);
+                     lane_count, element_char, float_buf);
          return templ;
        }
     }
@@ -7279,11 +7272,11 @@ aarch64_output_simd_mov_immediate (rtx *const_vector,
              mnemonic, UINTVAL (info.value));
   else if (info.shift)
     snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX
-             ", lsl %d", mnemonic, lane_count, info.element_char,
+             ", lsl %d", mnemonic, lane_count, element_char,
              UINTVAL (info.value), info.shift);
   else
     snprintf (templ, sizeof (templ), "%s\t%%0.%d%c, " HOST_WIDE_INT_PRINT_HEX,
-             mnemonic, lane_count, info.element_char, UINTVAL (info.value));
+             mnemonic, lane_count, element_char, UINTVAL (info.value));
   return templ;
 }
 
