SIMD operations like combine prefer their operands in FP registers, so mark
the integer-register alternatives with '?' to slightly increase their cost and
avoid unnecessary int<->FP moves. This improves register allocation of scalar
SIMD operations.
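
For example (a minimal sketch, not part of the patch; the function name is
made up), a vcombine with a zero half matches these patterns:

#include <arm_neon.h>

/* Matches aarch64_combinez<mode> on little-endian.  In md constraints,
   '?' slightly disparages the alternative it appears in, so the register
   allocator now prefers keeping operand 1 in an FP ("w") register rather
   than forcing an int->FP transfer through the "r" alternative.  */
int32x4_t
combine_zero (int32x2_t x)
{
  return vcombine_s32 (x, vdup_n_s32 (0));
}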

OK for trunk?

ChangeLog:
2016-04-22  Wilco Dijkstra  <wdijk...@arm.com>

        * config/aarch64/aarch64-simd.md (aarch64_combinez<mode>):
        Add '?' to the integer-register alternative.
        (aarch64_combinez_be<mode>): Likewise.

--

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e1f5682165cd22ca7d31643b8f4e7f631d99c2d8..d3830838867eec2098b71eb46b7343d0155acf7e 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2645,7 +2645,7 @@
 (define_insn "*aarch64_combinez<mode>"
   [(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
         (vec_concat:<VDBL>
-          (match_operand:VD_BHSI 1 "general_operand" "w,r,m")
+          (match_operand:VD_BHSI 1 "general_operand" "w,?r,m")
           (match_operand:VD_BHSI 2 "aarch64_simd_imm_zero" "Dz,Dz,Dz")))]
   "TARGET_SIMD && !BYTES_BIG_ENDIAN"
   "@
@@ -2661,7 +2661,7 @@
   [(set (match_operand:<VDBL> 0 "register_operand" "=w,w,w")
         (vec_concat:<VDBL>
           (match_operand:VD_BHSI 2 "aarch64_simd_imm_zero" "Dz,Dz,Dz")
-          (match_operand:VD_BHSI 1 "general_operand" "w,r,m")))]
+          (match_operand:VD_BHSI 1 "general_operand" "w,?r,m")))]
   "TARGET_SIMD && BYTES_BIG_ENDIAN"
   "@
    mov\\t%0.8b, %1.8b
