SIMD register moves are currently emitted as ORR. Change this to use the MOV alias instead, just like integer moves (the ARM-ARM states MOV is the preferred disassembly), improving the readability of -S output.
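As a small illustration (this example is not part of the patch), a function that simply returns its second vector argument needs a single SIMD register copy when compiled at -O2:

  #include <arm_neon.h>

  int8x16_t
  f (int8x16_t x, int8x16_t y)
  {
    return y;
  }

Today the copy appears in the -S output as

	orr	v0.16b, v1.16b, v1.16b

while with the patch the same instruction is printed as

	mov	v0.16b, v1.16b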
Passes bootstrap, OK for commit?

ChangeLog:
2017-06-20  Wilco Dijkstra  <wdijk...@arm.com>

	* config/aarch64/aarch64.md (movti_aarch64): Emit mov rather than orr.
	(movtf_aarch64): Likewise.
	* config/aarch64/aarch64-simd.md (aarch64_simd_mov): Emit mov rather
	than orr.
--

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 95f838318294359ab58cc4319989bcf6c280278d..24ef178b0de253aa2d49aef022d866266216a0d6 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -105,7 +105,7 @@ (define_insn "*aarch64_simd_mov<mode>"
     {
     case 0: return "ldr\\t%d0, %1";
     case 1: return "str\\t%d1, %0";
-    case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+    case 2: return "mov\t%0.<Vbtype>, %1.<Vbtype>";
     case 3: return "umov\t%0, %1.d[0]";
     case 4: return "fmov\t%d0, %1";
     case 5: return "mov\t%0, %1";
@@ -136,7 +136,7 @@ (define_insn "*aarch64_simd_mov<mode>"
     case 1:
	return "str\\t%q1, %0";
     case 2:
-	return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
+	return "mov\t%0.<Vbtype>, %1.<Vbtype>";
     case 3:
     case 4:
     case 5:
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 93972d134b1eeba0e339f00528fc2e03f0c02052..225b64e1daf1663d28bbe8c2d30ba373b4722176 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1017,7 +1017,7 @@ (define_insn "*movti_aarch64"
    #
    #
    #
-   orr\\t%0.16b, %1.16b, %1.16b
+   mov\\t%0.16b, %1.16b
    ldp\\t%0, %H0, %1
    stp\\t%1, %H1, %0
    stp\\txzr, xzr, %0
@@ -1131,7 +1131,7 @@ (define_insn "*movtf_aarch64"
   "TARGET_FLOAT && (register_operand (operands[0], TFmode)
     || aarch64_reg_or_fp_zero (operands[1], TFmode))"
   "@
-   orr\\t%0.16b, %1.16b, %1.16b
+   mov\\t%0.16b, %1.16b
    #
    #
    #