The assembler seems to prefer unsigned offsets; perhaps we should too.

Signed-off-by: Richard Henderson <r...@twiddle.net>
---
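For reference, a minimal standalone sketch (not QEMU code) of the selection
order this change establishes in tcg_out_ldst: try the scaled uimm12 form
first, then the unscaled simm9 form, then fall back to a register offset.
The names pick_form and FORM_* are illustrative only, and the 0xfff upper
bound on the scaled immediate is an assumption here, since that check sits
in the elided context of the first hunk.

#include <stdint.h>
#include <stdio.h>

enum ldst_form { FORM_UIMM12_SCALED, FORM_SIMM9_UNSCALED, FORM_REG_OFFSET };

static enum ldst_form pick_form(intptr_t offset, int size_log2)
{
    /* Scaled unsigned 12-bit immediate: non-negative, naturally aligned,
       and (offset >> size_log2) fits in 12 bits (assumed bound).  */
    if (offset >= 0 && !(offset & ((1 << size_log2) - 1))
        && (offset >> size_log2) < 0x1000) {
        return FORM_UIMM12_SCALED;
    }
    /* Unscaled signed 9-bit immediate: -256 <= offset < 256.  */
    if (offset >= -256 && offset < 256) {
        return FORM_SIMM9_UNSCALED;
    }
    /* Worst case: materialize the offset in a temp register.  */
    return FORM_REG_OFFSET;
}

int main(void)
{
    printf("%d\n", pick_form(8, 2));       /* FORM_UIMM12_SCALED  */
    printf("%d\n", pick_form(-16, 2));     /* FORM_SIMM9_UNSCALED */
    printf("%d\n", pick_form(1 << 20, 2)); /* FORM_REG_OFFSET     */
    return 0;
}

With this order, a naturally aligned 4-byte access at offset 8 selects the
scaled form (ldr w0, [x1, #8]) rather than the unscaled ldur, which appears
to be the form the assembler itself prefers for such operands.
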
 tcg/aarch64/tcg-target.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index a538a87..58597e7 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -645,11 +645,6 @@ static void tcg_out_ldst_r(TCGContext *s, TCGMemOp size, AArch64LdstType type,
 static void tcg_out_ldst(TCGContext *s, TCGMemOp size, AArch64LdstType type,
                          TCGReg rd, TCGReg rn, intptr_t offset)
 {
-    if (offset >= -256 && offset < 256) {
-        tcg_out_ldst_9(s, size, type, rd, rn, offset);
-        return;
-    }
-
     /* If the offset is naturally aligned and in range, then we can
        use the scaled uimm12 encoding */
     if (offset >= 0 && !(offset & ((1 << size) - 1))) {
@@ -660,6 +655,11 @@ static void tcg_out_ldst(TCGContext *s, TCGMemOp size, AArch64LdstType type,
         }
     }
 
+    if (offset >= -256 && offset < 256) {
+        tcg_out_ldst_9(s, size, type, rd, rn, offset);
+        return;
+    }
+
     /* Worst-case scenario, move offset to temp register, use reg offset.  */
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
     tcg_out_ldst_r(s, size, type, rd, rn, TCG_REG_TMP);
-- 
1.9.0

