We are going to need the complete memop beforehand, so let's not compute it twice.
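The resulting caller-side pattern looks roughly like this (an illustrative sketch reusing the identifiers from the diff below, not a verbatim excerpt of translate-a64.c):

    /* Finalize the complete MemOp once, up front. */
    MemOp memop = finalize_memop_asimd(s, size);

    if (is_store) {
        do_fp_st(s, rt, clean_addr, memop);   /* helpers now take a MemOp, not a size */
    } else {
        do_fp_ld(s, rt, clean_addr, memop);
    }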
Reviewed-by: Peter Maydell <peter.mayd...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/tcg/translate-a64.c | 43 ++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 24e255aa34..02dbf76feb 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -915,15 +915,14 @@ static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
 /*
  * Store from FP register to memory
  */
-static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
+static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
 {
     /* This writes the bottom N bits of a 128 bit wide vector to memory */
     TCGv_i64 tmplo = tcg_temp_new_i64();
-    MemOp mop = finalize_memop_asimd(s, size);
 
     tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
 
-    if (size < MO_128) {
+    if ((mop & MO_SIZE) < MO_128) {
         tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         TCGv_i64 tmphi = tcg_temp_new_i64();
@@ -939,14 +938,13 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
 /*
  * Load from memory to FP register
  */
-static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
+static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
 {
     /* This always zero-extends and writes to a full 128 bit wide vector */
     TCGv_i64 tmplo = tcg_temp_new_i64();
     TCGv_i64 tmphi = NULL;
-    MemOp mop = finalize_memop_asimd(s, size);
 
-    if (size < MO_128) {
+    if ((mop & MO_SIZE) < MO_128) {
         tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
     } else {
         TCGv_i128 t16 = tcg_temp_new_i128();
@@ -2775,6 +2773,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
     bool is_signed = false;
     int size = 2;
     TCGv_i64 tcg_rt, clean_addr;
+    MemOp memop;
 
     if (is_vector) {
         if (opc == 3) {
@@ -2785,6 +2784,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
         if (!fp_access_check(s)) {
             return;
         }
+        memop = finalize_memop_asimd(s, size);
     } else {
         if (opc == 3) {
             /* PRFM (literal) : prefetch */
@@ -2792,19 +2792,19 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
         }
         size = 2 + extract32(opc, 0, 1);
         is_signed = extract32(opc, 1, 1);
+        memop = finalize_memop(s, size + is_signed * MO_SIGN);
     }
 
     tcg_rt = cpu_reg(s, rt);
 
     clean_addr = tcg_temp_new_i64();
     gen_pc_plus_diff(s, clean_addr, imm);
+
     if (is_vector) {
-        do_fp_ld(s, rt, clean_addr, size);
+        do_fp_ld(s, rt, clean_addr, memop);
     } else {
         /* Only unsigned 32bit loads target 32bit registers.  */
         bool iss_sf = opc != 0;
-        MemOp memop = finalize_memop(s, size + is_signed * MO_SIGN);
-
         do_gpr_ld(s, tcg_rt, clean_addr, memop, false, true, rt, iss_sf, false);
     }
 }
@@ -2941,16 +2941,18 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
                                 (wback || rn != 31) && !set_tag, 2 << size);
 
     if (is_vector) {
+        MemOp mop = finalize_memop_asimd(s, size);
+
         if (is_load) {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, mop);
         } else {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, mop);
         }
         tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
         if (is_load) {
-            do_fp_ld(s, rt2, clean_addr, size);
+            do_fp_ld(s, rt2, clean_addr, mop);
         } else {
-            do_fp_st(s, rt2, clean_addr, size);
+            do_fp_st(s, rt2, clean_addr, mop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
@@ -3072,6 +3074,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         if (!fp_access_check(s)) {
             return;
         }
+        memop = finalize_memop_asimd(s, size);
     } else {
         if (size == 3 && opc == 2) {
             /* PRFM - prefetch */
@@ -3088,6 +3091,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
         is_store = (opc == 0);
         is_signed = !is_store && extract32(opc, 1, 1);
         is_extended = (size < 3) && extract32(opc, 0, 1);
+        memop = finalize_memop(s, size + is_signed * MO_SIGN);
     }
 
     switch (idx) {
@@ -3120,7 +3124,6 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
     }
 
     memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
-    memop = finalize_memop(s, size + is_signed * MO_SIGN);
 
     clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                        writeback || rn != 31,
@@ -3128,9 +3131,9 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
 
     if (is_vector) {
         if (is_store) {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, memop);
         } else {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, memop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
@@ -3236,9 +3239,9 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
 
     if (is_vector) {
         if (is_store) {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, memop);
         } else {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, memop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
@@ -3322,9 +3325,9 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
 
     if (is_vector) {
         if (is_store) {
-            do_fp_st(s, rt, clean_addr, size);
+            do_fp_st(s, rt, clean_addr, memop);
         } else {
-            do_fp_ld(s, rt, clean_addr, size);
+            do_fp_ld(s, rt, clean_addr, memop);
         }
     } else {
         TCGv_i64 tcg_rt = cpu_reg(s, rt);
-- 
2.34.1