Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
v8: Drop the out-of-line helper (pmm).
---
 target/arm/translate-sve.c | 61 +++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 28 deletions(-)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index ac7b3119e5..11e0dfc210 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -4342,71 +4342,76 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
     int len_remain = len % 8;
     int nparts = len / 8 + ctpop8(len_remain);
     int midx = get_mem_index(s);
-    TCGv_i64 addr, t0, t1;
+    TCGv_i64 dirty_addr, clean_addr, t0, t1;
 
-    addr = tcg_temp_new_i64();
-    t0 = tcg_temp_new_i64();
+    dirty_addr = tcg_temp_new_i64();
+    tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
+    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
+    tcg_temp_free_i64(dirty_addr);
 
-    /* Note that unpredicated load/store of vector/predicate registers
+    /*
+     * Note that unpredicated load/store of vector/predicate registers
      * are defined as a stream of bytes, which equates to little-endian
-     * operations on larger quantities.  There is no nice way to force
-     * a little-endian load for aarch64_be-linux-user out of line.
-     *
+     * operations on larger quantities.
      * Attempt to keep code expansion to a minimum by limiting the
      * amount of unrolling done.
      */
     if (nparts <= 4) {
         int i;
 
+        t0 = tcg_temp_new_i64();
         for (i = 0; i < len_align; i += 8) {
-            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
-            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
+            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
             tcg_gen_st_i64(t0, cpu_env, vofs + i);
+            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
         }
+        tcg_temp_free_i64(t0);
     } else {
         TCGLabel *loop = gen_new_label();
         TCGv_ptr tp, i = tcg_const_local_ptr(0);
 
+        /* Copy the clean address into a local temp, live across the loop. */
+        t0 = clean_addr;
+        clean_addr = tcg_temp_local_new_i64();
+        tcg_gen_mov_i64(clean_addr, t0);
+        tcg_temp_free_i64(t0);
+
         gen_set_label(loop);
 
-        /* Minimize the number of local temps that must be re-read from
-         * the stack each iteration.  Instead, re-compute values other
-         * than the loop counter.
-         */
+        t0 = tcg_temp_new_i64();
+        tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+        tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+
         tp = tcg_temp_new_ptr();
-        tcg_gen_addi_ptr(tp, i, imm);
-        tcg_gen_extu_ptr_i64(addr, tp);
-        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
-
-        tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
-
         tcg_gen_add_ptr(tp, cpu_env, i);
         tcg_gen_addi_ptr(i, i, 8);
         tcg_gen_st_i64(t0, tp, vofs);
         tcg_temp_free_ptr(tp);
+        tcg_temp_free_i64(t0);
 
         tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
         tcg_temp_free_ptr(i);
     }
 
-    /* Predicate register loads can be any multiple of 2.
+    /*
+     * Predicate register loads can be any multiple of 2.
      * Note that we still store the entire 64-bit unit into cpu_env.
      */
     if (len_remain) {
-        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);
-
+        t0 = tcg_temp_new_i64();
         switch (len_remain) {
         case 2:
         case 4:
         case 8:
-            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
+            tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
+                                MO_LE | ctz32(len_remain));
             break;
 
         case 6:
             t1 = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEUL);
-            tcg_gen_addi_i64(addr, addr, 4);
-            tcg_gen_qemu_ld_i64(t1, addr, midx, MO_LEUW);
+            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
+            tcg_gen_addi_i64(clean_addr, clean_addr, 4);
+            tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
             tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
             tcg_temp_free_i64(t1);
             break;
@@ -4415,9 +4420,9 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
             g_assert_not_reached();
         }
         tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+        tcg_temp_free_i64(t0);
     }
-    tcg_temp_free_i64(addr);
-    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(clean_addr);
 }
 
 /* Similarly for stores.  */
-- 
2.25.1
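
[Editor's aside, not part of the patch: the comment retained in do_ldr() argues that a
byte-stream copy is equivalent to a sequence of little-endian operations on larger
quantities. Below is a minimal, self-contained C sketch of that equivalence; all names
are illustrative and nothing here is QEMU API.]

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* What a little-endian 8-byte load (MO_LEQ-style) yields from a byte stream. */
static uint64_t le_load_64(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v |= (uint64_t)p[i] << (8 * i);
    }
    return v;
}

/* Write the 64-bit value back out as bytes, least-significant byte first. */
static void le_store_64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = (uint8_t)(v >> (8 * i));
    }
}

int main(void)
{
    uint8_t stream[16], copy[16];

    for (int i = 0; i < 16; i++) {
        stream[i] = (uint8_t)(0xa0 + i);    /* arbitrary register image */
    }

    /* Copy the 16-byte stream as two little-endian 64-bit quantities. */
    for (int i = 0; i < 16; i += 8) {
        le_store_64(&copy[i], le_load_64(&stream[i]));
    }

    /* Byte order is preserved, so the two views are interchangeable. */
    assert(memcmp(stream, copy, sizeof(stream)) == 0);
    return 0;
}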