Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/helper-sve.h    |  2 +
 target/arm/sve_helper.c    | 74 ++++++++++++++++++++++++++++--
 target/arm/translate-sve.c | 93 ++++++++++++++++----------------------
 3 files changed, 110 insertions(+), 59 deletions(-)
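Note for review: two pieces of arithmetic in the new helper are easy to
misread, the page-split computation and the masking of a short predicate
remainder. Below is a standalone sketch of both, illustration only and
not QEMU code: PAGE_SIZE/PAGE_MASK stand in for TARGET_PAGE_SIZE and
TARGET_PAGE_MASK with 4k pages assumed, and low_mask() plays the role of
MAKE_64BIT_MASK(0, len).

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define PAGE_MASK ((int)-PAGE_SIZE)

    /* Mask of the low @len bits, as MAKE_64BIT_MASK(0, len) computes. */
    static uint64_t low_mask(int len)
    {
        return ~0ULL >> (64 - len);
    }

    int main(void)
    {
        /*
         * -(addr | PAGE_MASK) is the number of bytes from addr to the
         * end of its page: for an in-page offset x, addr | PAGE_MASK
         * equals x - PAGE_SIZE, so negating it yields PAGE_SIZE - x.
         */
        for (int off = 0; off < PAGE_SIZE; off += 2) {
            uint32_t addr = 0x12340000u + off;
            int in_page = -((int)addr | PAGE_MASK);
            assert(in_page == PAGE_SIZE - off);
        }

        /*
         * A 2/4/6-byte predicate remainder is loaded as a full
         * little-endian 64-bit unit; masking off the bytes beyond the
         * access leaves the high bits of the in-register uint64_t zero.
         */
        uint64_t val = 0x8877665544332211ull;   /* 8 bytes from memory */
        assert((val & low_mask(2 * 8)) == 0x2211);
        assert((val & low_mask(6 * 8)) == 0x665544332211ull);
        return 0;
    }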
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index 2f47279155..82ea70cf63 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -1123,6 +1123,8 @@ DEF_HELPER_FLAGS_5(sve_ftmad_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_ftmad_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve_ftmad_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(sve_ldr, TCG_CALL_NO_WG, void, env, ptr, tl, int)
+
 DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
 DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
 DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 80453953ad..ede72a2989 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -3967,10 +3967,6 @@ void HELPER(sve_fcmla_zpzzz_d)(CPUARMState *env, void *vg, uint32_t desc)
     } while (i != 0);
 }
 
-/*
- * Load contiguous data, protected by a governing predicate.
- */
-
 /*
  * Load elements into @vd + @reg_off, from @host,
  * or the reverse for stores.
@@ -4194,6 +4190,76 @@ static bool sve_probe_page(SVEHostPage *info, bool nofault,
     return true;
 }
 
+/*
+ * Load contiguous data, unpredicated.
+ *
+ * Note that unpredicated loads/stores of vector/predicate registers
+ * are defined as a stream of bytes, which equates to little-endian
+ * operations on larger quantities.
+ *
+ * Note that any MTE check has already been handled by the caller.
+ */
+
+void HELPER(sve_ldr)(CPUARMState *env, void *vd, target_ulong addr, int size)
+{
+    int mmu_idx = cpu_mmu_index(env, false);
+    int in_page = -((int)addr | TARGET_PAGE_MASK);
+    uintptr_t ra = GETPC();
+    uint64_t val;
+    int i;
+
+    /* Small loads are expanded inline. */
+    tcg_debug_assert(size > 2 * 8);
+
+    /* Bulk copy the data from memory to the register. */
+    if (likely(size <= in_page)) {
+        void *host = probe_read(env, addr, size, mmu_idx, ra);
+
+        if (unlikely(!host)) {
+            goto mmio;
+        }
+        memcpy(vd, host, size);
+    } else {
+        void *h1 = probe_read(env, addr, in_page, mmu_idx, ra);
+        void *h2 = probe_read(env, addr + in_page, size - in_page, mmu_idx, ra);
+
+        if (unlikely(!h1 || !h2)) {
+            goto mmio;
+        }
+        memcpy(vd, h1, in_page);
+        memcpy(vd + in_page, h2, size - in_page);
+    }
+
+    /* Predicate load length may be any multiple of 2; ensure high bits 0. */
+    if (unlikely(size & 7)) {
+        memset(vd + size, 0, 8 - (size & 7));
+    }
+
+    /*
+     * The memcpy and memset above kept the bytes in memory order.
+     * The in-register format has uint64_t in host order, so for
+     * big-endian host we need to bswap.
+     */
+    for (i = 0; i < size; i += 8) {
+        le64_to_cpus(vd + i);
+    }
+    return;
+
+ mmio:
+    for (i = 0; i + 8 <= size; i += 8) {
+        val = cpu_ldq_data_ra(env, addr + i, ra);
+        val = le_bswap64(val);
+        *(uint64_t *)(vd + i) = val;
+    }
+
+    /* Predicate load length may be any multiple of 2; keep only the
+       low (size - i) bytes and zero the rest of the final unit. */
+    if (unlikely(i != size)) {
+        val = cpu_ldq_data_ra(env, addr + i, ra);
+        val = le_bswap64(val);
+        val &= MAKE_64BIT_MASK(0, (size - i) * 8);
+        *(uint64_t *)(vd + i) = val;
+    }
+}
 
 /*
  * Analyse contiguous data, protected by a governing predicate.
  */
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 7bd7de80e6..e55f8835bb 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -4352,8 +4352,13 @@ static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a)
 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
 */
 
-/* Subroutine loading a vector register at VOFS of LEN bytes.
+/*
+ * Subroutine loading a vector register at VOFS of LEN bytes.
  * The load should begin at the address Rn + IMM.
+ *
+ * Note that unpredicated loads/stores of vector/predicate registers
+ * are defined as a stream of bytes, which equates to little-endian
+ * operations on larger quantities.
  */
 static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
@@ -4362,81 +4367,59 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
 {
     int len_align = QEMU_ALIGN_DOWN(len, 8);
     int len_remain = len % 8;
     int nparts = len / 8 + ctpop8(len_remain);
     int midx = get_mem_index(s);
-    TCGv_i64 addr, t0, t1;
+    TCGv_i64 dirty_addr, clean_addr, t0, t1;
+    int i;
 
-    addr = tcg_temp_new_i64();
-    t0 = tcg_temp_new_i64();
+    dirty_addr = read_cpu_reg_sp(s, rn, true);
+    tcg_gen_addi_i64(dirty_addr, dirty_addr, imm);
 
-    /* Note that unpredicated load/store of vector/predicate registers
-     * are defined as a stream of bytes, which equates to little-endian
-     * operations on larger quantities.  There is no nice way to force
-     * a little-endian load for aarch64_be-linux-user out of line.
-     *
-     * Attempt to keep code expansion to a minimum by limiting the
-     * amount of unrolling done.
-     */
-    if (nparts <= 4) {
-        int i;
+    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8);
 
-        for (i = 0; i < len_align; i += 8) {
-            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
-            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
-            tcg_gen_st_i64(t0, cpu_env, vofs + i);
-        }
-    } else {
-        TCGLabel *loop = gen_new_label();
-        TCGv_ptr tp, i = tcg_const_local_ptr(0);
+    /* Limit tcg code expansion by doing large loads out of line. */
+    if (nparts > 4) {
+        TCGv_ptr t_rd = tcg_temp_new_ptr();
+        TCGv_i32 t_len = tcg_const_i32(len);
 
-        gen_set_label(loop);
-
-        /* Minimize the number of local temps that must be re-read from
-         * the stack each iteration.  Instead, re-compute values other
-         * than the loop counter.
-         */
-        tp = tcg_temp_new_ptr();
-        tcg_gen_addi_ptr(tp, i, imm);
-        tcg_gen_extu_ptr_i64(addr, tp);
-        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
-
-        tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
-
-        tcg_gen_add_ptr(tp, cpu_env, i);
-        tcg_gen_addi_ptr(i, i, 8);
-        tcg_gen_st_i64(t0, tp, vofs);
-        tcg_temp_free_ptr(tp);
-
-        tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
-        tcg_temp_free_ptr(i);
+        tcg_gen_addi_ptr(t_rd, cpu_env, vofs);
+        gen_helper_sve_ldr(cpu_env, t_rd, clean_addr, t_len);
+        tcg_temp_free_ptr(t_rd);
+        tcg_temp_free_i32(t_len);
+        return;
     }
 
-    /* Predicate register loads can be any multiple of 2.
-     * Note that we still store the entire 64-bit unit into cpu_env.
+    t0 = tcg_temp_new_i64();
+    for (i = 0; i < len_align; i += 8) {
+        tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+        tcg_gen_st_i64(t0, cpu_env, vofs + i);
+        tcg_gen_addi_i64(clean_addr, clean_addr, 8);
+    }
+
+    /*
+     * Predicate register loads can be any multiple of 2.
+     * Note that we still store the entire 64-bit unit into cpu_env
+     * so that the high bits are zeroed.
      */
     if (len_remain) {
-        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);
-
         switch (len_remain) {
         case 2:
-        case 4:
-        case 8:
-            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
+            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUW);
+            break;
+        case 4:
+            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
             break;
-
         case 6:
             t1 = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEUL);
-            tcg_gen_addi_i64(addr, addr, 4);
-            tcg_gen_qemu_ld_i64(t1, addr, midx, MO_LEUW);
+            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
+            tcg_gen_addi_i64(clean_addr, clean_addr, 4);
+            tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
             tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
             tcg_temp_free_i64(t1);
             break;
-
         default:
             g_assert_not_reached();
         }
         tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
     }
-    tcg_temp_free_i64(addr);
     tcg_temp_free_i64(t0);
 }
-- 
2.20.1