Since we converted back to cpu_*_data_ra, we do not need to do this ourselves.
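
For illustration only (a sketch, not part of the change): the per-element
accessors already receive the return address, so the explicit bracketing
removed here is redundant, e.g.

    /* before */
    set_helper_retaddr(ra);
    tlb_fn(env, &scratch, i, addr, ra);   /* tlb_fn wraps a cpu_*_data_ra access */
    clear_helper_retaddr();

    /* after */
    tlb_fn(env, &scratch, i, addr, ra);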
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/sve_helper.c | 38 --------------------------------------
 1 file changed, 38 deletions(-)

diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index f8b310a16b..8b470991db 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -4155,12 +4155,6 @@ static intptr_t max_for_page(target_ulong base, intptr_t mem_off,
     return MIN(split, mem_max - mem_off) + mem_off;
 }
 
-#ifndef CONFIG_USER_ONLY
-/* These are normally defined only for CONFIG_USER_ONLY in <exec/cpu_ldst.h> */
-static inline void set_helper_retaddr(uintptr_t ra) { }
-static inline void clear_helper_retaddr(void) { }
-#endif
-
 /*
  * The result of tlb_vaddr_to_host for user-only is just g2h(x),
  * which is always non-null.  Elide the useless test.
@@ -4202,7 +4196,6 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
         return;
     }
     mem_off = reg_off >> diffsz;
-    set_helper_retaddr(retaddr);
 
     /*
      * If the (remaining) load is entirely within a single page, then:
@@ -4217,7 +4210,6 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
     if (test_host_page(host)) {
         mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
         tcg_debug_assert(mem_off == mem_max);
-        clear_helper_retaddr();
         /* After having taken any fault, zero leading inactive elements. */
         swap_memzero(vd, reg_off);
         return;
@@ -4268,7 +4260,6 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
     }
 #endif
 
-    clear_helper_retaddr();
     memcpy(vd, &scratch, reg_max);
 }
 
@@ -4328,7 +4319,6 @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
     intptr_t i, oprsz = simd_oprsz(desc);
     ARMVectorReg scratch[2] = { };
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4340,7 +4330,6 @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 2 * size;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 
     /* Wait until all exceptions have been raised to write back.  */
     memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -4355,7 +4344,6 @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
     intptr_t i, oprsz = simd_oprsz(desc);
     ARMVectorReg scratch[3] = { };
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4368,7 +4356,6 @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 3 * size;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 
     /* Wait until all exceptions have been raised to write back.  */
     memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -4384,7 +4371,6 @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
     intptr_t i, oprsz = simd_oprsz(desc);
     ARMVectorReg scratch[4] = { };
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4398,7 +4384,6 @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 4 * size;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 
     /* Wait until all exceptions have been raised to write back.  */
     memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -4505,7 +4490,6 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
         return;
     }
     mem_off = reg_off >> diffsz;
-    set_helper_retaddr(retaddr);
 
     /*
      * If the (remaining) load is entirely within a single page, then:
@@ -4520,7 +4504,6 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
     if (test_host_page(host)) {
         mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
         tcg_debug_assert(mem_off == mem_max);
-        clear_helper_retaddr();
         /* After any fault, zero any leading inactive elements.  */
         swap_memzero(vd, reg_off);
         return;
@@ -4563,7 +4546,6 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
     }
 #endif
 
-    clear_helper_retaddr();
     record_fault(env, reg_off, reg_max);
 }
 
@@ -4709,7 +4691,6 @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
     intptr_t i, oprsz = simd_oprsz(desc);
     void *vd = &env->vfp.zregs[rd];
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4720,7 +4701,6 @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += msize;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 }
 
 static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -4733,7 +4713,6 @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
     void *d1 = &env->vfp.zregs[rd];
     void *d2 = &env->vfp.zregs[(rd + 1) & 31];
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4745,7 +4724,6 @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 2 * msize;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 }
 
 static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -4759,7 +4737,6 @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
     void *d2 = &env->vfp.zregs[(rd + 1) & 31];
     void *d3 = &env->vfp.zregs[(rd + 2) & 31];
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4772,7 +4749,6 @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 3 * msize;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 }
 
 static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -4787,7 +4763,6 @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
     void *d3 = &env->vfp.zregs[(rd + 2) & 31];
     void *d4 = &env->vfp.zregs[(rd + 3) & 31];
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4801,7 +4776,6 @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 4 * msize;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 }
 
 #define DO_STN_1(N, NAME, ESIZE) \
@@ -4897,7 +4871,6 @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
     intptr_t i, oprsz = simd_oprsz(desc);
     ARMVectorReg scratch = { };
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -4908,7 +4881,6 @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
             i += 4, pg >>= 4;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 
     /* Wait until all exceptions have been raised to write back.  */
     memcpy(vd, &scratch, oprsz);
@@ -4922,7 +4894,6 @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
     intptr_t i, oprsz = simd_oprsz(desc) / 8;
     ARMVectorReg scratch = { };
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; i++) {
         uint8_t pg = *(uint8_t *)(vg + H1(i));
         if (likely(pg & 1)) {
@@ -4930,7 +4901,6 @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
             tlb_fn(env, &scratch, i * 8, base + (off << scale), ra);
         }
     }
-    clear_helper_retaddr();
 
     /* Wait until all exceptions have been raised to write back.  */
     memcpy(vd, &scratch, oprsz * 8);
@@ -5102,13 +5072,11 @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
     reg_off = find_next_active(vg, 0, reg_max, MO_32);
     if (likely(reg_off < reg_max)) {
         /* Perform one normal read, which will fault or not.  */
-        set_helper_retaddr(ra);
         addr = off_fn(vm, reg_off);
         addr = base + (addr << scale);
         tlb_fn(env, vd, reg_off, addr, ra);
 
         /* The rest of the reads will be non-faulting.  */
-        clear_helper_retaddr();
     }
 
     /* After any fault, zero the leading predicated false elements.  */
@@ -5144,13 +5112,11 @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
     reg_off = find_next_active(vg, 0, reg_max, MO_64);
     if (likely(reg_off < reg_max)) {
         /* Perform one normal read, which will fault or not.  */
-        set_helper_retaddr(ra);
         addr = off_fn(vm, reg_off);
         addr = base + (addr << scale);
         tlb_fn(env, vd, reg_off, addr, ra);
 
         /* The rest of the reads will be non-faulting.  */
-        clear_helper_retaddr();
     }
 
     /* After any fault, zero the leading predicated false elements.  */
@@ -5262,7 +5228,6 @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
     const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t i, oprsz = simd_oprsz(desc);
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
@@ -5273,7 +5238,6 @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
             i += 4, pg >>= 4;
         } while (i & 15);
     }
-    clear_helper_retaddr();
 }
 
 static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
@@ -5283,7 +5247,6 @@ static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
     const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t i, oprsz = simd_oprsz(desc) / 8;
 
-    set_helper_retaddr(ra);
     for (i = 0; i < oprsz; i++) {
        uint8_t pg = *(uint8_t *)(vg + H1(i));
         if (likely(pg & 1)) {
@@ -5291,7 +5254,6 @@ static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
             tlb_fn(env, vd, i * 8, base + (off << scale), ra);
         }
     }
-    clear_helper_retaddr();
 }
 
 #define DO_ST1_ZPZ_S(MEM, OFS) \
-- 
2.20.1