Hi Richard,

On 8/3/21 6:14 AM, Richard Henderson wrote:
> Use the newly exposed cpu_unaligned_access for atomic_mmu_lookup,
> which has access to complete alignment info from the TCGMemOpIdx arg.
>
> Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
> ---
>  accel/tcg/user-exec.c | 14 +++++++++++++-
>  1 file changed, 13 insertions(+), 1 deletion(-)
>
> diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
> index 90d1a2d327..5ad808a25a 100644
> --- a/accel/tcg/user-exec.c
> +++ b/accel/tcg/user-exec.c
> @@ -28,6 +28,7 @@
>  #include "qemu/atomic128.h"
>  #include "trace/trace-root.h"
>  #include "trace/mem.h"
> +#include "internal.h"
>
>  #undef EAX
>  #undef ECX
> @@ -1230,11 +1231,22 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>                                 TCGMemOpIdx oi, int size, int prot,
>                                 uintptr_t retaddr)
>  {
> +    MemOp mop = get_memop(oi);
> +    int a_bits = get_alignment_bits(mop);
> +    void *ret;
> +
> +    /* Enforce guest required alignment. */
> +    if (unlikely(addr & ((1 << a_bits) - 1))) {
> +        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
> +        cpu_unaligned_access(env_cpu(env), addr, t, get_mmuidx(oi), retaddr);
> +    }
> +
>      /* Enforce qemu required alignment. */
>      if (unlikely(addr & (size - 1))) {
>          cpu_loop_exit_atomic(env_cpu(env), retaddr);
>      }
> -    void *ret = g2h(env_cpu(env), addr);
> +
> +    ret = g2h(env_cpu(env), addr);
>      set_helper_retaddr(retaddr);
>      return ret;

Can't we simply do:

    return g2h(env_cpu(env), addr);

?

> }
>
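For illustration, here is how the function might read if the ret temporary were
dropped as suggested above. This is only a sketch, not the patch as posted, and
it assumes set_helper_retaddr() can simply be moved before the return (g2h() is
just guest_base address arithmetic in user mode, so the reordering should be
harmless):

    static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                                   TCGMemOpIdx oi, int size, int prot,
                                   uintptr_t retaddr)
    {
        MemOp mop = get_memop(oi);
        int a_bits = get_alignment_bits(mop);

        /* Enforce guest required alignment. */
        if (unlikely(addr & ((1 << a_bits) - 1))) {
            MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
            cpu_unaligned_access(env_cpu(env), addr, t, get_mmuidx(oi), retaddr);
        }

        /* Enforce qemu required alignment. */
        if (unlikely(addr & (size - 1))) {
            cpu_loop_exit_atomic(env_cpu(env), retaddr);
        }

        /* Record the return address, then hand back the host pointer. */
        set_helper_retaddr(retaddr);
        return g2h(env_cpu(env), addr);
    }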
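As a side note on the new guest-alignment test itself: addr & ((1 << a_bits) - 1)
is non-zero exactly when addr is not aligned to a (1 << a_bits)-byte boundary.
A minimal standalone illustration with made-up addresses (nothing here is QEMU
code, just the same mask trick):

    #include <stdint.h>
    #include <stdio.h>

    /* True when addr is NOT aligned to a (1 << a_bits)-byte boundary,
       i.e. when any of the low a_bits bits of the address are set. */
    static int is_misaligned(uint64_t addr, int a_bits)
    {
        return (addr & (((uint64_t)1 << a_bits) - 1)) != 0;
    }

    int main(void)
    {
        /* A 4-byte access needs a_bits = 2. */
        printf("%d\n", is_misaligned(0x1000, 2));   /* prints 0: aligned */
        printf("%d\n", is_misaligned(0x1002, 2));   /* prints 1: misaligned */
        return 0;
    }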