This creates a per-page method for checking alignment.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/exec/cpu-all.h |  4 +++-
 accel/tcg/cputlb.c     | 25 ++++++++++++++++++++++++-
 2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index f3b2f4229c..5bb04782ba 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -399,8 +399,10 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_BSWAP            (1 << 0)
 /* Set if TLB entry contains a watchpoint. */
 #define TLB_WATCHPOINT       (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED    (1 << 2)
 
-#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT)
+#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
 
 /* The two sets of flags must not overlap. */
 QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a90688ac30..c692e71766 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1546,7 +1546,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     flags |= full->slow_flags[access_type];
 
     /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
-    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+    if (flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED)) {
         *phost = NULL;
         return TLB_MMIO;
     }
@@ -1885,6 +1885,29 @@ static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
         tcg_debug_assert((flags & TLB_BSWAP) == 0);
     }
 
+    /*
+     * This alignment check differs from the one above, in that this is
+     * based on the atomicity of the operation. The intended use case is
+     * the ARM memory type field of each PTE, where access to pages with
+     * Device memory type require alignment.
+     */
+    if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+        MemOp atmax = l->memop & MO_ATMAX_MASK;
+        MemOp atom = l->memop & MO_ATOM_MASK;
+        MemOp size = l->memop & MO_SIZE;
+
+        if (size != MO_8 && atom != MO_ATOM_NONE) {
+            if (atmax == MO_ATMAX_SIZE) {
+                a_bits = size;
+            } else {
+                a_bits = atmax >> MO_ATMAX_SHIFT;
+            }
+            if (addr & ((1 << a_bits) - 1)) {
+                cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
+            }
+        }
+    }
+
     return crosspage;
 }
-- 
2.34.1
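
[Editor's note, not part of the patch] As a usage illustration: a target that wants this per-page enforcement would set TLB_CHECK_ALIGNED in CPUTLBEntryFull::slow_flags before installing the entry with tlb_set_page_full(). The sketch below is hypothetical; set_device_page() and the is_device argument are made-up names standing in for the target's PTE memory-type decode, and it assumes the usual cputlb/memattrs declarations are in scope.

    /*
     * Hypothetical example, not part of this patch: install a TLB entry
     * that requires aligned data accesses, e.g. for an Arm Device-memory
     * mapping.  Names here (set_device_page, is_device) are illustrative.
     */
    static void set_device_page(CPUState *cs, target_ulong addr, hwaddr paddr,
                                int prot, int mmu_idx, bool is_device)
    {
        CPUTLBEntryFull full = {
            .phys_addr = paddr,
            .attrs = MEMTXATTRS_UNSPECIFIED,
            .prot = prot,
            .lg_page_size = TARGET_PAGE_BITS,
        };

        if (is_device) {
            /* Ask cputlb to enforce alignment on loads and stores here. */
            full.slow_flags[MMU_DATA_LOAD] |= TLB_CHECK_ALIGNED;
            full.slow_flags[MMU_DATA_STORE] |= TLB_CHECK_ALIGNED;
        }

        tlb_set_page_full(cs, mmu_idx, addr, &full);
    }

Only pages that carry the flag pay for the extra check; all other pages go through the existing lookup paths unchanged.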