When TCG_TARGET_SIGNED_ADDR32 is set, adjust the TLB addend so that the 32-bit guest address is sign-extended within the 64-bit host register instead of zero-extended.
This will simplify tcg hosts like MIPS, RISC-V, and LoongArch, which naturally sign-extend 32-bit values, in contrast to x86_64 and AArch64 which zero-extend them.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/cputlb.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 0e62aa5d7c..0dbc3efbc7 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -40,6 +40,7 @@
 #include "qemu/plugin-memory.h"
 #endif
 #include "tcg/tcg-ldst.h"
+#include "tcg-target-sa32.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -93,6 +94,9 @@ static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
 
 static inline uintptr_t g2h_tlbe(const CPUTLBEntry *tlb, target_ulong gaddr)
 {
+    if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
+        return tlb->addend + (int32_t)gaddr;
+    }
     return tlb->addend + (uintptr_t)gaddr;
 }
 
@@ -1244,7 +1248,13 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     desc->iotlb[index].attrs = attrs;
 
     /* Now calculate the new entry */
-    tn.addend = addend - vaddr_page;
+
+    if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
+        tn.addend = addend - (int32_t)vaddr_page;
+    } else {
+        tn.addend = addend - vaddr_page;
+    }
+
     if (prot & PAGE_READ) {
         tn.addr_read = address;
         if (wp_flags & BP_MEM_READ) {
-- 
2.25.1
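
For reference only (not part of the patch): a minimal standalone sketch of the addend arithmetic, assuming a 64-bit host, a 32-bit guest, and hypothetical address values. It checks that biasing the addend by the sign-extended page address and then adding the sign-extended guest address reaches the same host address as the zero-extended pair.

/*
 * Hypothetical illustration, not QEMU code: either addend scheme must
 * produce the same host address for any access within the page, as
 * long as the per-access extension matches how the addend was biased.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical host mapping and guest addresses (high bit set). */
    uintptr_t host_page  = 0x00007f1200345000u;
    uint32_t  vaddr_page = 0xfffe1000u;
    uint32_t  gaddr      = 0xfffe1234u;

    /* Zero-extended scheme (x86_64/AArch64 style). */
    uintptr_t addend_zx = host_page - (uintptr_t)vaddr_page;
    uintptr_t host_zx   = addend_zx + (uintptr_t)gaddr;

    /* Sign-extended scheme (TCG_TARGET_SIGNED_ADDR32 style). */
    uintptr_t addend_sx = host_page - (int32_t)vaddr_page;
    uintptr_t host_sx   = addend_sx + (int32_t)gaddr;

    /* Same guest access, same host address, under either scheme. */
    assert(host_zx == host_sx);
    printf("host address: 0x%" PRIxPTR "\n", host_sx);
    return 0;
}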