From: Richard Henderson <richard.hender...@linaro.org>

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
Message-Id: <20210508201640.1045808-1-richard.hender...@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4...@amsat.org>
---
 include/exec/exec-all.h | 19 +++++++++++++++++++
 accel/tcg/cputlb.c      | 20 +++++++++++++++-----
 2 files changed, 34 insertions(+), 5 deletions(-)
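As a usage note, a minimal sketch of how a caller might use the new
interface follows. The helper flush_four_pages() is hypothetical,
for illustration only; tlb_flush_range_by_mmuidx() and the TARGET_*
symbols are the real QEMU names used by this patch.

  /*
   * Hypothetical example, not part of this patch: flush the four
   * pages starting at @base for the mmu indexes in @idxmap, with
   * all address bits significant.
   */
  static void flush_four_pages(CPUState *cpu, target_ulong base,
                               uint16_t idxmap)
  {
      /* Flushes every page in [base, base + 4 * TARGET_PAGE_SIZE). */
      tlb_flush_range_by_mmuidx(cpu, base & TARGET_PAGE_MASK,
                                4 * TARGET_PAGE_SIZE, idxmap,
                                TARGET_LONG_BITS);
  }

A call with len <= TARGET_PAGE_SIZE and bits == TARGET_LONG_BITS
devolves to tlb_flush_page_by_mmuidx(), per the fast path at the top
of the new function below.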
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 6b036cae8f6..5a5f6d4c1a8 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -262,6 +262,20 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
     (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
+/**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+                               target_ulong len, uint16_t idxmap,
+                               unsigned bits);
 
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
@@ -365,6 +379,11 @@ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                               uint16_t idxmap, unsigned bits)
 {
 }
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+                                             target_ulong len, uint16_t idxmap,
+                                             unsigned bits)
+{
+}
 #endif
 /**
  * probe_access:
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 36e7831ef70..16924ceb777 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -825,14 +825,18 @@ static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
     g_free(d);
 }
 
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
-                                   uint16_t idxmap, unsigned bits)
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+                               target_ulong len, uint16_t idxmap,
+                               unsigned bits)
 {
     TLBFlushRangeData d;
     run_on_cpu_data runon;
 
-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
         return;
     }
@@ -844,7 +848,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -860,6 +864,12 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
     }
 }
 
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+                                   uint16_t idxmap, unsigned bits)
+{
+    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
+}
+
 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                             target_ulong addr,
                                             uint16_t idxmap,
-- 
2.26.3