From: Richard Henderson <richard.hender...@linaro.org>

Now that ... /* we use range? FILL ME... */ ... we can remove the
encode_pbm_to_runon() and flush_all_helper() calls.
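To illustrate the change (a sketch only, re-using the names from the
diff below): the cross-CPU flush paths previously tried to pack
addr/idxmap/bits into a single pointer-sized run_on_cpu_data, falling
back to allocation only when idxmap did not fit; now they
unconditionally allocate a TLBFlushRangeData and let the worker free
it:

    /* Old fast path: only usable while idxmap fit beside the
     * 6 bits reserved for @bits below TARGET_PAGE_BITS. */
    if (encode_pbm_to_runon(&runon, d)) {
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    }

    /* New path, taken unconditionally; @p is freed by the worker. */
    TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
    async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                     RUN_ON_CPU_HOST_PTR(p));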
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
Message-Id: <20210508201640.1045808-1-richard.hender...@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4...@amsat.org>
---
XXX proper description, commit might be placed earlier in series.
---
 accel/tcg/cputlb.c | 86 +++++++++++-----------------------------------
 1 file changed, 20 insertions(+), 66 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index ad0e44bce63..2f7088614a7 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -788,34 +788,6 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
     }
 }
 
-static bool encode_pbm_to_runon(run_on_cpu_data *out,
-                                TLBFlushRangeData d)
-{
-    /* We need 6 bits to hold to hold @bits up to 63. */
-    if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
-        *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
-        return true;
-    }
-    return false;
-}
-
-static TLBFlushRangeData
-decode_runon_to_pbm(run_on_cpu_data data)
-{
-    target_ulong addr_map_bits = (target_ulong) data.target_ptr;
-    return (TLBFlushRangeData){
-        .addr = addr_map_bits & TARGET_PAGE_MASK,
-        .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
-        .bits = addr_map_bits & 0x3f
-    };
-}
-
-static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
-                                                  run_on_cpu_data runon)
-{
-    tlb_flush_range_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
-}
-
 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                               run_on_cpu_data data)
 {
@@ -829,7 +801,6 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                unsigned bits)
 {
     TLBFlushRangeData d;
-    run_on_cpu_data runon;
 
     /*
      * If all bits are significant, and len is small,
@@ -853,8 +824,6 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
 
     if (qemu_cpu_is_self(cpu)) {
         tlb_flush_range_by_mmuidx_async_0(cpu, d);
-    } else if (encode_pbm_to_runon(&runon, d)) {
-        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
     } else {
         /* Otherwise allocate a structure, freed by the worker. */
         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
@@ -874,7 +843,7 @@ void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                         uint16_t idxmap, unsigned bits)
 {
     TLBFlushRangeData d;
-    run_on_cpu_data runon;
+    CPUState *dst_cpu;
 
     /*
      * If all bits are significant, and len is small,
@@ -896,19 +865,13 @@ void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
     d.idxmap = idxmap;
     d.bits = bits;
 
-    if (encode_pbm_to_runon(&runon, d)) {
-        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
-    } else {
-        CPUState *dst_cpu;
-
-        /* Allocate a separate data block for each destination cpu. */
-        CPU_FOREACH(dst_cpu) {
-            if (dst_cpu != src_cpu) {
-                TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
-                async_run_on_cpu(dst_cpu,
-                                 tlb_flush_range_by_mmuidx_async_1,
-                                 RUN_ON_CPU_HOST_PTR(p));
-            }
+    /* Allocate a separate data block for each destination cpu. */
+    CPU_FOREACH(dst_cpu) {
+        if (dst_cpu != src_cpu) {
+            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
+            async_run_on_cpu(dst_cpu,
+                             tlb_flush_range_by_mmuidx_async_1,
+                             RUN_ON_CPU_HOST_PTR(p));
         }
     }
 
@@ -929,8 +892,8 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                uint16_t idxmap,
                                                unsigned bits)
 {
-    TLBFlushRangeData d;
-    run_on_cpu_data runon;
+    TLBFlushRangeData d, *p;
+    CPUState *dst_cpu;
 
     /*
      * If all bits are significant, and len is small,
@@ -952,27 +915,18 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     d.idxmap = idxmap;
     d.bits = bits;
 
-    if (encode_pbm_to_runon(&runon, d)) {
-        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
-        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
-                              runon);
-    } else {
-        CPUState *dst_cpu;
-        TLBFlushRangeData *p;
-
-        /* Allocate a separate data block for each destination cpu. */
-        CPU_FOREACH(dst_cpu) {
-            if (dst_cpu != src_cpu) {
-                p = g_memdup(&d, sizeof(d));
-                async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
-                                 RUN_ON_CPU_HOST_PTR(p));
-            }
+    /* Allocate a separate data block for each destination cpu. */
+    CPU_FOREACH(dst_cpu) {
+        if (dst_cpu != src_cpu) {
+            p = g_memdup(&d, sizeof(d));
+            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
+                             RUN_ON_CPU_HOST_PTR(p));
         }
-
-        p = g_memdup(&d, sizeof(d));
-        async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
-                              RUN_ON_CPU_HOST_PTR(p));
     }
+
+    p = g_memdup(&d, sizeof(d));
+    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
+                          RUN_ON_CPU_HOST_PTR(p));
 }
 
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-- 
2.26.3