Now there's only one copy of the local TLB flush logic for
non-kernel pages on SMP kernels.

The only functional change is that arch_tlbbatch_flush() will now
leave_mm() on the local CPU if that CPU is in the batch and is in
TLBSTATE_LAZY mode.
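
For context, flush_tlb_func_local() is a thin wrapper around the
shared flush_tlb_func_common() path, which handles the lazy case
roughly like this (a sketch based on this series, not a verbatim
copy of the helper):

	/* Sketch: the shared local/remote flush path used above. */
	static void flush_tlb_func_common(const struct flush_tlb_info *f,
					  bool local, enum tlb_flush_reason reason)
	{
		/* A lazy CPU drops the mm rather than flushing it. */
		if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
			leave_mm(smp_processor_id());
			return;
		}
		/* ... full or ranged flush plus tracing follows ... */
	}

So a local CPU in the batch that is lazy now takes the leave_mm()
path instead of the unconditional local_flush_tlb() removed below.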

Cc: Rik van Riel <r...@redhat.com>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Nadav Amit <na...@vmware.com>
Cc: Michal Hocko <mho...@suse.com>
Cc: Arjan van de Ven <ar...@linux.intel.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Andy Lutomirski <l...@kernel.org>
---
 arch/x86/mm/tlb.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 12b8812e8926..c03b4a0ce58c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -382,12 +382,8 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 
        int cpu = get_cpu();
 
-       if (cpumask_test_cpu(cpu, &batch->cpumask)) {
-               count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-               local_flush_tlb();
-               trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
-       }
-
+       if (cpumask_test_cpu(cpu, &batch->cpumask))
+               flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
                flush_tlb_others(&batch->cpumask, &info);
        cpumask_clear(&batch->cpumask);
-- 
2.9.4
