Since commit b9ef323ea168 ("powerpc/64s: Disable preemption in
hash lazy mmu mode") a task cannot be preempted while inside
arch_enter_lazy_mmu_mode(). Therefore, the batch re-activation
code never gets called, so remove it.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/powerpc/include/asm/thread_info.h |  2 --
 arch/powerpc/kernel/process.c          | 25 -------------------------
 2 files changed, 27 deletions(-)

diff --git a/arch/powerpc/include/asm/thread_info.h 
b/arch/powerpc/include/asm/thread_info.h
index 2785c7462ebf..092118a68862 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -154,12 +154,10 @@ void arch_setup_new_exec(void);
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
 #define TLF_NAPPING            0       /* idle thread enabled NAP mode */
 #define TLF_SLEEPING           1       /* suspend code enabled SLEEP mode */
-#define TLF_LAZY_MMU           3       /* tlb_batch is active */
 #define TLF_RUNLATCH           4       /* Is the runlatch enabled? */
 
 #define _TLF_NAPPING           (1 << TLF_NAPPING)
 #define _TLF_SLEEPING          (1 << TLF_SLEEPING)
-#define _TLF_LAZY_MMU          (1 << TLF_LAZY_MMU)
 #define _TLF_RUNLATCH          (1 << TLF_RUNLATCH)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ef91f71e07c4..b5810b932e21 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1281,9 +1281,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 {
        struct thread_struct *new_thread, *old_thread;
        struct task_struct *last;
-#ifdef CONFIG_PPC_64S_HASH_MMU
-       struct ppc64_tlb_batch *batch;
-#endif
 
        new_thread = &new->thread;
        old_thread = &current->thread;
@@ -1291,14 +1288,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
        WARN_ON(!irqs_disabled());
 
 #ifdef CONFIG_PPC_64S_HASH_MMU
-       batch = this_cpu_ptr(&ppc64_tlb_batch);
-       if (batch->active) {
-               current_thread_info()->local_flags |= _TLF_LAZY_MMU;
-               if (batch->index)
-                       __flush_tlb_pending(batch);
-               batch->active = 0;
-       }
-
        /*
         * On POWER9 the copy-paste buffer can only paste into
         * foreign real addresses, so unprivileged processes can not
@@ -1369,20 +1358,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
         */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_PPC_64S_HASH_MMU
-       /*
-        * This applies to a process that was context switched while inside
-        * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
-        * deactivated above, before _switch(). This will never be the case
-        * for new tasks.
-        */
-       if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
-               current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-               batch = this_cpu_ptr(&ppc64_tlb_batch);
-               batch->active = 1;
-       }
-#endif
-
        /*
         * Math facilities are masked out of the child MSR in copy_thread.
         * A new task does not need to restore_math because it will
-- 
2.45.2


Reply via email to