This improves the task exit case by batching TLB invalidates: when a pte is being fully cleared, skip the immediate per-page flush and let the mmu gather code batch the invalidation.
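
As an aside, a minimal userspace sketch of the batching idea (illustrative
only; this is not the actual mmu gather implementation, and all names below
are made up for the example):

    /*
     * Instead of flushing once per cleared entry, record the range
     * being torn down and issue a single flush at the end.
     */
    #include <stdio.h>

    struct gather {
            unsigned long start, end;
            int pending;
    };

    /* Clearing only: no flush here, just widen the pending range. */
    static void gather_clear(struct gather *g, unsigned long addr,
                             unsigned long size)
    {
            if (!g->pending) {
                    g->start = addr;
                    g->end = addr + size;
                    g->pending = 1;
                    return;
            }
            if (addr < g->start)
                    g->start = addr;
            if (addr + size > g->end)
                    g->end = addr + size;
    }

    /* One range flush replaces many per-page flushes. */
    static void gather_finish(struct gather *g)
    {
            if (g->pending)
                    printf("flush %#lx-%#lx\n", g->start, g->end);
            g->pending = 0;
    }

    int main(void)
    {
            struct gather g = { 0 };
            unsigned long addr;

            /* Tear down four pages at exit: clear all, flush once. */
            for (addr = 0x1000; addr < 0x5000; addr += 0x1000)
                    gather_clear(&g, addr, 0x1000);
            gather_finish(&g);
            return 0;
    }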

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/radix.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index aec6e8ee6e27..e8b4f39e9fab 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -147,10 +147,16 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
                 * new value of pte
                 */
                new_pte = (old_pte | set) & ~clr;
-               psize = radix_get_mmu_psize(pg_sz);
-               radix__flush_tlb_page_psize(mm, addr, psize);
-
-               __radix_pte_update(ptep, 0, new_pte);
+               /*
+                * If we are trying to clear the pte, we can skip
+                * the sequence below and batch the tlb flush. The
+                * tlb flush batching is done by the mmu gather code.
+                */
+               if (new_pte) {
+                       psize = radix_get_mmu_psize(pg_sz);
+                       radix__flush_tlb_page_psize(mm, addr, psize);
+                       __radix_pte_update(ptep, 0, new_pte);
+               }
        } else
                old_pte = __radix_pte_update(ptep, clr, set);
        asm volatile("ptesync" : : : "memory");
-- 
2.10.2
