This improves the task exit case by batching TLB invalidates: when a
pte is being cleared (new_pte == 0), the immediate per-page flush can
be skipped, because the mmu gather code batches the TLB flush for us.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/radix.h | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
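
For reference, the idea can be modelled in a stand-alone C sketch (not
kernel code; page_table[], pte_update(), flush_tlb_page() and
flush_tlb_range() below are invented stand-ins for the pte update and
flush paths, and the final range flush stands in for what the mmu
gather code does on task exit):

#include <stdio.h>

static unsigned long page_table[8] = { 0x1001, 0x2001, 0x3001, 0x4001,
				       0x5001, 0x6001, 0x7001, 0x8001 };
static unsigned int flush_count;

/* Invented stand-in for the immediate radix__flush_tlb_page_psize() call. */
static void flush_tlb_page(unsigned long addr)
{
	flush_count++;
	printf("  per-page flush for addr %lu\n", addr);
}

/* Invented stand-in for the single flush the mmu gather code issues. */
static void flush_tlb_range(unsigned long start, unsigned long end)
{
	flush_count++;
	printf("  one batched flush for [%lu, %lu)\n", start, end);
}

/* Models the patched update: clear first, flush only if a valid pte remains. */
static unsigned long pte_update(unsigned long addr, unsigned long clr,
				unsigned long set)
{
	unsigned long old_pte, new_pte;

	old_pte = page_table[addr];
	page_table[addr] = 0;
	new_pte = (old_pte | set) & ~clr;
	/*
	 * If we are only clearing the pte, skip the per-page flush and
	 * let the caller's batched flush cover it.
	 */
	if (new_pte) {
		flush_tlb_page(addr);
		page_table[addr] = new_pte;
	}
	return old_pte;
}

int main(void)
{
	unsigned long addr;

	/* Task exit: clear every pte, then flush the whole range once. */
	for (addr = 0; addr < 8; addr++)
		pte_update(addr, ~0UL, 0);
	flush_tlb_range(0, 8);

	printf("total flushes: %u (vs 8 per-page flushes before)\n",
	       flush_count);
	return 0;
}

Running this clears all eight ptes with a single batched flush instead
of one flush per pte, which is the saving the hunk below gives the task
exit path.
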
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index aec6e8ee6e27..83c77323a769 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -142,15 +142,21 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		unsigned long new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
-		asm volatile("ptesync" : : : "memory");
 		/*
 		 * new value of pte
 		 */
 		new_pte = (old_pte | set) & ~clr;
-		psize = radix_get_mmu_psize(pg_sz);
-		radix__flush_tlb_page_psize(mm, addr, psize);
-
-		__radix_pte_update(ptep, 0, new_pte);
+		/*
+		 * If we are trying to clear the pte, we can skip
+		 * the below sequence and batch the tlb flush. The
+		 * tlb flush batching is done by mmu gather code
+		 */
+		if (new_pte) {
+			asm volatile("ptesync" : : : "memory");
+			psize = radix_get_mmu_psize(pg_sz);
+			radix__flush_tlb_page_psize(mm, addr, psize);
+			__radix_pte_update(ptep, 0, new_pte);
+		}
 	} else
 		old_pte = __radix_pte_update(ptep, clr, set);
 	asm volatile("ptesync" : : : "memory");
-- 
2.10.2