With the previous patch, powerpc should no longer be relying on need_flush_all.
As a safety net, however, force a full PID tlbie flush (RIC=2) if we ever
do find need_flush_all set. Also, don't reset the flag after an mmu gather flush.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/radix_tlb.c | 3 +--
 include/asm-generic/tlb.h            | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c 
b/arch/powerpc/mm/book3s64/radix_tlb.c
index f9a4d5793f03..a95175c0972b 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -995,7 +995,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
         * that flushes the process table entry cache upon process teardown.
         * See the comment for radix in arch_exit_mmap().
         */
-       if (tlb->fullmm) {
+       if (tlb->fullmm || tlb->need_flush_all) {
                __flush_all_mm(mm, true);
        } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->freed_tables)
@@ -1008,7 +1008,6 @@ void radix__tlb_flush(struct mmu_gather *tlb)
                else
                        radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
        }
-       tlb->need_flush_all = 0;
 }
 
 static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct 
*mm,
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 04c0644006fd..e64991142a8b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -428,7 +428,7 @@ static inline void tlb_change_page_size(struct mmu_gather 
*tlb,
 {
 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        if (tlb->page_size && tlb->page_size != page_size) {
-               if (!tlb->fullmm)
+               if (!tlb->fullmm && !tlb->need_flush_all)
                        tlb_flush_mmu(tlb);
        }
 
-- 
2.21.0

Reply via email to