This patch makes sure we update the mmu_gather page size even if we are
requesting a fullmm flush. This avoids triggering the VM_WARN_ON in code
paths like __tlb_remove_page_size() that explicitly check that the page
size of the range being removed matches the mmu_gather page size.
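For reference, the warning in question is the page-size consistency check
in the generic __tlb_remove_page_size(); a rough sketch (not the exact
upstream source) of the relevant part:

	bool __tlb_remove_page_size(struct mmu_gather *tlb,
				    struct page *page, int page_size)
	{
		VM_BUG_ON(!tlb->end);
		/* the page being removed must match the gather's page size */
		VM_WARN_ON(tlb->page_size != page_size);
		...
	}

Because the old code returned early for fullmm, tlb->page_size was never
updated, so a later removal with a real page size could trip this warning.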

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/include/asm/tlb.h | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 97ecef697e1b..f0e571b2dc7c 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -49,13 +49,11 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                                                     unsigned int page_size)
 {
-       if (tlb->fullmm)
-               return;
-
        if (!tlb->page_size)
                tlb->page_size = page_size;
        else if (tlb->page_size != page_size) {
-               tlb_flush_mmu(tlb);
+               if (!tlb->fullmm)
+                       tlb_flush_mmu(tlb);
                /*
                 * update the page size after flush for the new
                 * mmu_gather.
-- 
2.17.1
