When the mm is being torn down, there will be a full PID flush, so there is no need to flush the TLB on page size changes.
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/tlb.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 5d3107f2b014..d1b3dc4a6a0a 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -84,6 +84,9 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
+	if (tlb->fullmm)
+		return;
+
 	if (!tlb->page_size)
 		tlb->page_size = page_size;
 	else if (tlb->page_size != page_size) {
--
2.17.0
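
[Note below the patch, for readers following along: this is a minimal standalone sketch, not kernel code, of the behaviour the hunk introduces. The names mmu_gather_model, check_page_size_change and flush_pending are made up for illustration; flush_pending merely stands in for the real TLB flush that the mmu_gather path would issue on a page size change.]

/* Standalone model: skip the page-size-change flush when the whole mm
 * is being torn down, since teardown ends in a full PID flush anyway. */
#include <stdbool.h>
#include <stdio.h>

struct mmu_gather_model {
	bool fullmm;			/* whole mm is being torn down */
	unsigned int page_size;		/* page size gathered so far */
	bool flush_pending;		/* stand-in for a real TLB flush */
};

static void check_page_size_change(struct mmu_gather_model *tlb,
				   unsigned int page_size)
{
	/* With the patch applied: full-mm teardown needs no intermediate
	 * flush here, the final full PID flush covers it. */
	if (tlb->fullmm)
		return;

	if (!tlb->page_size)
		tlb->page_size = page_size;
	else if (tlb->page_size != page_size) {
		tlb->flush_pending = true;	/* would flush the TLB here */
		tlb->page_size = page_size;
	}
}

int main(void)
{
	struct mmu_gather_model teardown = { .fullmm = true };

	/* Mixing 4K and 2M entries during a full-mm teardown... */
	check_page_size_change(&teardown, 4096);
	check_page_size_change(&teardown, 2 * 1024 * 1024);

	/* ...no longer forces an intermediate flush: prints 0. */
	printf("flush_pending = %d\n", teardown.flush_pending);
	return 0;
}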