This is a note to let you know that I've just added the patch titled

    asm-generic/tlb: Track which levels of the page tables have been cleared

to the 4.19-stable tree which can be found at:
    
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     
asm-generic-tlb-track-which-levels-of-the-page-tables-have-been-cleared.patch
and it can be found in the queue-4.19 subdirectory.

If you, or anyone else, feel it should not be added to the stable tree,
please let <sta...@vger.kernel.org> know about it.


From foo@baz Mon Jan  4 01:45:29 PM CET 2021
From: Santosh Sivaraj <sant...@fossix.org>
Date: Thu, 12 Mar 2020 18:57:36 +0530
Subject: asm-generic/tlb: Track which levels of the page tables have been cleared
To: <sta...@vger.kernel.org>, linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
Cc: Michael Ellerman <m...@ellerman.id.au>, Greg KH <g...@kroah.com>, Sasha Levin <sas...@kernel.org>, Will Deacon <will.dea...@arm.com>
Message-ID: <20200312132740.225241-3-sant...@fossix.org>

From: Will Deacon <will.dea...@arm.com>

commit a6d60245d6d9b1caf66b0d94419988c4836980af upstream

It is common for architectures with hugepage support to require only a
single TLB invalidation operation per hugepage during unmap(), rather than
iterating through the mapping in PAGE_SIZE increments. Currently,
however, the page-table level at which the unmap() operation occurs is
not stored in the mmu_gather structure, forcing architectures either to
issue additional TLB invalidation operations or to give up and
over-invalidate by e.g. invalidating the entire TLB.

Ideally, we could add an interval rbtree to the mmu_gather structure,
which would allow us to associate the correct mapping granule with the
various sub-mappings within the range being invalidated. However, this
is costly in terms of book-keeping and memory management, so instead we
approximate by keeping track of the page table levels that are cleared
and provide a means to query the smallest granule required for invalidation.
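
As an illustration (not part of this patch), an architecture's tlb_flush()
could consume the new helpers by reading the smallest cleared granule and
walking the gathered range at that stride. In the sketch below,
example_arch_tlb_flush() and flush_one_entry() are hypothetical names used
only for illustration; tlb_get_unmap_size() and the tlb->start/tlb->end
bookkeeping are the pieces provided by the generic mmu_gather code.

/*
 * Illustrative sketch only: invalidate at the granule reported by the
 * mmu_gather rather than unconditionally at PAGE_SIZE.
 * flush_one_entry() stands in for an arch-specific per-entry
 * invalidation primitive.
 */
#include <asm/tlb.h>    /* arch header, pulls in asm-generic/tlb.h */

static void flush_one_entry(unsigned long addr);        /* hypothetical arch hook */

static inline void example_arch_tlb_flush(struct mmu_gather *tlb)
{
        /* Smallest granule cleared in this gather, e.g. PMD_SIZE for THP. */
        unsigned long stride = tlb_get_unmap_size(tlb);
        unsigned long addr;

        /*
         * For a hugepage-only unmap this issues one invalidation per huge
         * mapping instead of one per PAGE_SIZE within it.
         */
        for (addr = tlb->start; addr < tlb->end; addr += stride)
                flush_one_entry(addr);
}

Since tlb->start and tlb->end are maintained by __tlb_adjust_range(), the
loop covers exactly the gathered range; the helper only reports the
smallest level that was cleared, and falls back to PAGE_SHIFT when nothing
has been recorded.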

Signed-off-by: Will Deacon <will.dea...@arm.com>
Cc: <sta...@vger.kernel.org> # 4.19
Signed-off-by: Santosh Sivaraj <sant...@fossix.org>
[santosh: prerequisite for upcoming tlbflush backports]
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 include/asm-generic/tlb.h |   58 +++++++++++++++++++++++++++++++++++++++-------
 mm/memory.c               |    4 ++-
 2 files changed, 53 insertions(+), 9 deletions(-)

--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -114,6 +114,14 @@ struct mmu_gather {
         */
        unsigned int            freed_tables : 1;
 
+       /*
+        * at which levels have we cleared entries?
+        */
+       unsigned int            cleared_ptes : 1;
+       unsigned int            cleared_pmds : 1;
+       unsigned int            cleared_puds : 1;
+       unsigned int            cleared_p4ds : 1;
+
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];
@@ -148,6 +156,10 @@ static inline void __tlb_reset_range(str
                tlb->end = 0;
        }
        tlb->freed_tables = 0;
+       tlb->cleared_ptes = 0;
+       tlb->cleared_pmds = 0;
+       tlb->cleared_puds = 0;
+       tlb->cleared_p4ds = 0;
 }
 
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -197,6 +209,25 @@ static inline void tlb_remove_check_page
 }
 #endif
 
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+       if (tlb->cleared_ptes)
+               return PAGE_SHIFT;
+       if (tlb->cleared_pmds)
+               return PMD_SHIFT;
+       if (tlb->cleared_puds)
+               return PUD_SHIFT;
+       if (tlb->cleared_p4ds)
+               return P4D_SHIFT;
+
+       return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+       return 1UL << tlb_get_unmap_shift(tlb);
+}
+
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush.  When we're doing a munmap,
@@ -230,13 +261,19 @@ static inline void tlb_remove_check_page
 #define tlb_remove_tlb_entry(tlb, ptep, address)               \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
+               tlb->cleared_ptes = 1;                          \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)            \
-       do {                                                         \
-               __tlb_adjust_range(tlb, address, huge_page_size(h)); \
-               __tlb_remove_tlb_entry(tlb, ptep, address);          \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
+       do {                                                    \
+               unsigned long _sz = huge_page_size(h);          \
+               __tlb_adjust_range(tlb, address, _sz);          \
+               if (_sz == PMD_SIZE)                            \
+                       tlb->cleared_pmds = 1;                  \
+               else if (_sz == PUD_SIZE)                       \
+                       tlb->cleared_puds = 1;                  \
+               __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
 /**
@@ -250,6 +287,7 @@ static inline void tlb_remove_check_page
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)                   \
        do {                                                            \
                __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);       \
+               tlb->cleared_pmds = 1;                                  \
                __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
        } while (0)
 
@@ -264,6 +302,7 @@ static inline void tlb_remove_check_page
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)                   \
        do {                                                            \
                __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);       \
+               tlb->cleared_puds = 1;                                  \
                __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
        } while (0)
 
@@ -289,7 +328,8 @@ static inline void tlb_remove_check_page
 #define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
-               tlb->freed_tables = 1;                  \
+               tlb->freed_tables = 1;                          \
+               tlb->cleared_pmds = 1;                          \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
 #endif
@@ -298,7 +338,8 @@ static inline void tlb_remove_check_page
 #define pmd_free_tlb(tlb, pmdp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
-               tlb->freed_tables = 1;                  \
+               tlb->freed_tables = 1;                          \
+               tlb->cleared_puds = 1;                          \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
 #endif
@@ -308,7 +349,8 @@ static inline void tlb_remove_check_page
 #define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
-               tlb->freed_tables = 1;                  \
+               tlb->freed_tables = 1;                          \
+               tlb->cleared_p4ds = 1;                          \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
@@ -319,7 +361,7 @@ static inline void tlb_remove_check_page
 #define p4d_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
-               tlb->freed_tables = 1;                  \
+               tlb->freed_tables = 1;                          \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -279,8 +279,10 @@ void arch_tlb_finish_mmu(struct mmu_gath
 {
        struct mmu_gather_batch *batch, *next;
 
-       if (force)
+       if (force) {
+               __tlb_reset_range(tlb);
                __tlb_adjust_range(tlb, start, end - start);
+       }
 
        tlb_flush_mmu(tlb);
 


Patches currently in stable-queue which might be from sant...@fossix.org are

queue-4.19/asm-generic-tlb-track-which-levels-of-the-page-tables-have-been-cleared.patch
queue-4.19/asm-generic-tlb-track-freeing-of-page-table-directories-in-struct-mmu_gather.patch
queue-4.19/asm-generic-tlb-avoid-potential-double-flush.patch
queue-4.19/mm-mmu_gather-invalidate-tlb-correctly-on-batch-allocation-failure-and-flush.patch
queue-4.19/powerpc-mmu_gather-enable-rcu_table_free-even-for-smp-case.patch
queue-4.19/asm-generic-tlb-arch-invert-config_have_rcu_table_invalidate.patch
