With shared mappings, even when unmapping a large range, the kernel
forces a TLB flush with the PTL lock held to avoid the race described in
commit 1cf35d47712d ("mm: split 'tlb_flush_mmu()' into tlb flushing and
memory freeing parts"). This results in the kernel issuing a high number
of TLB flushes even for a large range. Improve this by making the kernel
switch to a PID-based flush when it is unmapping a 2M range.
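For illustration, a minimal user-space sketch (not kernel code) of the
flush-selection heuristic is below. The ceiling value and the '>='
comparison mirror this patch; the 64K page shift, the use_pid_flush()
helper, and the main() harness are assumptions made for the example.

/*
 * Sketch of the flush-selection heuristic, assuming a 64K base page
 * size. With the ceiling lowered from 33 to 32 and the comparison
 * changed from '>' to '>=', a 2M unmap (32 x 64K pages) now takes
 * the PID-based flush path instead of 32 single-page flushes.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT_64K	16	/* 64K pages (assumed for the example) */

static unsigned long tlb_single_page_flush_ceiling = 32;

static bool use_pid_flush(unsigned long start, unsigned long end)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT_64K;

	/* '>=' (was '>') so exactly 32 pages also flushes by PID */
	return nr_pages >= tlb_single_page_flush_ceiling;
}

int main(void)
{
	/* 2M range: 32 pages -> PID flush after this patch */
	printf("2M unmap uses PID flush: %d\n",
	       use_pid_flush(0, 2UL << 20));
	return 0;
}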

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/mm/book3s64/radix_tlb.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index aefc100d79a7..21d0f098e43b 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1106,7 +1106,7 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
  * invalidating a full PID, so it has a far lower threshold to change from
  * individual page flushes to full-pid flushes.
  */
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 32;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
 
 static inline void __radix__flush_tlb_range(struct mm_struct *mm,
@@ -1133,7 +1133,7 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
        if (fullmm)
                flush_pid = true;
        else if (type == FLUSH_TYPE_GLOBAL)
-               flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+               flush_pid = nr_pages >= tlb_single_page_flush_ceiling;
        else
                flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
        /*
@@ -1335,7 +1335,7 @@ static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
        if (fullmm)
                flush_pid = true;
        else if (type == FLUSH_TYPE_GLOBAL)
-               flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+               flush_pid = nr_pages >= tlb_single_page_flush_ceiling;
        else
                flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
 
@@ -1505,7 +1505,7 @@ void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
                        continue;
 
                nr_pages = (end - start) >> def->shift;
-               flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+               flush_pid = nr_pages >= tlb_single_page_flush_ceiling;
 
                /*
                 * If the number of pages spanning the range is above
-- 
2.31.1
