When we are updating a PTE, we only need to flush the TLB mapping for
that PTE. Currently we do a full mm flush because we don't track the
page size. Update the interface to track the page size and use it to
do the right TLB flush.

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 16 ++++++++++------
 arch/powerpc/include/asm/book3s/64/radix.h   | 19 ++++++++-----------
 arch/powerpc/mm/pgtable-radix.c              |  2 +-
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ef2eef1ba99a..09869ad37aba 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -301,12 +301,16 @@ extern unsigned long pci_io_base;
 
 static inline unsigned long pte_update(struct mm_struct *mm, unsigned long 
addr,
                                       pte_t *ptep, unsigned long clr,
-                                      unsigned long set, int huge)
+                                      unsigned long set,
+                                      unsigned long pg_sz)
 {
+       bool huge = (pg_sz != PAGE_SIZE);
+
        if (radix_enabled())
-               return radix__pte_update(mm, addr, ptep, clr, set, huge);
+               return radix__pte_update(mm, addr, ptep, clr, set, pg_sz);
        return hash__pte_update(mm, addr, ptep, clr, set, huge);
 }
+
 /*
  * For hash even if we have _PAGE_ACCESSED = 0, we do a pte_update.
  * We currently remove entries from the hashtable regardless of whether
@@ -324,7 +328,7 @@ static inline int __ptep_test_and_clear_young(struct 
mm_struct *mm,
 
        if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 
0)
                return 0;
-       old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+       old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, PAGE_SIZE);
        return (old & _PAGE_ACCESSED) != 0;
 }
 
@@ -343,21 +347,21 @@ static inline void ptep_set_wrprotect(struct mm_struct 
*mm, unsigned long addr,
        if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
                return;
 
-       pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+       pte_update(mm, addr, ptep, _PAGE_WRITE, 0, PAGE_SIZE);
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
 {
-       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, PAGE_SIZE);
        return __pte(old);
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t * ptep)
 {
-       pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       pte_update(mm, addr, ptep, ~0UL, 0, PAGE_SIZE);
 }
 
 static inline int pte_write(pte_t pte)
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h 
b/arch/powerpc/include/asm/book3s/64/radix.h
index 279b2f68e00f..aec6e8ee6e27 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -129,15 +129,16 @@ static inline unsigned long __radix_pte_update(pte_t 
*ptep, unsigned long clr,
 
 
 static inline unsigned long radix__pte_update(struct mm_struct *mm,
-                                       unsigned long addr,
-                                       pte_t *ptep, unsigned long clr,
-                                       unsigned long set,
-                                       int huge)
+                                             unsigned long addr,
+                                             pte_t *ptep, unsigned long clr,
+                                             unsigned long set,
+                                             unsigned long pg_sz)
 {
        unsigned long old_pte;
 
        if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
 
+               int psize;
                unsigned long new_pte;
 
                old_pte = __radix_pte_update(ptep, ~0, 0);
@@ -146,18 +147,14 @@ static inline unsigned long radix__pte_update(struct 
mm_struct *mm,
                 * new value of pte
                 */
                new_pte = (old_pte | set) & ~clr;
-
-               /*
-                * For now let's do heavy pid flush
-                * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
-                */
-               radix__flush_tlb_mm(mm);
+               psize = radix_get_mmu_psize(pg_sz);
+               radix__flush_tlb_page_psize(mm, addr, psize);
 
                __radix_pte_update(ptep, 0, new_pte);
        } else
                old_pte = __radix_pte_update(ptep, clr, set);
        asm volatile("ptesync" : : : "memory");
-       if (!huge)
+       if (pg_sz == PAGE_SIZE)
                assert_pte_locked(mm, addr);
 
        return old_pte;
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 6b1ffc449158..735be6821e90 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -482,7 +482,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct 
*mm, unsigned long add
        assert_spin_locked(&mm->page_table_lock);
 #endif
 
-       old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
+       old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 
HPAGE_PMD_SIZE);
        trace_hugepage_update(addr, old, clr, set);
 
        return old;
-- 
2.10.2

Reply via email to