We want to switch pte_update to use a VA-based TLB flush. In order to do that we need to track the page size. With hugetlb we currently don't have the page size available in these functions. Hence switch hugetlb to use separate functions for the update. In a later patch we will update the hugetlb functions to take a vm_area_struct, from which we can derive the page size. After that we will switch this back to using pte_update.
Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com> --- arch/powerpc/include/asm/book3s/64/hugetlb.h | 42 ++++++++++++++++++++++++++++ arch/powerpc/include/asm/book3s/64/pgtable.h | 9 ------ arch/powerpc/include/asm/hugetlb.h | 2 +- 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index d9c283f95e05..9a64f356a8e8 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -30,4 +30,46 @@ static inline int hstate_get_psize(struct hstate *hstate) return mmu_virtual_psize; } } + +static inline unsigned long huge_pte_update(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long clr, + unsigned long set) +{ + if (radix_enabled()) { + unsigned long old_pte; + + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + + unsigned long new_pte; + + old_pte = __radix_pte_update(ptep, ~0, 0); + asm volatile("ptesync" : : : "memory"); + /* + * new value of pte + */ + new_pte = (old_pte | set) & ~clr; + /* + * For now let's do heavy pid flush + * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize); + */ + radix__flush_tlb_mm(mm); + + __radix_pte_update(ptep, 0, new_pte); + } else + old_pte = __radix_pte_update(ptep, clr, set); + asm volatile("ptesync" : : : "memory"); + return old_pte; + } + return hash__pte_update(mm, addr, ptep, clr, set, true); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) + return; + + huge_pte_update(mm, addr, ptep, _PAGE_WRITE, 0); +} + #endif diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 46d739457d68..ef2eef1ba99a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -346,15 +346,6 @@ static inline void ptep_set_wrprotect(struct mm_struct 
*mm, unsigned long addr, pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); } -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); -} - #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index c03e0a3dd4d8..058d6311de87 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -136,7 +136,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { #ifdef CONFIG_PPC64 - return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); + return __pte(huge_pte_update(mm, addr, ptep, ~0UL, 0)); #else return __pte(pte_update(ptep, ~0UL, 0)); #endif -- 2.10.2