To provide support for page table check on powerpc, we need to reinstate the address parameter in several functions, including page_table_check_{ptes,pmds,puds}_set().
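
For illustration only, the rough shape this prepares for is sketched below; the page_table_check_{ptes,pmds,puds}_set() prototypes taking an address are an assumption about the later page table check patches in this series, not something changed by this patch:

    static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned int nr,
                                        unsigned long pgsize)
    {
            switch (pgsize) {
            case PAGE_SIZE:
                    /* assumed future prototype: addr forwarded to the checker */
                    page_table_check_ptes_set(mm, addr, ptep, pte, nr);
                    break;
            case PMD_SIZE:
                    page_table_check_pmds_set(mm, addr, (pmd_t *)ptep, pte_pmd(pte), nr);
                    break;
            case PUD_SIZE:
                    page_table_check_puds_set(mm, addr, (pud_t *)ptep, pte_pud(pte), nr);
                    break;
            }
            /* ... remainder of the function is unchanged by this patch ... */
    }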
In preparation for this, add the addr parameter to arm64's __set_ptes_anysz() and its callers, __set_ptes(), __set_pmds() and __set_puds(). While this parameter won't (at present) be used on arm64, this will keep the usage of the page table check interfaces consistent.

Signed-off-by: Andrew Donnellan <a...@linux.ibm.com>
---
v15: new patch
---
 arch/arm64/include/asm/pgtable.h | 19 ++++++++-----------
 arch/arm64/mm/hugetlbpage.c      |  8 ++++----
 2 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 192d86e1cc76..acbcb5e883ce 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -712,8 +712,8 @@ static inline pgprot_t pud_pgprot(pud_t pud)
 	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
 }
 
-static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
-				    pte_t pte, unsigned int nr,
+static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep, pte_t pte, unsigned int nr,
 				    unsigned long pgsize)
 {
 	unsigned long stride = pgsize >> PAGE_SHIFT;
@@ -748,26 +748,23 @@ static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
 	__set_pte_complete(pte);
 }
 
-static inline void __set_ptes(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
+static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte, unsigned int nr)
 {
-	__set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
+	__set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
 }
 
-static inline void __set_pmds(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
+static inline void __set_pmds(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd, unsigned int nr)
 {
-	__set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
+	__set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
 }
 #define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
 
-static inline void __set_puds(struct mm_struct *mm,
-			      unsigned long __always_unused addr,
+static inline void __set_puds(struct mm_struct *mm, unsigned long addr,
 			      pud_t *pudp, pud_t pud, unsigned int nr)
 {
-	__set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
+	__set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
 }
 #define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
 
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0c8737f4f2ce..1003b5020752 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -226,7 +226,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (!pte_present(pte)) {
 		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
-			__set_ptes_anysz(mm, ptep, pte, 1, pgsize);
+			__set_ptes_anysz(mm, addr, ptep, pte, 1, pgsize);
 		return;
 	}
 
@@ -234,7 +234,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
 		clear_flush(mm, addr, ptep, pgsize, ncontig);
 
-	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+	__set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -449,7 +449,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	if (pte_young(orig_pte))
 		pte = pte_mkyoung(pte);
 
-	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+	__set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
 	return 1;
 }
 
@@ -473,7 +473,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 	pte = pte_wrprotect(pte);
 
-	__set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
+	__set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
 }
 
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
-- 
2.49.0