Le 06/11/2022 à 23:49, Rohan McLure a écrit : > Replace occurrences of p{u,m,4}d_is_leaf with p{u,m,4}_leaf, as the > latter is the name given to checking that a higher-level entry in > multi-level paging contains a page translation entry (pte). This commit > allows for p{u,m,4}d_leaf to be used on all powerpc platforms. > > Prior to this commit, the two names have both been present in the > kernel, having as far as I can tell the same exact purpose. While the > 'is' in the title may better indicate that the macro/function is a > boolean returning check, the former naming scheme is standard through > all other architectures.
Would be easier to understand and review if you split it into two patches: 1/ Replace all uses of p{u,m,4}d_is_leaf by p{u,m,4}d_leaf 2/ Properly implement p{u,m,4}d_leaf and remove p{u,m,4}d_is_leaf > > 32-bit systems import pgtable-nop4d.h which defines a default pud_leaf. > Define pud_leaf preprocessor macro on both Book3E/S 32-bit to avoid > including the default definition in asm/pgtable.h. I think you should do it the other way round: Move it away from asm/pgtable.h. For pud_leaf(), you only have to add a stub in asm/nohash/64/pgtable.h For pmd_leaf(), you have to add a stub in asm/nohash/pgtable.h and asm/book3s/32/pgtable.h I think doing it like that would be cleaner. Christophe > > Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com> > --- > V4: new patch. > --- > arch/powerpc/include/asm/book3s/32/pgtable.h | 2 +- > arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ++++------ > arch/powerpc/include/asm/nohash/32/pgtable.h | 1 + > arch/powerpc/include/asm/pgtable.h | 18 +++++++++--------- > arch/powerpc/kvm/book3s_64_mmu_radix.c | 12 ++++++------ > arch/powerpc/mm/book3s64/radix_pgtable.c | 14 +++++++------- > arch/powerpc/mm/pgtable.c | 6 +++--- > arch/powerpc/mm/pgtable_64.c | 6 +++--- > arch/powerpc/xmon/xmon.c | 6 +++--- > 9 files changed, 37 insertions(+), 38 deletions(-) > > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h > b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 75823f39e042..f1b91ad8f3a5 100644 > --- a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -234,6 +234,7 @@ void unmap_kernel_page(unsigned long va); > #define pte_clear(mm, addr, ptep) \ > do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0) > > +#define pud_leaf pud_leaf > #define pmd_none(pmd) (!pmd_val(pmd)) > #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) > #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) > @@ -242,7 +243,6 @@ static inline void pmd_clear(pmd_t *pmdp) > *pmdp = __pmd(0); > } > > - > /* 
> * When flushing the tlb entry for a page, we also need to flush the hash > * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h > b/arch/powerpc/include/asm/book3s/64/pgtable.h > index c436d8422654..3f51de24e4fc 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -1426,16 +1426,14 @@ static inline bool is_pte_rw_upgrade(unsigned long > old_val, unsigned long new_va > /* > * Like pmd_huge() and pmd_large(), but works regardless of config options > */ > -#define pmd_is_leaf pmd_is_leaf > -#define pmd_leaf pmd_is_leaf > -static inline bool pmd_is_leaf(pmd_t pmd) > +#define pmd_leaf pmd_leaf > +static inline bool pmd_leaf(pmd_t pmd) > { > return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); > } > > -#define pud_is_leaf pud_is_leaf > -#define pud_leaf pud_is_leaf > -static inline bool pud_is_leaf(pud_t pud) > +#define pud_leaf pud_leaf > +static inline bool pud_leaf(pud_t pud) > { > return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); > } > diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h > b/arch/powerpc/include/asm/nohash/32/pgtable.h > index 0d40b33184eb..04a3b0b128eb 100644 > --- a/arch/powerpc/include/asm/nohash/32/pgtable.h > +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h > @@ -201,6 +201,7 @@ static inline pte_t pte_mkexec(pte_t pte) > } > #endif > > +#define pud_leaf pud_leaf > #define pmd_none(pmd) (!pmd_val(pmd)) > #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) > #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) > diff --git a/arch/powerpc/include/asm/pgtable.h > b/arch/powerpc/include/asm/pgtable.h > index 283f40d05a4d..8e7625a89922 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -134,25 +134,25 @@ static inline void pte_frag_set(mm_context_t *ctx, void > *p) > } > #endif > > -#ifndef pmd_is_leaf > -#define pmd_is_leaf pmd_is_leaf > -static inline 
bool pmd_is_leaf(pmd_t pmd) > +#ifndef pmd_leaf > +#define pmd_leaf pmd_leaf > +static inline bool pmd_leaf(pmd_t pmd) > { > return false; > } > #endif > > -#ifndef pud_is_leaf > -#define pud_is_leaf pud_is_leaf > -static inline bool pud_is_leaf(pud_t pud) > +#ifndef pud_leaf > +#define pud_leaf pud_leaf > +static inline bool pud_leaf(pud_t pud) > { > return false; > } > #endif > > -#ifndef p4d_is_leaf > -#define p4d_is_leaf p4d_is_leaf > -static inline bool p4d_is_leaf(p4d_t p4d) > +#ifndef p4d_leaf > +#define p4d_leaf p4d_leaf > +static inline bool p4d_leaf(p4d_t p4d) > { > return false; > } > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c > b/arch/powerpc/kvm/book3s_64_mmu_radix.c > index 5d5e12f3bf86..d29f8d1d97a6 100644 > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > @@ -497,7 +497,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t > *pmd, bool full, > for (im = 0; im < PTRS_PER_PMD; ++im, ++p) { > if (!pmd_present(*p)) > continue; > - if (pmd_is_leaf(*p)) { > + if (pmd_leaf(*p)) { > if (full) { > pmd_clear(p); > } else { > @@ -526,7 +526,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t > *pud, > for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) { > if (!pud_present(*p)) > continue; > - if (pud_is_leaf(*p)) { > + if (pud_leaf(*p)) { > pud_clear(p); > } else { > pmd_t *pmd; > @@ -629,12 +629,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, > pte_t pte, > new_pud = pud_alloc_one(kvm->mm, gpa); > > pmd = NULL; > - if (pud && pud_present(*pud) && !pud_is_leaf(*pud)) > + if (pud && pud_present(*pud) && !pud_leaf(*pud)) > pmd = pmd_offset(pud, gpa); > else if (level <= 1) > new_pmd = kvmppc_pmd_alloc(); > > - if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd))) > + if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd))) > new_ptep = kvmppc_pte_alloc(); > > /* Check if we might have been invalidated; let the guest retry if so */ > @@ -652,7 +652,7 @@ int 
kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, > pte_t pte, > new_pud = NULL; > } > pud = pud_offset(p4d, gpa); > - if (pud_is_leaf(*pud)) { > + if (pud_leaf(*pud)) { > unsigned long hgpa = gpa & PUD_MASK; > > /* Check if we raced and someone else has set the same thing */ > @@ -703,7 +703,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, > pte_t pte, > new_pmd = NULL; > } > pmd = pmd_offset(pud, gpa); > - if (pmd_is_leaf(*pmd)) { > + if (pmd_leaf(*pmd)) { > unsigned long lgpa = gpa & PMD_MASK; > > /* Check if we raced and someone else has set the same thing */ > diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c > b/arch/powerpc/mm/book3s64/radix_pgtable.c > index cac727b01799..8ac27e031ff4 100644 > --- a/arch/powerpc/mm/book3s64/radix_pgtable.c > +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c > @@ -205,14 +205,14 @@ static void radix__change_memory_range(unsigned long > start, unsigned long end, > pudp = pud_alloc(&init_mm, p4dp, idx); > if (!pudp) > continue; > - if (pud_is_leaf(*pudp)) { > + if (pud_leaf(*pudp)) { > ptep = (pte_t *)pudp; > goto update_the_pte; > } > pmdp = pmd_alloc(&init_mm, pudp, idx); > if (!pmdp) > continue; > - if (pmd_is_leaf(*pmdp)) { > + if (pmd_leaf(*pmdp)) { > ptep = pmdp_ptep(pmdp); > goto update_the_pte; > } > @@ -762,7 +762,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, > unsigned long addr, > if (!pmd_present(*pmd)) > continue; > > - if (pmd_is_leaf(*pmd)) { > + if (pmd_leaf(*pmd)) { > if (!IS_ALIGNED(addr, PMD_SIZE) || > !IS_ALIGNED(next, PMD_SIZE)) { > WARN_ONCE(1, "%s: unaligned range\n", __func__); > @@ -792,7 +792,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, > unsigned long addr, > if (!pud_present(*pud)) > continue; > > - if (pud_is_leaf(*pud)) { > + if (pud_leaf(*pud)) { > if (!IS_ALIGNED(addr, PUD_SIZE) || > !IS_ALIGNED(next, PUD_SIZE)) { > WARN_ONCE(1, "%s: unaligned range\n", __func__); > @@ -825,7 +825,7 @@ static void __meminit remove_pagetable(unsigned long > start, 
unsigned long end) > if (!p4d_present(*p4d)) > continue; > > - if (p4d_is_leaf(*p4d)) { > + if (p4d_leaf(*p4d)) { > if (!IS_ALIGNED(addr, P4D_SIZE) || > !IS_ALIGNED(next, P4D_SIZE)) { > WARN_ONCE(1, "%s: unaligned range\n", __func__); > @@ -1088,7 +1088,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t > prot) > > int pud_clear_huge(pud_t *pud) > { > - if (pud_is_leaf(*pud)) { > + if (pud_leaf(*pud)) { > pud_clear(pud); > return 1; > } > @@ -1135,7 +1135,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t > prot) > > int pmd_clear_huge(pmd_t *pmd) > { > - if (pmd_is_leaf(*pmd)) { > + if (pmd_leaf(*pmd)) { > pmd_clear(pmd); > return 1; > } > diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c > index cb2dcdb18f8e..35b9677b9553 100644 > --- a/arch/powerpc/mm/pgtable.c > +++ b/arch/powerpc/mm/pgtable.c > @@ -387,7 +387,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, > if (p4d_none(p4d)) > return NULL; > > - if (p4d_is_leaf(p4d)) { > + if (p4d_leaf(p4d)) { > ret_pte = (pte_t *)p4dp; > goto out; > } > @@ -409,7 +409,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, > if (pud_none(pud)) > return NULL; > > - if (pud_is_leaf(pud)) { > + if (pud_leaf(pud)) { > ret_pte = (pte_t *)pudp; > goto out; > } > @@ -448,7 +448,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, > goto out; > } > > - if (pmd_is_leaf(pmd)) { > + if (pmd_leaf(pmd)) { > ret_pte = (pte_t *)pmdp; > goto out; > } > diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c > index 5ac1fd30341b..0604c80dae66 100644 > --- a/arch/powerpc/mm/pgtable_64.c > +++ b/arch/powerpc/mm/pgtable_64.c > @@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift); > /* 4 level page table */ > struct page *p4d_page(p4d_t p4d) > { > - if (p4d_is_leaf(p4d)) { > + if (p4d_leaf(p4d)) { > if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) > VM_WARN_ON(!p4d_huge(p4d)); > return pte_page(p4d_pte(p4d)); > @@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d) > 
> struct page *pud_page(pud_t pud) > { > - if (pud_is_leaf(pud)) { > + if (pud_leaf(pud)) { > if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) > VM_WARN_ON(!pud_huge(pud)); > return pte_page(pud_pte(pud)); > @@ -125,7 +125,7 @@ struct page *pud_page(pud_t pud) > */ > struct page *pmd_page(pmd_t pmd) > { > - if (pmd_is_leaf(pmd)) { > + if (pmd_leaf(pmd)) { > /* > * vmalloc_to_page may be called on any vmap address (not only > * vmalloc), and it uses pmd_page() etc., when huge vmap is > diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c > index f51c882bf902..705c230dd4f5 100644 > --- a/arch/powerpc/xmon/xmon.c > +++ b/arch/powerpc/xmon/xmon.c > @@ -3342,7 +3342,7 @@ static void show_pte(unsigned long addr) > return; > } > > - if (p4d_is_leaf(*p4dp)) { > + if (p4d_leaf(*p4dp)) { > format_pte(p4dp, p4d_val(*p4dp)); > return; > } > @@ -3356,7 +3356,7 @@ static void show_pte(unsigned long addr) > return; > } > > - if (pud_is_leaf(*pudp)) { > + if (pud_leaf(*pudp)) { > format_pte(pudp, pud_val(*pudp)); > return; > } > @@ -3370,7 +3370,7 @@ static void show_pte(unsigned long addr) > return; > } > > - if (pmd_is_leaf(*pmdp)) { > + if (pmd_leaf(*pmdp)) { > format_pte(pmdp, pmd_val(*pmdp)); > return; > }