On Wed, Mar 06, 2019 at 03:50:16PM +0000, Steven Price wrote:
> walk_page_range() is going to be allowed to walk page tables other than
> those of user space. For this it needs to know when it has reached a
> 'leaf' entry in the page tables. This information is provided by the
> p?d_large() functions/macros.
> 
> For powerpc pmd_large() was already implemented, so hoist it out of the
> CONFIG_TRANSPARENT_HUGEPAGE condition and implement the other levels.
> 
> Also, since pmd_large() is now always implemented, we can drop the
> pmd_is_leaf() function.
> 
> CC: Benjamin Herrenschmidt <b...@kernel.crashing.org>
> CC: Paul Mackerras <pau...@samba.org>
> CC: Michael Ellerman <m...@ellerman.id.au>
> CC: linuxppc-dev@lists.ozlabs.org
> CC: kvm-...@vger.kernel.org
> Signed-off-by: Steven Price <steven.pr...@arm.com>
> ---
>  arch/powerpc/include/asm/book3s/64/pgtable.h | 30 ++++++++++++++------

There is one more definition of pmd_large() in
arch/powerpc/include/asm/pgtable.h that this patch does not update.

>  arch/powerpc/kvm/book3s_64_mmu_radix.c       | 12 ++------
>  2 files changed, 24 insertions(+), 18 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
> b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index c9bfe526ca9d..c4b29caf2a3b 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -907,6 +907,12 @@ static inline int pud_present(pud_t pud)
>       return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
>  }
> 
> +#define pud_large    pud_large
> +static inline int pud_large(pud_t pud)
> +{
> +     return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
> +}
> +
>  extern struct page *pud_page(pud_t pud);
>  extern struct page *pmd_page(pmd_t pmd);
>  static inline pte_t pud_pte(pud_t pud)
> @@ -954,6 +960,12 @@ static inline int pgd_present(pgd_t pgd)
>       return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
>  }
> 
> +#define pgd_large    pgd_large
> +static inline int pgd_large(pgd_t pgd)
> +{
> +     return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
> +}
> +
>  static inline pte_t pgd_pte(pgd_t pgd)
>  {
>       return __pte_raw(pgd_raw(pgd));
> @@ -1107,6 +1119,15 @@ static inline bool pmd_access_permitted(pmd_t pmd, 
> bool write)
>       return pte_access_permitted(pmd_pte(pmd), write);
>  }
> 
> +#define pmd_large    pmd_large
> +/*
> + * returns true for pmd migration entries, THP, devmap, hugetlb
> + */
> +static inline int pmd_large(pmd_t pmd)
> +{
> +     return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
> +}
> +
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
>  extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
> @@ -1133,15 +1154,6 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned 
> long addr, pmd_t *pmdp,
>       return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
>  }
> 
> -/*
> - * returns true for pmd migration entries, THP, devmap, hugetlb
> - * But compile time dependent on THP config
> - */
> -static inline int pmd_large(pmd_t pmd)
> -{
> -     return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
> -}
> -
>  static inline pmd_t pmd_mknotpresent(pmd_t pmd)
>  {
>       return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c 
> b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> index 1b821c6efdef..040db20ac2ab 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> @@ -363,12 +363,6 @@ static void kvmppc_pte_free(pte_t *ptep)
>       kmem_cache_free(kvm_pte_cache, ptep);
>  }
> 
> -/* Like pmd_huge() and pmd_large(), but works regardless of config options */
> -static inline int pmd_is_leaf(pmd_t pmd)
> -{
> -     return !!(pmd_val(pmd) & _PAGE_PTE);
> -}
> -
>  static pmd_t *kvmppc_pmd_alloc(void)
>  {
>       return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
> @@ -455,7 +449,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t 
> *pmd, bool full,
>       for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
>               if (!pmd_present(*p))
>                       continue;
> -             if (pmd_is_leaf(*p)) {
> +             if (pmd_large(*p)) {
>                       if (full) {
>                               pmd_clear(p);
>                       } else {
> @@ -588,7 +582,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>       else if (level <= 1)
>               new_pmd = kvmppc_pmd_alloc();
> 
> -     if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
> +     if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_large(*pmd)))
>               new_ptep = kvmppc_pte_alloc();
> 
>       /* Check if we might have been invalidated; let the guest retry if so */
> @@ -657,7 +651,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, 
> pte_t pte,
>               new_pmd = NULL;
>       }
>       pmd = pmd_offset(pud, gpa);
> -     if (pmd_is_leaf(*pmd)) {
> +     if (pmd_large(*pmd)) {
>               unsigned long lgpa = gpa & PMD_MASK;
> 
>               /* Check if we raced and someone else has set the same thing */
> -- 
> 2.20.1
> 

-- 
Sincerely yours,
Mike.

Reply via email to