Le 12/09/2022 à 03:47, Rohan McLure a écrit :
> On creation and clearing of a page table mapping, instrument such calls
> by invoking page_table_check_pte_set and page_table_check_pte_clear
> respectively. These calls serve as a sanity check against illegal
> mappings.
> 
> Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK for all ppc64, and 32-bit
> platforms implementing Book3S.

Why only book3s on 32-bit?

> 
> Change pud_pfn to be a runtime bug rather than a build bug as it is
> consumed by page_table_check_pud_{clear,set} which are not called.

A runtime BUG() is to be banned here.

> 
> See also:
> 
> riscv support in commit 3fee229a8eb9 ("riscv/mm: enable 
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")

Long line.

Commit references must not be split across lines in Fixes: tags, but in the 
rest of the message they should be.

> arm64 in commit 42b2547137f5 ("arm64/mm: enable 
> ARCH_SUPPORTS_PAGE_TABLE_CHECK")
> x86_64 in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
>      check")
> 
> Signed-off-by: Rohan McLure <rmcl...@linux.ibm.com>
> ---
>   arch/powerpc/Kconfig                         | 1 +
>   arch/powerpc/include/asm/book3s/32/pgtable.h | 7 ++++++-
>   arch/powerpc/include/asm/book3s/64/pgtable.h | 9 ++++++++-
>   arch/powerpc/include/asm/nohash/32/pgtable.h | 5 ++++-
>   arch/powerpc/include/asm/nohash/64/pgtable.h | 2 ++
>   arch/powerpc/include/asm/nohash/pgtable.h    | 1 +
>   arch/powerpc/include/asm/pgtable.h           | 4 ++++
>   7 files changed, 26 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 4c466acdc70d..6c213ac46a92 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -149,6 +149,7 @@ config PPC
>       select ARCH_STACKWALK
>       select ARCH_SUPPORTS_ATOMIC_RMW
>       select ARCH_SUPPORTS_DEBUG_PAGEALLOC    if PPC_BOOK3S || PPC_8xx || 40x
> +     select ARCH_SUPPORTS_PAGE_TABLE_CHECK

In the commit message you said only book3s on PPC32.

>       select ARCH_USE_BUILTIN_BSWAP
>       select ARCH_USE_CMPXCHG_LOCKREF         if PPC64
>       select ARCH_USE_MEMTEST
> diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h 
> b/arch/powerpc/include/asm/book3s/32/pgtable.h
> index 40041ac713d9..3d05c8fe4604 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
> @@ -53,6 +53,8 @@
>   
>   #ifndef __ASSEMBLY__
>   
> +#include <linux/page_table_check.h>
> +
>   static inline bool pte_user(pte_t pte)
>   {
>       return pte_val(pte) & _PAGE_USER;
> @@ -353,7 +355,9 @@ static inline int __ptep_test_and_clear_young(struct 
> mm_struct *mm,
>   static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long 
> addr,
>                                      pte_t *ptep)
>   {
> -     return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
> +     unsigned long old = pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0);

Add a blank line here.


pte_update() return type is pte_basic_t, which can be more than a long 
on PPC32 (see asm/page_32.h):

        #ifdef CONFIG_PTE_64BIT
        typedef unsigned long long pte_basic_t;
        #else
        typedef unsigned long pte_basic_t;
        #endif

But why not just use pte_t instead, as you never use 'old' directly ?

> +     page_table_check_pte_clear(mm, addr, __pte(old));
> +     return __pte(old);
>   }
>   
>   #define __HAVE_ARCH_PTEP_SET_WRPROTECT
> @@ -541,6 +545,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t 
> newprot)
>   static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
>                               pte_t *ptep, pte_t pte, int percpu)
>   {
> +     page_table_check_pte_set(mm, addr, ptep, pte);
>   #if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
>       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use 
> the
>        * helper pte_update() which does an atomic update. We need to do that
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
> b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 8874f2a3661d..cbb7bd99c897 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -179,6 +179,8 @@
>   #define PAGE_AGP            (PAGE_KERNEL_NC)
>   
>   #ifndef __ASSEMBLY__
> +#include <linux/page_table_check.h>
> +
>   /*
>    * page table defines
>    */
> @@ -483,6 +485,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct 
> *mm,
>                                      unsigned long addr, pte_t *ptep)
>   {
>       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
> +     page_table_check_pte_clear(mm, addr, __pte(old));
>       return __pte(old);

Same, would be better to rewrite this function and use a 'pte_t old_pte'.

>   }
>   
> @@ -491,12 +494,15 @@ static inline pte_t ptep_get_and_clear_full(struct 
> mm_struct *mm,
>                                           unsigned long addr,
>                                           pte_t *ptep, int full)
>   {
> +     pte_t old_pte;

You don't need that variable on the full function scope, put it inside 
the { } below.

>       if (full && radix_enabled()) {
>               /*
>                * We know that this is a full mm pte clear and
>                * hence can be sure there is no parallel set_pte.
>                */
> -             return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
> +             old_pte = radix__ptep_get_and_clear_full(mm, addr, ptep, full);
> +             page_table_check_pte_clear(mm, addr, old_pte);

A blank line here would increase readability.

> +             return old_pte;
>       }
>       return ptep_get_and_clear(mm, addr, ptep);
>   }
> @@ -872,6 +878,7 @@ static inline void __set_pte_at(struct mm_struct *mm, 
> unsigned long addr,
>        */
>       pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
>   
> +     page_table_check_pte_set(mm, addr, ptep, pte);

A blank line here would increase readability as well.

>       if (radix_enabled())
>               return radix__set_pte_at(mm, addr, ptep, pte, percpu);
>       return hash__set_pte_at(mm, addr, ptep, pte, percpu);
> diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h 
> b/arch/powerpc/include/asm/nohash/32/pgtable.h
> index 9091e4904a6b..a3416cfb75d7 100644
> --- a/arch/powerpc/include/asm/nohash/32/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
> @@ -166,6 +166,7 @@ void unmap_kernel_page(unsigned long va);
>   #define _PAGE_CHG_MASK      (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | 
> _PAGE_SPECIAL)
>   
>   #ifndef __ASSEMBLY__
> +#include <linux/page_table_check.h>
>   
>   #define pte_clear(mm, addr, ptep) \
>       do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
> @@ -305,7 +306,9 @@ static inline int __ptep_test_and_clear_young(struct 
> mm_struct *mm,
>   static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long 
> addr,
>                                      pte_t *ptep)
>   {
> -     return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
> +     unsigned long old = pte_update(mm, addr, ptep, ~0, 0, 0);

Blank line required here.

> +     page_table_check_pte_clear(mm, addr, __pte(old));

A blank line here would increase readability.

> +     return __pte(old);
>   }
>   
>   #define __HAVE_ARCH_PTEP_SET_WRPROTECT
> diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h 
> b/arch/powerpc/include/asm/nohash/64/pgtable.h
> index 599921cc257e..5f2e10842a0d 100644
> --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
> @@ -83,6 +83,7 @@
>   #define H_PAGE_4K_PFN 0
>   
>   #ifndef __ASSEMBLY__
> +#include <linux/page_table_check.h>
>   /* pte_clear moved to later in this file */
>   
>   static inline pte_t pte_mkwrite(pte_t pte)
> @@ -244,6 +245,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct 
> *mm,
>                                      unsigned long addr, pte_t *ptep)
>   {
>       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);

Blank line.

> +     page_table_check_pte_clear(mm, addr, __pte(old));
>       return __pte(old);
>   }
>   
> diff --git a/arch/powerpc/include/asm/nohash/pgtable.h 
> b/arch/powerpc/include/asm/nohash/pgtable.h
> index b499da6c1a99..62b221b7cccf 100644
> --- a/arch/powerpc/include/asm/nohash/pgtable.h
> +++ b/arch/powerpc/include/asm/nohash/pgtable.h
> @@ -185,6 +185,7 @@ extern void set_pte_at(struct mm_struct *mm, unsigned 
> long addr, pte_t *ptep,
>   static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
>                               pte_t *ptep, pte_t pte, int percpu)
>   {
> +     page_table_check_pte_set(mm, addr, ptep, pte);

Blank line.

>       /* Second case is 32-bit with 64-bit PTE.  In this case, we
>        * can just store as long as we do the two halves in the right order
>        * with a barrier in between.
> diff --git a/arch/powerpc/include/asm/pgtable.h 
> b/arch/powerpc/include/asm/pgtable.h
> index 8c1f5feb9360..5e1032d12499 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -166,7 +166,11 @@ static inline int pud_pfn(pud_t pud)
>        * check so this should never be used. If it grows another user we
>        * want to know about it.
>        */
> +#ifndef CONFIG_PAGE_TABLE_CHECK
>       BUILD_BUG();
> +#else
> +     BUG();
> +#endif

Awful.

>       return 0;
>   }
>   

Reply via email to