There used to be a dependency on _PAGE_PRIVILEGED with pte_savedwrite,
but that was dropped by
commit 6a56ccbcf6c6 ("mm/autonuma: use can_change_(pte|pmd)_writable() to replace savedwrite")

With the change in this patch, a NUMA fault pte (pte_protnone()) gets mapped
as a regular user pte with RWX cleared (no access), whereas earlier it was
mapped _PAGE_PRIVILEGED.

WARN_ON checks are added to the hash fault handling code because those
functions are not expected to be called with _PAGE_READ cleared.
commit 18061c17c8ec ("powerpc/mm: Update PROTFAULT handling in the page fault path")
explains the details.

Also revert commit 1abce0580b89 ("powerpc/64s: Fix __pte_needs_flush() false positive warning")

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h  | 9 +++------
 arch/powerpc/include/asm/book3s/64/tlbflush.h | 9 ++-------
 arch/powerpc/mm/book3s64/hash_utils.c         | 7 +++++++
 3 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index cb77eddca54b..2cc58ac74080 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -17,12 +17,6 @@
 #define _PAGE_EXEC             0x00001 /* execute permission */
 #define _PAGE_WRITE            0x00002 /* write access allowed */
 #define _PAGE_READ             0x00004 /* read access allowed */
-#define _PAGE_NA               _PAGE_PRIVILEGED
-#define _PAGE_NAX              _PAGE_EXEC
-#define _PAGE_RO               _PAGE_READ
-#define _PAGE_ROX              (_PAGE_READ | _PAGE_EXEC)
-#define _PAGE_RW               (_PAGE_READ | _PAGE_WRITE)
-#define _PAGE_RWX              (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define _PAGE_PRIVILEGED       0x00008 /* kernel access only */
 #define _PAGE_SAO              0x00010 /* Strong access order */
 #define _PAGE_NON_IDEMPOTENT   0x00020 /* non idempotent memory */
@@ -529,6 +523,9 @@ static inline bool pte_user(pte_t pte)
 }
 
 #define pte_access_permitted pte_access_permitted
+/*
+ * execute-only mappings return false
+ */
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
        /*
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 1950c1b825b4..fd642b729775 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -158,11 +158,6 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
         */
 }
 
-static inline bool __pte_protnone(unsigned long pte)
-{
-       return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
-}
-
 static inline bool __pte_flags_need_flush(unsigned long oldval,
                                          unsigned long newval)
 {
@@ -179,8 +174,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
        /*
         * We do not expect kernel mappings or non-PTEs or not-present PTEs.
         */
-       VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
-       VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
        VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
        VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
        VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index ad2afa08e62e..0626a25b0d72 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -310,9 +310,16 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags
                        else
                                rflags |= 0x3;
                }
+               VM_WARN_ONCE(!(pteflags & _PAGE_RWX), "no-access mapping request");
        } else {
                if (pteflags & _PAGE_RWX)
                        rflags |= 0x2;
+               /*
+                * We should never hit this in normal fault handling because
+                * a permission check (check_pte_access()) will bubble this
+                * to higher level linux handler even for PAGE_NONE.
+                */
+               VM_WARN_ONCE(!(pteflags & _PAGE_RWX), "no-access mapping request");
                if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
                        rflags |= 0x1;
        }
-- 
2.41.0

Reply via email to