Update the mmap() implementation in __mmap_region() and the functions it
invokes. The mmap_region() function converts its input vm_flags_t parameter
to a vma_flags_t value, which it then passes to __mmap_region(), which uses
the vma_flags_t value consistently from then on.

As part of the change, we convert map_deny_write_exec() to use
vma_flags_t (it was incorrectly using unsigned long before), and place it
in vma.h, as it is used only internally to mm.

With this change, we eliminate the legacy is_shared_maywrite_vm_flags()
helper function, which is no longer required.

We are also able to update the MMAP_STATE() and VMG_MMAP_STATE() macros to
use the vma_flags_t value.

Finally, we update the VMA tests to reflect the change.

Signed-off-by: Lorenzo Stoakes (Oracle) <[email protected]>
---
 include/linux/mm.h              | 18 ++++++++----
 include/linux/mman.h            | 49 -------------------------------
 mm/mprotect.c                   |  4 ++-
 mm/vma.c                        | 25 ++++++++--------
 mm/vma.h                        | 51 +++++++++++++++++++++++++++++++++
 tools/testing/vma/include/dup.h | 34 +++++-----------------
 tools/testing/vma/tests/mmap.c  | 18 ++++--------
 7 files changed, 92 insertions(+), 107 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fd873a9467f8..34b587531f1b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1485,12 +1485,6 @@ static inline bool vma_is_accessible(const struct vm_area_struct *vma)
        return vma->vm_flags & VM_ACCESS_FLAGS;
 }
 
-static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
-{
-       return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
-               (VM_SHARED | VM_MAYWRITE);
-}
-
 static inline bool is_shared_maywrite(const vma_flags_t *flags)
 {
        return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
@@ -4285,12 +4279,24 @@ static inline bool range_in_vma(const struct vm_area_struct *vma,
 
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
+
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+       const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
+
+       return vm_get_page_prot(vm_flags);
+}
+
 void vma_set_page_prot(struct vm_area_struct *vma);
 #else
 static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
        return __pgprot(0);
 }
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+       return __pgprot(0);
+}
 static inline void vma_set_page_prot(struct vm_area_struct *vma)
 {
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 0ba8a7e8b90a..389521594c69 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -170,53 +170,4 @@ static inline bool arch_memory_deny_write_exec_supported(void)
 }
 #define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
 #endif
-
-/*
- * Denies creating a writable executable mapping or gaining executable permissions.
- *
- * This denies the following:
- *
- *     a)      mmap(PROT_WRITE | PROT_EXEC)
- *
- *     b)      mmap(PROT_WRITE)
- *             mprotect(PROT_EXEC)
- *
- *     c)      mmap(PROT_WRITE)
- *             mprotect(PROT_READ)
- *             mprotect(PROT_EXEC)
- *
- * But allows the following:
- *
- *     d)      mmap(PROT_READ | PROT_EXEC)
- *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
- *
- * This is only applicable if the user has set the Memory-Deny-Write-Execute
- * (MDWE) protection mask for the current process.
- *
- * @old specifies the VMA flags the VMA originally possessed, and @new the ones
- * we propose to set.
- *
- * Return: false if proposed change is OK, true if not ok and should be denied.
- */
-static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
-{
-       /* If MDWE is disabled, we have nothing to deny. */
-       if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
-               return false;
-
-       /* If the new VMA is not executable, we have nothing to deny. */
-       if (!(new & VM_EXEC))
-               return false;
-
-       /* Under MDWE we do not accept newly writably executable VMAs... */
-       if (new & VM_WRITE)
-               return true;
-
-       /* ...nor previously non-executable VMAs becoming executable. */
-       if (!(old & VM_EXEC))
-               return true;
-
-       return false;
-}
-
 #endif /* _LINUX_MMAN_H */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2b8a85689ab7..ef09cd1aa33f 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -882,6 +882,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
        tmp = vma->vm_start;
        for_each_vma_range(vmi, vma, end) {
                vm_flags_t mask_off_old_flags;
+               vma_flags_t new_vma_flags;
                vm_flags_t newflags;
                int new_vma_pkey;
 
@@ -904,6 +905,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
                new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
                newflags = calc_vm_prot_bits(prot, new_vma_pkey);
                newflags |= (vma->vm_flags & ~mask_off_old_flags);
+               new_vma_flags = legacy_to_vma_flags(newflags);
 
                /* newflags >> 4 shift VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
@@ -911,7 +913,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
                        break;
                }
 
-               if (map_deny_write_exec(vma->vm_flags, newflags)) {
+               if (map_deny_write_exec(&vma->flags, &new_vma_flags)) {
                        error = -EACCES;
                        break;
                }
diff --git a/mm/vma.c b/mm/vma.c
index c2c649b23465..1b00d6a2cc8d 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -44,7 +44,7 @@ struct mmap_state {
        bool file_doesnt_need_get :1;
 };
 
-#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
+#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vma_flags_, file_) \
        struct mmap_state name = {                                      \
                .mm = mm_,                                              \
                .vmi = vmi_,                                            \
@@ -52,9 +52,9 @@ struct mmap_state {
                .end = (addr_) + (len_),                                \
                .pgoff = pgoff_,                                        \
                .pglen = PHYS_PFN(len_),                                \
-               .vm_flags = vm_flags_,                                  \
+               .vma_flags = vma_flags_,                                \
                .file = file_,                                          \
-               .page_prot = vm_get_page_prot(vm_flags_),               \
+               .page_prot = vma_get_page_prot(vma_flags_),             \
        }
 
 #define VMG_MMAP_STATE(name, map_, vma_)                               \
@@ -63,7 +63,7 @@ struct mmap_state {
                .vmi = (map_)->vmi,                                     \
                .start = (map_)->addr,                                  \
                .end = (map_)->end,                                     \
-               .vm_flags = (map_)->vm_flags,                           \
+               .vma_flags = (map_)->vma_flags,                         \
                .pgoff = (map_)->pgoff,                                 \
                .file = (map_)->file,                                   \
                .prev = (map_)->prev,                                   \
@@ -2747,14 +2747,14 @@ static int call_action_complete(struct mmap_state *map,
 }
 
 static unsigned long __mmap_region(struct file *file, unsigned long addr,
-               unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
-               struct list_head *uf)
+               unsigned long len, vma_flags_t vma_flags,
+               unsigned long pgoff, struct list_head *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        bool have_mmap_prepare = file && file->f_op->mmap_prepare;
        VMA_ITERATOR(vmi, mm, addr);
-       MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
+       MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vma_flags, file);
        struct vm_area_desc desc = {
                .mm = mm,
                .file = file,
@@ -2838,16 +2838,17 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
  * been performed.
  */
 unsigned long mmap_region(struct file *file, unsigned long addr,
-                         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
-                         struct list_head *uf)
+                         unsigned long len, vm_flags_t vm_flags,
+                         unsigned long pgoff, struct list_head *uf)
 {
        unsigned long ret;
        bool writable_file_mapping = false;
+       const vma_flags_t vma_flags = legacy_to_vma_flags(vm_flags);
 
        mmap_assert_write_locked(current->mm);
 
        /* Check to see if MDWE is applicable. */
-       if (map_deny_write_exec(vm_flags, vm_flags))
+       if (map_deny_write_exec(&vma_flags, &vma_flags))
                return -EACCES;
 
        /* Allow architectures to sanity-check the vm_flags. */
@@ -2855,7 +2856,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                return -EINVAL;
 
        /* Map writable and ensure this isn't a sealed memfd. */
-       if (file && is_shared_maywrite_vm_flags(vm_flags)) {
+       if (file && is_shared_maywrite(&vma_flags)) {
                int error = mapping_map_writable(file->f_mapping);
 
                if (error)
@@ -2863,7 +2864,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                writable_file_mapping = true;
        }
 
-       ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
+       ret = __mmap_region(file, addr, len, vma_flags, pgoff, uf);
 
        /* Clear our write mapping regardless of error. */
        if (writable_file_mapping)
diff --git a/mm/vma.h b/mm/vma.h
index 270008e5babc..adc18f7dd9f1 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -704,4 +704,55 @@ int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
 #endif
 
+#ifdef CONFIG_MMU
+/*
+ * Denies creating a writable executable mapping or gaining executable permissions.
+ *
+ * This denies the following:
+ *
+ *     a)      mmap(PROT_WRITE | PROT_EXEC)
+ *
+ *     b)      mmap(PROT_WRITE)
+ *             mprotect(PROT_EXEC)
+ *
+ *     c)      mmap(PROT_WRITE)
+ *             mprotect(PROT_READ)
+ *             mprotect(PROT_EXEC)
+ *
+ * But allows the following:
+ *
+ *     d)      mmap(PROT_READ | PROT_EXEC)
+ *             mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ *
+ * This is only applicable if the user has set the Memory-Deny-Write-Execute
+ * (MDWE) protection mask for the current process.
+ *
+ * @old specifies the VMA flags the VMA originally possessed, and @new the ones
+ * we propose to set.
+ *
+ * Return: false if proposed change is OK, true if not ok and should be denied.
+ */
+static inline bool map_deny_write_exec(const vma_flags_t *old,
+                                      const vma_flags_t *new)
+{
+       /* If MDWE is disabled, we have nothing to deny. */
+       if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
+               return false;
+
+       /* If the new VMA is not executable, we have nothing to deny. */
+       if (!vma_flags_test(new, VMA_EXEC_BIT))
+               return false;
+
+       /* Under MDWE we do not accept newly writably executable VMAs... */
+       if (vma_flags_test(new, VMA_WRITE_BIT))
+               return true;
+
+       /* ...nor previously non-executable VMAs becoming executable. */
+       if (!vma_flags_test(old, VMA_EXEC_BIT))
+               return true;
+
+       return false;
+}
+#endif
+
 #endif /* __MM_VMA_H */
diff --git a/tools/testing/vma/include/dup.h b/tools/testing/vma/include/dup.h
index 71bb3559682d..f35c9d31aad3 100644
--- a/tools/testing/vma/include/dup.h
+++ b/tools/testing/vma/include/dup.h
@@ -1094,12 +1094,6 @@ static __always_inline void vma_desc_clear_flags_mask(struct vm_area_desc *desc,
 #define vma_desc_clear_flags(desc, ...) \
        vma_desc_clear_flags_mask(desc, mk_vma_flags(__VA_ARGS__))
 
-static inline bool is_shared_maywrite_vm_flags(vm_flags_t vm_flags)
-{
-       return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
-               (VM_SHARED | VM_MAYWRITE);
-}
-
 static inline bool is_shared_maywrite(const vma_flags_t *flags)
 {
        return vma_flags_test_all(flags, VMA_SHARED_BIT, VMA_MAYWRITE_BIT);
@@ -1416,27 +1410,6 @@ static inline bool mlock_future_ok(const struct mm_struct *mm,
        return locked_pages <= limit_pages;
 }
 
-static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
-{
-       /* If MDWE is disabled, we have nothing to deny. */
-       if (mm_flags_test(MMF_HAS_MDWE, current->mm))
-               return false;
-
-       /* If the new VMA is not executable, we have nothing to deny. */
-       if (!(new & VM_EXEC))
-               return false;
-
-       /* Under MDWE we do not accept newly writably executable VMAs... */
-       if (new & VM_WRITE)
-               return true;
-
-       /* ...nor previously non-executable VMAs becoming executable. */
-       if (!(old & VM_EXEC))
-               return true;
-
-       return false;
-}
-
 static inline int mapping_map_writable(struct address_space *mapping)
 {
        return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
@@ -1482,3 +1455,10 @@ static inline void vma_set_file(struct vm_area_struct *vma, struct file *file)
 #ifndef pgtable_supports_soft_dirty
 #define pgtable_supports_soft_dirty()  IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
 #endif
+
+static inline pgprot_t vma_get_page_prot(vma_flags_t vma_flags)
+{
+       const vm_flags_t vm_flags = vma_flags_to_legacy(vma_flags);
+
+       return vm_get_page_prot(vm_flags);
+}
diff --git a/tools/testing/vma/tests/mmap.c b/tools/testing/vma/tests/mmap.c
index bded4ecbe5db..c85bc000d1cb 100644
--- a/tools/testing/vma/tests/mmap.c
+++ b/tools/testing/vma/tests/mmap.c
@@ -2,6 +2,8 @@
 
 static bool test_mmap_region_basic(void)
 {
+       const vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
+                       VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
        struct mm_struct mm = {};
        unsigned long addr;
        struct vm_area_struct *vma;
@@ -10,27 +12,19 @@ static bool test_mmap_region_basic(void)
        current->mm = &mm;
 
        /* Map at 0x300000, length 0x3000. */
-       addr = __mmap_region(NULL, 0x300000, 0x3000,
-                            VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
-                            0x300, NULL);
+       addr = __mmap_region(NULL, 0x300000, 0x3000, vma_flags, 0x300, NULL);
        ASSERT_EQ(addr, 0x300000);
 
        /* Map at 0x250000, length 0x3000. */
-       addr = __mmap_region(NULL, 0x250000, 0x3000,
-                            VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
-                            0x250, NULL);
+       addr = __mmap_region(NULL, 0x250000, 0x3000, vma_flags, 0x250, NULL);
        ASSERT_EQ(addr, 0x250000);
 
        /* Map at 0x303000, merging to 0x300000 of length 0x6000. */
-       addr = __mmap_region(NULL, 0x303000, 0x3000,
-                            VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
-                            0x303, NULL);
+       addr = __mmap_region(NULL, 0x303000, 0x3000, vma_flags, 0x303, NULL);
        ASSERT_EQ(addr, 0x303000);
 
        /* Map at 0x24d000, merging to 0x250000 of length 0x6000. */
-       addr = __mmap_region(NULL, 0x24d000, 0x3000,
-                            VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
-                            0x24d, NULL);
+       addr = __mmap_region(NULL, 0x24d000, 0x3000, vma_flags, 0x24d, NULL);
        ASSERT_EQ(addr, 0x24d000);
 
        ASSERT_EQ(mm.map_count, 2);
-- 
2.53.0


Reply via email to