When mremap()ing a memory region previously registered with userfaultfd
as write-protected but without UFFD_FEATURE_EVENT_REMAP, an
inconsistency in flag clearing leads to a mismatch between the vma flags
(which have uffd-wp cleared) and the pte/pmd flags (which do not have
uffd-wp cleared). This mismatch causes a subsequent mprotect(PROT_WRITE)
to trigger a warning in page_table_check_pte_flags() due to setting the
pte to writable while uffd-wp is still set.

Fix this by always explicitly clearing the uffd-wp pte/pmd flags on any
such mremap() so that the values are consistent with the existing
clearing of VM_UFFD_WP. Be careful to clear the logical flag regardless
of its physical form: a PTE bit, a swap PTE bit, or a PTE marker. Cover
PTE, huge PMD and hugetlb paths.

Co-developed-by: Mikołaj Lenczewski <miko.lenczew...@arm.com>
Signed-off-by: Mikołaj Lenczewski <miko.lenczew...@arm.com>
Signed-off-by: Ryan Roberts <ryan.robe...@arm.com>
Closes: https://lore.kernel.org/linux-mm/810b44a8-d2ae-4107-b665-5a42eae2d...@arm.com/
Fixes: 63b2d4174c4a ("userfaultfd: wp: add the writeprotect API to userfaultfd ioctl")
Cc: sta...@vger.kernel.org
---
 include/linux/userfaultfd_k.h | 12 ++++++++++++
 mm/huge_memory.c              | 12 ++++++++++++
 mm/hugetlb.c                  | 14 +++++++++++++-
 mm/mremap.c                   | 32 +++++++++++++++++++++++++++++++-
 4 files changed, 68 insertions(+), 2 deletions(-)
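
Not part of the commit, but for reviewers who want to reproduce the
warning, here is a minimal userspace sketch of the failing sequence.
Error handling is elided, the fixed destination address handed to
mremap() is an arbitrary choice for illustration, and a kernel built
with CONFIG_PAGE_TABLE_CHECK is assumed so that the vma/pte mismatch
actually fires in page_table_check_pte_flags(). Note that features is
left at 0 in the UFFDIO_API handshake, so UFFD_FEATURE_EVENT_REMAP is
deliberately not enabled.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };

	ioctl(uffd, UFFDIO_API, &api);

	char *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(mem, 1, len);	/* populate so the ptes exist */

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)mem, .len = len },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)mem, .len = len },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);	/* uffd-wp now set in the ptes */

	/* Without UFFD_FEATURE_EVENT_REMAP, the move clears VM_UFFD_WP on
	 * the new vma but (before this fix) leaves uffd-wp set in the
	 * moved ptes.
	 */
	char *new = mremap(mem, len, len, MREMAP_MAYMOVE | MREMAP_FIXED,
			   mem + 16 * len);

	/* Making the range writable while the ptes still carry uffd-wp
	 * triggers the page_table_check_pte_flags() warning.
	 */
	mprotect(new, len, PROT_READ | PROT_WRITE);
	return 0;
}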

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index cb40f1a1d081..75342022d144 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -247,6 +247,13 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
            vma_is_shmem(vma);
 }
 
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+       struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;
+
+       return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
+}
+
 extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
 extern void dup_userfaultfd_complete(struct list_head *);
 void dup_userfaultfd_fail(struct list_head *);
@@ -402,6 +409,11 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
        return false;
 }
 
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+       return false;
+}
+
 #endif /* CONFIG_USERFAULTFD */
 
 static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c89aed1510f1..2654a9548749 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2212,6 +2212,16 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
        return pmd;
 }
 
+static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
+{
+       if (pmd_present(pmd))
+               pmd = pmd_clear_uffd_wp(pmd);
+       else if (is_swap_pmd(pmd))
+               pmd = pmd_swp_clear_uffd_wp(pmd);
+
+       return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
@@ -2250,6 +2260,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                        pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
                }
                pmd = move_soft_dirty_pmd(pmd);
+               if (vma_has_uffd_without_event_remap(vma))
+                       pmd = clear_uffd_wp_pmd(pmd);
                set_pmd_at(mm, new_addr, new_pmd, pmd);
                if (force_flush)
                        flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 354eec6f7e84..cdbc55d5384f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5454,6 +5454,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
                          unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
                          unsigned long sz)
 {
+       bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
        struct hstate *h = hstate_vma(vma);
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *src_ptl, *dst_ptl;
@@ -5470,7 +5471,18 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
        pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
-       set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+
+       if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+               huge_pte_clear(mm, new_addr, dst_pte, sz);
+       else {
+               if (need_clear_uffd_wp) {
+                       if (pte_present(pte))
+                               pte = huge_pte_clear_uffd_wp(pte);
+                       else if (is_swap_pte(pte))
+                               pte = pte_swp_clear_uffd_wp(pte);
+               }
+               set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+       }
 
        if (src_ptl != dst_ptl)
                spin_unlock(src_ptl);
diff --git a/mm/mremap.c b/mm/mremap.c
index 60473413836b..cff7f552f909 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -138,6 +138,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr, bool need_rmap_locks)
 {
+       bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        pmd_t dummy_pmdval;
@@ -216,7 +217,18 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                        force_flush = true;
                pte = move_pte(pte, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
-               set_pte_at(mm, new_addr, new_pte, pte);
+
+               if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+                       pte_clear(mm, new_addr, new_pte);
+               else {
+                       if (need_clear_uffd_wp) {
+                               if (pte_present(pte))
+                                       pte = pte_clear_uffd_wp(pte);
+                               else if (is_swap_pte(pte))
+                                       pte = pte_swp_clear_uffd_wp(pte);
+                       }
+                       set_pte_at(mm, new_addr, new_pte, pte);
+               }
        }
 
        arch_leave_lazy_mmu_mode();
@@ -278,6 +290,15 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
        if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
                return false;
 
+       /* If this pmd belongs to a uffd vma with remap events disabled, we need
+        * to ensure that the uffd-wp state is cleared from all pgtables. This
+        * means recursing into lower page tables in move_page_tables(), and we
+        * can reuse the existing code if we simply treat the entry as "not
+        * moved".
+        */
+       if (vma_has_uffd_without_event_remap(vma))
+               return false;
+
        /*
         * We don't have to worry about the ordering of src and dst
         * ptlocks because exclusive mmap_lock prevents deadlock.
@@ -333,6 +354,15 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
        if (WARN_ON_ONCE(!pud_none(*new_pud)))
                return false;
 
+       /* If this pud belongs to a uffd vma with remap events disabled, we need
+        * to ensure that the uffd-wp state is cleared from all pgtables. This
+        * means recursing into lower page tables in move_page_tables(), and we
+        * can reuse the existing code if we simply treat the entry as "not
+        * moved".
+        */
+       if (vma_has_uffd_without_event_remap(vma))
+               return false;
+
        /*
         * We don't have to worry about the ordering of src and dst
         * ptlocks because exclusive mmap_lock prevents deadlock.
-- 
2.43.0