In move_ptes(), we may modify the new_pte after acquiring the new_ptl, so
convert it to use pte_offset_map_rw_nolock(). Since the new_pte is none at
this point, the hpage_collapse_scan_file() path cannot find this PTE page
by traversing file->f_mapping, so there is no concurrency with
retract_page_tables(). In addition, we already hold the exclusive
mmap_lock, so this new_pte page is stable and there is no need to get
pmdval and do a pmd_same() check.
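For contrast, a caller of pte_offset_map_rw_nolock() that could not rely
on holding the exclusive mmap_lock would have to pass a real pmdval and
recheck it under the ptl before modifying the PTEs. A minimal sketch of
that pattern (mm, pmd and addr are placeholder locals here, not code from
this patch):

	pmd_t pmdval;
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
	if (!pte)
		return;

	spin_lock(ptl);
	/* The PTE page may have been freed and the pmd entry changed. */
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		pte_unmap_unlock(pte, ptl);
		return;
	}
	/* ... modify PTEs under ptl ... */
	pte_unmap_unlock(pte, ptl);

Since move_ptes() already holds the exclusive mmap_lock, the new_pte page
cannot go away under us, so passing a dummy_pmdval and skipping the
pmd_same() recheck is sufficient.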

Signed-off-by: Qi Zheng <zhengqi.a...@bytedance.com>
---
 mm/mremap.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 24712f8dbb6b5..9dffd4a5b4d18 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -143,6 +143,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        spinlock_t *old_ptl, *new_ptl;
        bool force_flush = false;
        unsigned long len = old_end - old_addr;
+       pmd_t dummy_pmdval;
        int err = 0;
 
        /*
@@ -175,7 +176,15 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                err = -EAGAIN;
                goto out;
        }
-       new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
+       /*
+        * Now new_pte is none, so the hpage_collapse_scan_file() path cannot
+        * find this PTE page by traversing file->f_mapping, so there is no
+        * concurrency with retract_page_tables(). In addition, we already
+        * hold the exclusive mmap_lock, so this new_pte page is stable and
+        * there is no need to get pmdval and do a pmd_same() check.
+        */
+       new_pte = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
+                                          &new_ptl);
        if (!new_pte) {
                pte_unmap_unlock(old_pte, old_ptl);
                err = -EAGAIN;
-- 
2.20.1

