In walk_pte_range(), we may modify the pte entry after holding the ptl, so
convert it to using pte_offset_map_rw_nolock(). At this time, the
pte_same() check is not performed after the ptl is held, so we should get
pmdval and do a pmd_same() check to ensure the stability of the pmd entry.

Signed-off-by: Qi Zheng <zhengqi.a...@bytedance.com>
---
 mm/vmscan.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a9b6a8196f958..36b84e46cd7b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3375,8 +3375,10 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
        struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
        DEFINE_MAX_SEQ(walk->lruvec);
        int old_gen, new_gen = lru_gen_from_seq(max_seq);
+       pmd_t pmdval;
 
-       pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl);
+       pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval,
+                                      &ptl);
        if (!pte)
                return false;
        if (!spin_trylock(ptl)) {
@@ -3384,6 +3386,11 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
                return false;
        }
 
+       if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
+               pte_unmap_unlock(pte, ptl);
+               return false;
+       }
+
        arch_enter_lazy_mmu_mode();
 restart:
        for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
-- 
2.20.1


Reply via email to