Let's ignore the accessed, dirty and soft-dirty bits: they are irrelevant
for fork, and will likely be irrelevant for upcoming users such as page
unmapping.

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
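For illustration, a minimal userspace sketch of the batching idea follows
(not kernel code: the pte64_t layout, bit positions and helper names are
invented for this sketch; only the normalize-then-compare logic mirrors
the patch):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte64_t;

#define PTE_ACCESSED	(1ull << 0)	/* hypothetical "young" bit */
#define PTE_DIRTY	(1ull << 1)	/* hypothetical dirty bit */
#define PTE_SOFT_DIRTY	(1ull << 2)	/* hypothetical soft-dirty bit */
#define PTE_PFN_SHIFT	12

/* Advance the PFN encoded in the PTE by one page. */
static pte64_t pte_next_pfn(pte64_t pte)
{
	return pte + (1ull << PTE_PFN_SHIFT);
}

/* Mirrors __pte_batch_clear_ignored(): mask out the bits that may
 * legitimately differ between PTEs of the same batch. */
static pte64_t pte_batch_clear_ignored(pte64_t pte)
{
	return pte & ~(PTE_ACCESSED | PTE_DIRTY | PTE_SOFT_DIRTY);
}

/* Count consecutive PTEs mapping consecutive PFNs whose remaining
 * bits are identical once the ignored bits are masked out. */
static int pte_batch(const pte64_t *ptep, int max_nr)
{
	pte64_t expected = pte_batch_clear_ignored(pte_next_pfn(ptep[0]));
	int nr = 1;

	while (nr < max_nr && pte_batch_clear_ignored(ptep[nr]) == expected) {
		expected = pte_next_pfn(expected);
		nr++;
	}
	return nr;
}

int main(void)
{
	/* Four PTEs mapping PFNs 0x100..0x103; the entries differ only in
	 * the ignored bits, so the whole range batches. Without the
	 * masking, the batch would stop after the first entry. */
	const pte64_t ptes[] = {
		(0x100ull << PTE_PFN_SHIFT),
		(0x101ull << PTE_PFN_SHIFT) | PTE_DIRTY,
		(0x102ull << PTE_PFN_SHIFT) | PTE_ACCESSED,
		(0x103ull << PTE_PFN_SHIFT) | PTE_SOFT_DIRTY,
	};

	printf("batch length: %d\n", pte_batch(ptes, 4)); /* prints 4 */
	return 0;
}

Clearing the ignored bits from both the expected value and each PTE read
back keeps the comparison a plain equality test (pte_same() in the kernel),
instead of requiring a masked comparator at every step.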
 mm/memory.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index f563aec85b2a8..341b2be845b6e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -953,24 +953,30 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
        set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
 }
 
+static inline pte_t __pte_batch_clear_ignored(pte_t pte)
+{
+       return pte_clear_soft_dirty(pte_mkclean(pte_mkold(pte)));
+}
+
 /*
  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
  * pages of the same folio.
  *
- * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN.
+ * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
+ * the accessed bit, dirty bit and soft-dirty bit.
  */
 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
                pte_t *start_ptep, pte_t pte, int max_nr)
 {
        unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
        const pte_t *end_ptep = start_ptep + max_nr;
-       pte_t expected_pte = pte_next_pfn(pte);
+       pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte));
        pte_t *ptep = start_ptep + 1;
 
        VM_WARN_ON_FOLIO(!pte_present(pte), folio);
 
        while (ptep != end_ptep) {
-               pte = ptep_get(ptep);
+               pte = __pte_batch_clear_ignored(ptep_get(ptep));
 
                if (!pte_same(pte, expected_pte))
                        break;
-- 
2.43.0
