Normally free_pgtables() needs to lock affected VMAs except for the case
when the VMAs were isolated under VMA write-lock. munmap() does just
that, isolating them while holding the appropriate locks and then
downgrading mmap_lock and dropping per-VMA locks before freeing the
page tables.
Add a parameter to free_pgtables() for such a scenario.

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
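Note for reviewers (not part of the commit message): a minimal sketch of
the two locking scenarios the new mm_wr_locked flag distinguishes,
condensed from the call sites changed below; the call arguments are taken
from the diff, the surrounding context is paraphrased:

	/*
	 * Case 1: exit_mmap() - mmap_lock is held for write and the
	 * VMAs have not been write-locked yet, so free_pgtables() must
	 * write-lock each VMA itself before unlinking it from rmap:
	 */
	mmap_write_lock(mm);
	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, /* mm_wr_locked = */ true);

	/*
	 * Case 2: the downgraded munmap() path - the VMAs were already
	 * isolated under their per-VMA write locks and mmap_lock was
	 * downgraded to read mode, so free_pgtables() must not (and
	 * cannot, since vma_start_write() requires mmap_lock held for
	 * write) take the VMA locks again:
	 */
	free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ false);
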
 mm/internal.h | 2 +-
 mm/memory.c   | 6 +++++-
 mm/mmap.c     | 5 +++--
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 08ce56dbb1d9..fce94775819c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,7 +105,7 @@ void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                   struct vm_area_struct *start_vma, unsigned long floor,
-                  unsigned long ceiling);
+                  unsigned long ceiling, bool mm_wr_locked);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
diff --git a/mm/memory.c b/mm/memory.c
index bfa3100ec5a3..f7f412833e42 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -348,7 +348,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                   struct vm_area_struct *vma, unsigned long floor,
-                  unsigned long ceiling)
+                  unsigned long ceiling, bool mm_wr_locked)
 {
        MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
@@ -366,6 +366,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
+               if (mm_wr_locked)
+                       vma_start_write(vma);
                unlink_anon_vmas(vma);
                unlink_file_vma(vma);
 
@@ -380,6 +382,8 @@ void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = mas_find(&mas, ceiling - 1);
+                               if (mm_wr_locked)
+                                       vma_start_write(vma);
                                unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
diff --git a/mm/mmap.c b/mm/mmap.c
index f7ed357056c4..ec745586785c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2152,7 +2152,8 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
        free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-                                next ? next->vm_start : USER_PGTABLES_CEILING);
+                                next ? next->vm_start : USER_PGTABLES_CEILING,
+                                mm_wr_locked);
        tlb_finish_mmu(&tlb);
 }
 
@@ -3056,7 +3057,7 @@ void exit_mmap(struct mm_struct *mm)
        mmap_write_lock(mm);
        mt_clear_in_rcu(&mm->mm_mt);
        free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
-                     USER_PGTABLES_CEILING);
+                     USER_PGTABLES_CEILING, true);
        tlb_finish_mmu(&tlb);
 
        /*
-- 
2.39.2.722.g9855ee24e9-goog