call_rcu() can take a long time when callback offloading is enabled.
Its use in vm_area_free() can cause regressions in the exit path when
multiple VMAs are being freed. To minimize that impact, place VMAs on
a list and free them in batches, using one call_rcu() call per batch.

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
 include/linux/mm.h       |  1 +
 include/linux/mm_types.h | 19 +++++++++--
 kernel/fork.c            | 68 +++++++++++++++++++++++++++++++++++-----
 mm/init-mm.c             |  3 ++
 mm/mmap.c                |  1 +
 5 files changed, 82 insertions(+), 10 deletions(-)
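For illustration, below is a minimal user-space sketch of the batching
scheme (not the kernel code): objects to be freed are queued on a list,
and a single deferred callback frees the whole batch instead of
scheduling one callback per object. fake_call_rcu(), obj_free() and the
plain singly-linked list are hypothetical stand-ins for call_rcu(),
vm_area_free() and the kernel list_head API; the "deferred" callback
runs synchronously here, and the spinlock protecting the list in the
kernel version is omitted since the sketch is single-threaded.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int id;
	struct obj *next;	/* plays the role of vm_free_list */
};

static struct obj *free_list;
static int free_list_size;

/* Hypothetical stand-in for call_rcu(): invokes the callback directly. */
static void fake_call_rcu(struct obj *batch, void (*cb)(struct obj *))
{
	cb(batch);
}

/* One callback frees the whole batch, mirroring vma_free_rcu_callback(). */
static void free_batch(struct obj *batch)
{
	while (batch) {
		struct obj *next = batch->next;

		printf("freeing obj %d\n", batch->id);
		free(batch);
		batch = next;
	}
}

/* Mirrors drain_free_vmas(): detach the whole list, free it in one call. */
static void drain(void)
{
	struct obj *batch = free_list;

	free_list = NULL;
	free_list_size = 0;
	if (batch)
		fake_call_rcu(batch, free_batch);
}

#define BATCH_MAX 32

/* Mirrors vm_area_free(): queue the object, drain past the threshold. */
static void obj_free(struct obj *o)
{
	o->next = free_list;
	free_list = o;
	if (++free_list_size > BATCH_MAX)
		drain();
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;
		o->id = i;
		obj_free(o);
	}
	drain();	/* mirrors the drain_free_vmas() call in exit_mmap() */
	return 0;
}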

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3158f33e268c..50c7a6dd9c7a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -250,6 +250,7 @@ void setup_initial_init_mm(void *start_code, void *end_code,
 struct vm_area_struct *vm_area_alloc(struct mm_struct *);
 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
 void vm_area_free(struct vm_area_struct *);
+void drain_free_vmas(struct mm_struct *mm);
 
 #ifndef CONFIG_MMU
 extern struct rb_root nommu_region_tree;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fce9113d979c..c0e6c8e4700b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -592,8 +592,18 @@ struct vm_area_struct {
        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units */
-       struct file * vm_file;          /* File we map to (can be NULL). */
-       void * vm_private_data;         /* was vm_pte (shared mem) */
+       union {
+               struct {
+                       /* File we map to (can be NULL). */
+                       struct file *vm_file;
+
+                       /* was vm_pte (shared mem) */
+                       void *vm_private_data;
+               };
+#ifdef CONFIG_PER_VMA_LOCK
+               struct list_head vm_free_list;
+#endif
+       };
 
 #ifdef CONFIG_ANON_VMA_NAME
        /*
@@ -693,6 +703,11 @@ struct mm_struct {
                                          */
 #ifdef CONFIG_PER_VMA_LOCK
                int mm_lock_seq;
+               struct {
+                       struct list_head head;
+                       spinlock_t lock;
+                       int size;
+               } vma_free_list;
 #endif
 
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d9f14e55ecf..97f2b751f88d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -481,26 +481,75 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 }
 
 #ifdef CONFIG_PER_VMA_LOCK
-static void __vm_area_free(struct rcu_head *head)
+static inline void __vm_area_free(struct vm_area_struct *vma)
 {
-       struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
-                                                 vm_rcu);
        /* The vma should either have no lock holders or be write-locked. */
        vma_assert_no_reader(vma);
        kmem_cache_free(vm_area_cachep, vma);
 }
-#endif
+
+static void vma_free_rcu_callback(struct rcu_head *head)
+{
+       struct vm_area_struct *first_vma;
+       struct vm_area_struct *vma, *vma2;
+
+       first_vma = container_of(head, struct vm_area_struct, vm_rcu);
+       list_for_each_entry_safe(vma, vma2, &first_vma->vm_free_list, vm_free_list)
+               __vm_area_free(vma);
+       __vm_area_free(first_vma);
+}
+
+void drain_free_vmas(struct mm_struct *mm)
+{
+       struct vm_area_struct *first_vma;
+       LIST_HEAD(to_destroy);
+
+       spin_lock(&mm->vma_free_list.lock);
+       list_splice_init(&mm->vma_free_list.head, &to_destroy);
+       mm->vma_free_list.size = 0;
+       spin_unlock(&mm->vma_free_list.lock);
+
+       if (list_empty(&to_destroy))
+               return;
+
+       first_vma = list_first_entry(&to_destroy, struct vm_area_struct, vm_free_list);
+       /* Remove the head which is allocated on the stack */
+       list_del(&to_destroy);
+
+       call_rcu(&first_vma->vm_rcu, vma_free_rcu_callback);
+}
+
+#define VM_AREA_FREE_LIST_MAX  32
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       bool drain;
+
+       free_anon_vma_name(vma);
+
+       spin_lock(&mm->vma_free_list.lock);
+       list_add(&vma->vm_free_list, &mm->vma_free_list.head);
+       mm->vma_free_list.size++;
+       drain = mm->vma_free_list.size > VM_AREA_FREE_LIST_MAX;
+       spin_unlock(&mm->vma_free_list.lock);
+
+       if (drain)
+               drain_free_vmas(mm);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+void drain_free_vmas(struct mm_struct *mm) {}
 
 void vm_area_free(struct vm_area_struct *vma)
 {
        free_anon_vma_name(vma);
-#ifdef CONFIG_PER_VMA_LOCK
-       call_rcu(&vma->vm_rcu, __vm_area_free);
-#else
        kmem_cache_free(vm_area_cachep, vma);
-#endif
 }
 
+#endif /* CONFIG_PER_VMA_LOCK */
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
        if (IS_ENABLED(CONFIG_VMAP_STACK)) {
@@ -1150,6 +1199,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        INIT_LIST_HEAD(&mm->mmlist);
 #ifdef CONFIG_PER_VMA_LOCK
        WRITE_ONCE(mm->mm_lock_seq, 0);
+       INIT_LIST_HEAD(&mm->vma_free_list.head);
+       spin_lock_init(&mm->vma_free_list.lock);
+       mm->vma_free_list.size = 0;
 #endif
        mm_pgtables_bytes_init(mm);
        mm->map_count = 0;
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 33269314e060..b53d23c2d7a3 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -39,6 +39,9 @@ struct mm_struct init_mm = {
        .mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
 #ifdef CONFIG_PER_VMA_LOCK
        .mm_lock_seq    = 0,
+       .vma_free_list.head = LIST_HEAD_INIT(init_mm.vma_free_list.head),
+       .vma_free_list.lock = __SPIN_LOCK_UNLOCKED(init_mm.vma_free_list.lock),
+       .vma_free_list.size = 0,
 #endif
        .user_ns        = &init_user_ns,
        .cpu_bitmap     = CPU_BITS_NONE,
diff --git a/mm/mmap.c b/mm/mmap.c
index 332af383f7cd..a0d5d3af1d95 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3159,6 +3159,7 @@ void exit_mmap(struct mm_struct *mm)
        trace_exit_mmap(mm);
        __mt_destroy(&mm->mm_mt);
        mmap_write_unlock(mm);
+       drain_free_vmas(mm);
        vm_unacct_memory(nr_accounted);
 }
 
-- 
2.39.0
