Can we change this to active/inactive?  I think there is potential for
reusing this functionality to an even larger degree, and that name would
fit better while still making sense in this context.

i.e.: vma_mark_active() and vma_mark_inactive()?

* Suren Baghdasaryan <sur...@google.com> [230216 00:18]:
> Per-vma locking mechanism will search for VMA under RCU protection and
> then after locking it, has to ensure it was not removed from the VMA
> tree after we found it. To make this check efficient, introduce a
> vma->detached flag to mark VMAs which were removed from the VMA tree.
> 
> Signed-off-by: Suren Baghdasaryan <sur...@google.com>
> ---
>  include/linux/mm.h       | 11 +++++++++++
>  include/linux/mm_types.h |  3 +++
>  mm/mmap.c                |  2 ++
>  3 files changed, 16 insertions(+)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index f4f702224ec5..3f98344f829c 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -693,6 +693,14 @@ static inline void vma_assert_write_locked(struct 
> vm_area_struct *vma)
>       VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), 
> vma);
>  }
>  
> +static inline void vma_mark_detached(struct vm_area_struct *vma, bool 
> detached)
> +{
> +     /* When detaching vma should be write-locked */
> +     if (detached)
> +             vma_assert_write_locked(vma);
> +     vma->detached = detached;
> +}
> +
>  #else /* CONFIG_PER_VMA_LOCK */
>  
>  static inline void vma_init_lock(struct vm_area_struct *vma) {}
> @@ -701,6 +709,8 @@ static inline bool vma_start_read(struct vm_area_struct 
> *vma)
>  static inline void vma_end_read(struct vm_area_struct *vma) {}
>  static inline void vma_start_write(struct vm_area_struct *vma) {}
>  static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
> +static inline void vma_mark_detached(struct vm_area_struct *vma,
> +                                  bool detached) {}
>  
>  #endif /* CONFIG_PER_VMA_LOCK */
>  
> @@ -712,6 +722,7 @@ static inline void vma_init(struct vm_area_struct *vma, 
> struct mm_struct *mm)
>       vma->vm_mm = mm;
>       vma->vm_ops = &dummy_vm_ops;
>       INIT_LIST_HEAD(&vma->anon_vma_chain);
> +     vma_mark_detached(vma, false);
>       vma_init_lock(vma);
>  }
>  
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index e268723eaf44..939f4f5a1115 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -511,6 +511,9 @@ struct vm_area_struct {
>  #ifdef CONFIG_PER_VMA_LOCK
>       int vm_lock_seq;
>       struct rw_semaphore lock;
> +
> +     /* Flag to indicate areas detached from the mm->mm_mt tree */
> +     bool detached;
>  #endif
>  
>       /*
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 801608726be8..adf40177e68f 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -593,6 +593,7 @@ static inline void vma_complete(struct vma_prepare *vp,
>  
>       if (vp->remove) {
>  again:
> +             vma_mark_detached(vp->remove, true);
>               if (vp->file) {
>                       uprobe_munmap(vp->remove, vp->remove->vm_start,
>                                     vp->remove->vm_end);
> @@ -2267,6 +2268,7 @@ static inline int munmap_sidetree(struct vm_area_struct 
> *vma,
>       if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
>               return -ENOMEM;
>  
> +     vma_mark_detached(vma, true);
>       if (vma->vm_flags & VM_LOCKED)
>               vma->vm_mm->locked_vm -= vma_pages(vma);
>  
> -- 
> 2.39.1
> 

Reply via email to