On Mon, Aug 05, 2019 at 08:33:57AM -0600, Rob Herring wrote:
> Add support to the shmem GEM helpers for tracking madvise state and
> purging pages. This is based on the msm implementation.
> 
> The BO provides a list_head, but the list management is handled outside
> of the shmem helpers as there are different locking requirements.
> 
> Cc: Tomeu Vizoso <tomeu.viz...@collabora.com>
> Cc: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
> Cc: Maxime Ripard <maxime.rip...@bootlin.com>
> Cc: Sean Paul <s...@poorly.run>
> Cc: David Airlie <airl...@linux.ie>
> Cc: Daniel Vetter <dan...@ffwll.ch>
> Cc: Eric Anholt <e...@anholt.net>
> Signed-off-by: Rob Herring <r...@kernel.org>
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 57 ++++++++++++++++++++++++++
>  include/drm/drm_gem_shmem_helper.h     | 15 +++++++
>  2 files changed, 72 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c 
> b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 2f64667ac805..4b442576de1c 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct 
> drm_device *dev, size_t
>       shmem = to_drm_gem_shmem_obj(obj);
>       mutex_init(&shmem->pages_lock);
>       mutex_init(&shmem->vmap_lock);

Maybe a bit late, but for reasons (interop with ttm, which will be more
important once we have dynamic dma-buf) it would be real nice to use the
reservation_obj lock for all this stuff. msm, being struct_mutex based,
isn't a great example here. The downside is that it will be a lot harder
to get msm to use these then, but it is much better not to spread
struct_mutex-inspired locking too far.

Other bit: Wire this all up in a shrinker while at it?
-Daniel

> +     INIT_LIST_HEAD(&shmem->madv_list);
>  
>       /*
>        * Our buffers are kept pinned, so allocating them
> @@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file 
> *file_priv,
>  }
>  EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
>  
> +/* Update madvise status, returns true if not purged, else
> + * false or -errno.
> + */
> +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
> +{
> +     struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +     mutex_lock(&shmem->pages_lock);
> +
> +     if (shmem->madv >= 0)
> +             shmem->madv = madv;
> +
> +     madv = shmem->madv;
> +
> +     mutex_unlock(&shmem->pages_lock);
> +
> +     return (madv >= 0);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_madvise);
> +
> +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
> +{
> +     struct drm_device *dev = obj->dev;
> +     struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +     WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
> +
> +     drm_gem_shmem_put_pages_locked(shmem);
> +
> +     shmem->madv = -1;
> +
> +     drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
> +     drm_gem_free_mmap_offset(obj);
> +
> +     /* Our goal here is to return as much of the memory as
> +      * is possible back to the system as we are called from OOM.
> +      * To do this we must instruct the shmfs to drop all of its
> +      * backing pages, *now*.
> +      */
> +     shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
> +
> +     invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
> +                     0, (loff_t)-1);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
> +
> +void drm_gem_shmem_purge(struct drm_gem_object *obj)
> +{
> +     struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +     mutex_lock(&shmem->pages_lock);
> +     drm_gem_shmem_purge_locked(obj);
> +     mutex_unlock(&shmem->pages_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_purge);
> +
>  /**
>   * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
>   * @file: DRM file structure to create the dumb buffer for
> diff --git a/include/drm/drm_gem_shmem_helper.h 
> b/include/drm/drm_gem_shmem_helper.h
> index 038b6d313447..ce1600fdfc3e 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
>        */
>       unsigned int pages_use_count;
>  
> +     int madv;
> +     struct list_head madv_list;
> +
>       /**
>        * @pages_mark_dirty_on_put:
>        *
> @@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
>  void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
>  void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
>  
> +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
> +
> +static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object 
> *shmem)
> +{
> +     return (shmem->madv > 0) &&
> +             !shmem->vmap_use_count && shmem->sgt &&
> +             !shmem->base.dma_buf && !shmem->base.import_attach;
> +}
> +
> +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
> +void drm_gem_shmem_purge(struct drm_gem_object *obj);
> +
>  struct drm_gem_shmem_object *
>  drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
>                                struct drm_device *dev, size_t size,
> -- 
> 2.20.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to