On Sat, Jul 14, 2018 at 03:18:55PM +0300, Haneen Mohammed wrote:
> This patch adds the necessary functions to map/unmap GEM
> backing memory to the kernel's virtual address space.
> 
> Signed-off-by: Haneen Mohammed <hamohammed...@gmail.com>
> ---
> Changes in v2:
> - add vkms_gem_vunmap
> - use vmap_count to guard against multiple prepare_fb calls on the same
>   fb
> 
>  drivers/gpu/drm/vkms/vkms_drv.h |  9 ++++
>  drivers/gpu/drm/vkms/vkms_gem.c | 73 ++++++++++++++++++++++++++++++++-
>  2 files changed, 81 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> index 07be29f2dc44..855e1ea8bc35 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.h
> +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> @@ -39,6 +39,8 @@ struct vkms_gem_object {
>       struct drm_gem_object gem;
>       struct mutex pages_lock; /* Page lock used in page fault handler */
>       struct page **pages;
> +     unsigned int vmap_count;
> +     void *vaddr;
>  };
>  
>  #define drm_crtc_to_vkms_output(target) \
> @@ -47,6 +49,9 @@ struct vkms_gem_object {
>  #define drm_device_to_vkms_device(target) \
>       container_of(target, struct vkms_device, drm)
>  
> +#define drm_gem_to_vkms_gem(target)\
> +     container_of(target, struct vkms_gem_object, gem)
> +
>  /* CRTC */
>  int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
>                  struct drm_plane *primary, struct drm_plane *cursor);
> @@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
>  
>  void vkms_gem_free_object(struct drm_gem_object *obj);
>  
> +void *vkms_gem_vmap(struct drm_gem_object *obj);
> +
> +void vkms_gem_vunmap(struct drm_gem_object *obj);
> +
>  #endif /* _VKMS_DRV_H_ */
> diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
> index c7e38368602b..3f574e761b0f 100644
> --- a/drivers/gpu/drm/vkms/vkms_gem.c
> +++ b/drivers/gpu/drm/vkms/vkms_gem.c
> @@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
>       struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
>                                                  gem);
>  
> -     kvfree(gem->pages);
> +     WARN_ON(gem->pages);
> +     WARN_ON(gem->vaddr);
> +
>       mutex_destroy(&gem->pages_lock);
>       drm_gem_object_release(obj);
>       kfree(gem);
> @@ -177,3 +179,72 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
>  
>       return ret;
>  }
> +
> +static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
> +{
> +     struct drm_gem_object *gem_obj = &vkms_obj->gem;
> +
> +     if (!vkms_obj->pages) {
> +             struct page **pages = drm_gem_get_pages(gem_obj);
> +
> +             if (IS_ERR(pages))
> +                     return pages;
> +
> +             if (cmpxchg(&vkms_obj->pages, NULL, pages))
> +                     drm_gem_put_pages(gem_obj, pages, false, true);
> +     }
> +
> +     return vkms_obj->pages;
> +}
> +
> +void vkms_gem_vunmap(struct drm_gem_object *obj)
> +{
> +     struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
> +
> +     mutex_lock(&vkms_obj->pages_lock);
> +     WARN_ON(vkms_obj->vmap_count < 1);
> +     vkms_obj->vmap_count--;
> +
> +     if (vkms_obj->vmap_count == 0) {
> +             vunmap(vkms_obj->vaddr);
> +             vkms_obj->vaddr = NULL;
> +             drm_gem_put_pages(obj, vkms_obj->pages, false, true);
> +             vkms_obj->pages = NULL;
> +     }
> +
> +     mutex_unlock(&vkms_obj->pages_lock);
> +}
> +
> +void *vkms_gem_vmap(struct drm_gem_object *obj)
> +{
> +     struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
> +

nit: extra line

> +     int ret = 0;
> +
> +     mutex_lock(&vkms_obj->pages_lock);
> +     vkms_obj->vmap_count++;

If you increment this below the conditional block, you don't need to decrement
it in the fail path.
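
Something like this (completely untested sketch, and ignoring the _get_pages()
cleanup question below) is what I have in mind -- only take the reference once
the mapping can no longer fail, so the error path has nothing to undo:

void *vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Only take the reference once the mapping is known to exist */
	vkms_obj->vmap_count++;

out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret ? ERR_PTR(ret) : vkms_obj->vaddr;
}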

> +
> +     if (!vkms_obj->vaddr) {
> +             unsigned int n_pages = obj->size >> PAGE_SHIFT;
> +             struct page **pages = _get_pages(vkms_obj);
> +
> +             if (IS_ERR(pages)) {
> +                     ret = PTR_ERR(pages);
> +                     goto fail;
> +             }
> +
> +             vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
> +             if (!vkms_obj->vaddr) {

Do you need to cleanup from _get_pages() here?
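I.e. if vmap() fails after _get_pages() has pinned the pages, I'd expect
something along these lines (untested, just this hunk) so the pages don't
stay pinned:

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr) {
			/* Drop the pages pinned by _get_pages() above */
			drm_gem_put_pages(obj, vkms_obj->pages, false, true);
			vkms_obj->pages = NULL;
			ret = -ENOMEM;
			goto fail;
		}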

> +                     ret = -ENOMEM;
> +                     goto fail;
> +             }
> +     }
> +
> +     mutex_unlock(&vkms_obj->pages_lock);
> +     return vkms_obj->vaddr;
> +
> +fail:
> +     vkms_obj->vmap_count--;
> +     mutex_unlock(&vkms_obj->pages_lock);
> +     return ERR_PTR(ret);
> +}
> -- 
> 2.17.1
> 

-- 
Sean Paul, Software Engineer, Google / Chromium OS