On Thu, Oct 08, 2015 at 01:39:55PM +0100, Chris Wilson wrote:
> We now have two implementations for vmapping a whole object, one for
> dma-buf and one for the ringbuffer. If we couple the vmapping into the
> obj->pages lifetime, then we can reuse an obj->vmapping for both and at
> the same time couple it into the shrinker.
> 
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h         | 12 ++++---
>  drivers/gpu/drm/i915/i915_gem.c         | 41 ++++++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 55 +++++----------------------------
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 53 ++++++++++---------------------
>  4 files changed, 72 insertions(+), 89 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 4093eedfd664..343a0a723d2c 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2089,10 +2089,7 @@ struct drm_i915_gem_object {
>               struct scatterlist *sg;
>               int last;
>       } get_page;
> -
> -     /* prime dma-buf support */
> -     void *dma_buf_vmapping;
> -     int vmapping_count;
> +     void *vmapping;
>  
>       /** Breadcrumb of last rendering to the buffer.
>        * There can only be one writer, but we allow for multiple readers.
> @@ -2840,12 +2837,19 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>       BUG_ON(obj->pages == NULL);
>       obj->pages_pin_count++;
>  }
> +
>  static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>  {
>       BUG_ON(obj->pages_pin_count == 0);
>       obj->pages_pin_count--;
>  }
>  
> +void *__must_check i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj);
> +static inline void i915_gem_object_unpin_vmap(struct drm_i915_gem_object *obj)
> +{
> +     i915_gem_object_unpin_pages(obj);
> +}
> +
>  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
>  int i915_gem_object_sync(struct drm_i915_gem_object *obj,
>                        struct intel_engine_cs *to,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 78640aecf86d..446cf0662a38 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2159,6 +2159,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>       ops->put_pages(obj);
>       obj->pages = NULL;
>  
> +     if (obj->vmapping) {
> +             vunmap(obj->vmapping);
> +             obj->vmapping = NULL;
> +     }
> +
>       i915_gem_object_invalidate(obj);
>  
>       return 0;
> @@ -2325,6 +2330,42 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>       return 0;
>  }
>  
> +void *i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj)
> +{
> +     int ret;
> +
> +     ret = i915_gem_object_get_pages(obj);
> +     if (ret)
> +             return ERR_PTR(ret);
> +
> +     i915_gem_object_pin_pages(obj);
> +
> +     if (obj->vmapping == NULL) {
> +             struct sg_page_iter sg_iter;
> +             struct page **pages;
> +             int n;
> +
> +             n = obj->base.size >> PAGE_SHIFT;
> +             pages = kmalloc(n*sizeof(*pages), GFP_TEMPORARY);

Random driveby: kmalloc_array()

Also __GFP_NOWARN?

I wonder if the drm_malloc stuff should do the kmalloc attempt
regardless of the size, so we wouldn't have to do it here?
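
Something like this is what I had in mind (untested sketch, just to
illustrate the kmalloc_array()/__GFP_NOWARN suggestion):

	/* Try the cheap kmalloc first and fall back silently on failure */
	pages = kmalloc_array(n, sizeof(*pages),
			      GFP_TEMPORARY | __GFP_NOWARN);
	if (pages == NULL)
		pages = drm_malloc_ab(n, sizeof(*pages));

As a bonus kmalloc_array() checks the n * sizeof(*pages) multiplication
for overflow.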

> +             if (pages == NULL)
> +                     pages = drm_malloc_ab(n, sizeof(*pages));
> +             if (pages != NULL) {
> +                     n = 0;
> +             for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
> +                             pages[n++] = sg_page_iter_page(&sg_iter);
> +
> +                     obj->vmapping = vmap(pages, n, 0, PAGE_KERNEL);
> +                     drm_free_large(pages);
> +             }
> +             if (obj->vmapping == NULL) {
> +                     i915_gem_object_unpin_pages(obj);
> +                     return ERR_PTR(-ENOMEM);
> +             }
> +     }
> +
> +     return obj->vmapping;
> +}
> +
>  void i915_vma_move_to_active(struct i915_vma *vma,
>                            struct drm_i915_gem_request *req)
>  {
<snip>
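
For anyone skimming along, the usage pattern for the new helpers, as I
read the patch, would be roughly this (hypothetical caller, not code
from the patch itself):

	void *vaddr;

	vaddr = i915_gem_object_pin_vmap(obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... access the whole object through the linear mapping ... */

	/*
	 * Drops the pages pin only; the vmapping stays cached on the
	 * object and is torn down in i915_gem_object_put_pages().
	 */
	i915_gem_object_unpin_vmap(obj);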

-- 
Ville Syrjälä
Intel OTC