On 01/03/16 16:33, Dave Gordon wrote:
From: Alex Dai <yu....@intel.com>

There are several places inside driver where a GEM object is mapped
to kernel virtual space. The mapping may be done either for the whole
object or only a subset of it.

This patch introduces a function i915_gem_object_vmap_range() to
implement the common functionality. The code itself is extracted and
adapted from that in vmap_batch(), but also replaces vmap_obj() and
the open-coded version in i915_gem_dmabuf_vmap().

v2: use obj->pages->nents for iteration within i915_gem_object_vmap;
     break when it finishes all desired pages. The caller must pass
     the actual page count required. [Tvrtko Ursulin]

v4: renamed to i915_gem_object_vmap_range() to make its function
     clearer. [Dave Gordon]

v5: use Chris Wilson's new drm_malloc_gfp() rather than kmalloc() or
     drm_malloc_ab(). [Dave Gordon]

v6: changed range checking to not use pages->nents. [Tvrtko Ursulin]
     Use sg_nents_for_len() for range check instead. [Dave Gordon]
     Pass range parameters in bytes rather than pages (both callers
     were converting from bytes to pages anyway, so this reduces the
     number of places where the conversion is done).

v7: changed range parameters back to pages, and simplified parameter
     validation. [Tvrtko Ursulin] As a convenience for callers, allow
     npages==0 as a shorthand for "up to the end of the object".

Looks OK to me,

Reviewed-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

Regards,

Tvrtko


With this change, we have only one vmap() in the whole driver :)

Signed-off-by: Alex Dai <yu....@intel.com>
Signed-off-by: Dave Gordon <david.s.gor...@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
---
  drivers/gpu/drm/i915/i915_cmd_parser.c  | 34 +++----------------
  drivers/gpu/drm/i915/i915_drv.h         |  4 +++
  drivers/gpu/drm/i915/i915_gem.c         | 59 +++++++++++++++++++++++++++++++++
  drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 15 ++-------
  drivers/gpu/drm/i915/intel_ringbuffer.c | 23 +------------
  5 files changed, 71 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894..4d48617 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -863,37 +863,13 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
  static u32 *vmap_batch(struct drm_i915_gem_object *obj,
                       unsigned start, unsigned len)
  {
-       int i;
-       void *addr = NULL;
-       struct sg_page_iter sg_iter;
-       int first_page = start >> PAGE_SHIFT;
-       int last_page = (len + start + 4095) >> PAGE_SHIFT;
-       int npages = last_page - first_page;
-       struct page **pages;
-
-       pages = drm_malloc_ab(npages, sizeof(*pages));
-       if (pages == NULL) {
-               DRM_DEBUG_DRIVER("Failed to get space for pages\n");
-               goto finish;
-       }
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
-               pages[i++] = sg_page_iter_page(&sg_iter);
-               if (i == npages)
-                       break;
-       }
+       unsigned long first, npages;

-       addr = vmap(pages, i, 0, PAGE_KERNEL);
-       if (addr == NULL) {
-               DRM_DEBUG_DRIVER("Failed to vmap pages\n");
-               goto finish;
-       }
+       /* Convert [start, len) to pages */
+       first = start >> PAGE_SHIFT;
+       npages = DIV_ROUND_UP(start + len, PAGE_SIZE) - first;

-finish:
-       if (pages)
-               drm_free_large(pages);
-       return (u32*)addr;
+       return i915_gem_object_vmap_range(obj, first, npages);
  }

  /* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a4dcb74..b3ae191 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2983,6 +2983,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
        obj->pages_pin_count--;
  }

+void *__must_check i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
+                                             unsigned long first,
+                                             unsigned long npages);
+
  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
  int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_engine_cs *to,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3a..d7c9ccd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2400,6 +2400,65 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
        return 0;
  }

+/**
+ * i915_gem_object_vmap_range - map some or all of a GEM object into kernel space
+ * @obj: the GEM object to be mapped
+ * @first: offset in pages of the start of the range to be mapped
+ * @npages: length in pages of the range to be mapped. For convenience, a
+ *          length of zero is taken to mean "the remainder of the object"
+ *
+ * Map a given range of a GEM object into kernel virtual space.  The caller must
+ * make sure the associated pages are gathered and pinned before calling this
+ * function, and is responsible for unmapping the returned address when it is no
+ * longer required.
+ *
+ * Returns the address at which the object has been mapped, or NULL on failure.
+ */
+void *i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
+                                unsigned long first,
+                                unsigned long npages)
+{
+       unsigned long max_pages = obj->base.size >> PAGE_SHIFT;
+       struct scatterlist *sg = obj->pages->sgl;
+       struct sg_page_iter sg_iter;
+       struct page **pages;
+       unsigned long i = 0;
+       void *addr = NULL;
+
+       /* Minimal range check */
+       if (first + npages > max_pages) {
+               DRM_DEBUG_DRIVER("Invalid page range\n");
+               return NULL;
+       }
+
+       /* npages==0 is shorthand for "the rest of the object" */
+       if (npages == 0)
+               npages = max_pages - first;
+
+       pages = drm_malloc_gfp(npages, sizeof(*pages), GFP_TEMPORARY);
+       if (pages == NULL) {
+               DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+               return NULL;
+       }
+
+       for_each_sg_page(sg, &sg_iter, max_pages, first) {
+               pages[i] = sg_page_iter_page(&sg_iter);
+               if (++i == npages) {
+                       addr = vmap(pages, npages, 0, PAGE_KERNEL);
+                       break;
+               }
+       }
+
+       /* We should have got here via the 'break' above */
+       WARN_ON(i != npages);
+       if (addr == NULL)
+               DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+
+       drm_free_large(pages);
+
+       return addr;
+}
+
  void i915_vma_move_to_active(struct i915_vma *vma,
                             struct drm_i915_gem_request *req)
  {
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 616f078..3a5d01a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -108,9 +108,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
  {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       int ret, i;
+       int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -129,16 +127,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)

        ret = -ENOMEM;

-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               goto err_unpin;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
+       obj->dma_buf_vmapping = i915_gem_object_vmap_range(obj, 0, 0);

        if (!obj->dma_buf_vmapping)
                goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8f52556..58a18e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2064,27 +2064,6 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
        i915_gem_object_ggtt_unpin(ringbuf->obj);
  }

-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-       struct sg_page_iter sg_iter;
-       struct page **pages;
-       void *addr;
-       int i;
-
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-       if (pages == NULL)
-               return NULL;
-
-       i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-               pages[i++] = sg_page_iter_page(&sg_iter);
-
-       addr = vmap(pages, i, 0, PAGE_KERNEL);
-       drm_free_large(pages);
-
-       return addr;
-}
-
  int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf)
  {
@@ -2101,7 +2080,7 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                if (ret)
                        goto unpin;

-               ringbuf->virtual_start = vmap_obj(obj);
+               ringbuf->virtual_start = i915_gem_object_vmap_range(obj, 0, 0);
                if (ringbuf->virtual_start == NULL) {
                        ret = -ENOMEM;
                        goto unpin;

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to