From: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

There are currently two places in the code which build the
sg table for the object backing store and in the future there
will be one more.

Consolidate that into a single helper which takes a caller
defined context and callbacks.

!!! Compile tested only. !!!

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c         | 162 ++++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  84 +++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_gtt.h     |  14 +++
 drivers/gpu/drm/i915/i915_gem_userptr.c |  54 ++++-------
 4 files changed, 198 insertions(+), 116 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a89a88922448..79dcd912759f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2208,19 +2208,69 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+struct i915_gem_get_pages_gtt_ctx {
+       struct drm_i915_private *dev_priv;
+       gfp_t gfp;
+       struct address_space *mapping;
+       unsigned int page_count;
+       bool shrunk_all;
+};
+
+static struct page *
+i915_gem_gtt_get_page(void *context, unsigned int page_num)
+{
+       struct i915_gem_get_pages_gtt_ctx *ctx = context;
+       struct page *page;
+
+       if (!ctx->shrunk_all)
+               page = shmem_read_mapping_page_gfp(ctx->mapping, page_num,
+                                                  ctx->gfp);
+       else
+               page = shmem_read_mapping_page(ctx->mapping, page_num);
+
+       /* Check that the i965g/gm workaround works. */
+       WARN_ON(page && !IS_ERR(page) && (ctx->gfp & __GFP_DMA32) &&
+               (page_to_pfn(page) >= 0x00100000UL));
+
+       return page;
+}
+
+static bool
+i915_gem_gtt_get_page_failed(void *context, unsigned int page_num,
+                            unsigned int err_cnt, int err)
+{
+       struct i915_gem_get_pages_gtt_ctx *ctx = context;
+
+       if (err_cnt > 1)
+               return true;
+
+       if (err_cnt == 0) {
+               i915_gem_shrink(ctx->dev_priv, ctx->page_count,
+                               I915_SHRINK_BOUND | I915_SHRINK_UNBOUND |
+                               I915_SHRINK_PURGEABLE);
+       } else {
+               i915_gem_shrink_all(ctx->dev_priv);
+               ctx->shrunk_all = true;
+       }
+
+       return false;
+}
+
+static void
+i915_gem_gtt_put_page(void *context, struct page *page)
+{
+       put_page(page);
+}
+
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int page_count, i;
-       struct address_space *mapping;
+       struct i915_gem_get_pages_gtt_ctx ctx;
        struct sg_table *st;
-       struct scatterlist *sg;
        struct sgt_iter sgt_iter;
        struct page *page;
-       unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
-       gfp_t gfp;
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2229,78 +2279,44 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-       st = kmalloc(sizeof(*st), GFP_KERNEL);
-       if (st == NULL)
-               return -ENOMEM;
-
-       page_count = obj->base.size / PAGE_SIZE;
-       if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
-               kfree(st);
-               return -ENOMEM;
-       }
+       ctx.dev_priv = dev_priv;
+       ctx.page_count = obj->base.size / PAGE_SIZE;
+       ctx.shrunk_all = false;
 
        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker
         */
-       mapping = obj->base.filp->f_mapping;
-       gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
-       gfp |= __GFP_NORETRY | __GFP_NOWARN;
-       sg = st->sgl;
-       st->nents = 0;
-       for (i = 0; i < page_count; i++) {
-               page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-               if (IS_ERR(page)) {
-                       i915_gem_shrink(dev_priv,
-                                       page_count,
-                                       I915_SHRINK_BOUND |
-                                       I915_SHRINK_UNBOUND |
-                                       I915_SHRINK_PURGEABLE);
-                       page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-               }
-               if (IS_ERR(page)) {
-                       /* We've tried hard to allocate the memory by reaping
-                        * our own buffer, now let the real VM do its job and
-                        * go down in flames if truly OOM.
-                        */
-                       i915_gem_shrink_all(dev_priv);
-                       page = shmem_read_mapping_page(mapping, i);
-                       if (IS_ERR(page)) {
-                               ret = PTR_ERR(page);
-                               goto err_pages;
-                       }
-               }
-#ifdef CONFIG_SWIOTLB
-               if (swiotlb_nr_tbl()) {
-                       st->nents++;
-                       sg_set_page(sg, page, PAGE_SIZE, 0);
-                       sg = sg_next(sg);
-                       continue;
-               }
-#endif
-               if (!i || page_to_pfn(page) != last_pfn + 1) {
-                       if (i)
-                               sg = sg_next(sg);
-                       st->nents++;
-                       sg_set_page(sg, page, PAGE_SIZE, 0);
-               } else {
-                       sg->length += PAGE_SIZE;
-               }
-               last_pfn = page_to_pfn(page);
-
-               /* Check that the i965g/gm workaround works. */
-               WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+       ctx.mapping = obj->base.filp->f_mapping;
+       ctx.gfp = mapping_gfp_constraint(ctx.mapping,
+                                        ~(__GFP_IO | __GFP_RECLAIM));
+       ctx.gfp |= __GFP_NORETRY | __GFP_NOWARN;
+
+       st = i915_alloc_sg_table(ctx.page_count, &ctx,
+                                i915_gem_gtt_get_page,
+                                i915_gem_gtt_get_page_failed,
+                                i915_gem_gtt_put_page);
+       if (IS_ERR(st)) {
+               ret = PTR_ERR(st);
+               /* shmemfs first checks if there is enough memory to allocate
+                * the page and reports ENOSPC should there be insufficient,
+                * along with the usual ENOMEM for a genuine allocation failure.
+                *
+                * We use ENOSPC in our driver to mean that we have run out of
+                * aperture space and so want to translate the error from
+                * shmemfs back to our usual understanding of ENOMEM.
+                */
+               if (ret == -ENOSPC)
+                       ret = -ENOMEM;
+               return ret;
        }
-#ifdef CONFIG_SWIOTLB
-       if (!swiotlb_nr_tbl())
-#endif
-               sg_mark_end(sg);
+
        obj->pages = st;
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret)
-               goto err_pages;
+               goto err;
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);
@@ -2311,24 +2327,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
        return 0;
 
-err_pages:
-       sg_mark_end(sg);
+err:
        for_each_sgt_page(page, sgt_iter, st)
                put_page(page);
        sg_free_table(st);
        kfree(st);
 
-       /* shmemfs first checks if there is enough memory to allocate the page
-        * and reports ENOSPC should there be insufficient, along with the usual
-        * ENOMEM for a genuine allocation failure.
-        *
-        * We use ENOSPC in our driver to mean that we have run out of aperture
-        * space and so want to translate the error from shmemfs back to our
-        * usual understanding of ENOMEM.
-        */
-       if (ret == -ENOSPC)
-               ret = -ENOMEM;
-
        return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232f66bc..441910eafdea 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3703,3 +3703,87 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
        i915_vma_unpin(vma);
        i915_vma_put(vma);
 }
+
+struct sg_table *
+i915_alloc_sg_table(unsigned int page_count, void *context,
+                   i915_page_alloc_f page_alloc,
+                   i915_page_alloc_err_f page_alloc_error,
+                   i915_page_put_f page_put)
+{
+       struct sg_table *st;
+       struct scatterlist *sg;
+       struct page *page;
+       struct sgt_iter sgt_iter;
+       unsigned long pfn;
+       unsigned long last_pfn = 0;     /* suppress gcc warning */
+       unsigned int err_cnt = 0;
+       unsigned int i;
+       int err;
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (st == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+               kfree(st);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       sg = st->sgl;
+       st->nents = 0;
+
+       for (i = 0; i < page_count; i++) {
+retry:
+               page = page_alloc(context, i);
+               if (!page || IS_ERR(page)) {
+                       err = page ? PTR_ERR(page) : -ENOMEM;
+
+                       if (!page_alloc_error ||
+                           page_alloc_error(context, i, err_cnt, err))
+                               goto err_page_alloc;
+
+                       /* Escalate recovery on the next failed attempt. */
+                       err_cnt++;
+                       goto retry;
+               }
+
+#ifdef CONFIG_SWIOTLB
+               if (swiotlb_nr_tbl()) {
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+                       sg = sg_next(sg);
+                       continue;
+               }
+#endif
+               pfn = page_to_pfn(page);
+
+               if (!i || pfn != last_pfn + 1) {
+                       if (i)
+                               sg = sg_next(sg);
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+               } else {
+                       sg->length += PAGE_SIZE;
+               }
+
+               last_pfn = pfn;
+       }
+
+#ifdef CONFIG_SWIOTLB
+       if (!swiotlb_nr_tbl())
+#endif
+               sg_mark_end(sg);
+
+       return st;
+
+err_page_alloc:
+       sg_mark_end(sg);
+       if (page_put) {
+               for_each_sgt_page(page, sgt_iter, st)
+                       page_put(context, page);
+       }
+       sg_free_table(st);
+       kfree(st);
+
+       return ERR_PTR(err);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2f8c77..17768c99605e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -730,4 +730,18 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
        return sg_page(vma->pages->sgl);
 }
 
+typedef struct page *
+(*i915_page_alloc_f)(void *context, unsigned int page_num);
+typedef bool
+(*i915_page_alloc_err_f)(void *context, unsigned int page_num,
+                        unsigned int err_cnt, int err);
+typedef void
+(*i915_page_put_f)(void *context, struct page *page);
+
+struct sg_table *
+i915_alloc_sg_table(unsigned int page_count, void *context,
+                   i915_page_alloc_f page_alloc,
+                   i915_page_alloc_err_f page_alloc_error,
+                   i915_page_put_f page_put);
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..cddebe50916a 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -397,54 +397,34 @@ struct get_pages_work {
        struct task_struct *task;
 };
 
-#if IS_ENABLED(CONFIG_SWIOTLB)
-#define swiotlb_active() swiotlb_nr_tbl()
-#else
-#define swiotlb_active() 0
-#endif
+struct i915_gem_get_pages_userptr_ctx {
+       struct page **pvec;
+};
 
-static int
-st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
+static struct page *
+i915_gem_userptr_get_page(void *context, unsigned int page_num)
 {
-       struct scatterlist *sg;
-       int ret, n;
-
-       *st = kmalloc(sizeof(**st), GFP_KERNEL);
-       if (*st == NULL)
-               return -ENOMEM;
+       struct i915_gem_get_pages_userptr_ctx *ctx = context;
 
-       if (swiotlb_active()) {
-               ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
-               if (ret)
-                       goto err;
-
-               for_each_sg((*st)->sgl, sg, num_pages, n)
-                       sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
-       } else {
-               ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
-                                               0, num_pages << PAGE_SHIFT,
-                                               GFP_KERNEL);
-               if (ret)
-                       goto err;
-       }
-
-       return 0;
-
-err:
-       kfree(*st);
-       *st = NULL;
-       return ret;
+       return ctx->pvec[page_num];
 }
 
 static int
 __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
 {
+       struct i915_gem_get_pages_userptr_ctx ctx;
+       struct sg_table *st;
        int ret;
 
-       ret = st_set_pages(&obj->pages, pvec, num_pages);
-       if (ret)
-               return ret;
+       ctx.pvec = pvec;
+       st = i915_alloc_sg_table(num_pages, &ctx,
+                                i915_gem_userptr_get_page,
+                                NULL, NULL);
+       if (IS_ERR(st))
+               return PTR_ERR(st);
+
+       obj->pages = st;
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
-- 
2.7.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to