On Thu, Jul 22, 2021 at 02:07:51PM +0100, Christoph Hellwig wrote:
> On Thu, Jul 22, 2021 at 10:00:40AM -0300, Jason Gunthorpe wrote:
> > this is better:
> > 
> >    struct sg_append_table state;
> > 
> >    sg_append_init(&state, sgt, gfp_mask);
> > 
> >    while (..)
> >      ret = sg_append_pages(&state, pages, n_pages, ..)
> >      if (ret)
> >      sg_append_abort(&state); // Frees the sgt and puts it to NULL
> >    sg_append_complete(&state)
> > 
> > Which allows sg_alloc_table_from_pages() to be written as
> > 
> >    struct sg_append_table state;
> >    sg_append_init(&state, sgt, gfp_mask);
> >    ret = sg_append_pages(&state,pages, n_pages, offset, size, UINT_MAX)
> >    if (ret) {
> >       sg_append_abort(&state);
> >       return ret;
> >    }
> >    sg_append_complete(&state);
> >    return 0;
> > 
> > And then the API can manage all of this in some sane and
> > understandable way.
> 
> That would be a lot easier to use for sure.  Not sure how invasive the
> changes would be, though.

Looks pretty good. Maor, can you try it?

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1739d10a2c556e..6c8e761ab389e8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -806,27 +806,27 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
                                       struct page **pages, unsigned int nr_pages)
 {
-       struct sg_table *sg;
-       struct scatterlist *sge;
+       struct sg_table *sgt;
        size_t max_segment = 0;
+       int ret;
 
-       sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!sg)
+       sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!sgt)
                return ERR_PTR(-ENOMEM);
 
        if (dev)
                max_segment = dma_max_mapping_size(dev->dev);
        if (max_segment == 0)
                max_segment = UINT_MAX;
-       sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-                                         nr_pages << PAGE_SHIFT,
-                                         max_segment,
-                                         NULL, 0, GFP_KERNEL, NULL);
-       if (IS_ERR(sge)) {
-               kfree(sg);
-               sg = ERR_CAST(sge);
+
+       ret = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
+                                               nr_pages << PAGE_SHIFT,
+                                               max_segment, GFP_KERNEL);
+       if (ret) {
+               kfree(sgt);
+               return ERR_PTR(ret);
        }
-       return sg;
+       return sgt;
 }
 EXPORT_SYMBOL(drm_prime_pages_to_sg);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index c341d3e3386ccb..a2c5e1b30f425f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -131,6 +131,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        unsigned int max_segment = i915_sg_segment_size();
+       struct sg_append_table state;
        struct sg_table *st;
        unsigned int sg_page_sizes;
        struct scatterlist *sg;
@@ -153,13 +154,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
-       sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-                                        num_pages << PAGE_SHIFT, max_segment,
-                                        NULL, 0, GFP_KERNEL, NULL);
-       if (IS_ERR(sg)) {
-               ret = PTR_ERR(sg);
+       ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+                                               num_pages << PAGE_SHIFT,
+                                               max_segment, GFP_KERNEL);
+       if (ret)
                goto err;
-       }
 
        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 2ad889272b0a15..56214bcc6c5280 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -386,15 +386,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
                if (unlikely(ret != 0))
                        return ret;
 
-               sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-                               vsgt->num_pages, 0,
-                               (unsigned long) vsgt->num_pages << PAGE_SHIFT,
-                               dma_get_max_seg_size(dev_priv->drm.dev),
-                               NULL, 0, GFP_KERNEL, NULL);
-               if (IS_ERR(sg)) {
-                       ret = PTR_ERR(sg);
+               ret = sg_alloc_table_from_pages_segment(
+                       vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+                       (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+                       dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+               if (ret)
                        goto out_sg_alloc_fail;
-               }
 
                if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 5cc41ae962ec1d..53de1ec77326be 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -556,6 +556,25 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
 }
 EXPORT_SYMBOL(__sg_alloc_table_from_pages);
 
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+                                     unsigned int n_pages, unsigned int offset,
+                                     unsigned long size,
+                                     unsigned int max_segment, gfp_t gfp_mask)
+{
+       struct sg_append_table state;
+       int ret;
+
+       sg_append_init(&state, sgt, max_segment, gfp_mask);
+       ret = sg_append_pages(&state, pages, n_pages, offset, size);
+       if (ret) {
+               sg_append_abort(&state);
+               return ret;
+       }
+       sg_append_complete(&state);
+       return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
+
 /**
  * sg_alloc_table_from_pages - Allocate and initialize an sg table from
  *                            an array of pages
@@ -580,8 +599,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                              unsigned int n_pages, unsigned int offset,
                              unsigned long size, gfp_t gfp_mask)
 {
-       return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
-                       offset, size, UINT_MAX, NULL, 0, gfp_mask, NULL));
+       return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+                                                size, UINT_MAX, gfp_mask);
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);
 