With this change, the exynos drm dmabuf module can export and
import dmabufs of GEM objects backed by physically non-contiguous
memory.

The exynos_map_dmabuf function builds a scatter-gather table (SGT)
for a non-contiguous buffer by calling dma_get_pages to retrieve the
allocated pages, collapsing physically adjacent pages into chunks,
and then mapping the SGT into the importing device's address space
with dma_map_sg.

Signed-off-by: Prathyush K <prathyus...@samsung.com>
---
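For context, a rough sketch of the importer side that this export path
serves, assuming the usual dma-buf attach/map flow of this kernel
generation; dev, dmabuf and setup_device_mmu() are placeholders for the
importing driver's own device, buffer handle and (hypothetical)
MMU-programming helper, not real APIs. A non-contiguous export means
the importer must walk every sg entry rather than assuming one chunk:

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *s;
	int i;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	/* program each chunk into the device's MMU (hypothetical helper) */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		setup_device_mmu(dev, sg_dma_address(s), sg_dma_len(s));
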
 drivers/gpu/drm/exynos/exynos_drm_dmabuf.c |  101 ++++++++++++++++++++++-----
 1 file changed, 84 insertions(+), 17 deletions(-)
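
For reference, a worked example of the chunk merging done by
drm_dc_pages_to_sgt() below, using made-up page frame numbers: for a
five-page buffer whose pages sit at pfns {100, 101, 102, 300, 301},
the counting pass finds two chunks and the merge loop emits

	sg[0]: page @ pfn 100, length = 3 * PAGE_SIZE, dma_address = daddr
	sg[1]: page @ pfn 300, length = 2 * PAGE_SIZE, dma_address = daddr + 3 * PAGE_SIZE

i.e. the CPU pages may be scattered while the helper pre-assigns one
contiguous DMA address range starting at the buffer's dma_addr (offset
and offset2 are both zero in the exynos_map_dmabuf() caller).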

diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index cbb6ad4..54b88bd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -56,6 +56,59 @@ static void exynos_dmabuf_detach(struct dma_buf *dmabuf,
        dma_buf_put(dmabuf);
 }
 
+
+static struct sg_table *drm_dc_pages_to_sgt(struct page **pages,
+       unsigned long n_pages, size_t offset, size_t offset2, dma_addr_t daddr)
+{
+       struct sg_table *sgt;
+       struct scatterlist *s;
+       int i, j, cur_page, chunks, ret;
+
+       sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+       if (!sgt)
+               return ERR_PTR(-ENOMEM);
+
+       /* compute number of chunks */
+       chunks = 1;
+       for (i = 1; i < n_pages; ++i)
+               if (pages[i] != pages[i - 1] + 1)
+                       ++chunks;
+
+       ret = sg_alloc_table(sgt, chunks, GFP_KERNEL);
+       if (ret) {
+               kfree(sgt);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* merging chunks and putting them into the scatterlist */
+       cur_page = 0;
+       for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+               size_t size = PAGE_SIZE;
+
+               for (j = cur_page + 1; j < n_pages; ++j) {
+                       if (pages[j] != pages[j - 1] + 1)
+                               break;
+                       size += PAGE_SIZE;
+               }
+
+               /* cut offset if chunk starts at the first page */
+               if (cur_page == 0)
+                       size -= offset;
+               /* cut offset2 if chunk ends at the last page */
+               if (j == n_pages)
+                       size -= offset2;
+
+               sg_set_page(s, pages[cur_page], size, offset);
+               s->dma_address = daddr;
+       daddr += size;
+               offset = 0;
+               cur_page = j;
+       }
+
+       return sgt;
+}
+
+
 static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
 {
@@ -64,6 +117,8 @@ static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
        struct exynos_drm_gem_buf *buffer;
        struct sg_table *sgt;
        int ret;
+       int size, n_pages;
+       struct page **pages = NULL;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -71,27 +126,40 @@ static struct sg_table *exynos_map_dmabuf(struct dma_buf_attachment *attach,
 
        buffer = exynos_gem_obj->buffer;
 
-       /* TODO. consider physically non-continuous memory with IOMMU. */
+       size = buffer->size;
+       n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-       if (!sgt) {
-               DRM_DEBUG_KMS("failed to allocate sg table.\n");
-               return ERR_PTR(-ENOMEM);
+       pages = kmalloc(n_pages * sizeof pages[0], GFP_KERNEL);
+       if (!pages) {
+               DRM_DEBUG_KMS("failed to alloc page table\n");
+               return ERR_PTR(-ENOMEM);
        }
 
-       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       ret = dma_get_pages(attach->dev, buffer->kvaddr,
+                               buffer->dma_addr, pages, n_pages);
        if (ret < 0) {
-               DRM_DEBUG_KMS("failed to allocate scatter list.\n");
-               kfree(sgt);
-               sgt = NULL;
-               return ERR_PTR(-ENOMEM);
+               DRM_DEBUG_KMS("failed to get buffer pages from DMA API\n");
+               return NULL;
        }
+       if (ret != n_pages) {
+               DRM_DEBUG_KMS("failed to get all pages from DMA API\n");
+               kfree(pages);
+               return ERR_PTR(-EINVAL);
+       }
+
+       sgt = drm_dc_pages_to_sgt(pages, n_pages, 0, 0, buffer->dma_addr);
+       if (IS_ERR(sgt)) {
+               DRM_DEBUG_KMS("failed to prepare sg table\n");
+               kfree(pages);
+               return sgt;
+       }
+
+       sgt->nents = dma_map_sg(attach->dev, sgt->sgl,
+                               sgt->orig_nents, DMA_BIDIRECTIONAL);
 
-       sg_init_table(sgt->sgl, 1);
-       sg_dma_len(sgt->sgl) = buffer->size;
-       sg_set_page(sgt->sgl, pfn_to_page(PFN_DOWN(buffer->dma_addr)),
-                       buffer->size, 0);
-       sg_dma_address(sgt->sgl) = buffer->dma_addr;
+       /* pages are no longer needed */
+       kfree(pages);
 
        /*
         * increase reference count of this buffer.
@@ -303,8 +371,6 @@ int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
        if (ret < 0)
                goto fail_handle;
 
-       /* consider physically non-continuous memory with IOMMU. */
-
        buffer->dma_addr = sg_dma_address(sgt->sgl);
        buffer->size = sg_dma_len(sgt->sgl);
 
@@ -316,6 +382,7 @@ int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
        atomic_set(&buffer->shared_refcount, 1);
 
        exynos_gem_obj->base.import_attach = attach;
+       exynos_gem_obj->buffer = buffer;
 
        ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
                                                        dmabuf, *handle);
-- 
1.7.0.4
