This introduces new functions to allocate/free a buffer using the DMA
mapping API. exynos-drm already uses the DMA mapping API to allocate and
free buffers, but the same code path is shared by the iommu and non-iommu
cases, so split it. A new buffer allocation path that does not use the
DMA mapping API will be added later.

Signed-off-by: Joonyoung Shim <jy0922.shim at samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 90 +++++++++++++++++++++++----------
 1 file changed, 64 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d5951f75c774..88196edd4ade 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -20,6 +20,63 @@
 #include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"

+static int exynos_drm_alloc_dma(struct exynos_drm_gem *exynos_gem)
+{
+       struct drm_device *dev = exynos_gem->base.dev;
+       unsigned int nr_pages;
+       unsigned int i;
+       dma_addr_t addr;
+
+       init_dma_attrs(&exynos_gem->dma_attrs);
+
+       if (exynos_gem->flags & EXYNOS_BO_WC ||
+                       !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
+               dma_set_attr(DMA_ATTR_WRITE_COMBINE, &exynos_gem->dma_attrs);
+
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
+
+       nr_pages = exynos_gem->size >> PAGE_SHIFT;
+
+       exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+       if (!exynos_gem->pages) {
+               DRM_ERROR("failed to allocate pages\n");
+               return -ENOMEM;
+       }
+
+       exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
+                                            &exynos_gem->dma_addr, GFP_KERNEL,
+                                            &exynos_gem->dma_attrs);
+       if (!exynos_gem->cookie) {
+               DRM_ERROR("failed to allocate buffer\n");
+               drm_free_large(exynos_gem->pages);
+               return -ENOMEM;
+       }
+
+       addr = exynos_gem->dma_addr;
+       for (i = 0; i < nr_pages; i++) {
+               exynos_gem->pages[i] =
+                       pfn_to_page(dma_to_pfn(dev->dev, addr));
+               addr += PAGE_SIZE;
+       }
+
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+                       (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
+
+       return 0;
+}
+
+static void exynos_drm_free_dma(struct exynos_drm_gem *exynos_gem)
+{
+       struct drm_device *dev = exynos_gem->base.dev;
+
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+                       (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
+
+       dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+                       exynos_gem->dma_addr, &exynos_gem->dma_attrs);
+       drm_free_large(exynos_gem->pages);
+}
+
 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 {
        struct drm_device *dev = exynos_gem->base.dev;
@@ -31,6 +88,9 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
                return 0;
        }

+       if (!is_drm_iommu_supported(dev))
+               return exynos_drm_alloc_dma(exynos_gem);
+
        init_dma_attrs(&exynos_gem->dma_attrs);

        /*
@@ -51,15 +111,6 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)

        nr_pages = exynos_gem->size >> PAGE_SHIFT;

-       if (!is_drm_iommu_supported(dev)) {
-               exynos_gem->pages = drm_calloc_large(nr_pages,
-                                                    sizeof(struct page *));
-               if (!exynos_gem->pages) {
-                       DRM_ERROR("failed to allocate pages.\n");
-                       return -ENOMEM;
-               }
-       }
-
        exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
                                             &exynos_gem->dma_addr, GFP_KERNEL,
                                             &exynos_gem->dma_attrs);
@@ -70,20 +121,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
                return -ENOMEM;
        }

-       if (exynos_gem->pages) {
-               dma_addr_t start_addr;
-               unsigned int i = 0;
-
-               start_addr = exynos_gem->dma_addr;
-               while (i < nr_pages) {
-                       exynos_gem->pages[i] =
-                               pfn_to_page(dma_to_pfn(dev->dev, start_addr));
-                       start_addr += PAGE_SIZE;
-                       i++;
-               }
-       } else {
-               exynos_gem->pages = exynos_gem->cookie;
-       }
+       exynos_gem->pages = exynos_gem->cookie;

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);
@@ -100,15 +138,15 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
                return;
        }

+       if (!is_drm_iommu_supported(dev))
+               return exynos_drm_free_dma(exynos_gem);
+
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

        dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
                        (dma_addr_t)exynos_gem->dma_addr,
                        &exynos_gem->dma_attrs);
-
-       if (!is_drm_iommu_supported(dev))
-               drm_free_large(exynos_gem->pages);
 }

 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
-- 
1.9.1

Reply via email to