This patch fixes buffer allocation failing when it is requested
without an IOMMU.

Without an IOMMU, dma_alloc_attrs() allocates a memory region and
returns its CPU (kernel virtual) address, so this patch makes sure that
address is stored in buf->kvaddr.
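
For clarity, a minimal sketch of the intended behaviour (not the
driver's actual control flow); the function name is hypothetical, and
the field/helper names (buf->kvaddr, buf->pages,
is_drm_iommu_supported()) are the ones used in the patch below:

static int lowlevel_alloc_sketch(struct drm_device *dev,
				 struct exynos_drm_gem_buf *buf)
{
	void *cookie;

	cookie = dma_alloc_attrs(dev->dev, buf->size, &buf->dma_addr,
				 GFP_KERNEL, &buf->dma_attrs);
	if (!cookie)
		return -ENOMEM;

	if (!is_drm_iommu_supported(dev))
		buf->kvaddr = cookie;	/* CPU address of the region */
	else
		buf->pages = cookie;	/* pages cookie, no kernel mapping */

	return 0;
}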

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_buf.c |   50 +++++++++++++++++++++++++++----
 1 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 9601bad..22fb574 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -29,6 +29,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"

 static int lowlevel_buffer_allocate(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buf)
@@ -66,14 +67,45 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
        dma_set_attr(attr, &buf->dma_attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

-       buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-                       &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
-       if (!buf->pages) {
-               DRM_ERROR("failed to allocate buffer.\n");
-               return -ENOMEM;
+       nr_pages = buf->size >> PAGE_SHIFT;
+
+       if (!is_drm_iommu_supported(dev)) {
+               dma_addr_t start_addr;
+               unsigned int i = 0;
+
+               buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
+                                       GFP_KERNEL);
+               if (!buf->pages) {
+                       DRM_ERROR("failed to allocate pages.\n");
+                       return -ENOMEM;
+               }
+
+               buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+                                       &buf->dma_addr, GFP_KERNEL,
+                                       &buf->dma_attrs);
+               if (!buf->kvaddr) {
+                       DRM_ERROR("failed to allocate buffer.\n");
+                       kfree(buf->pages);
+                       return -ENOMEM;
+               }
+
+               start_addr = buf->dma_addr;
+               while (i < nr_pages) {
+                       buf->pages[i] = phys_to_page(start_addr);
+                       start_addr += PAGE_SIZE;
+                       i++;
+               }
+       } else {
+
+               buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+                                       &buf->dma_addr, GFP_KERNEL,
+                                       &buf->dma_attrs);
+               if (!buf->pages) {
+                       DRM_ERROR("failed to allocate buffer.\n");
+                       return -ENOMEM;
+               }
        }

-       nr_pages = buf->size >> PAGE_SHIFT;
        buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
        if (!buf->sgt) {
                DRM_ERROR("failed to get sg table.\n");
@@ -92,6 +124,9 @@ err_free_attrs:
                        (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;

+       if (!is_drm_iommu_supported(dev))
+               kfree(buf->pages);
+
        return ret;
 }

@@ -117,6 +152,9 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
        dma_free_attrs(dev->dev, buf->size, buf->pages,
                                (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
+
+       if (!is_drm_iommu_supported(dev))
+               kfree(buf->pages);
 }

 struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-- 
1.7.4.1
