Some platforms without an MMU have display hardware for which a drm/kms
driver could be implemented.

To do that, this patch removes the dependency of DRM on the MMU
configuration flag.

The MMU configuration flag migrates to DRM_TTM, DRM_GEM_CMA_HELPER and
DRM_KMS_CMA_HELPER.

Since MMU-less platforms will need contiguous memory, the cma helper
files are duplicated with a "_nommu" suffix.
The main change is the use of vm_iomap_memory instead of dma_mmap_wc.

DRM_GEM_CMA_HELPER and DRM_KMS_CMA_HELPER are duplicated into
DRM_GEM_CMA_HELPER_NOMMU and DRM_KMS_CMA_HELPER_NOMMU to select
the correct files to compile.

Signed-off-by: Benjamin Gaignard <benjamin.gaignard at linaro.org>
---
 drivers/gpu/drm/Kconfig                    |  27 +-
 drivers/gpu/drm/Makefile                   |   2 +
 drivers/gpu/drm/drm_fb_cma_helper_nommu.c  | 648 +++++++++++++++++++++++++++++
 drivers/gpu/drm/drm_gem_cma_helper_nommu.c | 574 +++++++++++++++++++++++++
 include/drm/drm_gem_cma_helper.h           |   8 +
 5 files changed, 1255 insertions(+), 4 deletions(-)
 create mode 100644 drivers/gpu/drm/drm_fb_cma_helper_nommu.c
 create mode 100644 drivers/gpu/drm/drm_gem_cma_helper_nommu.c

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 383989c..81c7e60 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
 #
 menuconfig DRM
        tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-       depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
+       depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
        select HDMI
        select FB_CMDLINE
        select I2C
@@ -98,7 +98,7 @@ config DRM_LOAD_EDID_FIRMWARE

 config DRM_TTM
        tristate
-       depends on DRM
+       depends on DRM && MMU
        help
          GPU memory management subsystem for devices with multiple
          GPU memory types. Will be enabled automatically if a device driver
@@ -106,13 +106,13 @@ config DRM_TTM

 config DRM_GEM_CMA_HELPER
        bool
-       depends on DRM
+       depends on DRM && MMU
        help
          Choose this if you need the GEM CMA helper functions

 config DRM_KMS_CMA_HELPER
        bool
-       depends on DRM
+       depends on DRM && MMU
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
@@ -121,6 +121,25 @@ config DRM_KMS_CMA_HELPER
        help
          Choose this if you need the KMS CMA helper functions

+config DRM_GEM_CMA_HELPER_NOMMU
+       bool
+       depends on DRM && !MMU
+       help
+         Choose this if you need the GEM CMA helper functions
+         for platforms without MMU
+
+config DRM_KMS_CMA_HELPER_NOMMU
+       bool
+       depends on DRM && !MMU
+       select DRM_GEM_CMA_HELPER_NOMMU
+       select DRM_KMS_FB_HELPER
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
+       help
+         Choose this if you need the KMS CMA helper functions
+         for platforms without MMU
+
 source "drivers/gpu/drm/i2c/Kconfig"

 source "drivers/gpu/drm/arm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5b73b16..a296214 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,6 +21,7 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
 drm-$(CONFIG_DRM_VM) += drm_vm.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER_NOMMU) += drm_gem_cma_helper_nommu.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
@@ -35,6 +36,7 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER_NOMMU) += drm_fb_cma_helper_nommu.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o

 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/drm_fb_cma_helper_nommu.c b/drivers/gpu/drm/drm_fb_cma_helper_nommu.c
new file mode 100644
index 0000000..b568740
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper_nommu.c
@@ -0,0 +1,648 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *   Author: Lars-Peter Clausen <lars at metafoo.de>
+ *
+ * Based on udl_fbdev.c
+ *  Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/reservation.h>
+
+#define DEFAULT_FBDEFIO_DELAY_MS 50
+
+struct drm_fb_cma {
+       struct drm_framebuffer          fb;
+       struct drm_gem_cma_object       *obj[4];
+};
+
+struct drm_fbdev_cma {
+       struct drm_fb_helper    fb_helper;
+       struct drm_fb_cma       *fb;
+};
+
+/**
+ * DOC: framebuffer cma helper functions
+ *
+ * Provides helper functions for creating a cma (contiguous memory allocator)
+ * backed framebuffer.
+ *
+ * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
+ * callback function to create a cma backed framebuffer.
+ *
+ * An fbdev framebuffer backed by cma is also available by calling
+ * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
+ * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
+ * will be set up automatically. dirty() is called by
+ * drm_fb_helper_deferred_io() in process context (struct delayed_work).
+ *
+ * Example fbdev deferred io code::
+ *
+ *     static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
+ *                                      struct drm_file *file_priv,
+ *                                      unsigned flags, unsigned color,
+ *                                      struct drm_clip_rect *clips,
+ *                                      unsigned num_clips)
+ *     {
+ *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
+ *         ... push changes ...
+ *         return 0;
+ *     }
+ *
+ *     static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
+ *         .destroy       = drm_fb_cma_destroy,
+ *         .create_handle = drm_fb_cma_create_handle,
+ *         .dirty         = driver_fbdev_fb_dirty,
+ *     };
+ *
+ *     static int driver_fbdev_create(struct drm_fb_helper *helper,
+ *             struct drm_fb_helper_surface_size *sizes)
+ *     {
+ *         return drm_fbdev_cma_create_with_funcs(helper, sizes,
+ *                                                &driver_fbdev_fb_funcs);
+ *     }
+ *
+ *     static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
+ *         .fb_probe = driver_fbdev_create,
+ *     };
+ *
+ *     Initialize:
+ *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
+ *                                           dev->mode_config.num_crtc,
+ *                                           dev->mode_config.num_connector,
+ *                                           &driver_fb_helper_funcs);
+ *
+ */
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+       return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+       return container_of(fb, struct drm_fb_cma, fb);
+}
+
+void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+       struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               if (fb_cma->obj[i])
+                       drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+       }
+
+       drm_framebuffer_cleanup(fb);
+       kfree(fb_cma);
+}
+EXPORT_SYMBOL(drm_fb_cma_destroy);
+
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+       struct drm_file *file_priv, unsigned int *handle)
+{
+       struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+       return drm_gem_handle_create(file_priv,
+                       &fb_cma->obj[0]->base, handle);
+}
+EXPORT_SYMBOL(drm_fb_cma_create_handle);
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+       .destroy        = drm_fb_cma_destroy,
+       .create_handle  = drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+       const struct drm_mode_fb_cmd2 *mode_cmd,
+       struct drm_gem_cma_object **obj,
+       unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
+{
+       struct drm_fb_cma *fb_cma;
+       int ret;
+       int i;
+
+       fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+       if (!fb_cma)
+               return ERR_PTR(-ENOMEM);
+
+       drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+       for (i = 0; i < num_planes; i++)
+               fb_cma->obj[i] = obj[i];
+
+       ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
+       if (ret) {
+               dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
+               kfree(fb_cma);
+               return ERR_PTR(ret);
+       }
+
+       return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create_with_funcs() - helper function for the
+ *                                  &drm_mode_config_funcs ->fb_create
+ *                                  callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This can be used to set &drm_framebuffer_funcs for drivers that need the
+ * dirty() callback. Use drm_fb_cma_create() if you don't need to change
+ * &drm_framebuffer_funcs.
+ */
+struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
+       struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+       const struct drm_framebuffer_funcs *funcs)
+{
+       const struct drm_format_info *info;
+       struct drm_fb_cma *fb_cma;
+       struct drm_gem_cma_object *objs[4];
+       struct drm_gem_object *obj;
+       int ret;
+       int i;
+
+       info = drm_format_info(mode_cmd->pixel_format);
+       if (!info)
+               return ERR_PTR(-EINVAL);
+
+       for (i = 0; i < info->num_planes; i++) {
+               unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+               unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
+               unsigned int min_size;
+
+               obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
+               if (!obj) {
+                       dev_err(dev->dev, "Failed to lookup GEM object\n");
+                       ret = -ENXIO;
+                       goto err_gem_object_unreference;
+               }
+
+               min_size = (height - 1) * mode_cmd->pitches[i]
+                        + width * info->cpp[i]
+                        + mode_cmd->offsets[i];
+
+               if (obj->size < min_size) {
+                       drm_gem_object_unreference_unlocked(obj);
+                       ret = -EINVAL;
+                       goto err_gem_object_unreference;
+               }
+               objs[i] = to_drm_gem_cma_obj(obj);
+       }
+
+       fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
+       if (IS_ERR(fb_cma)) {
+               ret = PTR_ERR(fb_cma);
+               goto err_gem_object_unreference;
+       }
+
+       return &fb_cma->fb;
+
+err_gem_object_unreference:
+       for (i--; i >= 0; i--)
+               drm_gem_object_unreference_unlocked(&objs[i]->base);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
+
+/**
+ * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
+ * you need to set &drm_framebuffer_funcs ->dirty.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+       struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+       return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
+                                           &drm_fb_cma_funcs);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+                                                 unsigned int plane)
+{
+       struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+       if (plane >= 4)
+               return NULL;
+
+       return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state attach fence to
+ *
+ * This should be put into prepare_fb hook of struct &drm_plane_helper_funcs .
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to plane state for the atomic helper
+ * to wait on.
+ *
+ * There is no need for cleanup_fb for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+                         struct drm_plane_state *state)
+{
+       struct dma_buf *dma_buf;
+       struct dma_fence *fence;
+
+       if ((plane->state->fb == state->fb) || !state->fb)
+               return 0;
+
+       dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+       if (dma_buf) {
+               fence = reservation_object_get_excl_rcu(dma_buf->resv);
+               drm_atomic_set_fence_for_plane(state, fence);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
+
+#ifdef CONFIG_DEBUG_FS
+static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+       struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+       const struct drm_format_info *info;
+       int i;
+
+       seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+                       (char *)&fb->pixel_format);
+
+       info = drm_format_info(fb->pixel_format);
+
+       for (i = 0; i < info->num_planes; i++) {
+               seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
+                               i, fb->offsets[i], fb->pitches[i]);
+               drm_gem_cma_describe(fb_cma->obj[i], m);
+       }
+}
+
+/**
+ * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
+ *                            in debugfs.
+ * @m: output file
+ * @arg: private data for the callback
+ */
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_framebuffer *fb;
+
+       mutex_lock(&dev->mode_config.fb_lock);
+       drm_for_each_fb(fb, dev)
+               drm_fb_cma_describe(fb, m);
+       mutex_unlock(&dev->mode_config.fb_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
+#endif
+
+static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       return vm_iomap_memory(vma, vma->vm_start, info->fix.smem_len);
+}
+
+static struct fb_ops drm_fbdev_cma_ops = {
+       .owner          = THIS_MODULE,
+       DRM_FB_HELPER_DEFAULT_OPS,
+       .fb_fillrect    = drm_fb_helper_sys_fillrect,
+       .fb_copyarea    = drm_fb_helper_sys_copyarea,
+       .fb_imageblit   = drm_fb_helper_sys_imageblit,
+       .fb_mmap        = drm_fb_cma_mmap,
+};
+
+static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
+                                         struct vm_area_struct *vma)
+{
+       fb_deferred_io_mmap(info, vma);
+       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+       return 0;
+}
+
+static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
+                                   struct drm_gem_cma_object *cma_obj)
+{
+       struct fb_deferred_io *fbdefio;
+       struct fb_ops *fbops;
+
+       /*
+        * Per device structures are needed because:
+        * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
+        * fbdefio: individual delays
+        */
+       fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
+       fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+       if (!fbdefio || !fbops) {
+               kfree(fbdefio);
+               kfree(fbops);
+               return -ENOMEM;
+       }
+
+       /* can't be offset from vaddr since dirty() uses cma_obj */
+       fbi->screen_buffer = cma_obj->vaddr;
+       /* fb_deferred_io_fault() needs a physical address */
+       fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
+
+       *fbops = *fbi->fbops;
+       fbi->fbops = fbops;
+
+       fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
+       fbdefio->deferred_io = drm_fb_helper_deferred_io;
+       fbi->fbdefio = fbdefio;
+       fb_deferred_io_init(fbi);
+       fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
+
+       return 0;
+}
+
+static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
+{
+       if (!fbi->fbdefio)
+               return;
+
+       fb_deferred_io_cleanup(fbi);
+       kfree(fbi->fbdefio);
+       kfree(fbi->fbops);
+}
+
+/*
+ * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
+ * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
+ */
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+       struct drm_fb_helper_surface_size *sizes,
+       const struct drm_framebuffer_funcs *funcs)
+{
+       struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+       struct drm_device *dev = helper->dev;
+       struct drm_gem_cma_object *obj;
+       struct drm_framebuffer *fb;
+       unsigned int bytes_per_pixel;
+       unsigned long offset;
+       struct fb_info *fbi;
+       size_t size;
+       int ret;
+
+       DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+                       sizes->surface_width, sizes->surface_height,
+                       sizes->surface_bpp);
+
+       bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+       mode_cmd.width = sizes->surface_width;
+       mode_cmd.height = sizes->surface_height * 2;
+       mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+               sizes->surface_depth);
+
+       size = mode_cmd.pitches[0] * mode_cmd.height;
+       obj = drm_gem_cma_create(dev, size);
+       if (IS_ERR(obj))
+               return -ENOMEM;
+
+       fbi = drm_fb_helper_alloc_fbi(helper);
+       if (IS_ERR(fbi)) {
+               ret = PTR_ERR(fbi);
+               goto err_gem_free_object;
+       }
+
+       fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
+       if (IS_ERR(fbdev_cma->fb)) {
+               dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+               ret = PTR_ERR(fbdev_cma->fb);
+               goto err_fb_info_destroy;
+       }
+
+       fb = &fbdev_cma->fb->fb;
+       helper->fb = fb;
+
+       fbi->par = helper;
+       fbi->flags = FBINFO_FLAG_DEFAULT;
+       fbi->fbops = &drm_fbdev_cma_ops;
+
+       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+       drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+       fbi->var.yres /= 2;
+
+       offset = fbi->var.xoffset * bytes_per_pixel;
+       offset += fbi->var.yoffset * fb->pitches[0];
+
+       dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+       fbi->screen_base = obj->vaddr + offset;
+       fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+       fbi->screen_size = size;
+       fbi->fix.smem_len = size;
+
+       if (funcs->dirty) {
+               ret = drm_fbdev_cma_defio_init(fbi, obj);
+               if (ret)
+                       goto err_cma_destroy;
+       }
+
+       return 0;
+
+err_cma_destroy:
+       drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+       drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+err_fb_info_destroy:
+       drm_fb_helper_release_fbi(helper);
+err_gem_free_object:
+       drm_gem_object_unreference_unlocked(&obj->base);
+       return ret;
+}
+EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+       struct drm_fb_helper_surface_size *sizes)
+{
+       return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
+}
+
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+       .fb_probe = drm_fbdev_cma_create,
+};
+
+/**
+ * drm_fbdev_cma_init_with_funcs() - Allocate and initialize a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ * @funcs: fb helper functions, in particular fb_probe()
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
+       unsigned int preferred_bpp, unsigned int num_crtc,
+       unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
+{
+       struct drm_fbdev_cma *fbdev_cma;
+       struct drm_fb_helper *helper;
+       int ret;
+
+       fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+       if (!fbdev_cma) {
+               dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       helper = &fbdev_cma->fb_helper;
+
+       drm_fb_helper_prepare(dev, helper, funcs);
+
+       ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+       if (ret < 0) {
+               dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+               goto err_free;
+       }
+
+       ret = drm_fb_helper_single_add_all_connectors(helper);
+       if (ret < 0) {
+               dev_err(dev->dev, "Failed to add connectors.\n");
+               goto err_drm_fb_helper_fini;
+
+       }
+
+       ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+       if (ret < 0) {
+               dev_err(dev->dev, "Failed to set initial hw configuration.\n");
+               goto err_drm_fb_helper_fini;
+       }
+
+       return fbdev_cma;
+
+err_drm_fb_helper_fini:
+       drm_fb_helper_fini(helper);
+err_free:
+       kfree(fbdev_cma);
+
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+
+/**
+ * drm_fbdev_cma_init() - Allocate and initialize a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+       unsigned int preferred_bpp, unsigned int num_crtc,
+       unsigned int max_conn_count)
+{
+       return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
+                               max_conn_count, &drm_fb_cma_helper_funcs);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+       drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+       if (fbdev_cma->fb_helper.fbdev)
+               drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
+       drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
+
+       if (fbdev_cma->fb) {
+               drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+               drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+       }
+
+       drm_fb_helper_fini(&fbdev_cma->fb_helper);
+       kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM drivers lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+       if (fbdev_cma)
+               drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM drivers output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+       if (fbdev_cma)
+               drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
+
+/**
+ * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend, which is a wrapper around
+ * fb_set_suspend implemented by fbdev core.
+ */
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+{
+       if (fbdev_cma)
+               drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper_nommu.c b/drivers/gpu/drm/drm_gem_cma_helper_nommu.c
new file mode 100644
index 0000000..57b7f2b
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper_nommu.c
@@ -0,0 +1,574 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_vma_manager.h>
+
+/**
+ * DOC: cma helpers
+ *
+ * The Contiguous Memory Allocator reserves a pool of memory at early boot
+ * that is used to service requests for large blocks of contiguous memory.
+ *
+ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
+ * objects that are physically contiguous in memory. This is useful for
+ * display drivers that are unable to map scattered buffers via an IOMMU.
+ */
+
+/**
+ * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates and initializes a GEM CMA object of the given size,
+ * but doesn't allocate any memory to back the object.
+ *
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+static struct drm_gem_cma_object *
+__drm_gem_cma_create(struct drm_device *drm, size_t size)
+{
+       struct drm_gem_cma_object *cma_obj;
+       struct drm_gem_object *gem_obj;
+       int ret;
+
+       /* Let the driver provide its own (possibly larger) object if it can. */
+       if (drm->driver->gem_create_object)
+               gem_obj = drm->driver->gem_create_object(drm, size);
+       else
+               gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+       if (!gem_obj)
+               return ERR_PTR(-ENOMEM);
+       cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
+
+       ret = drm_gem_object_init(drm, gem_obj, size);
+       if (ret)
+               goto error;
+
+       ret = drm_gem_create_mmap_offset(gem_obj);
+       if (ret) {
+               drm_gem_object_release(gem_obj);
+               goto error;
+       }
+
+       return cma_obj;
+
+error:
+       /*
+        * NOTE(review): when gem_create_object() allocated the object, plain
+        * kfree() here assumes that allocation is kfree()-compatible -- confirm.
+        */
+       kfree(cma_obj);
+       return ERR_PTR(ret);
+}
+
+/**
+ * drm_gem_cma_create - allocate an object with the given size
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ *
+ * This function creates a CMA GEM object and allocates a contiguous chunk of
+ * memory as backing store. The backing memory has the writecombine attribute
+ * set.
+ *
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+                                             size_t size)
+{
+       struct drm_gem_cma_object *cma_obj;
+       int ret;
+
+       size = round_up(size, PAGE_SIZE);
+
+       cma_obj = __drm_gem_cma_create(drm, size);
+       if (IS_ERR(cma_obj))
+               return cma_obj;
+
+       cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+                                     GFP_KERNEL | __GFP_NOWARN);
+       if (!cma_obj->vaddr) {
+               dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
+                       size);
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       return cma_obj;
+
+error:
+       /* Drop the last reference; free_object copes with vaddr == NULL. */
+       drm_gem_object_unreference_unlocked(&cma_obj->base);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/**
+ * drm_gem_cma_create_with_handle - allocate an object with the given size and
+ *     return a GEM handle to it
+ * @file_priv: DRM file-private structure to register the handle for
+ * @drm: DRM device
+ * @size: size of the object to allocate
+ * @handle: return location for the GEM handle
+ *
+ * This function creates a CMA GEM object, allocating a physically contiguous
+ * chunk of memory as backing store. The GEM object is then added to the list
+ * of object associated with the given file and a handle to it is returned.
+ *
+ * Returns:
+ * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+static struct drm_gem_cma_object *
+drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+                              struct drm_device *drm, size_t size,
+                              uint32_t *handle)
+{
+       struct drm_gem_cma_object *cma_obj;
+       struct drm_gem_object *gem_obj;
+       int ret;
+
+       cma_obj = drm_gem_cma_create(drm, size);
+       if (IS_ERR(cma_obj))
+               return cma_obj;
+
+       gem_obj = &cma_obj->base;
+
+       /*
+        * allocate a id of idr table where the obj is registered
+        * and handle has the id what user can see.
+        */
+       ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+       /* drop reference from allocate - handle holds it now. */
+       drm_gem_object_unreference_unlocked(gem_obj);
+       if (ret)
+               return ERR_PTR(ret);
+
+       /* Only valid as long as the handle keeps its reference alive. */
+       return cma_obj;
+}
+
+/**
+ * drm_gem_cma_free_object - free resources associated with a CMA GEM object
+ * @gem_obj: GEM object to free
+ *
+ * This function frees the backing memory of the CMA GEM object, cleans up the
+ * GEM object state and frees the memory used to store the object itself.
+ * Drivers using the CMA helpers should set this as their DRM driver's
+ * ->gem_free_object() callback.
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+       struct drm_gem_cma_object *cma_obj;
+
+       cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+       if (cma_obj->vaddr) {
+               /* locally allocated buffer: release the contiguous memory */
+               dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+                           cma_obj->vaddr, cma_obj->paddr);
+       } else if (gem_obj->import_attach) {
+               /* imported via PRIME: tear down the attachment instead */
+               drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+       }
+
+       drm_gem_object_release(gem_obj);
+
+       kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/**
+ * drm_gem_cma_dumb_create_internal - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This aligns the pitch and size arguments to the minimum required. This is
+ * an internal helper that can be wrapped by a driver to account for hardware
+ * with more specific alignment requirements. It should not be used directly
+ * as the ->dumb_create() callback in a DRM driver.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+                                    struct drm_device *drm,
+                                    struct drm_mode_create_dumb *args)
+{
+       unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       struct drm_gem_cma_object *cma_obj;
+
+       if (args->pitch < min_pitch)
+               args->pitch = min_pitch;
+
+       /* NOTE(review): args->pitch * args->height can overflow for huge
+        * user-supplied dimensions -- confirm callers bound these values.
+        */
+       if (args->size < args->pitch * args->height)
+               args->size = args->pitch * args->height;
+
+       cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+                                                &args->handle);
+       return PTR_ERR_OR_ZERO(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
+
+/**
+ * drm_gem_cma_dumb_create - create a dumb buffer object
+ * @file_priv: DRM file-private structure to create the dumb buffer for
+ * @drm: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their ->dumb_create() callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace and pass the IOCTL data along to the
+ * drm_gem_cma_dumb_create_internal() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+                           struct drm_device *drm,
+                           struct drm_mode_create_dumb *args)
+{
+       struct drm_gem_cma_object *cma_obj;
+
+       /* NOTE(review): pitch and size computations can overflow for huge
+        * user-supplied dimensions -- confirm callers bound these values.
+        */
+       args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       args->size = args->pitch * args->height;
+
+       cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+                                                &args->handle);
+       return PTR_ERR_OR_ZERO(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
+
+/**
+ * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
+ *     object
+ * @file_priv: DRM file-private structure containing the GEM object
+ * @drm: DRM device
+ * @handle: GEM object handle
+ * @offset: return location for the fake mmap offset
+ *
+ * This function look up an object by its handle and returns the fake mmap
+ * offset associated with it. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->dumb_map_offset() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+                               struct drm_device *drm, u32 handle,
+                               u64 *offset)
+{
+       struct drm_gem_object *gem_obj;
+
+       gem_obj = drm_gem_object_lookup(file_priv, handle);
+       if (!gem_obj) {
+               dev_err(drm->dev, "failed to lookup GEM object\n");
+               return -EINVAL;
+       }
+
+       *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+       /* lookup took a reference; drop it now that the offset is copied out */
+       drm_gem_object_unreference_unlocked(gem_obj);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+/* No .fault handler: drm_gem_cma_mmap() maps the whole buffer up front. */
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
+                               struct vm_area_struct *vma)
+{
+       /*
+        * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+        * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+        * the whole buffer.
+        */
+       vma->vm_flags &= ~VM_PFNMAP;
+       vma->vm_pgoff = 0;
+
+       /*
+        * vm_iomap_memory() expects the physical base of the buffer as its
+        * second argument, not a virtual address: map the CMA backing store
+        * at cma_obj->paddr instead of the user address vma->vm_start.
+        */
+       return vm_iomap_memory(vma, cma_obj->paddr,
+                              vma->vm_end - vma->vm_start);
+}
+
+/**
+ * drm_gem_cma_mmap - memory-map a CMA GEM object
+ * @filp: file object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for CMA objects: In addition to the usual GEM VMA setup it
+ * immediately faults in the entire object instead of using on-demand
+ * faulting. Drivers which employ the CMA helpers should use this function
+ * as their ->mmap() handler in the DRM device file's file_operations
+ * structure.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_gem_cma_object *cma_obj;
+       struct drm_gem_object *gem_obj;
+       int ret;
+
+       /* generic GEM setup resolves the object into vma->vm_private_data */
+       ret = drm_gem_mmap(filp, vma);
+       if (ret)
+               return ret;
+
+       gem_obj = vma->vm_private_data;
+       cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+       return drm_gem_cma_mmap_obj(cma_obj, vma);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+
+/**
+ * drm_gem_cma_get_unmapped_area - return an address userspace can use to
+ *     access a CMA GEM object (!MMU)
+ * @filp: file object
+ * @addr: requested address (unused)
+ * @len: length of the mapping, in bytes
+ * @pgoff: fake mmap offset of the GEM object, in pages
+ * @flags: mapping flags (unused)
+ *
+ * Looks up the GEM object that owns the fake mmap offset @pgoff and returns
+ * the kernel virtual address of its backing store, so that on MMU-less
+ * platforms userspace accesses the buffer through the same address the
+ * kernel uses.
+ *
+ * Returns:
+ * The buffer's virtual address on success, or a negative error code
+ * (-ENODEV, -EINVAL, -EACCES) on failure.
+ */
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+                                           unsigned long addr,
+                                           unsigned long len,
+                                           unsigned long pgoff,
+                                           unsigned long flags)
+{
+       struct drm_gem_cma_object *cma_obj;
+       struct drm_gem_object *obj = NULL;
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_vma_offset_node *node;
+
+       if (drm_device_is_unplugged(dev))
+               return -ENODEV;
+
+       drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+       node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+                                                 pgoff,
+                                                 len >> PAGE_SHIFT);
+       if (likely(node)) {
+               obj = container_of(node, struct drm_gem_object, vma_node);
+               /*
+                * When the object is being freed, after it hits 0-refcnt it
+                * proceeds to tear down the object. In the process it will
+                * attempt to remove the VMA offset and so acquire this
+                * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
+                * that matches our range, we know it is in the process of being
+                * destroyed and will be freed as soon as we release the lock -
+                * so we have to check for the 0-refcnted object and treat it as
+                * invalid.
+                */
+               if (!kref_get_unless_zero(&obj->refcount))
+                       obj = NULL;
+       }
+
+       drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+       if (!obj)
+               return -EINVAL;
+
+       if (!drm_vma_node_is_allowed(node, priv)) {
+               drm_gem_object_unreference_unlocked(obj);
+               return -EACCES;
+       }
+
+       cma_obj = to_drm_gem_cma_obj(obj);
+
+       drm_gem_object_unreference_unlocked(obj);
+
+       return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * drm_gem_cma_describe - describe a CMA GEM object for debugfs
+ * @cma_obj: CMA GEM object
+ * @m: debugfs file handle
+ *
+ * This function can be used to dump a human-readable representation of the
+ * CMA GEM object into a synthetic file.
+ */
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
+                         struct seq_file *m)
+{
+       struct drm_gem_object *obj = &cma_obj->base;
+       uint64_t off;
+
+       off = drm_vma_node_start(&obj->vma_node);
+
+       /* columns: name, refcount, fake mmap offset, paddr, vaddr, size */
+       seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
+                       obj->name, obj->refcount.refcount.counter,
+                       off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+       seq_printf(m, "\n");
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+#endif
+
+/**
+ * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
+ *     pages for a CMA GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API. Drivers using the CMA helpers should
+ * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+       struct sg_table *sgt;
+       int ret;
+
+       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return NULL;
+
+       /* describe the contiguous backing memory to the importer */
+       ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
+                             cma_obj->paddr, obj->size);
+       if (ret < 0)
+               goto out;
+
+       return sgt;
+
+out:
+       kfree(sgt);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
+
+/**
+ * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
+ *     driver's scatter/gather table of pinned pages
+ * @dev: device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Imported buffers must be physically contiguous in memory
+ * (i.e. the scatter/gather table must contain a single entry). Drivers that
+ * use the CMA helpers should set this as their DRM driver's
+ * ->gem_prime_import_sg_table() callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+                                 struct dma_buf_attachment *attach,
+                                 struct sg_table *sgt)
+{
+       struct drm_gem_cma_object *cma_obj;
+
+       /* CMA objects must be contiguous: reject multi-entry tables. */
+       if (sgt->nents != 1)
+               return ERR_PTR(-EINVAL);
+
+       /* Create a CMA GEM buffer. */
+       cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
+       if (IS_ERR(cma_obj))
+               return ERR_CAST(cma_obj);
+
+       cma_obj->paddr = sg_dma_address(sgt->sgl);
+       cma_obj->sgt = sgt;
+
+       DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr,
+                       attach->dmabuf->size);
+
+       return &cma_obj->base;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
+
+/**
+ * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer imported via DRM PRIME into a userspace
+ * process's address space. Drivers that use the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_mmap() callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
+                          struct vm_area_struct *vma)
+{
+       struct drm_gem_cma_object *cma_obj;
+       int ret;
+
+       /* generic GEM VMA setup first, then map the contiguous buffer */
+       ret = drm_gem_mmap_obj(obj, obj->size, vma);
+       if (ret < 0)
+               return ret;
+
+       cma_obj = to_drm_gem_cma_obj(obj);
+       return drm_gem_cma_mmap_obj(cma_obj, vma);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
+
+/**
+ * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
+ *     address space
+ * @obj: GEM object
+ *
+ * This function maps a buffer exported via DRM PRIME into the kernel's
+ * virtual address space. Since the CMA buffers are already mapped into the
+ * kernel virtual address space this simply returns the cached virtual
+ * address. Drivers using the CMA helpers should set this as their DRM
+ * driver's ->gem_prime_vmap() callback.
+ *
+ * Returns:
+ * The kernel virtual address of the CMA GEM object's backing store.
+ */
+void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
+{
+       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+       /* CMA buffers keep a permanent kernel mapping; just return it. */
+       return cma_obj->vaddr;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
+
+/**
+ * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
+ *     address space
+ * @obj: GEM object
+ * @vaddr: kernel virtual address where the CMA GEM object was mapped
+ *
+ * This function removes a buffer exported via DRM PRIME from the kernel's
+ * virtual address space. This is a no-op because CMA buffers cannot be
+ * unmapped from kernel space. Drivers using the CMA helpers should set this
+ * as their DRM driver's ->gem_prime_vunmap() callback.
+ */
+void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       /* Nothing to do: the kernel mapping persists for the object's lifetime. */
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index acd6af8..ddb2390 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -53,6 +53,14 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct 
drm_device *drm,

 extern const struct vm_operations_struct drm_gem_cma_vm_ops;

+/*
+ * Kconfig symbols are exposed to the preprocessor with a CONFIG_ prefix;
+ * testing the bare symbol name would make this guard always false and the
+ * prototype would never be declared.
+ */
+#ifdef CONFIG_DRM_GEM_CMA_HELPER_NOMMU
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+                                           unsigned long addr,
+                                           unsigned long len,
+                                           unsigned long pgoff,
+                                           unsigned long flags);
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
-- 
1.9.1

Reply via email to