Probe the VXD392 on the Baytrail platform and create a new DRM
device for hardware-accelerated video decoding.
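
For reviewers, a minimal sketch of the in-kernel allocation path added
here (error handling elided; "dev" stands for the ipvr drm_device):

    struct drm_ipvr_gem_object *obj;
    unsigned long gpu_offset;
    void *vaddr;

    /* allocate a bo and bind it into the VED MMU */
    obj = ipvr_gem_obj_create_and_bind(dev, 64 * 1024);
    /* device-side address to be used in VED commands */
    gpu_offset = ipvr_gem_obj_mmu_offset(obj);
    /* contiguous kernel CPU mapping of the bo pages */
    vaddr = ipvr_gem_object_vmap(obj);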

Signed-off-by: Yao Cheng <yao.cheng at intel.com>
---
 drivers/gpu/drm/Kconfig            |    2 +
 drivers/gpu/drm/Makefile           |    1 +
 drivers/gpu/drm/ipvr/Kconfig       |   16 +
 drivers/gpu/drm/ipvr/Makefile      |   21 +
 drivers/gpu/drm/ipvr/ipvr_buffer.c | 1177 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/ipvr/ipvr_buffer.h |  184 ++++++
 drivers/gpu/drm/ipvr/ipvr_debug.c  |  263 ++++++++
 drivers/gpu/drm/ipvr/ipvr_debug.h  |   50 ++
 drivers/gpu/drm/ipvr/ipvr_drm.h    |  265 ++++++++
 drivers/gpu/drm/ipvr/ipvr_drv.c    |  776 ++++++++++++++++++++++
 drivers/gpu/drm/ipvr/ipvr_drv.h    |  464 +++++++++++++
 drivers/gpu/drm/ipvr/ipvr_exec.c   |  530 +++++++++++++++
 drivers/gpu/drm/ipvr/ipvr_exec.h   |   68 ++
 drivers/gpu/drm/ipvr/ipvr_fence.c  |  550 ++++++++++++++++
 drivers/gpu/drm/ipvr/ipvr_fence.h  |   68 ++
 drivers/gpu/drm/ipvr/ipvr_gem.c    |  248 +++++++
 drivers/gpu/drm/ipvr/ipvr_gem.h    |   66 ++
 drivers/gpu/drm/ipvr/ipvr_mmu.c    |  807 +++++++++++++++++++++++
 drivers/gpu/drm/ipvr/ipvr_mmu.h    |  135 ++++
 drivers/gpu/drm/ipvr/ipvr_trace.c  |   11 +
 drivers/gpu/drm/ipvr/ipvr_trace.h  |  296 +++++++++
 drivers/gpu/drm/ipvr/ved_cmd.c     | 1269 ++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/ipvr/ved_cmd.h     |  104 +++
 drivers/gpu/drm/ipvr/ved_ec.c      |  584 +++++++++++++++++
 drivers/gpu/drm/ipvr/ved_ec.h      |  207 ++++++
 drivers/gpu/drm/ipvr/ved_fw.c      |  660 +++++++++++++++++++
 drivers/gpu/drm/ipvr/ved_fw.h      |   73 +++
 drivers/gpu/drm/ipvr/ved_init.c    |  829 +++++++++++++++++++++++
 drivers/gpu/drm/ipvr/ved_init.h    |   61 ++
 drivers/gpu/drm/ipvr/ved_msg.h     |  364 +++++++++++
 drivers/gpu/drm/ipvr/ved_pm.c      |  392 +++++++++++
 drivers/gpu/drm/ipvr/ved_pm.h      |   55 ++
 drivers/gpu/drm/ipvr/ved_reg.h     |  609 +++++++++++++++++
 33 files changed, 11205 insertions(+)
 create mode 100644 drivers/gpu/drm/ipvr/Kconfig
 create mode 100644 drivers/gpu/drm/ipvr/Makefile
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_buffer.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_buffer.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_debug.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_debug.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_drm.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_drv.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_drv.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_exec.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_exec.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_fence.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_fence.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_gem.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_gem.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_mmu.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_mmu.h
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_trace.c
 create mode 100644 drivers/gpu/drm/ipvr/ipvr_trace.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_cmd.c
 create mode 100644 drivers/gpu/drm/ipvr/ved_cmd.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_ec.c
 create mode 100644 drivers/gpu/drm/ipvr/ved_ec.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_fw.c
 create mode 100644 drivers/gpu/drm/ipvr/ved_fw.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_init.c
 create mode 100644 drivers/gpu/drm/ipvr/ved_init.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_msg.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_pm.c
 create mode 100644 drivers/gpu/drm/ipvr/ved_pm.h
 create mode 100644 drivers/gpu/drm/ipvr/ved_reg.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e3b4b0f..ad7585d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -165,6 +165,8 @@ config DRM_SAVAGE
          Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
          chipset. If M is selected the module will be called savage.

+source "drivers/gpu/drm/ipvr/Kconfig"
+
 source "drivers/gpu/drm/exynos/Kconfig"

 source "drivers/gpu/drm/vmwgfx/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9292a76..8ec4bda 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)  += mga/
 obj-$(CONFIG_DRM_I810) += i810/
 obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_IPVR)  += ipvr/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
 obj-$(CONFIG_DRM_SIS)   += sis/
diff --git a/drivers/gpu/drm/ipvr/Kconfig b/drivers/gpu/drm/ipvr/Kconfig
new file mode 100644
index 0000000..6516590
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/Kconfig
@@ -0,0 +1,16 @@
+config DRM_IPVR
+       tristate "Baytrail VP8 video decode driver"
+       depends on DRM
+       depends on DRM_I915
+       select SHMEM
+       select TMPFS
+       default m
+       help
+         Choose this option if you want to enable hardware-accelerated
+         VP8 decoding on the Baytrail platform.
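+         If M is selected the module will be called ipvr.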
+
+config DRM_IPVR_EC
+       bool "[EXPERIMENTAL] error concealment for VP8"
+       depends on DRM_IPVR
+       default y
+       help
+         Choose this option if you want to enable hardware error
+         concealment for IPVR VP8 decoding.
diff --git a/drivers/gpu/drm/ipvr/Makefile b/drivers/gpu/drm/ipvr/Makefile
new file mode 100644
index 0000000..e25a7d3
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/Makefile
@@ -0,0 +1,21 @@
+
+ccflags-y := -Iinclude/drm
+
+ipvr-y := \
+        ipvr_gem.o \
+        ipvr_drv.o \
+        ipvr_buffer.o \
+        ipvr_exec.o \
+        ipvr_fence.o \
+        ipvr_mmu.o \
+        ipvr_debug.o \
+        ipvr_trace.o \
+        ved_pm.o \
+        ved_ec.o \
+        ved_cmd.o \
+        ved_fw.o \
+        ved_init.o
+
+obj-$(CONFIG_DRM_IPVR) += ipvr.o
+
+CFLAGS_ipvr_trace.o := -I$(src)
diff --git a/drivers/gpu/drm/ipvr/ipvr_buffer.c b/drivers/gpu/drm/ipvr/ipvr_buffer.c
new file mode 100644
index 0000000..a2fe587
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_buffer.c
@@ -0,0 +1,1177 @@
+/**************************************************************************
+ * ipvr_buffer.c: IPVR buffer creation, destruction, mapping, etc.
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#include <drmP.h>
+#include "ipvr_buffer.h"
+#include "ipvr_trace.h"
+
+extern int fake_bo_debug;
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. ipvr_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * ipvr_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
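+/* Typical pairing: ipvr_gem_object_bind_to_vm() does
+ *     ipvr_gem_object_get_pages(obj);
+ *     ipvr_gem_object_pin_pages(obj);
+ * while ipvr_gem_object_vma_unbind() drops the pin, and the pages are
+ * finally returned via ipvr_gem_object_put_pages() on object release.
+ */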
+/* for fake debug, bos are allocated with alloc_page() instead of an sg table */
+int32_t ipvr_gem_object_get_pages(struct drm_ipvr_gem_object *obj)
+{
+       struct drm_ipvr_private *dev_priv = obj->base.dev->dev_private;
+       const struct drm_ipvr_gem_object_ops *ops = obj->ops;
+       int32_t ret, r;
+
+       if (obj->sg_table)
+               return 0;
+
+       BUG_ON(obj->pages_pin_count);
+
+       if (fake_bo_debug) {
+               gfp_t gfp_flags = GFP_USER | GFP_DMA32 | __GFP_IO;
+               struct page *p = NULL;
+               obj->fake_pages =
+                       drm_calloc_large(obj->base.size / 4096, sizeof(void*));
+               IPVR_DEBUG_GENERAL("fake call alloc_page to alloc pages.\n");
+               for (r = 0; r < obj->base.size / 4096; ++r) {
+                       p = alloc_page(gfp_flags);
+                       if (!p) {
+                               IPVR_ERROR("Unable to allocate page\n");
+                               return -ENOMEM;
+                       }
+                       obj->fake_pages[r] = p;
+                       switch (obj->cache_level) {
+                       case IPVR_CACHE_NONE:
+                               ret = set_pages_uc(p, 1);
+                               break;
+                       case IPVR_CACHE_WC:
+                               ret = set_memory_wc(
+                                       (unsigned long)page_address(p), 1);
+                               break;
+                       case IPVR_CACHE_WB:
+                               ret = set_pages_wb(p, 1);
+                               break;
+                       default:
+                               ret = -EINVAL;
+                               break;
+                       }
+                       if (ret) {
+                               IPVR_DEBUG_WARN("failed to set page cache.\n");
+                               return -ENOMEM;
+                       }
+               }
+       }
+       /* will call ipvr_gem_object_get_pages_mmu */
+       ret = ops->get_pages(obj);
+       if (ret) {
+               IPVR_ERROR("failed to call ops->get_pages\n");
+               return ret;
+       }
+
+       /* do we need lock here? */
+       list_add_tail(&obj->global_list, &dev_priv->ipvr_mm.unbound_list);
+       return 0;
+}
+
+int32_t ipvr_gem_object_put_pages(struct drm_ipvr_gem_object *obj)
+{
+       const struct drm_ipvr_gem_object_ops *ops = obj->ops;
+
+       if (obj->sg_table == NULL)
+               return 0;
+
+       if (obj->pages_pin_count)
+               return -EBUSY;
+
+       list_del(&obj->global_list);
+
+       ops->put_pages(obj);
+       obj->sg_table = NULL;
+
+       return 0;
+}
+
+static int32_t ipvr_gem_object_get_pages_sg(struct drm_ipvr_gem_object *obj)
+{
+       uint32_t page_count, i;
+       struct address_space *mapping;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       struct sg_page_iter sg_iter;
+       struct page *page = NULL;
+       struct page **page_array = NULL;
+       int carray = 0;
+       unsigned long last_pfn = 0;     /* suppress gcc warning */
+       gfp_t gfp;
+       int32_t ret;
+
+       /* Assert that the object is not currently in any GPU domain. As it
+        * wasn't in the MMU, there shouldn't be any way it could have been in
+        * a GPU cache
+        */
+       BUG_ON(obj->base.read_domains & IPVR_GEM_DOMAIN_GPU);
+       BUG_ON(obj->base.write_domain & IPVR_GEM_DOMAIN_GPU);
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (st == NULL)
+               return -ENOMEM;
+
+       page_count = obj->base.size / PAGE_SIZE;
+
+       if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+               sg_free_table(st);
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       page_array = kmalloc(page_count * sizeof(struct page *), GFP_KERNEL);
+       if (unlikely(!page_array)) {
+               sg_free_table(st);
+               kfree(st);
+               return -ENOMEM;
+       }
+       /* Get the list of pages out of our struct file.  They'll be pinned
+        * at this point until we release them.
+        *
+        * Fail silently without starting the shrinker
+        */
+       mapping = file_inode(obj->base.filp)->i_mapping;
+       gfp = mapping_gfp_mask(mapping);
+       /* todo: need set correct gfp */
+       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+       gfp |= GFP_DMA32 | __GFP_IO;
+       sg = st->sgl;
+       st->nents = 0;
+       for (i = 0; i < page_count; i++) {
+               page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+               if (IS_ERR(page)) {
+                       /* no shrinker here: ipvr has a 4GiB address space */
+                       ret = PTR_ERR(page);
+                       goto err_pages;
+               }
+               page_array[carray++] = page;
+
+#ifdef CONFIG_SWIOTLB
+               if (swiotlb_nr_tbl()) {
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+                       sg = sg_next(sg);
+                       continue;
+               }
+#endif
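+               /* coalesce physically contiguous pages into a single sg entry */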
+               if (!i || page_to_pfn(page) != last_pfn + 1) {
+                       if (i)
+                               sg = sg_next(sg);
+                       st->nents++;
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+               } else {
+                       sg->length += PAGE_SIZE;
+               }
+               last_pfn = page_to_pfn(page);
+       }
+#ifdef CONFIG_SWIOTLB
+       if (!swiotlb_nr_tbl())
+#endif
+               sg_mark_end(sg);
+       obj->sg_table = st;
+
+       switch (obj->cache_level) {
+       case IPVR_CACHE_NONE:
+               ret = set_pages_array_uc(page_array, carray);
+               break;
+       case IPVR_CACHE_WC:
+               ret = set_pages_array_wc(page_array, carray);
+               break;
+       case IPVR_CACHE_WB:
+               ret = set_pages_array_wb(page_array, carray);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       if (ret) {
+               IPVR_DEBUG_WARN("failed to set page cache.\n");
+               obj->sg_table = NULL;
+               goto err_pages;
+       }
+       kfree(page_array);
+       return 0;
+
+err_pages:
+       sg_mark_end(sg);
+       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+               page_cache_release(sg_page_iter_page(&sg_iter));
+       sg_free_table(st);
+       kfree(st);
+       if (page_array)
+               kfree(page_array);
+       return ret;
+}
+
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+                                       enum ipvr_cache_level level)
+{
+       /* todo: need check if cache snoop is enabled */
+       if (level == IPVR_CACHE_WB)
+               return false;
+       else
+               return true;
+}
+
+bool ipvr_gem_clflush_object(struct drm_ipvr_gem_object *obj, bool force)
+{
+       /* If we don't have a page list set up, then we're not pinned
+        * to GPU, and we can ignore the cache flush because it'll happen
+        * again at bind time.
+        */
+       if (obj->sg_table == NULL)
+               return false;
+
+       /* If the GPU is snooping the contents of the CPU cache,
+        * we do not need to manually clear the CPU cache lines.  However,
+        * the caches are only snooped when the render cache is
+        * flushed/invalidated.  As we always have to emit invalidations
+        * and flushes when moving into and out of the RENDER domain, correct
+        * snooping behaviour occurs naturally as the result of our domain
+        * tracking.
+        */
+       if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+               return false;
+
+       drm_clflush_sg(obj->sg_table);
+
+       return true;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int32_t
+ipvr_gem_object_set_to_cpu_domain(struct drm_ipvr_gem_object *obj, bool write)
+{
+       uint32_t old_write_domain, old_read_domains;
+       int32_t ret;
+
+       if (obj->base.write_domain == IPVR_GEM_DOMAIN_CPU)
+               return 0;
+
+       ret = ipvr_fence_wait(obj->fence, true, false);
+       if (ret)
+               return ret;
+
+       old_write_domain = obj->base.write_domain;
+       old_read_domains = obj->base.read_domains;
+
+       /* Flush the CPU cache if it's still invalid. */
+       if ((obj->base.read_domains & IPVR_GEM_DOMAIN_CPU) == 0) {
+               ipvr_gem_clflush_object(obj, false);
+
+               obj->base.read_domains |= IPVR_GEM_DOMAIN_CPU;
+       }
+
+       /* It should now be out of any other write domains, and we can update
+        * the domain values for our changes.
+        */
+       BUG_ON((obj->base.write_domain & ~IPVR_GEM_DOMAIN_CPU) != 0);
+
+       /* If we're writing through the CPU, then the GPU read domains will
+        * need to be invalidated at next use.
+        */
+       if (write) {
+               obj->base.read_domains = IPVR_GEM_DOMAIN_CPU;
+               obj->base.write_domain = IPVR_GEM_DOMAIN_CPU;
+       }
+
+       return 0;
+}
+
+static void ipvr_gem_object_put_pages_sg(struct drm_ipvr_gem_object *obj)
+{
+       struct sg_page_iter sg_iter;
+       int32_t ret;
+
+       ret = ipvr_gem_object_set_to_cpu_domain(obj, true);
+       if (ret) {
+               /* In the event of a disaster, abandon all caches and
+                * hope for the best.
+                */
+               WARN_ON(ret != -EIO);
+               ipvr_gem_clflush_object(obj, true);
+               obj->base.read_domains = IPVR_GEM_DOMAIN_CPU;
+               obj->base.write_domain = IPVR_GEM_DOMAIN_CPU;
+       }
+
+       IPVR_DEBUG_GENERAL("start to free pages.\n");
+       for_each_sg_page(obj->sg_table->sgl,
+                       &sg_iter, obj->sg_table->nents, 0) {
+               struct page *page = sg_page_iter_page(&sg_iter);
+
+               if (obj->dirty)
+                       set_page_dirty(page);
+               /* need set back to wb before release page */
+               ret = set_pages_wb(page, 1);
+               if (ret)
+                       IPVR_ERROR("failed to set page as wb.\n");
+               page_cache_release(page);
+       }
+       obj->dirty = 0;
+
+       sg_free_table(obj->sg_table);
+       kfree(obj->sg_table);
+}
+
+static const struct drm_ipvr_gem_object_ops ipvr_gem_object_ops = {
+       .get_pages = ipvr_gem_object_get_pages_sg,
+       .put_pages = ipvr_gem_object_put_pages_sg,
+};
+
+/* All the new VM stuff */
+unsigned long ipvr_gem_obj_mmu_offset(struct drm_ipvr_gem_object *obj)
+{
+       struct drm_ipvr_private *dev_priv = obj->base.dev->dev_private;
+       struct ipvr_vma *vma;
+
+       struct ipvr_address_space *vm = &dev_priv->addr_space;
+
+       BUG_ON(list_empty(&obj->vma_list));
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               /* todo: for userptr bo, consider offset within the page in future */
+               if (vma->vm == vm) {
+                       IPVR_DEBUG_GENERAL("node start is 0x%lx.\n",
+                                       vma->node.start);
+                       return vma->node.start;
+               }
+       }
+
+       IPVR_ERROR("failed to find vma corresponding to this bo.\n");
+       return IPVR_ERR_OFFSET(-EINVAL);
+}
+
+void *ipvr_gem_object_alloc(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       if (dev_priv == NULL)
+               IPVR_ERROR("error: dev_prive is NULL.\n");
+       return kmem_cache_alloc(dev_priv->ipvr_bo_slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void ipvr_gem_object_free(struct drm_ipvr_gem_object *obj)
+{
+       struct drm_ipvr_private *dev_priv = obj->base.dev->dev_private;
+       kmem_cache_free(dev_priv->ipvr_bo_slab, obj);
+}
+
+/* some bookkeeping */
+static void
+ipvr_gem_info_add_obj(struct drm_ipvr_private *dev_priv, size_t size)
+{
+       spin_lock(&dev_priv->ipvr_mm.object_stat_lock);
+       dev_priv->ipvr_mm.object_count++;
+       dev_priv->ipvr_mm.object_memory += size;
+       spin_unlock(&dev_priv->ipvr_mm.object_stat_lock);
+}
+
+static void
+ipvr_gem_info_remove_obj(struct drm_ipvr_private *dev_priv, size_t size)
+{
+       spin_lock(&dev_priv->ipvr_mm.object_stat_lock);
+       dev_priv->ipvr_mm.object_count--;
+       dev_priv->ipvr_mm.object_memory -= size;
+       spin_unlock(&dev_priv->ipvr_mm.object_stat_lock);
+}
+
+void ipvr_gem_object_init(struct drm_ipvr_gem_object *obj,
+                               const struct drm_ipvr_gem_object_ops *ops)
+{
+       INIT_LIST_HEAD(&obj->global_list);
+       INIT_LIST_HEAD(&obj->vma_list);
+
+       obj->ops = ops;
+
+       ipvr_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+struct drm_ipvr_gem_object *
+ipvr_gem_alloc_object(struct drm_device *dev, size_t size)
+{
+       struct drm_ipvr_gem_object *obj;
+       struct address_space *mapping;
+       gfp_t mask;
+
+       obj = ipvr_gem_object_alloc(dev);
+       if (obj == NULL)
+               return NULL;
+       memset(obj, 0, sizeof(*obj));
+
+       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+               ipvr_gem_object_free(obj);
+               return NULL;
+       }
+
+       init_waitqueue_head(&obj->event_queue);
+       atomic_set(&obj->cpu_writers, 0);
+       /* todo: need set correct mask */
+       mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+
+       /* ipvr cannot relocate objects above 4GiB. */
+       mask &= ~__GFP_HIGHMEM;
+       mask |= __GFP_DMA32;
+
+       mapping = file_inode(obj->base.filp)->i_mapping;
+       mapping_set_gfp_mask(mapping, mask);
+
+       ipvr_gem_object_init(obj, &ipvr_gem_object_ops);
+
+       obj->base.write_domain = IPVR_GEM_DOMAIN_CPU;
+       obj->base.read_domains = IPVR_GEM_DOMAIN_CPU;
+
+       return obj;
+}
+
+int32_t ipvr_gem_mmu_bind_object(struct drm_ipvr_gem_object *obj)
+{
+       struct drm_ipvr_private *dev_priv = obj->base.dev->dev_private;
+       int32_t ret, type = 0;
+       const unsigned long entry = ipvr_gem_obj_mmu_offset(obj);
+
+       if (IPVR_IS_ERR(entry)) {
+               return IPVR_OFFSET_ERR(entry);
+       }
+
+       IPVR_DEBUG_GENERAL("entry is 0x%lx, size is %zu, nents is %d.\n",
+                       entry, obj->base.size, obj->sg_table->nents);
+
+       /* todo: it is better also pass RO, WO info */
+       type = (obj->cache_level == IPVR_CACHE_WB) ?
+                                               IPVR_MMU_CACHED_MEMORY : 0;
+
+       /* should not use dev_priv->pf_pd */
+       if (fake_bo_debug) {
+               ret = ipvr_mmu_insert_pages(dev_priv->mmu->default_pd,
+                       obj->fake_pages,
+                       entry, obj->base.size >> PAGE_SHIFT,
+                       0,
+                       0, type);
+       } else {
+               uint32_t i = 0;
+               struct page **pages;
+               struct sg_page_iter sg_iter;
+               pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT,
+                               sizeof(*pages));
+               if (pages == NULL) {
+                       IPVR_ERROR("Failed to get space for pages\n");
+                       return -ENOMEM;
+               }
+               for_each_sg_page(obj->sg_table->sgl, &sg_iter,
+                               obj->sg_table->nents, 0)
+                       pages[i++] = sg_page_iter_page(&sg_iter);
+               ret = ipvr_mmu_insert_pages(dev_priv->mmu->default_pd,
+                       pages, entry, obj->base.size >> PAGE_SHIFT,
+                       0, 0, type);
+               if (pages)
+                       drm_free_large(pages);
+       }
+       return ret;
+}
+
+int32_t ipvr_gem_create(struct drm_file *file_priv, struct drm_device *dev,
+                       uint64_t size, uint32_t tiling, uint32_t cache_level,
+                       uint64_t *rsize_p, uint32_t *handle_p,
+                       uint64_t *offset_p)
+{
+       struct drm_ipvr_gem_object *obj;
+       int32_t ret;
+       uint32_t handle;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       IPVR_DEBUG_GENERAL("create bo size is %lld, tiling is %d, "
+                       "cache level is %d.\n",
+                       size, tiling, cache_level);
+
+       size = roundup(size, PAGE_SIZE);
+       if (size == 0)
+               return -EINVAL;
+       *rsize_p = size;
+
+       /* Allocate the new object */
+       obj = ipvr_gem_alloc_object(dev, size);
+       if (obj == NULL)
+               return -ENOMEM;
+       obj->drv_name = "ipvr";
+       obj->fence = NULL;
+       obj->tiling = tiling;
+       obj->cache_level = cache_level;
+       ret = drm_gem_handle_create(file_priv, &obj->base, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(&obj->base);
+       if (ret)
+               return ret;
+       *handle_p = handle;
+
+       /* force to get pages at allocation stage */
+       ret = ipvr_gem_object_bind_to_vm(obj, &dev_priv->addr_space, 4096);
+       if (ret) {
+               ipvr_gem_free_object(&obj->base);
+               return ret;
+       }
+
+       ret = ipvr_gem_mmu_bind_object(obj);
+       if (ret) {
+               ipvr_gem_free_object(&obj->base);
+               return ret;
+       }
+       *offset_p = ipvr_gem_obj_mmu_offset(obj);
+       if (IPVR_IS_ERR(*offset_p))
+               return IPVR_OFFSET_ERR(*offset_p);
+
+       IPVR_DEBUG_GENERAL("bo create done, gpu offset: 0x%llx.\n", *offset_p);
+
+       trace_ipvr_gem_create(obj, *offset_p);
+
+       return 0;
+}
+
+struct drm_ipvr_gem_object *
+ipvr_gem_obj_create_and_bind(struct drm_device *dev, size_t size)
+{
+       int32_t ret;
+       struct drm_ipvr_gem_object *obj;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       size = roundup(size, PAGE_SIZE);
+       if (size == 0) {
+               IPVR_ERROR("Passed size is 0, not correct.\n");
+               return NULL;
+       }
+
+       /* Allocate the new object */
+       obj = ipvr_gem_alloc_object(dev, size);
+       if (obj == NULL) {
+               IPVR_ERROR("Failed to allocate ipvr object.\n");
+               return NULL;
+       }
+
+       ret = ipvr_gem_object_bind_to_vm(obj, &dev_priv->addr_space, 4096);
+       if (ret) {
+               IPVR_ERROR("Failed to bind obj to vm.\n");
+               ipvr_gem_free_object(&obj->base);
+               return NULL;
+       }
+       ret = ipvr_gem_mmu_bind_object(obj);
+       if (ret) {
+               IPVR_ERROR("Failed to bind obj to mmu.\n");
+               ipvr_gem_free_object(&obj->base);
+               return NULL;
+       }
+
+       return obj;
+}
+
+struct ipvr_vma *ipvr_gem_obj_to_vma(struct drm_ipvr_gem_object *obj,
+                                       struct ipvr_address_space *vm)
+{
+       struct ipvr_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == vm)
+                       return vma;
+
+       return NULL;
+}
+
+struct ipvr_vma *ipvr_gem_vma_create(struct drm_ipvr_gem_object *obj,
+                                       struct ipvr_address_space *vm)
+{
+       struct ipvr_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
+       vma->vm = vm;
+       vma->obj = obj;
+
+       list_add_tail(&vma->vma_link, &obj->vma_list);
+
+       return vma;
+}
+
+static void ipvr_gem_vma_destroy(struct ipvr_vma *vma)
+{
+       WARN_ON(vma->node.allocated);
+       list_del(&vma->vma_link);
+       kfree(vma);
+}
+
+struct ipvr_vma *
+ipvr_gem_obj_lookup_or_create_vma(struct drm_ipvr_gem_object *obj,
+                                               struct ipvr_address_space *vm)
+{
+       struct ipvr_vma *vma;
+
+       vma = ipvr_gem_obj_to_vma(obj, vm);
+       if (!vma)
+               vma = ipvr_gem_vma_create(obj, vm);
+
+       return vma;
+}
+
+void ipvr_gem_object_pin_pages(struct drm_ipvr_gem_object *obj)
+{
+       BUG_ON(obj->sg_table == NULL);
+       obj->pages_pin_count++;
+}
+
+void ipvr_gem_object_unpin_pages(struct drm_ipvr_gem_object *obj)
+{
+       BUG_ON(obj->pages_pin_count == 0);
+       obj->pages_pin_count--;
+}
+
+static bool ipvr_gem_valid_mmu_space(struct drm_device *dev,
+                                               struct drm_mm_node *mmu_space,
+                                               uint32_t cache_level)
+{
+       struct drm_mm_node *other;
+
+       if (!drm_mm_node_allocated(mmu_space))
+               return true;
+
+       if (list_empty(&mmu_space->node_list))
+               return true;
+
+       other = list_entry(mmu_space->node_list.prev,
+                       struct drm_mm_node, node_list);
+       if (other->allocated && !other->hole_follows &&
+                               other->color != cache_level)
+               return false;
+
+       other = list_entry(mmu_space->node_list.next,
+                       struct drm_mm_node, node_list);
+       if (other->allocated && !mmu_space->hole_follows &&
+                               other->color != cache_level)
+               return false;
+
+       return true;
+}
+
+/**
+ * Finds free space in the MMU aperture and binds the object there.
+ */
+int32_t ipvr_gem_object_bind_to_vm(struct drm_ipvr_gem_object *obj,
+                                       struct ipvr_address_space *vm,
+                                       uint32_t alignment)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ipvr_vma *vma;
+       unsigned long start, end, size;
+       int32_t ret;
+       struct drm_mm *mm;
+       if (obj->tiling) {
+               mm = &vm->tiling_mm;
+               start = vm->tiling_start;
+               end = vm->tiling_start + vm->tiling_total;
+       } else {
+               mm = &vm->linear_mm;
+               start = vm->linear_start;
+               end = vm->linear_start + vm->linear_total;
+       }
+       size = obj->base.size;
+
+       ret = ipvr_gem_object_get_pages(obj);
+       if (ret) {
+               IPVR_ERROR("failed to call ipvr_gem_object_get_pages.\n");
+               return ret;
+       }
+
+       ipvr_gem_object_pin_pages(obj);
+
+       vma = ipvr_gem_obj_lookup_or_create_vma(obj, vm);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               IPVR_ERROR("failed on ipvr_gem_obj_lookup_or_create_vma.\n");
+               goto err_unpin;
+       }
+
+       /* For now we only ever use 1 vma per object */
+       BUG_ON(!list_is_singular(&obj->vma_list));
+
+       IPVR_DEBUG_GENERAL("call drm_mm_insert_node_in_range_generic.\n");
+       ret = drm_mm_insert_node_in_range_generic(mm, &vma->node, size,
+                                               alignment, obj->cache_level,
+                                               start, end,
+                                               DRM_MM_SEARCH_DEFAULT,
+                                               DRM_MM_CREATE_DEFAULT);
+       if (ret) {
+               /*
+                * ipvr doesn't support shrink so far since it has 4GiB addr space
+                */
+               IPVR_ERROR("failed on drm_mm_insert_node_in_range_generic.\n");
+               goto err_free_vma;
+       }
+
+       if (unlikely(!ipvr_gem_valid_mmu_space(dev, &vma->node,
+                                       obj->cache_level))) {
+               ret = -EINVAL;
+               IPVR_ERROR("failed on ipvr_gem_valid_mmu_space.\n");
+               goto err_remove_node;
+       }
+
+       list_move_tail(&obj->global_list, &dev_priv->ipvr_mm.bound_list);
+       list_add_tail(&vma->mm_list, &vm->inactive_list);
+
+       /* ipvr_gem_verify_gtt can be added here for debug */
+       return 0;
+
+err_remove_node:
+       drm_mm_remove_node(&vma->node);
+err_free_vma:
+       ipvr_gem_vma_destroy(vma);
+err_unpin:
+       ipvr_gem_object_unpin_pages(obj);
+       return ret;
+}
+
+void *ipvr_gem_object_vmap(struct drm_ipvr_gem_object *obj)
+{
+       int32_t i;
+       void *addr = NULL;
+       struct sg_page_iter sg_iter;
+       struct page **pages;
+
+       if (fake_bo_debug) {
+               addr = vmap(obj->fake_pages, obj->base.size / 4096,
+                       VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+               if (addr == NULL) {
+                       IPVR_ERROR("Failed to vmap pages\n");
+                       return ERR_PTR(-ENOMEM);
+               }
+               return addr;
+       }
+
+       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+       if (pages == NULL) {
+               IPVR_ERROR("Failed to get space for pages\n");
+               goto finish;
+       }
+
+       i = 0;
+       for_each_sg_page(obj->sg_table->sgl, &sg_iter,
+                       obj->sg_table->nents, 0) {
+               pages[i] = sg_page_iter_page(&sg_iter);
+               i++;
+       }
+
+       addr = vmap(pages, i, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+       if (addr == NULL) {
+               IPVR_ERROR("Failed to vmap pages\n");
+               goto finish;
+       }
+
+finish:
+       if (pages)
+               drm_free_large(pages);
+       return addr;
+}
+
+int32_t ipvr_gem_object_finish_gpu(struct drm_ipvr_gem_object *obj)
+{
+       int32_t ret = 0;
+
+       if ((obj->base.read_domains & IPVR_GEM_DOMAIN_GPU) == 0)
+               return ret;
+
+       /* Ensure that we invalidate the GPU's caches and TLBs. */
+       obj->base.read_domains &= ~IPVR_GEM_DOMAIN_GPU;
+       return ret;
+}
+
+/**
+ * ipvr_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * If the object has been moved out of the aperture,
+ * then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by ipvr_gem_fault().
+ */
+void ipvr_gem_release_mmap(struct drm_ipvr_gem_object *obj)
+{
+       /* todo:
+        * do not call drm_gem_create_mmap_offset here,
+        * since ipvr doesn't need mmap for device mapping
+        */
+       drm_vma_node_unmap(&obj->base.vma_node,
+                          obj->base.dev->anon_inode->i_mapping);
+
+       /* todo:
+        * it seems vm_munmap here can cause a kernel panic; disabled as a workaround
+        */
+       if (obj->mmap_base) {
+               IPVR_DEBUG_GENERAL("call vm_munmap to unmap bo, base is 
0x%lx.\n",
+                               obj->mmap_base);
+               obj->mmap_base = 0;
+       }
+}
+
+int32_t ipvr_gem_object_vma_unbind(struct ipvr_vma *vma)
+{
+       struct drm_ipvr_gem_object *obj = vma->obj;
+       drm_ipvr_private_t *dev_priv = obj->base.dev->dev_private;
+       unsigned long entry;
+       int32_t ret;
+
+       if (list_empty(&vma->vma_link))
+               return 0;
+
+       if (!drm_mm_node_allocated(&vma->node))
+               goto destroy;
+
+       if (obj->pin_count)
+               return -EBUSY;
+
+       BUG_ON(obj->sg_table == NULL);
+       entry = ipvr_gem_obj_mmu_offset(obj);
+       if (IPVR_IS_ERR(entry))
+               return IPVR_OFFSET_ERR(entry);
+
+       ret = ipvr_gem_object_finish_gpu(obj);
+       if (ret)
+               return ret;
+
+       /* Continue on if we fail due to EIO, the GPU is hung so we
+        * should be safe and we need to cleanup or else we might
+        * cause memory corruption through use-after-free.
+        */
+       ipvr_gem_release_mmap(obj);
+
+       /* remove the bo from the mmu */
+       ipvr_mmu_remove_pages(dev_priv->mmu->default_pd, entry,
+                       obj->base.size >> PAGE_SHIFT, 0, 0);
+
+       ipvr_gem_object_unpin_pages(obj);
+
+       list_del(&vma->mm_list);
+
+       drm_mm_remove_node(&vma->node);
+
+destroy:
+       ipvr_gem_vma_destroy(vma);
+
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        * NB: Until we have real VMAs there will only ever be one */
+       WARN_ON(!list_empty(&obj->vma_list));
+       if (list_empty(&obj->vma_list))
+               list_move_tail(&obj->global_list,
+                       &dev_priv->ipvr_mm.unbound_list);
+
+       return 0;
+}
+
+static void ipvr_gem_object_free_mmap_offset(struct drm_ipvr_gem_object *obj)
+{
+       drm_gem_free_mmap_offset(&obj->base);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
+ *
+ * Frees the object
+ */
+static void ipvr__gem_free_object(struct kref *kref)
+{
+       struct drm_gem_object *gem_obj = (struct drm_gem_object *)kref;
+       struct drm_device *dev = gem_obj->dev;
+       struct drm_ipvr_gem_object *obj = to_ipvr_bo(gem_obj);
+       drm_ipvr_private_t *dev_priv = dev->dev_private;
+       struct ipvr_vma *vma, *next;
+       int32_t ret;
+
+       /* FIXME: consider unlocked case */
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       if (obj->fence) {
+               ret = ipvr_fence_wait(obj->fence, true, false);
+               if (ret)
+                       IPVR_DEBUG_WARN("Failed to wait fence signaled.\n");
+       }
+
+       obj->pin_count = 0;
+       /* NB: 0 or 1 elements */
+       WARN_ON(!list_empty(&obj->vma_list) &&
+               !list_is_singular(&obj->vma_list));
+       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+               int ret = ipvr_gem_object_vma_unbind(vma);
+               if (WARN_ON(ret == -ERESTARTSYS)) {
+                       bool was_interruptible;
+
+                       was_interruptible = dev_priv->ipvr_mm.interruptible;
+                       dev_priv->ipvr_mm.interruptible = false;
+
+                       WARN_ON(ipvr_gem_object_vma_unbind(vma));
+
+                       dev_priv->ipvr_mm.interruptible = was_interruptible;
+               }
+       }
+
+       if (WARN_ON(obj->pages_pin_count))
+               obj->pages_pin_count = 0;
+       ipvr_gem_object_put_pages(obj);
+       ipvr_gem_object_free_mmap_offset(obj);
+
+       BUG_ON(obj->sg_table);
+
+       if (obj->base.import_attach)
+               drm_prime_gem_destroy(&obj->base, NULL);
+
+       if (obj->ops->release)
+               obj->ops->release(obj);
+
+       drm_gem_object_release(&obj->base);
+       ipvr_gem_info_remove_obj(dev_priv, obj->base.size);
+
+       trace_ipvr__gem_free_object(obj);
+
+       ipvr_gem_object_free(obj);
+}
+
+/*
+ * When the last reference to a GEM object is released the GEM core calls the
+ * drm_driver .gem_free_object() operation. That operation is mandatory for
+ * GEM-enabled drivers and must free the GEM object and all associated
+ * resources.
+*/
+void ipvr_gem_free_object(struct drm_gem_object *gem_obj)
+{
+       ipvr__gem_free_object((struct kref *)gem_obj);
+}
+
+static struct
+ipvr_gem_userptr_object *to_userptr_object(struct drm_ipvr_gem_object *obj)
+{
+       return container_of(obj, struct ipvr_gem_userptr_object, gem);
+}
+
+static int32_t ipvr_gem_userptr_get_pages_sg(struct drm_ipvr_gem_object *obj)
+{
+       struct ipvr_gem_userptr_object *vmap = to_userptr_object(obj);
+       int32_t num_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       struct page **pvec;
+       int32_t n, pinned, ret;
+
+       if (!access_ok(vmap->read_only ? VERIFY_READ : VERIFY_WRITE,
+                      (char __user *)vmap->user_ptr, vmap->user_size)) {
+               IPVR_ERROR("access_ok check failed.\n");
+               return -EFAULT;
+       }
+
+       /* If userspace should engineer that these pages are replaced in
+        * the vma between us binding this page into the GTT and completion
+        * of rendering... Their loss. If they change the mapping of their
+        * pages they need to create a new bo to point to the new vma.
+        *
+        * However, that still leaves open the possibility of the vma
+        * being copied upon fork. Which falls under the same userspace
+        * synchronisation issue as a regular bo, except that this time
+        * the process may not be expecting that a particular piece of
+        * memory is tied to the GPU.
+        *
+        * Fortunately, we can hook into the mmu_notifier in order to
+        * discard the page references prior to anything nasty happening
+        * to the vma (discard or cloning) which should prevent the more
+        * egregious cases from causing harm.
+        */
+
+       pvec = kmalloc(num_pages * sizeof(struct page *),
+                      GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+       if (pvec == NULL) {
+               pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+               if (pvec == NULL) {
+                       IPVR_ERROR("pvec alloc failure\n");
+                       return -ENOMEM;
+               }
+       }
+
+       pinned = __get_user_pages_fast(vmap->user_ptr, num_pages,
+                               !vmap->read_only, pvec);
+       if (pinned < num_pages) {
+               struct mm_struct *mm = current->mm;
+               ret = 0;
+               down_read(&mm->mmap_sem);
+               ret = get_user_pages(current, mm,
+                               vmap->user_ptr + (pinned << PAGE_SHIFT),
+                               num_pages - pinned,
+                               !vmap->read_only, 0,
+                               pvec + pinned,
+                               NULL);
+               up_read(&mm->mmap_sem);
+               if (ret > 0)
+                       pinned += ret;
+
+               if (obj->sg_table || pinned < num_pages) {
+                       ret = obj->sg_table ? 0 : -EFAULT;
+                       IPVR_ERROR("obj->sg_table is NULL\n");
+                       goto cleanup_pinned;
+               }
+       }
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (st == NULL) {
+               ret = -ENOMEM;
+               goto cleanup_pinned;
+       }
+
+       if (sg_alloc_table(st, num_pages, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto cleanup_st;
+       }
+
+       for_each_sg(st->sgl, sg, num_pages, n)
+               sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
+       drm_free_large(pvec);
+
+       obj->sg_table = st;
+       return 0;
+
+cleanup_st:
+       kfree(st);
+cleanup_pinned:
+       release_pages(pvec, pinned, 0);
+       drm_free_large(pvec);
+       return ret;
+}
+
+static void ipvr_gem_userptr_put_pages_sg(struct drm_ipvr_gem_object *obj)
+{
+       struct scatterlist *sg;
+       int32_t i;
+
+       for_each_sg(obj->sg_table->sgl, sg, obj->sg_table->nents, i) {
+               struct page *page = sg_page(sg);
+
+               if (obj->dirty)
+                       set_page_dirty(page);
+
+               mark_page_accessed(page);
+               page_cache_release(page);
+       }
+       obj->dirty = 0;
+
+       sg_free_table(obj->sg_table);
+       kfree(obj->sg_table);
+}
+
+static const struct drm_ipvr_gem_object_ops ipvr_gem_userptr_ops = {
+       .get_pages = ipvr_gem_userptr_get_pages_sg,
+       .put_pages = ipvr_gem_userptr_put_pages_sg,
+};
+
+int32_t ipvr_gem_userptr(struct drm_file *file_priv, struct drm_device *dev,
+                       uint64_t user_ptr, uint64_t user_size,
+                       uint32_t cache_level, uint32_t tiling, uint32_t *handle_p,
+                       uint64_t *offset_p)
+{
+       struct ipvr_gem_userptr_object *obj;
+       int32_t ret = 0;
+       uint32_t handle, num_pages;
+       loff_t first_data_page, last_data_page;
+
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       IPVR_DEBUG_GENERAL("create user bo userptr is %llx, size is %lld, "
+                       "cache level is %d.\n",
+                       user_ptr, user_size, cache_level);
+
+       first_data_page = user_ptr / PAGE_SIZE;
+       last_data_page = (user_ptr + user_size - 1) / PAGE_SIZE;
+       num_pages = last_data_page - first_data_page + 1;
+
+       /* only support page aligned buffer for now */
+       if (offset_in_page(user_ptr) != 0 || (user_size & (PAGE_SIZE - 1)) != 0)
+               return -EINVAL;
+
+       /* Allocate the new object */
+       obj = ipvr_gem_object_alloc(dev);
+       if (obj == NULL)
+               return -ENOMEM;
+       memset(obj, 0, sizeof(*obj));
+
+       drm_gem_private_object_init(dev, &obj->gem.base,
+                                       num_pages * PAGE_SIZE);
+
+       ipvr_gem_object_init(&obj->gem, &ipvr_gem_userptr_ops);
+
+       init_waitqueue_head(&obj->gem.event_queue);
+       atomic_set(&obj->gem.cpu_writers, 0);
+
+       obj->gem.drv_name = "ipvr";
+       obj->gem.fence = NULL;
+       obj->gem.cache_level = cache_level;
+       obj->gem.tiling = tiling;
+
+       obj->user_ptr = user_ptr;
+       obj->user_size = user_size;
+
+       ret = drm_gem_handle_create(file_priv, &obj->gem.base, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(&obj->gem.base);
+       if (ret) {
+               IPVR_ERROR("failed drm_gem_handle_create for obj 0x%lx\n",
+                       (unsigned long)obj);
+               goto err;
+       }
+
+       *handle_p = handle;
+
+       /* force to get pages at allocation stage */
+       ret = ipvr_gem_object_bind_to_vm(&obj->gem,
+                                       &dev_priv->addr_space, 4096);
+       if (ret) {
+               IPVR_ERROR("failed ipvr_gem_object_bind_to_vm for obj 0x%lx\n",
+                       (unsigned long)obj);
+               goto err;
+       }
+
+       ret = ipvr_gem_mmu_bind_object(&obj->gem);
+       if (ret) {
+               IPVR_ERROR("failed ipvr_gem_mmu_bind_object for obj 0x%lx\n",
+                       (unsigned long)obj);
+               goto err;
+       }
+
+       *offset_p = ipvr_gem_obj_mmu_offset(&obj->gem) + offset_in_page(user_ptr);
+       if (IPVR_IS_ERR(*offset_p)) {
+               ret = IPVR_OFFSET_ERR(*offset_p);
+               goto err;
+       }
+
+       IPVR_DEBUG_GENERAL("bo create done, gpu offset: 0x%llx.\n", *offset_p);
+
+err:
+       if (ret)
+               ipvr_gem_object_free(&obj->gem);
+       return ret;
+}
diff --git a/drivers/gpu/drm/ipvr/ipvr_buffer.h b/drivers/gpu/drm/ipvr/ipvr_buffer.h
new file mode 100644
index 0000000..245ff8c
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_buffer.h
@@ -0,0 +1,184 @@
+/**************************************************************************
+ * ipvr_buffer.h: IPVR buffer handling header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+
+#ifndef _IPVR_BUFFER_H_
+#define _IPVR_BUFFER_H_
+
+#include <drmP.h>
+#include <drm_gem.h>
+#include <linux/shmem_fs.h>
+#include "ipvr_drv.h"
+#include "ipvr_drm.h"
+#include "ipvr_fence.h"
+
+struct ipvr_fence;
+
+struct drm_ipvr_gem_object {
+       struct drm_gem_object base;
+
+       /* used to distinguish between i915 and ipvr */
+       char *drv_name;
+
+       const struct drm_ipvr_gem_object_ops *ops;
+
+       /** List of VMAs backed by this object */
+       struct list_head vma_list;
+
+       /* in the ipvr case the buffer is bound into the mmu and never unbound */
+       struct list_head global_list;
+
+       /**
+        * whether this bo is a tiled buffer.
+        * multiple tiling modes with different strides are supported,
+        * but only one tiling mode can be enabled at a time.
+        */
+       bool tiling;
+
+       enum ipvr_cache_level cache_level;
+
+       /**
+        * This is set if the object is on the active lists (has pending
+        * rendering and so a non-zero seqno), and is not set if it is on
+        * inactive (ready to be unbound) list.
+        */
+       uint32_t active:1;
+
+       /**
+        * This is set if the object has been written to since last bound
+        * into the MMU
+        */
+       uint32_t dirty:1;
+
+       uint32_t pin_count:5;
+#define DRM_IPVR_GEM_OBJECT_MAX_PIN_COUNT 0x1f
+
+       struct sg_table *sg_table;
+       int32_t pages_pin_count;
+
+       /** User space pin count and filp owning the pin */
+       uint32_t user_pin_count;
+       struct drm_file *pin_filp;
+
+       struct ipvr_fence *fence;
+       atomic_t reserved;
+       wait_queue_head_t event_queue;
+       atomic_t cpu_writers;
+
+       unsigned long mmap_base;
+       uint64_t mmap_size;
+
+       struct page **fake_pages;
+};
+
+struct ipvr_gem_userptr_object {
+       struct drm_ipvr_gem_object gem;
+       uintptr_t user_ptr;
+       size_t user_size;
+       int32_t read_only;
+       struct mm_struct *mm;
+#if defined(CONFIG_MMU_NOTIFIER)
+       struct mmu_notifier mn;
+#endif
+};
+
+union drm_ipvr_gem_objects {
+       struct drm_ipvr_gem_object base;
+       struct ipvr_gem_userptr_object vmap;
+};
+
+struct drm_ipvr_gem_object_ops {
+       /*
+        * Interface between the GEM object and its backing storage.
+        * get_pages() is called once prior to the use of the associated set
+        * of pages, before binding them into the MMU, and put_pages() is
+        * called after we no longer need them. As we expect there to be
+        * associated cost with migrating pages between the backing storage
+        * and making them available for the GPU (e.g. clflush), we may hold
+        * onto the pages after they are no longer referenced by the GPU
+        * in case they may be used again shortly (for example migrating the
+        * pages to a different memory domain within the MMU). put_pages()
+        * will therefore most likely be called when the object itself is
+        * being released or under memory pressure (where we attempt to
+        * reap pages for the shrinker).
+        */
+       int (*get_pages)(struct drm_ipvr_gem_object *);
+       void (*put_pages)(struct drm_ipvr_gem_object *);
+       void (*release)(struct drm_ipvr_gem_object *);
+       bool (*is_userptr_obj)(void);
+};
+
+/*
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct ipvr_vma {
+       struct drm_mm_node node;
+       struct drm_ipvr_gem_object *obj;
+       struct ipvr_address_space *vm;
+
+       /* This object's place on the active/inactive lists */
+       struct list_head mm_list;
+
+       struct list_head vma_link; /* Link in the object's VMA list */
+};
+
+int32_t ipvr_gem_create(struct drm_file *file_priv, struct drm_device *dev,
+                       uint64_t size, uint32_t tiling, uint32_t cache_level,
+                       uint64_t *rsize_p, uint32_t *handle_p,
+                       uint64_t *offset_p);
+
+struct drm_ipvr_gem_object *
+ipvr_gem_obj_create_and_bind(struct drm_device *dev, size_t size);
+
+struct drm_ipvr_gem_object *
+ipvr_gem_alloc_object(struct drm_device *dev, size_t size);
+
+int32_t ipvr_gem_object_bind_to_vm(struct drm_ipvr_gem_object *obj,
+                       struct ipvr_address_space *vm,
+                       uint32_t alignment);
+
+unsigned long ipvr_gem_obj_mmu_offset(struct drm_ipvr_gem_object *obj);
+
+int32_t ipvr_gem_mmu_bind_object(struct drm_ipvr_gem_object *obj);
+
+void *ipvr_gem_object_vmap(struct drm_ipvr_gem_object *obj);
+
+int ipvr_gem_init_object(struct drm_gem_object *obj);
+
+void ipvr_gem_free_object(struct drm_gem_object *obj);
+
+bool ipvr_gem_clflush_object(struct drm_ipvr_gem_object *obj, bool force);
+
+int32_t ipvr_gem_userptr(struct drm_file *file_priv, struct drm_device *dev,
+                       uint64_t user_ptr, uint64_t user_size,
+                       uint32_t cache_level, uint32_t tiling,
+                       uint32_t *handle_p, uint64_t *offset_p);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_debug.c b/drivers/gpu/drm/ipvr/ipvr_debug.c
new file mode 100644
index 0000000..eecf8de
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_debug.c
@@ -0,0 +1,263 @@
+/**************************************************************************
+ * ipvr_debug.c: IPVR debugfs support to assist bug triage
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#if defined(CONFIG_DEBUG_FS)
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include "ipvr_debug.h"
+#include "ipvr_drv.h"
+#include "ved_reg.h"
+
+union ipvr_debugfs_vars debugfs_vars;
+
+static int32_t ipvr_debug_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       seq_printf(m, "ipvr platform revison id: 0x%x\n",
+               dev_priv->platform_rev_id);
+
+       return 0;
+}
+
+static int32_t ipvr_debug_gem_object_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       int32_t ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       seq_printf(m, "ipvr total allocate %u objects, %zu bytes\n",
+                  dev_priv->ipvr_mm.object_count,
+                  dev_priv->ipvr_mm.object_memory);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+static int32_t ipvr_debug_gem_seqno_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       drm_ipvr_private_t *dev_priv = dev->dev_private;
+       int32_t ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       seq_printf(m, "last signaled seq is %d, last emitted seq is %d\n",
+               atomic_read(&dev_priv->fence_drv.signaled_seq),
+               dev_priv->fence_drv.sync_seq);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
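+/*
+ * Register access protocol: userspace writes a command string to this
+ * debugfs file (IPVR_READ_TOKEN or IPVR_WRITE_TOKEN, a hex register offset
+ * and, for writes, a hex value), then reads the file back to retrieve the
+ * register contents formatted as "<offset>: <value>".
+ */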
+static ssize_t ipvr_debug_ved_reg_read(struct file *filp, char __user *ubuf,
+                                       size_t max, loff_t *ppos)
+{
+       struct drm_device *dev = filp->private_data;
+       drm_ipvr_private_t *dev_priv = dev->dev_private;
+       char buf[200], offset[20], operation[10], format[20], val[20];
+       int32_t len = 0, ret, no_of_tokens;
+       unsigned long reg_offset, reg_to_write;
+
+       if (debugfs_vars.reg.reg_input == 0)
+               return len;
+
+       snprintf(format, sizeof(format), "%%%zus %%%zus %%%zus",
+                       sizeof(operation) - 1, sizeof(offset) - 1, sizeof(val) - 1);
+
+       no_of_tokens = sscanf(debugfs_vars.reg.reg_vars,
+                                       format, operation, offset, val);
+
+       if (no_of_tokens < 3)
+               return len;
+
+       len = sizeof(debugfs_vars.reg.reg_vars);
+
+       if (strcmp(operation, IPVR_READ_TOKEN) == 0) {
+               ret = kstrtoul(offset, 16, &reg_offset);
+               if (ret)
+                       return -EINVAL;
+
+               len = scnprintf(buf, sizeof(buf), "0x%x: 0x%x\n",
+                       (uint32_t)reg_offset,
+                       VED_REG_READ32((uint32_t)reg_offset));
+       } else if (strcmp(operation, IPVR_WRITE_TOKEN) == 0) {
+               ret = kstrtoul(offset, 16, &reg_offset);
+               if (ret)
+                       return -EINVAL;
+
+               ret = kstrtoul(val, 16, &reg_to_write);
+               if (ret)
+                       return -EINVAL;
+
+               VED_REG_WRITE32(reg_offset, reg_to_write);
+               len = scnprintf(buf, sizeof(buf),
+                               "0x%x: 0x%x\n",
+                               (uint32_t)reg_offset,
+                               (uint32_t)VED_REG_READ32(reg_offset));
+       } else {
+               len = scnprintf(buf, sizeof(buf), "Operation Not Supported\n");
+       }
+
+       debugfs_vars.reg.reg_input = 0;
+
+       return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+ipvr_debug_ved_reg_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       /* reset the string */
+       memset(debugfs_vars.reg.reg_vars, 0, IPVR_MAX_BUFFER_STR_LEN);
+
+       if (cnt > 0) {
+               if (cnt > sizeof(debugfs_vars.reg.reg_vars) - 1)
+                       return -EINVAL;
+
+               if (copy_from_user(debugfs_vars.reg.reg_vars, ubuf, cnt))
+                       return -EFAULT;
+
+               debugfs_vars.reg.reg_vars[cnt] = 0;
+
+               /* Enable Read */
+               debugfs_vars.reg.reg_input = 1;
+       }
+
+       return cnt;
+}
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int32_t ipvr_add_fake_info_node(struct drm_minor *minor,
+                                       struct dentry *ent, const void *key)
+{
+       struct drm_info_node *node;
+
+       node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+       if (node == NULL) {
+               debugfs_remove(ent);
+               return -ENOMEM;
+       }
+
+       node->minor = minor;
+       node->dent = ent;
+       node->info_ent = (void *) key;
+
+       mutex_lock(&minor->debugfs_lock);
+       list_add(&node->list, &minor->debugfs_list);
+       mutex_unlock(&minor->debugfs_lock);
+
+       return 0;
+}
+
+static int32_t ipvr_debugfs_create(struct dentry *root,
+                              struct drm_minor *minor,
+                              const char *name,
+                              const struct file_operations *fops)
+{
+       struct drm_device *dev = minor->dev;
+       struct dentry *ent;
+
+       ent = debugfs_create_file(name,
+                                 S_IRUGO | S_IWUSR,
+                                 root, dev,
+                                 fops);
+       if (IS_ERR_OR_NULL(ent))
+               return ent ? PTR_ERR(ent) : -ENOMEM;
+
+       return ipvr_add_fake_info_node(minor, ent, fops);
+}
+
+static const struct file_operations ipvr_ved_reg_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = ipvr_debug_ved_reg_read,
+       .write = ipvr_debug_ved_reg_write,
+       .llseek = default_llseek,
+};
+
+static struct drm_info_list ipvr_debugfs_list[] = {
+       {"ipvr_capabilities", ipvr_debug_info, 0},
+       {"ipvr_gem_objects", ipvr_debug_gem_object_info, 0},
+       {"ipvr_gem_seqno", ipvr_debug_gem_seqno_info, 0},
+
+};
+#define IPVR_DEBUGFS_ENTRIES ARRAY_SIZE(ipvr_debugfs_list)
+
+static struct ipvr_debugfs_files {
+       const char *name;
+       const struct file_operations *fops;
+} ipvr_debugfs_files[] = {
+       {"ipvr_ved_reg_api", &ipvr_ved_reg_fops},
+};
+
+int32_t ipvr_debugfs_init(struct drm_minor *minor)
+{
+       int32_t ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(ipvr_debugfs_files); i++) {
+               ret = ipvr_debugfs_create(minor->debugfs_root, minor,
+                                  ipvr_debugfs_files[i].name,
+                                  ipvr_debugfs_files[i].fops);
+               if (ret)
+                       return ret;
+       }
+
+       return drm_debugfs_create_files(ipvr_debugfs_list,
+                                IPVR_DEBUGFS_ENTRIES,
+                                minor->debugfs_root, minor);
+}
+
+void ipvr_debugfs_cleanup(struct drm_minor *minor)
+{
+       int32_t i;
+
+       drm_debugfs_remove_files(ipvr_debugfs_list,
+                         IPVR_DEBUGFS_ENTRIES, minor);
+
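+       /* each fake node registered by ipvr_add_fake_info_node() stores the
+        * fops pointer as its info_ent key, so the cast below merely rebuilds
+        * that key for drm_debugfs_remove_files() to match against
+        */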
+       for (i = 0; i < ARRAY_SIZE(ipvr_debugfs_files); i++) {
+               struct drm_info_list *info_list =
+                       (struct drm_info_list *)ipvr_debugfs_files[i].fops;
+
+               drm_debugfs_remove_files(info_list, 1, minor);
+       }
+}
+
+#endif /* CONFIG_DEBUG_FS */
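
For illustration only, not part of the patch: the ipvr_ved_reg_api file above
implements a two-step token protocol. Writing "READ <offset> <dummy>" or
"WRITE <offset> <value>" arms the next read(), which reports
"<offset>: <value>"; note the parser requires three whitespace-separated
tokens even for READ. A minimal user-space sketch, assuming debugfs is
mounted at /sys/kernel/debug and the device is DRM minor 0 (both assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[200];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/ipvr_ved_reg_api", O_RDWR);

	if (fd < 0)
		return 1;
	/* three tokens are required, so pass a dummy value for READ;
	 * 0x640 is the MSVDX core revision offset used in ipvr_drv.c */
	if (write(fd, "READ 640 0", strlen("READ 640 0")) < 0) {
		close(fd);
		return 1;
	}
	lseek(fd, 0, SEEK_SET);	/* rewind before reading the reply */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);	/* e.g. "0x640: 0x..." */
	}
	close(fd);
	return 0;
}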
diff --git a/drivers/gpu/drm/ipvr/ipvr_debug.h b/drivers/gpu/drm/ipvr/ipvr_debug.h
new file mode 100644
index 0000000..3cbeb73
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_debug.h
@@ -0,0 +1,50 @@
+/**************************************************************************
+ * ipvr_debug.h: IPVR debugfs support header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+
+#ifndef _IPVR_DEBUG_H_
+#define _IPVR_DEBUG_H_
+
+#include "drmP.h"
+
+/* Operations supported */
+#define IPVR_MAX_BUFFER_STR_LEN                200
+
+#define IPVR_READ_TOKEN                        "READ"
+#define IPVR_WRITE_TOKEN               "WRITE"
+
+/* DebugFS Variable declaration */
+struct ipvr_debugfs_reg_vars {
+       char reg_vars[IPVR_MAX_BUFFER_STR_LEN];
+       uint32_t reg_input;
+};
+
+union ipvr_debugfs_vars {
+       struct ipvr_debugfs_reg_vars reg;
+};
+
+int32_t ipvr_debugfs_init(struct drm_minor *minor);
+void ipvr_debugfs_cleanup(struct drm_minor *minor);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_drm.h b/drivers/gpu/drm/ipvr/ipvr_drm.h
new file mode 100644
index 0000000..1825d9f
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_drm.h
@@ -0,0 +1,265 @@
+/**************************************************************************
+ * ipvr_drm.h: IPVR header file exported to user space
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+
+/*
+ * this file only defines structs and macros which need to be
+ * exported to user space
+ */
+#ifndef _IPVR_DRM_H_
+#define _IPVR_DRM_H_
+
+#include <drm/drm.h>
+struct drm_ipvr_context_create {
+       /* passed ctx_info, including codec, profile info */
+       uint32_t ctx_type;
+       /* returned back ctx_id */
+       uint32_t ctx_id;
+       /*
+        * following tiling strides for VED are supported:
+        * stride 0: 512 for scheme 0, 1024 for scheme 1
+        * stride 1: 1024 for scheme 0, 2048 for scheme 1
+        * stride 2: 2048 for scheme 0, 4096 for scheme 1
+        * stride 3: 4096 for scheme 0
+        */
+       uint32_t tiling_stride;
+       /*
+        * scheme 0: tile is 256x16, while minimal tile stride is 512
+        * scheme 1: tile is 512x8, while minimal tile stride is 1024
+        */
+       uint32_t tiling_scheme;
+};
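
For illustration only, not part of the header: these are exactly the
combinations the kernel-side validation accepts (stride codes 0..3 for
scheme 0, 0..2 for scheme 1), so a user-space helper mirroring the check
is straightforward:

#include <stdint.h>

/* mirrors the tiling table documented above */
static inline int ipvr_tiling_args_valid(uint32_t scheme, uint32_t stride)
{
	return (scheme == 0 && stride <= 3) ||
	       (scheme == 1 && stride <= 2);
}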
+
+struct drm_ipvr_context_destroy {
+       uint32_t ctx_id;
+       uint32_t pad64;
+};
+
+enum drm_ipvr_misc_key {
+       IPVR_DEVICE_INFO,
+       IPVR_UPDATE_TILING,
+       IPVR_SET_DISPLAYING_FRAME,
+       IPVR_GET_DISPLAYING_FRAME,
+       IPVR_QUERY_ENTRY
+};
+
+/*
+ * different contexts may have different tiling strides,
+ * so the tiling info needs to be bound to the ctx
+ */
+struct drm_ipvr_update_tiling {
+       uint32_t ctx_id;
+       uint32_t tiling_stride;
+       uint32_t tiling_scheme;
+       uint32_t pad64;
+};
+
+/* Ioctl to set/get misc params:
+ */
+struct drm_ipvr_misc {
+       uint64_t key;
+       uint64_t arg;           /* argument pointer */
+       uint64_t value; /* feed back pointer */
+};
+
+struct ipvr_validate_arg {
+       /* point to next ipvr_validate_arg */
+       uint64_t next;
+       uint64_t presumed_gpu_offset;
+       /* User's handle for a buffer */
+       uint32_t handle;
+       uint32_t pad64;
+       /* fencing */
+       uint32_t skip_fence;
+       int32_t fence_fd;
+};
+
+#define MAX_SLICES_PER_PICTURE 72
+struct drm_ipvr_mb_region {
+       uint32_t start;
+       uint32_t end;
+};
+
+struct drm_ipvr_decode_status {
+       uint32_t num_region;
+       struct drm_ipvr_mb_region mb_regions[MAX_SLICES_PER_PICTURE];
+};
+
+struct drm_ipvr_gem_execbuffer {
+       /*
+        * List of ipvr_validate_arg structs
+        */
+       uint64_t buffer_list;
+       uint32_t buffer_count;
+
+       /* from the user-space point of view, this is actually the msg buffer */
+       uint32_t cmdbuf_handle;
+       uint32_t cmdbuf_size;
+
+       uint32_t ctx_id;
+};
+
+enum ipvr_cache_level {
+       IPVR_CACHE_NONE = 0,    /* uncacheable */
+       IPVR_CACHE_WB,          /* write back cacheable */
+       IPVR_CACHE_WC,          /* write combine, uncacheable */
+       IPVR_CACHE_MAX,
+};
+
+struct drm_ipvr_gem_create {
+       /*
+        * Requested size for the object.
+        * The (page-aligned) allocated size for the object will be returned.
+        */
+       uint64_t size;
+       uint64_t rounded_size;
+       uint64_t gpu_offset;
+       /*
+        * Returned handle for the object.
+        * Object handles are nonzero.
+        */
+       uint32_t handle;
+       uint32_t tiling;
+       uint32_t cache_level;
+       uint32_t pad64;
+};
+
+struct drm_ipvr_gem_busy {
+       /* Handle of the buffer to check for busy */
+       uint32_t handle;
+
+       /*
+        * Return busy status (1 if busy, 0 if idle).
+        * The high word is used to indicate on which rings the object
+        * currently resides:
+        *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+        */
+       uint32_t busy;
+};
+
+struct drm_ipvr_gem_mmap {
+       /* Handle for the object being mapped. */
+       uint32_t handle;
+       uint32_t pad64;
+       /** Offset in the object to map. */
+       uint64_t offset;
+       /*
+        * Length of data to map.
+        * The value will be page-aligned.
+        */
+       uint64_t size;
+       /*
+        * Returned pointer to where the data was mapped.
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       uint64_t addr_ptr;
+};
+
+/*
+ * ACCESS mode flags for SYNCCPU.
+ *
+ * IPVR_SYNCCPU_MODE_READ will guarantee that the GPU is not
+ * writing to the buffer.
+ *
+ * IPVR_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
+ * accessing the buffer.
+ *
+ * IPVR_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
+ * for GPU accesses to finish but returns -EBUSY instead.
+ *
+ * IPVR_SYNCCPU_MODE_TRYCACHED tries to place the buffer in cacheable
+ * memory while synchronized for CPU.
+ *
+ */
+
+#define IPVR_SYNCCPU_MODE_READ      (1 << 0)
+#define IPVR_SYNCCPU_MODE_WRITE     (1 << 1)
+#define IPVR_SYNCCPU_MODE_NO_BLOCK  (1 << 2)
+#define IPVR_SYNCCPU_MODE_TRYCACHED (1 << 3)
+
+struct drm_ipvr_sync_cpu {
+       /* Handle for the object */
+       uint32_t handle;
+       uint32_t access_mode;
+       enum {
+               IPVR_SYNCCPU_OP_GRAB,
+               IPVR_SYNCCPU_OP_RELEASE
+       } op;
+       uint32_t pad64;
+};
+
+struct drm_ipvr_gem_wait {
+       /* Handle of BO we shall wait on */
+       uint32_t handle;
+       uint32_t flags;
+};
+
+struct drm_ipvr_gem_userptr {
+       uint64_t user_ptr;
+       uint64_t user_size;
+       uint64_t gpu_offset;
+       uint32_t cache_level;
+       uint32_t tiling;
+       /*
+        * Returned handle for the object.
+        * Object handles are nonzero.
+        */
+       uint32_t handle;
+};
+
+/*
+ * IPVR GEM specific ioctls
+ * The device-specific range starts at DRM_COMMAND_BASE (0x40);
+ * these ioctls use offsets 0x00 - 0x09 within it.
+ */
+#define DRM_IPVR_CONTEXT_CREATE                0x00
+#define DRM_IPVR_CONTEXT_DESTROY       0x01
+#define DRM_IPVR_MISC                          0x02
+#define DRM_IPVR_GEM_EXECBUFFER                0x03
+#define DRM_IPVR_GEM_BUSY                      0x04
+#define DRM_IPVR_GEM_CREATE                    0x05
+#define DRM_IPVR_GEM_MMAP                      0x06
+#define DRM_IPVR_SYNC_CPU                      0x07
+#define DRM_IPVR_GEM_WAIT                      0x08
+#define DRM_IPVR_GEM_USERPTR           0x09
+
+#define DRM_IOCTL_IPVR_CONTEXT_CREATE  \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_CONTEXT_CREATE, struct drm_ipvr_context_create)
+#define DRM_IOCTL_IPVR_CONTEXT_DESTROY \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_IPVR_CONTEXT_DESTROY, struct drm_ipvr_context_destroy)
+#define DRM_IOCTL_IPVR_MISC            \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_MISC, struct drm_ipvr_misc)
+#define DRM_IOCTL_IPVR_GEM_EXECBUFFER  \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_GEM_EXECBUFFER, struct drm_ipvr_gem_execbuffer)
+#define DRM_IOCTL_IPVR_GEM_BUSY                \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_GEM_BUSY, struct drm_ipvr_gem_busy)
+#define DRM_IOCTL_IPVR_GEM_CREATE      \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_GEM_CREATE, struct drm_ipvr_gem_create)
+#define DRM_IOCTL_IPVR_GEM_MMAP                \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_GEM_MMAP, struct drm_ipvr_gem_mmap)
+#define DRM_IOCTL_IPVR_SYNC_CPU        \
+       DRM_IOW(DRM_COMMAND_BASE + DRM_IPVR_SYNC_CPU, struct drm_ipvr_sync_cpu)
+#define DRM_IOCTL_IPVR_GEM_WAIT                \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_GEM_WAIT, struct drm_ipvr_gem_wait)
+#define DRM_IOCTL_IPVR_GEM_USERPTR     \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_IPVR_GEM_USERPTR, struct drm_ipvr_gem_userptr)
+
+#endif
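
For illustration only, not part of the header: a hedged sketch of how user
space might drive the context-create and buffer-create ioctls declared
above. The device node path and the particular ctx_type/size values are
assumptions, not requirements of this interface:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ipvr_drm.h"

static int ipvr_example(void)
{
	struct drm_ipvr_context_create ctx;
	struct drm_ipvr_gem_create bo;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ctx, 0, sizeof(ctx));
	ctx.ctx_type = 0;	/* profile << 8 | entrypoint, driver-defined */
	ctx.tiling_scheme = 0;	/* 256x16 tiles */
	ctx.tiling_stride = 2;	/* stride code 2: 2048 bytes under scheme 0 */
	if (ioctl(fd, DRM_IOCTL_IPVR_CONTEXT_CREATE, &ctx))
		goto fail;

	memset(&bo, 0, sizeof(bo));
	bo.size = 4096;
	bo.cache_level = IPVR_CACHE_WC;
	if (ioctl(fd, DRM_IOCTL_IPVR_GEM_CREATE, &bo))
		goto fail;

	/* bo.handle, bo.rounded_size and bo.gpu_offset are now filled in */
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}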
diff --git a/drivers/gpu/drm/ipvr/ipvr_drv.c b/drivers/gpu/drm/ipvr/ipvr_drv.c
new file mode 100644
index 0000000..7a7ccbf
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_drv.c
@@ -0,0 +1,776 @@
+/**************************************************************************
+ * ipvr_drv.c: IPVR driver common file for initialization/de-initialization
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+
+#include <linux/device.h>
+#include <linux/version.h>
+#include <uapi/drm/drm.h>
+#include <linux/pm_runtime.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+#include "ipvr_drv.h"
+#include "ipvr_gem.h"
+#include "ipvr_mmu.h"
+#include "ipvr_exec.h"
+#include "ipvr_buffer.h"
+#include "ipvr_debug.h"
+#include "ved_init.h"
+#include "ved_pm.h"
+#include "ved_reg.h"
+#include "ved_cmd.h"
+#include "ipvr_trace.h"
+
+int32_t drm_ipvr_cpurelax = 0;
+int32_t drm_ipvr_udelaydivider = 1;
+int32_t drm_ipvr_trap_pagefaults = 0;
+int32_t drm_ipvr_tiling = 1;
+int32_t drm_ipvr_debug = 0x80;
+bool fake_bo_debug = false;
+int32_t drm_ipvr_freq = IP_FREQ_266_67;
+
+module_param_named(trap_pagefaults, drm_ipvr_trap_pagefaults, int, 0600);
+module_param_named(debug, drm_ipvr_debug, int, 0600);
+module_param_named(freq, drm_ipvr_freq, int, 0600);
+
+MODULE_PARM_DESC(debug,
+               "control debug info output "
+               "(default: 0x80) "
+               "bit 0:IPVR_D_GENERAL, 1:IPVR_D_INIT, 2:IPVR_D_IRQ, "
+               "3:IPVR_D_ENTRY, 4:IPVR_D_PM, 5:IPVR_D_REG, "
+               "6:IPVR_D_VED, 7:IPVR_D_WARN");
+MODULE_PARM_DESC(freq,
+               "control VED frequency "
+               "(default: 0x0b, 266.67 MHz) "
+               "0x1f: 100.00 MHz, 0x1d: 106.67 MHz, 0x17: 133.30 MHz, "
+               "0x13: 160.00 MHz, 0x11: 177.78 MHz, 0x0f: 200.00 MHz, "
+               "0x0e: 213.33 MHz, 0x0b: 266.67 MHz, 0x09: 320.00 MHz");
+
+uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       return ioread32(dev_priv->ved_reg_base + dev_priv->ved_reg_offset + reg);
+}
+
+static int32_t
+ipvr_context_create_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv)
+{
+       struct drm_ipvr_context_create *args = data;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ipvr_context *ipvr_ctx  = NULL;
+       unsigned long irq_flags;
+       int32_t ctx_id, ret = 0;
+
+       IPVR_DEBUG_ENTRY("enter\n");
+       /* add video decode context */
+       ipvr_ctx = kzalloc(sizeof(struct ipvr_context), GFP_KERNEL);
+       if (ipvr_ctx == NULL)
+               return -ENOMEM;
+
+       ctx_id = idr_alloc(&dev_priv->ipvr_ctx_idr, ipvr_ctx,
+                          IPVR_MIN_CONTEXT_ID, IPVR_MAX_CONTEXT_ID,
+                          GFP_KERNEL);
+       if (ctx_id < 0) {
+               kfree(ipvr_ctx);
+               return -ENOMEM;
+       }
+       ipvr_ctx->ctx_id = ctx_id;
+
+       INIT_LIST_HEAD(&ipvr_ctx->head);
+       ipvr_ctx->ctx_type = args->ctx_type;
+       ipvr_ctx->ipvr_fpriv = file_priv->driver_priv;
+       spin_lock_irqsave(&dev_priv->ipvr_ctx_lock, irq_flags);
+       list_add(&ipvr_ctx->head, &dev_priv->ipvr_ctx_list);
+       spin_unlock_irqrestore(&dev_priv->ipvr_ctx_lock, irq_flags);
+       args->ctx_id = ctx_id;
+       IPVR_DEBUG_INIT("add ctx profile %d, entry %d, ctx_id is %d, "
+                       "protected is %d.\n",
+                       (ipvr_ctx->ctx_type >> 8) & 0xff,
+                       ipvr_ctx->ctx_type & 0xff, ctx_id,
+                       ipvr_ctx->ctx_type & VA_RT_FORMAT_PROTECTED);
+       /*
+        * todo: only one tiling region is supported now,
+        * maybe we need create additional tiling region for rotation case,
+        * which has different tiling stride
+        */
+       if ((args->tiling_scheme == 0 && args->tiling_stride <= 3) ||
+               (args->tiling_scheme == 1 && args->tiling_stride <= 2)) {
+               ipvr_ctx->tiling_scheme = args->tiling_scheme;
+               ipvr_ctx->tiling_stride = args->tiling_stride;
+       } else {
+               IPVR_DEBUG_WARN("unsupported tiling scheme %d and stide %d.\n",
+                       args->tiling_scheme, args->tiling_stride);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static int32_t
+ipvr_context_destroy_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_ipvr_context_destroy *args = data;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct drm_ipvr_file_private *fpriv = file_priv->driver_priv;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       struct ipvr_context *ipvr_ctx  = NULL;
+       unsigned long irq_flags;
+
+       IPVR_DEBUG_ENTRY("enter\n");
+       ipvr_ctx = (struct ipvr_context *)
+                       idr_find(&dev_priv->ipvr_ctx_idr, args->ctx_id);
+       if (!ipvr_ctx || (ipvr_ctx->ipvr_fpriv != file_priv->driver_priv))
+               return -ENOENT;
+       IPVR_DEBUG_PM("Video:remove context profile %d, entrypoint %d\n",
+               (ipvr_ctx->ctx_type >> 8) & 0xff,
+               (ipvr_ctx->ctx_type & 0xff));
+       mutex_lock(&ved_priv->ved_mutex);
+       if (ved_priv->ipvr_ctx == ipvr_ctx)
+               ved_priv->ipvr_ctx = NULL;
+       mutex_unlock(&ved_priv->ved_mutex);
+
+       spin_lock_irqsave(&dev_priv->ipvr_ctx_lock, irq_flags);
+       list_del(&ipvr_ctx->head);
+       fpriv->ctx_id = IPVR_CONTEXT_INVALID_ID;
+       spin_unlock_irqrestore(&dev_priv->ipvr_ctx_lock, irq_flags);
+
+       idr_remove(&dev_priv->ipvr_ctx_idr, ipvr_ctx->ctx_id);
+
+       kfree(ipvr_ctx);
+       return 0;
+}
+
+static int32_t
+ipvr_misc_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct drm_ipvr_misc *args = data;
+       int32_t ret = 0;
+       uint64_t value;
+
+       IPVR_DEBUG_ENTRY("enter\n");
+       if (!dev_priv) {
+               IPVR_DEBUG_WARN("called with no initialization.\n");
+               return -EINVAL;
+       }
+       switch (args->key) {
+       case IPVR_DEVICE_INFO: {
+               /* todo: remove hard-coding */
+               uint32_t device_info = 0xf31 << 16;
+               if (copy_to_user((void __user *)((unsigned long)args->value),
+                               &device_info, sizeof(device_info)))
+                       ret = -EFAULT;
+               break;
+       }
+       case IPVR_UPDATE_TILING: {
+               struct drm_ipvr_update_tiling tiling;
+               struct ipvr_context *ipvr_ctx = NULL;
+               if (copy_from_user(&tiling,
+                               (void __user *)((unsigned long)args->arg),
+                               sizeof(tiling))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               ipvr_ctx = (struct ipvr_context *)
+                       idr_find(&dev_priv->ipvr_ctx_idr, tiling.ctx_id);
+               if (!ipvr_ctx ||
+                       (ipvr_ctx->ipvr_fpriv != file_priv->driver_priv)) {
+                       IPVR_DEBUG_WARN("fail to find ctx %d", tiling.ctx_id);
+                       return -ENOENT;
+               }
+               IPVR_DEBUG_GENERAL("Video: update video tiling for ctx %d, "
+                       "old tiling scheme is %d, old tiling stride is %d, "
+                       "new tiling scheme is %d, new tiling stride is %d.\n",
+                       tiling.ctx_id,
+                       ipvr_ctx->tiling_scheme, ipvr_ctx->tiling_stride,
+                       tiling.tiling_scheme, tiling.tiling_stride);
+               if ((tiling.tiling_scheme == 0 && tiling.tiling_stride <= 3) ||
+                   (tiling.tiling_scheme == 1 && tiling.tiling_stride <= 2)) {
+                       ipvr_ctx->tiling_scheme = tiling.tiling_scheme;
+                       ipvr_ctx->tiling_stride = tiling.tiling_stride;
+               } else {
+                       IPVR_ERROR("unsupported tile scheme: %d, stide: %d.\n",
+                               tiling.tiling_scheme, tiling.tiling_stride);
+                       ret = -EINVAL;
+               }
+               break;
+       }
+       default:
+               IPVR_DEBUG_WARN("unsupported key %llu\n",
+                               (unsigned long long)args->key);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static struct drm_ioctl_desc ipvr_gem_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(IPVR_CONTEXT_CREATE,
+                       ipvr_context_create_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_CONTEXT_DESTROY,
+                       ipvr_context_destroy_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_MISC,
+                       ipvr_misc_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(IPVR_GEM_EXECBUFFER,
+                       ipvr_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_GEM_BUSY,
+                       ipvr_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_GEM_CREATE,
+                       ipvr_gem_create_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_GEM_MMAP,
+                       ipvr_gem_mmap_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_SYNC_CPU,
+                       ipvr_sync_cpu_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_GEM_WAIT,
+                       ipvr_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(IPVR_GEM_USERPTR,
+                       ipvr_gem_userptr_ioctl, DRM_UNLOCKED),
+};
+
+static void ipvr_gem_init(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       dev_priv->ipvr_bo_slab = kmem_cache_create("ipvr_gem_object",
+                                 sizeof(union drm_ipvr_gem_objects), 0,
+                                 SLAB_HWCACHE_ALIGN, NULL);
+
+       INIT_LIST_HEAD(&dev_priv->ipvr_mm.unbound_list);
+       INIT_LIST_HEAD(&dev_priv->ipvr_mm.bound_list);
+       spin_lock_init(&dev_priv->ipvr_mm.object_stat_lock);
+
+       dev_priv->ipvr_mm.interruptible = true;
+}
+
+static void ipvr_gem_setup_mmu(struct drm_device *dev,
+                                      unsigned long linear_start,
+                                      unsigned long linear_end,
+                                      unsigned long tiling_start,
+                                      unsigned long tiling_end)
+{
+       /* Let GEM Manage all of the aperture.
+        *
+        * However, leave one page at the end still bound to the scratch page.
+        * There are a number of places where hardware apparently prefetches
+        * past the end of the object, and we've seen multiple hangs with the
+        * GPU head pointer stuck in a batchbuffer bound at last page of the
+        * aperture.  One page should be enough to keep any prefetching inside
+        * of the aperture.
+        */
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ipvr_address_space *addr_space = &dev_priv->addr_space;
+
+       /* todo: add sanity check */
+       addr_space->dev = dev_priv->dev;
+       INIT_LIST_HEAD(&addr_space->active_list);
+       INIT_LIST_HEAD(&addr_space->inactive_list);
+
+       /* Subtract the guard page ... */
+       drm_mm_init(&addr_space->linear_mm, linear_start,
+                   linear_end - linear_start - PAGE_SIZE);
+       dev_priv->addr_space.linear_start = linear_start;
+       dev_priv->addr_space.linear_total = linear_end - linear_start;
+
+       drm_mm_init(&addr_space->tiling_mm, tiling_start,
+                   tiling_end - tiling_start - PAGE_SIZE);
+       dev_priv->addr_space.tiling_start = tiling_start;
+       dev_priv->addr_space.tiling_total = tiling_end - tiling_start;
+}
+
+static void ipvr_do_takedown(struct drm_device *dev)
+{
+       /* todo: need check if need clean up mm here */
+       ipvr_ved_uninit(dev);
+}
+
+static int32_t ipvr_drm_unload(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       BUG_ON(!dev->platformdev);
+
+       IPVR_DEBUG_ENTRY("entered.");
+       if (dev_priv) {
+               BUG_ON(atomic_read(&dev_priv->ved_power_usage));
+               WARN_ON(pm_runtime_get_sync(&dev->platformdev->dev) < 0);
+
+               if (dev_priv->ipvr_bo_slab)
+                       kmem_cache_destroy(dev_priv->ipvr_bo_slab);
+               ipvr_fence_driver_fini(dev_priv);
+
+               ipvr_do_takedown(dev);
+
+               WARN_ON(pm_runtime_put_sync_suspend(&dev->platformdev->dev) < 0);
+
+               if (dev_priv->validate_ctx.buffers)
+                       vfree(dev_priv->validate_ctx.buffers);
+
+               if (dev_priv->pf_pd) {
+                       ipvr_mmu_free_pagedir(dev_priv->pf_pd);
+                       dev_priv->pf_pd = NULL;
+               }
+               if (dev_priv->mmu) {
+                       ipvr_mmu_driver_takedown(dev_priv->mmu);
+                       dev_priv->mmu = NULL;
+               }
+
+               if (dev_priv->ved_reg_base) {
+                       iounmap(dev_priv->ved_reg_base - dev_priv->ved_reg_offset);
+                       dev_priv->ved_reg_base = NULL;
+                       dev_priv->ved_reg_offset = 0;
+               }
+
+               list_del(&dev_priv->default_ctx.head);
+               idr_remove(&dev_priv->ipvr_ctx_idr, dev_priv->default_ctx.ctx_id);
+               kfree(dev_priv);
+       }
+
+       pm_runtime_disable(&dev->platformdev->dev);
+
+       return 0;
+}
+
+static int32_t ipvr_drm_load(struct drm_device *dev, unsigned long flags)
+{
+       struct drm_ipvr_private *dev_priv;
+       int32_t ctx_id, ret = 0;
+       struct platform_device *platdev;
+       struct resource *res_mmio, *res_reg;
+       void __iomem* mmio_addr;
+
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+       if (dev_priv == NULL)
+               return -ENOMEM;
+
+       dev->dev_private = dev_priv;
+       dev_priv->dev = dev;
+
+       BUG_ON(!dev->platformdev);
+       platdev = dev->platformdev;
+
+       mutex_init(&dev_priv->cmdbuf_mutex);
+       INIT_LIST_HEAD(&dev_priv->validate_ctx.validate_list);
+
+       dev_priv->pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+       if (!dev_priv->pci_root) {
+               kfree(dev_priv);
+               return -ENODEV;
+       }
+
+       dev->irq = platform_get_irq(platdev, 0);
+       if (dev->irq < 0) {
+               ret = -ENODEV;
+               goto out_err;
+       }
+
+       res_mmio = platform_get_resource(platdev, IORESOURCE_MEM, 0);
+       res_reg = platform_get_resource(platdev, IORESOURCE_REG, 0);
+       if (!res_mmio || !res_reg) {
+               ret = -ENXIO;
+               goto out_err;
+       }
+
+       /* ioremap_wc() returns NULL on failure, not an ERR_PTR value */
+       mmio_addr = ioremap_wc(res_mmio->start, resource_size(res_mmio));
+       if (!mmio_addr) {
+               ret = -EACCES;
+               goto out_err;
+       }
+
+       dev_priv->ved_reg_base = mmio_addr + res_reg->start;
+       dev_priv->ved_reg_offset = res_reg->start;
+       IPVR_DEBUG_VED("ved_reg_base is %p, range is 0x%llx - 0x%llx.\n",
+               dev_priv->ved_reg_base,
+               res_reg->start, res_reg->end);
+
+       platform_set_drvdata(dev->platformdev, dev);
+       pm_runtime_enable(&dev->platformdev->dev);
+
+       if (pm_runtime_get_sync(&dev->platformdev->dev) < 0) {
+               ret = -EBUSY;
+               goto out_err;
+       }
+
+       IPVR_DEBUG_INIT("MSVDX_CORE_REV_OFFSET by readl is 0x%x.\n",
+               readl(dev_priv->ved_reg_base + 0x640));
+       IPVR_DEBUG_INIT("MSVDX_CORE_REV_OFFSET by VED_REG_READ32 is 0x%x.\n",
+               VED_REG_READ32(MSVDX_CORE_REV_OFFSET));
+
+       /* mmu init */
+       dev_priv->mmu = ipvr_mmu_driver_init((void *)0,
+               drm_ipvr_trap_pagefaults,
+               0, dev_priv);
+       if (!dev_priv->mmu) {
+               ret = -EBUSY;
+               goto out_err;
+       }
+
+       dev_priv->pf_pd = ipvr_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+       if (!dev_priv->pf_pd) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       ipvr_mmu_set_pd_context(ipvr_mmu_get_default_pd(dev_priv->mmu), 0);
+       ipvr_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+       /*
+        * Initialize sequence numbers for the different command
+        * submission mechanisms.
+        */
+       dev_priv->last_seq = 1;
+
+       ipvr_gem_init(dev);
+
+       ipvr_gem_setup_mmu(dev,
+               IPVR_MEM_MMU_LINEAR_START,
+               IPVR_MEM_MMU_LINEAR_END,
+               IPVR_MEM_MMU_TILING_START,
+               IPVR_MEM_MMU_TILING_END);
+
+       ipvr_ved_init(dev);
+
+       WARN_ON(pm_runtime_put_sync_suspend(&dev->platformdev->dev) < 0);
+
+       dev_priv->ved_private->ved_needs_reset = 1;
+       mutex_init(&dev_priv->ved_pm_mutex);
+       atomic_set(&dev_priv->ved_power_usage, 0);
+
+       ipvr_fence_driver_init(dev_priv);
+
+       dev_priv->validate_ctx.buffers =
+               vmalloc(IPVR_NUM_VALIDATE_BUFFERS *
+                       sizeof(struct ipvr_validate_buffer));
+       if (!dev_priv->validate_ctx.buffers) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       /* ipvr context initialization */
+       INIT_LIST_HEAD(&dev_priv->ipvr_ctx_list);
+       spin_lock_init(&dev_priv->ipvr_ctx_lock);
+       idr_init(&dev_priv->ipvr_ctx_idr);
+       /* default ipvr context is used for scaling, rotation case */
+       ctx_id = idr_alloc(&dev_priv->ipvr_ctx_idr, &dev_priv->default_ctx,
+                          IPVR_MIN_CONTEXT_ID, IPVR_MAX_CONTEXT_ID,
+                          GFP_KERNEL);
+       if (ctx_id < 0) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       dev_priv->default_ctx.ctx_id = ctx_id;
+       INIT_LIST_HEAD(&dev_priv->default_ctx.head);
+       dev_priv->default_ctx.ctx_type = 0;
+       dev_priv->default_ctx.ipvr_fpriv = NULL;
+
+       /* don't need protect with spinlock during module load stage */
+       list_add(&dev_priv->default_ctx.head, &dev_priv->ipvr_ctx_list);
+       dev_priv->default_ctx.tiling_scheme = 0;
+       dev_priv->default_ctx.tiling_stride = 0;
+
+       return 0;
+out_err:
+       ipvr_drm_unload(dev);
+       return ret;
+}
+
+/*
+ * The .open() method is called every time the device is opened by an
+ * application. Drivers can allocate per-file private data in this method and
+ * store them in the struct drm_file::driver_priv field. Note that the .open()
+ * method is called before .firstopen().
+ */
+static int32_t
+ipvr_drm_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_ipvr_file_private *ipvr_fp;
+       IPVR_DEBUG_ENTRY("enter\n");
+
+       ipvr_fp = kzalloc(sizeof(*ipvr_fp), GFP_KERNEL);
+       if (unlikely(ipvr_fp == NULL))
+               return -ENOMEM;
+
+       file_priv->driver_priv = ipvr_fp;
+
+       return 0;
+}
+
+/*
+ * The close operation is split into .preclose() and .postclose() methods.
+ * Drivers must stop and cleanup all per-file operations in the .preclose()
+ * method. For instance pending vertical blanking and page flip events must be
+ * cancelled. No per-file operation is allowed on the file handle after
+ * returning from the .preclose() method.
+ */
+static void
+ipvr_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+       /* if user didn't destroy the ctx explicitly, remove it here */
+       struct drm_ipvr_private *dev_priv;
+       struct drm_ipvr_file_private *ipvr_fpriv;
+       struct ved_private *ved_priv;
+       struct ipvr_context *ipvr_ctx  = NULL;
+       unsigned long irq_flags;
+
+       IPVR_DEBUG_ENTRY("enter\n");
+       dev_priv = dev->dev_private;
+       ipvr_fpriv = file_priv->driver_priv;
+       ved_priv = dev_priv->ved_private;
+
+       if (ipvr_fpriv->ctx_id == IPVR_CONTEXT_INVALID_ID)
+               return;
+       ipvr_ctx = (struct ipvr_context *)
+                       idr_find(&dev_priv->ipvr_ctx_idr, ipvr_fpriv->ctx_id);
+       if (!ipvr_ctx || (ipvr_ctx->ipvr_fpriv != ipvr_fpriv)) {
+               IPVR_DEBUG_GENERAL("ctx for id %d was already destroyed\n",
+                               ipvr_fpriv->ctx_id);
+               return;
+       }
+       IPVR_DEBUG_PM("Video:remove context profile %d, entrypoint %d\n",
+               (ipvr_ctx->ctx_type >> 8) & 0xff,
+               (ipvr_ctx->ctx_type & 0xff));
+       mutex_lock(&ved_priv->ved_mutex);
+       if (ved_priv->ipvr_ctx == ipvr_ctx)
+               ved_priv->ipvr_ctx = NULL;
+       mutex_unlock(&ved_priv->ved_mutex);
+
+       spin_lock_irqsave(&dev_priv->ipvr_ctx_lock, irq_flags);
+       list_del(&ipvr_ctx->head);
+       ipvr_fpriv->ctx_id = IPVR_CONTEXT_INVALID_ID;
+       spin_unlock_irqrestore(&dev_priv->ipvr_ctx_lock, irq_flags);
+
+       idr_remove(&dev_priv->ipvr_ctx_idr, ipvr_ctx->ctx_id);
+
+       kfree(ipvr_ctx);
+}
+
+/*
+ * Finally the .postclose() method is called as the last step of the close
+ * operation, right before calling the .lastclose() method if no other open
+ * file handle exists for the device. Drivers that have allocated per-file
+ * private data in the .open() method should free it here.
+ */
+static void
+ipvr_drm_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+       struct drm_ipvr_file_private *ipvr_fpriv = file_priv->driver_priv;
+       IPVR_DEBUG_ENTRY("enter\n");
+       kfree(ipvr_fpriv);
+}
+
+static irqreturn_t ipvr_irq_handler(int32_t irq, void *arg)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       WARN_ON(ved_irq_handler(dev));
+       return IRQ_HANDLED;
+}
+
+static const struct file_operations ipvr_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_ioctl,
+#endif
+       /* no need to define mmap. User space maps bo with DRM_IPVR_GEM_MMAP */
+};
+
+static int32_t ipvr_drm_freeze(struct drm_device *dev)
+{
+       int32_t ret;
+       int32_t power_usage;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       IPVR_DEBUG_ENTRY("enter\n");
+       power_usage = atomic_read(&dev_priv->ved_power_usage);
+       BUG_ON(power_usage < 0);
+       if (power_usage > 0) {
+               IPVR_DEBUG_PM("VED power usage is %d, skip freezing\n", 
power_usage);
+               return 0;
+       }
+
+       ret = ved_check_idle(dev);
+       if (ret) {
+               IPVR_DEBUG_PM("VED check idle fail: %d, skip freezing\n", ret);
+               return 0;
+       }
+
+       if (dev->irq_enabled) {
+               ret = drm_irq_uninstall(dev);
+               if (unlikely(ret)) {
+                       IPVR_ERROR("Error uninstalling IRQ handler: %d\n", ret);
+                       return -EFAULT;
+               }
+               IPVR_DEBUG_PM("Successfully uninstalled IRQ\n");
+       } else {
+               IPVR_DEBUG_PM("irq_enabled is %d\n", dev->irq_enabled);
+       }
+
+       if (is_ved_on(dev)) {
+               if (!ved_power_off(dev)) {
+                       IPVR_ERROR("Failed to power off VED\n");
+                       return -EFAULT;
+               }
+               IPVR_DEBUG_PM("Successfully powered off\n");
+       } else {
+               IPVR_DEBUG_PM("Skiped power-off since already powered off\n");
+       }
+
+       return 0;
+}
+
+static int32_t ipvr_drm_thaw(struct drm_device *dev)
+{
+       int ret;
+       IPVR_DEBUG_ENTRY("enter\n");
+       if (!is_ved_on(dev)) {
+               if (!ved_power_on(dev)) {
+                       IPVR_ERROR("Failed to power on VED\n");
+                       return -EFAULT;
+               }
+               IPVR_DEBUG_PM("Successfully powered on\n");
+       } else {
+               IPVR_DEBUG_PM("Skiped power-on since already powered on\n");
+       }
+
+       if (!dev->irq_enabled) {
+               ret = drm_irq_install(dev, dev->irq);
+               if (ret) {
+                       IPVR_ERROR("Error installing IRQ handler: %d\n", ret);
+                       return -EFAULT;
+               }
+               IPVR_DEBUG_PM("Successfully installed IRQ\n");
+       } else {
+               IPVR_DEBUG_PM("irq_enabled is %d\n", dev->irq_enabled);
+       }
+
+       return 0;
+}
+
+static int32_t ipvr_pm_suspend(struct device *dev)
+{
+       struct platform_device *platformdev = to_platform_device(dev);
+       struct drm_device *drm_dev = platform_get_drvdata(platformdev);
+       IPVR_DEBUG_PM("PM suspend called\n");
+       BUG_ON(!drm_dev);
+       return ipvr_drm_freeze(drm_dev);
+}
+
+static int32_t ipvr_pm_resume(struct device *dev)
+{
+       struct platform_device *platformdev = to_platform_device(dev);
+       struct drm_device *drm_dev = platform_get_drvdata(platformdev);
+       IPVR_DEBUG_PM("PM resume called\n");
+       BUG_ON(!drm_dev);
+       return ipvr_drm_thaw(drm_dev);
+}
+
+/*
+ * the dumb GEM API is mainly for dumb buffers suitable for scanout;
+ * it is not needed for the ipvr driver.
+ * gem_vm_ops is used for the mmap case, also not needed for ipvr.
+ * todo: prime support can be enabled later
+ */
+static struct drm_driver ipvr_drm_driver = {
+       .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM,
+       .load = ipvr_drm_load,
+       .unload = ipvr_drm_unload,
+       .open = ipvr_drm_open,
+       .preclose = ipvr_drm_preclose,
+       .postclose = ipvr_drm_postclose,
+       .irq_handler = ipvr_irq_handler,
+       .set_busid = drm_platform_set_busid,
+       .gem_free_object = ipvr_gem_free_object,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_init = ipvr_debugfs_init,
+       .debugfs_cleanup = ipvr_debugfs_cleanup,
+#endif
+       .ioctls = ipvr_gem_ioctls,
+       .num_ioctls = ARRAY_SIZE(ipvr_gem_ioctls),
+       .fops = &ipvr_fops,
+       .name = IPVR_DRIVER_NAME,
+       .desc = IPVR_DRIVER_DESC,
+       .date = IPVR_DRIVER_DATE,
+       .major = IPVR_DRIVER_MAJOR,
+       .minor = IPVR_DRIVER_MINOR,
+       .patchlevel = IPVR_DRIVER_PATCHLEVEL,
+};
+
+static int32_t ipvr_plat_probe(struct platform_device *device)
+{
+       return drm_platform_init(&ipvr_drm_driver, device);
+}
+
+static int32_t ipvr_plat_remove(struct platform_device *device)
+{
+       drm_put_dev(platform_get_drvdata(device));
+       return 0;
+}
+
+static const struct dev_pm_ops ipvr_pm_ops = {
+       .suspend = ipvr_pm_suspend,
+       .resume = ipvr_pm_resume,
+       .freeze = ipvr_pm_suspend,
+       .thaw = ipvr_pm_resume,
+       .poweroff = ipvr_pm_suspend,
+       .restore = ipvr_pm_resume,
+#ifdef CONFIG_PM_RUNTIME
+       .runtime_suspend = ipvr_pm_suspend,
+       .runtime_resume = ipvr_pm_resume,
+#endif
+};
+
+static struct platform_driver ipvr_plat_driver = {
+       .driver = {
+               .name = "ipvr-ved",
+               .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+               .pm = &ipvr_pm_ops,
+#endif
+       },
+       .probe = ipvr_plat_probe,
+       .remove = ipvr_plat_remove,
+};
+
+module_platform_driver(ipvr_plat_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ipvr/ipvr_drv.h b/drivers/gpu/drm/ipvr/ipvr_drv.h
new file mode 100644
index 0000000..280307b
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_drv.h
@@ -0,0 +1,464 @@
+/**************************************************************************
+ * ipvr_drv.h: IPVR driver common header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _IPVR_DRV_H_
+#define _IPVR_DRV_H_
+#include "drmP.h"
+#include "ipvr_drm.h"
+#include "ipvr_mmu.h"
+#include <linux/version.h>
+#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
+#include <linux/pm_qos.h>
+#include <linux/mmu_notifier.h>
+
+#define IPVR_DRIVER_AUTHOR             "Intel, Inc."
+#define IPVR_DRIVER_NAME               "ipvr"
+#define IPVR_DRIVER_DESC               "Vxd392 VP8 driver for Baytrail"
+#define IPVR_DRIVER_DATE               "20140609"
+#define IPVR_DRIVER_MAJOR              0
+#define IPVR_DRIVER_MINOR              1
+#define IPVR_DRIVER_PATCHLEVEL 0
+
+/* General customization:
+ */
+
+#define IPVR_MMU_CACHED_MEMORY   0x0001        /* Bind to MMU only */
+#define IPVR_MMU_RO_MEMORY               0x0002        /* MMU RO memory */
+#define IPVR_MMU_WO_MEMORY           0x0004    /* MMU WO memory */
+
+/*
+ * PTEs and PDEs
+ */
+
+#define IPVR_PDE_MASK          0x003FFFFF
+#define IPVR_PDE_SHIFT         22
+#define IPVR_PTE_SHIFT         12
+
+#define IPVR_PTE_VALID         0x0001  /* PTE / PDE valid */
+#define IPVR_PTE_WO                    0x0002  /* Write only */
+#define IPVR_PTE_RO                    0x0004  /* Read only */
+#define IPVR_PTE_CACHED                0x0008  /* CPU cache coherent */
+
+/*
+ * linear MMU size is 512M : 0 - 512M
+ * tiling MMU size is 512M : 512M - 1024M
+ */
+#define IPVR_MEM_MMU_LINEAR_START      0x00000000
+#define IPVR_MEM_MMU_LINEAR_END                0x20000000
+#define IPVR_MEM_MMU_TILING_START      0x20000000
+#define IPVR_MEM_MMU_TILING_END                0x40000000
+
+#define IPVR_GEM_DOMAIN_CPU            0x00000001
+#define IPVR_GEM_DOMAIN_GPU            0x00000002
+
+/* video context */
+#define VA_RT_FORMAT_PROTECTED 0x80000000
+
+#define IPVR_CONTEXT_INVALID_ID 0
+#define IPVR_MIN_CONTEXT_ID 1
+#define IPVR_MAX_CONTEXT_ID 0xff
+
+/* video error status report */
+#define MAX_SLICES_PER_PICTURE 72
+#define MAX_DECODE_BUFFERS (24)
+#define VED_MAX_EC_INSTANCE (4)
+
+/*
+ * Debug print bits setting
+ */
+#define IPVR_D_GENERAL (1 << 0)
+#define IPVR_D_INIT    (1 << 1)
+#define IPVR_D_IRQ     (1 << 2)
+#define IPVR_D_ENTRY   (1 << 3)
+#define IPVR_D_PM      (1 << 4)
+#define IPVR_D_REG     (1 << 5)
+#define IPVR_D_VED     (1 << 6)
+#define IPVR_D_WARN    (1 << 7)
+
+#define IPVR_DEBUG_GENERAL(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_GENERAL, _fmt, ##_arg)
+#define IPVR_DEBUG_INIT(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_INIT, _fmt, ##_arg)
+#define IPVR_DEBUG_IRQ(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_IRQ, _fmt, ##_arg)
+#define IPVR_DEBUG_ENTRY(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_ENTRY, _fmt, ##_arg)
+#define IPVR_DEBUG_PM(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_PM, _fmt, ##_arg)
+#define IPVR_DEBUG_REG(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_REG, _fmt, ##_arg)
+#define IPVR_DEBUG_VED(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_VED, _fmt, ##_arg)
+#define IPVR_DEBUG_WARN(_fmt, _arg...) \
+       IPVR_DEBUG(IPVR_D_WARN, _fmt, ##_arg)
+
+#define IPVR_DEBUG(_flag, _fmt, _arg...)                               \
+       do {                                                            \
+               if (unlikely((_flag) & drm_ipvr_debug))         \
+                       printk(KERN_INFO                                \
+                              "[ipvr:0x%02x:%s] " _fmt , _flag,        \
+                              __func__ , ##_arg);                      \
+       } while (0)
+
+#define IPVR_ERROR(_fmt, _arg...)                                      \
+       do {                                                            \
+                       printk(KERN_ERR                                 \
+                              "[ipvr:ERROR:%s] " _fmt ,                \
+                              __func__ , ##_arg);                      \
+       } while (0)
+
+/*
+ * set cpu_relax = 1 in sysfs to use cpu_relax instead of a udelay busy loop;
+ * set udelay_divider to reduce the udelay values, e.g. 10 reduces them 10 times
+ */
+#define IPVR_UDELAY(usec)                                      \
+do {                                                           \
+       if (drm_ipvr_cpurelax == 0)                             \
+               DRM_UDELAY(usec / drm_ipvr_udelaydivider);      \
+       else                                                    \
+               cpu_relax();                                    \
+} while (0)
+
+typedef struct ipvr_validate_buffer ipvr_validate_buffer_t;
+
+uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg);
+#define REG_READ(reg)         REGISTER_READ(dev, (reg))
+
+#define to_ipvr_bo(x) container_of(x, struct drm_ipvr_gem_object, base)
+
+extern int32_t drm_ipvr_debug;
+extern int32_t drm_ipvr_udelaydivider;
+extern int32_t drm_ipvr_cpurelax;
+/**
+ * struct ipvr_validate_context
+ *
+ * @buffers:       array of pre-allocated validate buffers.
+ * @used_buffers:  number of buffers in @buffers array currently in use.
+ * @validate_list: list of buffers validated from user-space.
+ *
+ * This structure is used during execbuf validation.
+ */
+struct ipvr_validate_context {
+       ipvr_validate_buffer_t *buffers;
+       uint32_t used_buffers;
+       struct list_head validate_list;
+};
+
+struct ipvr_mmu_driver;
+struct ipvr_mmu_pd;
+
+/*
+ * may not be needed: BOs are always bound into the mmu,
+ * so there is no need for bound_list and unbound_list
+ */
+struct ipvr_gem_mm {
+       /** List of all objects in mmu space. Used to restore mmu
+        * mappings on resume */
+       struct list_head bound_list;
+       /**
+        * List of objects which are not bound to the mmu (thus
+        * are idle and not used by the GPU) but still have
+        * (presumably uncached) pages still attached.
+        */
+       struct list_head unbound_list;
+
+       /**
+        * Are we in a non-interruptible section of code like
+        * modesetting?
+        */
+       bool interruptible;
+
+       /* accounting, useful for userland debugging */
+       spinlock_t object_stat_lock;
+       size_t object_memory;
+       uint32_t object_count;
+};
+
+struct ipvr_address_space {
+       struct drm_mm linear_mm;
+       struct drm_mm tiling_mm;
+       struct drm_device *dev;
+       unsigned long linear_start;
+       size_t linear_total;
+       unsigned long tiling_start;
+       size_t tiling_total;
+
+       /* need it during clear_range */
+       struct {
+               dma_addr_t addr;
+               struct page *page;
+       } scratch;
+
+       /**
+        * List of objects currently involved in rendering.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * LRU list of objects which are not in the ringbuffer and
+        * are ready to unbind, but are still in the GTT.
+        *
+        * last_rendering_seqno is 0 while an object is in this list.
+        *
+        * A reference is not held on the buffer while on this list,
+        * as merely being GTT-bound shouldn't prevent its being
+        * freed, and we'll pull it off the list in the free path.
+        */
+       struct list_head inactive_list;
+
+       /* todo: needed to avoid a security problem: when destroying a bo,
+        * its mmu entries should be reset to point at the scratch page
+        */
+       void (*clear_range)(struct ipvr_address_space *vm,
+                           unsigned int first_entry,
+                           unsigned int num_entries);
+       /* todo: directly call mmu function, the func ptr is not needed */
+       void (*insert_entries)(struct ipvr_address_space *vm,
+                              struct sg_table *st,
+                              unsigned int first_entry,
+                              int cache_level);
+       void (*cleanup)(struct ipvr_address_space *vm);
+};
+
+struct ipvr_fence_driver {
+       uint16_t        sync_seq;
+       atomic_t        signaled_seq;
+       unsigned long   last_activity;
+       bool            initialized;
+       spinlock_t      fence_lock;
+};
+
+struct ipvr_context {
+       /* used to link into ipvr_ctx_list */
+       struct list_head head;
+       uint32_t ctx_id;
+       /* used to double-check the ctx found via idr lookup; may be removed */
+       struct drm_ipvr_file_private *ipvr_fpriv; /* DRM device file pointer */
+       uint32_t ctx_type; /* profile << 8 | entrypoint */
+
+       uint16_t cur_seq;
+
+       /* for IMG DDK, only use tiling for 2k and 4k buffer stride */
+       /*
+        * following tiling strides for VED are supported:
+        * stride 0: 512 for scheme 0, 1024 for scheme 1
+        * stride 1: 1024 for scheme 0, 2048 for scheme 1
+        * stride 2: 2048 for scheme 0, 4096 for scheme 1
+        * stride 3: 4096 for scheme 0
+        */
+       uint8_t tiling_stride;
+       /*
+        * scheme 0: tile is 256x16, while minimal tile stride is 512
+        * scheme 1: tile is 512x8, while minimal tile stride is 1024
+        */
+       uint8_t tiling_scheme;
+};
+
+typedef struct drm_ipvr_private {
+       struct drm_device *dev;
+       struct pci_dev *pci_root;
+       /* pci revision id for B0:D2:F0 */
+       uint8_t platform_rev_id;
+
+       uint32_t device_id;
+
+       /* IMG video context */
+       struct list_head ipvr_ctx_list;
+       spinlock_t ipvr_ctx_lock;
+       struct idr ipvr_ctx_idr;
+       struct ipvr_context default_ctx;
+
+       /* PM related */
+       struct mutex ved_pm_mutex;
+       atomic_t ved_power_usage;
+
+       /* exec related */
+       struct mutex cmdbuf_mutex;
+       struct ipvr_validate_context validate_ctx;
+
+       /* IMG MMU specific */
+       struct ipvr_mmu_driver *mmu;
+       struct ipvr_mmu_pd *pf_pd;
+       atomic_t ipvr_mmu_invaldc;
+
+       /* GEM mm related */
+       struct ipvr_gem_mm ipvr_mm;
+       struct kmem_cache *ipvr_bo_slab;
+       struct ipvr_address_space addr_space;
+
+       /* fence related */
+       uint32_t last_seq;
+       wait_queue_head_t fence_queue;
+       struct ipvr_fence_driver fence_drv;
+
+       /*
+        * VED specific
+        */
+       uint8_t __iomem* ved_reg_base;
+       unsigned long ved_reg_offset;
+       struct ved_private *ved_private;
+} drm_ipvr_private_t;
+
+struct ved_mb_region {
+       uint32_t start;
+       uint32_t end;
+};
+
+struct ved_decode_status {
+       uint32_t num_region;
+       struct ved_mb_region mb_regions[MAX_SLICES_PER_PICTURE];
+};
+
+struct ved_frame_info {
+       uint32_t handle;
+       uint32_t surface_id;
+       uint16_t fence;
+       uint32_t buffer_stride;
+       uint32_t buffer_size;
+       uint32_t picture_width_mb;
+       uint32_t fw_status;
+       uint32_t size_mb;
+       struct ved_decode_status decode_status;
+};
+
+struct drm_ipvr_gem_object;
+
+/* VED private structure */
+struct ved_private {
+       struct drm_device *dev;
+       struct drm_ipvr_private *dev_priv;
+
+       /* used to record seq got from irq fw-to-host msg */
+       uint16_t ved_cur_seq;
+
+       /*
+        * VED Rendec Memory
+        */
+       struct drm_ipvr_gem_object *ccb0;
+       uint32_t base_addr0;
+       struct drm_ipvr_gem_object *ccb1;
+       uint32_t base_addr1;
+       bool rendec_initialized;
+
+       /* VED firmware related */
+       struct drm_ipvr_gem_object  *fw_bo;
+       uint32_t fw_offset;
+       uint32_t mtx_mem_size;
+       bool fw_loaded_to_bo;
+       bool ved_fw_loaded;
+       void *ved_fw_ptr;
+       int ved_fw_size;
+       uint32_t fw_b0_uploaded;
+       /*
+        * there are two ways to load the fw:
+        * 1. loaded directly by the kernel driver; Baytrail (BYT) uses this way
+        * 2. loaded by the punit, which has security benefits
+        */
+       bool fw_loaded_by_punit;
+
+       /*
+        * ved command queue
+        */
+       spinlock_t ved_lock;
+       struct mutex ved_mutex;
+       struct list_head ved_queue;
+       /* busy means a cmd was submitted to fw but its irq hasn't been received */
+       bool ved_busy;
+       /* VED status read from register 0x20D0 */
+       uint32_t ved_hw_busy;
+
+       uint32_t ved_dash_access_ctrl;
+       uint32_t decoding_err;
+
+       struct ved_frame_info frame_info[MAX_DECODE_BUFFERS];
+       struct ved_decode_status decode_status;
+       uint32_t host_be_opp_enabled;
+
+       /* error concealment related */
+       struct work_struct ec_work;
+       struct drm_file *tfile; /* protected by cmdbuf_mutex */
+       struct ved_ec_context *ved_ec_ctx[VED_MAX_EC_INSTANCE];
+       struct ved_ec_context *cur_msvdx_ec_ctx;
+       uint32_t deblock_cmd_offset;
+       int num_cmd;
+       uint32_t vec_ec_mem_data[5];
+       uint32_t vec_ec_mem_saved;
+
+       /* pm related */
+       int ved_needs_reset;
+       unsigned int ved_pmstate;
+       struct kernfs_node *sysfs_pmstate;
+       uint32_t pm_gating_count;
+       /* pm suspend wq, use wq for current implementation */
+       struct delayed_work ved_suspend_wq;
+       struct tasklet_struct ved_suspend_tq;
+
+       /* protected by ved_mutex */
+       /* current ved decode context */
+       struct ipvr_context *ipvr_ctx;
+
+       struct page *mmu_recover_page;
+};
+
+struct drm_ipvr_file_private {
+       uint32_t ctx_id;
+};
+
+/* public ipvr gem functions for bo/page operation */
+void ipvr_gem_object_pin_pages(struct drm_ipvr_gem_object *obj);
+void ipvr_gem_object_unpin_pages(struct drm_ipvr_gem_object *obj);
+void *ipvr_gem_object_alloc(struct drm_device *dev);
+void ipvr_gem_object_free(struct drm_ipvr_gem_object *obj);
+int32_t ipvr_gem_object_get_pages(struct drm_ipvr_gem_object *obj);
+int32_t ipvr_gem_object_put_pages(struct drm_ipvr_gem_object *obj);
+
+int32_t ipvr_sync_cpu_grab(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *obj);
+int32_t ipvr_sync_cpu_release(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *obj);
+
+/* PRIME callbacks */
+struct sg_table *ipvr_gem_prime_get_sg_table(struct drm_gem_object *obj);
+void *ipvr_gem_prime_vmap(struct drm_gem_object *obj);
+void ipvr_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *ipvr_gem_prime_import_sg_table(struct drm_device *dev,
+               size_t size, struct sg_table *sg);
+int ipvr_gem_prime_pin(struct drm_gem_object *obj);
+void ipvr_gem_prime_unpin(struct drm_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_exec.c b/drivers/gpu/drm/ipvr/ipvr_exec.c
new file mode 100644
index 0000000..8b9d638
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_exec.c
@@ -0,0 +1,530 @@
+/**************************************************************************
+ * ipvr_exec.c: IPVR command buffer execution
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#include "ipvr_exec.h"
+#include "ipvr_gem.h"
+#include "ipvr_mmu.h"
+#include "ipvr_buffer.h"
+#include "ipvr_fence.h"
+#include "ipvr_trace.h"
+#include "ved_fw.h"
+#include "ved_msg.h"
+#include "ved_reg.h"
+#include "ved_ec.h"
+#include "ved_init.h"
+#include "ved_pm.h"
+#include "ved_cmd.h"
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+static inline bool ipvr_bo_is_reserved(struct drm_ipvr_gem_object *obj)
+{
+       return atomic_read(&obj->reserved);
+}
+
+static int32_t
+ipvr_bo_wait_unreserved(struct drm_ipvr_gem_object *obj, bool interruptible)
+{
+       if (interruptible) {
+               return wait_event_interruptible(obj->event_queue,
+                                              !ipvr_bo_is_reserved(obj));
+       } else {
+               wait_event(obj->event_queue, !ipvr_bo_is_reserved(obj));
+               return 0;
+       }
+}
+
+/**
+ * ipvr_bo_reserve - reserve the given bo
+ *
+ * @obj:     The buffer object to reserve.
+ * @interruptible:     whether the waiting is interruptible or not.
+ * @no_wait:    flag to indicate returning immediately
+ *
+ * Returns:
+ * -EBUSY: if bo is busy and @no_wait is true.
+ * -ERESTARTSYS if waiting was interrupted by a signal.
+ * 0 if reserving succeeded.
+ */
+int32_t ipvr_bo_reserve(struct drm_ipvr_gem_object *obj,
+                       bool interruptible, bool no_wait)
+{
+       int32_t ret;
+
+       while (unlikely(atomic_xchg(&obj->reserved, 1) != 0)) {
+               if (no_wait)
+                       return -EBUSY;
+               IPVR_DEBUG_GENERAL("wait bo unreserved, add to wait queue.\n");
+               ret = ipvr_bo_wait_unreserved(obj, interruptible);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ipvr_bo_unreserve - unreserve the given bo
+ *
+ * @obj:     The buffer object to unreserve.
+ *
+ * No return value.
+ */
+void ipvr_bo_unreserve(struct drm_ipvr_gem_object *obj)
+{
+       atomic_set(&obj->reserved, 0);
+       wake_up_all(&obj->event_queue);
+}
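+
+/*
+ * A minimal usage sketch of the reserve/unreserve pair above (hypothetical,
+ * for illustration only; the helper name is made up):
+ *
+ *     static int touch_bo_exclusively(struct drm_ipvr_gem_object *obj)
+ *     {
+ *             int ret = ipvr_bo_reserve(obj, true, false);
+ *             if (ret)
+ *                     return ret;      (-ERESTARTSYS if interrupted)
+ *             ... access the object exclusively here ...
+ *             ipvr_bo_unreserve(obj);  (wakes any waiters)
+ *             return 0;
+ *     }
+ */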
+
+static void ipvr_backoff_reservation(struct list_head *list)
+{
+       struct ipvr_validate_buffer *entry;
+
+       list_for_each_entry(entry, list, head) {
+               struct drm_ipvr_gem_object *obj = entry->ipvr_gem_bo;
+               if (!atomic_read(&obj->reserved))
+                       continue;
+               atomic_set(&obj->reserved, 0);
+               wake_up_all(&obj->event_queue);
+       }
+}
+
+/*
+ * ipvr_reserve_buffers - Reserve buffers for validation.
+ *
+ * @list:     the list of buffer objects to reserve
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ *
+ * Returns:
+ * -EBUSY if a buffer in the list is marked for CPU access.
+ * -ERESTARTSYS if waiting was interrupted by a signal.
+ * 0 if reserving succeeded.
+ */
+int32_t ipvr_reserve_buffers(struct list_head *list)
+{
+       struct ipvr_validate_buffer *entry;
+       int32_t ret;
+
+       if (list_empty(list))
+               return 0;
+
+       list_for_each_entry(entry, list, head) {
+               struct drm_ipvr_gem_object *bo = entry->ipvr_gem_bo;
+
+               ret = ipvr_bo_reserve(bo, true, true);
+               switch (ret) {
+               case 0:
+                       break;
+               case -EBUSY:
+                       ret = ipvr_bo_reserve(bo, true, false);
+                       if (!ret)
+                               break;
+                       else
+                               goto err;
+               default:
+                       goto err;
+               }
+
+               if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+                       ret = -EBUSY;
+                       goto err;
+               }
+       }
+
+       return 0;
+err:
+       ipvr_backoff_reservation(list);
+       return ret;
+}
+
+/**
+ * ipvr_set_tile - global setting of tiling info
+ *
+ * @dev:     the ipvr drm device
+ * @tiling_scheme:     see ipvr_drm.h for details
+ * @tiling_stride:     see ipvr_drm.h for details
+ *
+ * vxd392 hardware supports only one tile region so this configuration
+ * is global.
+ */
+void ipvr_set_tile(struct drm_device *dev,
+               uint8_t tiling_scheme, uint8_t tiling_stride)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint32_t cmd;
+       uint32_t start = IPVR_MEM_MMU_TILING_START;
+       uint32_t end = IPVR_MEM_MMU_TILING_END;
+
+       /* Enable memory tiling */
+       cmd = ((start >> 20) + (((end >> 20) - 1) << 12) +
+                               ((0x8 | tiling_stride) << 24));
+       IPVR_DEBUG_GENERAL("VED: MMU Tiling register0 %08x.\n", cmd);
+       IPVR_DEBUG_GENERAL("Region 0x%08x-0x%08x.\n", start, end);
+       VED_REG_WRITE32(cmd, MSVDX_MMU_TILE_BASE0_OFFSET);
+
+       /* we need to set the tile format to 512x8 on Baytrail, which is scheme 1 */
+       VED_REG_WRITE32(tiling_scheme << 3, MSVDX_MMU_CONTROL2_OFFSET);
+}
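+
+/*
+ * Informative note on the encoding above (field layout inferred from the
+ * expression, not from hardware documentation):
+ *   bits  0..11  tiling region start, in 1MB units (start >> 20)
+ *   bits 12..23  tiling region end minus one, in 1MB units
+ *   bits 24..27  0x8 (enable) | tiling_stride
+ * For example, a hypothetical 0x10000000..0x20000000 window with
+ * tiling_stride 2 encodes as 0x100 | (0x1ff << 12) | (0xa << 24),
+ * i.e. 0x0a1ff100.
+ */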
+
+/**
+ * ipvr_find_ctx_with_fence - lookup the context with given fence seqno
+ *
+ * @dev_priv:     the ipvr drm device
+ * @fence:     fence seqno generated by the context
+ *
+ * Returns:
+ * context pointer if found.
+ * NULL if not found.
+ */
+struct ipvr_context*
+ipvr_find_ctx_with_fence(struct drm_ipvr_private *dev_priv, uint16_t fence)
+{
+       struct ipvr_context *pos = NULL, *n = NULL;
+
+       if (unlikely(dev_priv == NULL)) {
+               return NULL;
+       }
+
+       spin_lock(&dev_priv->ipvr_ctx_lock);
+       list_for_each_entry_safe(pos, n, &dev_priv->ipvr_ctx_list, head) {
+               if (pos->cur_seq == fence) {
+                       spin_unlock(&dev_priv->ipvr_ctx_lock);
+                       return pos;
+               }
+       }
+       spin_unlock(&dev_priv->ipvr_ctx_lock);
+
+       return NULL;
+}
+
+static void ipvr_unreference_buffers(struct ipvr_validate_context *context)
+{
+       struct ipvr_validate_buffer *entry, *next;
+       struct drm_ipvr_gem_object *obj;
+       struct list_head *list = &context->validate_list;
+
+       list_for_each_entry_safe(entry, next, list, head) {
+               obj = entry->ipvr_gem_bo;
+               list_del(&entry->head);
+               drm_gem_object_unreference(&obj->base);
+               context->used_buffers--;
+       }
+}
+
+static int ipvr_update_buffers(struct drm_file *file_priv,
+                                       struct ipvr_validate_context *context,
+                                       uint64_t buffer_list,
+                                       int32_t count)
+{
+       struct ipvr_validate_buffer *entry;
+       struct ipvr_validate_arg __user *val_arg =
+               (struct ipvr_validate_arg __user *)(unsigned long)buffer_list;
+
+       if (list_empty(&context->validate_list))
+               return 0;
+
+       list_for_each_entry(entry, &context->validate_list, head) {
+               if (!val_arg) {
+                       IPVR_DEBUG_WARN("unexpected end of val_arg list!!!\n");
+                       return -EINVAL;
+               }
+               if (unlikely(copy_to_user(val_arg, &entry->val_req,
+                                           sizeof(entry->val_req)))) {
+                       IPVR_ERROR("copy_to_user fault.\n");
+                       return -EFAULT;
+               }
+               val_arg = (struct ipvr_validate_arg __user *)
+                                       (unsigned long)entry->val_req.next;
+       }
+       return 0;
+}
+
+static int ipvr_reference_buffers(struct drm_file *file_priv,
+                                       struct ipvr_validate_context *context,
+                                       uint64_t buffer_list,
+                                       int32_t count)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct ipvr_validate_arg __user *val_arg =
+               (struct ipvr_validate_arg __user *)(unsigned long)buffer_list;
+       struct ipvr_validate_buffer *item;
+       struct drm_ipvr_gem_object *obj;
+       int32_t ret = 0;
+
+       while (likely(val_arg != NULL) && (context->used_buffers < count)) {
+               if (unlikely(context->used_buffers >=
+                            IPVR_NUM_VALIDATE_BUFFERS)) {
+                       IPVR_ERROR("Too many buffers "
+                                 "on validate list.\n");
+                       ret = -EINVAL;
+                       goto out_err;
+               }
+               item = &context->buffers[context->used_buffers];
+               if (unlikely(copy_from_user(&item->val_req, val_arg,
+                                           sizeof(item->val_req)) != 0)) {
+                       IPVR_ERROR("copy_from_user fault.\n");
+                       ret = -EFAULT;
+                       goto out_err;
+               }
+               INIT_LIST_HEAD(&item->head);
+               obj = to_ipvr_bo(drm_gem_object_lookup(dev, file_priv,
+                                               item->val_req.handle));
+               if (!obj) {
+                       ret = -ENOENT;
+                       goto out_err;
+               }
+               item->ipvr_gem_bo = obj;
+
+               list_add_tail(&item->head, &context->validate_list);
+               context->used_buffers++;
+
+               val_arg = (struct ipvr_validate_arg __user *)
+                                       (unsigned long)item->val_req.next;
+       }
+
+       return 0;
+
+out_err:
+       ipvr_unreference_buffers(context);
+       return ret;
+}
+
+static int32_t ipvr_validate_buffer_list(struct drm_file *file_priv,
+                                       struct ipvr_validate_context *context)
+{
+       struct ipvr_validate_buffer *entry;
+       struct drm_ipvr_gem_object *obj;
+       struct list_head *list = &context->validate_list;
+       int32_t ret = 0;
+
+       list_for_each_entry(entry, list, head) {
+               obj = entry->ipvr_gem_bo;
+               /*
+                * Validation should check that the bo lies in the MMU
+                * space and that its presumed offset is correct (via
+                * ved_check_presumed); if the presumed offset is wrong,
+                * fix up the relocations with ved_fixup_relocs.
+                * The current implementation doesn't support shrink/evict,
+                * so the mmu offset needs no validation yet; this must be
+                * implemented once shrink/evict is supported.
+                */
+       }
+
+       return ret;
+}
+
+/**
+ * ipvr_gem_do_execbuffer - reference, reserve and execute a command buffer
+ *
+ * @dev:     the ipvr drm device
+ * @file_priv:      the ipvr drm file pointer
+ * @args:      input argument passed from userland
+ * @vm:      ipvr address space for all the bos to bind to
+ *
+ * Returns:
+ * -ENOENT if context not found, or cmdbuf bo not found
+ * -EINVAL if referencing buffer fails, or executing cmdbuf fails
+ * -EINTR if fails to lock mutex
+ * -EBUSY if fails to get power well, or execution fails
+ * -ERESTARTSYS if reserving a buffer fails
+ * -ENOMEM if execution fails
+ * -EFAULT if execution fails
+ * 0 if successful
+ */
+static int32_t ipvr_gem_do_execbuffer(struct drm_device *dev,
+                                       struct drm_file *file_priv,
+                                       struct drm_ipvr_gem_execbuffer *args,
+                                       struct ipvr_address_space *vm)
+{
+       drm_ipvr_private_t *dev_priv = dev->dev_private;
+       struct ipvr_validate_context *context = &dev_priv->validate_ctx;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       struct drm_ipvr_gem_object *cmd_buffer;
+       struct ipvr_context *ipvr_ctx  = NULL;
+       int32_t ret, ctx_id;
+
+       /* if 0 is passed, use the default context instead */
+       if (args->ctx_id == 0)
+               ctx_id = dev_priv->default_ctx.ctx_id;
+       else
+               ctx_id = args->ctx_id;
+
+       IPVR_DEBUG_GENERAL("try to find ctx according ctx_id %d.\n", ctx_id);
+       ipvr_ctx = (struct ipvr_context *)
+                       idr_find(&dev_priv->ipvr_ctx_idr, ctx_id);
+       if (!ipvr_ctx) {
+               IPVR_DEBUG_WARN("video ctx is not found.\n");
+               return -ENOENT;
+       }
+
+       IPVR_DEBUG_GENERAL("reference all buffers passed through 
buffer_list.\n");
+       ret = ipvr_reference_buffers(file_priv, context,
+                               args->buffer_list, args->buffer_count);
+       if (unlikely(ret != 0)) {
+               IPVR_DEBUG_WARN("reference buffer failed.\n");
+               return -EINVAL;
+       }
+
+       IPVR_DEBUG_GENERAL("reserve all buffers to make them not accessed "
+                       "by other threads.\n");
+       ret = ipvr_reserve_buffers(&context->validate_list);
+       if (unlikely(ret != 0)) {
+               IPVR_DEBUG_WARN("reserve buffers failed.\n");
+               /* -EBUSY or -ERESTARTSYS */
+               goto out_unref_buf;
+       }
+
+       IPVR_DEBUG_GENERAL("validate buffer list, mainly check "
+                       "the bo gpu offset.\n");
+       ret = ipvr_validate_buffer_list(file_priv, context);
+       if (ret) {
+               IPVR_DEBUG_WARN("validate buffers failed.\n");
+               goto out_backoff_reserv;
+       }
+
+       cmd_buffer = to_ipvr_bo(idr_find(&file_priv->object_idr,
+                                       args->cmdbuf_handle));
+       if (!cmd_buffer) {
+               IPVR_DEBUG_WARN("Invalid cmd object handle 0x%x.\n",
+                       args->cmdbuf_handle);
+               ret = -ENOENT;
+               goto out_backoff_reserv;
+       }
+
+       /*
+        * here VED is supported currently
+        * when support VEC and VSP, need implement a general way to
+        * call sub-engine functions
+        */
+       atomic_inc(&dev_priv->ved_power_usage);
+       IPVR_DEBUG_GENERAL("get VED power with usage=%d.\n",
+               atomic_read(&dev->platformdev->dev.power.usage_count));
+       ret = pm_runtime_get_sync(&dev->platformdev->dev);
+       if (unlikely(ret < 0)) {
+               IPVR_ERROR("Error get VED power: %d\n", ret);
+               ret = -EBUSY;
+               goto out_backoff_reserv;
+       }
+
+       ret = mutex_lock_interruptible(&ved_priv->ved_mutex);
+       if (unlikely(ret)) {
+               IPVR_ERROR("Error get VED mutex: %d\n", ret);
+               /* -EINTR */
+               goto out_power_put;
+       }
+
+       IPVR_DEBUG_GENERAL("parse cmd buffer and send to VED.\n");
+       ret = ved_cmdbuf_video(file_priv, cmd_buffer,
+                       args->cmdbuf_size, ipvr_ctx);
+       if (unlikely(ret)) {
+               IPVR_DEBUG_WARN("ved_cmdbuf_video returns %d.\n", ret);
+               /* -EINVAL, -ENOMEM, -EFAULT, -EBUSY */
+               goto out_power_put;
+       }
+
+       mutex_unlock(&ved_priv->ved_mutex);
+
+       /* update mmu_offsets and fence fds back to user space */
+       ret = ipvr_update_buffers(file_priv, context,
+                               args->buffer_list, args->buffer_count);
+       if (unlikely(ret)) {
+               IPVR_DEBUG_WARN("ipvr_update_buffers returns error %d.\n", ret);
+               /* the cmd was already submitted, don't fail the ioctl here */
+               ret = 0;
+       }
+
+out_power_put:
+       if (ret) {
+               /* don't clobber the original error code with the put result */
+               int32_t pm_ret = pm_runtime_put(&dev->platformdev->dev);
+               if (unlikely(pm_ret < 0))
+                       IPVR_ERROR("Error put VED power: %d\n", pm_ret);
+               IPVR_DEBUG_GENERAL("VED power put. usage became %d.\n",
+                       atomic_read(&dev->platformdev->dev.power.usage_count));
+       }
+
+out_backoff_reserv:
+       IPVR_DEBUG_GENERAL("unreserve buffer list.\n");
+       ipvr_backoff_reservation(&context->validate_list);
+
+out_unref_buf:
+       IPVR_DEBUG_GENERAL("unref bufs which are refered during bo lookup.\n");
+       ipvr_unreference_buffers(context);
+
+       return ret;
+}
+
+/**
+ * ipvr_gem_execbuffer - ioctl entry point for DRM_IPVR_GEM_EXECBUFFER
+ *
+ * Validates the arguments and hands off to ipvr_gem_do_execbuffer.
+ *
+ * Returns:
+ * -EINVAL if the validate context or the buffer count is invalid
+ * otherwise the return value of ipvr_gem_do_execbuffer (see above)
+ */
+int32_t ipvr_gem_execbuffer(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct drm_ipvr_gem_execbuffer *args = data;
+       int32_t ret;
+       struct ipvr_validate_context *context = &dev_priv->validate_ctx;
+
+       if (!context || !context->buffers)
+               return -EINVAL;
+
+       context->used_buffers = 0;
+
+       if (args->buffer_count < 1 ||
+               args->buffer_count >
+                       (UINT_MAX / sizeof(struct ipvr_validate_buffer))) {
+               IPVR_ERROR("validate %d buffers.\n", args->buffer_count);
+               return -EINVAL;
+       }
+
+       trace_ipvr_gem_exec_ioctl(args);
+       ret = ipvr_gem_do_execbuffer(dev, file_priv, args,
+                                   &dev_priv->addr_space);
+       return ret;
+}
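
For reference, the buffer_list consumed by ipvr_reference_buffers() above is a
user-space singly linked list of struct ipvr_validate_arg entries chained
through their next fields and terminated by next == 0. A hypothetical
user-space submission could look like the sketch below; the
DRM_IOCTL_IPVR_GEM_EXECBUFFER request macro and the drmIoctl() wrapper are
assumptions, while the struct and field names match the kernel code above.

    struct ipvr_validate_arg va[2] = {0};
    va[0].handle = bo0_handle;                /* first bo to validate */
    va[0].next = (uint64_t)(uintptr_t)&va[1];
    va[1].handle = bo1_handle;
    va[1].next = 0;                           /* terminates the list */

    struct drm_ipvr_gem_execbuffer exec = {
            .buffer_list = (uint64_t)(uintptr_t)&va[0],
            .buffer_count = 2,
            .cmdbuf_handle = cmd_handle,
            .cmdbuf_size = cmd_size,
            .ctx_id = 0,                      /* 0 selects the default context */
    };
    ret = drmIoctl(fd, DRM_IOCTL_IPVR_GEM_EXECBUFFER, &exec);
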
diff --git a/drivers/gpu/drm/ipvr/ipvr_exec.h b/drivers/gpu/drm/ipvr/ipvr_exec.h
new file mode 100644
index 0000000..68405df
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_exec.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ * ipvr_exec.h: IPVR header file for command buffer execution
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _IPVR_EXEC_H_
+#define _IPVR_EXEC_H_
+
+#include "ipvr_drv.h"
+#include "ipvr_drm.h"
+#include "ipvr_gem.h"
+#include "ipvr_fence.h"
+
+struct drm_ipvr_private;
+
+#define IPVR_NUM_VALIDATE_BUFFERS 2048
+
+#define IPVR_MAX_RELOC_PAGES 1024
+
+/* status of the command sent to the ipvr device */
+enum ipvr_cmd_status {
+       IPVR_CMD_SUCCESS,
+       IPVR_CMD_FAILED,
+       IPVR_CMD_LOCKUP,
+       IPVR_CMD_SKIP
+};
+
+struct ipvr_validate_buffer {
+       struct ipvr_validate_arg val_req;
+       struct list_head head;
+       struct drm_ipvr_gem_object *ipvr_gem_bo;
+       struct ipvr_fence *old_fence;
+};
+
+int32_t ipvr_bo_reserve(struct drm_ipvr_gem_object *obj,
+                       bool interruptible, bool no_wait);
+
+void ipvr_bo_unreserve(struct drm_ipvr_gem_object *obj);
+
+struct ipvr_context*
+ipvr_find_ctx_with_fence(struct drm_ipvr_private *dev_priv, uint16_t fence);
+
+void ipvr_set_tile(struct drm_device *dev,
+               uint8_t tiling_scheme, uint8_t tiling_stride);
+
+int ipvr_cmdbuf_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_fence.c b/drivers/gpu/drm/ipvr/ipvr_fence.c
new file mode 100644
index 0000000..838e0d4
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_fence.c
@@ -0,0 +1,550 @@
+/**************************************************************************
+ * ipvr_fence.c: IPVR fence handling to track command execution status
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#include "ipvr_fence.h"
+#include "ved_reg.h"
+#include "ved_fw.h"
+#include "ved_cmd.h"
+#include "ipvr_exec.h"
+#include "ipvr_buffer.h"
+#include "ipvr_trace.h"
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+
+static int32_t ipvr_usrfence_release(struct inode *inode, struct file *file);
+static uint32_t ipvr_usrfence_poll(struct file *file, poll_table *wait);
+static const struct file_operations ipvr_usrfence_fops = {
+       .release = ipvr_usrfence_release,
+       .poll = ipvr_usrfence_poll,
+};
+
+/**
+ * ipvr_fence_create - create and init a fence
+ *
+ * @dev_priv: drm_ipvr_private pointer
+ * @fence: ipvr fence object
+ * @fence_fd: file descriptor for exporting fence
+ *
+ * Create a fence; the fence value itself is written to the hardware as
+ * part of the command message, and a new file descriptor for the fence
+ * is exported to userspace.
+ * Returns 0 on success, a negative error code on failure.
+ */
+int32_t
+ipvr_fence_create(struct drm_ipvr_private *dev_priv,
+                  struct ipvr_fence **fence,
+                  int *fence_fd)
+{
+       unsigned long irq_flags;
+       uint16_t old_seq;
+       int fd;
+       struct ved_private *ved_priv;
+       BUG_ON(!fence_fd || *fence_fd >= 0);
+       fd = get_unused_fd();
+       ved_priv = dev_priv->ved_private;
+
+       if (fd < 0) {
+               IPVR_ERROR("ALARM!!! no unused FD found!\n");
+               return fd;
+       }
+
+       *fence = kmalloc(sizeof(struct ipvr_fence), GFP_KERNEL);
+       if ((*fence) == NULL) {
+               put_unused_fd(fd);
+               return -ENOMEM;
+       }
+       (*fence)->file = anon_inode_getfile("ipvr_usrfence", &ipvr_usrfence_fops,
+                                        *fence, 0);
+       if (IS_ERR((*fence)->file)) {
+               kfree(*fence);
+               put_unused_fd(fd);
+               IPVR_ERROR("ALARM!!! anon_inode_getfile call failed\n");
+               return -ENOMEM;
+       }
+
+       kref_init(&((*fence)->kref));
+       (*fence)->dev_priv = dev_priv;
+
+       spin_lock_irqsave(&dev_priv->fence_drv.fence_lock, irq_flags);
+       /* cmds in one batch use different fence value */
+       old_seq = dev_priv->fence_drv.sync_seq;
+       dev_priv->fence_drv.sync_seq = dev_priv->last_seq++;
+       dev_priv->fence_drv.sync_seq <<= 4;
+       dev_priv->fence_drv.sync_seq += ved_priv->num_cmd;
+       (*fence)->seq = dev_priv->fence_drv.sync_seq;
+
+       spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock, irq_flags);
+
+       fd_install(fd, (*fence)->file);
+       kref_get(&(*fence)->kref);
+       IPVR_DEBUG_GENERAL("fence is created and its seq is %u (0x%04x), fd is 
%d.\n",
+               (*fence)->seq, (*fence)->seq, fd);
+       *fence_fd = fd;
+       return 0;
+}
+
+/**
+ * ipvr_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object.
+ */
+static void ipvr_fence_destroy(struct kref *kref)
+{
+       struct ipvr_fence *fence;
+
+       fence = container_of(kref, struct ipvr_fence, kref);
+       kfree(fence);
+}
+
+/**
+ * ipvr_fence_process - process a fence
+ *
+ * @dev_priv: drm_ipvr_private pointer
+ * @seq: indicate the fence seq has been signaled
+ * @err: indicate if err happened, for future use
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased.
+ */
+void ipvr_fence_process(struct drm_ipvr_private *dev_priv,
+                       uint16_t seq, uint8_t err)
+{
+       int signaled_seq_int;
+       uint16_t signaled_seq;
+       uint16_t last_emitted;
+
+       signaled_seq_int = atomic_read(&dev_priv->fence_drv.signaled_seq);
+       signaled_seq = (uint16_t)signaled_seq_int;
+       last_emitted = dev_priv->fence_drv.sync_seq;
+
+       if (ipvr_seq_after(seq, last_emitted)) {
+               IPVR_DEBUG_WARN("seq error, seq is %u, signaled_seq is %u, "
+                               "last_emitted is %u.\n",
+                               seq, signaled_seq, last_emitted);
+               return;
+       }
+       if (ipvr_seq_after(seq, signaled_seq)) {
+               atomic_xchg(&dev_priv->fence_drv.signaled_seq, seq);
+               dev_priv->fence_drv.last_activity = jiffies;
+               IPVR_DEBUG_GENERAL("last emitted seq %u is updated.\n", seq);
+               wake_up_all(&dev_priv->fence_queue);
+       }
+}
+
+/**
+ * ipvr_fence_signaled - check if a fence sequence number has signaled
+ *
+ * @dev_priv: ipvr device pointer
+ * @seq: sequence number
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number.
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value).
+ */
+static bool ipvr_fence_signaled(struct drm_ipvr_private *dev_priv, uint16_t seq)
+{
+       uint16_t curr_seq, signaled_seq;
+       unsigned long irq_flags;
+       spin_lock_irqsave(&dev_priv->fence_drv.fence_lock, irq_flags);
+       curr_seq = dev_priv->ved_private->ved_cur_seq;
+       signaled_seq = atomic_read(&dev_priv->fence_drv.signaled_seq);
+
+       if (ipvr_seq_after(seq, signaled_seq)) {
+               /* poll new last sequence at least once */
+               ipvr_fence_process(dev_priv, curr_seq, IPVR_CMD_SUCCESS);
+               signaled_seq = atomic_read(&dev_priv->fence_drv.signaled_seq);
+               if (ipvr_seq_after(seq, signaled_seq)) {
+                       spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock,
+                                               irq_flags);
+                       return false;
+               }
+       }
+       spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock, irq_flags);
+       return true;
+}
+
+/**
+ * ipvr_fence_lockup - handle a detected ipvr lockup
+ *
+ * @dev_priv: ipvr device pointer
+ * @fence: the fence whose wait timed out, or NULL
+ *
+ * Called when a wait in ipvr_fence_wait times out, indicating a lockup:
+ * the queued cmds are flushed and the VED is marked for reset.
+ * If ipvr_fence_wait_empty_locked encounters the lockup, @fence is NULL.
+ */
+static void
+ipvr_fence_lockup(struct drm_ipvr_private *dev_priv, struct ipvr_fence *fence)
+{
+       unsigned long irq_flags;
+       struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       IPVR_DEBUG_WARN("timeout detected, flush queued cmd, maybe lockup.\n");
+       IPVR_DEBUG_WARN("MSVDX_COMMS_FW_STATUS reg is 0x%x.\n",
+                       VED_REG_READ32(MSVDX_COMMS_FW_STATUS));
+
+       if (fence) {
+               spin_lock_irqsave(&dev_priv->fence_drv.fence_lock, irq_flags);
+               ipvr_fence_process(dev_priv, fence->seq, IPVR_CMD_LOCKUP);
+               spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock, 
irq_flags);
+       }
+       ved_flush_cmd_queue(dev);
+
+       if (ved_priv->fw_loaded_by_punit)
+               ved_priv->ved_needs_reset |= MSVDX_RESET_NEEDS_REUPLOAD_FW |
+                       MSVDX_RESET_NEEDS_INIT_FW;
+       else
+               ved_priv->ved_needs_reset = 1;
+}
+
+/**
+ * ipvr_fence_wait_seq - wait for a specific sequence number
+ *
+ * @dev_priv: ipvr device pointer
+ * @target_seq: sequence number we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number to be written.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected.
+ */
+static int32_t ipvr_fence_wait_seq(struct drm_ipvr_private *dev_priv,
+                                       uint16_t target_seq, bool intr)
+{
+       struct ipvr_fence_driver        *fence_drv = &dev_priv->fence_drv;
+       unsigned long timeout, last_activity;
+       uint16_t signaled_seq;
+       int32_t ret;
+       unsigned long irq_flags;
+       bool signaled;
+       spin_lock_irqsave(&dev_priv->fence_drv.fence_lock, irq_flags);
+
+       while (ipvr_seq_after(target_seq,
+                       (uint16_t)atomic_read(&fence_drv->signaled_seq))) {
+               /* seems the fence_drv->last_activity is useless? */
+               timeout = IPVR_FENCE_JIFFIES_TIMEOUT;
+               signaled_seq = atomic_read(&fence_drv->signaled_seq);
+               /* save the last activity value, used to check for GPU lockups */
+               last_activity = fence_drv->last_activity;
+
+               spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock, irq_flags);
+               if (intr) {
+                       ret = wait_event_interruptible_timeout(
+                               dev_priv->fence_queue,
+                               (signaled = ipvr_fence_signaled(dev_priv, target_seq)),
+                               timeout);
+               } else {
+                       ret = wait_event_timeout(
+                               dev_priv->fence_queue,
+                               (signaled = ipvr_fence_signaled(dev_priv, target_seq)),
+                               timeout);
+               }
+               spin_lock_irqsave(&dev_priv->fence_drv.fence_lock, irq_flags);
+
+               if (unlikely(!signaled)) {
+                       /* we were interrupted for some reason, and the fence
+                        * isn't signaled yet; resume waiting until timeout */
+                       if (unlikely(ret < 0)) {
+                               /* should return -ERESTARTSYS,
+                                * interrupted by a signal */
+                               continue;
+                       }
+
+                       /* check if sequence value has changed since
+                        * last_activity */
+                       if (signaled_seq !=
+                               atomic_read(&fence_drv->signaled_seq)) {
+                               continue;
+                       }
+
+                       if (last_activity != fence_drv->last_activity) {
+                               continue;
+                       }
+
+                       /* a lockup happened; better if we had a reg to check */
+                       IPVR_DEBUG_WARN("GPU lockup (waiting for 0x%0x last "
+                                       "signaled fence id 0x%x).\n",
+                                       target_seq, signaled_seq);
+
+                       /* change last activity so nobody else
+                        * thinks there is a lockup */
+                       fence_drv->last_activity = jiffies;
+                       spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock,
+                                       irq_flags);
+                       return -EDEADLK;
+
+               }
+       }
+       spin_unlock_irqrestore(&dev_priv->fence_drv.fence_lock, irq_flags);
+       return 0;
+}
+
+/**
+ * ipvr_fence_wait - wait for a fence to signal
+ *
+ * @fence: ipvr fence object
+ * @intr: use interruptible sleep
+ * @no_wait: if true, return -EBUSY immediately instead of waiting
+ *
+ * Wait for the requested fence to signal.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int32_t ipvr_fence_wait(struct ipvr_fence *fence, bool intr, bool no_wait)
+{
+       int32_t ret;
+       struct drm_ipvr_private *dev_priv;
+
+       if (fence == NULL || fence->seq == IPVR_FENCE_SIGNALED_SEQ) {
+               IPVR_DEBUG_GENERAL("fence is NULL or has been singaled.\n");
+               return 0;
+       }
+       dev_priv = fence->dev_priv;
+
+       IPVR_DEBUG_GENERAL("wait fence seq %u, last signaled seq is %d, "
+                       "last emitted seq is %u.\n", fence->seq,
+                       atomic_read(&dev_priv->fence_drv.signaled_seq),
+                       dev_priv->fence_drv.sync_seq);
+       trace_ipvr_fence_wait(fence,
+                       atomic_read(&dev_priv->fence_drv.signaled_seq),
+                       dev_priv->fence_drv.sync_seq);
+
+       if (ipvr_fence_signaled(dev_priv, fence->seq)) {
+               IPVR_DEBUG_GENERAL("fence has been signaled.\n");
+               /*
+                * compared with ttm_bo_wait, no tmp_obj needs to be
+                * created here; it is better to also set bo->fence = NULL
+                */
+               fence->seq = IPVR_FENCE_SIGNALED_SEQ;
+               ipvr_fence_unref(&fence);
+               return 0;
+       }
+
+       if (no_wait)
+               return -EBUSY;
+
+       ret = ipvr_fence_wait_seq(dev_priv, fence->seq, intr);
+       if (ret) {
+               if (ret == -EDEADLK)
+                       ipvr_fence_lockup(dev_priv, fence);
+               return ret;
+       }
+       fence->seq = IPVR_FENCE_SIGNALED_SEQ;
+
+       return 0;
+}
+
+/**
+ * ipvr_fence_driver_init - init the fence driver
+ *
+ * @dev_priv: ipvr device pointer
+ *
+ * Init the fence driver, will not fail
+ */
+void ipvr_fence_driver_init(struct drm_ipvr_private *dev_priv)
+{
+       spin_lock_init(&dev_priv->fence_drv.fence_lock);
+       init_waitqueue_head(&dev_priv->fence_queue);
+       dev_priv->fence_drv.sync_seq = 0;
+       atomic_set(&dev_priv->fence_drv.signaled_seq, 0);
+       dev_priv->fence_drv.last_activity = jiffies;
+       dev_priv->fence_drv.initialized = false;
+}
+
+/**
+ * ipvr_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @dev_priv: ipvr device pointer
+ *
+ * Wait for all fences to be signalled.
+ */
+void ipvr_fence_wait_empty_locked(struct drm_ipvr_private *dev_priv)
+{
+       uint16_t seq;
+
+       seq = dev_priv->fence_drv.sync_seq;
+
+       while (1) {
+               int32_t ret;
+               ret = ipvr_fence_wait_seq(dev_priv, seq, false);
+               if (ret == 0)
+                       return;
+               if (ret == -EDEADLK) {
+                       ipvr_fence_lockup(dev_priv, NULL);
+                       IPVR_DEBUG_WARN("Lockup found waiting for seq %d.\n",
+                                       seq);
+                       return;
+               }
+       }
+}
+
+/**
+ * ipvr_fence_driver_fini - tear down the fence driver
+ *
+ * @dev_priv: ipvr device pointer
+ *
+ * Tear down the fence driver: wait for all outstanding fences to signal
+ * and wake any remaining waiters.
+ */
+void ipvr_fence_driver_fini(struct drm_ipvr_private *dev_priv)
+{
+       if (!dev_priv->fence_drv.initialized)
+               return;
+       ipvr_fence_wait_empty_locked(dev_priv);
+       wake_up_all(&dev_priv->fence_queue);
+       dev_priv->fence_drv.initialized = false;
+}
+
+/**
+ * ipvr_fence_ref - take a ref on a fence
+ *
+ * @fence: fence object
+ *
+ * Take a reference on a fence.
+ * Returns the fence.
+ */
+struct ipvr_fence *ipvr_fence_ref(struct ipvr_fence *fence)
+{
+       kref_get(&fence->kref);
+       return fence;
+}
+
+/**
+ * ipvr_fence_unref - remove a ref on a fence
+ *
+ * @fence: ipvr fence object
+ *
+ * Remove a reference on a fence; if the refcount drops to 0, destroy the fence.
+ */
+void ipvr_fence_unref(struct ipvr_fence **fence)
+{
+       struct ipvr_fence *tmp = *fence;
+
+       *fence = NULL;
+       if (tmp) {
+               kref_put(&tmp->kref, &ipvr_fence_destroy);
+       }
+}
+
+/**
+ * ipvr_fence_buffer_objects - bind fence to buffer list
+ *
+ * @list: validation buffer list
+ * @fence: ipvr fence object
+ *
+ * Bind a fence to every object in the validation list.
+ */
+void
+ipvr_fence_buffer_objects(struct list_head *list, struct ipvr_fence *fence, int fence_fd)
+{
+       struct ipvr_validate_buffer *entry;
+       struct drm_ipvr_gem_object *obj;
+
+       if (list_empty(list))
+               return;
+
+       list_for_each_entry(entry, list, head) {
+               obj = entry->ipvr_gem_bo;
+               /* do not update the fence if val_args specifies so */
+               if (!entry->val_req.skip_fence) {
+                       entry->old_fence = obj->fence;
+                       obj->fence = ipvr_fence_ref(fence);
+                       entry->val_req.fence_fd = fence_fd;
+               } else {
+                       IPVR_DEBUG_GENERAL("obj 0x%lx marked as non-fence\n",
+                               ipvr_gem_obj_mmu_offset(obj));
+               }
+               ipvr_bo_unreserve(obj);
+       }
+
+       list_for_each_entry(entry, list, head) {
+               if (!entry->val_req.skip_fence && entry->old_fence)
+                       ipvr_fence_unref(&entry->old_fence);
+       }
+}
+
+static int32_t ipvr_usrfence_release(struct inode *inode, struct file *file)
+{
+       struct ipvr_fence *fence = file->private_data;
+       BUG_ON(!fence);
+       /* log before the put: kref_put may free the fence */
+       IPVR_DEBUG_GENERAL("fence for seq %u is released by user space\n",
+               fence->seq);
+       return kref_put(&fence->kref, &ipvr_fence_destroy);
+}
+
+static uint32_t ipvr_usrfence_poll(struct file *file, poll_table *wait)
+{
+       struct ipvr_fence *fence = file->private_data;
+       struct drm_ipvr_private *dev_priv;
+       BUG_ON(!fence);
+       dev_priv = fence->dev_priv;
+       if (ipvr_fence_signaled(dev_priv, fence->seq)) {
+               IPVR_DEBUG_GENERAL("seq %u is already signalled, return POLLIN immediately\n",
+                       fence->seq);
+               return POLLIN;
+       }
+       IPVR_DEBUG_GENERAL("seq %u is not signalled, wait for fence_queue at %lu\n",
+               fence->seq, jiffies);
+       poll_wait(file, &dev_priv->fence_queue, wait);
+
+       /*
+        * Make sure that reads to fence->status are ordered with the
+        * wait queue event triggering
+        */
+       /* smp_rmb(); */
+       IPVR_DEBUG_GENERAL("seq %u, fence_queue woken up at %lu\n",
+               fence->seq, jiffies);
+
+       if (ipvr_fence_signaled(dev_priv, fence->seq)) {
+               IPVR_DEBUG_GENERAL("seq %u is signalled after waiting\n",
+                       fence->seq);
+               return POLLIN;
+       }
+       IPVR_DEBUG_GENERAL("seq %u is still NOT signalled after waiting\n",
+               fence->seq);
+       return 0;
+}
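
For reference, the fd returned by ipvr_fence_create() supports poll() via the
hooks above; a hypothetical user-space waiter could block on decode completion
as sketched below (everything except the standard poll interface is
illustrative; fence_fd comes back through the execbuffer path).

    struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };
    int ret = poll(&pfd, 1, 500);              /* wait up to 500 ms */
    if (ret > 0 && (pfd.revents & POLLIN))
            ;                                  /* the fence has signaled */
    close(fence_fd);                           /* drops the kref taken at creation */
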
diff --git a/drivers/gpu/drm/ipvr/ipvr_fence.h b/drivers/gpu/drm/ipvr/ipvr_fence.h
new file mode 100644
index 0000000..54846da
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_fence.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ * ipvr_fence.h: IPVR header file for fence handling
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _IPVR_FENCE_H_
+#define _IPVR_FENCE_H_
+
+#include "ipvr_drv.h"
+
+/* seq_after(a,b) returns true if the seq a is after seq b.*/
+#define ipvr_seq_after(a,b)     \
+    (typecheck(uint16_t, a) && \
+     typecheck(uint16_t, b) && \
+     ((int16_t)(a - b) > 0))
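+
+/*
+ * Illustration of the wraparound semantics (informative only):
+ *   ipvr_seq_after(1, 0xffff) is true,  since (int16_t)(1 - 0xffff) == 2
+ *   ipvr_seq_after(0xffff, 1) is false, since (int16_t)(0xffff - 1) == -2
+ * so comparisons stay correct across 16-bit sequence number overflow.
+ */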
+
+#define IPVR_FENCE_JIFFIES_TIMEOUT             (HZ / 2)
+/* fence seq are set to this number when signaled */
+#define IPVR_FENCE_SIGNALED_SEQ                0
+
+struct ipvr_fence {
+       struct drm_ipvr_private         *dev_priv;
+       struct kref                     kref;
+       /* protected by dev_priv->fence_drv.fence_lock */
+       uint16_t                        seq;
+       /* fields for usrfence */
+       struct file                     *file;
+       char                            name[32];
+};
+
+int32_t ipvr_fence_wait(struct ipvr_fence *fence, bool intr, bool no_wait);
+
+void ipvr_fence_process(struct drm_ipvr_private *dev_priv,
+                        uint16_t seq, uint8_t err);
+
+void ipvr_fence_driver_init(struct drm_ipvr_private *dev_priv);
+
+void ipvr_fence_driver_fini(struct drm_ipvr_private *dev_priv);
+
+int32_t ipvr_fence_create(struct drm_ipvr_private *dev_priv,
+                       struct ipvr_fence **fence, int *fence_fd);
+
+void
+ipvr_fence_buffer_objects(struct list_head *list, struct ipvr_fence *fence,
+                       int fence_fd);
+
+void ipvr_fence_unref(struct ipvr_fence **fence);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_gem.c b/drivers/gpu/drm/ipvr/ipvr_gem.c
new file mode 100644
index 0000000..888e369
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_gem.c
@@ -0,0 +1,248 @@
+/**************************************************************************
+ * ipvr_gem.c: IPVR hook file for gem ioctls
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#include "ipvr_gem.h"
+#include "ipvr_buffer.h"
+#include "ipvr_fence.h"
+#include "ipvr_exec.h"
+#include "ipvr_trace.h"
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+#include <drm_gem.h>
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int32_t ipvr_gem_create_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_ipvr_gem_create *args = data;
+       if (args->cache_level >= IPVR_CACHE_MAX)
+               return -EINVAL;
+       return ipvr_gem_create(file_priv, dev, args->size, args->tiling,
+                             args->cache_level, &args->rounded_size,
+                             &args->handle, &args->gpu_offset);
+}
+
+int32_t ipvr_gem_busy_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       struct drm_ipvr_gem_busy *args = data;
+       struct drm_ipvr_gem_object *obj;
+       int32_t ret = 0;
+
+       obj = to_ipvr_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+       if (!obj)
+               return -ENOENT;
+       IPVR_DEBUG_GENERAL("Checking bo %p (fence %p seq %u) busy status\n",
+        obj, obj->fence, ((obj->fence)? obj->fence->seq: 0));
+
+       ret = ipvr_bo_reserve(obj, true, false);
+       if (unlikely(ret != 0))
+               goto out;
+       ret = ipvr_fence_wait(obj->fence, true, true);
+       ipvr_bo_unreserve(obj);
+
+       /* -EBUSY just means the bo is still busy, not an ioctl failure */
+       args->busy = ret ? 1 : 0;
+       if (ret == -EBUSY)
+               ret = 0;
+out:
+       drm_gem_object_unreference(&obj->base);
+       return ret;
+}
+
+int32_t ipvr_gem_mmap_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv)
+{
+       struct drm_ipvr_gem_mmap *args = data;
+       struct drm_ipvr_gem_object *obj;
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       uint16_t signaled_seq, seq;
+       struct drm_ipvr_private *dev_priv = (struct drm_ipvr_private *)dev->dev_private;
+       int32_t ret = 0;
+       IPVR_DEBUG_ENTRY("mmap is called on handle %u offset 0x%llx size 
%llu.\n",
+        args->handle, args->offset, args->size);
+
+       obj = to_ipvr_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+       if (!obj) {
+               return -ENOENT;
+       }
+
+       /* sanity check */
+       if (unlikely(obj->fence)) {
+               signaled_seq = atomic_read(&dev_priv->fence_drv.signaled_seq);
+               seq = obj->fence->seq;
+               if (ipvr_seq_after(seq, signaled_seq))
+                       IPVR_DEBUG_WARN("mmaping an obj %lx which is currently 
fenced!!!\n",
+                               ipvr_gem_obj_mmu_offset(obj));
+       }
+
+       /* prime objects have no backing filp to GEM mmap
+        * pages from.
+        */
+       if (unlikely(!obj->base.filp)) {
+               IPVR_ERROR("has no backing flip.\n");
+               drm_gem_object_unreference_unlocked(&obj->base);
+               return -EINVAL;
+       }
+
+       /* assume the mmap size is the same as the bo size */
+       if (obj->mmap_base == 0) {
+               obj->mmap_size = args->size;
+               obj->mmap_base = vm_mmap(obj->base.filp, 0, args->size,
+                              PROT_READ | PROT_WRITE, MAP_SHARED,
+                              args->offset);
+       }
+
+       if (IS_ERR((void *)obj->mmap_base)) {
+               ret = PTR_ERR((void *)obj->mmap_base);
+               IPVR_ERROR("mmap failed: %d.\n", ret);
+               goto err;
+       }
+
+       vma = find_vma(mm, obj->mmap_base);
+       if (!vma || vma->vm_start > obj->mmap_base ||
+           obj->mmap_base + obj->mmap_size > vma->vm_end) {
+               IPVR_ERROR("failed to find the vma.\n");
+               ret = -EFAULT;
+               goto err;
+       }
+
+       if (obj->cache_level == IPVR_CACHE_NONE) {
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       } else if (obj->cache_level == IPVR_CACHE_WC) {
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       }
+       args->addr_ptr = obj->mmap_base;
+       IPVR_DEBUG_GENERAL("mmap got %p.\n", (void*)args->addr_ptr);
+       trace_ipvr_gem_mmap(obj, obj->mmap_base);
+err:
+       drm_gem_object_unreference_unlocked(&obj->base);
+       return ret;
+}
+
+int32_t ipvr_sync_cpu_grab(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *obj)
+{
+       int32_t ret = 0;
+       ret = ipvr_bo_reserve(obj, true, false);
+       if (unlikely(ret != 0))
+               goto out;
+       ret = ipvr_fence_wait(obj->fence, true, false);
+       if (likely(ret == 0))
+               atomic_inc(&obj->cpu_writers);
+       else
+               IPVR_DEBUG_WARN("Failed to call ipvr_fence_wait.\n");
+       ipvr_bo_unreserve(obj);
+out:
+       drm_gem_object_unreference(&obj->base);
+       return ret;
+}
+int32_t ipvr_sync_cpu_release(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *obj)
+{
+       ipvr_gem_clflush_object(obj, false);
+       atomic_dec(&obj->cpu_writers);
+       drm_gem_object_unreference(&obj->base);
+       return 0;
+}
+int32_t ipvr_sync_cpu_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv)
+{
+       struct drm_ipvr_gem_object *obj;
+       struct drm_ipvr_sync_cpu *args = data;
+
+       IPVR_DEBUG_ENTRY("enter\n");
+       obj = to_ipvr_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+       if (!obj)
+               return -ENOENT;
+
+       switch (args->op) {
+       case IPVR_SYNCCPU_OP_GRAB:
+               return ipvr_sync_cpu_grab(dev, obj);
+       case IPVR_SYNCCPU_OP_RELEASE:
+               return ipvr_sync_cpu_release(dev, obj);
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * ipvr_gem_wait_ioctl - implements DRM_IOCTL_IPVR_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time
+ * in the timeout parameter.
+ *  -ETIME: object is still busy after timeout
+ *  -ERESTARTSYS: signal interrupted the wait
+ *  -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ *  -EAGAIN: GPU wedged
+ *  -ENOMEM: damn
+ *  -ENODEV: Internal IRQ fail
+ *  -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the
+ * busy ioctl.
+ */
+int32_t ipvr_gem_wait_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv)
+{
+       struct drm_ipvr_gem_wait *args = data;
+       struct drm_ipvr_gem_object *obj;
+       int32_t ret = 0;
+
+       IPVR_DEBUG_ENTRY("wait %d buffer to finish execution.\n", args->handle);
+       obj = to_ipvr_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+       if (!obj)
+               return -ENOENT;
+
+       ret = ipvr_bo_reserve(obj, true, false);
+       if (unlikely(ret != 0))
+               goto out;
+
+       trace_ipvr_gem_wait_ioctl(obj);
+       ret = ipvr_fence_wait(obj->fence, true, false);
+
+       ipvr_bo_unreserve(obj);
+
+out:
+       drm_gem_object_unreference(&obj->base);
+       return ret;
+}
+
+int32_t ipvr_gem_userptr_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv)
+{
+       struct drm_ipvr_gem_userptr *args = data;
+       return ipvr_gem_userptr(file_priv, dev, args->user_ptr,
+                               args->user_size, args->cache_level, args->tiling,
+                               &args->handle, &args->gpu_offset);
+}
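
For reference, the grab/release pair above implements a simple CPU-access
protocol: GRAB waits on the bo's fence and bumps cpu_writers, RELEASE
clflushes and drops it. A hypothetical user-space sequence is sketched below;
the DRM_IOCTL_IPVR_SYNC_CPU request macro and the drmIoctl() wrapper are
assumptions, while the struct and op names match the code above.

    struct drm_ipvr_sync_cpu sync = {
            .handle = bo_handle,
            .op = IPVR_SYNCCPU_OP_GRAB,
    };
    if (drmIoctl(fd, DRM_IOCTL_IPVR_SYNC_CPU, &sync) == 0) {
            memcpy(map, data, size);            /* CPU writes are now safe */
            sync.op = IPVR_SYNCCPU_OP_RELEASE;  /* clflush and drop the grab */
            drmIoctl(fd, DRM_IOCTL_IPVR_SYNC_CPU, &sync);
    }
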
diff --git a/drivers/gpu/drm/ipvr/ipvr_gem.h b/drivers/gpu/drm/ipvr/ipvr_gem.h
new file mode 100644
index 0000000..9e7b4e7
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_gem.h
@@ -0,0 +1,66 @@
+/**************************************************************************
+ * ipvr_gem.h: IPVR header file for GEM ioctls
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _IPVR_GEM_H_
+#define _IPVR_GEM_H_
+
+#include "ipvr_drv.h"
+
+/* ipvr_gem.c */
+int32_t ipvr_gem_execbuffer(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_pin_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_unpin_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_busy_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_create_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_pread_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_pwrite_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+int32_t ipvr_gem_mmap_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_set_domain_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_sync_cpu_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_wait_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+int32_t ipvr_gem_userptr_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_mmu.c b/drivers/gpu/drm/ipvr/ipvr_mmu.c
new file mode 100644
index 0000000..4d6f536
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_mmu.c
@@ -0,0 +1,807 @@
+/**************************************************************************
+ * ipvr_mmu.c: IPVR MMU handling to support VED, VEC, VSP buffer access
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#include "ipvr_mmu.h"
+
+/*
+ * Code for the VED MMU, which may also be enabled for VEC/VSP
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * Usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+static inline uint32_t ipvr_mmu_pt_index(uint32_t offset)
+{
+       return (offset >> IPVR_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t ipvr_mmu_pd_index(uint32_t offset)
+{
+       return offset >> IPVR_PDE_SHIFT;
+}
+
+#if defined(CONFIG_X86)
+static inline void ipvr_clflush(void *addr)
+{
+       __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void
+ipvr_mmu_clflush(struct ipvr_mmu_driver *driver, void *addr)
+{
+       if (!driver->has_clflush)
+               return;
+
+       mb();
+       ipvr_clflush(addr);
+       mb();
+}
+
+static void
+ipvr_mmu_page_clflush(struct ipvr_mmu_driver *driver, struct page* page)
+{
+       uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       int i;
+       uint8_t *clf;
+
+       clf = kmap_atomic(page);
+
+       mb();
+       for (i = 0; i < clflush_count; ++i) {
+               ipvr_clflush(clf);
+               clf += clflush_add;
+       }
+       mb();
+
+       kunmap_atomic(clf);
+}
+
+static void ipvr_mmu_pages_clflush(struct ipvr_mmu_driver *driver,
+                               struct page *page[], unsigned long num_pages)
+{
+       int i;
+
+       if (!driver->has_clflush)
+               return;
+
+       for (i = 0; i < num_pages; i++)
+               ipvr_mmu_page_clflush(driver, *page++);
+}
+#else
+
+static inline void
+ipvr_mmu_clflush(struct ipvr_mmu_driver *driver, void *addr)
+{
+}
+
+static void ipvr_mmu_pages_clflush(struct ipvr_mmu_driver *driver,
+                               struct page *page[], unsigned long num_pages)
+{
+       printk(KERN_DEBUG "Dummy ipvr_mmu_pages_clflush\n");
+}
+
+#endif
+
+static void
+ipvr_mmu_flush_pd_locked(struct ipvr_mmu_driver *driver, int32_t force)
+{
+       if ((atomic_read(&driver->needs_tlbflush) || force) &&
+           driver->dev_priv)
+               atomic_set(&driver->dev_priv->ipvr_mmu_invaldc, 1);
+
+       atomic_set(&driver->needs_tlbflush, 0);
+}
+
+void ipvr_mmu_flush(struct ipvr_mmu_driver *driver, int32_t rc_prot)
+{
+       if (rc_prot)
+               down_write(&driver->sem);
+
+       if (!driver->dev_priv)
+               goto out;
+
+       atomic_set(&driver->dev_priv->ipvr_mmu_invaldc, 1);
+
+out:
+       if (rc_prot)
+               up_write(&driver->sem);
+}
+
+void ipvr_mmu_set_pd_context(struct ipvr_mmu_pd *pd, int32_t hw_context)
+{
+       /*ttm_tt_cache_flush(&pd->p, 1);*/
+       ipvr_mmu_pages_clflush(pd->driver, &pd->p, 1);
+       down_write(&pd->driver->sem);
+       wmb();
+       ipvr_mmu_flush_pd_locked(pd->driver, 1);
+       pd->hw_context = hw_context;
+       up_write(&pd->driver->sem);
+}
+
+static inline unsigned long
+ipvr_pd_addr_end(unsigned long addr, unsigned long end)
+{
+       addr = (addr + IPVR_PDE_MASK + 1) & ~IPVR_PDE_MASK;
+       return (addr < end) ? addr : end;
+}
+
+static inline uint32_t ipvr_mmu_mask_pte(uint32_t pfn, int32_t type)
+{
+       uint32_t mask = IPVR_PTE_VALID;
+
+       if (type & IPVR_MMU_CACHED_MEMORY)
+               mask |= IPVR_PTE_CACHED;
+       if (type & IPVR_MMU_RO_MEMORY)
+               mask |= IPVR_PTE_RO;
+       if (type & IPVR_MMU_WO_MEMORY)
+               mask |= IPVR_PTE_WO;
+
+       return (pfn << PAGE_SHIFT) | mask;
+}
+
+struct ipvr_mmu_pd *ipvr_mmu_alloc_pd(struct ipvr_mmu_driver *driver,
+                               int32_t trap_pagefaults, int32_t invalid_type)
+{
+       struct ipvr_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+       uint32_t *v;
+       int32_t i;
+
+       if (!pd)
+               return NULL;
+
+       pd->p = alloc_page(GFP_DMA32);
+       if (!pd->p)
+               goto out_err1;
+       pd->dummy_pt = alloc_page(GFP_DMA32);
+       if (!pd->dummy_pt)
+               goto out_err2;
+       pd->dummy_page = alloc_page(GFP_DMA32);
+       if (!pd->dummy_page)
+               goto out_err3;
+
+       if (!trap_pagefaults) {
+               pd->invalid_pde =
+                       ipvr_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+                                        invalid_type);
+               pd->invalid_pte =
+                       ipvr_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+                                        invalid_type);
+       } else {
+               pd->invalid_pde = 0;
+               pd->invalid_pte = 0;
+       }
+
+       v = kmap(pd->dummy_pt);
+       if (!v)
+               goto out_err4;
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               v[i] = pd->invalid_pte;
+
+       kunmap(pd->dummy_pt);
+
+       v = kmap(pd->p);
+       if (!v)
+               goto out_err4;
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               v[i] = pd->invalid_pde;
+
+       kunmap(pd->p);
+
+       v = kmap(pd->dummy_page);
+       if (!v)
+               goto out_err4;
+       clear_page(v);
+       kunmap(pd->dummy_page);
+
+       pd->tables = vmalloc_user(sizeof(struct ipvr_mmu_pt *) * 1024);
+       if (!pd->tables)
+               goto out_err4;
+
+       pd->hw_context = -1;
+       pd->pd_mask = IPVR_PTE_VALID;
+       pd->driver = driver;
+
+       return pd;
+
+out_err4:
+       __free_page(pd->dummy_page);
+out_err3:
+       __free_page(pd->dummy_pt);
+out_err2:
+       __free_page(pd->p);
+out_err1:
+       kfree(pd);
+       return NULL;
+}
+
+void ipvr_mmu_free_pt(struct ipvr_mmu_pt *pt)
+{
+       __free_page(pt->p);
+       kfree(pt);
+}
+
+void ipvr_mmu_free_pagedir(struct ipvr_mmu_pd *pd)
+{
+       struct ipvr_mmu_driver *driver = pd->driver;
+       struct ipvr_mmu_pt *pt;
+       int32_t i;
+
+       down_write(&driver->sem);
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush_pd_locked(driver, 1);
+
+       /* Should take the spinlock here, but we don't need to do that
+          since we have the semaphore in write mode. */
+
+       for (i = 0; i < 1024; ++i) {
+               pt = pd->tables[i];
+               if (pt)
+                       ipvr_mmu_free_pt(pt);
+       }
+
+       vfree(pd->tables);
+       __free_page(pd->dummy_page);
+       __free_page(pd->dummy_pt);
+       __free_page(pd->p);
+       kfree(pd);
+       up_write(&driver->sem);
+}
+
+static struct ipvr_mmu_pt *ipvr_mmu_alloc_pt(struct ipvr_mmu_pd *pd)
+{
+       struct ipvr_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+       void *v;
+       uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+       uint32_t clflush_count = PAGE_SIZE / clflush_add;
+       spinlock_t *lock = &pd->driver->lock;
+       uint8_t *clf;
+       uint32_t *ptes;
+       int32_t i;
+
+       if (!pt)
+               return NULL;
+
+       pt->p = alloc_page(GFP_DMA32);
+       if (!pt->p) {
+               kfree(pt);
+               return NULL;
+       }
+
+       spin_lock(lock);
+
+       v = kmap_atomic(pt->p);
+
+       clf = (uint8_t *) v;
+       ptes = (uint32_t *) v;
+       for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+               *ptes++ = pd->invalid_pte;
+
+#if defined(CONFIG_X86)
+       if (pd->driver->has_clflush && pd->hw_context != -1) {
+               mb();
+               for (i = 0; i < clflush_count; ++i) {
+                       ipvr_clflush(clf);
+                       clf += clflush_add;
+               }
+               mb();
+       }
+#endif
+       kunmap_atomic(v);
+
+       spin_unlock(lock);
+
+       pt->count = 0;
+       pt->pd = pd;
+       pt->index = 0;
+
+       return pt;
+}
+
+struct ipvr_mmu_pt *
+ipvr_mmu_pt_alloc_map_lock(struct ipvr_mmu_pd *pd, unsigned long addr)
+{
+       uint32_t index = ipvr_mmu_pd_index(addr);
+       struct ipvr_mmu_pt *pt;
+       uint32_t *v;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       while (!pt) {
+               spin_unlock(lock);
+               pt = ipvr_mmu_alloc_pt(pd);
+               if (!pt)
+                       return NULL;
+               spin_lock(lock);
+
+               if (pd->tables[index]) {
+                       spin_unlock(lock);
+                       ipvr_mmu_free_pt(pt);
+                       spin_lock(lock);
+                       pt = pd->tables[index];
+                       continue;
+               }
+
+               v = kmap_atomic(pd->p);
+
+               pd->tables[index] = pt;
+               v[index] = (page_to_pfn(pt->p) << PAGE_SHIFT) |
+                       pd->pd_mask;
+
+               pt->index = index;
+
+               kunmap_atomic((void *) v);
+
+               if (pd->hw_context != -1) {
+                       ipvr_mmu_clflush(pd->driver, (void *) &v[index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+       }
+
+       pt->v = kmap_atomic(pt->p);
+
+       return pt;
+}
+
+static struct ipvr_mmu_pt *
+ipvr_mmu_pt_map_lock(struct ipvr_mmu_pd *pd, unsigned long addr)
+{
+       uint32_t index = ipvr_mmu_pd_index(addr);
+       struct ipvr_mmu_pt *pt;
+       spinlock_t *lock = &pd->driver->lock;
+
+       spin_lock(lock);
+       pt = pd->tables[index];
+       if (!pt) {
+               spin_unlock(lock);
+               return NULL;
+       }
+
+       pt->v = kmap_atomic(pt->p);
+
+       return pt;
+}
+
+static void ipvr_mmu_pt_unmap_unlock(struct ipvr_mmu_pt *pt)
+{
+       struct ipvr_mmu_pd *pd = pt->pd;
+       uint32_t *v;
+
+       kunmap_atomic(pt->v);
+
+       if (pt->count == 0) {
+               v = kmap_atomic(pd->p);
+
+               v[pt->index] = pd->invalid_pde;
+               pd->tables[pt->index] = NULL;
+
+               if (pd->hw_context != -1) {
+                       ipvr_mmu_clflush(pd->driver,
+                                       (void *) &v[pt->index]);
+                       atomic_set(&pd->driver->needs_tlbflush, 1);
+               }
+
+               kunmap_atomic(v);
+
+               spin_unlock(&pd->driver->lock);
+               ipvr_mmu_free_pt(pt);
+               return;
+       }
+       spin_unlock(&pd->driver->lock);
+}
+
+static inline void
+ipvr_mmu_set_pte(struct ipvr_mmu_pt *pt, unsigned long addr, uint32_t pte)
+{
+       pt->v[ipvr_mmu_pt_index(addr)] = pte;
+}
+
+static inline void
+ipvr_mmu_invalidate_pte(struct ipvr_mmu_pt *pt, unsigned long addr)
+{
+       pt->v[ipvr_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+struct ipvr_mmu_pd *ipvr_mmu_get_default_pd(struct ipvr_mmu_driver *driver)
+{
+       struct ipvr_mmu_pd *pd;
+
+       /* down_read(&driver->sem); */
+       pd = driver->default_pd;
+       /* up_read(&driver->sem); */
+
+       return pd;
+}
+
+/* Returns the physical address of the PD shared by sgx/msvdx */
+uint32_t ipvr_get_default_pd_addr(struct ipvr_mmu_driver *driver)
+{
+       struct ipvr_mmu_pd *pd;
+
+       pd = ipvr_mmu_get_default_pd(driver);
+       return page_to_pfn(pd->p) << PAGE_SHIFT;
+}
+
+void ipvr_mmu_driver_takedown(struct ipvr_mmu_driver *driver)
+{
+       ipvr_mmu_free_pagedir(driver->default_pd);
+       kfree(driver);
+}
+
+struct ipvr_mmu_driver *
+ipvr_mmu_driver_init(uint8_t __iomem * registers, int32_t trap_pagefaults,
+               int32_t invalid_type, struct drm_ipvr_private *dev_priv)
+{
+       struct ipvr_mmu_driver *driver;
+
+       driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+       if (!driver)
+               return NULL;
+
+       driver->dev_priv = dev_priv;
+
+       driver->default_pd =
+               ipvr_mmu_alloc_pd(driver, trap_pagefaults, invalid_type);
+       if (!driver->default_pd)
+               goto out_err1;
+
+       spin_lock_init(&driver->lock);
+       init_rwsem(&driver->sem);
+       down_write(&driver->sem);
+       driver->register_map = registers;
+       atomic_set(&driver->needs_tlbflush, 1);
+
+       driver->has_clflush = 0;
+
+#if defined(CONFIG_X86)
+       if (cpu_has_clflush) {
+               uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+               /*
+                * clflush size is determined at kernel setup for x86_64
+                *  but not for i386. We have to do it here.
+                */
+
+               cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+               clflush_size = ((misc >> 8) & 0xff) * 8;
+               driver->has_clflush = 1;
+               driver->clflush_add =
+                       PAGE_SIZE * clflush_size / sizeof(uint32_t);
+               driver->clflush_mask = driver->clflush_add - 1;
+               driver->clflush_mask = ~driver->clflush_mask;
+       }
+#endif
+
+       up_write(&driver->sem);
+       return driver;
+
+out_err1:
+       kfree(driver);
+       return NULL;
+}
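
As a sanity check on the CPUID arithmetic above: leaf 0x00000001 reports the
CLFLUSH line size in EBX bits 15:8, in units of 8 bytes (EBX is the `misc`
output of the kernel cpuid() call). A standalone userspace sketch, assuming a
GCC/clang x86 toolchain:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x00000001, &eax, &ebx, &ecx, &edx))
                return 1;

        /* same computation as ipvr_mmu_driver_init(): bits 15:8, times 8 */
        printf("clflush line size: %u bytes\n", ((ebx >> 8) & 0xff) * 8);
        return 0;
}
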
+
+#if defined(CONFIG_X86)
+static void ipvr_mmu_flush_ptes(struct ipvr_mmu_pd *pd,
+                                       unsigned long address,
+                                       uint32_t num_pages,
+                                       uint32_t desired_tile_stride,
+                                       uint32_t hw_tile_stride)
+{
+       struct ipvr_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long clflush_add = pd->driver->clflush_add;
+       unsigned long clflush_mask = pd->driver->clflush_mask;
+       IPVR_DEBUG_GENERAL("call x86 ipvr_mmu_flush_ptes, address is 0x%lx, "
+                       "num pages is %d.\n", address, num_pages);
+       if (!pd->driver->has_clflush) {
+               IPVR_DEBUG_GENERAL("call ipvr_mmu_pages_clflush.\n");
+               ipvr_mmu_pages_clflush(pd->driver, &pd->p, num_pages);
+               return;
+       }
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+       mb();
+       for (i = 0; i < rows; ++i) {
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = ipvr_pd_addr_end(addr, end);
+                       pt = ipvr_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue;
+                       do {
+                               ipvr_clflush(&pt->v[ipvr_mmu_pt_index(addr)]);
+                       } while (addr += clflush_add,
+                                (addr & clflush_mask) < next);
+
+                       ipvr_mmu_pt_unmap_unlock(pt);
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       mb();
+}
+#else
+
+static void ipvr_mmu_flush_ptes(struct ipvr_mmu_pd *pd,
+                                       unsigned long address,
+                                       uint32_t num_pages,
+                                       uint32_t desired_tile_stride,
+                                       uint32_t hw_tile_stride)
+{
+       IPVR_DEBUG_GENERAL("call non-x86 ipvr_mmu_flush_ptes.\n");
+}
+#endif
+
+void ipvr_mmu_remove_pfn_sequence(struct ipvr_mmu_pd *pd,
+                                               unsigned long address,
+                                               uint32_t num_pages)
+{
+       struct ipvr_mmu_pt *pt;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = ipvr_pd_addr_end(addr, end);
+               pt = ipvr_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt)
+                       goto out;
+               do {
+                       ipvr_mmu_invalidate_pte(pt, addr);
+                       --pt->count;
+               } while (addr += PAGE_SIZE, addr < next);
+               ipvr_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+out:
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush(pd->driver, 0);
+
+       return;
+}
+
+void ipvr_mmu_remove_pages(struct ipvr_mmu_pd *pd, unsigned long address,
+                       uint32_t num_pages, uint32_t desired_tile_stride,
+                       uint32_t hw_tile_stride)
+{
+       struct ipvr_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address;
+
+       if (hw_tile_stride)
+               rows = num_pages / desired_tile_stride;
+       else
+               desired_tile_stride = num_pages;
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       /* down_read(&pd->driver->sem); */
+
+       /* Make sure we only need to flush this processor's cache */
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = ipvr_pd_addr_end(addr, end);
+                       pt = ipvr_mmu_pt_map_lock(pd, addr);
+                       if (!pt)
+                               continue;
+                       do {
+                               ipvr_mmu_invalidate_pte(pt, addr);
+                               --pt->count;
+
+                       } while (addr += PAGE_SIZE, addr < next);
+                       ipvr_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+               address += row_add;
+       }
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       /* up_read(&pd->driver->sem); */
+
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush(pd->driver, 0);
+}
+
+int32_t ipvr_mmu_insert_pfn_sequence(struct ipvr_mmu_pd *pd,
+                                               uint32_t start_pfn,
+                                               unsigned long address,
+                                               uint32_t num_pages,
+                                               int32_t type)
+{
+       struct ipvr_mmu_pt *pt;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long f_address = address;
+       int32_t ret = 0;
+
+       down_read(&pd->driver->sem);
+
+       addr = address;
+       end = addr + (num_pages << PAGE_SHIFT);
+
+       do {
+               next = ipvr_pd_addr_end(addr, end);
+               pt = ipvr_mmu_pt_alloc_map_lock(pd, addr);
+               if (!pt) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               do {
+                       pte = ipvr_mmu_mask_pte(start_pfn++, type);
+
+                       ipvr_mmu_set_pte(pt, addr, pte);
+                       pt->count++;
+               } while (addr += PAGE_SIZE, addr < next);
+               ipvr_mmu_pt_unmap_unlock(pt);
+
+       } while (addr = next, next != end);
+
+out:
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush(pd->driver, 1);
+
+       return ret;
+}
+
+int32_t ipvr_mmu_insert_pages(struct ipvr_mmu_pd *pd, struct page **pages,
+                               unsigned long address, uint32_t num_pages,
+                               uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride, int32_t type)
+{
+       struct ipvr_mmu_pt *pt;
+       uint32_t rows = 1;
+       uint32_t i;
+       uint32_t pte;
+       unsigned long addr;
+       unsigned long end;
+       unsigned long next;
+       unsigned long add;
+       unsigned long row_add;
+       unsigned long f_address = address;
+       int32_t ret = 0;
+
+       if (hw_tile_stride) {
+               if (num_pages % desired_tile_stride != 0)
+                       return -EINVAL;
+               rows = num_pages / desired_tile_stride;
+       } else {
+               desired_tile_stride = num_pages;
+       }
+
+       add = desired_tile_stride << PAGE_SHIFT;
+       row_add = hw_tile_stride << PAGE_SHIFT;
+
+       down_read(&pd->driver->sem);
+
+       for (i = 0; i < rows; ++i) {
+
+               addr = address;
+               end = addr + add;
+
+               do {
+                       next = ipvr_pd_addr_end(addr, end);
+                       pt = ipvr_mmu_pt_alloc_map_lock(pd, addr);
+                       if (!pt) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       do {
+                               pte = ipvr_mmu_mask_pte(
+                                       page_to_pfn(*pages++),
+                                       type);
+                               ipvr_mmu_set_pte(pt, addr, pte);
+                               pt->count++;
+                       } while (addr += PAGE_SIZE, addr < next);
+                       ipvr_mmu_pt_unmap_unlock(pt);
+
+               } while (addr = next, next != end);
+
+               address += row_add;
+       }
+out:
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush_ptes(pd, f_address, num_pages,
+                                  desired_tile_stride, hw_tile_stride);
+
+       up_read(&pd->driver->sem);
+
+       if (pd->hw_context != -1)
+               ipvr_mmu_flush(pd->driver, 1);
+
+       return ret;
+}
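
To make the two-level walk above concrete, here is a standalone sketch of how
ipvr_mmu_pd_index()/ipvr_mmu_pt_index() split a 32-bit device virtual address.
The shift values (IPVR_PTE_SHIFT == 12, IPVR_PDE_SHIFT == 22, 1024 entries per
table) are assumed to match the usual 4 KiB-page layout; the real definitions
live in the driver headers.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr = 0x12345678;
        uint32_t pde  = addr >> 22;            /* which page table        */
        uint32_t pte  = (addr >> 12) & 0x3FF;  /* entry within that table */
        uint32_t off  = addr & 0xFFF;          /* byte within the page    */

        /* prints: pde=72 pte=837 off=0x678 */
        printf("pde=%u pte=%u off=0x%x\n", pde, pte, off);
        return 0;
}
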
diff --git a/drivers/gpu/drm/ipvr/ipvr_mmu.h b/drivers/gpu/drm/ipvr/ipvr_mmu.h
new file mode 100644
index 0000000..9ba4b69
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_mmu.h
@@ -0,0 +1,135 @@
+/**************************************************************************
+ * ipvr_mmu.h: IPVR header file for VED/VEC/VSP MMU handling
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Eric Anholt <eric at anholt.net>
+ *
+ **************************************************************************/
+
+#ifndef _IPVR_MMU_H_
+#define _IPVR_MMU_H_
+
+#include "ipvr_drv.h"
+
+static inline bool __must_check IPVR_IS_ERR(__force const unsigned long offset)
+{
+       return unlikely((offset) >= (unsigned long)-MAX_ERRNO);
+}
+
+static inline int32_t __must_check
+IPVR_OFFSET_ERR(__force const unsigned long offset)
+{
+       return (int32_t)offset;
+}
+
+static inline unsigned long __must_check
+IPVR_ERR_OFFSET(__force const int32_t err)
+{
+       return (unsigned long)err;
+}
+
+struct ipvr_mmu_pd;
+
+struct ipvr_mmu_pt {
+       struct ipvr_mmu_pd *pd;
+       uint32_t index;
+       uint32_t count;
+       struct page *p;
+       uint32_t *v;
+};
+
+struct ipvr_mmu_driver {
+       /* protects driver- and pd structures. Always take in read mode
+        * before taking the page table spinlock.
+        */
+       struct rw_semaphore sem;
+
+       /* protects page tables, page directory tables
+        * and pt structures.
+        */
+       spinlock_t lock;
+
+       atomic_t needs_tlbflush;
+
+       uint8_t __iomem *register_map;
+       struct ipvr_mmu_pd *default_pd;
+
+       int32_t has_clflush;
+       int32_t clflush_add;
+       unsigned long clflush_mask;
+
+       struct drm_ipvr_private *dev_priv;
+};
+
+struct ipvr_mmu_pd {
+       struct ipvr_mmu_driver *driver;
+       int32_t hw_context;
+       struct ipvr_mmu_pt **tables;
+       struct page *p;
+       struct page *dummy_pt;
+       struct page *dummy_page;
+       uint32_t pd_mask;
+       uint32_t invalid_pde;
+       uint32_t invalid_pte;
+};
+
+struct ipvr_mmu_driver *ipvr_mmu_driver_init(uint8_t __iomem *registers,
+                                       int32_t trap_pagefaults,
+                                       int32_t invalid_type,
+                                       struct drm_ipvr_private *dev_priv);
+
+void ipvr_mmu_driver_takedown(struct ipvr_mmu_driver *driver);
+
+struct ipvr_mmu_pd *
+ipvr_mmu_get_default_pd(struct ipvr_mmu_driver *driver);
+
+struct ipvr_mmu_pd *ipvr_mmu_alloc_pd(struct ipvr_mmu_driver *driver,
+                                       int32_t trap_pagefaults,
+                                       int32_t invalid_type);
+
+void ipvr_mmu_free_pagedir(struct ipvr_mmu_pd *pd);
+
+void ipvr_mmu_flush(struct ipvr_mmu_driver *driver, int rc_prot);
+
+void ipvr_mmu_remove_pfn_sequence(struct ipvr_mmu_pd *pd,
+                                               unsigned long address,
+                                               uint32_t num_pages);
+
+int32_t ipvr_mmu_insert_pfn_sequence(struct ipvr_mmu_pd *pd,
+                                               uint32_t start_pfn,
+                                               unsigned long address,
+                                               uint32_t num_pages,
+                                               int32_t type);
+
+void ipvr_mmu_set_pd_context(struct ipvr_mmu_pd *pd, int32_t hw_context);
+
+int32_t ipvr_mmu_insert_pages(struct ipvr_mmu_pd *pd, struct page **pages,
+                               unsigned long address, uint32_t num_pages,
+                               uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride, int32_t type);
+
+void ipvr_mmu_remove_pages(struct ipvr_mmu_pd *pd,
+                               unsigned long address, uint32_t num_pages,
+                               uint32_t desired_tile_stride,
+                               uint32_t hw_tile_stride);
+
+uint32_t ipvr_get_default_pd_addr(struct ipvr_mmu_driver *driver);
+
+#endif
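
Usage sketch for the error-encoding helpers above: like the kernel's
IS_ERR_VALUE()/PTR_ERR() pair, a negative errno is folded into the top
MAX_ERRNO values of the offset range. ipvr_gem_mmu_bind() below is a
hypothetical callee, named only for illustration:

/* hypothetical caller; ipvr_gem_mmu_bind() is not a real driver function */
static int32_t bind_and_get_offset(struct drm_ipvr_gem_object *obj,
                                   unsigned long *offset_out)
{
        unsigned long offset = ipvr_gem_mmu_bind(obj);

        if (IPVR_IS_ERR(offset))
                return IPVR_OFFSET_ERR(offset); /* unpack the negative errno */

        *offset_out = offset;   /* a valid device virtual address */
        return 0;
}
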
diff --git a/drivers/gpu/drm/ipvr/ipvr_trace.c b/drivers/gpu/drm/ipvr/ipvr_trace.c
new file mode 100644
index 0000000..91c0bda
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_trace.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * Authors:
+ *    Yao Cheng <yao.cheng at intel.com>
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "ipvr_trace.h"
+#endif
diff --git a/drivers/gpu/drm/ipvr/ipvr_trace.h b/drivers/gpu/drm/ipvr/ipvr_trace.h
new file mode 100644
index 0000000..d3209df
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ipvr_trace.h
@@ -0,0 +1,296 @@
+/**************************************************************************
+ * ipvr_trace.h: IPVR header file for trace support
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#if !defined(_IPVR_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPVR_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+#include "ipvr_buffer.h"
+#include "ipvr_fence.h"
+#include "ved_msg.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipvr
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE ipvr_trace
+
+/* object tracking */
+
+TRACE_EVENT(ipvr_gem_create,
+       TP_PROTO(struct drm_ipvr_gem_object *obj, uint64_t gpu_offset),
+       TP_ARGS(obj, gpu_offset),
+       TP_STRUCT__entry(
+               __field(struct drm_ipvr_gem_object *, obj)
+               __field(uint32_t, size)
+               __field(bool, tiling)
+               __field(uint32_t, cache_level)
+               __field(uint64_t, gpu_offset)
+       ),
+       TP_fast_assign(
+               __entry->obj = obj;
+               __entry->size = obj->base.size;
+               __entry->tiling = obj->tiling;
+               __entry->cache_level = obj->cache_level;
+               __entry->gpu_offset = gpu_offset;
+       ),
+       TP_printk("obj=0x%p, size=%u, tiling=%u, cache=%u, gpu_offset=0x%llx",
+               __entry->obj, __entry->size, __entry->tiling,
+               __entry->cache_level, __entry->gpu_offset)
+);
+
+TRACE_EVENT(ipvr__gem_free_object,
+       TP_PROTO(struct drm_ipvr_gem_object *obj),
+       TP_ARGS(obj),
+       TP_STRUCT__entry(
+               __field(struct drm_ipvr_gem_object *, obj)
+       ),
+       TP_fast_assign(
+               __entry->obj = obj;
+       ),
+       TP_printk("obj=0x%p", __entry->obj)
+);
+
+TRACE_EVENT(ipvr_gem_wait_ioctl,
+       TP_PROTO(struct drm_ipvr_gem_object *obj),
+       TP_ARGS(obj),
+       TP_STRUCT__entry(
+               __field(struct drm_ipvr_gem_object *, obj)
+               __field(struct ipvr_fence *, fence)
+       ),
+       TP_fast_assign(
+               __entry->obj = obj;
+               __entry->fence = obj->fence;
+       ),
+       TP_printk("obj=%p, fence=%p", __entry->obj, __entry->fence)
+);
+
+TRACE_EVENT(ipvr_fence_wait,
+       TP_PROTO(struct ipvr_fence *fence,
+               uint32_t signaled_seq,
+               uint16_t sync_seq),
+       TP_ARGS(fence, signaled_seq, sync_seq),
+       TP_STRUCT__entry(
+               __field(struct ipvr_fence *, fence)
+               __field(uint16_t, fence_seq)
+               __field(uint32_t, signaled_seq)
+               __field(uint16_t, sync_seq)
+       ),
+       TP_fast_assign(
+               __entry->fence = fence;
+               __entry->fence_seq = fence->seq;
+               __entry->signaled_seq = signaled_seq;
+               __entry->sync_seq = sync_seq;
+       ),
+       TP_printk("fence=%p, fence_seq=%d, signaled_seq=%d, sync_seq=%d",
+               __entry->fence, __entry->fence_seq,
+               __entry->signaled_seq, __entry->sync_seq)
+);
+
+TRACE_EVENT(ipvr_gem_mmap,
+       TP_PROTO(struct drm_ipvr_gem_object *obj, unsigned long mmap_base),
+       TP_ARGS(obj, mmap_base),
+       TP_STRUCT__entry(
+               __field(struct drm_ipvr_gem_object *, obj)
+               __field(unsigned long, mmap_base)
+       ),
+       TP_fast_assign(
+               __entry->obj = obj;
+               __entry->mmap_base = mmap_base;
+       ),
+       TP_printk("obj=%p, mmap_base=0x%lx", __entry->obj, __entry->mmap_base)
+);
+
+TRACE_EVENT(ipvr_gem_exec_ioctl,
+       TP_PROTO(struct drm_ipvr_gem_execbuffer *exec),
+       TP_ARGS(exec),
+       TP_STRUCT__entry(
+               __field(uint64_t, buffer_list)
+               __field(uint32_t, buffer_count)
+               __field(uint32_t, cmdbuf_handle)
+               __field(uint32_t, cmdbuf_size)
+               __field(uint32_t, ctx_id)
+       ),
+       TP_fast_assign(
+               __entry->buffer_list = exec->buffer_list;
+               __entry->buffer_count = exec->buffer_count;
+               __entry->cmdbuf_handle = exec->cmdbuf_handle;
+               __entry->cmdbuf_size = exec->cmdbuf_size;
+               __entry->ctx_id = exec->ctx_id;
+       ),
+       TP_printk("buffer_list=0x%llx, buffer_count=%u, "
+               "cmdbuf_handle=0x%x, cmdbuf_size=%u, ctx_id=%d",
+               __entry->buffer_list, __entry->buffer_count,
+               __entry->cmdbuf_handle, __entry->cmdbuf_size,
+               __entry->ctx_id)
+);
+
+TRACE_EVENT(ved_cmd_send,
+       TP_PROTO(uint32_t cmd_id, uint32_t seq),
+       TP_ARGS(cmd_id, seq),
+       TP_STRUCT__entry(
+               __field(uint32_t, cmd_id)
+               __field(uint32_t, seq)
+       ),
+       TP_fast_assign(
+               __entry->cmd_id = cmd_id;
+               __entry->seq = seq;
+       ),
+       TP_printk("cmd_id=0x%08x, seq=0x%08x",
+               __entry->cmd_id, __entry->seq)
+);
+
+TRACE_EVENT(ved_power_on,
+       TP_PROTO(int freq),
+       TP_ARGS(freq),
+       TP_STRUCT__entry(
+               __field(int, freq)
+       ),
+       TP_fast_assign(
+               __entry->freq = freq;
+       ),
+       TP_printk("frequency %d MHz", __entry->freq)
+);
+
+TRACE_EVENT(ved_power_off,
+       TP_PROTO(int freq),
+       TP_ARGS(freq),
+       TP_STRUCT__entry(
+               __field(int, freq)
+       ),
+       TP_fast_assign(
+               __entry->freq = freq;
+       ),
+       TP_printk("frequency %d MHz", __entry->freq)
+);
+
+TRACE_EVENT(ved_irq_completed,
+       TP_PROTO(struct fw_completed_msg *completed_msg),
+       TP_ARGS(completed_msg),
+       TP_STRUCT__entry(
+               __field(uint16_t, seqno)
+               __field(uint32_t, flags)
+               __field(uint32_t, vdebcr)
+               __field(uint16_t, start_mb)
+               __field(uint16_t, last_mb)
+       ),
+       TP_fast_assign(
+               __entry->seqno = completed_msg->header.bits.msg_fence;
+               __entry->flags = completed_msg->flags;
+               __entry->vdebcr = completed_msg->vdebcr;
+               __entry->start_mb = completed_msg->mb.bits.start_mb;
+               __entry->last_mb = completed_msg->mb.bits.last_mb;
+       ),
+       TP_printk("seq=0x%04x, flags=0x%08x, vdebcr=0x%08x, mb=[%u, %u]",
+               __entry->seqno,
+               __entry->flags,
+               __entry->vdebcr,
+               __entry->start_mb,
+               __entry->last_mb)
+);
+
+TRACE_EVENT(ved_irq_panic,
+       TP_PROTO(struct fw_panic_msg *panic_msg, uint32_t err_trig,
+               uint32_t irq_status, uint32_t mmu_status, uint32_t dmac_status),
+       TP_ARGS(panic_msg, err_trig, irq_status, mmu_status, dmac_status),
+       TP_STRUCT__entry(
+               __field(uint16_t, seqno)
+               __field(uint32_t, fe_status)
+               __field(uint32_t, be_status)
+               __field(uint16_t, rsvd)
+               __field(uint16_t, last_mb)
+               __field(uint32_t, err_trig)
+               __field(uint32_t, irq_status)
+               __field(uint32_t, mmu_status)
+               __field(uint32_t, dmac_status)
+       ),
+       TP_fast_assign(
+               __entry->seqno = panic_msg->header.bits.msg_fence;
+               __entry->fe_status = panic_msg->fe_status;
+               __entry->be_status = panic_msg->be_status;
+               __entry->rsvd = panic_msg->mb.bits.reserved2;
+               __entry->last_mb = panic_msg->mb.bits.last_mb;
+               __entry->err_trig = err_trig;
+               __entry->irq_status = irq_status;
+               __entry->mmu_status = mmu_status;
+               __entry->dmac_status = dmac_status;
+       ),
+       TP_printk("seq=0x%04x, status=[fe 0x%08x be 0x%08x], rsvd=0x%04x, "
+               "last_mb=%u, err_trig=0x%08x, irq_status=0x%08x, "
+               "mmu_status=0x%08x, dmac_status=0x%08x",
+               __entry->seqno,
+               __entry->fe_status,
+               __entry->be_status,
+               __entry->rsvd,
+               __entry->last_mb,
+               __entry->err_trig,
+               __entry->irq_status,
+               __entry->mmu_status,
+               __entry->dmac_status)
+);
+
+TRACE_EVENT(ved_irq_contiguity,
+       TP_PROTO(struct fw_contiguity_msg *msg),
+       TP_ARGS(msg),
+       TP_STRUCT__entry(
+               __field(uint16_t, seqno)
+               __field(uint16_t, begin_mb)
+               __field(uint16_t, end_mb)
+       ),
+       TP_fast_assign(
+               __entry->seqno = msg->header.bits.msg_fence;
+               __entry->begin_mb = msg->mb.bits.begin_mb_num;
+               __entry->end_mb = msg->mb.bits.end_mb_num;
+       ),
+       TP_printk("seq=0x%04x, mb=[%u, %u]",
+               __entry->seqno,
+               __entry->begin_mb,
+               __entry->end_mb)
+);
+
+TRACE_EVENT(ved_irq_deblock_required,
+       TP_PROTO(struct fw_deblock_required_msg *msg),
+       TP_ARGS(msg),
+       TP_STRUCT__entry(
+               __field(uint16_t, seqno)
+       ),
+       TP_fast_assign(
+               __entry->seqno = msg->header.bits.msg_fence;
+       ),
+       TP_printk("seq=0x%04x",
+               __entry->seqno)
+);
+
+#endif /* _IPVR_TRACE_H_ */
+
+ /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
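
Each TRACE_EVENT() above expands into a trace_<name>() helper that the driver
calls at the matching point; trace_ved_cmd_send(), for instance, is called
from ved_cmd.c below. At runtime the events are enabled and read back through
tracefs (assuming debugfs is mounted at the usual location):

  # echo 1 > /sys/kernel/debug/tracing/events/ipvr/enable
  # cat /sys/kernel/debug/tracing/trace_pipe
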
diff --git a/drivers/gpu/drm/ipvr/ved_cmd.c b/drivers/gpu/drm/ipvr/ved_cmd.c
new file mode 100644
index 0000000..0ae46e5
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_cmd.c
@@ -0,0 +1,1269 @@
+/**************************************************************************
+ * ved_cmd.c: VED command handling between host driver and VED firmware
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#include "ipvr_gem.h"
+#include "ipvr_mmu.h"
+#include "ipvr_buffer.h"
+#include "ipvr_trace.h"
+#include "ipvr_fence.h"
+#include "ved_cmd.h"
+#include "ved_fw.h"
+#include "ved_msg.h"
+#include "ved_reg.h"
+#include "ved_ec.h"
+#include "ved_init.h"
+#include "ved_pm.h"
+#include <linux/pm_runtime.h>
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#ifndef list_first_entry
+#define list_first_entry(ptr, type, member) \
+       list_entry((ptr)->next, type, member)
+#endif
+
+int32_t ved_mtx_send(struct drm_ipvr_private *dev_priv, const void *msg)
+{
+       static struct fw_padding_msg pad_msg;
+       const uint32_t *p_msg = (uint32_t *)msg;
+       uint32_t msg_num, words_free, ridx, widx, buf_size, buf_offset;
+       int32_t ret = 0;
+       int i;
+       union msg_header *header;
+       header = (union msg_header *)msg;
+
+       IPVR_DEBUG_ENTRY("enter.\n");
+
+       /* we need clocks enabled before we touch VEC local ram,
+        * but fw will take care of the clock after fw is loaded
+        */
+
+       msg_num = (header->bits.msg_size + 3) / 4;
+
+       /* debug code for msg dump */
+       IPVR_DEBUG_VED("MSVDX: ved_mtx_send is %dDW\n", msg_num);
+
+       for (i = 0; i < msg_num; i++)
+               IPVR_DEBUG_VED("   0x%08x\n", p_msg[i]);
+
+       buf_size = VED_REG_READ32(MSVDX_COMMS_TO_MTX_BUF_SIZE) &
+                  ((1 << 16) - 1);
+
+       if (msg_num > buf_size) {
+               ret = -EINVAL;
+               IPVR_ERROR("VED: message exceeds maximum size, ret:%d\n", ret);
+               goto out;
+       }
+
+       ridx = VED_REG_READ32(MSVDX_COMMS_TO_MTX_RD_INDEX);
+       widx = VED_REG_READ32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+       /* 0x2000 is the VEC local RAM offset */
+       buf_offset =
+               (VED_REG_READ32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
+
+       /* message would wrap, need to send a pad message */
+       if (widx + msg_num > buf_size) {
+               /* Shouldn't happen for a PAD message itself */
+               if (header->bits.msg_type == MTX_MSGID_PADDING)
+                       IPVR_DEBUG_WARN("VED: should not wrap pad msg, "
+                               "buf_size is %d, widx is %d, msg_num is %d.\n",
+                               buf_size, widx, msg_num);
+
+               /* If the read pointer is at zero then we must wait for it to
+                * change, otherwise the write pointer will equal the read
+                * pointer, which should only happen when the buffer is empty.
+                *
+                * This will only happen if we try to overfill the queue;
+                * queue management should make sure this never happens in
+                * the first place.
+                */
+               if (0 == ridx) {
+                       ret = -EINVAL;
+                       IPVR_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
+                       goto out;
+               }
+
+               /* Send a pad message */
+               pad_msg.header.bits.msg_size = (buf_size - widx) << 2;
+               pad_msg.header.bits.msg_type = MTX_MSGID_PADDING;
+               ved_mtx_send(dev_priv, (void *)&pad_msg);
+               widx = VED_REG_READ32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
+       }
+
+       if (widx >= ridx)
+               words_free = buf_size - (widx - ridx) - 1;
+       else
+               words_free = ridx - widx - 1;
+
+       if (msg_num > words_free) {
+               ret = -EINVAL;
+               IPVR_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
+               goto out;
+       }
+       while (msg_num > 0) {
+               VED_REG_WRITE32(*p_msg++, buf_offset + (widx << 2));
+               msg_num--;
+               widx++;
+               if (buf_size == widx)
+                       widx = 0;
+       }
+
+       VED_REG_WRITE32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+
+       /* Make sure clocks are enabled before we kick
+        * but fw will take care of the clock after fw is loaded
+        */
+
+       /* signal an interrupt to let the mtx know there is a new message */
+       VED_REG_WRITE32(1, MTX_KICK_INPUT_OFFSET);
+
+       /* Read MSVDX Register several times in case Idle signal assert */
+       VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET);
+       VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET);
+       VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET);
+       VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET);
+
+out:
+       return ret;
+}
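
The read/write-index arithmetic in ved_mtx_send() is the classic
one-slot-reserved ring: one word is always kept free so that widx == ridx can
only mean "empty". A standalone sketch of the free-space computation:

#include <assert.h>
#include <stdint.h>

/* mirrors the words_free computation in ved_mtx_send() above */
static uint32_t ring_words_free(uint32_t buf_size, uint32_t ridx,
                                uint32_t widx)
{
        return (widx >= ridx) ? buf_size - (widx - ridx) - 1
                              : ridx - widx - 1;
}

int main(void)
{
        assert(ring_words_free(16, 0, 0) == 15); /* empty: all but one slot */
        assert(ring_words_free(16, 4, 3) == 0);  /* writer just behind reader */
        return 0;
}
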
+
+static int32_t ved_cmd_send(struct drm_device *dev, void *cmd,
+                       uint32_t cmd_size, struct ipvr_context *ipvr_ctx)
+{
+       int32_t ret = 0;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       union msg_header *header;
+       uint32_t cur_seq = 0xffffffff;
+
+       while (cmd_size > 0) {
+               uint32_t cur_cmd_size, cur_cmd_id;
+               header = (union msg_header *)cmd;
+               cur_cmd_size = header->bits.msg_size;
+               cur_cmd_id = header->bits.msg_type;
+
+               cur_seq = ((struct fw_msg_header *)cmd)->header.bits.msg_fence;
+
+               if (cur_seq != 0xffffffff)
+                       ipvr_ctx->cur_seq = cur_seq;
+
+               if (cur_cmd_size > cmd_size) {
+                       ret = -EINVAL;
+                       IPVR_ERROR("VED: cmd_size %u cur_cmd_size %u.\n",
+                                 cmd_size, cur_cmd_size);
+                       goto out;
+               }
+
+               /* Send the message to h/w */
+               trace_ved_cmd_send(cur_cmd_id, cur_seq);
+               ret = ved_mtx_send(dev_priv, cmd);
+               if (ret) {
+                       IPVR_DEBUG_WARN("VED: ret:%d\n", ret);
+                       goto out;
+               }
+               cmd += cur_cmd_size;
+               cmd_size -= cur_cmd_size;
+               if (cur_cmd_id == MTX_MSGID_HOST_BE_OPP ||
+                       cur_cmd_id == MTX_MSGID_DEBLOCK ||
+                       cur_cmd_id == MTX_MSGID_INTRA_OOLD) {
+                       cmd += (sizeof(struct fw_deblock_msg) - cur_cmd_size);
+                       cmd_size -=
+                               (sizeof(struct fw_deblock_msg) - cur_cmd_size);
+               }
+       }
+out:
+       IPVR_DEBUG_VED("VED: ret:%d\n", ret);
+       return ret;
+}
+
+int32_t ved_cmd_dequeue_send(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_cmd_queue *ved_cmd = NULL;
+       int32_t ret = 0;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       if (list_empty(&ved_priv->ved_queue)) {
+               IPVR_DEBUG_VED("VED: ved cmd queue empty.\n");
+               ved_priv->ved_busy = 0;
+               spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+               return -EINVAL;
+       }
+
+       ved_cmd = list_first_entry(&ved_priv->ved_queue,
+                                    struct ved_cmd_queue, head);
+       list_del(&ved_cmd->head);
+       spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+
+       IPVR_DEBUG_VED("VED: cmd queue seq is %08x.\n", ved_cmd->cmd_seq);
+
+       if (drm_ipvr_tiling)
+               ipvr_set_tile(dev, ved_cmd->tiling_scheme,
+                                  ved_cmd->tiling_stride);
+
+#ifdef CONFIG_DRM_IPVR_EC
+       /* Separate updating the frame info from backing up cmds: if a batch
+        * of cmds has no host_be_opp message there is no need to update the
+        * frame info, but the cmds still need to be backed up.
+        * This can happen when a batch of cmds is not an entire frame.
+        */
+       if (ved_cmd->host_be_opp_enabled)
+               ved_update_frame_info(ved_priv, ved_cmd->tfile,
+                       ved_cmd->cmd + ved_cmd->deblock_cmd_offset);
+
+       ved_backup_cmd(ved_priv, ved_cmd->tfile,
+                       ved_cmd->cmd,
+                       ved_cmd->cmd_size,
+                       ved_cmd->deblock_cmd_offset);
+#endif
+       ret = ved_cmd_send(dev, ved_cmd->cmd,
+                          ved_cmd->cmd_size, ved_cmd->ipvr_ctx);
+       if (ret) {
+               IPVR_ERROR("VED: ved_cmd_send failed.\n");
+               ret = -EINVAL;
+       }
+
+       kfree(ved_cmd->cmd);
+       kfree(ved_cmd);
+
+       return ret;
+}
+
+void ved_flush_cmd_queue(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_cmd_queue *ved_cmd;
+       struct list_head *list, *next;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       /* Flush the VED cmd queue and signal all fences in the queue */
+       list_for_each_safe(list, next, &ved_priv->ved_queue) {
+               ved_cmd = list_entry(list, struct ved_cmd_queue, head);
+               list_del(list);
+               IPVR_DEBUG_VED("VED: flushing sequence:0x%08x.\n",
+                                 ved_cmd->cmd_seq);
+               ved_priv->ved_cur_seq = ved_cmd->cmd_seq;
+
+               ipvr_fence_process(dev_priv, ved_cmd->cmd_seq, IPVR_CMD_SKIP);
+
+               kfree(ved_cmd->cmd);
+               kfree(ved_cmd);
+       }
+       ved_priv->ved_busy = 0;
+       spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+}
+
+/*
+ * Returns:
+ * -EINVAL if invalid argument found
+ * -EFAULT if mapping command buffer fails
+ * -ENOMEM if memory not enough for copying command
+ * 0 if successful.
+ */
+static int32_t
+ved_map_command(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *cmd_buffer,
+                               uint32_t cmd_size, void **ved_cmd,
+                               uint16_t sequence, int32_t copy_cmd,
+                               struct ipvr_context *ipvr_ctx)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       int32_t ret = 0;
+       uint32_t cmd_size_remain;
+       void *cmd, *cmd_copy, *cmd_start;
+       union msg_header *header;
+       struct ipvr_fence *fence = NULL;
+       int fence_fd = -1;
+
+       /* command buffers may not exceed page boundary */
+       if (cmd_size > PAGE_SIZE)
+               return -EINVAL;
+
+       cmd_start = kmap(sg_page(cmd_buffer->sg_table->sgl));
+       if (!cmd_start) {
+               IPVR_ERROR("VED: kmap failed.\n");
+               return -EFAULT;
+       }
+
+       cmd = cmd_start;
+       cmd_size_remain = cmd_size;
+
+       ved_priv->host_be_opp_enabled = 0;
+       ved_priv->deblock_cmd_offset = VED_INVALID_OFFSET;
+
+       while (cmd_size_remain > 0) {
+               uint32_t cur_cmd_size, cur_cmd_id, mmu_ptd, msvdx_mmu_invalid;
+               if (cmd_size_remain < MTX_GENMSG_HEADER_SIZE) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               header = (union msg_header *)cmd;
+               cur_cmd_size = header->bits.msg_size;
+               cur_cmd_id = header->bits.msg_type;
+               mmu_ptd = 0;
+               msvdx_mmu_invalid = 0;
+
+               IPVR_DEBUG_VED("cmd start at %lx cur_cmd_size = %d"
+                              " cur_cmd_id = %02x fence = %08x\n",
+                              (unsigned long)cmd, cur_cmd_size,
+                              cur_cmd_id, sequence);
+               if ((cur_cmd_size % sizeof(uint32_t))
+                   || (cur_cmd_size > cmd_size_remain)) {
+                       ret = -EINVAL;
+                       IPVR_ERROR("VED: cmd size err, ret:%d.\n", ret);
+                       goto out;
+               }
+
+               switch (cur_cmd_id) {
+               case MTX_MSGID_DECODE_FE: {
+                       struct fw_decode_msg *decode_msg;
+                       if (sizeof(struct fw_decode_msg) > cmd_size_remain) {
+                               /* Msg size is not correct */
+                               ret = -EINVAL;
+                               IPVR_DEBUG_VED("MSVDX: wrong msg size.\n");
+                               goto out;
+                       }
+                       decode_msg = (struct fw_decode_msg *)cmd;
+                       decode_msg->header.bits.msg_fence = sequence;
+
+                       mmu_ptd = ipvr_get_default_pd_addr(dev_priv->mmu);
+                       msvdx_mmu_invalid =
+                               atomic_cmpxchg(&dev_priv->ipvr_mmu_invaldc,
+                                              1, 0);
+                       if (msvdx_mmu_invalid == 1) {
+                               decode_msg->flag_size.bits.flags |=
+                                               FW_INVALIDATE_MMU;
+                               IPVR_DEBUG_VED("VED: Set MMU invalidate\n");
+                       }
+                       /*
+                       if (msvdx_mmu_invalid == 1)
+                               ipvr_mmu_pgtable_dump(dev);
+                       */
+                       /* if ctx_id is not passed, use default id */
+                       if (decode_msg->mmu_context.bits.context == 0)
+                               decode_msg->mmu_context.bits.context =
+                                       dev_priv->default_ctx.ctx_id;
+
+                       decode_msg->mmu_context.bits.mmu_ptd = mmu_ptd >> 8;
+                       IPVR_DEBUG_VED("VED: MSGID_DECODE_FE:"
+                                       " - fence: %08x"
+                                       " - flags: %08x - buffer_size: %08x"
+                                       " - crtl_alloc_addr: %08x"
+                                       " - context: %08x - mmu_ptd: %08x"
+                                       " - operating_mode: %08x.\n",
+                                       decode_msg->header.bits.msg_fence,
+                                       decode_msg->flag_size.bits.flags,
+                                       decode_msg->flag_size.bits.buffer_size,
+                                       decode_msg->crtl_alloc_addr,
+                                       decode_msg->mmu_context.bits.context,
+                                       decode_msg->mmu_context.bits.mmu_ptd,
+                                       decode_msg->operating_mode);
+                       break;
+               }
+
+               case MTX_MSGID_HOST_BE_OPP_MFLD:
+                       ved_priv->host_be_opp_enabled = 1;
+                       ved_priv->deblock_cmd_offset =
+                                       cmd_size - cmd_size_remain;
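+                       /* fall through */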
+               case MTX_MSGID_INTRA_OOLD_MFLD:
+               case MTX_MSGID_DEBLOCK_MFLD: {
+                       struct fw_deblock_msg *deblock_msg;
+                       if (sizeof(struct fw_deblock_msg) > cmd_size_remain) {
+                               /* Msg size is not correct */
+                               ret = -EINVAL;
+                               IPVR_DEBUG_VED("MSVDX: wrong msg size.\n");
+                               goto out;
+                       }
+                       deblock_msg = (struct fw_deblock_msg *)cmd;
+                       mmu_ptd = ipvr_get_default_pd_addr(dev_priv->mmu);
+                       msvdx_mmu_invalid =
+                               atomic_cmpxchg(&dev_priv->ipvr_mmu_invaldc,
+                                               1, 0);
+                       if (msvdx_mmu_invalid == 1) {
+                               deblock_msg->flag_type.bits.flags |=
+                                                       FW_INVALIDATE_MMU;
+                               IPVR_DEBUG_VED("VED: Set MMU invalidate\n");
+                       }
+
+                       /* patch to right cmd type */
+                       deblock_msg->header.bits.msg_type =
+                                       cur_cmd_id -
+                                       MTX_MSGID_DEBLOCK_MFLD +
+                                       MTX_MSGID_DEBLOCK;
+
+                       deblock_msg->header.bits.msg_fence = sequence & 0xffff;
+                       deblock_msg->mmu_context.bits.mmu_ptd = (mmu_ptd >> 8);
+                       /* if ctx_id is not passed, use default id */
+                       if (deblock_msg->mmu_context.bits.context == 0)
+                               deblock_msg->mmu_context.bits.context =
+                                       dev_priv->default_ctx.ctx_id;
+                       IPVR_DEBUG_VED("VED: MSGID_DEBLOCK:"
+                               " - fence: %08x"
+                               " - flags: %08x - slice_field_type: %08x"
+                               " - operating_mode: %08x"
+                               " - context: %08x - mmu_ptd: %08x"
+                               " - frame_height_mb: %08x - pic_width_mb: %08x"
+                               " - address_a0: %08x - address_a1: %08x"
+                               " - mb_param_address: %08x"
+                               " - ext_stride_a: %08x"
+                               " - address_b0: %08x - address_b1: %08x"
+                               " - alt_output_flags_b: %08x.\n",
+                               deblock_msg->header.bits.msg_fence,
+                               deblock_msg->flag_type.bits.flags,
+                               deblock_msg->flag_type.bits.slice_field_type,
+                               deblock_msg->operating_mode,
+                               deblock_msg->mmu_context.bits.context,
+                               deblock_msg->mmu_context.bits.mmu_ptd,
+                               deblock_msg->pic_size.bits.frame_height_mb,
+                               deblock_msg->pic_size.bits.pic_width_mb,
+                               deblock_msg->address_a0,
+                               deblock_msg->address_a1,
+                               deblock_msg->mb_param_address,
+                               deblock_msg->ext_stride_a,
+                               deblock_msg->address_b0,
+                               deblock_msg->address_b1,
+                               deblock_msg->alt_output_flags_b);
+                       cmd += (sizeof(struct fw_deblock_msg) - cur_cmd_size);
+                       cmd_size_remain -= (sizeof(struct fw_deblock_msg) -
+                                               cur_cmd_size);
+                       break;
+               }
+               default:
+                       /* Msg not supported */
+                       ret = -EINVAL;
+                       IPVR_DEBUG_WARN("VED: msg not supported.\n");
+                       goto out;
+               }
+
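+               /* advance past the current message; the deblock case above
+                * has already consumed its payload beyond cur_cmd_size */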
+               cmd += cur_cmd_size;
+               cmd_size_remain -= cur_cmd_size;
+               if (((sequence++) & 0xf) == 0xf) {
+                       ret = -EINVAL;
+                       IPVR_DEBUG_WARN("VED: too many cmds, abort.\n");
+                       goto out;
+               }
+       }
+
+       ved_priv->num_cmd = ((--sequence) & 0xf);
+
+       ipvr_fence_create(dev_priv, &fence, &fence_fd);
+
+       ipvr_fence_buffer_objects(&dev_priv->validate_ctx.validate_list,
+                               fence, fence_fd);
+
+       if (copy_cmd) {
+               IPVR_DEBUG_VED("VED: copying command.\n");
+
+               cmd_copy = kzalloc(cmd_size, GFP_KERNEL);
+               if (cmd_copy == NULL) {
+                       ret = -ENOMEM;
+                       IPVR_ERROR("VED: kzalloc failed, ret=%d\n", ret);
+                       goto out;
+               }
+               memcpy(cmd_copy, cmd_start, cmd_size);
+               *ved_cmd = cmd_copy;
+       } else {
+               IPVR_DEBUG_VED("VED: did NOT copy command.\n");
+               if (drm_ipvr_tiling)
+                       ipvr_set_tile(dev, ved_priv->ipvr_ctx->tiling_scheme,
+                                       ved_priv->ipvr_ctx->tiling_stride);
+
+#ifdef CONFIG_DRM_IPVR_EC
+               if (ved_priv->host_be_opp_enabled) {
+                       ved_update_frame_info(ved_priv,
+                               ved_priv->tfile,
+                               cmd_start + ved_priv->deblock_cmd_offset);
+               }
+               ved_backup_cmd(ved_priv, ved_priv->tfile,
+                               cmd_start,
+                               cmd_size,
+                               ved_priv->deblock_cmd_offset);
+#endif
+               ret = ved_cmd_send(dev, cmd_start, cmd_size, ipvr_ctx);
+               if (ret) {
+                       IPVR_ERROR("VED: ved_cmd_send failed\n");
+                       ret = -EINVAL;
+               }
+       }
+
+out:
+       kunmap(sg_page(cmd_buffer->sg_table->sgl));
+
+       return ret;
+}
+
+/*
+ * Returns:
+ * -EINVAL
+ * -EFAULT
+ * -ENOMEM
+ * 0
+ */
+static int32_t
+ved_submit_cmdbuf_copy(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *cmd_buffer,
+                               uint32_t cmd_size,
+                               struct ipvr_context *ipvr_ctx,
+                               uint32_t fence_flag)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       struct ved_cmd_queue *ved_cmd;
+       uint16_t sequence = (dev_priv->last_seq << 4);
+       unsigned long irq_flags;
+       void *cmd = NULL;
+       int32_t ret;
+
+       /* queue the command to be sent when the h/w is ready */
+       IPVR_DEBUG_VED("VED: queueing sequence:%08x.\n",
+                         sequence);
+       ved_cmd = kzalloc(sizeof(struct ved_cmd_queue),
+                           GFP_KERNEL);
+       if (ved_cmd == NULL) {
+               IPVR_ERROR("MSVDXQUE: Out of memory...\n");
+               return -ENOMEM;
+       }
+
+       ret = ved_map_command(dev, cmd_buffer, cmd_size,
+                               &cmd, sequence, 1, ipvr_ctx);
+       if (ret) {
+               IPVR_ERROR("VED: Failed to extract cmd\n");
+               kfree(ved_cmd);
+               /* -EINVAL or -EFAULT or -ENOMEM */
+               return ret;
+       }
+       ved_cmd->cmd = cmd;
+       ved_cmd->cmd_size = cmd_size;
+       ved_cmd->cmd_seq = sequence;
+
+       ved_cmd->tiling_scheme = ved_priv->ipvr_ctx->tiling_scheme;
+       ved_cmd->tiling_stride = ved_priv->ipvr_ctx->tiling_stride;
+       ved_cmd->deblock_cmd_offset =
+               ved_priv->deblock_cmd_offset;
+       ved_cmd->host_be_opp_enabled =
+               ved_priv->host_be_opp_enabled;
+       ved_cmd->tfile =
+               ved_priv->tfile;
+       ved_cmd->ipvr_ctx = ipvr_ctx;
+       spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       list_add_tail(&ved_cmd->head, &ved_priv->ved_queue);
+       spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+       if (!ved_priv->ved_busy) {
+               ved_priv->ved_busy = 1;
+               IPVR_DEBUG_VED("VED: Need immediate dequeue.\n");
+               ved_cmd_dequeue_send(dev);
+       }
+
+       return ret;
+}
+
+int32_t
+ved_submit_video_cmdbuf(struct drm_device *dev,
+                               struct drm_ipvr_gem_object *cmd_buffer,
+                               uint32_t cmd_size,
+                               struct ipvr_context *ipvr_ctx,
+                               uint32_t fence_flag)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint16_t sequence = (dev_priv->last_seq << 4) & 0xffff;
+       unsigned long irq_flags;
+       int32_t ret = 0;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+#ifdef CONFIG_DRM_IPVR_EC
+       int offset = 0;
+#endif
+
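+       /* IPVR_FENCE_SIGNALED_SEQ is reserved for already-signaled fences,
+        * so never hand it out as a command sequence number */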
+       if (sequence == IPVR_FENCE_SIGNALED_SEQ)
+               sequence = (++dev_priv->last_seq << 4) & 0xffff;
+
+       /* fw_b0_uploaded handling is not needed for Baytrail */
+
+       if (!ipvr_ctx) {
+               IPVR_ERROR("VED: null ctx\n");
+               return -EFAULT;
+       }
+
+       spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+
+       ved_priv->ipvr_ctx = ipvr_ctx;
+
+       IPVR_DEBUG_VED("sequence is 0x%x, needs_reset is 0x%x.\n",
+                       sequence, ved_priv->ved_needs_reset);
+
+       if (ved_priv->ved_busy) {
+               spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+               ret = ved_submit_cmdbuf_copy(dev, cmd_buffer,
+                           cmd_size, ipvr_ctx, fence_flag);
+
+               return ret;
+       }
+
+       if (ved_priv->ved_needs_reset) {
+               spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+               IPVR_DEBUG_VED("VED: will reset msvdx.\n");
+
+               if (!ved_priv->fw_loaded_by_punit) {
+                       if (ved_core_reset(dev_priv)) {
+                               ret = -EBUSY;
+                               IPVR_ERROR("VED: Reset failed.\n");
+                               return ret;
+                       }
+               }
+
+               ved_priv->ved_needs_reset = 0;
+               ved_priv->ved_busy = 0;
+
+               if (ved_priv->fw_loaded_by_punit) {
+                       ret = ved_post_init(dev);
+                       if (ret) {
+                               ret = -EBUSY;
+                               IPVR_DEBUG_WARN("VED: ved_post_init fail.\n");
+                               return ret;
+                       }
+               } else {
+                       if (ipvr_ved_init(dev)) {
+                               ret = -EBUSY;
+                               IPVR_DEBUG_WARN("VED: ipvr_ved_init fail.\n");
+                               return ret;
+                       }
+               }
+
+#ifdef CONFIG_DRM_IPVR_EC
+               /* restore the state when power up during EC */
+               if (ved_priv->vec_ec_mem_saved) {
+                       for (offset = 0; offset < 4; ++offset)
+                               VED_REG_WRITE32(
+                                       ved_priv->vec_ec_mem_data[offset],
+                                       0x2cb0 + offset * 4);
+
+                       VED_REG_WRITE32(ved_priv->vec_ec_mem_data[4],
+                                       0x2cc4);
+                       ved_priv->vec_ec_mem_saved = 0;
+               }
+#endif
+
+               spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       }
+
+       if (ved_priv->fw_loaded_by_punit && !ved_priv->rendec_initialized) {
+               spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+               IPVR_DEBUG_VED("VED: setup msvdx.\n");
+               ret = ved_post_boot_init(dev);
+               if (ret) {
+                       IPVR_ERROR("VED: fail to setup msvdx.\n");
+                       /* FIXME: find a proper return value */
+                       return -EFAULT;
+               }
+               ved_priv->rendec_initialized = 1;
+
+               IPVR_DEBUG_VED("VED: setup msvdx successfully.\n");
+               spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       }
+
+       if (!ved_priv->fw_loaded_by_punit && !ved_priv->ved_fw_loaded) {
+               spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+               IPVR_DEBUG_VED("VED: reload FW to MTX\n");
+               ret = ved_setup_fw(dev);
+               if (ret) {
+                       IPVR_ERROR("VED: fail to load FW\n");
+                       /* FIXME: find a proper return value */
+                       return -EFAULT;
+               }
+               ved_priv->ved_fw_loaded = 1;
+
+               IPVR_DEBUG_VED("VED: load firmware successfully\n");
+               spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       }
+
+       ved_priv->ved_busy = 1;
+       spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+       IPVR_DEBUG_VED("VED: commit command to HW,seq=0x%08x\n",
+                         sequence);
+       ret = ved_map_command(dev, cmd_buffer, cmd_size,
+                               NULL, sequence, 0, ipvr_ctx);
+       if (ret)
+               IPVR_ERROR("VED: Failed to extract cmd.\n");
+
+       return ret;
+}
+
+/* Returns:
+ * -EINVAL
+ * -ENOMEM
+ * -EFAULT
+ * -EBUSY
+ */
+int32_t ved_cmdbuf_video(struct drm_file *file_priv,
+                                               struct drm_ipvr_gem_object *cmd_buffer,
+                                               uint32_t cmdbuf_size,
+                                               struct ipvr_context *ipvr_ctx)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       int32_t ret;
+
+       /*
+        * FIXME: this doesn't look right. Fencing should be done AFTER
+        * command submission, and drm_ved_idle must idle the VED completely.
+        */
+       ret = ved_submit_video_cmdbuf(dev, cmd_buffer, cmdbuf_size, ipvr_ctx, 0);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int32_t ved_handle_panic_msg(struct ved_private *ved_priv,
+                                       struct fw_panic_msg *panic_msg)
+{
+       /* for VXD385 firmware, the fence value is not validated here */
+       uint32_t i, diff = 0;
+       uint16_t fence;
+       uint32_t err_trig, irq_sts, mmu_sts, dmac_sts;
+       struct ved_frame_info *failed_frame = NULL;
+       struct drm_ipvr_private *dev_priv = ved_priv->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       IPVR_DEBUG_WARN("MSVDX: MSGID_CMD_HW_PANIC:"
+                 " Fault detected"
+                 " - Fence: %08x"
+                 " - fe_status mb: %08x"
+                 " - be_status mb: %08x"
+                 " - reserved2: %08x"
+                 " - last mb: %08x"
+                 " - resetting and ignoring error\n",
+                 panic_msg->header.bits.msg_fence,
+                 panic_msg->fe_status,
+                 panic_msg->be_status,
+                 panic_msg->mb.bits.reserved2,
+                 panic_msg->mb.bits.last_mb);
+       /*
+        * If bit 8 of MSVDX_INTERRUPT_STATUS is set the fault
+        * was caused in the DMAC. In this case you should
+        * check bits 20:22 of MSVDX_INTERRUPT_STATUS.
+        * If bit 20 is set there was a problem DMAing the buffer
+        * back to host. If bit 22 is set you'll need to get the
+        * value of MSVDX_DMAC_STREAM_STATUS (0x648).
+        * If bit 1 is set then there was an issue DMAing
+        * the bitstream or termination code for parsing.
+        */
+       err_trig = VED_REG_READ32(MSVDX_COMMS_ERROR_TRIG);
+       irq_sts = VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET);
+       mmu_sts = VED_REG_READ32(MSVDX_MMU_STATUS_OFFSET);
+       dmac_sts = VED_REG_READ32(MSVDX_DMAC_STREAM_STATUS_OFFSET);
+       IPVR_DEBUG_VED("MSVDX: MSVDX_COMMS_ERROR_TRIG is 0x%x, "
+               "MSVDX_INTERRUPT_STATUS is 0x%x, "
+               "MSVDX_MMU_STATUS is 0x%x, "
+               "MSVDX_DMAC_STREAM_STATUS is 0x%x.\n",
+               err_trig, irq_sts, mmu_sts, dmac_sts);
+
+       trace_ved_irq_panic(panic_msg, err_trig, irq_sts, mmu_sts, dmac_sts);
+
+       fence = panic_msg->header.bits.msg_fence;
+
+       if (ved_priv->fw_loaded_by_punit)
+               ved_priv->ved_needs_reset |= MSVDX_RESET_NEEDS_REUPLOAD_FW |
+                                       MSVDX_RESET_NEEDS_INIT_FW;
+       else
+               ved_priv->ved_needs_reset = 1;
+
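+       /* the panic message may carry a stale fence; if ved_cur_seq is
+        * behind last_seq the unsigned diff is huge, so nudge it forward
+        * before failing the outstanding fence below */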
+       diff = ved_priv->ved_cur_seq - dev_priv->last_seq;
+       if (diff > 0x0FFFFFFF)
+               ved_priv->ved_cur_seq++;
+
+       IPVR_DEBUG_WARN("VED: Fence ID missing, assuming %08x\n",
+                       ved_priv->ved_cur_seq);
+
+       ipvr_fence_process(dev_priv, ved_priv->ved_cur_seq, IPVR_CMD_FAILED);
+
+       /* Flush the command queue */
+       ved_flush_cmd_queue(dev);
+       if (ved_priv->host_be_opp_enabled) {
+               /* get the frame_info struct for ec frame */
+               for (i = 0; i < MAX_DECODE_BUFFERS; i++) {
+                       /* by default fence is 0, so there is problem here */
+                       if (ved_priv->frame_info[i].fence == fence) {
+                               failed_frame = &ved_priv->frame_info[i];
+                               break;
+                       }
+               }
+               if (!failed_frame) {
+                       IPVR_ERROR("VED: didn't find frame_info which matched "
+                               "the fence %d in panic message\n", fence);
+                       return -EINVAL;
+               }
+
+               failed_frame->fw_status = 1; /* set ERROR flag */
+       }
+       ved_priv->decoding_err = 1;
+       return 0;
+}
+
+static int32_t
+ved_handle_completed_msg(struct ved_private *ved_priv,
+                               struct fw_completed_msg *completed_msg)
+{
+       struct drm_ipvr_private *dev_priv = ved_priv->dev_priv;
+       struct drm_device *dev = dev_priv->dev;
+       uint16_t fence, flags;
+       struct ipvr_context *ipvr_ctx;
+
+       IPVR_DEBUG_VED("VED: MSGID_CMD_COMPLETED:"
+               " - Fence: %08x - flags: %08x - vdebcr: %08x"
+               " - first_mb : %d - last_mb: %d\n",
+               completed_msg->header.bits.msg_fence,
+               completed_msg->flags, completed_msg->vdebcr,
+               completed_msg->mb.bits.start_mb,
+               completed_msg->mb.bits.last_mb);
+
+       trace_ved_irq_completed(completed_msg);
+
+       flags = completed_msg->flags;
+       fence = completed_msg->header.bits.msg_fence;
+
+       ved_priv->ved_cur_seq = fence;
+
+#if 0
+       if (IS_MRFLD(dev))
+               ved_fw_error_detected(dev, fence, flags);
+#endif
+
+       ipvr_fence_process(dev_priv, fence, IPVR_CMD_SUCCESS);
+
+       ipvr_ctx = ipvr_find_ctx_with_fence(dev_priv, fence);
+       if (unlikely(ipvr_ctx == NULL)) {
+               IPVR_DEBUG_GENERAL("abnormal completed msg: no matching ctx.\n");
+               return -EINVAL;
+       }
+
+       if (flags & FW_VA_RENDER_HOST_INT) {
+               /* Now send the next command from the msvdx cmd queue */
+               ved_cmd_dequeue_send(dev);
+       }
+       return 0;
+}
+
+static int32_t
+ved_handle_contiguity_msg(struct ved_private *ved_priv,
+                               struct fw_contiguity_msg *contiguity_msg)
+{
+       struct ved_decode_status *fault_region = NULL;
+       struct ved_ec_context *ved_ec_ctx = NULL;
+       uint32_t reg_idx, i, fence, start, end;
+       int32_t found = 0;
+       struct ved_decode_status *cur_dec_status = NULL;
+
+       IPVR_DEBUG_VED("VED: MSGID_CONTIGUITY_WARNING:");
+       IPVR_DEBUG_VED("- Fence: %08x - end_mb: %08x - begin_mb: %08x\n",
+                       contiguity_msg->header.bits.msg_fence,
+                       contiguity_msg->mb.bits.end_mb_num,
+                       contiguity_msg->mb.bits.begin_mb_num);
+
+       trace_ved_irq_contiguity(contiguity_msg);
+       /* get error info */
+       fence = contiguity_msg->header.bits.msg_fence;
+       start = contiguity_msg->mb.bits.begin_mb_num;
+       end = contiguity_msg->mb.bits.end_mb_num;
+
+       /* get the frame_info struct for the error concealment frame */
+       for (i = 0; i < VED_MAX_EC_INSTANCE; i++)
+               if (ved_priv->ved_ec_ctx[i]->fence == (fence & (~0xf))) {
+                       ved_ec_ctx = ved_priv->ved_ec_ctx[i];
+                       found++;
+               }
+       /* ved_mtx_message_dump(dev); */
+       if (!ved_ec_ctx || !(ved_ec_ctx->tfile) || found > 1) {
+               IPVR_DEBUG_VED("no matched ctx: fence 0x%x, "
+                       "found %d, ctx 0x%08lx\n",
+                       fence, found, (unsigned long)ved_ec_ctx);
+               return -EINVAL;
+       }
+
+       if (ved_ec_ctx->cur_frame_info)
+               cur_dec_status = &ved_ec_ctx->cur_frame_info->decode_status;
+
+       fault_region = &ved_ec_ctx->decode_status;
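+       /* clamp the region and roll its start back by VED_EC_ROLLBACK MBs,
+        * presumably so concealment also covers macroblocks decoded just
+        * before the reported error */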
+       if (start > end)
+               start = end;
+       if (start < VED_EC_ROLLBACK)
+               start = 0;
+       else
+               start -= VED_EC_ROLLBACK;
+
+       if (fault_region->num_region) {
+               reg_idx = fault_region->num_region - 1;
+               if ((start <= fault_region->mb_regions[reg_idx].end) &&
+                   (end > fault_region->mb_regions[reg_idx].end)) {
+                       fault_region->mb_regions[reg_idx].end = end;
+                       if (ved_ec_ctx->cur_frame_info) {
+                               cur_dec_status->mb_regions[reg_idx].end = end;
+                       }
+               } else {
+                       reg_idx = fault_region->num_region++;
+                       if (unlikely(reg_idx >= MAX_SLICES_PER_PICTURE)) {
+                               IPVR_DEBUG_VED("too many fault regions.\n");
+                               return -EINVAL;
+                       }
+                       fault_region->mb_regions[reg_idx].start = start;
+                       fault_region->mb_regions[reg_idx].end = end;
+                       if (ved_ec_ctx->cur_frame_info) {
+                               cur_dec_status->num_region =
+                                       fault_region->num_region;
+                               cur_dec_status->mb_regions[reg_idx].start =
+                                       start;
+                               cur_dec_status->mb_regions[reg_idx].end = end;
+                       }
+               }
+       } else {
+               fault_region->num_region++;
+               fault_region->mb_regions[0].start = start;
+               fault_region->mb_regions[0].end = end;
+               if (ved_ec_ctx->cur_frame_info) {
+                       cur_dec_status->num_region = fault_region->num_region;
+                       cur_dec_status->mb_regions[0].start = start;
+                       cur_dec_status->mb_regions[0].end = end;
+               }
+       }
+       return 0;
+}
+
+static int32_t
+ved_handle_deblock_required_msg(struct ved_private *ved_priv,
+                       struct fw_deblock_required_msg *deblock_required_msg)
+{
+       uint32_t i;
+       int32_t found = 0;
+       struct ved_ec_context *ved_ec_ctx = NULL;
+       struct drm_ipvr_private *dev_priv = ved_priv->dev_priv;
+       uint16_t fence = deblock_required_msg->header.bits.msg_fence;
+       IPVR_DEBUG_VED("VED: MTX_MSGID_DEBLOCK_REQUIRED Fence=%08x.\n", fence);
+       IPVR_DEBUG_VED("Get deblock required msg for ec.\n");
+       for (i = 0; i < VED_MAX_EC_INSTANCE; i++)
+               if (ved_priv->ved_ec_ctx[i]->fence == (fence & (~0xf))) {
+                       ved_ec_ctx = ved_priv->ved_ec_ctx[i];
+                       found++;
+               }
+
+       trace_ved_irq_deblock_required(deblock_required_msg);
+
+       /* if found > 1, fence wrapping happens */
+       if (!ved_ec_ctx || !(ved_ec_ctx->tfile) || found > 1) {
+               IPVR_DEBUG_VED("no matched ctx: fence 0x%x, "
+                       "found %d, ctx 0x%08lx\n",
+                       fence, found, (unsigned long)ved_ec_ctx);
+               VED_REG_WRITE32(0, MSVDX_CMDS_END_SLICE_PICTURE_OFFSET);
+               VED_REG_WRITE32(1, MSVDX_CMDS_END_SLICE_PICTURE_OFFSET);
+               return -EINVAL;
+       }
+
+       ved_ec_ctx->cur_frame_info->fw_status = 1;
+       ved_priv->cur_msvdx_ec_ctx = ved_ec_ctx;
+
+       /* do error concealment with hw */
+       schedule_work(&ved_priv->ec_work);
+       return 0;
+}
+
+/*
+ * MSVDX MTX interrupt
+ */
+static void ved_mtx_interrupt(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       static uint32_t buf[128]; /* message buffer */
+       uint32_t ridx, widx, buf_size, buf_offset;
+       uint32_t num, ofs; /* message num and offset */
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       union msg_header *header;
+       int32_t cmd_complete = 0;
+       int ret;
+       IPVR_DEBUG_VED("VED: Got a VED MTX interrupt.\n");
+
+       /* we need clocks enabled before we touch VEC local ram,
+        * but fw will take care of the clock after fw is loaded
+        */
+
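+       /*
+        * Firmware-to-host messages arrive through a circular buffer in
+        * VEC local RAM: MSVDX_COMMS_TO_HOST_BUF_SIZE packs the buffer
+        * size in its low 16 bits and the offset in its high 16 bits; the
+        * host advances the read index, the firmware the write index.
+        */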
+loop: /* just for coding style check */
+       ridx = VED_REG_READ32(MSVDX_COMMS_TO_HOST_RD_INDEX);
+       widx = VED_REG_READ32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
+
+       /* Get out of here if nothing */
+       if (ridx == widx)
+               goto done;
+
+       buf_size = VED_REG_READ32(MSVDX_COMMS_TO_HOST_BUF_SIZE) &
+               ((1 << 16) - 1);
+       /*0x2000 is VEC Local Ram offset*/
+       buf_offset = (VED_REG_READ32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16)
+               + 0x2000;
+
+       ofs = 0;
+       buf[ofs] = VED_REG_READ32(buf_offset + (ridx << 2));
+       header = (union msg_header *)buf;
+
+       /* round to nearest word */
+       num = (header->bits.msg_size + 3) / 4;
+
+       /* a corrupt header must not overflow the local message buffer */
+       if (num > ARRAY_SIZE(buf)) {
+               IPVR_ERROR("VED: message size %u overflows buffer.\n", num);
+               num = ARRAY_SIZE(buf);
+       }
+
+       if (++ridx >= buf_size)
+               ridx = 0;
+
+       for (ofs++; ofs < num; ofs++) {
+               buf[ofs] = VED_REG_READ32(buf_offset + (ridx << 2));
+
+               if (++ridx >= buf_size)
+                       ridx = 0;
+       }
+
+       /* Update the Read index */
+       VED_REG_WRITE32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
+
+       if (ved_priv->ved_needs_reset)
+               goto loop;
+
+       switch (header->bits.msg_type) {
+       case MTX_MSGID_HW_PANIC: {
+               struct fw_panic_msg *panic_msg = (struct fw_panic_msg *)buf;
+               cmd_complete = 1;
+               ved_handle_panic_msg(ved_priv, panic_msg);
+               goto done;
+       }
+
+       case MTX_MSGID_COMPLETED: {
+               struct fw_completed_msg *completed_msg =
+                                       (struct fw_completed_msg *)buf;
+               cmd_complete = 1;
+               if (ved_handle_completed_msg(ved_priv, completed_msg))
+                       cmd_complete = 0;
+               break;
+       }
+
+       case MTX_MSGID_CONTIGUITY_WARNING: {
+               struct fw_contiguity_msg *contiguity_msg =
+                                       (struct fw_contiguity_msg *)buf;
+               ved_handle_contiguity_msg(ved_priv, contiguity_msg);
+               break;
+
+       }
+
+       case MTX_MSGID_DEBLOCK_REQUIRED: {
+               struct fw_deblock_required_msg *deblock_required_msg =
+                                       (struct fw_deblock_required_msg *)buf;
+               ved_handle_deblock_required_msg(ved_priv, deblock_required_msg);
+               break;
+       }
+       default:
+               IPVR_ERROR("VED: unknown message from MTX, ID:0x%08x.\n",
+                       header->bits.msg_type);
+               goto done;
+       }
+
+done:
+       IPVR_DEBUG_VED("VED Interrupt: finish process a message.\n");
+       if (ridx != widx) {
+               IPVR_DEBUG_VED("VED: there are more messages to be read.\n");
+               goto loop;
+       }
+
+       atomic_dec(&dev_priv->ved_power_usage);
+       ret = pm_runtime_put(&dev->platformdev->dev);
+       if (unlikely(ret < 0)) {
+               IPVR_ERROR("Error put VED power: %d\n", ret);
+       }
+       IPVR_DEBUG_PM("VED power put, usage became %d\n",
+               atomic_read(&dev->platformdev->dev.power.usage_count));
+
+       mb();   /* TBD check this... */
+}
+
+/*
+ * MSVDX interrupt.
+ */
+int32_t ved_irq_handler(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv;
+       struct ved_private *ved_priv;
+       uint32_t msvdx_stat;
+
+       if (dev == NULL) {
+               IPVR_ERROR("VED: invalid dev.\n");
+               return -EINVAL;
+       }
+
+       dev_priv = dev->dev_private;
+
+       ved_priv = dev_priv->ved_private;
+       msvdx_stat = VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET);
+
+       /* The driver only needs to handle the MTX irq: an MMU fault irq
+        * always comes with a HW PANIC message, and if the HW/FW hangs
+        * completely, the lockup handler takes care of the reset.
+        */
+       if (msvdx_stat & MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK) {
+               /* Ideally we should never get here */
+               IPVR_DEBUG_IRQ("VED: MMU Fault:0x%x\n", msvdx_stat);
+
+               /* Pause MMU */
+               VED_REG_WRITE32(MSVDX_MMU_CONTROL0_MMU_PAUSE_MASK,
+                            MSVDX_MMU_CONTROL0_OFFSET);
+               wmb();
+
+               /* Clear this interrupt bit only */
+               VED_REG_WRITE32(MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK,
+                            MSVDX_INTERRUPT_CLEAR_OFFSET);
+               VED_REG_READ32(MSVDX_INTERRUPT_CLEAR_OFFSET);
+               rmb();
+
+               ved_priv->ved_needs_reset = 1;
+       } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_MTX_IRQ_MASK) {
+               IPVR_DEBUG_IRQ("VED: msvdx_stat: 0x%x(MTX)\n", msvdx_stat);
+
+               /* Clear all interrupt bits */
+               if (ved_priv->fw_loaded_by_punit)
+                       VED_REG_WRITE32(MSVDX_INTERRUPT_STATUS_MTX_IRQ_MASK,
+                                    MSVDX_INTERRUPT_CLEAR_OFFSET);
+               else
+                       VED_REG_WRITE32(0xffff, MSVDX_INTERRUPT_CLEAR_OFFSET);
+
+               VED_REG_READ32(MSVDX_INTERRUPT_CLEAR_OFFSET);
+               rmb();
+
+               ved_mtx_interrupt(dev);
+       }
+
+       return 0;
+}
+
+int32_t ved_check_idle(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv;
+       struct ved_private *ved_priv;
+       uint32_t loop;
+       int32_t ret = 0;
+
+       dev_priv = dev->dev_private;
+       if (!dev_priv)
+               return -ENODEV;
+
+       ved_priv = dev_priv->ved_private;
+       if (!ved_priv)
+               return 0;
+
+       if (ved_priv->fw_loaded_by_punit && ved_priv->rendec_initialized == 0)
+               return 0;
+
+       if (!ved_priv->fw_loaded_by_punit && ved_priv->ved_fw_loaded == 0)
+               return 0;
+
+       if (ved_priv->ved_busy) {
+               IPVR_DEBUG_PM("VED: ved_busy was set, return busy.\n");
+               return -EBUSY;
+       }
+
+       if (ved_priv->fw_loaded_by_punit) {
+               if (!(VED_REG_READ32(MSVDX_COMMS_FW_STATUS) &
+                                       MSVDX_FW_STATUS_HW_IDLE)) {
+                       IPVR_DEBUG_PM("MSVDX_COMMS_SIGNATURE reg is 0x%x,\n"
+                               "MSVDX_COMMS_FW_STATUS reg is 0x%x,\n"
+                               "indicating hw is busy.\n",
+                               VED_REG_READ32(MSVDX_COMMS_SIGNATURE),
+                               VED_REG_READ32(MSVDX_COMMS_FW_STATUS));
+                       return -EBUSY;
+               }
+       }
+
+       /* On cores below revision 50502 there is one case where read
+        * requests may not drop to zero: a page fault. Check the core
+        * revision via MSVDX_CORE_REV (the 385 core reads 0x20001), check
+        * whether an MMU page fault happened via MSVDX_INTERRUPT_STATUS,
+        * and confirm it was a page-table rather than a protection fault
+        * via MSVDX_MMU_STATUS. In that case ved_core_reset is the
+        * workaround, so report idle and let the reset path recover. */
+       if ((VED_REG_READ32(MSVDX_CORE_REV_OFFSET) < 0x00050502) &&
+               (VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET)
+                       & MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK) &&
+               (VED_REG_READ32(MSVDX_MMU_STATUS_OFFSET) & 1)) {
+               IPVR_DEBUG_WARN("mmu page fault, recover by core_reset.\n");
+               return 0;
+       }
+
+       /* poll MSVDX_MMU_MEM_REQ repeatedly to confirm there are no
+        * outstanding memory requests before allowing power off */
+       for (loop = 0; loop < 10; loop++)
+               ret = ved_wait_for_register(dev_priv,
+                                       MSVDX_MMU_MEM_REQ_OFFSET,
+                                       0, 0xff, 100, 1);
+       if (ret) {
+               IPVR_DEBUG_WARN("MSVDX: MSVDX_MMU_MEM_REQ reg is 0x%x,\n"
+                               "indicating mem busy; prevent powering off ved."
+                               " MSVDX_COMMS_FW_STATUS reg is 0x%x,"
+                               " MSVDX_COMMS_ERROR_TRIG reg is 0x%x.",
+                               VED_REG_READ32(MSVDX_MMU_MEM_REQ_OFFSET),
+                               VED_REG_READ32(MSVDX_COMMS_FW_STATUS),
+                               VED_REG_READ32(MSVDX_COMMS_ERROR_TRIG));
+               return -EBUSY;
+       }
+       /*
+               if (ved_priv->ved_hw_busy) {
+                       IPVR_DEBUG_PM("VED: %s, HW is busy\n", __func__);
+                       return -EBUSY;
+               }
+       */
+       return 0;
+}
+
+void ved_check_reset_fw(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+
+       /* handling fw upload here if required */
+       /* power off first, then hw_begin will power up/upload FW correctly */
+       if (ved_priv->ved_needs_reset & MSVDX_RESET_NEEDS_REUPLOAD_FW) {
+               ved_priv->ved_needs_reset &= ~MSVDX_RESET_NEEDS_REUPLOAD_FW;
+               spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+               IPVR_DEBUG_VED("VED: force power off VED due to decode err\n");
+               /*ospm_apm_power_down_ved(dev, 1);*/
+               spin_lock_irqsave(&ved_priv->ved_lock, irq_flags);
+       }
+       spin_unlock_irqrestore(&ved_priv->ved_lock, irq_flags);
+}
diff --git a/drivers/gpu/drm/ipvr/ved_cmd.h b/drivers/gpu/drm/ipvr/ved_cmd.h
new file mode 100644
index 0000000..f3b5138
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_cmd.h
@@ -0,0 +1,104 @@
+/**************************************************************************
+ * ved_cmd.h: VED header file to support command buffer handling
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _VED_CMD_H_
+#define _VED_CMD_H_
+
+#include "ipvr_drv.h"
+#include "ipvr_drm.h"
+#include "ipvr_gem.h"
+#include "ved_reg.h"
+#include "ved_ec.h"
+#include "ved_pm.h"
+#include "ipvr_fence.h"
+#include "ipvr_exec.h"
+
+extern int32_t drm_ipvr_tiling;
+
+/* HOST_BE_OPP parameters */
+struct HOST_BE_OPP_PARAMS {
+       uint32_t handle;        /* struct ttm_buffer_object * of REGIO */
+       uint32_t buffer_stride;
+       uint32_t buffer_size;
+       uint32_t picture_width_mb;
+       uint32_t size_mb;
+};
+
+struct ved_cmd_queue {
+       struct list_head head;
+       void *cmd;
+       uint32_t cmd_size;
+       uint16_t cmd_seq;
+       uint32_t fence_flag;
+       uint8_t tiling_scheme;
+       uint8_t tiling_stride;
+       uint32_t host_be_opp_enabled;
+       uint32_t deblock_cmd_offset;
+       struct drm_file *tfile;
+       struct ipvr_context *ipvr_ctx;
+       int32_t frame_boundary;
+};
+
+#define VED_PMSTATE_POWERUP             0
+#define VED_PMSTATE_CLOCKGATED          1
+#define VED_PMSTATE_POWERDOWN           2
+
+#define VED_NEW_PMSTATE(drm_dev, ved_priv, new_state)                  \
+do {                                                                   \
+       ved_priv->ved_pmstate = new_state;                              \
+       if (new_state == VED_PMSTATE_POWERDOWN)                         \
+               ved_priv->pm_gating_count++;                            \
+       sysfs_notify_dirent(ved_priv->sysfs_pmstate);                   \
+       IPVR_DEBUG_PM("VED: %s, power gating count 0x%08x\n",           \
+               (new_state == VED_PMSTATE_POWERUP) ? "powerup"          \
+               : ((new_state == VED_PMSTATE_POWERDOWN) ? "powerdown"   \
+                       : "clockgated"), ved_priv->pm_gating_count);    \
+} while (0)
+
+int32_t ved_irq_handler(struct drm_device *dev);
+
+int32_t ved_mtx_send(struct drm_ipvr_private *dev_priv, const void *msg);
+
+int32_t ved_check_idle(struct drm_device *dev);
+
+void ved_check_reset_fw(struct drm_device *dev);
+
+void ved_flush_cmd_queue(struct drm_device *dev);
+
+int32_t ved_cmdbuf_video(struct drm_file *file_priv,
+                               struct drm_ipvr_gem_object *cmd_buffer,
+                               uint32_t cmdbuf_size,
+                               struct ipvr_context *ipvr_ctx);
+
+int32_t ved_submit_video_cmdbuf(struct drm_device *dev,
+                                       struct drm_ipvr_gem_object *cmd_buffer,
+                                       uint32_t cmd_size,
+                                       struct ipvr_context *ipvr_ctx,
+                                       uint32_t fence_flag);
+
+int32_t ved_cmd_dequeue_send(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ved_ec.c b/drivers/gpu/drm/ipvr/ved_ec.c
new file mode 100644
index 0000000..2aa52cc
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_ec.c
@@ -0,0 +1,584 @@
+/**************************************************************************
+ * ved_ec.c: VED error concealment support when decoding error happened
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Li Zeng <li.zeng at intel.com>
+ *
+ **************************************************************************/
+
+#include "ved_ec.h"
+#include "ved_cmd.h"
+#include "ved_msg.h"
+#include "ved_reg.h"
+#include "ved_fw.h"
+#include "ved_pm.h"
+#include <linux/pm_runtime.h>
+
+#define MAX_SIZE_IN_MB         (4096 / 16)
+
+static inline int32_t
+ved_cmd_port_write(struct drm_ipvr_private *dev_priv,
+               uint32_t offset, uint32_t value, uint32_t *cmd_space)
+{
+       uint32_t max_attempts = 0xff;
+       uint32_t attempts = 0;
+
+       while (*cmd_space == 0) {
+               *cmd_space = VED_REG_READ32(
+                       MSVDX_CORE_CR_MSVDX_COMMAND_SPACE_OFFSET +
+                       MSVDX_CORE_BASE);
+               if (*cmd_space)
+                       break;
+               IPVR_UDELAY(2);
+               attempts++;
+               if (attempts > max_attempts) {
+                       IPVR_DEBUG_WARN("VED: poll cmd space timeout.\n");
+                       return -ETIMEDOUT;
+               }
+       }
+
+       VED_REG_WRITE32(value, offset + MSVDX_CMDS_BASE);
+       (*cmd_space)--;
+       /*
+        *IPVR_DEBUG_WARN("VED: poll cmd space attempts %d\n", attempts);
+       */
+       return 0;
+}
+
+#define VED_CMDPORT_WRITE(_dev_priv_, _offset_, _cmd_, _cmd_space_)    \
+       do {                                                            \
+               ret = ved_cmd_port_write(_dev_priv_,                    \
+                                _offset_, _cmd_, &_cmd_space_);        \
+               if (ret) {                                              \
+                       IPVR_DEBUG_WARN("VED: write cmd fail, abort\n");\
+                       goto ec_done;                                   \
+               }                                                       \
+       } while (0)
+
+#define VED_CMDPORT_WRITE_FAST(_dev_priv_, _offset_, _cmd_, _cmd_space_)\
+       ved_cmd_port_write(_dev_priv_, _offset_, _cmd_, &_cmd_space_)
+
+void ved_do_concealment(struct work_struct *work)
+{
+       struct ved_private *ved_priv =
+                       container_of(work, struct ved_private, ec_work);
+       /* dev_priv must be valid before the pm_runtime_get_sync call below */
+       struct drm_ipvr_private *dev_priv = ved_priv->dev_priv;
+       struct ved_ec_context *ved_ec_ctx = ved_priv->cur_msvdx_ec_ctx;
+       struct ved_decode_status *fault_region = NULL;
+       struct fw_deblock_msg *deblock_msg =
+               (struct fw_deblock_msg *)(ved_ec_ctx->unfenced_cmd +
+                       ved_ec_ctx->deblock_cmd_offset);
+       uint32_t width_in_mb, height_in_mb, cmd;
+       int32_t conceal_above_row = 0, loop, mb_loop;
+       uint32_t cmd_space = 0;
+       int32_t ret = 0;
+       int32_t pm_ret = 0;
+
+       pm_ret = pm_runtime_get_sync(&dev_priv->dev->platformdev->dev);
+       if (unlikely(pm_ret < 0)) {
+               IPVR_ERROR("Error get VED power: %d\n", pm_ret);
+               return;
+       }
+
+       fault_region = &ved_ec_ctx->decode_status;
+
+       /* Concealment must be done in time, otherwise a panic msg
+        * will be signaled by msvdx.
+        */
+       preempt_disable();
+
+       if (ved_ec_ctx->deblock_cmd_offset == VED_INVALID_OFFSET) {
+               IPVR_DEBUG_WARN("VED: invalid msg offset, abort conceal.\n");
+               goto ec_done;
+       }
+
+       if (fault_region->num_region == 0) {
+               IPVR_DEBUG_VED("VED: no fault region.\n");
+               goto ec_done;
+       }
+
+       width_in_mb = deblock_msg->pic_size.bits.pic_width_mb;
+       height_in_mb = deblock_msg->pic_size.bits.frame_height_mb;
+
+       {
+               int32_t i;
+               for (i = 0; i < fault_region->num_region; i++)
+                       IPVR_DEBUG_VED("[region %d] is %d to %d\n",
+                                        i,
+                                        fault_region->mb_regions[i].start,
+                                        fault_region->mb_regions[i].end);
+               IPVR_DEBUG_VED("MSVDX: MSGID_DEBLOCK:"
+                       " - fence: %08x"
+                       " - flags: %08x - slice_field_type: %08x"
+                       " - operating_mode: %08x"
+                       " - context: %08x - mmu_ptd: %08x"
+                       " - frame_height_mb: %08x - pic_width_mb: %08x"
+                       " - address_a0: %08x - address_a1: %08x"
+                       " - mb_param_address: %08x"
+                       " - ext_stride_a: %08x"
+                       " - address_b0: %08x - address_b1: %08x"
+                       " - alt_output_flags_b: %08x.\n",
+                       deblock_msg->header.bits.msg_fence,
+                       deblock_msg->flag_type.bits.flags,
+                       deblock_msg->flag_type.bits.slice_field_type,
+                       deblock_msg->operating_mode,
+                       deblock_msg->mmu_context.bits.context,
+                       deblock_msg->mmu_context.bits.mmu_ptd,
+                       deblock_msg->pic_size.bits.frame_height_mb,
+                       deblock_msg->pic_size.bits.pic_width_mb,
+                       deblock_msg->address_a0,
+                       deblock_msg->address_a1,
+                       deblock_msg->mb_param_address,
+                       deblock_msg->ext_stride_a,
+                       deblock_msg->address_b0,
+                       deblock_msg->address_b1,
+                       deblock_msg->alt_output_flags_b);
+               IPVR_DEBUG_VED("deblock addr_c0 is      0x%08x\n",
+                                       deblock_msg->address_c0);
+               IPVR_DEBUG_VED("deblock addr_c1 is      0x%08x\n",
+                                       deblock_msg->address_c1);
+       }
+
+       if (unlikely(!width_in_mb || !height_in_mb ||
+               width_in_mb > MAX_SIZE_IN_MB ||
+               height_in_mb > MAX_SIZE_IN_MB)) {
+               IPVR_DEBUG_VED("wrong pic size\n");
+               goto ec_done;
+       }
+
+       cmd = 0;
+       REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+                              DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT,
+                              (height_in_mb * 16) - 1);
+       REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+                              DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH,
+                              (width_in_mb * 16) - 1);
+       VED_CMDPORT_WRITE(dev_priv,
+                                MSVDX_CMDS_DISPLAY_PICTURE_SIZE_OFFSET,
+                                cmd, cmd_space);
+
+       cmd = 0;
+       REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+                              CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT,
+                              (height_in_mb * 16) - 1);
+       REGIO_WRITE_FIELD_LITE(cmd, MSVDX_CMDS,
+                              CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH,
+                              (width_in_mb * 16) - 1);
+       VED_CMDPORT_WRITE(dev_priv,
+                                MSVDX_CMDS_CODED_PICTURE_SIZE_OFFSET,
+                                cmd, cmd_space);
+
+       cmd = deblock_msg->operating_mode;
+       REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+                         CHROMA_FORMAT, 1);
+       REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+                         ASYNC_MODE, 1);
+       REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+                         CODEC_MODE, 3);
+       REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_OPERATING_MODE,
+                         CODEC_PROFILE, 1);
+       VED_CMDPORT_WRITE(dev_priv, MSVDX_CMDS_OPERATING_MODE_OFFSET,
+                         cmd, cmd_space);
+
+       /* dest frame address */
+       VED_CMDPORT_WRITE(dev_priv,
+               MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET,
+                                deblock_msg->address_a0,
+                                cmd_space);
+
+       VED_CMDPORT_WRITE(dev_priv,
+               MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET,
+                                deblock_msg->address_a1,
+                                cmd_space);
+
+       /* conceal frame address */
+       VED_CMDPORT_WRITE(dev_priv,
+               MSVDX_CMDS_REFERENCE_PICTURE_BASE_ADDRESSES_OFFSET,
+                                deblock_msg->address_b0,
+                                cmd_space);
+       VED_CMDPORT_WRITE(dev_priv,
+               MSVDX_CMDS_REFERENCE_PICTURE_BASE_ADDRESSES_OFFSET + 4,
+                                deblock_msg->address_b1,
+                                cmd_space);
+       cmd = 0;
+       REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_SLICE_PARAMS, SLICE_FIELD_TYPE, 2);
+       REGIO_WRITE_FIELD(cmd, MSVDX_CMDS_SLICE_PARAMS, SLICE_CODE_TYPE, 1);
+
+       VED_CMDPORT_WRITE(dev_priv,
+                                MSVDX_CMDS_SLICE_PARAMS_OFFSET,
+                                cmd, cmd_space);
+
+       cmd = deblock_msg->alt_output_flags_b;
+       if ((cmd & 3) != 0) {
+               IPVR_DEBUG_VED("MSVDX: conceal to rotate surface\n");
+       } else {
+               VED_CMDPORT_WRITE(dev_priv,
+                       MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET,
+                                        cmd, cmd_space);
+
+               VED_CMDPORT_WRITE(dev_priv,
+                       MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET,
+                                0, cmd_space);
+
+               VED_CMDPORT_WRITE(dev_priv,
+                       MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET,
+                                0, cmd_space);
+
+               VED_CMDPORT_WRITE(dev_priv,
+                       MSVDX_CMDS_VC1_RANGE_MAPPING_FLAGS_OFFSET,
+                                0, cmd_space);
+       }
+
+       cmd = deblock_msg->ext_stride_a;
+       VED_CMDPORT_WRITE(dev_priv,
+                         MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET,
+                         cmd, cmd_space);
+
+       for (loop = 0; loop < fault_region->num_region; loop++) {
+
+               uint32_t start = fault_region->mb_regions[loop].start;
+               uint32_t end = fault_region->mb_regions[loop].end;
+               uint32_t x, y;
+
+               IPVR_DEBUG_VED("MSVDX: region(%d) is %d~%d\n",
+                       loop, start, end);
+
+               if (conceal_above_row)
+                       start -= width_in_mb;
+               if (end > (width_in_mb * height_in_mb - 1))
+                       end = (width_in_mb * height_in_mb - 1);
+               if (start > end)
+                       start = 0;
+
+               IPVR_DEBUG_VED("MSVDX: modify region(%d) is %d~%d\n",
+                       loop, start, end);
+
+               x = start % width_in_mb;
+               y = start / width_in_mb;
+
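+               /* walk the fault region in raster order, emitting one
+                * conceal command set per macroblock and closing the slice
+                * whenever x wraps to a new row */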
+               for (mb_loop = start; mb_loop <= end; mb_loop++, x++) {
+                       if (x >= width_in_mb) {
+                               x = 0;
+                               y++;
+                       }
+
+                       /* IPVR_DEBUG_VED("MSVDX: concealment (%d,%d)\n",
+                               x, y); */
+                       if ((x == 0) && (mb_loop != start))
+                               VED_CMDPORT_WRITE_FAST(dev_priv,
+                                       MSVDX_CMDS_END_SLICE_PICTURE_OFFSET,
+                                       0, cmd_space);
+                       cmd = 0;
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                                              MSVDX_CMDS_MACROBLOCK_NUMBER,
+                                              MB_CODE_TYPE, 1);
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                                              MSVDX_CMDS_MACROBLOCK_NUMBER,
+                                              MB_NO_X, x);
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                                              MSVDX_CMDS_MACROBLOCK_NUMBER,
+                                              MB_NO_Y, y);
+                       VED_CMDPORT_WRITE_FAST(dev_priv,
+                               MSVDX_CMDS_MACROBLOCK_NUMBER_OFFSET,
+                               cmd, cmd_space);
+                       VED_CMDPORT_WRITE_FAST(dev_priv,
+                               MSVDX_CMDS_MACROBLOCK_RESIDUAL_FORMAT_OFFSET,
+                               0, cmd_space);
+                       cmd = 0;
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                                       MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+                                              REF_INDEX_A_VALID, 1);
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                                       MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+                                              INTER_PRED_BLOCK_SIZE, 0);
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                                       MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+                                              REF_INDEX_A, 0);
+                       REGIO_WRITE_FIELD_LITE(cmd,
+                               MSVDX_CMDS_INTER_BLOCK_PREDICTION,
+                               REF_INDEX_B, 0);
+                       VED_CMDPORT_WRITE_FAST(dev_priv,
+                               MSVDX_CMDS_INTER_BLOCK_PREDICTION_OFFSET,
+                               cmd, cmd_space);
+                       VED_CMDPORT_WRITE_FAST(dev_priv,
+                               MSVDX_CMDS_MOTION_VECTOR_OFFSET,
+                               0, cmd_space);
+               }
+
+               VED_CMDPORT_WRITE(dev_priv,
+                               MSVDX_CMDS_END_SLICE_PICTURE_OFFSET,
+                               0, cmd_space);
+       }
+
+ec_done:
+       /* try to unblock rendec */
+       ret = VED_CMDPORT_WRITE_FAST(dev_priv,
+                               MSVDX_CMDS_END_SLICE_PICTURE_OFFSET,
+                               1, cmd_space);
+
+       fault_region->num_region = 0;
+
+       preempt_enable();
+
+       pm_ret = pm_runtime_put(&dev_priv->dev->platformdev->dev);
+       if (unlikely(pm_ret < 0))
+               IPVR_ERROR("Error put VED power: %d\n", pm_ret);
+
+       IPVR_DEBUG_VED("VED: EC done, unlock msvdx ret %d.\n", ret);
+
+       return;
+}
+
+struct ved_ec_context *ved_find_ec_ctx(struct ved_private *ved_priv,
+                                       struct drm_file *tfile, void *cmd)
+{
+       int32_t i, free_idx;
+       struct ved_ec_context *ec_ctx = NULL;
+       struct fw_deblock_msg *deblock_msg = (struct fw_deblock_msg *)cmd;
+
+       free_idx = -1;
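+       /* find the EC context already owned by tfile; remember the first
+        * free slot so a new context can be claimed when cmd is supplied */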
+       for (i = 0; i < VED_MAX_EC_INSTANCE; i++) {
+               if (ved_priv->ved_ec_ctx[i]->tfile == tfile)
+                       break;
+               else if (free_idx < 0 &&
+                        ved_priv->ved_ec_ctx[i]->tfile == NULL)
+                       free_idx = i;
+       }
+
+       if (i < VED_MAX_EC_INSTANCE)
+               ec_ctx = ved_priv->ved_ec_ctx[i];
+       else if (free_idx >= 0 && cmd) {
+               IPVR_DEBUG_VED("acquire ec ctx idx %d for tfile 0x%08lx.\n",
+                               free_idx, (unsigned long)tfile);
+               ec_ctx = ved_priv->ved_ec_ctx[free_idx];
+               memset(ec_ctx, 0, sizeof(*ec_ctx));
+               ec_ctx->tfile = tfile;
+               ec_ctx->context_id = deblock_msg->mmu_context.bits.context;
+       } else {
+               IPVR_DEBUG_VED("No available ec ctx found.\n");
+       }
+
+       return ec_ctx;
+}
+
+void ved_update_frame_info(struct ved_private *ved_priv,
+                               struct drm_file *tfile, void *cmd)
+{
+
+       int32_t i, free_idx;
+       struct ved_frame_info *frame_info;
+       struct fw_deblock_msg *deblock_msg = (struct fw_deblock_msg *)cmd;
+       uint32_t buffer_handle = deblock_msg->mb_param_address;
+
+       struct ved_ec_context *ec_ctx;
+
+       IPVR_DEBUG_VED(
+               "update frame info (handle 0x%08x) for error concealment\n",
+               buffer_handle);
+
+       ec_ctx = ved_find_ec_ctx(ved_priv, tfile, cmd);
+
+       if (!ec_ctx)
+               return;
+
+       free_idx = -1;
+       for (i = 0; i < MAX_DECODE_BUFFERS; i++) {
+               if (buffer_handle == ec_ctx->frame_info[i].handle)
+                       break;
+               if (free_idx < 0 && ec_ctx->frame_info[i].handle == 0)
+                       free_idx = i;
+       }
+
+       if (i < MAX_DECODE_BUFFERS)
+               frame_info = &ec_ctx->frame_info[i];
+       else if (free_idx >= 0) {
+               IPVR_DEBUG_VED("acquire frame_info slot idx %d\n", free_idx);
+               frame_info = &ec_ctx->frame_info[free_idx];
+       } else {
+               IPVR_DEBUG_VED("%d slots occupied, abort frame_info update\n",
+                               MAX_DECODE_BUFFERS);
+               return;
+       }
+
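+       /*
+        * The low 4 bits of msg_fence are masked off wherever fences are
+        * compared (see ved_fw_error_detected()), so store the masked
+        * value here to keep lookups consistent.
+        */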
+       frame_info->fw_status = 0;
+       frame_info->handle = buffer_handle;
+       frame_info->fence = (deblock_msg->header.bits.msg_fence & (~0xf));
+       frame_info->decode_status.num_region = 0;
+       ec_ctx->cur_frame_info = frame_info;
+}
+
+void ved_backup_cmd(struct ved_private *ved_priv, struct drm_file *tfile,
+                       void *cmd, uint32_t cmd_size,
+                       uint32_t deblock_cmd_offset)
+{
+       struct fw_deblock_msg *deblock_msg = NULL;
+
+       struct ved_ec_context *ec_ctx;
+       union msg_header *header;
+
+       IPVR_DEBUG_VED("backup cmd for ved error concealment\n");
+
+       ec_ctx = ved_find_ec_ctx(ved_priv, tfile, NULL);
+
+       if (!ec_ctx) {
+               IPVR_DEBUG_VED("this is not an ec ctx, abort backup cmd\n");
+               return;
+       }
+
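+       /*
+        * If the caller passed a valid offset, locate the deblock message
+        * inside the command buffer; commands that belong to a different
+        * MMU context are not backed up.
+        */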
+       if (deblock_cmd_offset != VED_INVALID_OFFSET)
+               deblock_msg = (struct fw_deblock_msg *)
+                               (cmd + deblock_cmd_offset);
+
+       if (deblock_msg &&
+           ec_ctx->context_id != deblock_msg->mmu_context.bits.context) {
+               IPVR_DEBUG_VED("backup cmd found a mismatched context id\n");
+               return;
+       }
+
+       ec_ctx->cmd_size = cmd_size;
+       ec_ctx->deblock_cmd_offset = deblock_cmd_offset;
+       memcpy(ec_ctx->unfenced_cmd, cmd, cmd_size);
+       ec_ctx->fence = VED_INVALID_FENCE;
+       header = (union msg_header *)ec_ctx->unfenced_cmd;
+       if (cmd_size)
+               ec_ctx->fence = header->bits.msg_fence;
+       ec_ctx->fence &= (~0xf);
+       IPVR_DEBUG_VED("backup cmd for ved: fence 0x%08x, cmd_size %d\n",
+               ec_ctx->fence, cmd_size);
+}
+
+void ved_mtx_message_dump(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       int32_t i, buf_size, buf_offset;
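+       /*
+        * The BUF_SIZE registers pack the buffer length (in 32-bit words)
+        * into bits 15:0 and the buffer offset, relative to a 0x2000 base,
+        * into bits 31:16. Each loop iteration dumps four words.
+        */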
+       buf_size =
+               VED_REG_READ32(MSVDX_COMMS_TO_HOST_BUF_SIZE) & ((1 << 16) - 1);
+       buf_offset =
+               (VED_REG_READ32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16) + 0x2000;
+
+       IPVR_DEBUG_VED("Dump to HOST message buffer (offset:size)%04x:%04x\n",
+                       buf_offset, buf_size);
+       for (i = 0; i < buf_size; i += 4) {
+               uint32_t reg1, reg2, reg3, reg4;
+               reg1 = VED_REG_READ32(buf_offset + i * 4);
+               reg2 = VED_REG_READ32(buf_offset + i * 4 + 4);
+               reg3 = VED_REG_READ32(buf_offset + i * 4 + 8);
+               reg4 = VED_REG_READ32(buf_offset + i * 4 + 12);
+               IPVR_DEBUG_VED("VED: 0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                               (buf_offset + i * 4), reg1, reg2, reg3, reg4);
+       }
+
+       buf_size =
+               VED_REG_READ32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
+       buf_offset =
+               (VED_REG_READ32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
+
+       IPVR_DEBUG_VED("Dump to MTX message buffer (offset:size)%04x:%04x\n",
+                       buf_offset, buf_size);
+       for (i = 0; i < buf_size; i += 4) {
+               uint32_t reg1, reg2, reg3, reg4;
+               reg1 = VED_REG_READ32(buf_offset + i * 4);
+               reg2 = VED_REG_READ32(buf_offset + i * 4 + 4);
+               reg3 = VED_REG_READ32(buf_offset + i * 4 + 8);
+               reg4 = VED_REG_READ32(buf_offset + i * 4 + 12);
+               IPVR_DEBUG_VED("VED: 0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                               (buf_offset + i * 4), reg1, reg2, reg3, reg4);
+       }
+
+       buf_size = 12;
+       buf_offset = 0xFD0 + 0x2000;
+
+       IPVR_DEBUG_VED("VED: Comm header (offset:size)%04x:%04x\n",
+                       buf_offset, buf_size);
+       for (i = 0; i < buf_size; i += 4) {
+               uint32_t reg1, reg2, reg3, reg4;
+               reg1 = VED_REG_READ32(buf_offset + i * 4);
+               reg2 = VED_REG_READ32(buf_offset + i * 4 + 4);
+               reg3 = VED_REG_READ32(buf_offset + i * 4 + 8);
+               reg4 = VED_REG_READ32(buf_offset + i * 4 + 12);
+               IPVR_DEBUG_VED("VED: 0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                               (buf_offset + i * 4), reg1, reg2, reg3, reg4);
+       }
+
+       IPVR_DEBUG_VED("VED: Error status 0x2cc4: 0x%08x\n",
+                       VED_REG_READ32(0x2cc4));
+}
+
+void ved_fw_error_detected(struct drm_device *dev,
+                               uint32_t fence, uint32_t flags)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       struct ved_ec_context *ved_ec_ctx = NULL;
+       struct ved_frame_info *frame_info = NULL;
+       int32_t i, found = 0;
+
+       if (!(flags & FW_DEVA_ERROR_DETECTED))
+               return;
+
+       /* get the frame_info struct for the error concealment frame */
+       for (i = 0; i < VED_MAX_EC_INSTANCE; i++)
+               if (ved_priv->ved_ec_ctx[i]->fence ==
+                                               (fence & (~0xf))) {
+                       ved_ec_ctx = ved_priv->ved_ec_ctx[i];
+                       found++;
+               }
+       /* ved_mtx_message_dump(dev); */
+       if (!ved_ec_ctx || !(ved_ec_ctx->tfile) || found > 1) {
+               IPVR_DEBUG_VED(
+               "no matching ctx: fence 0x%x, found %d, ctx 0x%08lx\n",
+                       fence, found, (unsigned long)ved_ec_ctx);
+               return;
+       }
+
+       if (ved_ec_ctx->cur_frame_info &&
+               ved_ec_ctx->cur_frame_info->fence == (fence & (~0xf))) {
+               frame_info = ved_ec_ctx->cur_frame_info;
+       } else {
+               if (ved_ec_ctx->cur_frame_info)
+                       IPVR_DEBUG_VED(
+                       "cur_frame_info fence(%x) doesn't match fence (%x).\n",
+                               ved_ec_ctx->cur_frame_info->fence, fence);
+               else
+                       IPVR_DEBUG_VED(
+                       "The pointer ved_ec_ctx->cur_frame_info is null\n");
+               return;
+       }
+
+       if (frame_info->decode_status.num_region) {
+               IPVR_DEBUG_VED("Error already recorded, no need to record again\n");
+               return;
+       }
+
+       IPVR_DEBUG_VED("record error as first fault region.\n");
+       frame_info->decode_status.num_region++;
+       frame_info->decode_status.mb_regions[0].start = 0;
+       frame_info->decode_status.mb_regions[0].end = 0;
+}
diff --git a/drivers/gpu/drm/ipvr/ved_ec.h b/drivers/gpu/drm/ipvr/ved_ec.h
new file mode 100644
index 0000000..c5f9fe6
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_ec.h
@@ -0,0 +1,207 @@
+/**************************************************************************
+ * ved_ec.h: VED error concealment header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Li Zeng <li.zeng at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _VED_EC_H_
+#define _VED_EC_H_
+
+#include "ipvr_drv.h"
+
+#define VED_INVALID_FENCE (0xffff)
+#define VED_INVALID_OFFSET (0xffffffff)
+#define VED_EC_ROLLBACK (9)
+
+struct ved_ec_context {
+       struct drm_file *tfile; /* protected by cmdbuf_mutex */
+       uint32_t context_id;
+       struct ved_frame_info frame_info[MAX_DECODE_BUFFERS];
+       struct ved_frame_info *cur_frame_info;
+       int32_t frame_idx;
+
+       /* 12 render msg + 1 deblock msg
+        * 12 * 20 + 1 * 48 = 288;
+       */
+       uint8_t unfenced_cmd[300];
+       uint32_t cmd_size;
+       uint32_t deblock_cmd_offset;
+       uint16_t fence;
+       struct ved_decode_status decode_status;
+};
+
+#define MSVDX_CMDS_BASE 0x1000
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_OFFSET (0x0000)
+
+/* MSVDX_CMDS, DISPLAY_PICTURE_SIZE, DISPLAY_PICTURE_HEIGHT */
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT_MASK \
+       (0x00FFF000)
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_HEIGHT_SHIFT (12)
+
+/* MSVDX_CMDS, DISPLAY_PICTURE_SIZE, DISPLAY_PICTURE_WIDTH */
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH_MASK (0x00000FFF)
+#define MSVDX_CMDS_DISPLAY_PICTURE_SIZE_DISPLAY_PICTURE_WIDTH_SHIFT (0)
+
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_OFFSET (0x0004)
+
+/* MSVDX_CMDS, CODED_PICTURE_SIZE, CODED_PICTURE_HEIGHT */
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT_MASK (0x00FFF000)
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_HEIGHT_SHIFT (12)
+
+/* MSVDX_CMDS, CODED_PICTURE_SIZE, CODED_PICTURE_WIDTH */
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH_MASK (0x00000FFF)
+#define MSVDX_CMDS_CODED_PICTURE_SIZE_CODED_PICTURE_WIDTH_SHIFT (0)
+
+#define MSVDX_CMDS_OPERATING_MODE_OFFSET (0x0008)
+
+/* MSVDX_CMDS, OPERATING_MODE, RPR_ENABLE */
+#define MSVDX_CMDS_OPERATING_MODE_RPR_ENABLE_MASK (0x20000000)
+#define MSVDX_CMDS_OPERATING_MODE_RPR_ENABLE_SHIFT (29)
+
+/* MSVDX_CMDS, OPERATING_MODE, USE_EXT_ROW_STRIDE */
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_MASK (0x10000000)
+#define MSVDX_CMDS_OPERATING_MODE_USE_EXT_ROW_STRIDE_SHIFT (28)
+
+/* MSVDX_CMDS, OPERATING_MODE, CHROMA_INTERLEAVED */
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_MASK (0x08000000)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_INTERLEAVED_SHIFT (27)
+/* MSVDX_CMDS, OPERATING_MODE, ROW_STRIDE */
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_MASK (0x07000000)
+#define MSVDX_CMDS_OPERATING_MODE_ROW_STRIDE_SHIFT (24)
+
+/* MSVDX_CMDS, OPERATING_MODE, CODEC_PROFILE */
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_MASK (0x00300000)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_PROFILE_SHIFT (20)
+
+/* MSVDX_CMDS, OPERATING_MODE, CODEC_MODE */
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_MASK (0x000F0000)
+#define MSVDX_CMDS_OPERATING_MODE_CODEC_MODE_SHIFT (16)
+
+/* MSVDX_CMDS, OPERATING_MODE, ASYNC_MODE */
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_MASK (0x00006000)
+#define MSVDX_CMDS_OPERATING_MODE_ASYNC_MODE_SHIFT (13)
+
+/* MSVDX_CMDS, OPERATING_MODE, CHROMA_FORMAT */
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_MASK (0x00001000)
+#define MSVDX_CMDS_OPERATING_MODE_CHROMA_FORMAT_SHIFT (12)
+
+/* MSVDX_CMDS, OPERATING_MODE, INTERLACED */
+#define MSVDX_CMDS_OPERATING_MODE_INTERLACED_MASK (0x00000800)
+#define MSVDX_CMDS_OPERATING_MODE_INTERLACED_SHIFT (11)
+
+/* MSVDX_CMDS, OPERATING_MODE, OVERLAP */
+#define MSVDX_CMDS_OPERATING_MODE_OVERLAP_MASK (0x00000400)
+#define MSVDX_CMDS_OPERATING_MODE_OVERLAP_SHIFT (10)
+
+/* MSVDX_CMDS, OPERATING_MODE, PIC_CONDOVER */
+#define MSVDX_CMDS_OPERATING_MODE_PIC_CONDOVER_MASK (0x00000300)
+#define MSVDX_CMDS_OPERATING_MODE_PIC_CONDOVER_SHIFT (8)
+/* MSVDX_CMDS, OPERATING_MODE, DEBLOCK_STRENGTH */
+#define MSVDX_CMDS_OPERATING_MODE_DEBLOCK_STRENGTH_MASK (0x000000E0)
+#define MSVDX_CMDS_OPERATING_MODE_DEBLOCK_STRENGTH_SHIFT (5)
+
+/* MSVDX_CMDS, OPERATING_MODE, PIC_QUANT */
+#define MSVDX_CMDS_OPERATING_MODE_PIC_QUANT_MASK (0x0000001F)
+#define MSVDX_CMDS_OPERATING_MODE_PIC_QUANT_SHIFT (0)
+
+#define MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET (0x000C)
+#define MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET (0x0010)
+
+#define MSVDX_CMDS_REFERENCE_PICTURE_BASE_ADDRESSES_OFFSET (0x0100)
+
+#define MSVDX_CMDS_SLICE_PARAMS_OFFSET (0x0400)
+
+/* MSVDX_CMDS, SLICE_PARAMS, SLICE_FIELD_TYPE */
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_FIELD_TYPE_MASK (0x0000000C)
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_FIELD_TYPE_SHIFT (2)
+
+
+/* MSVDX_CMDS, SLICE_PARAMS, SLICE_CODE_TYPE */
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_CODE_TYPE_MASK (0x00000003)
+#define MSVDX_CMDS_SLICE_PARAMS_SLICE_CODE_TYPE_SHIFT (0)
+
+#define MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET (0x003C)
+
+#define MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET (0x0028)
+#define MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET (0x002C)
+#define MSVDX_CMDS_VC1_RANGE_MAPPING_FLAGS_OFFSET (0x0030)
+
+#define MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET (0x0040)
+
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_OFFSET (0x0408)
+
+/* MSVDX_CMDS, MACROBLOCK_NUMBER, MB_CODE_TYPE */
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_CODE_TYPE_MASK (0x00030000)
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_CODE_TYPE_SHIFT (16)
+
+/* MSVDX_CMDS, MACROBLOCK_NUMBER, MB_NO_Y */
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_Y_MASK (0x0000FF00)
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_Y_SHIFT (8)
+
+/* MSVDX_CMDS, MACROBLOCK_NUMBER, MB_NO_X */
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_X_MASK (0x000000FF)
+#define MSVDX_CMDS_MACROBLOCK_NUMBER_MB_NO_X_SHIFT (0)
+
+#define MSVDX_CMDS_MACROBLOCK_RESIDUAL_FORMAT_OFFSET (0x0418)
+
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_OFFSET (0x0430)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, REF_INDEX_A_VALID */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_VALID_MASK (0x00000020)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_VALID_SHIFT (5)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, INTER_PRED_BLOCK_SIZE */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_INTER_PRED_BLOCK_SIZE_MASK (0x70000)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_INTER_PRED_BLOCK_SIZE_SHIFT (16)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, REF_INDEX_A */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_MASK (0x0000000F)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_A_SHIFT (0)
+
+/* MSVDX_CMDS, INTER_BLOCK_PREDICTION, REF_INDEX_B */
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_B_MASK (0x00000F00)
+#define MSVDX_CMDS_INTER_BLOCK_PREDICTION_REF_INDEX_B_SHIFT (8)
+
+#define MSVDX_CMDS_MOTION_VECTOR_OFFSET (0x0500)
+
+#define MSVDX_CORE_CR_MSVDX_COMMAND_SPACE_OFFSET (0x0028)
+
+#define MSVDX_CORE_BASE        (0x600)
+
+void ved_update_frame_info(struct ved_private *ved_priv,
+                               struct drm_file *tfile, void *cmd);
+
+void ved_backup_cmd(struct ved_private *ved_priv,
+                       struct drm_file *tfile, void *cmd,
+                       uint32_t cmd_size, uint32_t deblock_cmd_offset);
+
+void ved_mtx_message_dump(struct drm_device *dev);
+
+void ved_do_concealment(struct work_struct *work);
+
+void ved_fw_error_detected(struct drm_device *dev,
+                               uint32_t fence, uint32_t flags);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ved_fw.c b/drivers/gpu/drm/ipvr/ved_fw.c
new file mode 100644
index 0000000..47fbad4
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_fw.c
@@ -0,0 +1,660 @@
+/**************************************************************************
+ * ved_fw.c: VED initialization and mtx-firmware upload
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#include "ved_fw.h"
+#include "ipvr_gem.h"
+#include "ved_cmd.h"
+#include "ved_reg.h"
+#include "ved_init.h"
+#include "ipvr_buffer.h"
+#include "ipvr_mmu.h"
+#include <linux/firmware.h>
+#include <asm/cacheflush.h>
+#include <linux/module.h>
+
+#define UPLOAD_FW_BY_DMA 1
+#define STACKGUARDWORD                 0x10101010
+#define MSVDX_MTX_DATA_LOCATION                0x82880000
+#define UNINITILISE_MEM                        0xcdcdcdcd
+#define FIRMWARE_NAME "msvdx_fw_mfld_DE2.0.bin"
+
+/* VED FW header */
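+/*
+ * The firmware image is this header followed by text_size words of text
+ * and data_size words of data (see ved_get_fw_bo() below).
+ */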
+struct ved_fw {
+       uint32_t ver;
+       uint32_t text_size;
+       uint32_t data_size;
+       uint32_t data_location;
+};
+
+#if UPLOAD_FW_BY_DMA
+
+static void ved_get_mtx_control_from_dash(struct drm_ipvr_private *dev_priv)
+{
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       int32_t count = 0;
+       uint32_t reg_val = 0;
+
+       REGIO_WRITE_FIELD(reg_val, MSVDX_MTX_DEBUG, MTX_DBG_IS_SLAVE, 1);
+       REGIO_WRITE_FIELD(reg_val, MSVDX_MTX_DEBUG, MTX_DBG_GPIO_IN, 0x02);
+       VED_REG_WRITE32(reg_val, MSVDX_MTX_DEBUG_OFFSET);
+
+       do {
+               reg_val = VED_REG_READ32(MSVDX_MTX_DEBUG_OFFSET);
+               count++;
+       } while (((reg_val & 0x18) != 0) && count < 50000);
+
+       if (count >= 50000)
+               IPVR_DEBUG_VED("VED: timeout in get_mtx_control_from_dash.\n");
+
+       /* Save the access control register...*/
+       ved_priv->ved_dash_access_ctrl =
+               VED_REG_READ32(MTX_RAM_ACCESS_CONTROL_OFFSET);
+}
+
+static void
+ved_release_mtx_control_from_dash(struct drm_ipvr_private *dev_priv)
+{
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       /* restore access control */
+       VED_REG_WRITE32(ved_priv->ved_dash_access_ctrl,
+                       MTX_RAM_ACCESS_CONTROL_OFFSET);
+       /* release bus */
+       VED_REG_WRITE32(0x4, MSVDX_MTX_DEBUG_OFFSET);
+}
+
+/* dump msvdx-related registers to help debug fw upload failures */
+static void
+ved_setup_fw_dump(struct drm_ipvr_private *dev_priv, uint32_t dma_channel)
+{
+       IPVR_DEBUG_REG("dump registers during fw upload for debug:\n");
+       /* for DMAC REGISTER */
+       IPVR_DEBUG_REG("MTX_SYSC_CDMAA is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_CDMAA_OFFSET));
+       IPVR_DEBUG_REG("MTX_SYSC_CDMAC value is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_CDMAC_OFFSET));
+       IPVR_DEBUG_REG("DMAC_SETUP value is 0x%x\n",
+                       VED_REG_READ32(DMAC_DMAC_SETUP_OFFSET + dma_channel));
+       IPVR_DEBUG_REG("DMAC_DMAC_COUNT value is 0x%x\n",
+                       VED_REG_READ32(DMAC_DMAC_COUNT_OFFSET + dma_channel));
+       IPVR_DEBUG_REG("DMAC_DMAC_PERIPH_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(DMAC_DMAC_PERIPH_OFFSET + dma_channel));
+       IPVR_DEBUG_REG("DMAC_DMAC_PERIPHERAL_ADDR value is 0x%x\n",
+                       VED_REG_READ32(DMAC_DMAC_PERIPHERAL_ADDR_OFFSET +
+                                      dma_channel));
+       IPVR_DEBUG_REG("MSVDX_CONTROL value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_CONTROL_OFFSET));
+       IPVR_DEBUG_REG("DMAC_DMAC_IRQ_STAT value is 0x%x\n",
+                       VED_REG_READ32(DMAC_DMAC_IRQ_STAT_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_MMU_CONTROL0 value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_MMU_CONTROL0_OFFSET));
+       IPVR_DEBUG_REG("DMAC_DMAC_COUNT (re-read) value is 0x%x\n",
+                       VED_REG_READ32(DMAC_DMAC_COUNT_OFFSET + dma_channel));
+
+       /* for MTX REGISTER */
+       IPVR_DEBUG_REG("MTX_ENABLE_OFFSET is 0x%x\n",
+                       VED_REG_READ32(MTX_ENABLE_OFFSET));
+       IPVR_DEBUG_REG("MTX_KICK_INPUT_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_KICK_INPUT_OFFSET));
+       IPVR_DEBUG_REG("MTX_REG_READ_WRITE_REQUEST_OFFSET value is 0x%x\n",
+               VED_REG_READ32(MTX_REGISTER_READ_WRITE_REQUEST_OFFSET));
+       IPVR_DEBUG_REG("MTX_RAM_ACCESS_CONTROL_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_RAM_ACCESS_CONTROL_OFFSET));
+       IPVR_DEBUG_REG("MTX_RAM_ACCESS_STATUS_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_RAM_ACCESS_STATUS_OFFSET));
+       IPVR_DEBUG_REG("MTX_SYSC_TIMERDIV_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_TIMERDIV_OFFSET));
+       IPVR_DEBUG_REG("MTX_SYSC_CDMAC_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_CDMAC_OFFSET));
+       IPVR_DEBUG_REG("MTX_SYSC_CDMAA_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_CDMAA_OFFSET));
+       IPVR_DEBUG_REG("MTX_SYSC_CDMAS0_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_CDMAS0_OFFSET));
+       IPVR_DEBUG_REG("MTX_SYSC_CDMAT_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MTX_SYSC_CDMAT_OFFSET));
+
+       /* for MSVDX CORE REGISTER */
+       IPVR_DEBUG_REG("MSVDX_CONTROL_OFFSET is 0x%x\n",
+                       VED_REG_READ32(MSVDX_CONTROL_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_INTERRUPT_CLEAR_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_INTERRUPT_CLEAR_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_INTERRUPT_STATUS_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_HOST_INTERRUPT_ENABLE_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_HOST_INTERRUPT_ENABLE_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_MAN_CLK_ENABLE_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_MAN_CLK_ENABLE_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_CORE_ID_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_CORE_ID_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_MMU_STATUS_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_MMU_STATUS_OFFSET));
+       IPVR_DEBUG_REG("FE_MSVDX_WDT_CONTROL_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(FE_MSVDX_WDT_CONTROL_OFFSET));
+       IPVR_DEBUG_REG("FE_MSVDX_WDTIMER_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(FE_MSVDX_WDTIMER_OFFSET));
+       IPVR_DEBUG_REG("BE_MSVDX_WDT_CONTROL_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(BE_MSVDX_WDT_CONTROL_OFFSET));
+       IPVR_DEBUG_REG("BE_MSVDX_WDTIMER_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(BE_MSVDX_WDTIMER_OFFSET));
+
+       /* for MSVDX RENDEC REGISTER */
+       IPVR_DEBUG_REG("VEC_SHIFTREG_CONTROL_OFFSET is 0x%x\n",
+                       VED_REG_READ32(VEC_SHIFTREG_CONTROL_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_CONTROL0_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_CONTROL0_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_BUFFER_SIZE_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_BUFFER_SIZE_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_BASE_ADDR0_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_BASE_ADDR0_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_BASE_ADDR1_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_BASE_ADDR1_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_READ_DATA_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_READ_DATA_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_CONTEXT0_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_CONTEXT0_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_RENDEC_CONTEXT1_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_RENDEC_CONTEXT1_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_CMDS_END_SLICE_PICTURE_OFFSET value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_CMDS_END_SLICE_PICTURE_OFFSET));
+
+       IPVR_DEBUG_REG("MSVDX_MMU_MEM_REQ value is 0x%x\n",
+                       VED_REG_READ32(MSVDX_MMU_MEM_REQ_OFFSET));
+       IPVR_DEBUG_REG("MSVDX_SYS_MEMORY_DEBUG2 value is 0x%x\n",
+                       VED_REG_READ32(0x6fc));
+}
+
+static void ved_upload_fw(struct drm_ipvr_private *dev_priv,
+                               uint32_t address, const uint32_t words)
+{
+       uint32_t reg_val = 0;
+       uint32_t cmd;
+       uint32_t uCountReg, offset, mmu_ptd;
+       uint32_t size = words * 4; /* byte count */
+       uint32_t dma_channel = 0; /* Setup a Simple DMA for Ch0 */
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       IPVR_DEBUG_VED("VED: Upload firmware by DMA.\n");
+       ved_get_mtx_control_from_dash(dev_priv);
+
+       /*
+        * dma transfers to/from the mtx have to be 32-bit aligned and
+        * in multiples of 32 bits
+        */
+       VED_REG_WRITE32(address, MTX_SYSC_CDMAA_OFFSET);
+
+       /* burst size in multiples of 64 bits (allowed values are 2 or 4) */
+       REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, BURSTSIZE, 4);
+       /* false means write to mtx mem, true means read from mtx mem */
+       REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, RNW, 0);
+       /* begin transfer */
+       REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, ENABLE, 1);
+       /* specifies transfer size of the DMA operation by 32-bit words */
+       REGIO_WRITE_FIELD_LITE(reg_val, MTX_SYSC_CDMAC, LENGTH, words);
+       VED_REG_WRITE32(reg_val, MTX_SYSC_CDMAC_OFFSET);
+
+       /* toggle channel 0 usage between mtx and other msvdx peripherals */
+       {
+               reg_val = VED_REG_READ32(MSVDX_CONTROL_OFFSET);
+               REGIO_WRITE_FIELD(reg_val, MSVDX_CONTROL, DMAC_CH0_SELECT,  0);
+               VED_REG_WRITE32(reg_val, MSVDX_CONTROL_OFFSET);
+       }
+
+       /* Clear the DMAC Stats */
+       VED_REG_WRITE32(0, DMAC_DMAC_IRQ_STAT_OFFSET + dma_channel);
+
+       offset = ved_priv->fw_offset;
+       IPVR_DEBUG_VED("fw gpu offset is 0x%x.\n", offset);
+
+       /* use bank 0 */
+       cmd = 0;
+       VED_REG_WRITE32(cmd, MSVDX_MMU_BANK_INDEX_OFFSET);
+
+       /* Write PTD to mmu base 0 */
+       mmu_ptd = ipvr_get_default_pd_addr(dev_priv->mmu);
+       VED_REG_WRITE32(mmu_ptd, MSVDX_MMU_DIR_LIST_BASE_OFFSET + 0);
+       IPVR_DEBUG_VED("mmu_ptd is 0x%08x.\n", mmu_ptd);
+
+       /* Invalidate */
+       reg_val = VED_REG_READ32(MSVDX_MMU_CONTROL0_OFFSET);
+       reg_val &= ~0xf;
+       REGIO_WRITE_FIELD(reg_val, MSVDX_MMU_CONTROL0, MMU_INVALDC, 1);
+       VED_REG_WRITE32(reg_val, MSVDX_MMU_CONTROL0_OFFSET);
+
+       VED_REG_WRITE32(offset, DMAC_DMAC_SETUP_OFFSET + dma_channel);
+
+       /* Only use a single dma - assert that this is valid */
+       if ((size / 4) >= (1 << 15)) {
+               IPVR_ERROR("DMA size beyond limit, abort firmware upload.\n");
+               return;
+       }
+
+       uCountReg = PSB_DMAC_VALUE_COUNT(PSB_DMAC_BSWAP_NO_SWAP, 0,
+                                        PSB_DMAC_DIR_MEM_TO_PERIPH, 0,
+                                        (size / 4));
+       /* Set the number of bytes to dma */
+       VED_REG_WRITE32(uCountReg, DMAC_DMAC_COUNT_OFFSET + dma_channel);
+
+       cmd = PSB_DMAC_VALUE_PERIPH_PARAM(PSB_DMAC_ACC_DEL_0,
+                                         PSB_DMAC_INCR_OFF,
+                                         PSB_DMAC_BURST_2);
+       VED_REG_WRITE32(cmd, DMAC_DMAC_PERIPH_OFFSET + dma_channel);
+
+       /* Set destination port for dma */
+       cmd = 0;
+       REGIO_WRITE_FIELD(cmd, DMAC_DMAC_PERIPHERAL_ADDR, ADDR,
+                         MTX_SYSC_CDMAT_OFFSET);
+       VED_REG_WRITE32(cmd, DMAC_DMAC_PERIPHERAL_ADDR_OFFSET + dma_channel);
+
+
+       /* Finally, rewrite the count register with the enable bit set */
+       VED_REG_WRITE32(uCountReg | DMAC_DMAC_COUNT_EN_MASK,
+                       DMAC_DMAC_COUNT_OFFSET + dma_channel);
+
+       /* Wait for all to be done */
+       if (ved_wait_for_register(dev_priv,
+                                 DMAC_DMAC_IRQ_STAT_OFFSET + dma_channel,
+                                 DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK,
+                                 DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK,
+                                 2000000, 5)) {
+               ved_setup_fw_dump(dev_priv, dma_channel);
+               ved_release_mtx_control_from_dash(dev_priv);
+               return;
+       }
+
+       /* Assert that the MTX DMA port is all done as well */
+       if (ved_wait_for_register(dev_priv,
+                       MTX_SYSC_CDMAS0_OFFSET,
+                       1, 1, 2000000, 5)) {
+               ved_release_mtx_control_from_dash(dev_priv);
+               return;
+       }
+
+       ved_release_mtx_control_from_dash(dev_priv);
+
+       IPVR_DEBUG_VED("VED: Upload done\n");
+}
+
+#else
+
+static void ved_upload_fw(struct drm_ipvr_private *dev_priv,
+                               const uint32_t data_mem,
+                               uint32_t ram_bank_size, uint32_t address,
+                               const uint32_t words,
+                               const uint32_t * const data)
+{
+       uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
+       uint32_t access_ctrl;
+
+       IPVR_DEBUG_VED("VED: Upload firmware by register interface.\n");
+       /* Save the access control register... */
+       access_ctrl = VED_REG_READ32(MTX_RAM_ACCESS_CONTROL_OFFSET);
+
+       /* Wait for MCMSTAT to become idle (value 1) */
+       ved_wait_for_register(dev_priv, MTX_RAM_ACCESS_STATUS_OFFSET,
+                             1,        /* Required Value */
+                             0xffffffff, /* Enables */
+                             2000000, 5);
+
+       for (loop = 0; loop < words; loop++) {
+               ram_id = data_mem + (address / ram_bank_size);
+               if (ram_id != cur_bank) {
+                       addr = address >> 2;
+                       ctrl = 0;
+                       REGIO_WRITE_FIELD_LITE(ctrl,
+                                              MTX_RAM_ACCESS_CONTROL,
+                                              MTX_MCMID, ram_id);
+                       REGIO_WRITE_FIELD_LITE(ctrl,
+                                              MTX_RAM_ACCESS_CONTROL,
+                                              MTX_MCM_ADDR, addr);
+                       REGIO_WRITE_FIELD_LITE(ctrl,
+                                              MTX_RAM_ACCESS_CONTROL,
+                                              MTX_MCMAI, 1);
+                       VED_REG_WRITE32(ctrl, MTX_RAM_ACCESS_CONTROL_OFFSET);
+                       cur_bank = ram_id;
+               }
+               address += 4;
+
+               VED_REG_WRITE32(data[loop],
+                               MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET);
+
+               /* Wait for MCMSTAT to become idle (value 1) */
+               ved_wait_for_register(dev_priv, MTX_RAM_ACCESS_STATUS_OFFSET,
+                                     1,        /* Required Value */
+                                     0xffffffff, /* Enables */
+                                     2000000, 5);
+       }
+       IPVR_DEBUG_VED("VED: Upload done\n");
+
+       /* Restore the access control register... */
+       VED_REG_WRITE32(access_ctrl, MTX_RAM_ACCESS_CONTROL_OFFSET);
+}
+
+#endif
+
+static int32_t ved_get_fw_bo(struct drm_device *dev,
+                                  const struct firmware **raw, char *name)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       int32_t rc, fw_size;
+       void *ptr = NULL;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       void *fw_bo_addr = NULL;
+       uint32_t *last_word;
+       struct ved_fw *fw;
+
+       rc = request_firmware(raw, name, &dev->platformdev->dev);
+       if (*raw == NULL || rc < 0) {
+               IPVR_ERROR("VED: %s request_firmware failed: Reason %d.\n",
+                         name, rc);
+               return 1;
+       }
+
+       if ((*raw)->size < sizeof(struct ved_fw)) {
+               IPVR_ERROR("VED: %s is not the correct size (%zd).\n",
+                         name, (*raw)->size);
+               release_firmware(*raw);
+               return 1;
+       }
+
+       ptr = (void *)((*raw))->data;
+       if (!ptr) {
+               IPVR_ERROR("VED: Failed to load %s.\n", name);
+               release_firmware(*raw);
+               return 1;
+       }
+
+       /* another sanity check... */
+       fw_size = sizeof(struct ved_fw) +
+                 sizeof(uint32_t) * ((struct ved_fw *) ptr)->text_size +
+                 sizeof(uint32_t) * ((struct ved_fw *) ptr)->data_size;
+       if ((*raw)->size < fw_size) {
+               IPVR_ERROR("VED: %s is not the correct size (%zd).\n",
+                         name, (*raw)->size);
+               release_firmware(*raw);
+               return 1;
+       }
+
+       fw_bo_addr = ipvr_gem_object_vmap(ved_priv->fw_bo);
+       if (IS_ERR(fw_bo_addr)) {
+               IPVR_ERROR("VED: vmap failed for fw buffer.\n");
+               release_firmware(*raw);
+               return 1;
+       }
+
+       fw = (struct ved_fw *)ptr;
+       memset(fw_bo_addr, UNINITILISE_MEM, ved_priv->mtx_mem_size);
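+       /*
+        * Text is copied to the start of MTX memory; data is copied to
+        * its link-time location, expressed relative to
+        * MSVDX_MTX_DATA_LOCATION.
+        */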
+       memcpy(fw_bo_addr, ptr + sizeof(struct ved_fw),
+              sizeof(uint32_t) * fw->text_size);
+       memcpy(fw_bo_addr + (fw->data_location - MSVDX_MTX_DATA_LOCATION),
+              (void *)ptr + sizeof(struct ved_fw) +
+              sizeof(uint32_t) * fw->text_size,
+              sizeof(uint32_t) * fw->data_size);
+       last_word = (uint32_t *)(fw_bo_addr + ved_priv->mtx_mem_size - 4);
+       /*
+        * Write a known value to the last word in MTX memory.
+        * Useful for detecting stack overrun.
+        */
+       *last_word = STACKGUARDWORD;
+
+       vunmap(fw_bo_addr);
+       IPVR_DEBUG_VED("VED: releasing firmware resources.\n");
+       IPVR_DEBUG_VED("VED: Load firmware into BO successfully.\n");
+       release_firmware(*raw);
+       return rc;
+}
+
+static uint32_t *
+ved_get_fw(struct drm_device *dev, const struct firmware **raw, char *name)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       int32_t rc, fw_size;
+       void *ptr = NULL;
+       struct ved_fw *fw;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       rc = request_firmware(raw, name, &dev->platformdev->dev);
+       if (*raw == NULL || rc < 0) {
+               IPVR_ERROR("VED: %s request_firmware failed: Reason %d\n",
+                         name, rc);
+               return NULL;
+       }
+
+       if ((*raw)->size < sizeof(struct ved_fw)) {
+               IPVR_ERROR("VED: %s is not the correct size (%zd)\n",
+                         name, (*raw)->size);
+               release_firmware(*raw);
+               return NULL;
+       }
+
+       ptr = (void *)((*raw))->data;
+       if (!ptr) {
+               IPVR_ERROR("VED: Failed to load %s.\n", name);
+               release_firmware(*raw);
+               return NULL;
+       }
+       fw = (struct ved_fw *)ptr;
+
+       /* another sanity check... */
+       fw_size = sizeof(struct ved_fw) +
+                 sizeof(uint32_t) * fw->text_size +
+                 sizeof(uint32_t) * fw->data_size;
+       if ((*raw)->size < fw_size) {
+               IPVR_ERROR("VED: %s is not the correct size (%zd).\n",
+                          name, (*raw)->size);
+               release_firmware(*raw);
+               return NULL;
+       }
+
+       ved_priv->ved_fw_ptr = kzalloc(fw_size, GFP_KERNEL);
+       if (ved_priv->ved_fw_ptr == NULL)
+               IPVR_ERROR("VED: allocate FW buffer failed.\n");
+       else {
+               memcpy(ved_priv->ved_fw_ptr, ptr, fw_size);
+               ved_priv->ved_fw_size = fw_size;
+       }
+
+       IPVR_DEBUG_VED("VED: releasing firmware resources.\n");
+       release_firmware(*raw);
+
+       return ved_priv->ved_fw_ptr;
+}
+
+static void
+ved_write_mtx_core_reg(struct drm_ipvr_private *dev_priv,
+                              const uint32_t core_reg, const uint32_t val)
+{
+       uint32_t reg = 0;
+
+       /* Put data in MTX_RW_DATA */
+       VED_REG_WRITE32(val, MTX_REGISTER_READ_WRITE_DATA_OFFSET);
+
+       /* DREADY is set to 0 and request a write */
+       reg = core_reg;
+       REGIO_WRITE_FIELD_LITE(reg, MTX_REGISTER_READ_WRITE_REQUEST,
+                              MTX_RNW, 0);
+       REGIO_WRITE_FIELD_LITE(reg, MTX_REGISTER_READ_WRITE_REQUEST,
+                              MTX_DREADY, 0);
+       VED_REG_WRITE32(reg, MTX_REGISTER_READ_WRITE_REQUEST_OFFSET);
+
+       ved_wait_for_register(dev_priv,
+                             MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
+                             MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+                             MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
+                             2000000, 5);
+}
+
+int32_t ved_alloc_fw_bo(struct drm_ipvr_private *dev_priv)
+{
+       uint32_t core_rev;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
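+       /*
+        * MTX RAM size depends on the core revision: cores below rev
+        * 0x020000 expose 16KiB, newer cores 56KiB.
+        */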
+       core_rev = VED_REG_READ32(MSVDX_CORE_REV_OFFSET);
+
+       if ((core_rev & 0xffffff) < 0x020000)
+               ved_priv->mtx_mem_size = 16 * 1024;
+       else
+               ved_priv->mtx_mem_size = 56 * 1024;
+
+       IPVR_DEBUG_INIT("VED: MTX mem size is 0x%08x bytes, "
+                       "allocating firmware BO of size 0x%08x.\n",
+                       ved_priv->mtx_mem_size,
+                       ved_priv->mtx_mem_size + 4096);
+
+       /* Allocate the new object */
+       ved_priv->fw_bo = ipvr_gem_obj_create_and_bind(dev_priv->dev,
+                                               ved_priv->mtx_mem_size + 4096);
+       if (ved_priv->fw_bo == NULL) {
+               IPVR_ERROR("VED: failed to allocate fw buffer.\n");
+               ved_priv->fw_bo = NULL;
+               return -ENOMEM;
+       }
+       ved_priv->fw_offset = ipvr_gem_obj_mmu_offset(ved_priv->fw_bo);
+       if (IPVR_IS_ERR(ved_priv->fw_offset)) {
+               ved_priv->fw_bo = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+int32_t ved_setup_fw(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint32_t ram_bank_size;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       int32_t ret = 0;
+       struct ved_fw *fw;
+       uint32_t *fw_ptr = NULL;
+       uint32_t *text_ptr = NULL;
+       uint32_t *data_ptr = NULL;
+       const struct firmware *raw = NULL;
+
+       /* TODO: assert the clock is on - if not, turn it on to upload code */
+       IPVR_DEBUG_VED("VED: ved_setup_fw.\n");
+
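+       /*
+        * Bring-up sequence: enable clocks, reset the MTX, initialize the
+        * host/MTX comms area, upload the firmware image, set the start
+        * PC, enable the MTX thread, then wait for the firmware to write
+        * back its signature.
+        */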
+       ved_set_clocks(dev_priv->dev, clk_enable_all);
+
+       /* Reset MTX */
+       VED_REG_WRITE32(MTX_SOFT_RESET_MTX_RESET_MASK,
+                       MTX_SOFT_RESET_OFFSET);
+
+       VED_REG_WRITE32(FIRMWAREID, MSVDX_COMMS_FIRMWARE_ID);
+
+       VED_REG_WRITE32(0, MSVDX_COMMS_ERROR_TRIG);
+       VED_REG_WRITE32(199, MTX_SYSC_TIMERDIV_OFFSET); /* MTX_SYSC_TIMERDIV */
+       VED_REG_WRITE32(0, MSVDX_EXT_FW_ERROR_STATE); /* EXT_FW_ERROR_STATE */
+       VED_REG_WRITE32(0, MSVDX_COMMS_MSG_COUNTER);
+       VED_REG_WRITE32(0, MSVDX_COMMS_SIGNATURE);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_FW_STATUS);
+       VED_REG_WRITE32(DSIABLE_IDLE_GPIO_SIG |
+                       DSIABLE_Auto_CLOCK_GATING |
+                       RETURN_VDEB_DATA_IN_COMPLETION |
+                       NOT_ENABLE_ON_HOST_CONCEALMENT,
+                       MSVDX_COMMS_OFFSET_FLAGS);
+       VED_REG_WRITE32(0, MSVDX_COMMS_SIGNATURE);
+
+       /* read register bank size */
+       {
+               uint32_t bank_size, reg;
+               reg = VED_REG_READ32(MSVDX_MTX_RAM_BANK_OFFSET);
+               bank_size =
+                       REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
+                                        MTX_RAM_BANK_SIZE);
+               ram_bank_size = (uint32_t)(1 << (bank_size + 2));
+       }
+
+       IPVR_DEBUG_VED("VED: RAM bank size = %d bytes\n", ram_bank_size);
+
+       /* if FW already loaded from storage */
+       if (ved_priv->ved_fw_ptr) {
+               fw_ptr = ved_priv->ved_fw_ptr;
+       } else {
+               fw_ptr = ved_get_fw(dev, &raw, FIRMWARE_NAME);
+               IPVR_DEBUG_VED("VED: load msvdx_fw_mfld_DE2.0.bin by udevd\n");
+       }
+       if (!fw_ptr) {
+               IPVR_ERROR("VED: load ved_fw.bin failed, is udevd running?\n");
+               ret = 1;
+               goto out;
+       }
+
+       if (!ved_priv->fw_loaded_to_bo) { /* Load firmware into BO */
+               IPVR_DEBUG_VED("MSVDX: load ved_fw.bin by udevd into BO\n");
+               ret = ved_get_fw_bo(dev, &raw, FIRMWARE_NAME);
+               if (ret) {
+                       IPVR_ERROR("VED: failed to call ved_get_fw_bo.\n");
+                       ret = 1;
+                       goto out;
+               }
+               ved_priv->fw_loaded_to_bo = 1;
+       }
+
+       fw = (struct ved_fw *) fw_ptr;
+
+       /* need to check fw->ver? */
+       text_ptr = (uint32_t *)((uint8_t *) fw_ptr + sizeof(struct ved_fw));
+       data_ptr = text_ptr + fw->text_size;
+
+       /* maybe we can infer the fw version from the fw text size */
+
+       IPVR_DEBUG_VED("VED: Retrieved pointers for firmware\n");
+       IPVR_DEBUG_VED("VED: text_size: %d\n", fw->text_size);
+       IPVR_DEBUG_VED("VED: data_size: %d\n", fw->data_size);
+       IPVR_DEBUG_VED("VED: data_location: 0x%x\n", fw->data_location);
+       IPVR_DEBUG_VED("VED: First 4 bytes of text: 0x%x\n", *text_ptr);
+       IPVR_DEBUG_VED("VED: First 4 bytes of data: 0x%x\n", *data_ptr);
+       IPVR_DEBUG_VED("VED: Uploading firmware\n");
+
+#if UPLOAD_FW_BY_DMA
+       ved_upload_fw(dev_priv, 0, ved_priv->mtx_mem_size / 4);
+#else
+       ved_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
+                     PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
+                     text_ptr);
+       ved_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
+                     fw->data_location - MTX_DATA_BASE, fw->data_size,
+                     data_ptr);
+#endif
+
+       /* Set starting PC address */
+       ved_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
+
+       /* Turn on the thread */
+       VED_REG_WRITE32(MTX_ENABLE_MTX_ENABLE_MASK, MTX_ENABLE_OFFSET);
+
+       /* Wait for the signature value to be written back */
+       ret = ved_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
+                                   MSVDX_COMMS_SIGNATURE_VALUE,
+                                   0xffffffff, /* Enabled bits */
+                                   2000000, 5);
+       if (ret) {
+               IPVR_ERROR("VED: firmware fails to initialize.\n");
+               goto out;
+       }
+
+       IPVR_DEBUG_VED("VED: MTX Initial indications OK.\n");
+       IPVR_DEBUG_VED("VED: MSVDX_COMMS_AREA_ADDR = %08x.\n",
+                      MSVDX_COMMS_AREA_ADDR);
+out:
+       return ret;
+}
diff --git a/drivers/gpu/drm/ipvr/ved_fw.h b/drivers/gpu/drm/ipvr/ved_fw.h
new file mode 100644
index 0000000..8c2da56
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_fw.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ * ved_fw.h: VED firmware support header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+
+#ifndef _VED_FW_H_
+#define _VED_FW_H_
+
+#include "ipvr_drv.h"
+
+#define FIRMWAREID             0x014d42ab
+
+/*  Non-Optimal Invalidation is not default */
+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
+
+#define FW_VA_RENDER_HOST_INT          0x00004000
+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION    0x00000020
+#define FW_DEVA_ERROR_DETECTED 0x08000000
+
+/* There is no work currently underway on the hardware */
+#define MSVDX_FW_STATUS_HW_IDLE        0x00000001
+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE    0x00000200
+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0                             \
+       (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV |                       \
+               MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION |           \
+               MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
+
+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1                             \
+       (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION |                  \
+               MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
+
+#define MTX_CODE_BASE          (0x80900000)
+#define MTX_DATA_BASE          (0x82880000)
+#define PC_START_ADDRESS       (0x80900000)
+
+#define MTX_CORE_CODE_MEM      (0x10)
+#define MTX_CORE_DATA_MEM      (0x18)
+
+#define RENDEC_A_SIZE  (4 * 1024 * 1024)
+#define RENDEC_B_SIZE  (1024 * 1024)
+
+#define TERMINATION_SIZE       48
+
+#define MSVDX_RESET_NEEDS_REUPLOAD_FW          (0x2)
+#define MSVDX_RESET_NEEDS_INIT_FW              (0x1)
+
+int32_t ved_alloc_fw_bo(struct drm_ipvr_private *dev_priv);
+
+int32_t ved_setup_fw(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ved_init.c b/drivers/gpu/drm/ipvr/ved_init.c
new file mode 100644
index 0000000..bf82dfd
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_init.c
@@ -0,0 +1,829 @@
+/**************************************************************************
+ * ved_init.c: VED initialization and deinitialization
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Li Zeng <li.zeng at intel.com>
+ *    Binglin Chen <binglin.chen at intel.com>
+ **************************************************************************/
+
+#include "ipvr_gem.h"
+#include "ipvr_buffer.h"
+#include "ved_init.h"
+#include "ved_cmd.h"
+#include "ved_msg.h"
+#include "ved_reg.h"
+#include "ved_ec.h"
+#include "ved_pm.h"
+#include "ved_fw.h"
+#include <linux/firmware.h>
+
+static ssize_t
+ved_pmstate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct drm_ipvr_private *dev_priv = NULL;
+       struct ved_private *ved_priv = NULL;
+       int32_t ret = -EINVAL;
+
+       if (drm_dev == NULL)
+               return 0;
+
+       dev_priv = drm_dev->dev_private;
+       ved_priv = dev_priv->ved_private;
+       return ret;
+}
+
+static DEVICE_ATTR(ved_pmstate, 0444, ved_pmstate_show, NULL);
+
+void ved_clear_irq(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint32_t mtx_int = 0;
+
+       /* Clear MTX interrupt */
+       REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, MTX_IRQ, 1);
+       VED_REG_WRITE32(mtx_int, MSVDX_INTERRUPT_CLEAR_OFFSET);
+}
+
+/* following two functions also work for CLV and MFLD */
+/* IPVR_INT_ENABLE_R is set in ipvr_irq_(un)install_islands */
+void ved_disable_irq(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       /*uint32_t ier = dev_priv->vdc_irq_mask & (~_PSB_IRQ_MSVDX_FLAG); */
+
+       uint32_t enables = 0;
+
+       REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, MTX_IRQ, 0);
+       VED_REG_WRITE32(enables, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+       /* write in sysirq.c */
+       /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
+}
+
+void ved_enable_irq(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       /* uint32_t ier = dev_priv->vdc_irq_mask | _PSB_IRQ_MSVDX_FLAG; */
+       uint32_t enables = 0;
+
+       /* Only enable the master core IRQ */
+       REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, MTX_IRQ,
+                              1);
+       VED_REG_WRITE32(enables, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+       /* write in sysirq.c */
+       /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
+}
+
+/*
+ * The original udelay value of 1000 was derived from the reference driver.
+ * Per Liu, Haiyang, reducing it from 1000 to 5 saves 3% C0 residency.
+ */
+int32_t
+ved_wait_for_register(struct drm_ipvr_private *dev_priv,
+                           uint32_t offset, uint32_t value, uint32_t enable,
+                           uint32_t poll_cnt, uint32_t timeout)
+{
+       uint32_t reg_value = 0;
+       while (poll_cnt) {
+               reg_value = VED_REG_READ32(offset);
+               if (value == (reg_value & enable))
+                       return 0;
+
+               /* Wait a bit */
+               IPVR_UDELAY(timeout);
+               poll_cnt--;
+       }
+       IPVR_DEBUG_REG("MSVDX: Timeout while waiting for register %08x:"
+                      " expecting %08x (mask %08x), got %08x\n",
+                      offset, value, enable, reg_value);
+
+       return -EFAULT;
+}
+
+void
+ved_set_clocks(struct drm_device *dev, uint32_t clock_state)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint32_t old_clock_state = 0;
+       /* IPVR_DEBUG_VED("SetClocks to %x.\n", clock_state); */
+       old_clock_state = VED_REG_READ32(MSVDX_MAN_CLK_ENABLE_OFFSET);
+       if (old_clock_state == clock_state)
+               return;
+
+       if (clock_state == 0) {
+               /* Turn off clocks procedure */
+               if (old_clock_state) {
+                       /* Turn off all the clocks except core */
+                       VED_REG_WRITE32(
+                               MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+                               MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+                       /* Make sure all the clocks are off except core */
+                       ved_wait_for_register(dev_priv,
+                               MSVDX_MAN_CLK_ENABLE_OFFSET,
+                               MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+                               0xffffffff, 2000000, 5);
+
+                       /* Turn off core clock */
+                       VED_REG_WRITE32(0, MSVDX_MAN_CLK_ENABLE_OFFSET);
+               }
+       } else {
+               uint32_t clocks_en = clock_state;
+
+               /* Make sure that core clock is not accidentally turned off */
+               clocks_en |= MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK;
+
+               /* If all clocks were disabled, do the bring-up procedure */
+               if (old_clock_state == 0) {
+                       /* turn on core clock */
+                       VED_REG_WRITE32(
+                               MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+                               MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+                       /* Make sure core clock is on */
+                       ved_wait_for_register(dev_priv,
+                               MSVDX_MAN_CLK_ENABLE_OFFSET,
+                               MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK,
+                               0xffffffff, 2000000, 5);
+
+                       /* turn on the other clocks as well */
+                       VED_REG_WRITE32(clocks_en, MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+                       /* Make sure that they are all on */
+                       ved_wait_for_register(dev_priv,
+                                       MSVDX_MAN_CLK_ENABLE_OFFSET,
+                                       clocks_en, 0xffffffff, 2000000, 5);
+               } else {
+                       VED_REG_WRITE32(clocks_en, MSVDX_MAN_CLK_ENABLE_OFFSET);
+
+                       /* Make sure that they are on */
+                       ved_wait_for_register(dev_priv,
+                                       MSVDX_MAN_CLK_ENABLE_OFFSET,
+                                       clocks_en, 0xffffffff, 2000000, 5);
+               }
+       }
+}
+
+int32_t ved_core_reset(struct drm_ipvr_private *dev_priv)
+{
+       int32_t ret = 0;
+       int32_t loop;
+       uint32_t cmd;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       /* Enable Clocks */
+       IPVR_DEBUG_GENERAL("Enabling clocks.\n");
+       ved_set_clocks(dev_priv->dev, clk_enable_all);
+
+       /* Always pause the MMU, as the core may still be active
+        * when resetting. Memory activity concurrent with a reset
+        * must be avoided.
+        */
+       VED_REG_WRITE32(2, MSVDX_MMU_CONTROL0_OFFSET);
+
+       /* BRN26106, BRN23944, BRN33671 */
+       /* This is necessary for all cores up to Tourmaline */
+       if ((VED_REG_READ32(MSVDX_CORE_REV_OFFSET) < 0x00050502) &&
+               (VED_REG_READ32(MSVDX_INTERRUPT_STATUS_OFFSET)
+                       & MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK) &&
+               (VED_REG_READ32(MSVDX_MMU_STATUS_OFFSET) & 1)) {
+               uint32_t *pptd;
+               uint32_t loop;
+               uint32_t ptd_addr;
+
+               /* Work around: point every MMU directory entry at a
+                * single recovery page so any outstanding MMU accesses
+                * hit known-good memory before the reset.
+                */
+               ptd_addr = page_to_pfn(ved_priv->mmu_recover_page)
+                                       << PAGE_SHIFT;
+               pptd = kmap(ved_priv->mmu_recover_page);
+               if (!pptd) {
+                       IPVR_ERROR("failed to kmap mmu recover page.\n");
+                       return -1;
+               }
+               for (loop = 0; loop < 1024; loop++)
+                       pptd[loop] = ptd_addr | 0x00000003;
+               VED_REG_WRITE32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET +  0);
+               VED_REG_WRITE32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET +  4);
+               VED_REG_WRITE32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET +  8);
+               VED_REG_WRITE32(ptd_addr, MSVDX_MMU_DIR_LIST_BASE_OFFSET + 12);
+
+               VED_REG_WRITE32(6, MSVDX_MMU_CONTROL0_OFFSET);
+               VED_REG_WRITE32(MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK,
+                                       MSVDX_INTERRUPT_STATUS_OFFSET);
+               kunmap(ved_priv->mmu_recover_page);
+       }
+
+       /* make sure *ALL* outstanding reads have gone away */
+       for (loop = 0; loop < 10; loop++)
+               ret = ved_wait_for_register(dev_priv, MSVDX_MMU_MEM_REQ_OFFSET,
+                                           0, 0xff, 100, 1);
+       if (ret) {
+               IPVR_DEBUG_WARN("MSVDX_MMU_MEM_REQ is 0x%x,\n"
+                       "indicating outstanding read requests (check 0).\n",
+                       VED_REG_READ32(MSVDX_MMU_MEM_REQ_OFFSET));
+               return -1;
+       }
+       /* disconnect RENDEC decoders from memory */
+       cmd = VED_REG_READ32(MSVDX_RENDEC_CONTROL1_OFFSET);
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, RENDEC_DEC_DISABLE, 1);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTROL1_OFFSET);
+
+       /* Issue software reset for all but core */
+       VED_REG_WRITE32((unsigned int)~MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK,
+                       MSVDX_CONTROL_OFFSET);
+       VED_REG_READ32(MSVDX_CONTROL_OFFSET);
+       /* bit format is set as little endian */
+       VED_REG_WRITE32(0, MSVDX_CONTROL_OFFSET);
+       /* make sure read requests are zero */
+       ret = ved_wait_for_register(dev_priv, MSVDX_MMU_MEM_REQ_OFFSET,
+                                   0, 0xff, 100, 100);
+       if (!ret) {
+               /* Issue software reset */
+               VED_REG_WRITE32(MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK,
+                               MSVDX_CONTROL_OFFSET);
+
+               ret = ved_wait_for_register(dev_priv, MSVDX_CONTROL_OFFSET, 0,
+                                       MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK,
+                                       2000000, 5);
+               if (!ret) {
+                       /* Clear interrupt enabled flag */
+                       VED_REG_WRITE32(0, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+                       /* Clear any pending interrupt flags */
+                       VED_REG_WRITE32(0xFFFFFFFF,
+                                       MSVDX_INTERRUPT_CLEAR_OFFSET);
+               } else {
+                       IPVR_DEBUG_WARN("MSVDX_CONTROL_OFFSET is 0x%x,\n"
+                               "indicating the software reset failed.\n",
+                               VED_REG_READ32(MSVDX_CONTROL_OFFSET));
+               }
+       } else {
+               IPVR_DEBUG_WARN("MSVDX_MMU_MEM_REQ is 0x%x,\n"
+                       "indicating outstanding read requests (check 1).\n",
+                       VED_REG_READ32(MSVDX_MMU_MEM_REQ_OFFSET));
+       }
+       return ret;
+}
+
+/*
+ * Reset the chip and disable interrupts.
+ * Returns 0 on success, nonzero on failure.
+ * Prefer ved_core_reset() over ved_reset().
+ */
+int32_t ved_reset(struct drm_ipvr_private *dev_priv)
+{
+       int32_t ret = 0;
+
+       /* Issue software reset */
+       /* VED_REG_WRITE32(msvdx_sw_reset_all, MSVDX_CONTROL); */
+       VED_REG_WRITE32(MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK,
+                       MSVDX_CONTROL_OFFSET);
+
+       ret = ved_wait_for_register(dev_priv, MSVDX_CONTROL_OFFSET, 0,
+                       MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK, 2000000, 5);
+       if (!ret) {
+               /* Clear interrupt enabled flag */
+               VED_REG_WRITE32(0, MSVDX_HOST_INTERRUPT_ENABLE_OFFSET);
+
+               /* Clear any pending interrupt flags */
+               VED_REG_WRITE32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR_OFFSET);
+       } else {
+               IPVR_DEBUG_WARN("MSVDX_CONTROL_OFFSET is 0x%x,\n"
+                       "indicating the software reset failed.\n",
+                       VED_REG_READ32(MSVDX_CONTROL_OFFSET));
+       }
+
+       return ret;
+}
+
+static int32_t ved_alloc_ccb_for_rendec(struct ved_private *ved_priv,
+                                       int32_t ccb0_size,
+                                       int32_t ccb1_size)
+{
+       struct drm_device *dev = ved_priv->dev_priv->dev;
+       size_t size;
+       uint8_t *ccb0_addr = NULL;
+       uint8_t *ccb1_addr = NULL;
+
+       IPVR_DEBUG_INIT("VED: setting up RENDEC, allocate CCB 0/1\n");
+
+       /* handling for ccb0 */
+       if (ved_priv->ccb0 == NULL) {
+               size = roundup(ccb0_size, PAGE_SIZE);
+               if (size == 0)
+                       return -EINVAL;
+
+               /* Allocate the new object */
+               ved_priv->ccb0 = ipvr_gem_obj_create_and_bind(dev, size);
+               if (ved_priv->ccb0 == NULL) {
+                       IPVR_ERROR("VED: failed to allocate ccb0 buffer.\n");
+                       return -ENOMEM;
+               }
+
+               ved_priv->base_addr0 = ipvr_gem_obj_mmu_offset(ved_priv->ccb0);
+
+               ccb0_addr = ipvr_gem_object_vmap(ved_priv->ccb0);
+               if (IS_ERR(ccb0_addr)) {
+                       IPVR_ERROR("VED: vmap failed for ccb0 buffer.\n");
+                       return PTR_ERR(ccb0_addr);
+               }
+
+               memset(ccb0_addr, 0, size);
+               vunmap(ccb0_addr);
+       }
+
+       /* handling for ccb1 */
+       if (ved_priv->ccb1 == NULL) {
+               size = roundup(ccb1_size, PAGE_SIZE);
+               if (size == 0)
+                       return -EINVAL;
+
+               /* Allocate the new object */
+               ved_priv->ccb1 = ipvr_gem_obj_create_and_bind(dev, size);
+               if (ved_priv->ccb1 == NULL) {
+                       IPVR_ERROR("VED: failed to allocate ccb1 buffer.\n");
+                       return -ENOMEM;
+               }
+
+               ved_priv->base_addr1 = ipvr_gem_obj_mmu_offset(ved_priv->ccb1);
+
+               ccb1_addr = ipvr_gem_object_vmap(ved_priv->ccb1);
+               if (IS_ERR(ccb1_addr)) {
+                       IPVR_ERROR("VED: vmap failed for ccb1 buffer.\n");
+                       return PTR_ERR(ccb1_addr);
+               }
+
+               memset(ccb1_addr, 0, size);
+               vunmap(ccb1_addr);
+       }
+
+       IPVR_DEBUG_INIT("VED: RENDEC A: %08x RENDEC B: %08x\n",
+                       ved_priv->base_addr0, ved_priv->base_addr1);
+
+       return 0;
+}
+
+static void ved_free_ccb(struct ved_private *ved_priv)
+{
+       if (ved_priv->ccb0) {
+               drm_gem_object_unreference_unlocked(&ved_priv->ccb0->base);
+               ved_priv->ccb0 = NULL;
+       }
+       if (ved_priv->ccb1) {
+               drm_gem_object_unreference_unlocked(&ved_priv->ccb1->base);
+               ved_priv->ccb1 = NULL;
+       }
+}
+
+static void ved_rendec_init_by_reg(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       uint32_t cmd;
+
+       VED_REG_WRITE32(ved_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0_OFFSET);
+       VED_REG_WRITE32(ved_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1_OFFSET);
+
+       cmd = 0;
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
+                       RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
+                       RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_BUFFER_SIZE_OFFSET);
+
+       cmd = 0;
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+                       RENDEC_DECODE_START_SIZE, 0);
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+                       RENDEC_BURST_SIZE_W, 1);
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+                       RENDEC_BURST_SIZE_R, 1);
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
+                       RENDEC_EXTERNAL_MEMORY, 1);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTROL1_OFFSET);
+
+       cmd = 0x00101010;
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTEXT0_OFFSET);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTEXT1_OFFSET);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTEXT2_OFFSET);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTEXT3_OFFSET);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTEXT4_OFFSET);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTEXT5_OFFSET);
+
+       cmd = 0;
+       REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
+                       1);
+       VED_REG_WRITE32(cmd, MSVDX_RENDEC_CONTROL0_OFFSET);
+}
+
+int32_t ved_rendec_init_by_msg(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       /* at this stage, FW has been uploaded successfully,
+        * so we can send the RENDEC init message */
+       struct fw_init_msg init_msg;
+       init_msg.header.bits.msg_size = sizeof(struct fw_init_msg);
+       init_msg.header.bits.msg_type = MTX_MSGID_INIT;
+       init_msg.rendec_addr0 = ved_priv->base_addr0;
+       init_msg.rendec_addr1 = ved_priv->base_addr1;
+       init_msg.rendec_size.bits.rendec_size0 = RENDEC_A_SIZE / (4 * 1024);
+       init_msg.rendec_size.bits.rendec_size1 = RENDEC_B_SIZE / (4 * 1024);
+       return ved_mtx_send(dev_priv, (void *)&init_msg);
+}
+
+#ifdef CONFIG_DRM_IPVR_EC
+static void ved_init_ec(struct ved_private *ved_priv)
+{
+       struct drm_ipvr_private *dev_priv = ved_priv->dev_priv;
+
+       /* we should restore the state, if we power down/up during EC */
+       VED_REG_WRITE32(0, 0x2000 + 0xcc4); /* EXT_FW_ERROR_STATE */
+       VED_REG_WRITE32(0, 0x2000 + 0xcb0); /* EXT_FW_LAST_MBS */
+       VED_REG_WRITE32(0, 0x2000 + 0xcb4); /* EXT_FW_LAST_MBS */
+       VED_REG_WRITE32(0, 0x2000 + 0xcb8); /* EXT_FW_LAST_MBS */
+       VED_REG_WRITE32(0, 0x2000 + 0xcbc); /* EXT_FW_LAST_MBS */
+
+       ved_priv->vec_ec_mem_saved = 1;
+
+       ved_priv->ved_ec_ctx[0] =
+               kzalloc(sizeof(struct ved_ec_context) *
+                               VED_MAX_EC_INSTANCE,
+                               GFP_KERNEL);
+       if (ved_priv->ved_ec_ctx[0] == NULL) {
+               IPVR_ERROR("VED: fail to allocate memory for ec ctx.\n");
+       } else {
+               int i;
+               for (i = 1; i < VED_MAX_EC_INSTANCE; i++)
+                       ved_priv->ved_ec_ctx[i] =
+                               ved_priv->ved_ec_ctx[0] + i;
+               for (i = 0; i < VED_MAX_EC_INSTANCE; i++)
+                       ved_priv->ved_ec_ctx[i]->fence =
+                                       VED_INVALID_FENCE;
+       }
+       INIT_WORK(&(ved_priv->ec_work), ved_do_concealment);
+       return;
+}
+#endif
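
Note (annotation, not part of the patch): ved_init_ec() above allocates all
VED_MAX_EC_INSTANCE error-concealment contexts as a single kzalloc'd block
and points ved_ec_ctx[1..N-1] into it, which is why ipvr_ved_uninit() frees
only ved_ec_ctx[0]. A minimal sketch of the same single-allocation pattern,
with hypothetical names:

        struct ctx *table[N];

        table[0] = kzalloc(sizeof(struct ctx) * N, GFP_KERNEL);
        if (table[0]) {
                int i;
                for (i = 1; i < N; i++)
                        table[i] = table[0] + i; /* aliases, not owned */
        }
        /* ... a single kfree(table[0]) later releases every context */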
+
+static int32_t ved_startup_init(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv;
+
+       ved_priv = kzalloc(sizeof(struct ved_private), GFP_KERNEL);
+       if (ved_priv == NULL) {
+               IPVR_ERROR("VED: alloc ved_private failed.\n");
+               return -ENOMEM;
+       }
+
+       dev_priv->ved_private = ved_priv;
+       ved_priv->dev_priv = dev_priv;
+       ved_priv->dev = dev;
+       ved_priv->fw_loaded_by_punit = 0;
+
+       ved_priv->pm_gating_count = 0;
+
+       /* walk device --> drm_device --> drm_ipvr_private --> ved_priv
+        * for ved_pmstate_show: ved_pmpolicy.
+        * Without pci_set_drvdata, the drm_device cannot be recovered
+        * from the struct device.
+        */
+       /* pci_set_drvdata(dev->pdev, dev); */
+       if (device_create_file(&dev->platformdev->dev,
+                              &dev_attr_ved_pmstate))
+               IPVR_ERROR("VED: could not create sysfs file\n");
+
+       ved_priv->sysfs_pmstate = sysfs_get_dirent(
+                                               dev->platformdev->dev.kobj.sd,
+                                               "ved_pmstate");
+
+#ifdef CONFIG_DRM_IPVR_EC
+       ved_init_ec(ved_priv);
+#endif
+
+       /* Initialize VED command queueing */
+       INIT_LIST_HEAD(&ved_priv->ved_queue);
+       mutex_init(&ved_priv->ved_mutex);
+       spin_lock_init(&ved_priv->ved_lock);
+       ved_priv->mmu_recover_page = alloc_page(GFP_DMA32);
+       if (!ved_priv->mmu_recover_page)
+               goto err_exit;
+
+       return 0;
+
+err_exit:
+       IPVR_ERROR("VED: startup init failed.\n");
+       kfree(dev_priv->ved_private);
+       dev_priv->ved_private = NULL;
+
+       return -ENOMEM;
+}
+
+/* This value is hardcoded in FW */
+#define WDT_CLOCK_DIVIDER 128
+int32_t ved_post_boot_init(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       uint32_t device_node_flags =
+                       DSIABLE_IDLE_GPIO_SIG | DSIABLE_Auto_CLOCK_GATING |
+                       RETURN_VDEB_DATA_IN_COMPLETION |
+                       NOT_ENABLE_ON_HOST_CONCEALMENT;
+
+       /* DDK set fe_wdt_clks as 0x820 and be_wdt_clks as 0x8200 */
+       uint32_t fe_wdt_clks = 0x334 * WDT_CLOCK_DIVIDER;
+       uint32_t be_wdt_clks = 0x2008 * WDT_CLOCK_DIVIDER;
+
+       VED_REG_WRITE32(FIRMWAREID, MSVDX_COMMS_FIRMWARE_ID);
+       VED_REG_WRITE32(device_node_flags, MSVDX_COMMS_OFFSET_FLAGS);
+
+       /* read register bank size */
+       {
+               uint32_t ram_bank_size;
+               uint32_t bank_size, reg;
+               reg = VED_REG_READ32(MSVDX_MTX_RAM_BANK_OFFSET);
+               bank_size =
+                       REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
+                                        MTX_RAM_BANK_SIZE);
+               ram_bank_size = (uint32_t)(1 << (bank_size + 2));
+               IPVR_DEBUG_INIT("VED: RAM bank size = %d bytes\n",
+                               ram_bank_size);
+       }
+       /* host end */
+
+       /* DDK setup tiling region here */
+       /* DDK set MMU_CONTROL2 register */
+
+       /* set watchdog timer here */
+       if (!ved_priv->fw_loaded_by_punit) {
+               int reg_val = 0;
+               REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL,
+                                 FE_WDT_CNT_CTRL, 0x3);
+               REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL,
+                                 FE_WDT_ENABLE, 0);
+               REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL,
+                                 FE_WDT_ACTION0, 1);
+               REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL,
+                                 FE_WDT_CLEAR_SELECT, 1);
+               REGIO_WRITE_FIELD(reg_val, FE_MSVDX_WDT_CONTROL,
+                                 FE_WDT_CLKDIV_SELECT, 7);
+               VED_REG_WRITE32(fe_wdt_clks / WDT_CLOCK_DIVIDER,
+                               FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+               VED_REG_WRITE32(reg_val, FE_MSVDX_WDT_CONTROL_OFFSET);
+
+               reg_val = 0;
+               /* DDK set BE_WDT_CNT_CTRL as 0x5 and
+                * BE_WDT_CLEAR_SELECT as 0x1 */
+               REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL,
+                                 BE_WDT_CNT_CTRL, 0x7);
+               REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL,
+                                 BE_WDT_ENABLE, 0);
+               REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL,
+                                 BE_WDT_ACTION0, 1);
+               REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL,
+                                 BE_WDT_CLEAR_SELECT, 0xd);
+               REGIO_WRITE_FIELD(reg_val, BE_MSVDX_WDT_CONTROL,
+                                 BE_WDT_CLKDIV_SELECT, 7);
+
+               VED_REG_WRITE32(be_wdt_clks / WDT_CLOCK_DIVIDER,
+                               BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+               VED_REG_WRITE32(reg_val, BE_MSVDX_WDT_CONTROL_OFFSET);
+       } else {
+               /* for the other two, use the default value punit set */
+               VED_REG_WRITE32(fe_wdt_clks / WDT_CLOCK_DIVIDER,
+                               FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+               VED_REG_WRITE32(be_wdt_clks / WDT_CLOCK_DIVIDER,
+                               BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+       }
+
+       return ved_rendec_init_by_msg(dev);
+}
+
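Note (annotation, not part of the patch): the watchdog arithmetic above is
easiest to check in decimal; the divider cancels, so the compare-match
registers receive the raw tick counts:

        fe_wdt_clks / WDT_CLOCK_DIVIDER = (0x334 * 128) / 128 = 0x334 = 820
        be_wdt_clks / WDT_CLOCK_DIVIDER = (0x2008 * 128) / 128 = 0x2008 = 8200

which matches the literal 820/8200 values that ved_post_init() writes into
the FE/BE compare-match registers in the driver-loaded-firmware case.
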
+static void ved_post_powerup_core_reset(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       ved_set_clocks(dev_priv->dev, clk_enable_all);
+
+       /* ved_clear_irq only clears the CR_MTX_IRQ int,
+        * while the DDK sets 0xFFFFFFFF */
+       ved_clear_irq(dev);
+       ved_enable_irq(dev);
+}
+
+static int32_t ved_mtx_init(struct drm_device *dev, int32_t error_reset)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint32_t clk_divider = 200;
+       int32_t ret;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       /* These should not be reprogrammed after an error reset */
+       if (!error_reset) {
+               VED_REG_WRITE32(0, MSVDX_COMMS_MSG_COUNTER);
+               VED_REG_WRITE32(0, MSVDX_EXT_FW_ERROR_STATE);
+       }
+
+       VED_REG_WRITE32(0, MSVDX_COMMS_ERROR_TRIG);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
+       VED_REG_WRITE32(0, MSVDX_COMMS_FIRMWARE_ID);
+       /*
+        * IMG DDK sets this from gui32DeviceNodeFlags & 0x4000,
+        * which is not described in the fw spec.
+        * The bit that needs to be set pre-boot is the performance-data
+        * bit, since it controls whether the firmware rebalances the
+        * message queues.
+        */
+       VED_REG_WRITE32(0, MSVDX_COMMS_OFFSET_FLAGS);
+
+       /* DDK: if device_node_flags has 0x400 set, it ORs in (1<<16);
+        * this is not described in the fw spec */
+       VED_REG_WRITE32(clk_divider - 1, MTX_SYSC_TIMERDIV_OFFSET);
+
+       /* DDK: LLDMA upload fw, which is now done by gunit */
+
+       /* DDK: redefine toHost and toMTX msg buffer, seems not needed */
+
+       /* Wait for the signature value to be written back */
+       ret = ved_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
+                                   MSVDX_COMMS_SIGNATURE_VALUE,
+                                   0xffffffff,
+                                   1000, 1000);
+       if (ret) {
+               IPVR_DEBUG_WARN("WARN: Gunit fw upload failed,\n"
+                               "MSVDX_COMMS_SIGNATURE reg is 0x%x,"
+                               "MSVDX_COMMS_FW_STATUS reg is 0x%x,"
+                               "MTX_ENABLE reg is 0x%x.\n",
+                               VED_REG_READ32(MSVDX_COMMS_SIGNATURE),
+                               VED_REG_READ32(MSVDX_COMMS_FW_STATUS),
+                               VED_REG_READ32(MTX_ENABLE_OFFSET));
+               ved_priv->ved_needs_reset |=
+                               MSVDX_RESET_NEEDS_REUPLOAD_FW |
+                               MSVDX_RESET_NEEDS_INIT_FW;
+       }
+       return ret;
+}
+
+int32_t ved_post_init(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       uint32_t cmd;
+       int ret;
+       struct ved_private *ved_priv;
+
+       if (!dev_priv || !dev_priv->ved_private)
+               return -EINVAL;
+
+       ved_priv = dev_priv->ved_private;
+
+       ved_priv->ved_busy = 0;
+       ved_priv->ved_hw_busy = 1;
+
+       if (ved_priv->fw_loaded_by_punit) {
+               /* DDK: Configure MSVDX memory stalling with the min, max
+                * and ratio of access */
+               ved_post_powerup_core_reset(dev);
+       }
+
+       if (!ved_priv->fw_loaded_by_punit) {
+               /* Enable MMU by removing all bypass bits */
+               VED_REG_WRITE32(0, MSVDX_MMU_CONTROL0_OFFSET);
+       } else {
+               ved_priv->rendec_initialized = 0;
+               ret = ved_mtx_init(dev, ved_priv->decoding_err);
+               if (ret) {
+                       IPVR_ERROR("VED: ved_mtx_init failed.\n");
+                       return ret;
+               }
+       }
+
+       if (!ved_priv->fw_loaded_by_punit) {
+               ved_rendec_init_by_reg(dev);
+               if (!ved_priv->fw_bo) {
+                       ret = ved_alloc_fw_bo(dev_priv);
+                       if (ret) {
+                               IPVR_ERROR("VED: ved_alloc_fw_bo failed.\n");
+                               return ret;
+                       }
+               }
+               /* move fw loading to the place receiving first cmd buffer */
+               ved_priv->ved_fw_loaded = 0; /* need to load firmware */
+               /* it should be set at punit post boot init phase */
+               VED_REG_WRITE32(820, FE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+               VED_REG_WRITE32(8200, BE_MSVDX_WDT_COMPAREMATCH_OFFSET);
+
+               ved_clear_irq(dev);
+               ved_enable_irq(dev);
+
+               cmd = VED_REG_READ32(VEC_SHIFTREG_CONTROL_OFFSET);
+               REGIO_WRITE_FIELD(cmd, VEC_SHIFTREG_CONTROL,
+                                 SR_MASTER_SELECT, 1); /* Host */
+               VED_REG_WRITE32(cmd, VEC_SHIFTREG_CONTROL_OFFSET);
+       }
+
+       return 0;
+}
+
+int32_t ipvr_ved_init(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       int32_t ret;
+
+       if (!dev_priv->ved_private) {
+               ret = ved_startup_init(dev);
+               if (unlikely(ret)) {
+                       IPVR_ERROR("VED: ved_startup_init failed.\n");
+                       return ret;
+               }
+       }
+
+       if (unlikely(dev_priv->ved_private == NULL))
+               return -ENOMEM;
+
+       ret = ved_alloc_ccb_for_rendec(dev_priv->ved_private,
+                       RENDEC_A_SIZE, RENDEC_B_SIZE);
+       if (unlikely(ret)) {
+               IPVR_ERROR("VED: ved_alloc_ccb_for_rendec failed.\n");
+               ved_free_ccb(dev_priv->ved_private);
+               return ret;
+       }
+
+       ret = ved_post_init(dev);
+       if (unlikely(ret)) {
+               IPVR_ERROR("VED: ved_post_init failed.\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+int32_t ipvr_ved_uninit(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+
+       /* VED_REG_WRITE32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
+       IPVR_DEBUG_INIT("VED: set the VED clock to 0.\n");
+       ved_set_clocks(dev_priv->dev, 0);
+
+       if (NULL == ved_priv) {
+               IPVR_ERROR("VED: ipvr_ved_uninit: ved_priv is NULL!\n");
+               return -1;
+       }
+
+       if (ved_priv->ccb0 || ved_priv->ccb1)
+               ved_free_ccb(ved_priv);
+
+       if (ved_priv->fw_bo) {
+               drm_gem_object_unreference_unlocked(&ved_priv->fw_bo->base);
+               ved_priv->fw_bo = NULL;
+       }
+
+       if (!ved_priv->fw_loaded_by_punit)
+               kfree(ved_priv->ved_fw_ptr);
+
+       kfree(ved_priv->ved_ec_ctx[0]);
+
+       if (ved_priv->mmu_recover_page)
+               __free_page(ved_priv->mmu_recover_page);
+
+       /* ved_priv was already checked for NULL above */
+       /* pci_set_drvdata(dev->pdev, NULL); */
+       device_remove_file(&dev->platformdev->dev, &dev_attr_ved_pmstate);
+       sysfs_put(ved_priv->sysfs_pmstate);
+       ved_priv->sysfs_pmstate = NULL;
+       kfree(ved_priv);
+       dev_priv->ved_private = NULL;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/ipvr/ved_init.h b/drivers/gpu/drm/ipvr/ved_init.h
new file mode 100644
index 0000000..ab57f025
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_init.h
@@ -0,0 +1,61 @@
+/**************************************************************************
+ * ved_init.h: VED initialization header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * Copyright (c) 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+
+#ifndef _VED_INIT_H_
+#define _VED_INIT_H_
+
+#include "ipvr_drv.h"
+
+void ved_clear_irq(struct drm_device *dev);
+
+void ved_disable_irq(struct drm_device *dev);
+
+void ved_enable_irq(struct drm_device *dev);
+
+int32_t ved_wait_for_register(struct drm_ipvr_private *dev_priv,
+                          uint32_t offset, uint32_t value, uint32_t enable,
+                          uint32_t poll_cnt, uint32_t timeout);
+
+void ved_set_clocks(struct drm_device *dev, uint32_t clock_state);
+
+int32_t ved_core_reset(struct drm_ipvr_private *dev_priv);
+
+/* TODO: ved_reset is only used when the driver loads the firmware.
+ * Later we can test whether it can be removed. */
+int32_t ved_reset(struct drm_ipvr_private *dev_priv);
+
+int32_t ved_rendec_init_by_msg(struct drm_device *dev);
+
+int32_t ved_post_init(struct drm_device *dev);
+
+int32_t ved_post_boot_init(struct drm_device *dev);
+
+int32_t ipvr_ved_init(struct drm_device *dev);
+
+int32_t ipvr_ved_uninit(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ved_msg.h b/drivers/gpu/drm/ipvr/ved_msg.h
new file mode 100644
index 0000000..1ffba7e
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_msg.h
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * ved_msg.h: VED message definition
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) 2003 Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Li Zeng <li.zeng at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _VED_MSG_H_
+#define _VED_MSG_H_
+
+/* Start of parser specific Host->MTX messages. */
+#define        FWRK_MSGID_START_PSR_HOSTMTX_MSG        (0x80)
+
+/* Start of parser specific MTX->Host messages. */
+#define        FWRK_MSGID_START_PSR_MTXHOST_MSG        (0xC0)
+
+/* Host-defined msg, for host use only; not recognized by the MTX */
+#define        FWRK_MSGID_HOST_EMULATED                (0x40)
+
+/* This type defines the framework specified message ids */
+enum {
+       /*! Sent by the VA driver on the host to the mtx firmware.
+        */
+       MTX_MSGID_PADDING = 0,
+       MTX_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
+       MTX_MSGID_DECODE_FE,
+       MTX_MSGID_DEBLOCK,
+       MTX_MSGID_INTRA_OOLD,
+       MTX_MSGID_DECODE_BE,
+       MTX_MSGID_HOST_BE_OPP,
+
+       /*! Sent by the mtx firmware to itself.
+        */
+       MTX_MSGID_RENDER_MC_INTERRUPT,
+
+       /* used to distinguish MRST and MFLD */
+       MTX_MSGID_DEBLOCK_MFLD = FWRK_MSGID_HOST_EMULATED,
+       MTX_MSGID_INTRA_OOLD_MFLD,
+       MTX_MSGID_DECODE_BE_MFLD,
+       MTX_MSGID_HOST_BE_OPP_MFLD,
+
+       /*! Sent by the DXVA firmware on the MTX to the host.
+        */
+       MTX_MSGID_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
+       MTX_MSGID_COMPLETED_BATCH,
+       MTX_MSGID_DEBLOCK_REQUIRED,
+       MTX_MSGID_TEST_RESPONCE,
+       MTX_MSGID_ACK,
+       MTX_MSGID_FAILED,
+       MTX_MSGID_CONTIGUITY_WARNING,
+       MTX_MSGID_HW_PANIC,
+};
+
+#define MTX_GENMSG_SIZE_TYPE           uint8_t
+#define MTX_GENMSG_SIZE_MASK           (0xFF)
+#define MTX_GENMSG_SIZE_SHIFT          (0)
+#define MTX_GENMSG_SIZE_OFFSET         (0x0000)
+
+#define MTX_GENMSG_ID_TYPE             uint8_t
+#define MTX_GENMSG_ID_MASK             (0xFF)
+#define MTX_GENMSG_ID_SHIFT            (0)
+#define MTX_GENMSG_ID_OFFSET           (0x0001)
+
+#define MTX_GENMSG_HEADER_SIZE         2
+
+#define MTX_GENMSG_FENCE_TYPE          uint16_t
+#define MTX_GENMSG_FENCE_MASK          (0xFFFF)
+#define MTX_GENMSG_FENCE_OFFSET                (0x0002)
+#define MTX_GENMSG_FENCE_SHIFT         (0)
+
+#define FW_INVALIDATE_MMU              (0x0010)
+
+union msg_header {
+       struct {
+               uint32_t msg_size:8;
+               uint32_t msg_type:8;
+               uint32_t msg_fence:16;
+       } bits;
+       uint32_t value;
+};
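
Note (annotation, not part of the patch): this one word leads every message;
the bitfield view packs the size into bits [7:0], the type into [15:8] and
the fence into [31:16], mirroring the MTX_GENMSG_* offsets above. An
illustrative composition, matching what ved_rendec_init_by_msg() builds
field by field:

        union msg_header h;

        h.value = 0;
        h.bits.msg_size = sizeof(struct fw_init_msg); /* defined below */
        h.bits.msg_type = MTX_MSGID_INIT;
        /* h.value now holds the packed header word for an INIT message */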
+
+struct fw_init_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t reserved:16;
+               } bits;
+               uint32_t value;
+       } header;
+       uint32_t rendec_addr0;
+       uint32_t rendec_addr1;
+       union {
+               struct {
+                       uint32_t rendec_size0:16;
+                       uint32_t rendec_size1:16;
+               } bits;
+               uint32_t value;
+       } rendec_size;
+};
+
+struct fw_decode_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+       union {
+               struct {
+                       uint32_t flags:16;
+                       uint32_t buffer_size:16;
+               } bits;
+               uint32_t value;
+       } flag_size;
+       uint32_t crtl_alloc_addr;
+       union {
+               struct {
+                       uint32_t context:8;
+                       uint32_t mmu_ptd:24;
+               } bits;
+               uint32_t value;
+       } mmu_context;
+       uint32_t operating_mode;
+};
+
+struct fw_deblock_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+       union {
+               struct {
+                       uint32_t flags:16;
+                       uint32_t slice_field_type:2;
+                       uint32_t reserved:14;
+               } bits;
+               uint32_t value;
+       } flag_type;
+       uint32_t operating_mode;
+       union {
+               struct {
+                       uint32_t context:8;
+                       uint32_t mmu_ptd:24;
+               } bits;
+               uint32_t value;
+       } mmu_context;
+       union {
+               struct {
+                       uint32_t frame_height_mb:16;
+                       uint32_t pic_width_mb:16;
+               } bits;
+               uint32_t value;
+       } pic_size;
+       uint32_t address_a0;
+       uint32_t address_a1;
+       uint32_t mb_param_address;
+       uint32_t ext_stride_a;
+       uint32_t address_b0;
+       uint32_t address_b1;
+       uint32_t alt_output_flags_b;
+       /* additional msg outside of IMG msg */
+       uint32_t address_c0;
+       uint32_t address_c1;
+};
+
+#define MTX_PADMSG_SIZE 2
+struct fw_padding_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+               } bits;
+               uint16_t value;
+       } header;
+};
+
+struct fw_msg_header {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+};
+
+struct fw_completed_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+       union {
+               struct {
+                       uint32_t start_mb:16;
+                       uint32_t last_mb:16;
+               } bits;
+               uint32_t value;
+       } mb;
+       uint32_t flags;
+       uint32_t vdebcr;
+};
+
+struct fw_deblock_required_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+};
+
+struct fw_panic_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+       uint32_t fe_status;
+       uint32_t be_status;
+       union {
+               struct {
+                       uint32_t last_mb:16;
+                       uint32_t reserved2:16;
+               } bits;
+               uint32_t value;
+       } mb;
+};
+
+struct fw_contiguity_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+       union {
+               struct {
+                       uint32_t end_mb_num:16;
+                       uint32_t begin_mb_num:16;
+               } bits;
+               uint32_t value;
+       } mb;
+};
+
+struct fw_slice_header_extract_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+
+       union {
+               struct {
+                       uint32_t flags:16;
+                       uint32_t res:16;
+               } bits;
+               uint32_t value;
+       } flags;
+
+       uint32_t src;
+
+       union {
+               struct {
+                       uint32_t context:8;
+                       uint32_t mmu_ptd:24;
+               } bits;
+               uint32_t value;
+       } mmu_context;
+
+       uint32_t dst;
+       uint32_t src_size;
+       uint32_t dst_size;
+
+       union {
+               struct {
+                       uint32_t expected_pps_id:8;
+                       uint32_t nalu_header_unit_type:5;
+                       uint32_t nalu_header_ref_idc:2;
+                       uint32_t nalu_header_reserved:1;
+                       uint32_t continue_parse_flag:1;
+                       uint32_t frame_mbs_only_flag:1;
+                       uint32_t pic_order_present_flag:1;
+                       uint32_t delta_pic_order_always_zero_flag:1;
+                       uint32_t redundant_pic_cnt_present_flag:1;
+                       uint32_t weighted_pred_flag:1;
+                       uint32_t entropy_coding_mode_flag:1;
+                       uint32_t deblocking_filter_control_present_flag:1;
+                       uint32_t weighted_bipred_idc:2;
+                       uint32_t residual_colour_transform_flag:1;
+                       uint32_t chroma_format_idc:2;
+                       uint32_t idr_flag:1;
+                       uint32_t pic_order_cnt_type:2;
+               } bits;
+               uint32_t value;
+       } flag_bitfield;
+
+       union {
+               struct {
+                       uint8_t num_slice_groups_minus1:3;
+                       uint8_t num_ref_idc_l1_active_minus1:5;
+                       uint8_t slice_group_map_type:3;
+                       uint8_t num_ref_idc_l0_active_minus1:5;
+                       uint8_t log2_slice_group_change_cycle:4;
+                       uint8_t slice_header_bit_offset:4;
+                       uint8_t log2_max_frame_num_minus4:4;
+                       uint8_t logs_max_pic_order_cnt_lsb_minus4:4;
+               } bits;
+               uint32_t value;
+       } pic_param0;
+};
+
+struct fw_slice_header_extract_done_msg {
+       union {
+               struct {
+                       uint32_t msg_size:8;
+                       uint32_t msg_type:8;
+                       uint32_t msg_fence:16;
+               } bits;
+               uint32_t value;
+       } header;
+};
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ved_pm.c b/drivers/gpu/drm/ipvr/ved_pm.c
new file mode 100644
index 0000000..9aee650
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_pm.c
@@ -0,0 +1,392 @@
+/**************************************************************************
+ * ved_pm.c: VED power management support
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+
+#include "ved_pm.h"
+#include "ved_init.h"
+#include "ved_reg.h"
+#include "ved_cmd.h"
+#include "ved_fw.h"
+#include "ipvr_trace.h"
+
+#ifdef CONFIG_INTEL_SOC_PMC
+#include <linux/intel_mid_pm.h>
+#endif
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+extern int32_t drm_ipvr_freq;
+
+#ifdef CONFIG_INTEL_SOC_PMC
+extern int pmc_nc_set_power_state(int islands, int state_type, int reg);
+extern int pmc_nc_get_power_state(int islands, int reg);
+#endif
+
+#define PCI_ROOT_MSGBUS_CTRL_REG       0xD0
+#define PCI_ROOT_MSGBUS_DATA_REG       0xD4
+#define PCI_ROOT_MSGBUS_CTRL_EXT_REG   0xD8
+#define PCI_ROOT_MSGBUS_READ           0x10
+#define PCI_ROOT_MSGBUS_WRITE          0x11
+#define PCI_ROOT_MSGBUS_DWORD_ENABLE   0xf0
+
+/* VED power state set/get */
+#define PUNIT_PORT                     0x04
+#define VEDSSPM0                       0x32
+#define VEDSSPM1                       0x33
+#define VEDSSC                         0x1
+
+/* VED frequency set/get */
+#define IP_FREQ_VALID     0x80     /* Freq is valid bit */
+
+#define IP_FREQ_SIZE         5     /* number of bits in freq fields */
+#define IP_FREQ_MASK      0x1f     /* Bit mask for freq field */
+
+/*  Positions of various frequency fields */
+#define IP_FREQ_POS          0     /* Freq control [4:0] */
+#define IP_FREQ_GUAR_POS     8     /* Freq guar   [12:8] */
+#define IP_FREQ_STAT_POS    24     /* Freq status [28:24] */
+
+#define IP_FREQ_RESUME_SET 0x64
+
+#define IPVR_VED_CLOCKGATING_OFFSET      0x2064
+
+enum APM_VED_STATUS {
+       VED_APM_STS_D0 = 0,
+       VED_APM_STS_D1,
+       VED_APM_STS_D2,
+       VED_APM_STS_D3
+};
+
+static int32_t ved_save_context(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       struct ved_private *ved_priv = dev_priv->ved_private;
+       int32_t offset;
+       int ret;
+
+       if (ved_priv->fw_loaded_by_punit)
+               ved_priv->ved_needs_reset = MSVDX_RESET_NEEDS_INIT_FW;
+       else
+               ved_priv->ved_needs_reset = 1;
+
+#ifdef CONFIG_DRM_IPVR_EC
+       /* we should restore the state, if we power down/up during EC */
+       for (offset = 0; offset < 4; ++offset)
+               ved_priv->vec_ec_mem_data[offset] =
+                       VED_REG_READ32(0x2cb0 + offset * 4);
+
+       ved_priv->vec_ec_mem_data[4] = VED_REG_READ32(0x2cc4);
+
+       ved_priv->vec_ec_mem_saved = 1;
+       IPVR_DEBUG_VED("ec last mb %d %d %d %d\n",
+                       ved_priv->vec_ec_mem_data[0],
+                       ved_priv->vec_ec_mem_data[1],
+                       ved_priv->vec_ec_mem_data[2],
+                       ved_priv->vec_ec_mem_data[3]);
+       IPVR_DEBUG_VED("ec error state %d\n", ved_priv->vec_ec_mem_data[4]);
+#endif
+
+       /* Reset MTX */
+       VED_REG_WRITE32(MTX_SOFT_RESET_MTXRESET, MTX_SOFT_RESET_OFFSET);
+
+       /* TODO: check with IMG why MSVDX must be reset before powering off */
+       ret = ved_core_reset(dev_priv);
+       if (unlikely(ret))
+               IPVR_DEBUG_WARN("failed to call ved_core_reset: %d\n", ret);
+
+       /* Initialize VEC Local RAM */
+       for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
+               VED_REG_WRITE32(0, VEC_LOCAL_MEM_OFFSET + offset * 4);
+
+       if (ved_priv->fw_loaded_by_punit) {
+               VED_REG_WRITE32(0, MTX_ENABLE_OFFSET);
+               ved_set_clocks(dev_priv->dev, 0);
+       }
+
+       return 0;
+}
+
+static u32 __ipvr_msgbus_read32(struct pci_dev *pci_root, u8 port, u32 addr)
+{
+       uint32_t data;
+       uint32_t cmd;
+       uint32_t cmdext;
+
+       cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
+               ((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+       cmdext = addr & 0xffffff00;
+
+       if (cmdext) {
+               /* This resets to 0 automatically, no need to write 0 */
+               pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+                                      cmdext);
+       }
+
+       pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+       pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+
+       return data;
+}
+
+static void __ipvr_msgbus_write32(struct pci_dev *pci_root, u8 port, u32 addr,
+                                 u32 data)
+{
+       uint32_t cmd;
+       uint32_t cmdext;
+
+       cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
+               ((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+       cmdext = addr & 0xffffff00;
+
+       pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+
+       if (cmdext) {
+               /* This resets to 0 automatically, no need to write 0 */
+               pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+                                      cmdext);
+       }
+
+       pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+}
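
Note (annotation, not part of the patch): the message-bus command word is
opcode[31:24] | port[23:16] | addr-low-byte[15:8] | byte-enables[7:0], with
the upper address bits routed through the CTRL_EXT register when nonzero.
Worked example for the power-state accesses below (port 0x04, reg 0x32):

        cmd = (PCI_ROOT_MSGBUS_READ << 24)      /* 0x10000000 */
            | (PUNIT_PORT << 16)                /* 0x00040000 */
            | ((VEDSSPM0 & 0xff) << 8)          /* 0x00003200 */
            | PCI_ROOT_MSGBUS_DWORD_ENABLE;     /* 0x000000f0 */
        /* cmd == 0x100432f0; VEDSSPM0's upper bits are zero, so the
         * CTRL_EXT register is left untouched */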
+
+static int __ipvr_pm_cmd_freq_wait(struct pci_dev *pci_root, u32 reg_freq,
+                                  u32 *freq_code_rlzd)
+{
+       int32_t tcount;
+       uint32_t freq_val;
+
+       for (tcount = 0; ; tcount++) {
+               freq_val = __ipvr_msgbus_read32(pci_root, PUNIT_PORT, reg_freq);
+               if ((freq_val & IP_FREQ_VALID) == 0)
+                       break;
+               if (tcount > 500) {
+                       IPVR_ERROR("P-Unit freq request wait timeout %x",
+                               freq_val);
+                       return -EBUSY;
+               }
+               udelay(1);
+       }
+
+       if (freq_code_rlzd) {
+               *freq_code_rlzd = ((freq_val >> IP_FREQ_STAT_POS) &
+                       IP_FREQ_MASK);
+       }
+
+       return 0;
+}
+
+static int32_t __ipvr_pm_cmd_freq_get(struct pci_dev *pci_root, u32 reg_freq)
+{
+       uint32_t freq_val;
+       int32_t freq_code = 0;
+
+       __ipvr_pm_cmd_freq_wait(pci_root, reg_freq, NULL);
+
+       freq_val = __ipvr_msgbus_read32(pci_root, PUNIT_PORT, reg_freq);
+       freq_code = (int)((freq_val >> IP_FREQ_STAT_POS) & ~IP_FREQ_VALID);
+       return freq_code;
+}
+
+static int32_t __ipvr_pm_cmd_freq_set(struct pci_dev *pci_root, u32 reg_freq,
+                                     u32 freq_code, u32 *p_freq_code_rlzd)
+{
+       uint32_t freq_val;
+       uint32_t freq_code_realized;
+       int32_t rva;
+
+       rva = __ipvr_pm_cmd_freq_wait(pci_root, reg_freq, NULL);
+       if (rva < 0) {
+               IPVR_ERROR("pm_cmd_freq_wait 1 failed: %d\n", rva);
+               return rva;
+       }
+
+       freq_val = IP_FREQ_VALID | freq_code;
+       __ipvr_msgbus_write32(pci_root, PUNIT_PORT, reg_freq, freq_val);
+
+       rva = __ipvr_pm_cmd_freq_wait(pci_root, reg_freq, &freq_code_realized);
+       if (rva < 0) {
+               IPVR_ERROR("pm_cmd_freq_wait 2 failed: %d\n", rva);
+               return rva;
+       }
+
+       if (p_freq_code_rlzd)
+               *p_freq_code_rlzd = freq_code_realized;
+
+       return rva;
+}
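
Note (annotation, not part of the patch): the request/acknowledge handshake
is carried entirely by IP_FREQ_VALID -- the caller writes the 5-bit code
with the valid bit set, and the P-Unit clears it and reports the realized
code in bits [28:24]. Hypothetical usage with a code from ved_pm.h:

        u32 realized;

        if (!__ipvr_pm_cmd_freq_set(dev_priv->pci_root, VEDSSPM1,
                                    IP_FREQ_400_00, &realized))
                IPVR_DEBUG_PM("P-Unit granted freq code 0x%x\n", realized);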
+
+static int32_t ved_set_freq(struct drm_device *dev, u32 freq_code)
+{
+       uint32_t freq_code_rlzd = 0;
+       int32_t ret;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       ret = __ipvr_pm_cmd_freq_set(dev_priv->pci_root, VEDSSPM1, freq_code,
+                                    &freq_code_rlzd);
+       if (ret < 0) {
+               IPVR_ERROR("failed to set frequency, current is %x\n",
+                       freq_code_rlzd);
+       }
+
+       return ret;
+}
+
+static int32_t ved_get_freq(struct drm_device *dev)
+{
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       return __ipvr_pm_cmd_freq_get(dev_priv->pci_root, VEDSSPM1);
+}
+
+#ifdef CONFIG_INTEL_SOC_PMC
+bool ved_power_on(struct drm_device *dev)
+{
+       int32_t ved_freq;
+       int32_t ret;
+       IPVR_DEBUG_PM("VED: power on msvdx using ATOM_PMC.\n");
+
+       ret = pmc_nc_set_power_state(VEDSSC, 0, VEDSSPM0);
+       if (unlikely(ret)) {
+               IPVR_ERROR("VED: pmc_nc_set_power_state ON failed!\n");
+               return false;
+       }
+
+       ved_freq = ved_get_freq(dev);
+
+       IPVR_DEBUG_PM("VED frequency: code %d (%dMHz)\n", ved_freq,
+                     GET_VED_FREQUENCY(ved_freq));
+
+       trace_ved_power_on(GET_VED_FREQUENCY(ved_freq));
+       return true;
+}
+
+bool ved_power_off(struct drm_device *dev)
+{
+       int32_t ved_freq;
+       int32_t ret;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       IPVR_DEBUG_PM("VED: power off msvdx using ATOM_PMC.\n");
+
+       if (dev_priv->ved_private) {
+               ret = ved_save_context(dev);
+               if (unlikely(ret)) {
+                       IPVR_ERROR("Failed to save VED context, stop powering off\n");
+                       return false;
+               }
+       }
+
+       ved_freq = ved_get_freq(dev);
+       IPVR_DEBUG_PM("VED frequency: code %d (%dMHz)\n", ved_freq,
+                     GET_VED_FREQUENCY(ved_freq));
+
+       ret = pmc_nc_set_power_state(VEDSSC, 1, VEDSSPM0);
+       if (unlikely(ret)) {
+               IPVR_ERROR("VED: pmc_nc_set_power_state DOWN failed!\n");
+               return false;
+       }
+
+       trace_ved_power_off(GET_VED_FREQUENCY(ved_freq));
+       return true;
+}
+#else
+bool ved_power_on(struct drm_device *dev)
+{
+       int32_t ved_freq_before, ved_freq_after;
+       uint32_t pwr_sts;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       IPVR_DEBUG_PM("VED: power on msvdx.\n");
+
+       /* FIXME: add timeout check */
+       do {
+               __ipvr_msgbus_write32(dev_priv->pci_root, PUNIT_PORT,
+                                     VEDSSPM0, VED_APM_STS_D0);
+               udelay(10);
+               pwr_sts = __ipvr_msgbus_read32(dev_priv->pci_root, PUNIT_PORT,
+                                              VEDSSPM0);
+       } while (pwr_sts != 0x0);
+
+       do {
+               __ipvr_msgbus_write32(dev_priv->pci_root, PUNIT_PORT,
+                                     VEDSSPM0, VED_APM_STS_D3);
+               udelay(10);
+               pwr_sts = __ipvr_msgbus_read32(dev_priv->pci_root, PUNIT_PORT,
+                                              VEDSSPM0);
+       } while (pwr_sts != 0x03000003);
+
+       do {
+               __ipvr_msgbus_write32(dev_priv->pci_root, PUNIT_PORT,
+                                     VEDSSPM0, VED_APM_STS_D0);
+               udelay(10);
+               pwr_sts = __ipvr_msgbus_read32(dev_priv->pci_root, PUNIT_PORT,
+                                              VEDSSPM0);
+       } while (pwr_sts != 0x0);
+
+       ved_freq_before = ved_get_freq(dev);
+
+       if (ved_set_freq(dev, drm_ipvr_freq))
+               IPVR_ERROR("Failed to set VED frequency\n");
+
+       ved_freq_after = ved_get_freq(dev);
+       IPVR_DEBUG_PM("VED frequency: %dMHz => %dMHz\n",
+               GET_VED_FREQUENCY(ved_freq_before),
+               GET_VED_FREQUENCY(ved_freq_after));
+
+       trace_ved_power_on(GET_VED_FREQUENCY(ved_freq_after));
+       return true;
+}
+
+bool ved_power_off(struct drm_device *dev)
+{
+       uint32_t pwr_sts;
+       int32_t ved_freq;
+       int32_t ret;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+       IPVR_DEBUG_PM("VED: power off msvdx.\n");
+
+       if (dev_priv->ved_private) {
+               ret = ved_save_context(dev);
+               if (unlikely(ret)) {
+                       IPVR_ERROR("Failed to save VED context: %d, stop powering off\n", ret);
+                       return false;
+               }
+       }
+
+       ved_freq = ved_get_freq(dev);
+       IPVR_DEBUG_PM("VED frequency: code %d (%dMHz)\n", ved_freq,
+                     GET_VED_FREQUENCY(ved_freq));
+
+       /* FIXME: add timeout check */
+       do {
+               __ipvr_msgbus_write32(dev_priv->pci_root, PUNIT_PORT,
+                                     VEDSSPM0, VED_APM_STS_D3);
+               udelay(10);
+               pwr_sts = __ipvr_msgbus_read32(dev_priv->pci_root, PUNIT_PORT,
+                                              VEDSSPM0);
+       } while (pwr_sts != 0x03000003);
+
+       trace_ved_power_off(GET_VED_FREQUENCY(ved_freq));
+       return true;
+}
+
+#endif
+/**
+ * is_ved_on
+ *
+ * Description: checks whether the VED power island is up.
+ * Returns true if the island is ON, false if it is OFF.
+ */
+bool is_ved_on(struct drm_device *dev)
+{
+       uint32_t pwr_sts;
+       struct drm_ipvr_private *dev_priv = dev->dev_private;
+
+       pwr_sts = __ipvr_msgbus_read32(dev_priv->pci_root, PUNIT_PORT, VEDSSPM0);
+
+       return (pwr_sts == VED_APM_STS_D0);
+}
diff --git a/drivers/gpu/drm/ipvr/ved_pm.h b/drivers/gpu/drm/ipvr/ved_pm.h
new file mode 100644
index 0000000..1f00f8b
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_pm.h
@@ -0,0 +1,55 @@
+/**************************************************************************
+ * ved_pm.h: VED power management header file
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *    Yao Cheng <yao.cheng at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _VED_PM_H_
+#define _VED_PM_H_
+
+#include "ipvr_drv.h"
+
+#define IP_FREQ_100_00 0x1f        /* 0b11111 100.00 */
+#define IP_FREQ_106_67 0x1d        /* 0b11101 106.67 */
+#define IP_FREQ_133_30 0x17        /* 0b10111 133.30 */
+#define IP_FREQ_160_00 0x13        /* 0b10011 160.00 */
+#define IP_FREQ_177_78 0x11        /* 0b10001 177.78 */
+#define IP_FREQ_200_00 0x0f        /* 0b01111 200.00 */
+#define IP_FREQ_213_33 0x0e        /* 0b01110 213.33 */
+#define IP_FREQ_266_67 0x0b        /* 0b01011 266.67 */
+#define IP_FREQ_320_00 0x09        /* 0b01001 320.00 */
+#define IP_FREQ_355_56 0x08        /* 0b01000 355.56 */
+#define IP_FREQ_400_00 0x07        /* 0b00111 400.00 */
+#define IP_FREQ_457_14 0x06        /* 0b00110 457.14 */
+#define IP_FREQ_533_33 0x05        /* 0b00101 533.33 */
+#define IP_FREQ_640_00 0x04        /* 0b00100 640.00 */
+#define IP_FREQ_800_00 0x03        /* 0b00011 800.00 */
+
+#define GET_VED_FREQUENCY(freq_code)   ((1600 * 2)/((freq_code) + 1))
+
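Note (annotation, not part of the patch): the codes divide a 3200MHz
reference, i.e. GET_VED_FREQUENCY(code) = 3200 / (code + 1). Spot checks
against the table above:

        GET_VED_FREQUENCY(IP_FREQ_100_00) = 3200 / (0x1f + 1) = 100 MHz
        GET_VED_FREQUENCY(IP_FREQ_200_00) = 3200 / (0x0f + 1) = 200 MHz
        GET_VED_FREQUENCY(IP_FREQ_800_00) = 3200 / (0x03 + 1) = 800 MHz
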
+bool is_ved_on(struct drm_device *dev);
+
+bool ved_power_on(struct drm_device *dev);
+
+bool ved_power_off(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/ipvr/ved_reg.h b/drivers/gpu/drm/ipvr/ved_reg.h
new file mode 100644
index 0000000..1d55f2a
--- /dev/null
+++ b/drivers/gpu/drm/ipvr/ved_reg.h
@@ -0,0 +1,609 @@
+/**************************************************************************
+ * ved_reg.h: VED register definition
+ *
+ * Copyright (c) 2014 Intel Corporation, Hillsboro, OR, USA
+ * Copyright (c) Imagination Technologies Limited, UK
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Fei Jiang <fei.jiang at intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _VED_REG_H_
+#define _VED_REG_H_
+
+#include "ipvr_drv.h"
+
+#if (defined MFLD_MSVDX_FABRIC_DEBUG) && MFLD_MSVDX_FABRIC_DEBUG
+#define VED_REG_WRITE32(_val, _offs)                                   \
+do {                                                                   \
+       if (ipvr_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0)           \
+               panic("msvdx reg 0x%x write failed.\n",                 \
+                               (unsigned int)(_offs));                 \
+       else                                                            \
+               iowrite32(_val, dev_priv->ved_reg_base + (_offs));      \
+} while (0)
+
+static inline uint32_t VED_REG_READ32(uint32_t _offs)
+{
+       struct drm_ipvr_private *dev_priv =
+               (struct drm_ipvr_private *)gpDrmDevice->dev_private;
+       if (ipvr_get_power_state(OSPM_VIDEO_DEC_ISLAND) == 0) {
+               panic("msvdx reg 0x%x read failed.\n", (unsigned int)(_offs));
+               return 0;
+       } else {
+               return ioread32(dev_priv->ved_reg_base + (_offs));
+       }
+}
+
+#elif (defined MSVDX_REG_DUMP) && MSVDX_REG_DUMP
+
+#define VED_REG_WRITE32(_val, _offs) \
+do {                                                \
+       printk(KERN_INFO"MSVDX: write %08x to reg 0x%08x\n", \
+                       (unsigned int)(_val),       \
+                       (unsigned int)(_offs));     \
+       iowrite32(_val, dev_priv->ved_reg_base + (_offs));   \
+} while (0)
+
+static inline uint32_t VED_REG_READ32(uint32_t _offs)
+{
+       uint32_t val = ioread32(dev_priv->ved_reg_base + (_offs));
+       printk(KERN_INFO"MSVDX: read reg 0x%08x, get %08x\n",
+                       (unsigned int)(_offs), val);
+       return val;
+}
+
+#else
+
+#define VED_REG_WRITE32(_val, _offs) \
+       iowrite32(_val, dev_priv->ved_reg_base + (_offs))
+#define VED_REG_READ32(_offs) \
+       ioread32(dev_priv->ved_reg_base + (_offs))
+
+#endif
+
+#define REGISTER(__group__, __reg__) (__group__##_##__reg__##_OFFSET)
+
+#define MTX_INTERNAL_REG(R_SPECIFIER, U_SPECIFIER)     \
+       (((R_SPECIFIER)<<4) | (U_SPECIFIER))
+#define MTX_PC         MTX_INTERNAL_REG(0, 5)
+
+#define MEMIO_READ_FIELD(vpMem, field)					\
+	((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
+			& field##_MASK) >> field##_SHIFT))
+
+#define MEMIO_WRITE_FIELD(vpMem, field, value)				\
+do {									\
+	((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) =	\
+		((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
+			& (field##_TYPE)~field##_MASK) |		\
+	(field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK)); \
+} while (0)
+
+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value)			\
+do {									\
+	(*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) =	\
+	((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) |	\
+		(field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));	\
+} while (0)
+
+#define REGIO_READ_FIELD(reg_val, reg, field)				\
+	((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
+
+#define REGIO_WRITE_FIELD(reg_val, reg, field, value)			\
+do {									\
+	(reg_val) =							\
+	((reg_val) & ~(reg##_##field##_MASK)) |				\
+	(((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK)); \
+} while (0)
+
+
+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value)		\
+do {									\
+	(reg_val) = ((reg_val) | ((value) << (reg##_##field##_SHIFT))); \
+} while (0)
+
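For illustration, a hypothetical read-modify-write of a single field with the REGIO helpers (VEC_SHIFTREG_CONTROL and its SR_MASTER_SELECT field are defined further down in this header; the value 1 is illustrative):

	uint32_t reg_val = VED_REG_READ32(VEC_SHIFTREG_CONTROL_OFFSET);

	/* update SR_MASTER_SELECT without disturbing the other fields */
	REGIO_WRITE_FIELD(reg_val, VEC_SHIFTREG_CONTROL, SR_MASTER_SELECT, 1);
	VED_REG_WRITE32(reg_val, VEC_SHIFTREG_CONTROL_OFFSET);
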
+/****** MSVDX.Technical Reference Manual.2.0.2.4.External VXD38x **************
+Offset address			Name			Identifier
+0x0000 - 0x03FF (1024B)	MTX Register		REG_MSVDX_MTX
+0x0400 - 0x047F (128B)		VDMC Register		REG_MSVDX_VDMC
+0x0480 - 0x04FF (128B)		VDEB Register		REG_MSVDX_VDEB
+0x0500 - 0x05FF (256B)		DMAC Register		REG_MSVDX_DMAC
+0x0600 - 0x06FF (256B)		MSVDX Core Register	REG_MSVDX_SYS
+0x0700 - 0x07FF (256B)		VEC iQ Matrix RAM	REG_MSVDX_VEC_IQRAM
+0x0800 - 0x0FFF (2048B)	VEC Registers		REG_MSVDX_VEC
+0x1000 - 0x1FFF (4kB)		Command Register	REG_MSVDX_CMD
+0x2000 - 0x2FFF (4kB)		VEC Local RAM		REG_MSVDX_VEC_RAM
+0x3000 - 0x4FFF (8kB)		VEC VLC Table RAM	REG_MSVDX_VEC_VLC
+0x5000 - 0x5FFF (4kB)		AXI Register		REG_MSVDX_AXI
+******************************************************************************/
+
+/*************** MTX registers start: 0x0000 - 0x03FF (1024B) ****************/
+#define MTX_ENABLE_OFFSET                              (0x0000)
+#define MTX_ENABLE_MTX_ENABLE_MASK                             (0x00000001)
+#define MTX_ENABLE_MTX_ENABLE_SHIFT                            (0)
+
+#define MTX_KICK_INPUT_OFFSET                          (0x0080)
+
+#define MTX_REGISTER_READ_WRITE_REQUEST_OFFSET         (0x00FC)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK        (0x80000000)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT       (31)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK           (0x00010000)
+#define MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT          (16)
+
+#define MTX_REGISTER_READ_WRITE_DATA_OFFSET            (0x00F8)
+
+#define MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET            (0x0104)
+
+#define MTX_RAM_ACCESS_CONTROL_OFFSET                  (0x0108)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK                  (0x0FF00000)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT                 (20)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK               (0x000FFFFC)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT              (2)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK                  (0x00000002)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT                 (1)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK                   (0x00000001)
+#define MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT                  (0)
+
+#define MTX_RAM_ACCESS_STATUS_OFFSET                   (0x010C)
+
+#define MTX_SOFT_RESET_OFFSET                          (0x0200)
+#define MTX_SOFT_RESET_MTX_RESET_MASK                          (0x00000001)
+#define MTX_SOFT_RESET_MTX_RESET_SHIFT                         (0)
+#define        MTX_SOFT_RESET_MTXRESET                         (0x00000001)
+
+#define MTX_SYSC_TIMERDIV_OFFSET                       (0x0208)
+
+#define MTX_SYSC_CDMAC_OFFSET                          (0x0340)
+#define MTX_SYSC_CDMAC_BURSTSIZE_MASK                          (0x07000000)
+#define MTX_SYSC_CDMAC_BURSTSIZE_SHIFT                         (24)
+#define MTX_SYSC_CDMAC_RNW_MASK                                (0x00020000)
+#define MTX_SYSC_CDMAC_RNW_SHIFT                               (17)
+#define MTX_SYSC_CDMAC_ENABLE_MASK                             (0x00010000)
+#define MTX_SYSC_CDMAC_ENABLE_SHIFT                            (16)
+#define MTX_SYSC_CDMAC_LENGTH_MASK                             (0x0000FFFF)
+#define MTX_SYSC_CDMAC_LENGTH_SHIFT                            (0)
+
+#define MTX_SYSC_CDMAA_OFFSET                          (0x0344)
+
+#define MTX_SYSC_CDMAS0_OFFSET                         (0x0348)
+
+#define MTX_SYSC_CDMAT_OFFSET                          (0x0350)
+/************************** MTX registers end **************************/
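The request/data register pair above implements the usual IMG MTX access handshake; a sketch of a register read under that assumption (no timeout handling; the helper is hypothetical, the real accessors live in ved_fw.c):

	static uint32_t example_mtx_reg_read(struct drm_ipvr_private *dev_priv,
					     uint32_t mtx_reg)
	{
		uint32_t req = mtx_reg;

		/* request a read (RNW = 1) of the given MTX internal register */
		REGIO_WRITE_FIELD_LITE(req, MTX_REGISTER_READ_WRITE_REQUEST,
				       MTX_RNW, 1);
		VED_REG_WRITE32(req, MTX_REGISTER_READ_WRITE_REQUEST_OFFSET);

		/* poll until the MTX posts the data (DREADY set) */
		while (!(VED_REG_READ32(MTX_REGISTER_READ_WRITE_REQUEST_OFFSET) &
			 MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK))
			cpu_relax();

		return VED_REG_READ32(MTX_REGISTER_READ_WRITE_DATA_OFFSET);
	}
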
+
+/**************** DMAC Registers: 0x0500 - 0x05FF (256B) ***************/
+
+#define DMAC_DMAC_SETUP_OFFSET                         (0x0500)
+
+#define DMAC_DMAC_COUNT_OFFSET                         (0x0504)
+#define DMAC_DMAC_COUNT_BSWAP_LSBMASK                          (0x00000001)
+#define DMAC_DMAC_COUNT_BSWAP_SHIFT                            (30)
+#define DMAC_DMAC_COUNT_PW_LSBMASK                             (0x00000003)
+#define DMAC_DMAC_COUNT_PW_SHIFT                               (27)
+#define DMAC_DMAC_COUNT_DIR_LSBMASK                            (0x00000001)
+#define DMAC_DMAC_COUNT_DIR_SHIFT                              (26)
+#define DMAC_DMAC_COUNT_PI_LSBMASK                             (0x00000003)
+#define DMAC_DMAC_COUNT_PI_SHIFT                               (24)
+#define DMAC_DMAC_COUNT_CNT_LSBMASK                            (0x0000FFFF)
+#define DMAC_DMAC_COUNT_CNT_SHIFT                              (0)
+#define DMAC_DMAC_COUNT_EN_MASK                                (0x00010000)
+#define DMAC_DMAC_COUNT_EN_SHIFT                               (16)
+
+#define DMAC_DMAC_PERIPH_OFFSET                                (0x0508)
+#define DMAC_DMAC_PERIPH_ACC_DEL_LSBMASK                       (0x00000007)
+#define DMAC_DMAC_PERIPH_ACC_DEL_SHIFT                         (29)
+#define DMAC_DMAC_PERIPH_INCR_LSBMASK                          (0x00000001)
+#define DMAC_DMAC_PERIPH_INCR_SHIFT                            (27)
+#define DMAC_DMAC_PERIPH_BURST_LSBMASK                         (0x00000007)
+#define DMAC_DMAC_PERIPH_BURST_SHIFT                           (24)
+
+#define DMAC_DMAC_IRQ_STAT_OFFSET                      (0x050C)
+#define DMAC_DMAC_IRQ_STAT_TRANSFER_FIN_MASK                   (0x00020000)
+
+#define DMAC_DMAC_PERIPHERAL_ADDR_OFFSET               (0x0514)
+#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_MASK                    (0x007FFFFF)
+#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_LSBMASK                 (0x007FFFFF)
+#define DMAC_DMAC_PERIPHERAL_ADDR_ADDR_SHIFT                   (0)
+
+/* DMAC control */
+#define PSB_DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT)       \
+               ((((BSWAP) & DMAC_DMAC_COUNT_BSWAP_LSBMASK) <<  \
+                       DMAC_DMAC_COUNT_BSWAP_SHIFT) |          \
+               (((PW) & DMAC_DMAC_COUNT_PW_LSBMASK) <<         \
+                       DMAC_DMAC_COUNT_PW_SHIFT) |                     \
+               (((DIR) & DMAC_DMAC_COUNT_DIR_LSBMASK) <<               \
+                       DMAC_DMAC_COUNT_DIR_SHIFT) |                    \
+               (((PERIPH_INCR) & DMAC_DMAC_COUNT_PI_LSBMASK) <<        \
+                       DMAC_DMAC_COUNT_PI_SHIFT) |                     \
+               (((COUNT) & DMAC_DMAC_COUNT_CNT_LSBMASK) <<             \
+                       DMAC_DMAC_COUNT_CNT_SHIFT))
+
+#define PSB_DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST)              \
+               ((((ACC_DEL) & DMAC_DMAC_PERIPH_ACC_DEL_LSBMASK) <<     \
+                       DMAC_DMAC_PERIPH_ACC_DEL_SHIFT) |               \
+               (((INCR) & DMAC_DMAC_PERIPH_INCR_LSBMASK) <<            \
+                       DMAC_DMAC_PERIPH_INCR_SHIFT) |          \
+               (((BURST) & DMAC_DMAC_PERIPH_BURST_LSBMASK) <<          \
+                       DMAC_DMAC_PERIPH_BURST_SHIFT))
+
+typedef enum {
+       /* !< No byte swapping will be performed. */
+       PSB_DMAC_BSWAP_NO_SWAP = 0x0,
+       /* !< Byte order will be reversed. */
+       PSB_DMAC_BSWAP_REVERSE = 0x1,
+} DMAC_eBSwap;
+
+typedef enum {
+       /* !< Data from memory to peripheral. */
+       PSB_DMAC_DIR_MEM_TO_PERIPH = 0x0,
+       /* !< Data from peripheral to memory. */
+       PSB_DMAC_DIR_PERIPH_TO_MEM = 0x1,
+} DMAC_eDir;
+
+typedef enum {
+       PSB_DMAC_ACC_DEL_0      = 0x0,  /* !< Access delay zero clock cycles */
+       PSB_DMAC_ACC_DEL_256    = 0x1,  /* !< Access delay 256 clock cycles */
+       PSB_DMAC_ACC_DEL_512    = 0x2,  /* !< Access delay 512 clock cycles */
+       PSB_DMAC_ACC_DEL_768    = 0x3,  /* !< Access delay 768 clock cycles */
+       PSB_DMAC_ACC_DEL_1024   = 0x4,  /* !< Access delay 1024 clock cycles */
+       PSB_DMAC_ACC_DEL_1280   = 0x5,  /* !< Access delay 1280 clock cycles */
+       PSB_DMAC_ACC_DEL_1536   = 0x6,  /* !< Access delay 1536 clock cycles */
+       PSB_DMAC_ACC_DEL_1792   = 0x7,  /* !< Access delay 1792 clock cycles */
+} DMAC_eAccDel;
+
+typedef enum {
+       PSB_DMAC_INCR_OFF       = 0,    /* !< Static peripheral address. */
+	PSB_DMAC_INCR_ON	= 1,	/* !< Incrementing peripheral address. */
+} DMAC_eIncr;
+
+typedef enum {
+       PSB_DMAC_BURST_0        = 0x0,  /* !< burst size of 0 */
+       PSB_DMAC_BURST_1        = 0x1,  /* !< burst size of 1 */
+       PSB_DMAC_BURST_2        = 0x2,  /* !< burst size of 2 */
+       PSB_DMAC_BURST_3        = 0x3,  /* !< burst size of 3 */
+       PSB_DMAC_BURST_4        = 0x4,  /* !< burst size of 4 */
+       PSB_DMAC_BURST_5        = 0x5,  /* !< burst size of 5 */
+       PSB_DMAC_BURST_6        = 0x6,  /* !< burst size of 6 */
+       PSB_DMAC_BURST_7        = 0x7,  /* !< burst size of 7 */
+} DMAC_eBurst;
+/************************** DMAC Registers end **************************/
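A hypothetical composition of the two value macros above (num_words is a placeholder, the PW encoding and the programming order are assumptions; the real DMA setup for firmware upload lives in ved_fw.c):

	/* mem-to-peripheral transfer, no byte swap */
	uint32_t count = PSB_DMAC_VALUE_COUNT(PSB_DMAC_BSWAP_NO_SWAP,
					      0 /* PW, assumed 32-bit */,
					      PSB_DMAC_DIR_MEM_TO_PERIPH,
					      PSB_DMAC_INCR_OFF, num_words);
	uint32_t periph = PSB_DMAC_VALUE_PERIPH_PARAM(PSB_DMAC_ACC_DEL_0,
						      PSB_DMAC_INCR_OFF,
						      PSB_DMAC_BURST_2);

	VED_REG_WRITE32(periph, DMAC_DMAC_PERIPH_OFFSET);
	VED_REG_WRITE32(count, DMAC_DMAC_COUNT_OFFSET);
	/* start by re-writing the count with the enable bit set */
	VED_REG_WRITE32(count | DMAC_DMAC_COUNT_EN_MASK, DMAC_DMAC_COUNT_OFFSET);
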
+
+/**************** MSVDX Core Registers: 0x0600 - 0x06FF (256B) ***************/
+#define MSVDX_CONTROL_OFFSET                                   (0x0600)
+#define MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK                    (0x00000100)
+#define MSVDX_CONTROL_MSVDX_SOFT_RESET_SHIFT                   (8)
+#define MSVDX_CONTROL_DMAC_CH0_SELECT_MASK                     (0x00001000)
+#define MSVDX_CONTROL_DMAC_CH0_SELECT_SHIFT                    (12)
+#define MSVDX_CONTROL_MSVDX_FE_SOFT_RESET_MASK                 (0x00010000)
+#define MSVDX_CONTROL_MSVDX_BE_SOFT_RESET_MASK                 (0x00100000)
+#define MSVDX_CONTROL_MSVDX_VEC_MEMIF_SOFT_RESET_MASK          (0x01000000)
+#define MSVDX_CONTROL_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK     (0x10000000)
+#define msvdx_sw_reset_all \
+       (MSVDX_CONTROL_MSVDX_SOFT_RESET_MASK |                  \
+       MSVDX_CONTROL_MSVDX_FE_SOFT_RESET_MASK |                \
+       MSVDX_CONTROL_MSVDX_BE_SOFT_RESET_MASK  |               \
+       MSVDX_CONTROL_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
+       MSVDX_CONTROL_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
+
+#define MSVDX_INTERRUPT_CLEAR_OFFSET                   (0x060C)
+
+#define MSVDX_INTERRUPT_STATUS_OFFSET                  (0x0608)
+#define MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_MASK              (0x00000F00)
+#define MSVDX_INTERRUPT_STATUS_MMU_FAULT_IRQ_SHIFT             (8)
+#define MSVDX_INTERRUPT_STATUS_MTX_IRQ_MASK                    (0x00004000)
+#define MSVDX_INTERRUPT_STATUS_MTX_IRQ_SHIFT                   (14)
+
+#define MSVDX_HOST_INTERRUPT_ENABLE_OFFSET             (0x0610)
+
+#define MSVDX_MAN_CLK_ENABLE_OFFSET                    (0x0620)
+#define MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK          (0x00000001)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_MAN_CLK_ENABLE_MASK  (0x00000002)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_MAN_CLK_ENABLE_MASK   (0x00000004)
+#define MSVDX_MAN_CLK_ENABLE_VDMC_MAN_CLK_ENABLE_MASK          (0x00000008)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_MAN_CLK_ENABLE_MASK    (0x00000010)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_MAN_CLK_ENABLE_MASK    (0x00000020)
+#define MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK           (0x00000040)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK (0x00020000)
+#define MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK  (0x00040000)
+#define MSVDX_MAN_CLK_ENABLE_VDMC_AUTO_CLK_ENABLE_MASK         (0x00080000)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK   (0x00100000)
+#define MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_AUTO_CLK_ENABLE_MASK   (0x00200000)
+
+#define clk_enable_all \
+       (MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK                  | \
+       MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_MAN_CLK_ENABLE_MASK           | \
+       MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_MAN_CLK_ENABLE_MASK            | \
+       MSVDX_MAN_CLK_ENABLE_VDMC_MAN_CLK_ENABLE_MASK                   | \
+       MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_MAN_CLK_ENABLE_MASK             | \
+       MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_MAN_CLK_ENABLE_MASK             | \
+       MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK)
+
+#define clk_enable_minimal \
+       (MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK | \
+       MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK)
+
+#define clk_enable_auto        \
+       (MSVDX_MAN_CLK_ENABLE_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
+       MSVDX_MAN_CLK_ENABLE_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK           | \
+       MSVDX_MAN_CLK_ENABLE_VDMC_AUTO_CLK_ENABLE_MASK                  | \
+       MSVDX_MAN_CLK_ENABLE_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK            | \
+       MSVDX_MAN_CLK_ENABLE_VEC_ITRANS_AUTO_CLK_ENABLE_MASK            | \
+       MSVDX_MAN_CLK_ENABLE_CORE_MAN_CLK_ENABLE_MASK                   | \
+       MSVDX_MAN_CLK_ENABLE_MTX_MAN_CLK_ENABLE_MASK)
+
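These aggregates are meant to be written to the clock-enable register as a whole; an illustrative sequence only, not the driver's actual power-up path:

	/* all clocks on for reset and firmware load ... */
	VED_REG_WRITE32(clk_enable_all, MSVDX_MAN_CLK_ENABLE_OFFSET);
	/* ... then the minimal set plus auto-gating once running */
	VED_REG_WRITE32(clk_enable_minimal | clk_enable_auto,
			MSVDX_MAN_CLK_ENABLE_OFFSET);
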
+#define MSVDX_CORE_ID_OFFSET                           (0x0630)
+#define MSVDX_CORE_REV_OFFSET                          (0x0640)
+
+#define MSVDX_DMAC_STREAM_STATUS_OFFSET                        (0x0648)
+
+#define MSVDX_MMU_CONTROL0_OFFSET                      (0x0680)
+#define MSVDX_MMU_CONTROL0_MMU_PAUSE_MASK                      (0x00000002)
+#define MSVDX_MMU_CONTROL0_MMU_PAUSE_SHIFT                     (1)
+#define MSVDX_MMU_CONTROL0_MMU_INVALDC_MASK                    (0x00000008)
+#define MSVDX_MMU_CONTROL0_MMU_INVALDC_SHIFT                   (3)
+
+#define MSVDX_MMU_BANK_INDEX_OFFSET                    (0x0688)
+
+#define MSVDX_MMU_STATUS_OFFSET                                (0x068C)
+
+#define MSVDX_MMU_CONTROL2_OFFSET                      (0x0690)
+
+#define MSVDX_MMU_DIR_LIST_BASE_OFFSET                 (0x0694)
+
+#define MSVDX_MMU_MEM_REQ_OFFSET                       (0x06D0)
+
+#define MSVDX_MMU_TILE_BASE0_OFFSET                    (0x06D4)
+
+#define MSVDX_MMU_TILE_BASE1_OFFSET                    (0x06D8)
+
+#define MSVDX_MTX_RAM_BANK_OFFSET                      (0x06F0)
+#define MSVDX_MTX_RAM_BANK_MTX_RAM_BANK_SIZE_MASK              (0x000F0000)
+#define MSVDX_MTX_RAM_BANK_MTX_RAM_BANK_SIZE_SHIFT             (16)
+
+#define MSVDX_MTX_DEBUG_OFFSET				MSVDX_MTX_RAM_BANK_OFFSET
+#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_MASK                  (0x00000004)
+#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_LSBMASK               (0x00000001)
+#define MSVDX_MTX_DEBUG_MTX_DBG_IS_SLAVE_SHIFT                 (2)
+#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_MASK                   (0x00000003)
+#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_LSBMASK                (0x00000003)
+#define MSVDX_MTX_DEBUG_MTX_DBG_GPIO_IN_SHIFT                  (0)
+
+/* watchdog for FE and BE */
+#define FE_MSVDX_WDT_CONTROL_OFFSET                    (0x0664)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_CNT_CTRL */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CNT_CTRL_MASK              (0x00060000)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CNT_CTRL_LSBMASK           (0x00000003)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CNT_CTRL_SHIFT             (17)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_ENABLE */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ENABLE_MASK                (0x00010000)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ENABLE_LSBMASK             (0x00000001)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ENABLE_SHIFT               (16)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_ACTION1 */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION1_MASK               (0x00003000)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION1_LSBMASK            (0x00000003)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION1_SHIFT              (12)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_ACTION0 */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION0_MASK               (0x00000100)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION0_LSBMASK            (0x00000001)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_ACTION0_SHIFT              (8)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_CLEAR_SELECT */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLEAR_SELECT_MASK          (0x00000030)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLEAR_SELECT_LSBMASK       (0x00000003)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLEAR_SELECT_SHIFT         (4)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_CONTROL, FE_WDT_CLKDIV_SELECT */
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLKDIV_SELECT_MASK         (0x00000007)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLKDIV_SELECT_LSBMASK      (0x00000007)
+#define FE_MSVDX_WDT_CONTROL_FE_WDT_CLKDIV_SELECT_SHIFT        (0)
+
+#define FE_MSVDX_WDTIMER_OFFSET                                (0x0668)
+/* MSVDX_CORE, CR_FE_MSVDX_WDTIMER, FE_WDT_COUNTER */
+#define FE_MSVDX_WDTIMER_FE_WDT_COUNTER_MASK                   (0x0000FFFF)
+#define FE_MSVDX_WDTIMER_FE_WDT_COUNTER_LSBMASK                (0x0000FFFF)
+#define FE_MSVDX_WDTIMER_FE_WDT_COUNTER_SHIFT                  (0)
+
+#define FE_MSVDX_WDT_COMPAREMATCH_OFFSET               (0x066c)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_COMPAREMATCH, FE_WDT_CM1 */
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM1_MASK              (0xFFFF0000)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM1_LSBMASK           (0x0000FFFF)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM1_SHIFT             (16)
+/* MSVDX_CORE, CR_FE_MSVDX_WDT_COMPAREMATCH, FE_WDT_CM0 */
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM0_MASK              (0x0000FFFF)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM0_LSBMASK           (0x0000FFFF)
+#define FE_MSVDX_WDT_COMPAREMATCH_FE_WDT_CM0_SHIFT             (0)
+
+#define BE_MSVDX_WDT_CONTROL_OFFSET                    (0x0670)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_CNT_CTRL */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CNT_CTRL_MASK              (0x001E0000)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CNT_CTRL_LSBMASK           (0x0000000F)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CNT_CTRL_SHIFT             (17)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_ENABLE */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ENABLE_MASK                (0x00010000)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ENABLE_LSBMASK             (0x00000001)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ENABLE_SHIFT               (16)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_ACTION0 */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ACTION0_MASK               (0x00000100)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ACTION0_LSBMASK            (0x00000001)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_ACTION0_SHIFT              (8)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_CLEAR_SELECT */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLEAR_SELECT_MASK          (0x000000F0)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLEAR_SELECT_LSBMASK       (0x0000000F)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLEAR_SELECT_SHIFT         (4)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_CONTROL, BE_WDT_CLKDIV_SELECT */
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLKDIV_SELECT_MASK         (0x00000007)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLKDIV_SELECT_LSBMASK      (0x00000007)
+#define BE_MSVDX_WDT_CONTROL_BE_WDT_CLKDIV_SELECT_SHIFT        (0)
+
+#define BE_MSVDX_WDTIMER_OFFSET                                (0x0674)
+/* MSVDX_CORE, CR_BE_MSVDX_WDTIMER, BE_WDT_COUNTER */
+#define BE_MSVDX_WDTIMER_BE_WDT_COUNTER_MASK                   (0x0000FFFF)
+#define BE_MSVDX_WDTIMER_BE_WDT_COUNTER_LSBMASK                (0x0000FFFF)
+#define BE_MSVDX_WDTIMER_BE_WDT_COUNTER_SHIFT                  (0)
+
+#define BE_MSVDX_WDT_COMPAREMATCH_OFFSET               (0x0678)
+/* MSVDX_CORE, CR_BE_MSVDX_WDT_COMPAREMATCH, BE_WDT_CM0 */
+#define BE_MSVDX_WDT_COMPAREMATCH_BE_WDT_CM0_MASK              (0x0000FFFF)
+#define BE_MSVDX_WDT_COMPAREMATCH_BE_WDT_CM0_LSBMASK           (0x0000FFFF)
+#define BE_MSVDX_WDT_COMPAREMATCH_BE_WDT_CM0_SHIFT             (0)
+
+/* watchdog end */
+/************************** MSVDX Core Registers end *************************/
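As an illustration of how these MASK/LSBMASK/SHIFT triplets compose with REGIO_WRITE_FIELD, a hypothetical front-end watchdog enable (the field values are placeholders, not recommended settings):

	uint32_t ctrl = 0;

	REGIO_WRITE_FIELD(ctrl, FE_MSVDX_WDT_CONTROL, FE_WDT_CLKDIV_SELECT, 7);
	REGIO_WRITE_FIELD(ctrl, FE_MSVDX_WDT_CONTROL, FE_WDT_CLEAR_SELECT, 1);
	REGIO_WRITE_FIELD(ctrl, FE_MSVDX_WDT_CONTROL, FE_WDT_ENABLE, 1);
	VED_REG_WRITE32(ctrl, FE_MSVDX_WDT_CONTROL_OFFSET);
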
+
+/******************* VEC Registers: 0x0800 - 0x0FFF (2048B) ******************/
+#define VEC_SHIFTREG_CONTROL_OFFSET                    (0x0818)
+#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_MASK             (0x00000300)
+#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_SHIFT            (8)
+/************************** VEC Registers end **************************/
+
+/************************** RENDEC Registers **************************/
+#define MSVDX_RENDEC_CONTROL0_OFFSET                   (0x0868)
+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK           (0x00000001)
+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT          (0)
+
+#define MSVDX_RENDEC_CONTROL1_OFFSET                   (0x086C)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK    (0x000000FF)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT   (0)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK         (0x000C0000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT                (18)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK         (0x00030000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT                (16)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK      (0x01000000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT     (24)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DEC_DISABLE_MASK          (0x08000000)
+#define MSVDX_RENDEC_CONTROL1_RENDEC_DEC_DISABLE_SHIFT         (27)
+
+#define MSVDX_RENDEC_BUFFER_SIZE_OFFSET                        (0x0870)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK      (0x0000FFFF)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT     (0)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK      (0xFFFF0000)
+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT     (16)
+
+#define MSVDX_RENDEC_BASE_ADDR0_OFFSET                 (0x0874)
+
+#define MSVDX_RENDEC_BASE_ADDR1_OFFSET                 (0x0878)
+
+#define MSVDX_RENDEC_READ_DATA_OFFSET                  (0x0898)
+
+#define MSVDX_RENDEC_CONTEXT0_OFFSET                   (0x0950)
+
+#define MSVDX_RENDEC_CONTEXT1_OFFSET                   (0x0954)
+
+#define MSVDX_RENDEC_CONTEXT2_OFFSET                   (0x0958)
+
+#define MSVDX_RENDEC_CONTEXT3_OFFSET                   (0x095C)
+
+#define MSVDX_RENDEC_CONTEXT4_OFFSET                   (0x0960)
+
+#define MSVDX_RENDEC_CONTEXT5_OFFSET                   (0x0964)
+/*************************** RENDEC registers end ****************************/
+
+/******************** CMD Register: 0x1000 - 0x1FFF (4kB) ********************/
+#define MSVDX_CMDS_END_SLICE_PICTURE_OFFSET            (0x1404)
+/****************************** CMD Register end *****************************/
+
+/******************** VEC Local RAM: 0x2000 - 0x2FFF (4kB) *******************/
+/* vec local MEM save/restore */
+#define VEC_LOCAL_MEM_BYTE_SIZE (4 * 1024)
+#define VEC_LOCAL_MEM_OFFSET 0x2000
+
+#define MSVDX_EXT_FW_ERROR_STATE               (0x2CC4)
+/* Decode operations are in progress or not complete */
+#define MSVDX_FW_STATUS_IN_PROGRESS			0x00000000
+/* No work is underway on the hardware; it is idle and can be powered down */
+#define MSVDX_FW_STATUS_HW_IDLE				0x00000001
+/* Panic; waiting to be reloaded */
+#define MSVDX_FW_STATUS_HW_PANIC			0x00000003
+
+/*
+ * This defines the MSVDX communication buffer
+ */
+#define MSVDX_COMMS_SIGNATURE_VALUE    (0xA5A5A5A5)    /*!< Signature value */
+/*!< Host buffer size (in 32-bit words) */
+#define NUM_WORDS_HOST_BUF             (100)
+/*!< MTX buffer size (in 32-bit words) */
+#define NUM_WORDS_MTX_BUF              (100)
+
+#define MSVDX_COMMS_AREA_ADDR			(0x02fe0)
+#define MSVDX_COMMS_CORE_WTD			(MSVDX_COMMS_AREA_ADDR - 0x08)
+#define MSVDX_COMMS_ERROR_TRIG			(MSVDX_COMMS_AREA_ADDR - 0x08)
+#define MSVDX_COMMS_FIRMWARE_ID			(MSVDX_COMMS_AREA_ADDR - 0x0C)
+#define MSVDX_COMMS_OFFSET_FLAGS		(MSVDX_COMMS_AREA_ADDR + 0x18)
+#define MSVDX_COMMS_MSG_COUNTER			(MSVDX_COMMS_AREA_ADDR - 0x04)
+#define MSVDX_COMMS_FW_STATUS			(MSVDX_COMMS_AREA_ADDR - 0x10)
+#define MSVDX_COMMS_SIGNATURE			(MSVDX_COMMS_AREA_ADDR + 0x00)
+#define MSVDX_COMMS_TO_HOST_BUF_SIZE		(MSVDX_COMMS_AREA_ADDR + 0x04)
+#define MSVDX_COMMS_TO_HOST_RD_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x08)
+#define MSVDX_COMMS_TO_HOST_WRT_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x0C)
+#define MSVDX_COMMS_TO_MTX_BUF_SIZE		(MSVDX_COMMS_AREA_ADDR + 0x10)
+#define MSVDX_COMMS_TO_MTX_RD_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x14)
+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x18)
+#define MSVDX_COMMS_TO_MTX_WRT_INDEX		(MSVDX_COMMS_AREA_ADDR + 0x1C)
+#define MSVDX_COMMS_TO_HOST_BUF			(MSVDX_COMMS_AREA_ADDR + 0x20)
+#define MSVDX_COMMS_TO_MTX_BUF \
+			(MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
+
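The RD/WRT index pairs above imply simple ring buffers in VEC local RAM; a sketch of draining one word from the to-host ring under that assumption (the helper is hypothetical, the real message loop is in ved_cmd.c):

	static uint32_t example_comms_pop_word(struct drm_ipvr_private *dev_priv)
	{
		uint32_t rd = VED_REG_READ32(MSVDX_COMMS_TO_HOST_RD_INDEX);
		uint32_t wrt = VED_REG_READ32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
		uint32_t val = 0;

		if (rd != wrt) {	/* ring not empty */
			val = VED_REG_READ32(MSVDX_COMMS_TO_HOST_BUF + (rd << 2));
			VED_REG_WRITE32((rd + 1) % NUM_WORDS_HOST_BUF,
					MSVDX_COMMS_TO_HOST_RD_INDEX);
		}
		return val;
	}
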
+/*
+ * FW flags: these shall be written by the host prior to starting the firmware.
+ */
+/* Disable firmware-based watchdog timers. */
+#define DSIABLE_FW_WDT				0x0008
+	/* Abort immediately on errors. */
+#define ABORT_ON_ERRORS_IMMEDIATE		0x0010
+	/* Abort faulted slices as soon as possible. Non-faulted slices may
+	 * still reach the backend, but a faulted slice will not be allowed
+	 * to start. */
+#define ABORT_FAULTED_SLICE_IMMEDIATE		0x0020
+	/* Flush faulted slices - debug option. */
+#define FLUSH_FAULTED_SLICES			0x0080
+	/* Don't interrupt the host when the to-host buffer becomes full.
+	 * Stall until space is freed up by the host on its own. */
+#define NOT_INTERRUPT_WHEN_HOST_IS_FULL		0x0200
+	/* A contiguity warning message will be sent to the host for streams
+	 * with the FW_ERROR_DETECTION_AND_RECOVERY flag set if non-contiguous
+	 * macroblocks are detected. */
+#define NOT_ENABLE_ON_HOST_CONCEALMENT		0x0400
+	/* Return the VDEB signature value in the completion message.
+	 * This requires a VDEB data flush every slice for constant results. */
+#define RETURN_VDEB_DATA_IN_COMPLETION		0x0800
+	/* Disable auto clock gating. */
+#define DSIABLE_Auto_CLOCK_GATING		0x1000
+	/* Disable the idle GPIO signal. */
+#define DSIABLE_IDLE_GPIO_SIG			0x2000
+	/* Enable setup, FE and BE timestamps in the completion message.
+	 * Used by IMG only for firmware profiling. */
+#define ENABLE_TIMESTAMPS_IN_COMPLETE_MSG	0x4000
+	/* Disable off-host second-pass deblocking in the firmware. */
+#define DSIABLE_OFFHOST_SECOND_DEBLOCK		0x20000
+	/* Sum the address signature into the data signature
+	 * when returning VDEB signature values. */
+#define SUM_ADD_SIG_TO_DATA_SIGNATURE		0x80000
+
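A hypothetical flag word composed from the definitions above and handed to the firmware before start (which flags a production driver should set is policy, not shown here):

	uint32_t fw_flags = DSIABLE_FW_WDT | ABORT_FAULTED_SLICE_IMMEDIATE;

	VED_REG_WRITE32(fw_flags, MSVDX_COMMS_OFFSET_FLAGS);
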
+/*
+#define MSVDX_COMMS_AREA_END   \
+  (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
+*/
+#define MSVDX_COMMS_AREA_END 0x03000
+
+#if (MSVDX_COMMS_AREA_END != 0x03000)
+#error "MSVDX_COMMS_AREA_END must equal 0x03000"
+#endif
+/***************************** VEC Local RAM end *****************************/
+
+#endif
-- 
1.9.1
