From: The etnaviv authors <dri-devel@lists.freedesktop.org>

This adds the GPU MMU handling functionality, used to drive the MMUs
attached to each GPU core.

Signed-off-by: Christian Gmeiner <christian.gmeiner at gmail.com>
Signed-off-by: Russell King <rmk+kernel at arm.linux.org.uk>
Signed-off-by: Lucas Stach <l.stach at pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c    | 240 +++++++++++++++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_iommu.h    |  28 +++
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c |  33 ++++
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h |  25 +++
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c      | 299 +++++++++++++++++++++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h      |  71 +++++++
 6 files changed, 696 insertions(+)
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_mmu.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_mmu.h

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c 
b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
new file mode 100644
index 000000000000..522cfd447892
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+#include "etnaviv_iommu.h"
+#include "state_hi.xml.h"
+
+#define PT_SIZE                SZ_2M
+#define PT_ENTRIES     (PT_SIZE / sizeof(u32))
+
+#define GPU_MEM_START  0x80000000
+
+struct etnaviv_iommu_domain_pgtable {
+       u32 *pgtable;
+       dma_addr_t paddr;
+};
+
+struct etnaviv_iommu_domain {
+       struct iommu_domain domain;
+       struct device *dev;
+       void *bad_page_cpu;
+       dma_addr_t bad_page_dma;
+       struct etnaviv_iommu_domain_pgtable pgtable;
+       spinlock_t map_lock;
+};
+
+static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain 
*domain)
+{
+       return container_of(domain, struct etnaviv_iommu_domain, domain);
+}
+
+static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
+                        size_t size)
+{
+       pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, 
GFP_KERNEL);
+       if (!pgtable->pgtable)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
+                        size_t size)
+{
+       dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
+}
+
+static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
+                          unsigned long iova)
+{
+       /* calculate index into page table */
+       unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+       phys_addr_t paddr;
+
+       paddr = pgtable->pgtable[index];
+
+       return paddr;
+}
+
+static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
+                         unsigned long iova, phys_addr_t paddr)
+{
+       /* calculate index into page table */
+       unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
+
+       pgtable->pgtable[index] = paddr;
+}
+
+static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+{
+       u32 *p;
+       int ret, i;
+
+       etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+                                                 SZ_4K,
+                                                 &etnaviv_domain->bad_page_dma,
+                                                 GFP_KERNEL);
+       if (!etnaviv_domain->bad_page_cpu)
+               return -ENOMEM;
+
+       p = etnaviv_domain->bad_page_cpu;
+       for (i = 0; i < SZ_4K / 4; i++)
+               *p++ = 0xdead55aa;
+
+       ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
+       if (ret < 0) {
+               dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                                 etnaviv_domain->bad_page_cpu,
+                                 etnaviv_domain->bad_page_dma);
+               return ret;
+       }
+
+       for (i = 0; i < PT_ENTRIES; i++)
+               etnaviv_domain->pgtable.pgtable[i] =
+                       etnaviv_domain->bad_page_dma;
+
+       spin_lock_init(&etnaviv_domain->map_lock);
+
+       return 0;
+}
+
+static void etnaviv_domain_free(struct iommu_domain *domain)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+       pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
+
+       dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+                         etnaviv_domain->bad_page_cpu,
+                         etnaviv_domain->bad_page_dma);
+
+       kfree(etnaviv_domain);
+}
+
+static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
+          phys_addr_t paddr, size_t size, int prot)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+       if (size != SZ_4K)
+               return -EINVAL;
+
+       spin_lock(&etnaviv_domain->map_lock);
+       pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
+       spin_unlock(&etnaviv_domain->map_lock);
+
+       return 0;
+}
+
+static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
+       unsigned long iova, size_t size)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+       if (size != SZ_4K)
+               return -EINVAL;
+
+       spin_lock(&etnaviv_domain->map_lock);
+       pgtable_write(&etnaviv_domain->pgtable, iova,
+                     etnaviv_domain->bad_page_dma);
+       spin_unlock(&etnaviv_domain->map_lock);
+
+       return SZ_4K;
+}
+
+static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
+       dma_addr_t iova)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+       return pgtable_read(&etnaviv_domain->pgtable, iova);
+}
+
+static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+{
+       return PT_SIZE;
+}
+
+static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+
+       memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+}
+
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+       .ops = {
+               .domain_free = etnaviv_domain_free,
+               .map = etnaviv_iommuv1_map,
+               .unmap = etnaviv_iommuv1_unmap,
+               .iova_to_phys = etnaviv_iommu_iova_to_phys,
+               .pgsize_bitmap = SZ_4K,
+       },
+       .dump_size = etnaviv_iommuv1_dump_size,
+       .dump = etnaviv_iommuv1_dump,
+};
+
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
+       struct iommu_domain *domain)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+       u32 pgtable;
+
+       /* set page table address in MC */
+       pgtable = (u32)etnaviv_domain->pgtable.paddr;
+
+       gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
+       gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
+       gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
+       gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
+       gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
+}
+
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
+{
+       struct etnaviv_iommu_domain *etnaviv_domain;
+       int ret;
+
+       etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
+       if (!etnaviv_domain)
+               return NULL;
+
+       etnaviv_domain->dev = gpu->dev;
+
+       etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
+       etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+       etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
+       etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + 
PT_ENTRIES * SZ_4K - 1;
+
+       ret = __etnaviv_iommu_init(etnaviv_domain);
+       if (ret)
+               goto out_free;
+
+       return &etnaviv_domain->domain;
+
+out_free:
+       kfree(etnaviv_domain);
+       return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h 
b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
new file mode 100644
index 000000000000..cf45503f6b6f
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_IOMMU_H__
+#define __ETNAVIV_IOMMU_H__
+
+#include <linux/iommu.h>
+struct etnaviv_gpu;
+
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
+       struct iommu_domain *domain);
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+
+#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 
b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
new file mode 100644
index 000000000000..fbb4aed3dc80
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_iommu.h"
+#include "state_hi.xml.h"
+
+
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
+{
+       /* TODO */
+       return NULL;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h 
b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
new file mode 100644
index 000000000000..603ea41c5389
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_IOMMU_V2_H__
+#define __ETNAVIV_IOMMU_V2_H__
+
+#include <linux/iommu.h>
+struct etnaviv_gpu;
+
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+
+#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c 
b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
new file mode 100644
index 000000000000..6743bc648dc8
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
+
+static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device 
*dev,
+               unsigned long iova, int flags, void *arg)
+{
+       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+       return 0;
+}
+
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+               struct sg_table *sgt, unsigned len, int prot)
+{
+       struct iommu_domain *domain = iommu->domain;
+       struct scatterlist *sg;
+       unsigned int da = iova;
+       unsigned int i, j;
+       int ret;
+
+       if (!domain || !sgt)
+               return -EINVAL;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               u32 pa = sg_dma_address(sg) - sg->offset;
+               size_t bytes = sg_dma_len(sg) + sg->offset;
+
+               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+
+               ret = iommu_map(domain, da, pa, bytes, prot);
+               if (ret)
+                       goto fail;
+
+               da += bytes;
+       }
+
+       return 0;
+
+fail:
+       da = iova;
+
+       for_each_sg(sgt->sgl, sg, i, j) {
+               size_t bytes = sg_dma_len(sg) + sg->offset;
+
+               iommu_unmap(domain, da, bytes);
+               da += bytes;
+       }
+       return ret;
+}
+
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+               struct sg_table *sgt, unsigned len)
+{
+       struct iommu_domain *domain = iommu->domain;
+       struct scatterlist *sg;
+       unsigned int da = iova;
+       int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               size_t bytes = sg_dma_len(sg) + sg->offset;
+               size_t unmapped;
+
+               unmapped = iommu_unmap(domain, da, bytes);
+               if (unmapped < bytes)
+                       return unmapped;
+
+               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+
+               BUG_ON(!PAGE_ALIGNED(bytes));
+
+               da += bytes;
+       }
+
+       return 0;
+}
+
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+       struct etnaviv_vram_mapping *mapping)
+{
+       struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+       etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+                           etnaviv_obj->sgt, etnaviv_obj->base.size);
+       drm_mm_remove_node(&mapping->vram_node);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+       struct etnaviv_vram_mapping *mapping)
+{
+       struct etnaviv_vram_mapping *free = NULL;
+       struct sg_table *sgt = etnaviv_obj->sgt;
+       struct drm_mm_node *node;
+       int ret;
+
+       lockdep_assert_held(&etnaviv_obj->lock);
+
+       mutex_lock(&mmu->lock);
+
+       /* v1 MMU can optimize single entry (contiguous) scatterlists */
+       if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+               u32 iova;
+
+               iova = sg_dma_address(sgt->sgl) - memory_base;
+               if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+                       mapping->iova = iova;
+                       list_add_tail(&mapping->mmu_node, &mmu->mappings);
+                       mutex_unlock(&mmu->lock);
+                       return 0;
+               }
+       }
+
+       node = &mapping->vram_node;
+       while (1) {
+               struct etnaviv_vram_mapping *m, *n;
+               struct list_head list;
+               bool found;
+
+               ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+                       etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+                       DRM_MM_SEARCH_DEFAULT);
+
+               if (ret != -ENOSPC)
+                       break;
+
+               /*
+                * If we did not search from the start of the MMU region,
+                * try again in case there are free slots.
+                */
+               if (mmu->last_iova) {
+                       mmu->last_iova = 0;
+                       mmu->need_flush = true;
+                       continue;
+               }
+
+               /* Try to retire some entries */
+               drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
+
+               found = 0;
+               INIT_LIST_HEAD(&list);
+               list_for_each_entry(free, &mmu->mappings, mmu_node) {
+                       /* If this vram node has not been used, skip this. */
+                       if (!free->vram_node.mm)
+                               continue;
+
+                       /*
+                        * If the iova is pinned, then it's in-use,
+                        * so we must keep its mapping.
+                        */
+                       if (free->use)
+                               continue;
+
+                       list_add(&free->scan_node, &list);
+                       if (drm_mm_scan_add_block(&free->vram_node)) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (!found) {
+                       /* Nothing found, clean up and fail */
+                       list_for_each_entry_safe(m, n, &list, scan_node)
+                               BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
+                       break;
+               }
+
+               /*
+                * drm_mm does not allow any other operations while
+                * scanning, so we have to remove all blocks first.
+                * If drm_mm_scan_remove_block() returns false, we
+                * can leave the block pinned.
+                */
+               list_for_each_entry_safe(m, n, &list, scan_node)
+                       if (!drm_mm_scan_remove_block(&m->vram_node))
+                               list_del_init(&m->scan_node);
+
+               /*
+                * Unmap the blocks which need to be reaped from the MMU.
+                * Clear the mmu pointer to prevent the get_iova finding
+                * this mapping.
+                */
+               list_for_each_entry_safe(m, n, &list, scan_node) {
+                       etnaviv_iommu_remove_mapping(mmu, m);
+                       m->mmu = NULL;
+                       list_del_init(&m->mmu_node);
+                       list_del_init(&m->scan_node);
+               }
+
+               /*
+                * We removed enough mappings so that the new allocation will
+                * succeed.  Ensure that the MMU will be flushed before the
+                * associated commit requesting this mapping, and retry the
+                * allocation one more time.
+                */
+               mmu->need_flush = true;
+       }
+
+       if (ret < 0) {
+               mutex_unlock(&mmu->lock);
+               return ret;
+       }
+
+       mmu->last_iova = node->start + etnaviv_obj->base.size;
+       mapping->iova = node->start;
+       ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+                               IOMMU_READ | IOMMU_WRITE);
+
+       if (ret < 0) {
+               drm_mm_remove_node(node);
+               mutex_unlock(&mmu->lock);
+               return ret;
+       }
+
+       list_add_tail(&mapping->mmu_node, &mmu->mappings);
+       mutex_unlock(&mmu->lock);
+
+       return ret;
+}
+
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+       struct etnaviv_vram_mapping *mapping)
+{
+       WARN_ON(mapping->use);
+
+       mutex_lock(&mmu->lock);
+
+       /* If the vram node is on the mm, unmap and remove the node */
+       if (mapping->vram_node.mm == &mmu->mm)
+               etnaviv_iommu_remove_mapping(mmu, mapping);
+
+       list_del(&mapping->mmu_node);
+       mutex_unlock(&mmu->lock);
+}
+
+void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+{
+       drm_mm_takedown(&mmu->mm);
+       iommu_domain_free(mmu->domain);
+       kfree(mmu);
+}
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+       struct iommu_domain *domain, enum etnaviv_iommu_version version)
+{
+       struct etnaviv_iommu *mmu;
+
+       mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+       if (!mmu)
+               return ERR_PTR(-ENOMEM);
+
+       mmu->domain = domain;
+       mmu->gpu = gpu;
+       mmu->version = version;
+       mutex_init(&mmu->lock);
+       INIT_LIST_HEAD(&mmu->mappings);
+
+       drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
+                   domain->geometry.aperture_end -
+                     domain->geometry.aperture_start + 1);
+
+       iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+
+       return mmu;
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+{
+       struct etnaviv_iommu_ops *ops;
+
+       ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+       return ops->dump_size(iommu->domain);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+{
+       struct etnaviv_iommu_ops *ops;
+
+       ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
+
+       ops->dump(iommu->domain, buf);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h 
b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
new file mode 100644
index 000000000000..fff215a47630
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_MMU_H__
+#define __ETNAVIV_MMU_H__
+
+#include <linux/iommu.h>
+
+enum etnaviv_iommu_version {
+       ETNAVIV_IOMMU_V1 = 0,
+       ETNAVIV_IOMMU_V2,
+};
+
+struct etnaviv_gpu;
+struct etnaviv_vram_mapping;
+
+struct etnaviv_iommu_ops {
+       struct iommu_ops ops;
+       size_t (*dump_size)(struct iommu_domain *);
+       void (*dump)(struct iommu_domain *, void *);
+};
+
+struct etnaviv_iommu {
+       struct etnaviv_gpu *gpu;
+       struct iommu_domain *domain;
+
+       enum etnaviv_iommu_version version;
+
+       /* memory manager for GPU address area */
+       struct mutex lock;
+       struct list_head mappings;
+       struct drm_mm mm;
+       u32 last_iova;
+       bool need_flush;
+};
+
+struct etnaviv_gem_object;
+
+int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
+       int cnt);
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+       struct sg_table *sgt, unsigned len, int prot);
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+       struct sg_table *sgt, unsigned len);
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+       struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+       struct etnaviv_vram_mapping *mapping);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
+
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
+       struct iommu_domain *domain, enum etnaviv_iommu_version version);
+
+#endif /* __ETNAVIV_MMU_H__ */
-- 
2.6.2

Reply via email to