From: Rob Clark <robdcl...@chromium.org>

Introduce a mechanism to count the worst-case number of page-table pages
required by a VM_BIND op, so that they can be pre-allocated before the
job runs.
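
To make the worst-case counting concrete (hypothetical addresses; the
expression matches msm_iommu_pagetable_prealloc_count() below), an 8KB
map at iova 0x1001ff000 crosses a 2MB boundary, so it may need one L1,
one L2 and two L3 tables:

    ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39)  /* 1 L1 */
  + ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30)  /* 1 L2 */
  + ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21)  /* 2 L3 */
                                                                            /* = 4 pages */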

Note that previously we would also have had to account for allocations
in unmap, when splitting a block.  That behavior was removed in commit
33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap behavior").

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
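A rough sketch (kept out of the commit message; condensed from the
changes below, with error handling and locking omitted) of how the new
prealloc hooks are meant to be driven from the VM_BIND submit path.
The names vm_bind_prepare_example() and bo_op() are made up for
illustration:

    static int vm_bind_prepare_example(struct msm_gem_submit *submit)
    {
            struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;

            for (int i = 0; i < submit->nr_bos; i++) {
                    /* bo_op() is a stand-in for however the op is decoded
                     * from submit->bos[i] in msm_vma_job_prepare().  Only
                     * MAP ops consume pgtable pages; UNMAP does not:
                     */
                    if (bo_op(submit, i) == MSM_SUBMIT_BO_OP_UNMAP)
                            continue;

                    /* Accumulate the worst-case L1/L2/L3 table count: */
                    mmu->funcs->prealloc_count(mmu, &submit->prealloc,
                                               submit->bos[i].iova,
                                               submit->bos[i].range);
            }

            /* Bulk-allocate the reservation from the per-iommu pt_cache: */
            return mmu->funcs->prealloc_allocate(mmu, &submit->prealloc);
    }

While the job runs, the custom io-pgtable alloc()/free() ops hand out
pages from this reservation (only the root table still comes from
alloc_pages_node()), and any pages left unused are returned to the
cache by prealloc_cleanup() when the job is cleaned up.
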
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c |   2 +-
 drivers/gpu/drm/msm/msm_gem.h         |   7 +-
 drivers/gpu/drm/msm/msm_gem_submit.c  |   5 +-
 drivers/gpu/drm/msm/msm_gem_vma.c     |  18 ++-
 drivers/gpu/drm/msm/msm_iommu.c       | 201 +++++++++++++++++++++++++-
 drivers/gpu/drm/msm/msm_mmu.h         |  36 ++++-
 6 files changed, 260 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index ca3247f845b5..9f66ad5bf0dc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2267,7 +2267,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
 {
        struct msm_mmu *mmu;
 
-       mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu);
+       mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu, kernel_managed);
 
        if (IS_ERR(mmu))
                return ERR_CAST(mmu);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d2ffaa11ec1a..117f0e35e628 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -7,6 +7,7 @@
 #ifndef __MSM_GEM_H__
 #define __MSM_GEM_H__
 
+#include "msm_mmu.h"
 #include <linux/kref.h>
 #include <linux/dma-resv.h>
 #include "drm/drm_exec.h"
@@ -115,7 +116,7 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
 
 void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
 
-void msm_vma_job_prepare(struct msm_gem_submit *submit);
+int msm_vma_job_prepare(struct msm_gem_submit *submit);
 void msm_vma_job_cleanup(struct msm_gem_submit *submit);
 
 struct msm_fence_context;
@@ -348,6 +349,10 @@ struct msm_gem_submit {
         */
        struct list_head preallocated_vmas;
 
+       /* Tracking for pre-allocated pgtable pages.
+        */
+       struct msm_mmu_prealloc prealloc;
+
        struct pid *pid;    /* submitting process */
        bool bos_pinned : 1;
        bool fault_dumped:1;/* Limit devcoredump dumping to one per submit */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index a9b3e6692db3..ed0265ac4e1d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -916,8 +916,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        }
 
        if (submit_is_vmbind(submit)) {
-               msm_vma_job_prepare(submit);
-               ret = submit_get_pages(submit);
+               ret = msm_vma_job_prepare(submit);
+               if (!ret)
+                       ret = submit_get_pages(submit);
        } else {
                ret = submit_pin_vmas(submit);
        }
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index b1808d95002f..554ec93456a0 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -270,9 +270,10 @@ static const struct drm_sched_backend_ops msm_vm_bind_ops = {
  *
  * Called after BOs are locked.
  */
-void
+int
 msm_vma_job_prepare(struct msm_gem_submit *submit)
 {
+       struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
        unsigned num_prealloc_vmas = 0;
 
        for (int i = 0; i < submit->nr_bos; i++) {
@@ -299,13 +300,23 @@ msm_vma_job_prepare(struct msm_gem_submit *submit)
                 * OP_UNMAP could trigger a remap with either a prev or
                 * next VMA, but not both.
                 */
-               num_prealloc_vmas += (op == MSM_SUBMIT_BO_OP_UNMAP) ? 1 : 3;
+               if (op != MSM_SUBMIT_BO_OP_UNMAP) {
+                       num_prealloc_vmas += 3;
+
+                       mmu->funcs->prealloc_count(mmu, &submit->prealloc,
+                               submit->bos[i].iova,
+                               submit->bos[i].range);
+               } else {
+                       num_prealloc_vmas += 1;
+               }
        }
 
        while (num_prealloc_vmas-- > 0) {
                struct msm_gem_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                list_add_tail(&vma->base.rb.entry, &submit->preallocated_vmas);
        }
+
+       return mmu->funcs->prealloc_allocate(mmu, &submit->prealloc);
 }
 
 /**
@@ -317,6 +328,7 @@ msm_vma_job_prepare(struct msm_gem_submit *submit)
 void
 msm_vma_job_cleanup(struct msm_gem_submit *submit)
 {
+       struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;
        struct drm_gpuva *vma;
 
        for (int i = 0; i < submit->nr_bos; i++) {
@@ -331,6 +343,8 @@ msm_vma_job_cleanup(struct msm_gem_submit *submit)
                list_del(&vma->rb.entry);
                kfree(to_msm_vma(vma));
        }
+
+       mmu->funcs->prealloc_cleanup(mmu, &submit->prealloc);
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 756bd55ee94f..ff04f2451d1d 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -6,6 +6,7 @@
 
 #include <linux/adreno-smmu-priv.h>
 #include <linux/io-pgtable.h>
+#include <linux/kmemleak.h>
 #include "msm_drv.h"
 #include "msm_mmu.h"
 
@@ -14,6 +15,8 @@ struct msm_iommu {
        struct iommu_domain *domain;
        atomic_t pagetables;
        struct page *prr_page;
+
+       struct kmem_cache *pt_cache;
 };
 
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -27,6 +30,12 @@ struct msm_iommu_pagetable {
        unsigned long pgsize_bitmap;    /* Bitmap of page sizes in use */
        phys_addr_t ttbr;
        u32 asid;
+
+       /** @root_page_table: Stores the root page table pointer. */
+       void *root_page_table;
+
+       /** @tblsz: size of a single page table (one pgtable level), in bytes */
+       size_t tblsz;
 };
 static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
 {
@@ -273,7 +282,145 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[
        return 0;
 }
 
+static void
+msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+                                  uint64_t iova, size_t len)
+{
+       u64 pt_count;
+
+       /*
+        * L1, L2 and L3 page tables.
+        *
+        * We could optimize L3 allocation by iterating over the sgt and merging
+        * 2M contiguous blocks, but it's simpler to over-provision and return
+        * the pages if they're not used.
+        *
+        * The first level descriptor (v8 / v7-lpae page table format) encodes
+        * 30 bits of address.  The second level encodes 29.  For the 3rd it is
+        * 39.
+        *
+        * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
+        */
+       pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
+                  ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
+                  ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);
+
+       p->count += pt_count;
+}
+
+static struct kmem_cache *
+get_pt_cache(struct msm_mmu *mmu)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       return to_msm_iommu(pagetable->parent)->pt_cache;
+}
+
+static int
+msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+       struct kmem_cache *pt_cache = get_pt_cache(mmu);
+       int ret;
+
+       p->pages = kcalloc(p->count, sizeof(p->pages), GFP_KERNEL);
+       if (!p->pages)
+               return -ENOMEM;
+
+       ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
+       if (ret != p->count) {
+               p->count = ret;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void
+msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
+{
+       struct kmem_cache *pt_cache = get_pt_cache(mmu);
+       uint32_t remaining_pt_count = p->count - p->ptr;
+
+       kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
+       kfree(p->pages);
+}
+
+/**
+ * alloc_pt() - Custom page table allocator
+ * @cookie: Cookie passed at page table allocation time.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ * @gfp: GFP flags.
+ *
+ * We want a custom allocator so we can use a cache for page table
+ * allocations and amortize the cost of the over-reservation that's
+ * done to allow asynchronous VM operations.
+ *
+ * Return: non-NULL on success, NULL if the allocation failed for any
+ * reason.
+ */
+static void *
+msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
+{
+       struct msm_iommu_pagetable *pagetable = cookie;
+       struct msm_mmu_prealloc *p = pagetable->base.prealloc;
+       void *page;
+
+       /* Allocation of the root page table happening during init. */
+       if (unlikely(!pagetable->root_page_table)) {
+               struct page *p;
+
+               p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
+                                    gfp | __GFP_ZERO, get_order(size));
+               page = p ? page_address(p) : NULL;
+               pagetable->root_page_table = page;
+               return page;
+       }
+
+       if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
+               return NULL;
+
+       page = p->pages[p->ptr++];
+       memset(page, 0, size);
+
+       /*
+        * Page table entries don't use virtual addresses, which trips out
+        * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
+        * are mixed with other fields, and I fear kmemleak won't detect that
+        * either.
+        *
+        * Let's just ignore memory passed to the page-table driver for now.
+        */
+       kmemleak_ignore(page);
+
+       return page;
+}
+
+
+/**
+ * free_pt() - Custom page table free function
+ * @cookie: Cookie passed at page table allocation time.
+ * @data: Page table to free.
+ * @size: Size of the page table. This size should be fixed,
+ * and determined at creation time based on the granule size.
+ */
+static void
+msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
+{
+       struct msm_iommu_pagetable *pagetable = cookie;
+
+       if (unlikely(pagetable->root_page_table == data)) {
+               free_pages((unsigned long)data, get_order(size));
+               pagetable->root_page_table = NULL;
+               return;
+       }
+
+       kmem_cache_free(get_pt_cache(&pagetable->base), data);
+}
+
 static const struct msm_mmu_funcs pagetable_funcs = {
+               .prealloc_count = msm_iommu_pagetable_prealloc_count,
+               .prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
+               .prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
                .map = msm_iommu_pagetable_map,
                .unmap = msm_iommu_pagetable_unmap,
                .destroy = msm_iommu_pagetable_destroy,
@@ -324,7 +471,19 @@ static const struct iommu_flush_ops tlb_ops = {
 static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg);
 
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+
+static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
+{
+       int pg_shift, bits_per_level;
+
+       pg_shift = __ffs(cfg->pgsize_bitmap);
+       /* arm_lpae_iopte is u64: */
+       bits_per_level = pg_shift - ilog2(sizeof(u64));
+
+       return sizeof(u64) << bits_per_level;
+}
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
 {
        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
        struct msm_iommu *iommu = to_msm_iommu(parent);
@@ -358,6 +517,36 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
        ttbr0_cfg.tlb = &tlb_ops;
 
+       if (!kernel_managed) {
+               /*
+                * With userspace managed VM (aka VM_BIND), we need to pre-
+                * allocate pages ahead of time for map/unmap operations,
+                * handing them to io-pgtable via custom alloc/free ops as
+                * needed:
+                */
+               ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
+               ttbr0_cfg.free  = msm_iommu_pagetable_free_pt;
+
+               pagetable->tblsz = get_tblsz(&ttbr0_cfg);
+
+               /*
+                * Restrict to single page granules.  Otherwise we may run
+                * into a situation where userspace wants to unmap/remap
+                * only a part of a larger block mapping, which is not
+                * possible without unmapping the entire block.  Which in
+                * turn could cause faults if the GPU is accessing other
+                * parts of the block mapping.
+                *
+                * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
+                * Remove split on unmap behavior") this was handled in
+                * io-pgtable-arm.  But this apparently does not work
+                * correctly on SMMUv3.
+                */
+               WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
+               ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
+       }
+
+       pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
        pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
                &ttbr0_cfg, pagetable);
 
@@ -401,7 +590,6 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        /* Needed later for TLB flush */
        pagetable->parent = parent;
        pagetable->tlb = ttbr1_cfg->tlb;
-       pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
        pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
        pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
 
@@ -509,6 +697,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        iommu_domain_free(iommu->domain);
+       kmem_cache_destroy(iommu->pt_cache);
        kfree(iommu);
 }
 
@@ -583,6 +772,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
                return mmu;
 
        iommu = to_msm_iommu(mmu);
+       if (adreno_smmu && adreno_smmu->cookie) {
+               const struct io_pgtable_cfg *cfg =
+                       adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+               size_t tblsz = get_tblsz(cfg);
+
+               iommu->pt_cache =
+                       kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
+       }
        iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
 
        /* Enable stall on iommu fault: */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index c874852b7331..24ef04d267a6 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -9,8 +9,16 @@
 
 #include <linux/iommu.h>
 
+struct msm_mmu_prealloc;
+struct msm_mmu;
+struct msm_gpu;
+
 struct msm_mmu_funcs {
        void (*detach)(struct msm_mmu *mmu);
+       void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
+                              uint64_t iova, size_t len);
+       int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
+       void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
        int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
                        size_t off, size_t len, int prot);
        int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
@@ -25,12 +33,38 @@ enum msm_mmu_type {
        MSM_MMU_IOMMU_PAGETABLE,
 };
 
+/**
+ * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
+ */
+struct msm_mmu_prealloc {
+       /** @count: Number of pages reserved. */
+       uint32_t count;
+       /** @ptr: Index of first unused page in @pages */
+       uint32_t ptr;
+       /**
+        * @pages: Array of pages preallocated for MMU table updates.
+        *
+        * After a VM operation, there might be free pages remaining in this
+        * array (since the amount allocated is a worst-case).  These are
+        * returned to the pt_cache at mmu->prealloc_cleanup().
+        */
+       void **pages;
+};
+
 struct msm_mmu {
        const struct msm_mmu_funcs *funcs;
        struct device *dev;
        int (*handler)(void *arg, unsigned long iova, int flags, void *data);
        void *arg;
        enum msm_mmu_type type;
+
+       /**
+        * @prealloc: pre-allocated pages for pgtable
+        *
+        * Set while a VM_BIND job is running, serialized under
+        * msm_gem_vm::op_lock.
+        */
+       struct msm_mmu_prealloc *prealloc;
 };
 
 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
@@ -52,7 +86,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
        mmu->handler = handler;
 }
 
-struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed);
 
 int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
                               int *asid);
-- 
2.48.1
