From: Rob Clark <robdcl...@chromium.org>

Core drm already provides a helper to dump vm state.  We just need to
wire up tracking of VMs and giving userspace VMs a suitable name.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c |  2 +-
 drivers/gpu/drm/msm/msm_debugfs.c     | 20 ++++++++++++++++++++
 drivers/gpu/drm/msm/msm_drv.c         |  3 +++
 drivers/gpu/drm/msm/msm_drv.h         |  4 ++++
 drivers/gpu/drm/msm/msm_gem.h         |  8 ++++++++
 drivers/gpu/drm/msm/msm_gem_vma.c     | 23 +++++++++++++++++++++++
 6 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 9f66ad5bf0dc..3189a6f75d74 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2272,7 +2272,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu, bool 
kernel_managed)
        if (IS_ERR(mmu))
                return ERR_CAST(mmu);
 
-       return msm_gem_vm_create(gpu->dev, mmu, "gpu", 0x100000000ULL,
+       return msm_gem_vm_create(gpu->dev, mmu, NULL, 0x100000000ULL,
                                 adreno_private_vm_size(gpu), kernel_managed);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c 
b/drivers/gpu/drm/msm/msm_debugfs.c
index 7ab607252d18..bde25981254f 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -10,6 +10,7 @@
 #include <linux/fault-inject.h>
 
 #include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
 #include <drm/drm_framebuffer.h>
@@ -238,6 +239,24 @@ static int msm_mm_show(struct seq_file *m, void *arg)
        return 0;
 }
 
+static int msm_gpuvas_show(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct msm_gem_vm *vm;
+
+       mutex_lock(&priv->vm_lock);
+       list_for_each_entry(vm, &priv->vms, node) {
+               mutex_lock(&vm->op_lock);
+               drm_debugfs_gpuva_info(m, &vm->base);
+               mutex_unlock(&vm->op_lock);
+       }
+       mutex_unlock(&priv->vm_lock);
+
+       return 0;
+}
+
 static int msm_fb_show(struct seq_file *m, void *arg)
 {
        struct drm_info_node *node = m->private;
@@ -266,6 +285,7 @@ static int msm_fb_show(struct seq_file *m, void *arg)
 static struct drm_info_list msm_debugfs_list[] = {
                {"gem", msm_gem_show},
                { "mm", msm_mm_show },
+               DRM_DEBUGFS_GPUVA_INFO(msm_gpuvas_show, NULL),
 };
 
 static struct drm_info_list msm_kms_debugfs_list[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5b5a64c8dddb..70c3a3712a3e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -124,6 +124,9 @@ static int msm_drm_init(struct device *dev, const struct 
drm_driver *drv)
                goto err_put_dev;
        }
 
+       INIT_LIST_HEAD(&priv->vms);
+       mutex_init(&priv->vm_lock);
+
        INIT_LIST_HEAD(&priv->objects);
        mutex_init(&priv->obj_lock);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b0add236cbb3..83d2a480cfcf 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -112,6 +112,10 @@ struct msm_drm_private {
         */
        atomic64_t total_mem;
 
+       /** @vms: List of all VMs, protected by @vm_lock */
+       struct list_head vms;
+       struct mutex vm_lock;
+
        /**
         * List of all GEM objects (mainly for debugfs, protected by obj_lock
         * (acquire before per GEM object lock)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 7f6315a66751..0409d35ebb32 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -54,6 +54,9 @@ struct msm_gem_vm {
        /** @base: Inherit from drm_gpuvm. */
        struct drm_gpuvm base;
 
+       /** @name: Storage for dynamically generated VM name for user VMs */
+       char name[32];
+
        /**
         * @sched: Scheduler used for asynchronous VM_BIND request.
         *
@@ -95,6 +98,11 @@ struct msm_gem_vm {
         */
        struct pid *pid;
 
+       /**
+        * @node: List node in msm_drm_private.vms list
+        */
+       struct list_head node;
+
        /** @faults: the number of GPU hangs associated with this address space 
*/
        int faults;
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c 
b/drivers/gpu/drm/msm/msm_gem_vma.c
index 09d4746248c2..8d0c4d3afa13 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -14,6 +14,11 @@ static void
 msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 {
        struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
+       struct msm_drm_private *priv = gpuvm->drm->dev_private;
+
+       mutex_lock(&priv->vm_lock);
+       list_del(&vm->node);
+       mutex_unlock(&priv->vm_lock);
 
        drm_mm_takedown(&vm->mm);
        if (vm->mmu)
@@ -640,6 +645,7 @@ struct drm_gpuvm *
 msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char 
*name,
                  u64 va_start, u64 va_size, bool managed)
 {
+       struct msm_drm_private *priv = drm->dev_private;
        enum drm_gpuvm_flags flags = managed ? DRM_GPUVM_VA_WEAK_REF : 0;
        struct msm_gem_vm *vm;
        struct drm_gem_object *dummy_gem;
@@ -673,6 +679,19 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu 
*mmu, const char *name,
                        goto err_free_dummy;
        }
 
+       /* For userspace pgtables, generate a VM name based on comm and PID nr: 
*/
+       if (!name) {
+               char tmpname[TASK_COMM_LEN];
+               struct pid *pid = task_tgid(current);
+
+               get_task_comm(tmpname, current);
+               rcu_read_lock();
+               snprintf(vm->name, sizeof(vm->name), "%s[%d]", tmpname, 
pid_nr(pid));
+               rcu_read_unlock();
+
+               name = vm->name;
+       }
+
        drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
                       va_start, va_size, 0, 0, &msm_gpuvm_ops);
        drm_gem_object_put(dummy_gem);
@@ -686,6 +705,10 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu 
*mmu, const char *name,
 
        drm_mm_init(&vm->mm, va_start, va_size);
 
+       mutex_lock(&priv->vm_lock);
+       list_add_tail(&vm->node, &priv->vms);
+       mutex_unlock(&priv->vm_lock);
+
        return &vm->base;
 
 err_free_dummy:
-- 
2.48.1

Reply via email to