From: Rob Clark <robdcl...@chromium.org>

Add a SET_PARAM for userspace to request to manage the VM itself,
instead of getting a kernel-managed VM.

In order to transition to a userspace-managed VM, this param must be
set before any mappings are created.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
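(Note for reviewers, not part of the commit message: a rough sketch of
the expected userspace usage.  The enable_vm_bind() helper below is
hypothetical; the ioctl plumbing is the existing DRM_IOCTL_MSM_SET_PARAM
and MSM_PIPE_3D0 from msm_drm.h.)

  #include <sys/ioctl.h>
  #include <xf86drm.h>
  #include <drm/msm_drm.h>

  /* Hypothetical helper: opt in to a userspace-managed VM immediately
   * after opening the device, before anything (e.g. the first BO
   * allocation) has caused the kernel to lazily create the context's VM.
   */
  static int enable_vm_bind(int fd)
  {
          struct drm_msm_param req = {
                  .pipe  = MSM_PIPE_3D0,
                  .param = MSM_PARAM_EN_VM_BIND,
                  .value = 1,
          };

          /* Expected to fail with errno EBUSY if the VM has already
           * been created, or EINVAL on devices without per-process
           * pgtables.
           */
          return drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
  }
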
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c   |  4 ++--
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 15 +++++++++++++
 drivers/gpu/drm/msm/msm_drv.c           | 13 +++++++++--
 drivers/gpu/drm/msm/msm_gem.c           |  8 +++++++
 drivers/gpu/drm/msm/msm_gpu.c           |  5 +++--
 drivers/gpu/drm/msm/msm_gpu.h           | 29 +++++++++++++++++++++++--
 include/uapi/drm/msm_drm.h              | 24 ++++++++++++++++++++
 7 files changed, 90 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 0b1e2ba3539e..ca3247f845b5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2263,7 +2263,7 @@ a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev)
 }
 
 static struct drm_gpuvm *
-a6xx_create_private_vm(struct msm_gpu *gpu)
+a6xx_create_private_vm(struct msm_gpu *gpu, bool kernel_managed)
 {
        struct msm_mmu *mmu;
 
@@ -2273,7 +2273,7 @@ a6xx_create_private_vm(struct msm_gpu *gpu)
                return ERR_CAST(mmu);
 
        return msm_gem_vm_create(gpu->dev, mmu, "gpu", 0x100000000ULL,
-                                adreno_private_vm_size(gpu), true);
+                                adreno_private_vm_size(gpu), kernel_managed);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index e4d895dda051..739161df3e3c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -483,6 +483,21 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
                if (!capable(CAP_SYS_ADMIN))
                        return UERR(EPERM, drm, "invalid permissions");
                return msm_context_set_sysprof(ctx, gpu, value);
+       case MSM_PARAM_EN_VM_BIND:
+               /* We can only support VM_BIND with per-process pgtables: */
+               if (ctx->vm == gpu->vm)
+                       return UERR(EINVAL, drm, "requires per-process pgtables");
+
+               /*
+                * We can only switch to VM_BIND mode if the VM has not yet
+                * been created:
+                */
+               if (ctx->vm)
+                       return UERR(EBUSY, drm, "VM already created");
+
+               ctx->userspace_managed_vm = value;
+
+               return 0;
        default:
                return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
        }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6fd981ee6aee..5b5a64c8dddb 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -229,8 +229,11 @@ static void load_gpu(struct drm_device *dev)
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       if (!ctx->vm)
-               ctx->vm = msm_gpu_create_private_vm(priv->gpu, current);
+       if (!ctx->vm) {
+               ctx->vm = msm_gpu_create_private_vm(
+                       priv->gpu, current, !ctx->userspace_managed_vm);
+
+       }
        return ctx->vm;
 }
 
@@ -419,6 +422,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
        if (!priv->gpu)
                return -EINVAL;
 
+       if (msm_context_is_vmbind(ctx))
+               return UERR(EINVAL, dev, "VM_BIND is enabled");
+
        if (should_fail(&fail_gem_iova, obj->size))
                return -ENOMEM;
 
@@ -440,6 +446,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
        if (!priv->gpu)
                return -EINVAL;
 
+       if (msm_context_is_vmbind(ctx))
+               return UERR(EINVAL, dev, "VM_BIND is enabled");
+
        /* Only supported if per-process address space is supported: */
        if (priv->gpu->vm == vm)
                return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a0c15cca9245..5a5220b6f21d 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -63,6 +63,14 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
        if (!ctx->vm)
                return;
 
+       /*
+        * VM_BIND does not depend on implicit teardown of VMAs on handle
+        * close, but instead on implicit teardown of the VM when the device
+        * is closed (see msm_gem_vm_close())
+        */
+       if (msm_context_is_vmbind(ctx))
+               return;
+
        /*
         * TODO we might need to kick this to a queue to avoid blocking
         * in CLOSE ioctl
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4d24dcf62064..503e4dcc5a6f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -831,7 +831,8 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 
 /* Return a new address space for a msm_drm_private instance */
 struct drm_gpuvm *
-msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+                         bool kernel_managed)
 {
        struct drm_gpuvm *vm = NULL;
 
@@ -843,7 +844,7 @@ msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task)
         * the global one
         */
        if (gpu->funcs->create_private_vm) {
-               vm = gpu->funcs->create_private_vm(gpu);
+               vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
                if (!IS_ERR(vm))
                        to_msm_vm(vm)->pid = get_pid(task_pid(task));
        }
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c15aad288552..20f52d9636b0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -79,7 +79,7 @@ struct msm_gpu_funcs {
        void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
                             bool suspended);
        struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
-       struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu);
+       struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
        uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 
        /**
@@ -362,6 +362,14 @@ struct msm_context {
         */
        int queueid;
 
+       /**
+        * @userspace_managed_vm:
+        *
+        * Has userspace opted in to a userspace-managed VM (ie. VM_BIND) via
+        * MSM_PARAM_EN_VM_BIND?
+        */
+       bool userspace_managed_vm;
+
        /**
         * @vm:
         *
@@ -454,6 +462,22 @@ struct msm_context {
 
struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
 
+/**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND.  If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM.  Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+       return ctx->userspace_managed_vm;
+}
+
 /**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
  *
@@ -681,7 +705,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                const char *name, struct msm_gpu_config *config);
 
 struct drm_gpuvm *
-msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task);
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+                         bool kernel_managed);
 
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 2342cb90857e..072e82a80607 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -91,6 +91,30 @@ struct drm_msm_timespec {
 #define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
 #define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
 #define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
+/* MSM_PARAM_EN_VM_BIND is set to 1 to enable VM_BIND ops.
+ *
+ * With VM_BIND enabled, userspace is required to allocate iova and use the
+ * VM_BIND ops for map/unmap ioctls.  MSM_INFO_SET_IOVA and MSM_INFO_GET_IOVA
+ * will be rejected.  (The latter does not have a sensible meaning when a BO
+ * can have multiple and/or partial mappings.)
+ *
+ * With VM_BIND enabled, userspace does not include a submit_bo table in the
+ * SUBMIT ioctl (this will be rejected); the resident set is determined by
+ * the VM_BIND ops.
+ *
+ * Enabling VM_BIND will fail on devices which do not have per-process
+ * pgtables.  It is not allowed to disable VM_BIND once it has been enabled.
+ *
+ * Enabling VM_BIND should be done (attempted) prior to allocating any BOs or
+ * creating submitqueues of type MSM_SUBMITQUEUE_VM_BIND.
+ *
+ * Relatedly, when VM_BIND mode is enabled, the kernel will not try to recover
+ * from GPU faults or failed async VM_BIND ops, in particular because it is
+ * difficult to communicate to userspace which op failed so that userspace
+ * could rewind and try again.  When the VM is marked unusable, the SUBMIT
+ * ioctl will return -EPIPE.
+ */
+#define MSM_PARAM_EN_VM_BIND 0x15  /* WO, once */
 
 /* For backwards compat.  The original support for preemption was based on
  * a single ring per priority level so # of priority levels equals the #
-- 
2.48.1
