Add support for userspace to request a list of observed faults
from a specified VM.
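
For reference, the intended userspace flow is a two-call pattern: first
call the ioctl with size set to 0 to learn the required buffer size,
then call it again with a buffer of that size to receive the fault
entries.  Below is a minimal sketch of that flow, assuming the uAPI
additions from this series (struct drm_xe_vm_get_property, struct
xe_vm_fault, DRM_XE_VM_GET_PROPERTY_FAULTS, and a
DRM_IOCTL_XE_VM_GET_PROPERTY ioctl number) are available from
drm/xe_drm.h; it is illustrative only and not part of this patch:

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <drm/xe_drm.h>

  static int dump_vm_faults(int fd, uint32_t vm_id)
  {
          struct drm_xe_vm_get_property args = {
                  .vm_id = vm_id,
                  .property = DRM_XE_VM_GET_PROPERTY_FAULTS,
                  /* .size left at 0: first call reports the required size */
          };
          struct xe_vm_fault *faults;
          unsigned int i, count;

          if (ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &args))
                  return -1;
          if (!args.size)
                  return 0;       /* no faults recorded */

          faults = malloc(args.size);
          if (!faults)
                  return -1;

          /* Second call: fetch up to args.size worth of fault entries */
          args.data = (uint64_t)(uintptr_t)faults;
          if (ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &args)) {
                  free(faults);
                  return -1;
          }

          count = args.size / sizeof(*faults);
          for (i = 0; i < count; i++)
                  printf("fault %u: addr 0x%llx, precision %u, level %u\n",
                         i, (unsigned long long)faults[i].address,
                         (unsigned int)faults[i].address_precision,
                         (unsigned int)faults[i].fault_level);

          free(faults);
          return 0;
  }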

v2:
- Only allow querying of failed page faults (Matt Brost)

v3:
- Remove unnecessary size parameter from helper function, as it
  is a property of the arguments. (jcavitt)
- Remove unnecessary copy_from_user (Jianxun)
- Set address_precision to 1 (Jianxun)
- Report max size instead of dynamic size for memory allocation
  purposes.  Total memory usage is reported separately.

v4:
- Return int from xe_vm_get_property_size (Shuicheng)
- Fix memory leak (Shuicheng)
- Remove unnecessary size variable (jcavitt)

v5:
- Rename ioctl to xe_vm_get_faults_ioctl (jcavitt)
- Update fill_property_pfs to eliminate need for kzalloc (Jianxun)

v6:
- Repair and move fill_faults break condition (Dan Carpenter)
- Free vm after use (jcavitt)
- Combine assertions (jcavitt)
- Expand size check in xe_vm_get_faults_ioctl (jcavitt)
- Remove return mask from fill_faults, as return is already -EFAULT or 0
  (jcavitt)

v7:
- Revert back to using xe_vm_get_property_ioctl
- Apply better copy_to_user logic (jcavitt)

v8:
- Fix and clean up error value handling in ioctl (jcavitt)
- Reapply return mask for fill_faults (jcavitt)

v9:
- Future-proof size logic for zero-size properties (jcavitt)
- Add access and fault types (Jianxun)
- Remove address type (Jianxun)

v10:
- Remove unnecessary switch case logic (Raag)
- Compress size get, size validation, and property fill functions into a
  single helper function (jcavitt)
- Assert valid size (jcavitt)

v11:
- Remove unnecessary else condition
- Correct backwards helper function size logic (jcavitt)

v12:
- Use size_t instead of int (Raag)

v13:
- Remove engine class and instance (Ivan)

v14:
- Map access type, fault type, and fault level to user macros (Matt
  Brost, Ivan)

v15:
- Remove unnecessary size assertion (jcavitt)

Signed-off-by: Jonathan Cavitt <jonathan.cav...@intel.com>
Suggested-by: Matthew Brost <matthew.br...@intel.com>
Cc: Jianxun Zhang <jianxun.zh...@intel.com>
Cc: Shuicheng Lin <shuicheng....@intel.com>
Cc: Raag Jadav <raag.ja...@intel.com>
Cc: Ivan Briano <ivan.bri...@intel.com>
---
 drivers/gpu/drm/xe/xe_device.c |   3 +
 drivers/gpu/drm/xe/xe_vm.c     | 107 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_vm.h     |   2 +
 3 files changed, 112 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 75e753e0a682..6816dc3a428c 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -196,6 +196,9 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
        DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(XE_VM_GET_PROPERTY, xe_vm_get_property_ioctl,
+                         DRM_RENDER_ALLOW),
+
 };
 
 static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 107e397b4987..c20ac51d8573 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3600,6 +3600,113 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        return err;
 }
 
+/*
+ * Map the access type, fault type, and fault level from the current
+ * bspec definitions to the uAPI abstraction.  The current mapping is
+ * 1-to-1, but if there is ever a hardware change, this abstraction
+ * layer is what allows the uAPI to remain stable across that
+ * hardware change.
+ */
+static u8 xe_to_user_access_type(u8 access_type)
+{
+       return access_type;
+}
+
+static u8 xe_to_user_fault_type(u8 fault_type)
+{
+       return fault_type;
+}
+
+static u8 xe_to_user_fault_level(u8 fault_level)
+{
+       return fault_level;
+}
+
+static int fill_faults(struct xe_vm *vm,
+                      struct drm_xe_vm_get_property *args)
+{
+       struct xe_vm_fault __user *usr_ptr = u64_to_user_ptr(args->data);
+       struct xe_vm_fault store = { 0 };
+       struct xe_vm_fault_entry *entry;
+       int ret = 0, i = 0, count, entry_size;
+
+       entry_size = sizeof(struct xe_vm_fault);
+       count = args->size / entry_size;
+
+       spin_lock(&vm->faults.lock);
+       list_for_each_entry(entry, &vm->faults.list, list) {
+               if (i++ == count)
+                       break;
+
+               memset(&store, 0, entry_size);
+
+               store.address = entry->address;
+               store.address_precision = entry->address_precision;
+
+               store.access_type = xe_to_user_access_type(entry->access_type);
+               store.fault_type = xe_to_user_fault_type(entry->fault_type);
+               store.fault_level = xe_to_user_fault_level(entry->fault_level);
+
+               ret = copy_to_user(usr_ptr, &store, entry_size);
+               if (ret)
+                       break;
+
+               usr_ptr++;
+       }
+       spin_unlock(&vm->faults.lock);
+
+       return ret ? -EFAULT : 0;
+}
+
+static int xe_vm_get_property_helper(struct xe_vm *vm,
+                                    struct drm_xe_vm_get_property *args)
+{
+       size_t size;
+
+       switch (args->property) {
+       case DRM_XE_VM_GET_PROPERTY_FAULTS:
+               spin_lock(&vm->faults.lock);
+               size = size_mul(sizeof(struct xe_vm_fault), vm->faults.len);
+               spin_unlock(&vm->faults.lock);
+
+               if (args->size)
+                       /*
+                        * Number of faults may increase between calls to
+                        * xe_vm_get_property_ioctl, so just report the
+                        * number of faults the user requests if it's less
+                        * than or equal to the number of faults in the VM
+                        * fault array.
+                        */
+                       return args->size <= size ? fill_faults(vm, args) : -EINVAL;
+
+               args->size = size;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+int xe_vm_get_property_ioctl(struct drm_device *drm, void *data,
+                            struct drm_file *file)
+{
+       struct xe_device *xe = to_xe_device(drm);
+       struct xe_file *xef = to_xe_file(file);
+       struct drm_xe_vm_get_property *args = data;
+       struct xe_vm *vm;
+       int ret = 0;
+
+       if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+               return -EINVAL;
+
+       vm = xe_vm_lookup(xef, args->vm_id);
+       if (XE_IOCTL_DBG(xe, !vm))
+               return -ENOENT;
+
+       ret = xe_vm_get_property_helper(vm, args);
+
+       xe_vm_put(vm);
+       return ret;
+}
+
 /**
  * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
  * @vm: VM to bind the BO to
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 9bd7e93824da..63ec22458e04 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -196,6 +196,8 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file);
+int xe_vm_get_property_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file);
 
 void xe_vm_close_and_put(struct xe_vm *vm);
 
-- 
2.43.0
