If the start or end of the input address range lies within a system
allocator VMA, split that VMA to create new VMAs matching the input range.
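
For illustration (hypothetical addresses): a madvise range
[0x2000, 0x5000) that begins and ends inside a single system
allocator VMA [0x0000, 0x7000) results in three VMAs:

  before:  |------------------- vma -------------------|
           0x0000                                  0x7000
  after:   |-- vma --|------ new vma ------|--- vma ----|
           0x0000    0x2000          0x5000         0x7000

A minimal caller-side sketch, assuming the caller holds vm->lock
for write as the function asserts (range values hypothetical):

	down_write(&vm->lock);
	err = xe_vm_alloc_madvise_vma(vm, 0x2000, 0x3000);
	up_write(&vm->lock);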

v2 (Matthew Brost)
- Add lockdep_assert_held_write() for vm->lock
- Remove unnecessary page aligned checks
- Add kernel-doc and comments
- Remove unnecessary unwind_ops and return

v3
- Fix copying of attributes

v4
- Nit fixes

v5
- Squash identifier for madvise in xe_vma_ops to this patch

v6/v7/v8
- Rebase on drm_gpuvm changes

Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>
Reviewed-by: Matthew Brost <matthew.br...@intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c       | 108 +++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_vm.h       |   2 +
 drivers/gpu/drm/xe/xe_vm_types.h |   1 +
 3 files changed, 111 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 4d98a0ae510d..831e9e574e58 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -4203,3 +4203,111 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
        }
        kvfree(snap);
 }
+
+/**
+ * xe_vm_alloc_madvise_vma - Allocate VMAs with madvise ops
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits an existing VMA to create new VMAs for the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+       struct drm_gpuvm_map_req map_req = {
+               .map.va.addr = start,
+               .map.va.range = range,
+       };
+
+       struct xe_vma_ops vops;
+       struct drm_gpuva_ops *ops = NULL;
+       struct drm_gpuva_op *__op;
+       bool is_cpu_addr_mirror = false;
+       bool remap_op = false;
+       struct xe_vma_mem_attr tmp_attr;
+       int err;
+
+       lockdep_assert_held_write(&vm->lock);
+
+       vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx",
+              start, range);
+       ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, &map_req);
+       if (IS_ERR(ops))
+               return PTR_ERR(ops);
+
+       if (list_empty(&ops->list)) {
+               err = 0;
+               goto free_ops;
+       }
+
+       drm_gpuva_for_each_op(__op, ops) {
+               struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+               if (__op->op == DRM_GPUVA_OP_REMAP) {
+                       xe_assert(vm->xe, !remap_op);
+                       remap_op = true;
+
+                       if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.remap.unmap->va)))
+                               is_cpu_addr_mirror = true;
+                       else
+                               is_cpu_addr_mirror = false;
+               }
+
+               if (__op->op == DRM_GPUVA_OP_MAP) {
+                       xe_assert(vm->xe, remap_op);
+                       remap_op = false;
+
+                       /* In madvise ops DRM_GPUVA_OP_MAP always comes after
+                        * DRM_GPUVA_OP_REMAP, so set op->map.is_cpu_addr_mirror
+                        * when the REMAP is for a cpu_addr_mirror vma.
+                        */
+                       op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+               }
+
+               print_op(vm->xe, __op);
+       }
+
+       xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+       vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
+       err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+       if (err)
+               goto unwind_ops;
+
+       xe_vm_lock(vm, false);
+
+       drm_gpuva_for_each_op(__op, ops) {
+               struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+               struct xe_vma *vma;
+
+               if (__op->op == DRM_GPUVA_OP_UNMAP) {
+                       /* There should be no unmap */
+                       XE_WARN_ON("UNEXPECTED UNMAP");
+                       xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), NULL);
+               } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+                       vma = gpuva_to_vma(op->base.remap.unmap->va);
+                       /* Store attributes of the VMA unmapped by REMAP so they
+                        * can be assigned to the newly created MAP vma.
+                        */
+                       tmp_attr = vma->attr;
+                       xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
+               } else if (__op->op == DRM_GPUVA_OP_MAP) {
+                       vma = op->map.vma;
+                       /* In a madvise call, MAP is always preceded by REMAP,
+                        * so tmp_attr always holds sane values, making it safe
+                        * to copy them to the new vma.
+                        */
+                       vma->attr = tmp_attr;
+               }
+       }
+
+       xe_vm_unlock(vm);
+       drm_gpuva_ops_free(&vm->gpuvm, ops);
+       return 0;
+
+unwind_ops:
+       vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+free_ops:
+       drm_gpuva_ops_free(&vm->gpuvm, ops);
+       return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 2f213737c7e5..97073726dcdb 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -171,6 +171,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
 
 struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
 
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range);
+
 /**
  * to_userptr_vma() - Return a pointer to an embedding userptr vma
  * @vma: Pointer to the embedded struct xe_vma
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index c7b2bfa0a0d1..dde7218ceba6 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -495,6 +495,7 @@ struct xe_vma_ops {
        struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
        /** @flag: signify the properties within xe_vma_ops*/
 #define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+#define XE_VMA_OPS_FLAG_MADVISE          BIT(1)
        u32 flags;
 #ifdef TEST_VM_OPS_ERROR
        /** @inject_error: inject error to test error handling */
-- 
2.34.1
