This change simplifies the logic by creating remapped previous or next
VMAs with the same memory attributes as the original VMA. Passing
struct xe_vma_mem_attr as a parameter to xe_vma_create() and new_vma()
keeps the memory attributes consistent.

-v2
 *dst = *src (Matthew Brost)

-v3 (Matthew Brost)
 Drop unnecessary helper
 pass attr ptr as input to new_vma and vma_create

Cc: Matthew Brost <matthew.br...@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>
Reviewed-by: Matthew Brost <matthew.br...@intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d7f829fba49c..4d98a0ae510d 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1168,7 +1168,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                                    struct xe_bo *bo,
                                    u64 bo_offset_or_userptr,
                                    u64 start, u64 end,
-                                   u16 pat_index, unsigned int flags)
+                                   struct xe_vma_mem_attr *attr,
+                                   unsigned int flags)
 {
        struct xe_vma *vma;
        struct xe_tile *tile;
@@ -1223,7 +1224,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        if (vm->xe->info.has_atomic_enable_pte_bit)
                vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
 
-       vma->attr.pat_index = pat_index;
+       vma->attr = *attr;
 
        if (bo) {
                struct drm_gpuvm_bo *vm_bo;
@@ -2471,7 +2472,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
 ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
 
 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
-                             u16 pat_index, unsigned int flags)
+                             struct xe_vma_mem_attr *attr, unsigned int flags)
 {
        struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
        struct drm_exec exec;
@@ -2500,7 +2501,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
        }
        vma = xe_vma_create(vm, bo, op->gem.offset,
                            op->va.addr, op->va.addr +
-                           op->va.range - 1, pat_index, flags);
+                           op->va.range - 1, attr, flags);
        if (IS_ERR(vma))
                goto err_unlock;
 
@@ -2643,6 +2644,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                switch (op->base.op) {
                case DRM_GPUVA_OP_MAP:
                {
+                       struct xe_vma_mem_attr default_attr = {
+                               .preferred_loc = {
+                                       .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+                                       .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+                               },
+                               .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+                               .pat_index = op->map.pat_index,
+                       };
+
                        flags |= op->map.read_only ?
                                VMA_CREATE_FLAG_READ_ONLY : 0;
                        flags |= op->map.is_null ?
@@ -2652,7 +2662,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
                        flags |= op->map.is_cpu_addr_mirror ?
                                VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
 
-                       vma = new_vma(vm, &op->base.map, op->map.pat_index,
+                       vma = new_vma(vm, &op->base.map, &default_attr,
                                      flags);
                        if (IS_ERR(vma))
                                return PTR_ERR(vma);
@@ -2700,7 +2710,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (op->base.remap.prev) {
                                vma = new_vma(vm, op->base.remap.prev,
-                                             old->attr.pat_index, flags);
+                                             &old->attr, flags);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);
 
@@ -2730,7 +2740,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (op->base.remap.next) {
                                vma = new_vma(vm, op->base.remap.next,
-                                             old->attr.pat_index, flags);
+                                             &old->attr, flags);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);
 
-- 
2.34.1

Reply via email to