The PAT index determines how PTEs are encoded and can be modified by
madvise, so move it into the per-VMA memory attributes
(struct xe_vma_mem_attr).

Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>
Reviewed-by: Matthew Brost <matthew.br...@intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c       |  2 +-
 drivers/gpu/drm/xe/xe_vm.c       |  6 +++---
 drivers/gpu/drm/xe/xe_vm_types.h | 10 +++++-----
 3 files changed, 9 insertions(+), 9 deletions(-)

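A minimal sketch of the intended access pattern after this change, for
context only; xe_vma_set_pat_index() is a hypothetical helper used purely
for illustration and is not added by this patch:

	/* Hypothetical helper, illustration only: madvise-tunable state,
	 * including the PAT index, is now updated through vma->attr
	 * (alongside e.g. atomic_access) rather than through a loose
	 * field on struct xe_vma.
	 */
	static void xe_vma_set_pat_index(struct xe_vma *vma, u16 pat_index)
	{
		vma->attr.pat_index = pat_index;
	}
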
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index f3a39e734a90..ba7a50bf3a2d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -518,7 +518,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 {
        struct xe_pt_stage_bind_walk *xe_walk =
                container_of(walk, typeof(*xe_walk), base);
-       u16 pat_index = xe_walk->vma->pat_index;
+       u16 pat_index = xe_walk->vma->attr.pat_index;
        struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
        struct xe_vm *vm = xe_walk->vm;
        struct xe_pt *xe_child;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f35d69c0b4c6..d7f829fba49c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1223,7 +1223,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        if (vm->xe->info.has_atomic_enable_pte_bit)
                vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
 
-       vma->pat_index = pat_index;
+       vma->attr.pat_index = pat_index;
 
        if (bo) {
                struct drm_gpuvm_bo *vm_bo;
@@ -2700,7 +2700,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (op->base.remap.prev) {
                                vma = new_vma(vm, op->base.remap.prev,
-                                             old->pat_index, flags);
+                                             old->attr.pat_index, flags);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);
 
@@ -2730,7 +2730,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
 
                        if (op->base.remap.next) {
                                vma = new_vma(vm, op->base.remap.next,
-                                             old->pat_index, flags);
+                                             old->attr.pat_index, flags);
                                if (IS_ERR(vma))
                                        return PTR_ERR(vma);
 
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 8e8138c2b80a..c7b2bfa0a0d1 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -102,6 +102,11 @@ struct xe_vma_mem_attr {
         * values. These are defined in uapi/drm/xe_drm.h.
         */
        u32 atomic_access;
+
+       /**
+        * @pat_index: The pat index to use when encoding the PTEs for this vma.
+        */
+       u16 pat_index;
 };
 
 struct xe_vma {
@@ -152,11 +157,6 @@ struct xe_vma {
        /** @tile_staged: bind is staged for this VMA */
        u8 tile_staged;
 
-       /**
-        * @pat_index: The pat index to use when encoding the PTEs for this vma.
-        */
-       u16 pat_index;
-
        /**
         * @ufence: The user fence that was provided with MAP.
         * Needs to be signalled before UNMAP can be processed.
-- 
2.34.1
