Add useful SVM debug logging for SVM ranges that prints each range's
state.
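
With driver debug messages enabled (vm_dbg goes through the DRM driver
debug category, e.g. drm.debug=0x2), each hook emits one line tagged
with the operation name. A sketch of a single range line, with made-up
values and the dmesg/device prefix omitted:

  PAGE FAULT: asid=1, gpusvm=00000000c0ffee00, vram=0,0, seqno=27,
  start=0x00000001a00000, end=0x00000001a10000, size=65536

The INVALIDATE and COPY TO SRAM/VRAM messages below follow the same
pattern with their own fields.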

v2:
 - Update logging with latest structure layout
v3:
 - Better commit message (Thomas)
 - New range structure (Thomas)
 - s/COLLECTOT/COLLECTOR/ (Thomas)
v4:
 - Drop partial evict message (Thomas)
 - Use %p to print pointers (Thomas)
v6:
 - Cast dma_addr to u64 (CI)
 - Only compile if CONFIG_DRM_GPUSVM selected (CI, Lucas)

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c  |  8 ++++
 drivers/gpu/drm/xe/xe_svm.c | 84 +++++++++++++++++++++++++++++++++----
 drivers/gpu/drm/xe/xe_svm.h |  7 ++++
 3 files changed, 92 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ab8847e3b042..ffaf0d02dc7d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -654,6 +654,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
                /* Move this entire thing to xe_svm.c? */
                xe_svm_notifier_lock(xe_vma_vm(vma));
                if (!xe_svm_range_pages_valid(range)) {
+                       xe_svm_range_debug(range, "BIND PREPARE - RETRY");
                        xe_svm_notifier_unlock(xe_vma_vm(vma));
                        return -EAGAIN;
                }
@@ -662,6 +663,10 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
                                         range->base.itree.last + 1 - range->base.itree.start,
                                         &curs);
                        is_devmem = xe_res_is_vram(&curs);
+                       if (is_devmem)
+                               xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
+                       else
+                               xe_svm_range_debug(range, "BIND PREPARE - DMA");
                } else {
                        xe_assert(xe, false);
                }
@@ -1434,10 +1439,13 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
                if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
                        continue;
 
+               xe_svm_range_debug(range, "PRE-COMMIT");
+
                xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
                xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
 
                if (!xe_svm_range_pages_valid(range)) {
+                       xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
                        xe_svm_notifier_unlock(vm);
                        return -EAGAIN;
                }
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index f4332b8ffdba..a15d46701af2 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -12,6 +12,18 @@
 #include "xe_vm.h"
 #include "xe_vm_types.h"
 
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+       /* Not reliable without notifier lock */
+       return range->base.flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+       /* Not reliable without notifier lock */
+       return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
 {
        return container_of(gpusvm, struct xe_vm, svm.gpusvm);
@@ -37,6 +49,23 @@ static unsigned long xe_svm_range_size(struct xe_svm_range *range)
        return drm_gpusvm_range_size(&range->base);
 }
 
+#define range_debug(r__, operation__)                                  \
+       vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,                     \
+              "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+              "start=0x%014lx, end=0x%014lx, size=%lu",                \
+              (operation__), range_to_vm(&(r__)->base)->usm.asid,      \
+              (r__)->base.gpusvm,                                      \
+              xe_svm_range_in_vram((r__)) ? 1 : 0,                     \
+              xe_svm_range_has_vram_binding((r__)) ? 1 : 0,            \
+              (r__)->base.notifier_seq,                                \
+              xe_svm_range_start((r__)), xe_svm_range_end((r__)),      \
+              xe_svm_range_size((r__)))
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+       range_debug(range, operation);
+}
+
 static void *xe_svm_devm_owner(struct xe_device *xe)
 {
        return xe;
@@ -74,6 +103,8 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
 {
        struct xe_device *xe = vm->xe;
 
+       range_debug(range, "GARBAGE COLLECTOR ADD");
+
        drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
 
        spin_lock(&vm->svm.garbage_collector.lock);
@@ -99,10 +130,14 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
 
        xe_svm_assert_in_notifier(vm);
 
+       range_debug(range, "NOTIFIER");
+
        /* Skip if already unmapped or if no binding exist */
        if (range->base.flags.unmapped || !range->tile_present)
                return 0;
 
+       range_debug(range, "NOTIFIER - EXECUTE");
+
        /* Adjust invalidation to range boundaries */
        *adj_start = min(xe_svm_range_start(range), mmu_range->start);
        *adj_end = max(xe_svm_range_end(range), mmu_range->end);
@@ -153,6 +188,11 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
 
        xe_svm_assert_in_notifier(vm);
 
+       vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+              "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+              vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+              mmu_range->start, mmu_range->end, mmu_range->event);
+
        /* Adjust invalidation to notifier boundaries */
        adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
        adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
@@ -237,6 +277,8 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
 {
        struct dma_fence *fence;
 
+       range_debug(range, "GARBAGE COLLECTOR");
+
        xe_vm_lock(vm, false);
        fence = xe_vm_range_unbind(vm, range);
        xe_vm_unlock(vm);
@@ -396,16 +438,23 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
                        int incr = (match && last) ? 1 : 0;
 
                        if (vram_addr != XE_VRAM_ADDR_INVALID) {
-                               if (sram)
+                               if (sram) {
+                                       vm_dbg(&tile->xe->drm,
+                                              "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+                                              vram_addr, (u64)dma_addr[pos], i - pos + incr);
                                        __fence = xe_migrate_from_vram(tile->migrate,
                                                                       i - pos + incr,
                                                                       vram_addr,
                                                                       dma_addr + pos);
-                               else
+                               } else {
+                                       vm_dbg(&tile->xe->drm,
+                                              "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+                                              (u64)dma_addr[pos], vram_addr, i - pos + incr);
                                        __fence = xe_migrate_to_vram(tile->migrate,
                                                                     i - pos + incr,
                                                                     dma_addr + pos,
                                                                     vram_addr);
+                               }
                                if (IS_ERR(__fence)) {
                                        err = PTR_ERR(__fence);
                                        goto err_out;
@@ -425,14 +474,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
 
                        /* Extra mismatched device page, copy it */
                        if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
-                               if (sram)
+                               if (sram) {
+                                       vm_dbg(&tile->xe->drm,
+                                              "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+                                              vram_addr, (u64)dma_addr[pos], 1);
                                        __fence = xe_migrate_from_vram(tile->migrate, 1,
                                                                       vram_addr,
                                                                       dma_addr + pos);
-                               else
+                               } else {
+                                       vm_dbg(&tile->xe->drm,
+                                              "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+                                              (u64)dma_addr[pos], vram_addr, 1);
                                        __fence = xe_migrate_to_vram(tile->migrate, 1,
                                                                     dma_addr + pos,
                                                                     vram_addr);
+                               }
                                if (IS_ERR(__fence)) {
                                        err = PTR_ERR(__fence);
                                        goto err_out;
@@ -609,6 +665,8 @@ static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
        ktime_t end = 0;
        int err;
 
+       range_debug(range, "ALLOCATE VRAM");
+
        if (!mmget_not_zero(mm))
                return -EFAULT;
        mmap_read_lock(mm);
@@ -699,6 +757,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
        if (xe_svm_range_is_valid(range, tile))
                return 0;
 
+       range_debug(range, "PAGE FAULT");
+
        /* XXX: Add migration policy, for now migrate range once */
        if (!range->skip_migrate && range->base.flags.migrate_devmem &&
            xe_svm_range_size(range) >= SZ_64K) {
@@ -714,18 +774,26 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                }
        }
 
+       range_debug(range, "GET PAGES");
        err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
        /* Corner where CPU mappings have changed */
        if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
-               if (err == -EOPNOTSUPP)
+               if (err == -EOPNOTSUPP) {
+                       range_debug(range, "PAGE FAULT - EVICT PAGES");
                        drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+               }
                drm_dbg(&vm->xe->drm,
                        "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
                        vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+               range_debug(range, "PAGE FAULT - RETRY PAGES");
                goto retry;
        }
-       if (err)
+       if (err) {
+               range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
                goto err_out;
+       }
+
+       range_debug(range, "PAGE FAULT - BIND");
 
 retry_bind:
        drm_exec_init(&exec, 0, 0);
@@ -741,8 +809,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                if (IS_ERR(fence)) {
                        drm_exec_fini(&exec);
                        err = PTR_ERR(fence);
-                       if (err == -EAGAIN)
+                       if (err == -EAGAIN) {
+                               range_debug(range, "PAGE FAULT - RETRY BIND");
                                goto retry;
+                       }
                        if (xe_vm_validate_should_retry(&exec, err, &end))
                                goto retry_bind;
                        goto err_out;
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 855aa8e1dd38..e059590e5076 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -70,6 +70,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
 
 int xe_svm_bo_evict(struct xe_bo *bo);
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
 #else
 static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 {
@@ -117,6 +119,11 @@ int xe_svm_bo_evict(struct xe_bo *bo)
 {
        return 0;
 }
+
+static inline
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+}
 #endif
 
 /**
-- 
2.34.1
