The drm_pagemap functionality does not depend on the device having
recoverable pagefaults available, so allow xe_migrate_vram() also for
such devices. Even if this will see little use in practice, it's
beneficial for testing multi-device SVM, since a memory provider could
be a non-pagefault-capable GPU.

Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_migrate.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
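
For context, a minimal sketch of the pattern this patch applies. The
names are taken from the diff itself; the surrounding function body is
elided and simplified, so this is illustrative rather than the exact
upstream code:

	/* Derive the batch-buffer choice from device capability
	 * instead of hard-coding the USM path.
	 */
	bool use_usm_batch = xe->info.has_usm;

	/* Previously the batch was always allocated as a USM batch,
	 * which assumes a pagefault-capable device:
	 *
	 *	bb = xe_bb_new(gt, batch_size, true);
	 *
	 * Now the USM path is taken only when the device supports it,
	 * letting a non-pagefault-capable GPU still act as a
	 * drm_pagemap memory provider:
	 */
	bb = xe_bb_new(gt, batch_size, use_usm_batch);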

diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 3894efe7ba60..23c258b775a0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1609,6 +1609,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
 {
        struct xe_gt *gt = m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
+       bool use_usm_batch = xe->info.has_usm;
        struct dma_fence *fence = NULL;
        u32 batch_size = 2;
        u64 src_L0_ofs, dst_L0_ofs;
@@ -1625,7 +1626,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
        batch_size += pte_update_cmd_size(round_update_size);
        batch_size += EMIT_COPY_DW;
 
-       bb = xe_bb_new(gt, batch_size, true);
+       bb = xe_bb_new(gt, batch_size, use_usm_batch);
        if (IS_ERR(bb)) {
                err = PTR_ERR(bb);
                return ERR_PTR(err);
@@ -1650,7 +1651,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
                  XE_PAGE_SIZE);
 
        job = xe_bb_create_migration_job(m->q, bb,
-                                        xe_migrate_batch_base(m, true),
+                                        xe_migrate_batch_base(m, use_usm_batch),
                                         update_idx);
        if (IS_ERR(job)) {
                err = PTR_ERR(job);
-- 
2.48.1
