On Wed, 2025-06-04 at 08:04 -0700, Matthew Brost wrote:
> On Wed, Jun 04, 2025 at 11:35:36AM +0200, Thomas Hellström wrote:
> > Add runtime PM since we might call populate_mm on a foreign device.
> 
> I think taking a runtime PM ref will fix a hard-to-hit splat [1] too.
> 
> [1]
> https://patchwork.freedesktop.org/patch/648954/?series=147849&rev=1
> 
> > Also create the VRAM bos as ttm_bo_type_kernel. This avoids the
> > initial clearing and the creation of an mmap handle.
> > 
> > Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
> > ---
> >  drivers/gpu/drm/drm_pagemap.c |   1 +
> >  drivers/gpu/drm/xe/xe_svm.c   | 104 ++++++++++++++++++++--------------
> >  drivers/gpu/drm/xe/xe_svm.h   |  10 ++--
> >  drivers/gpu/drm/xe/xe_tile.h  |  11 ++++
> >  drivers/gpu/drm/xe/xe_vm.c    |   2 +-
> >  5 files changed, 78 insertions(+), 50 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
> > index 25395685a9b8..94619be00d2a 100644
> > --- a/drivers/gpu/drm/drm_pagemap.c
> > +++ b/drivers/gpu/drm/drm_pagemap.c
> > @@ -843,3 +843,4 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
> >  
> >     return err;
> >  }
> > +EXPORT_SYMBOL(drm_pagemap_populate_mm);
> > diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> > index e161ce3e67a1..a10aab3768d8 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.c
> > +++ b/drivers/gpu/drm/xe/xe_svm.c
> > @@ -3,13 +3,17 @@
> >   * Copyright © 2024 Intel Corporation
> >   */
> >  
> > +#include <drm/drm_drv.h>
> > +
> >  #include "xe_bo.h"
> >  #include "xe_gt_stats.h"
> >  #include "xe_gt_tlb_invalidation.h"
> >  #include "xe_migrate.h"
> >  #include "xe_module.h"
> > +#include "xe_pm.h"
> >  #include "xe_pt.h"
> >  #include "xe_svm.h"
> > +#include "xe_tile.h"
> >  #include "xe_ttm_vram_mgr.h"
> >  #include "xe_vm.h"
> >  #include "xe_vm_types.h"
> > @@ -525,8 +529,10 @@ static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
> >  static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
> >  {
> >     struct xe_bo *bo = to_xe_bo(devmem_allocation);
> > +   struct xe_device *xe = xe_bo_device(bo);
> >  
> >     xe_bo_put_async(bo);
> > +   xe_pm_runtime_put(xe);
> >  }
> >  
> >  static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
> > @@ -720,76 +726,63 @@ static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
> >     return &tile->mem.vram;
> >  }
> >  
> > -/**
> > - * xe_svm_alloc_vram()- Allocate device memory pages for range,
> > - * migrating existing data.
> > - * @vm: The VM.
> > - * @tile: tile to allocate vram from
> > - * @range: SVM range
> > - * @ctx: DRM GPU SVM context
> > - *
> > - * Return: 0 on success, error code on failure.
> > - */
> > -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> > -                 struct xe_svm_range *range,
> > -                 const struct drm_gpusvm_ctx *ctx)
> > +static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
> > +                                 unsigned long start, unsigned long end,
> > +                                 struct mm_struct *mm,
> > +                                 unsigned long timeslice_ms)
> >  {
> > -   struct mm_struct *mm = vm->svm.gpusvm.mm;
> > +   struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
> 
> I think this is going to change here [2], making mem.vram a pointer.
> Maybe add a helper to go from dpagemap -> tile to future-proof this a
> little. I think once [2] lands, we will need to pick the root tile
> here.
> 
> [2] https://patchwork.freedesktop.org/series/149503/

OK. Yes, I think such a helper is part of later patches in the
multi-device series, which is not posted yet pending madvise.

In any case we probably need to associate a dpagemap with a vram region
and then figure out what to use for migration.
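
Just to illustrate, a rough and untested sketch of what I have in mind
(note that the tile back-pointer in the vram region is hypothetical
here, and once [2] lands this would need to resolve to the root tile
instead):

static inline struct xe_tile *
xe_pagemap_to_tile(struct drm_pagemap *dpagemap)
{
	/* The dpagemap is currently embedded in the vram region. */
	struct xe_vram_region *vr =
		container_of(dpagemap, typeof(*vr), dpagemap);

	/* Assumes a back-pointer from the vram region to its tile. */
	return vr->tile;
}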

> 
> > +   struct xe_device *xe = tile_to_xe(tile);
> > +   struct device *dev = xe->drm.dev;
> >     struct xe_vram_region *vr = tile_to_vr(tile);
> >     struct drm_buddy_block *block;
> >     struct list_head *blocks;
> >     struct xe_bo *bo;
> > -   ktime_t end = 0;
> > -   int err;
> > -
> > -   if (!range->base.flags.migrate_devmem)
> > -           return -EINVAL;
> > +   ktime_t time_end = 0;
> > +   int err, idx;
> >  
> > -   range_debug(range, "ALLOCATE VRAM");
> > +   if (!drm_dev_enter(&xe->drm, &idx))
> > +           return -ENODEV;
> >  
> > -   if (!mmget_not_zero(mm))
> > -           return -EFAULT;
> > -   mmap_read_lock(mm);
> > +   xe_pm_runtime_get(xe);
> 
> A foreign device might not be awake, so is that why you are using
> xe_pm_runtime_get() rather than xe_pm_runtime_get_noresume()?

Yes exactly.

>  We only have
> the mmap lock here, so I'm assuming that is safe with our runtime PM
> flow.

It should be. We use the same approach in the CPU fault handler.
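
For clarity, the intended pattern is roughly (sketch only):

	/* Entry point: the foreign device may be suspended, so a
	 * resuming get is required. */
	xe_pm_runtime_get(xe);

	/* While the above ref is held the device is awake, so the
	 * long-term ref tied to the device pages can skip the resume. */
	xe_pm_runtime_get_noresume(xe);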

> 
> >  
> > -retry:
> > -   bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
> > -                            xe_svm_range_size(range),
> > -                            ttm_bo_type_device,
> > + retry:
> > +   bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
> > +                            ttm_bo_type_kernel,
> >                              XE_BO_FLAG_VRAM_IF_DGFX(tile) |
> >                              XE_BO_FLAG_CPU_ADDR_MIRROR);
> >     if (IS_ERR(bo)) {
> >             err = PTR_ERR(bo);
> > -           if (xe_vm_validate_should_retry(NULL, err, &end))
> > +           if (xe_vm_validate_should_retry(NULL, err, &time_end))
> >                     goto retry;
> > -           goto unlock;
> > +           goto out_pm_put;
> >     }
> >  
> > -   drm_pagemap_devmem_init(&bo->devmem_allocation,
> > -                           vm->xe->drm.dev, mm,
> > +   drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
> >                             &dpagemap_devmem_ops,
> >                             &tile->mem.vram.dpagemap,
> > -                           xe_svm_range_size(range));
> > +                           end - start);
> >  
> >     blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
> >     list_for_each_entry(block, blocks, link)
> >             block->private = vr;
> >  
> >     xe_bo_get(bo);
> > -   err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation,
> > -                                       mm,
> > -                                       xe_svm_range_start(range),
> > -                                       xe_svm_range_end(range),
> > -                                       ctx->timeslice_ms,
> > -                                       xe_svm_devm_owner(vm->xe));
> > +
> > +   /* Ensure the device has a pm ref while there are device pages active. */
> > +   xe_pm_runtime_get_noresume(xe);
> > +   err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
> > +                                       start, end, timeslice_ms,
> > +                                       xe_svm_devm_owner(xe));
> >     if (err)
> >             xe_svm_devmem_release(&bo->devmem_allocation);
> >  
> >     xe_bo_unlock(bo);
> >     xe_bo_put(bo);
> >  
> > -unlock:
> > -   mmap_read_unlock(mm);
> > -   mmput(mm);
> > +out_pm_put:
> > +   xe_pm_runtime_put(xe);
> > +   drm_dev_exit(idx);
> >  
> >     return err;
> >  }
> > @@ -898,7 +891,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
> >  
> >     if (--migrate_try_count >= 0 &&
> >         xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
> > -           err = xe_svm_alloc_vram(vm, tile, range, &ctx);
> > +           err = xe_svm_alloc_vram(tile, range, &ctx);
> >             ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
> >             if (err) {
> >                     if (migrate_try_count || !ctx.devmem_only) {
> > @@ -1054,6 +1047,30 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
> >  
> >  #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
> >  
> > +/**
> > + * xe_svm_alloc_vram() - Allocate device memory pages for range,
> > + * migrating existing data.
> > + * @tile: tile to allocate vram from
> > + * @range: SVM range
> > + * @ctx: DRM GPU SVM context
> > + *
> > + * Return: 0 on success, error code on failure.
> > + */
> > +int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
> > +                 const struct drm_gpusvm_ctx *ctx)
> > +{
> > +   struct drm_pagemap *dpagemap;
> > +
> > +   range_debug(range, "ALLOCATE VRAM");
> > +
> 
>       if (!range->base.flags.migrate_devmem)
>               return -EINVAL;
> 
> Or I guess an assert would work too, as the caller should have checked
> this field.

Sure. Will fix.
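
Probably something like this in xe_svm_alloc_vram(), using the
existing xe_assert() and tile_to_xe() helpers:

	xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);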
/Thomas


> 
> Matt
> 
> > +   dpagemap = xe_tile_local_pagemap(tile);
> > +   return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
> > +                                  xe_svm_range_end(range),
> > +                                  range->base.gpusvm->mm,
> > +                                  ctx->timeslice_ms);
> > +}
> > +
> >  static struct drm_pagemap_device_addr
> >  xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
> >                       struct device *dev,
> > @@ -1078,6 +1095,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
> >  
> >  static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
> >     .device_map = xe_drm_pagemap_device_map,
> > +   .populate_mm = xe_drm_pagemap_populate_mm,
> >  };
> >  
> >  /**
> > @@ -1130,7 +1148,7 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
> >     return 0;
> >  }
> >  #else
> > -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> > +int xe_svm_alloc_vram(struct xe_tile *tile,
> >                   struct xe_svm_range *range,
> >                   const struct drm_gpusvm_ctx *ctx)
> >  {
> > diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> > index 19ce4f2754a7..da9a69ea0bb1 100644
> > --- a/drivers/gpu/drm/xe/xe_svm.h
> > +++ b/drivers/gpu/drm/xe/xe_svm.h
> > @@ -70,8 +70,7 @@ int xe_svm_bo_evict(struct xe_bo *bo);
> >  
> >  void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
> >  
> > -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> > -                 struct xe_svm_range *range,
> > +int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
> >                   const struct drm_gpusvm_ctx *ctx);
> >  
> >  struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
> > @@ -237,10 +236,9 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
> >  {
> >  }
> >  
> > -static inline
> > -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> > -                 struct xe_svm_range *range,
> > -                 const struct drm_gpusvm_ctx *ctx)
> > +static inline int
> > +xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
> > +             const struct drm_gpusvm_ctx *ctx)
> >  {
> >     return -EOPNOTSUPP;
> >  }
> > diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
> > index eb939316d55b..066a3d0cea79 100644
> > --- a/drivers/gpu/drm/xe/xe_tile.h
> > +++ b/drivers/gpu/drm/xe/xe_tile.h
> > @@ -16,4 +16,15 @@ int xe_tile_init(struct xe_tile *tile);
> >  
> >  void xe_tile_migrate_wait(struct xe_tile *tile);
> >  
> > +#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
> > +static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
> > +{
> > +   return &tile->mem.vram.dpagemap;
> > +}
> > +#else
> > +static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
> > +{
> > +   return NULL;
> > +}
> > +#endif
> >  #endif
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 7140d8856bad..def493acb4d7 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -2911,7 +2911,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
> >  
> >             if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
> >                     tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
> > -                   err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
> > +                   err = xe_svm_alloc_vram(tile, svm_range, &ctx);
> >                     if (err) {
> >                             drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
> >                                     vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
> > -- 
> > 2.49.0
> > 
