On Tue, 2024-10-15 at 20:25 -0700, Matthew Brost wrote:
> Migration is implemented with range granularity, with VRAM backing being
> a VM private TTM BO (i.e., shares dma-resv with VM). The lifetime of the
> TTM BO is limited to when the SVM range is in VRAM (i.e., when a VRAM
> SVM range is migrated to SRAM, the TTM BO is destroyed).
> 
> The rationale for choosing a TTM BO for the VRAM backing store, as
> opposed to direct buddy allocation, is as follows:
> 
> - DRM buddy allocations are not at page granularity, offering no
>   advantage over a BO.
> - Unified eviction is required (SVM VRAM allocations and TTM BOs need
>   to be able to evict each other).
> - For exhaustive eviction [1], SVM VRAM allocations will almost
>   certainly require a dma-resv.
> - The likely allocation size is 2M, which makes the size of a BO
>   (872 bytes) acceptable per allocation (872 / 2M == .0004158, i.e.
>   roughly 0.04% overhead).
> 
> With this, using a TTM BO for the VRAM backing store seems to be an
> obvious choice, as it allows leveraging the TTM eviction code.
> 
> The current migration policy is to migrate any SVM range greater than
> or equal to 64k once.
> 
> [1] https://patchwork.freedesktop.org/series/133643/
> 
> v2:
>  - Rebase on latest GPU SVM
>  - Retry page fault on get pages returning mixed allocation
>  - Use drm_gpusvm_devmem
> 
> Signed-off-by: Matthew Brost matthew.br...@intel.com
> ---
>  drivers/gpu/drm/xe/xe_svm.c | 96 +++++++++++++++++++++++++++++++++++--
>  drivers/gpu/drm/xe/xe_svm.h |  1 +
>  2 files changed, 94 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 976b4ce15db4..31b80cde15c4 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -218,6 +218,9 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
>  {
>       struct dma_fence *fence;
>  
> +     if (IS_DGFX(vm->xe) && range->base.flags.partial_unmap)
> +             drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
> +
>       xe_vm_lock(vm, false);
>       fence = xe_vm_range_unbind(vm, range);
>       xe_vm_unlock(vm);
> @@ -458,7 +461,6 @@ static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocatio
>       return 0;
>  }
>  
> -__maybe_unused
>  static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
>       .devmem_release = xe_svm_devmem_release,
>       .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
> @@ -542,21 +544,84 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
>       return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
>  }
>  
> +static struct xe_mem_region *tile_to_mr(struct xe_tile *tile)
> +{
> +     return &tile->mem.vram;
> +}
> +
> +static struct xe_bo *xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
> +                                    struct xe_svm_range *range,
> +                                    const struct drm_gpusvm_ctx *ctx)

This function will see substantial updates with multi-device support,
but let's leave it as is for now.

> +{
> +     struct xe_mem_region *mr = tile_to_mr(tile);
> +     struct drm_buddy_block *block;
> +     struct list_head *blocks;
> +     struct xe_bo *bo;
> +     ktime_t end = 0;
> +     int err;
> +
> +retry:
> +     xe_vm_lock(vm, false);
> +     bo = xe_bo_create(tile_to_xe(tile), tile, vm, range->base.va.end -
> +                       range->base.va.start, ttm_bo_type_device,
> +                       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
> +                       XE_BO_FLAG_SYSTEM_ALLOC | XE_BO_FLAG_SKIP_CLEAR);
> +     xe_vm_unlock(vm);
> +     if (IS_ERR(bo)) {
> +             err = PTR_ERR(bo);
> +             if (xe_vm_validate_should_retry(NULL, err, &end))
> +                     goto retry;
> +             return bo;
> +     }
> +
> +     drm_gpusvm_devmem_init(&bo->devmem_allocation,
> +                            vm->xe->drm.dev, vm->svm.gpusvm.mm,
> +                            &gpusvm_devmem_ops,
> +                            &tile->mem.vram.dpagemap,
> +                            range->base.va.end -
> +                            range->base.va.start);
> +
> +     blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
> +     list_for_each_entry(block, blocks, link)
> +             block->private = mr;
> +
> +     /*
> +      * Take ref because as soon as drm_gpusvm_migrate_to_devmem succeeds the
> +      * creation ref can be dropped upon CPU fault or unmap.
> +      */
> +     xe_bo_get(bo);
> +
> +     err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
> +                                        &bo->devmem_allocation, ctx);
> +     if (err) {
> +             xe_bo_put(bo);  /* Local ref */
> +             xe_bo_put(bo);  /* Creation ref */
> +             return ERR_PTR(err);
> +     }
> +
> +     return bo;
> +}
> +
>  int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>                           struct xe_tile *tile, u64 fault_addr,
>                           bool atomic)
>  {
> -     struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma), };
> +     struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma),
> +             .devmem_possible = IS_DGFX(vm->xe), .check_pages = true, };
>       struct xe_svm_range *range;
>       struct drm_gpusvm_range *r;
>       struct drm_exec exec;
>       struct dma_fence *fence;
> +     struct xe_bo *bo = NULL;
>       ktime_t end = 0;
>       int err;
>  
>       lockdep_assert_held_write(&vm->lock);
>  
>  retry:
> +     xe_bo_put(bo);
> +     bo = NULL;
> +
>       /* Always process UNMAPs first so the view of SVM ranges is current */
>       err = xe_svm_garbage_collector(vm);
>       if (err)
> @@ -572,9 +637,32 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>       if (xe_svm_range_is_valid(range, tile))
>               return 0;
>  
> +     /* XXX: Add migration policy, for now migrate range once */
> +     if (IS_DGFX(vm->xe) && !range->migrated &&
> +         range->base.flags.migrate_devmem &&
> +         (range->base.va.end - range->base.va.start) >= SZ_64K) {
> +             range->migrated = true;
> +
> +             bo = xe_svm_alloc_vram(vm, tile, range, &ctx);
> +             if (IS_ERR(bo)) {
> +                     drm_info(&vm->xe->drm,
> +                              "VRAM allocation failed, falling back to retrying, asid=%u, errno %ld\n",
> +                              vm->usm.asid, PTR_ERR(bo));
> +                     bo = NULL;
> +                     goto retry;
> +             }
> +     }
> +
>       err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
> -     if (err == -EFAULT || err == -EPERM)    /* Corner where CPU mappings have change */
> -            goto retry;
> +     if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {    /* Corner where CPU mappings have change */

have changed or have seen a change?


> +             if (err == -EOPNOTSUPP)
> +                     drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
> +             drm_info(&vm->xe->drm,
> +                      "Get pages failed, falling back to retrying, asid=%u, gpusvm=0x%016llx, errno %d\n",
> +                      vm->usm.asid, (u64)&vm->svm.gpusvm, err);
> +             goto retry;
> +     }
>       if (err)
>               goto err_out;
>  
> @@ -605,6 +693,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
>       dma_fence_put(fence);
>  
>  err_out:
> +     xe_bo_put(bo);
> +
>       return err;
>  }
>  
> diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
> index 760d22cefb1e..6893664dae70 100644
> --- a/drivers/gpu/drm/xe/xe_svm.h
> +++ b/drivers/gpu/drm/xe/xe_svm.h
> @@ -21,6 +21,7 @@ struct xe_svm_range {
>       struct list_head garbage_collector_link;
>       u8 tile_present;
>       u8 tile_invalidated;
> +     u8 migrated     :1;

Please add kerneldoc for this field, including protection (locking) information.
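Something along these lines, maybe (the locking note is my assumption,
based on the fault handler asserting vm->lock held in write mode when it
sets the flag -- please document whichever lock actually applies):

        /**
         * @migrated: Range has been migrated to devmem once already;
         * skip further migration for now. Written in the page-fault
         * handler under the VM lock held in write mode (assumption,
         * adjust as needed).
         */
        u8 migrated     :1;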

>  };
>  
>  int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr);

Thanks,
Thomas
