On Thu, Apr 24, 2025 at 03:01:21PM -0700, Matthew Brost wrote:
> On Thu, Apr 24, 2025 at 01:18:33PM +0100, Matthew Auld wrote:
> > Export the get/unmap/free pages API. We also need to tweak the SVM init
> > to allow skipping the unneeded parts.
> > 
> > Signed-off-by: Matthew Auld <matthew.a...@intel.com>
> > Cc: Thomas Hellström <thomas.hellst...@linux.intel.com>
> > Cc: Matthew Brost <matthew.br...@intel.com>
> > ---
> >  drivers/gpu/drm/drm_gpusvm.c | 66 ++++++++++++++++++++++++++++--------
> >  include/drm/drm_gpusvm.h     | 16 +++++++++
> >  2 files changed, 67 insertions(+), 15 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
> > index fbe0d70ef163..0e0a3c995b4b 100644
> > --- a/drivers/gpu/drm/drm_gpusvm.c
> > +++ b/drivers/gpu/drm/drm_gpusvm.c
> > @@ -539,6 +539,12 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
> >   *
> >   * This function initializes the GPU SVM.
> >   *
> > + * Note: If only using the simple drm_gpusvm_pages API (get/unmap/free),
> > + * then only @gpusvm, @name, and @drm are expected. However, the same base
> > + * @gpusvm can also be used with both modes together in which case the full
> > + * setup is needed, where the core drm_gpusvm_pages API will simply never use
> > + * the other fields.
> > + *
> >   * Return: 0 on success, a negative error code on failure.
> >   */
> >  int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
> > @@ -549,8 +555,16 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
> >                 const struct drm_gpusvm_ops *ops,
> >                 const unsigned long *chunk_sizes, int num_chunks)
> >  {
> > -   if (!ops->invalidate || !num_chunks)
> > -           return -EINVAL;
> > +   if (mm) {
> 
> Do you still need this if statement if GPU SVM is shared between userptr
> and SVM? Shouldn't we always pass in the MM?
> 
> Or is this for the mode where SVM is disabled in Xe and we just use the
> get_pages functionality?
> 

Never mind, I see how this is used in the following patch. Makes sense.

With that:
Reviewed-by: Matthew Brost <matthew.br...@intel.com>
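
For anyone following along, roughly how I read the two init modes after this
change. The driver/variable names below are made up, and the argument order is
from my reading of the current drm_gpusvm_init() prototype, so double-check
against the header:

	/*
	 * Pages-only mode: just a name and the DRM device. No mm, no ops,
	 * no notifier/chunk configuration; only the exported
	 * drm_gpusvm_get_pages()/unmap/free helpers may be used.
	 */
	err = drm_gpusvm_init(&vm->gpusvm, "example-userptr", &xe->drm,
			      NULL, NULL, 0, 0, 0, NULL, NULL, 0);

	/*
	 * Full SVM mode: everything supplied as before, the mm is grabbed
	 * and the notifier machinery is set up.
	 */
	err = drm_gpusvm_init(&vm->gpusvm, "example-svm", &xe->drm,
			      current->mm, device_private_page_owner,
			      mm_start, mm_range, notifier_size,
			      &example_gpusvm_ops, chunk_sizes, num_chunks);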

> Matt
> 
> > +           if (!ops->invalidate || !num_chunks)
> > +                   return -EINVAL;
> > +           mmgrab(mm);
> > +   } else {
> > +           /* No full SVM mode, only core drm_gpusvm_pages API. */
> > +           if (ops || num_chunks || mm_range || notifier_size ||
> > +               device_private_page_owner)
> > +                   return -EINVAL;
> > +   }
> >  
> >     gpusvm->name = name;
> >     gpusvm->drm = drm;
> > @@ -563,7 +577,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
> >     gpusvm->chunk_sizes = chunk_sizes;
> >     gpusvm->num_chunks = num_chunks;
> >  
> > -   mmgrab(mm);
> >     gpusvm->root = RB_ROOT_CACHED;
> >     INIT_LIST_HEAD(&gpusvm->notifier_list);
> >  
> > @@ -671,7 +684,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
> >                     drm_gpusvm_range_remove(gpusvm, range);
> >     }
> >  
> > -   mmdrop(gpusvm->mm);
> > +   if (gpusvm->mm)
> > +           mmdrop(gpusvm->mm);
> >     WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
> >  }
> >  EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
> > @@ -1185,6 +1199,27 @@ static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
> >     }
> >  }
> >  
> > +/**
> > + * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
> > + * struct
> > + * @gpusvm: Pointer to the GPU SVM structure
> > + * @svm_pages: Pointer to the GPU SVM pages structure
> > + * @npages: Number of mapped pages
> > + *
> > + * This function unmaps and frees the dma address array associated with a GPU
> > + * SVM pages struct.
> > + */
> > +void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
> > +                      struct drm_gpusvm_pages *svm_pages,
> > +                      unsigned long npages)
> > +{
> > +   drm_gpusvm_notifier_lock(gpusvm);
> > +   __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
> > +   __drm_gpusvm_free_pages(gpusvm, svm_pages);
> > +   drm_gpusvm_notifier_unlock(gpusvm);
> > +}
> > +EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
> > +
> >  /**
> >   * drm_gpusvm_range_remove() - Remove GPU SVM range
> >   * @gpusvm: Pointer to the GPU SVM structure
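
As an aside, the pages-only lifecycle I'd expect a driver to build on top of
these three exports looks roughly like the below (error handling trimmed,
uvma and its fields are placeholders):

	struct drm_gpusvm_ctx ctx = {};
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	int err;

	/* Populate and dma-map the CPU pages backing [start, end). */
	err = drm_gpusvm_get_pages(gpusvm, &uvma->svm_pages, mm,
				   &uvma->notifier, start, end, &ctx);
	if (err)
		return err;

	/* ... program the GPU page tables from the mapped pages ... */

	/* On teardown: unmap and drop the dma address array. */
	drm_gpusvm_free_pages(gpusvm, &uvma->svm_pages, npages);
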
> > @@ -1360,13 +1395,12 @@ static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
> >   *
> >   * Return: 0 on success, negative error code on failure.
> >   */
> > -static int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> > -                           struct drm_gpusvm_pages *svm_pages,
> > -                           struct mm_struct *mm,
> > -                           struct mmu_interval_notifier *notifier,
> > -                           unsigned long pages_start,
> > -                           unsigned long pages_end,
> > -                           const struct drm_gpusvm_ctx *ctx)
> > +int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> > +                    struct drm_gpusvm_pages *svm_pages,
> > +                    struct mm_struct *mm,
> > +                    struct mmu_interval_notifier *notifier,
> > +                    unsigned long pages_start, unsigned long pages_end,
> > +                    const struct drm_gpusvm_ctx *ctx)
> >  {
> >     struct hmm_range hmm_range = {
> >             .default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
> > @@ -1548,6 +1582,7 @@ static int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> >             goto retry;
> >     return err;
> >  }
> > +EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
> >  
> >  /**
> >   * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
> > @@ -1583,10 +1618,10 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
> >   * Must be called in the invalidate() callback of the corresponding notifier for
> >   * IOMMU security model.
> >   */
> > -static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
> > -                              struct drm_gpusvm_pages *svm_pages,
> > -                              unsigned long npages,
> > -                              const struct drm_gpusvm_ctx *ctx)
> > +void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
> > +                       struct drm_gpusvm_pages *svm_pages,
> > +                       unsigned long npages,
> > +                       const struct drm_gpusvm_ctx *ctx)
> >  {
> >     if (ctx->in_notifier)
> >             lockdep_assert_held_write(&gpusvm->notifier_lock);
> > @@ -1598,6 +1633,7 @@ static void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
> >     if (!ctx->in_notifier)
> >             drm_gpusvm_notifier_unlock(gpusvm);
> >  }
> > +EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
> >  
> >  /**
> >   * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
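
And since the in_notifier rule carries over to the exported
drm_gpusvm_unmap_pages(), the notifier side would be something like the sketch
below, assuming the driver takes the gpusvm notifier_lock in write mode itself
(names again made up):

	static bool example_userptr_invalidate(struct mmu_interval_notifier *mni,
					       const struct mmu_notifier_range *range,
					       unsigned long cur_seq)
	{
		struct example_uvma *uvma = container_of(mni, typeof(*uvma), notifier);
		struct drm_gpusvm *gpusvm = uvma->gpusvm;
		struct drm_gpusvm_ctx ctx = { .in_notifier = true, };

		if (!mmu_notifier_range_blockable(range))
			return false;

		down_write(&gpusvm->notifier_lock);
		mmu_interval_set_seq(mni, cur_seq);

		/* Drops the dma mappings; the GPU PTEs must be zapped as well. */
		drm_gpusvm_unmap_pages(gpusvm, &uvma->svm_pages, uvma->npages, &ctx);

		up_write(&gpusvm->notifier_lock);

		return true;
	}
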
> > diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
> > index 1b7ed4f4a8e2..611aaba1ac80 100644
> > --- a/include/drm/drm_gpusvm.h
> > +++ b/include/drm/drm_gpusvm.h
> > @@ -370,6 +370,22 @@ void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
> >                         const struct drm_gpusvm_devmem_ops *ops,
> >                         struct drm_pagemap *dpagemap, size_t size);
> >  
> > +int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
> > +                    struct drm_gpusvm_pages *svm_pages,
> > +                    struct mm_struct *mm,
> > +                    struct mmu_interval_notifier *notifier,
> > +                    unsigned long pages_start, unsigned long pages_end,
> > +                    const struct drm_gpusvm_ctx *ctx);
> > +
> > +void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
> > +                       struct drm_gpusvm_pages *svm_pages,
> > +                       unsigned long npages,
> > +                       const struct drm_gpusvm_ctx *ctx);
> > +
> > +void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
> > +                      struct drm_gpusvm_pages *svm_pages,
> > +                      unsigned long npages);
> > +
> >  #ifdef CONFIG_LOCKDEP
> >  /**
> >   * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
> > -- 
> > 2.49.0
> > 
