On Wed, 2025-06-04 at 15:05 -0700, Matthew Brost wrote:
> On Wed, Jun 04, 2025 at 11:35:35AM +0200, Thomas Hellström wrote:
> > Add an operation to populate a part of an mm with device
> > private memory.
> > 
> 
> With the kernel doc fixed:
> Reviewed-by: Matthew Brost <matthew.br...@intel.com>

Thanks for reviewing,
Thomas

> 
> > Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
> > ---
> >  drivers/gpu/drm/drm_gpusvm.c  |  7 ++-----
> >  drivers/gpu/drm/drm_pagemap.c | 36 ++++++++++++++++++++++++++++++++++++
> >  include/drm/drm_pagemap.h     | 36 ++++++++++++++++++++++++++++++++++++
> >  3 files changed, 74 insertions(+), 5 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
> > index ef81381609de..51afc8a9704d 100644
> > --- a/drivers/gpu/drm/drm_gpusvm.c
> > +++ b/drivers/gpu/drm/drm_gpusvm.c
> > @@ -175,11 +175,8 @@
> >   *         }
> >   *
> >   *         if (driver_migration_policy(range)) {
> > - *                 mmap_read_lock(mm);
> > - *                 devmem = driver_alloc_devmem();
> > - *                 err = drm_pagemap_migrate_to_devmem(devmem, gpusvm->mm, gpuva_start,
> > - *                                                     gpuva_end, driver_pgmap_owner());
> > - *                 mmap_read_unlock(mm);
> > + *                 err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(), gpuva_start,
> > + *                                               gpuva_end, gpusvm->mm, driver_timeslice_ms());
> >   *                 if (err)        // CPU mappings may have changed
> >   *                         goto retry;
> >   *         }
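
For reference while reviewing: with the locking and devmem allocation moved
behind the new interface, the driver's migration path reduces to roughly the
sketch below. Only drm_pagemap_populate_mm() is from this patch;
driver_choose_drm_pagemap() and driver_timeslice_ms() are hypothetical
helpers, in the same spirit as the other driver_*() ones in the example
above:

	/* Hypothetical fault-path helper: try to place [start, end) of the
	 * process address space in device memory. Best effort; on failure
	 * the caller revalidates its CPU mappings and retries.
	 */
	static int driver_try_migrate(struct drm_gpusvm *gpusvm,
				      unsigned long start, unsigned long end)
	{
		struct drm_pagemap *dpagemap = driver_choose_drm_pagemap();
		int err;

		err = drm_pagemap_populate_mm(dpagemap, start, end, gpusvm->mm,
					      driver_timeslice_ms());
		if (err == -ENODEV)
			return err;	/* Device removed / unbound: give up. */

		return err;	/* Any other error: CPU mappings may have changed. */
	}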
> > diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
> > index 3551a50d7381..25395685a9b8 100644
> > --- a/drivers/gpu/drm/drm_pagemap.c
> > +++ b/drivers/gpu/drm/drm_pagemap.c
> > @@ -6,6 +6,7 @@
> >  #include <linux/dma-mapping.h>
> >  #include <linux/migrate.h>
> >  #include <linux/pagemap.h>
> > +#include <drm/drm_drv.h>
> >  #include <drm/drm_pagemap.h>
> >  
> >  /**
> > @@ -809,3 +810,38 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
> >     return zdd->devmem_allocation->dpagemap;
> >  }
> >  EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
> > +
> > +/**
> > + * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
> > + * @dpagemap: Pointer to the drm_pagemap managing the device memory
> > + * @start: Start of the virtual range to populate.
> > + * @end: End of the virtual range to populate.
> > + * @mm: Pointer to the virtual address space.
> > + * @timeslice_ms: The time requested for the migrated pages to be
> > + * present in @mm before being allowed to be migrated back.
> > + *
> > + * Attempt to populate a virtual range with device memory pages,
> > + * clearing them or migrating data from the existing pages if necessary.
> > + * The function is best effort only, and implementations may vary
> > + * in how hard they try to satisfy the request.
> > + *
> > + * Return: 0 on success, negative error code on error. If the hardware
> > + * device was removed / unbound the function will return -ENODEV.
> > + */
> > +int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
> > +                       unsigned long start, unsigned long end,
> > +                       struct mm_struct *mm,
> > +                       unsigned long timeslice_ms)
> > +{
> > +   int err;
> > +
> > +   if (!mmget_not_zero(mm))
> > +           return -EFAULT;
> > +   mmap_read_lock(mm);
> > +   err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
> > +                                    timeslice_ms);
> > +   mmap_read_unlock(mm);
> > +   mmput(mm);
> > +
> > +   return err;
> > +}
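
Note that the wrapper takes care of both the mm refcount (mmget_not_zero() /
mmput()) and the mmap read lock, so a caller only needs a pointer to an mm
that can't be freed from under it. A minimal sketch, assuming the driver
keeps a long-term mmgrab() reference on the mm (as drm_gpusvm does for
gpusvm->mm) and a hypothetical prefetch entry point:

	/* Hypothetical prefetch path: gpusvm->mm was mmgrab()'d at init, so
	 * the pointer itself stays valid; drm_pagemap_populate_mm() handles
	 * mmget_not_zero() and the mmap read lock around the op call.
	 */
	static int driver_prefetch_range(struct drm_gpusvm *gpusvm,
					 unsigned long start, unsigned long end)
	{
		return drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
					       start, end, gpusvm->mm,
					       driver_timeslice_ms());
	}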
> > diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
> > index dabc9c365df4..e5f20a1235be 100644
> > --- a/include/drm/drm_pagemap.h
> > +++ b/include/drm/drm_pagemap.h
> > @@ -92,6 +92,37 @@ struct drm_pagemap_ops {
> >                          struct device *dev,
> >                          struct drm_pagemap_device_addr addr);
> >  
> > +   /**
> > +    * @populate_mm: Populate part of the mm with @dpagemap memory,
> > +    * migrating existing data.
> > +    * @dpagemap: The struct drm_pagemap managing the memory.
> > +    * @start: The virtual start address in @mm.
> > +    * @end: The virtual end address in @mm.
> > +    * @mm: Pointer to a live mm. The caller must have an mmget()
> > +    * reference.
> > +    * @timeslice_ms: The time requested for the migrated pages to be
> > +    * present in @mm before being allowed to be migrated back.
> > +    *
> > +    * The caller will have the mm lock at least in read mode.
> > +    * Note that there is no guarantee that the memory is resident
> > +    * after the function returns, it's best effort only.
> > +    * When the mm is not using the memory anymore, it will be
> > +    * released. The struct drm_pagemap might have a mechanism in
> > +    * place to reclaim the memory, and the data will then be
> > +    * migrated, typically to system memory.
> > +    * The implementation should hold sufficient runtime power
> > +    * references while pages are used in an address space, and
> > +    * should ideally guard against hardware device unbind in a way
> > +    * such that device pages are migrated back to system memory,
> > +    * followed by device page removal. The implementation should
> > +    * return -ENODEV after device removal.
> > +    *
> > +    * Return: 0 if successful. Negative error code on error.
> > +    */
> > +   int (*populate_mm)(struct drm_pagemap *dpagemap,
> > +                      unsigned long start, unsigned long end,
> > +                      struct mm_struct *mm,
> > +                      unsigned long timeslice_ms);
> >  };
> >  
> >  /**
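
On the implementation side, a populate_mm op might look roughly like the
sketch below. dpagemap_to_dev() and driver_alloc_devmem() are hypothetical
(the latter assumed to return an allocation set up with
drm_pagemap_devmem_init()), how timeslice_ms is honoured is driver-specific,
and error unwinding of the devmem allocation is elided since in practice it
goes through the drm_pagemap_devmem_ops; the runtime-pm and -ENODEV handling
follows the contract above:

	#include <linux/pm_runtime.h>

	static int driver_populate_mm(struct drm_pagemap *dpagemap,
				      unsigned long start, unsigned long end,
				      struct mm_struct *mm,
				      unsigned long timeslice_ms)
	{
		struct device *dev = dpagemap_to_dev(dpagemap);	/* Hypothetical. */
		struct drm_pagemap_devmem *devmem;
		int err;

		/* Hold a runtime-pm reference while device pages may end up
		 * mapped in an address space; after unbind, report -ENODEV.
		 */
		err = pm_runtime_resume_and_get(dev);
		if (err)
			return -ENODEV;

		devmem = driver_alloc_devmem();	/* Hypothetical allocator. */
		if (!devmem) {
			err = -ENOMEM;
			goto out_put;
		}

		/* Migrate [start, end) of @mm to the fresh allocation,
		 * clearing pages or moving existing data as needed.
		 */
		err = drm_pagemap_migrate_to_devmem(devmem, mm, start, end,
						    driver_pgmap_owner());
		if (err)
			goto out_put;

		/* Keep the runtime-pm reference for as long as the devmem
		 * allocation has pages in an address space; it would be
		 * dropped from the devmem release path (elided here).
		 */
		return 0;

	out_put:
		pm_runtime_put(dev);
		return err;
	}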
> > @@ -205,4 +236,9 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
> >                          const struct drm_pagemap_devmem_ops *ops,
> >                          struct drm_pagemap *dpagemap, size_t size);
> >  
> > +int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
> > +                       unsigned long start, unsigned long end,
> > +                       struct mm_struct *mm,
> > +                       unsigned long timeslice_ms);
> > +
> >  #endif
> > -- 
> > 2.49.0
> > 
