Add support for MIGRATE_VMA_SELECT_COMPOUND and handle page sizes
appropriately in the migrate/evict code paths.

Signed-off-by: Balbir Singh <balb...@nvidia.com>
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 244 +++++++++++++++++--------
 drivers/gpu/drm/nouveau/nouveau_svm.c  |   6 +-
 drivers/gpu/drm/nouveau/nouveau_svm.h  |   3 +-
 3 files changed, 176 insertions(+), 77 deletions(-)
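
Note for reviewers: the sketch below is illustrative only and is not part
of this patch. It shows the general shape of a migrate_to_ram() handler
that opts in to compound migration, assuming the MIGRATE_VMA_SELECT_COMPOUND
and MIGRATE_PFN_COMPOUND flags introduced earlier in this series;
example_migrate_to_ram() is a hypothetical name and error handling is
trimmed.

#include <linux/migrate.h>
#include <linux/mm.h>

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
        struct folio *sfolio = page_folio(vmf->page);
        unsigned int order = folio_order(sfolio);
        unsigned long npages = 1UL << order;
        struct migrate_vma args = {
                .vma            = vmf->vma,
                .start          = ALIGN_DOWN(vmf->address, PAGE_SIZE << order),
                .fault_page     = vmf->page,
                .pgmap_owner    = page_pgmap(vmf->page)->owner,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };
        struct folio *dfolio;

        args.end = args.start + (PAGE_SIZE << order);
        if (order)      /* ask migrate_vma to keep the large folio together */
                args.flags |= MIGRATE_VMA_SELECT_COMPOUND;

        /* src/dst arrays must cover every small page in the folio */
        args.src = kcalloc(npages, sizeof(*args.src), GFP_KERNEL);
        args.dst = kcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
        if (!args.src || !args.dst)
                goto out;

        if (migrate_vma_setup(&args) || !args.cpages)
                goto out;

        /* One destination folio of matching order, marked as compound. */
        dfolio = vma_alloc_folio(GFP_HIGHUSER | __GFP_ZERO, order,
                                 vmf->vma, vmf->address);
        if (dfolio) {
                args.dst[0] = migrate_pfn(folio_pfn(dfolio));
                if (order)
                        args.dst[0] |= MIGRATE_PFN_COMPOUND;
                /* device -> host copy of all npages pages would go here */
                migrate_vma_pages(&args);
        }
        migrate_vma_finalize(&args);
out:
        kfree(args.src);
        kfree(args.dst);
        return 0;
}

The important point is that a single dst entry carrying MIGRATE_PFN_COMPOUND
stands in for the whole folio; that is the pattern the nouveau changes below
follow.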

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61d0f411ef84..bf3681f52ce0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -83,9 +83,15 @@ struct nouveau_dmem {
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
+       struct folio *free_folios;
        spinlock_t lock;
 };
 
+struct nouveau_dmem_dma_info {
+       dma_addr_t dma_addr;
+       size_t size;
+};
+
 static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
 {
        return container_of(page_pgmap(page), struct nouveau_dmem_chunk,
@@ -112,10 +118,16 @@ static void nouveau_dmem_page_free(struct page *page)
 {
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;
+       struct folio *folio = page_folio(page);
 
        spin_lock(&dmem->lock);
-       page->zone_device_data = dmem->free_pages;
-       dmem->free_pages = page;
+       if (folio_order(folio)) {
+               folio_set_zone_device_data(folio, dmem->free_folios);
+               dmem->free_folios = folio;
+       } else {
+               page->zone_device_data = dmem->free_pages;
+               dmem->free_pages = page;
+       }
 
        WARN_ON(!chunk->callocated);
        chunk->callocated--;
@@ -139,20 +151,28 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
        }
 }
 
-static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
-                               struct page *dpage, dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_folio(struct nouveau_drm *drm,
+                                  struct folio *sfolio, struct folio *dfolio,
+                                  struct nouveau_dmem_dma_info *dma_info)
 {
        struct device *dev = drm->dev->dev;
+       struct page *dpage = folio_page(dfolio, 0);
+       struct page *spage = folio_page(sfolio, 0);
 
-       lock_page(dpage);
+       folio_lock(dfolio);
 
-       *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(dev, *dma_addr))
+       dma_info->dma_addr = dma_map_page(dev, dpage, 0, page_size(dpage),
+                                       DMA_BIDIRECTIONAL);
+       dma_info->size = page_size(dpage);
+       if (dma_mapping_error(dev, dma_info->dma_addr))
                return -EIO;
 
-       if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
-                                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
-               dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(sfolio),
+                                        NOUVEAU_APER_HOST, dma_info->dma_addr,
+                                        NOUVEAU_APER_VRAM,
+                                        nouveau_dmem_page_addr(spage))) {
+               dma_unmap_page(dev, dma_info->dma_addr, page_size(dpage),
+                                       DMA_BIDIRECTIONAL);
                return -EIO;
        }
 
@@ -165,21 +185,38 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        struct nouveau_svmm *svmm;
-       struct page *spage, *dpage;
-       unsigned long src = 0, dst = 0;
-       dma_addr_t dma_addr = 0;
+       struct page *dpage;
        vm_fault_t ret = 0;
        struct migrate_vma args = {
                .vma            = vmf->vma,
-               .start          = vmf->address,
-               .end            = vmf->address + PAGE_SIZE,
-               .src            = &src,
-               .dst            = &dst,
                .pgmap_owner    = drm->dev,
                .fault_page     = vmf->page,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };
-
+       unsigned int order, nr;
+       struct folio *sfolio, *dfolio;
+       struct nouveau_dmem_dma_info dma_info;
+
+       sfolio = page_folio(vmf->page);
+       order = folio_order(sfolio);
+       nr = 1 << order;
+
+       if (order)
+               args.flags |= MIGRATE_VMA_SELECT_COMPOUND;
+
+       args.start = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
+       args.end = args.start + (PAGE_SIZE << order);
+       args.src = kcalloc(nr, sizeof(*args.src), GFP_KERNEL);
+       args.dst = kcalloc(nr, sizeof(*args.dst), GFP_KERNEL);
+
+       if (!args.src || !args.dst) {
+               ret = VM_FAULT_OOM;
+               goto err;
+       }
        /*
         * FIXME what we really want is to find some heuristic to migrate more
         * than just one page on CPU fault. When such fault happens it is very
@@ -190,20 +227,26 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
-       if (!args.cpages)
-               return 0;
+       if (!args.cpages)
+               goto err;
 
-       spage = migrate_pfn_to_page(src);
-       if (!spage || !(src & MIGRATE_PFN_MIGRATE))
-               goto done;
-
-       dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
-       if (!dpage)
+       if (order) {
+               dfolio = vma_alloc_folio(GFP_HIGHUSER | __GFP_ZERO, order,
+                                       vmf->vma, vmf->address);
+               dpage = dfolio ? folio_page(dfolio, 0) : NULL;
+       } else {
+               dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma,
+                                       vmf->address);
+       }
+       if (!dpage) {
+               ret = VM_FAULT_OOM;
                goto done;
+       }
 
-       dst = migrate_pfn(page_to_pfn(dpage));
+       args.dst[0] = migrate_pfn(page_to_pfn(dpage));
+       if (order)
+               args.dst[0] |= MIGRATE_PFN_COMPOUND;
+       dfolio = page_folio(dpage);
 
-       svmm = spage->zone_device_data;
+       svmm = folio_zone_device_data(sfolio);
        mutex_lock(&svmm->mutex);
        nouveau_svmm_invalidate(svmm, args.start, args.end);
-       ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+       ret = nouveau_dmem_copy_folio(drm, sfolio, dfolio, &dma_info);
        mutex_unlock(&svmm->mutex);
        if (ret) {
                ret = VM_FAULT_SIGBUS;
@@ -213,19 +256,31 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
        nouveau_fence_new(&fence, dmem->migrate.chan);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
-       dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       dma_unmap_page(drm->dev->dev, dma_info.dma_addr, dma_info.size,
+                               DMA_BIDIRECTIONAL);
 done:
        migrate_vma_finalize(&args);
+err:
+       kfree(args.src);
+       kfree(args.dst);
        return ret;
 }
 
+static void nouveau_dmem_folio_split(struct folio *head, struct folio *tail)
+{
+       tail->pgmap = head->pgmap;
+       folio_set_zone_device_data(tail, folio_zone_device_data(head));
+}
+
 static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free              = nouveau_dmem_page_free,
        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
+       .folio_split            = nouveau_dmem_folio_split,
 };
 
 static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage,
+                        bool is_large)
 {
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
@@ -279,16 +334,21 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
-       for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
-               page->zone_device_data = drm->dmem->free_pages;
-               drm->dmem->free_pages = page;
+
+       if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) || !is_large) {
+               for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
+                       page->zone_device_data = drm->dmem->free_pages;
+                       drm->dmem->free_pages = page;
+               }
        }
+
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);
 
-       NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
-               DMEM_CHUNK_SIZE >> 20);
+       NV_INFO(drm, "DMEM: registered %ldMB of %sdevice memory %lx %lx\n",
+               DMEM_CHUNK_SIZE >> 20, is_large ? "THP " : "", pfn_first,
+               nouveau_dmem_page_addr(page));
 
        return 0;
 
@@ -305,27 +365,37 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 }
 
 static struct page *
-nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
+nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large)
 {
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
+       struct folio *folio = NULL;
        int ret;
+       unsigned int order = 0;
 
        spin_lock(&drm->dmem->lock);
-       if (drm->dmem->free_pages) {
+       if (is_large && drm->dmem->free_folios) {
+               folio = drm->dmem->free_folios;
+               drm->dmem->free_folios = folio_zone_device_data(folio);
+               page = folio_page(folio, 0);
+               chunk = nouveau_page_to_chunk(page);
+               chunk->callocated++;
+               spin_unlock(&drm->dmem->lock);
+               order = ilog2(DMEM_CHUNK_NPAGES);
+       } else if (!is_large && drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
+               folio = page_folio(page);
        } else {
                spin_unlock(&drm->dmem->lock);
-               ret = nouveau_dmem_chunk_alloc(drm, &page);
+               ret = nouveau_dmem_chunk_alloc(drm, &page, is_large);
                if (ret)
                        return NULL;
+               folio = page_folio(page);
+               if (is_large)
+                       order = ilog2(DMEM_CHUNK_NPAGES);
        }
 
-       zone_device_page_init(page);
+       init_zone_device_folio(folio, order);
        return page;
 }
 
@@ -376,12 +446,12 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 {
        unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
        unsigned long *src_pfns, *dst_pfns;
-       dma_addr_t *dma_addrs;
+       struct nouveau_dmem_dma_info *dma_info;
        struct nouveau_fence *fence;
 
        src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
        dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
-       dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
+       dma_info = kvcalloc(npages, sizeof(*dma_info), GFP_KERNEL | __GFP_NOFAIL);
 
        migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
                        npages);
@@ -389,17 +459,28 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
        for (i = 0; i < npages; i++) {
                if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
                        struct page *dpage;
+                       struct folio *folio = page_folio(
+                               migrate_pfn_to_page(src_pfns[i]));
+                       unsigned int order = folio_order(folio);
+
+                       if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
+                               dpage = folio_page(folio_alloc(
+                                               GFP_HIGHUSER_MOVABLE, order), 0);
+                       } else {
+                               /*
+                                * _GFP_NOFAIL because the GPU is going away and there
+                                * is nothing sensible we can do if we can't copy the
+                                * data back.
+                                */
+                               dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+                       }
 
-                       /*
-                        * _GFP_NOFAIL because the GPU is going away and there
-                        * is nothing sensible we can do if we can't copy the
-                        * data back.
-                        */
-                       dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
                        dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
-                       nouveau_dmem_copy_one(chunk->drm,
-                                       migrate_pfn_to_page(src_pfns[i]), dpage,
-                                       &dma_addrs[i]);
+                       nouveau_dmem_copy_folio(chunk->drm,
+                               page_folio(migrate_pfn_to_page(src_pfns[i])),
+                               page_folio(dpage),
+                               &dma_info[i]);
                }
        }
 
@@ -410,8 +491,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
        kvfree(src_pfns);
        kvfree(dst_pfns);
        for (i = 0; i < npages; i++)
-               dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
-       kvfree(dma_addrs);
+               dma_unmap_page(chunk->drm->dev->dev, dma_info[i].dma_addr,
+                               dma_info[i].size, DMA_BIDIRECTIONAL);
+       kvfree(dma_info);
 }
 
 void
@@ -615,31 +697,35 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 
 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, unsigned long src,
-               dma_addr_t *dma_addr, u64 *pfn)
+               struct nouveau_dmem_dma_info *dma_info, u64 *pfn)
 {
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;
+       bool is_large = false;
 
        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;
 
-       dpage = nouveau_dmem_page_alloc_locked(drm);
+       is_large = src & MIGRATE_PFN_COMPOUND;
+       dpage = nouveau_dmem_page_alloc_locked(drm, is_large);
        if (!dpage)
                goto out;
 
        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
-               *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+               dma_info->dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
-               if (dma_mapping_error(dev, *dma_addr))
+               dma_info->size = page_size(spage);
+               if (dma_mapping_error(dev, dma_info->dma_addr))
                        goto out_free_page;
-               if (drm->dmem->migrate.copy_func(drm, 1,
-                       NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+               if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(page_folio(spage)),
+                       NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST,
+                       dma_info->dma_addr))
                        goto out_dma_unmap;
        } else {
-               *dma_addr = DMA_MAPPING_ERROR;
+               dma_info->dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
@@ -653,7 +739,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
        return migrate_pfn(page_to_pfn(dpage));
 
 out_dma_unmap:
-       dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+       dma_unmap_page(dev, dma_info->dma_addr, dma_info->size, DMA_BIDIRECTIONAL);
 out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
 out:
@@ -663,27 +749,33 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 
 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
-               dma_addr_t *dma_addrs, u64 *pfns)
+               struct nouveau_dmem_dma_info *dma_info, u64 *pfns)
 {
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;
+       unsigned long order = 0;
 
-       for (i = 0; addr < args->end; i++) {
+       for (i = 0; addr < args->end; ) {
+               struct page *spage = migrate_pfn_to_page(args->src[i]);
+
+               order = spage ? folio_order(page_folio(spage)) : 0;
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
-                               args->src[i], dma_addrs + nr_dma, pfns + i);
-               if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
+                               args->src[i], dma_info + nr_dma, pfns + i);
+               if (!dma_mapping_error(drm->dev->dev, dma_info[nr_dma].dma_addr))
                        nr_dma++;
-               addr += PAGE_SIZE;
+               i += 1 << order;
+               addr += (1 << order) * PAGE_SIZE;
        }
 
        nouveau_fence_new(&fence, drm->dmem->migrate.chan);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
-       nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
+       nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i,
+                        PAGE_SHIFT + order);
 
        while (nr_dma--) {
-               dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
-                               DMA_BIDIRECTIONAL);
+               dma_unmap_page(drm->dev->dev, dma_info[nr_dma].dma_addr,
+                               dma_info[nr_dma].size, DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
 }
@@ -697,20 +789,24 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 {
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
-       dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma            = vma,
                .start          = start,
                .pgmap_owner    = drm->dev,
-               .flags          = MIGRATE_VMA_SELECT_SYSTEM,
+               .flags          = MIGRATE_VMA_SELECT_SYSTEM
+                                 | MIGRATE_VMA_SELECT_COMPOUND,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;
+       struct nouveau_dmem_dma_info *dma_info;
 
        if (drm->dmem == NULL)
                return -ENODEV;
 
+       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+               max = max(HPAGE_PMD_NR, max);
+
        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
@@ -718,8 +814,8 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
        if (!args.dst)
                goto out_free_src;
 
-       dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
-       if (!dma_addrs)
+       dma_info = kmalloc_array(max, sizeof(*dma_info), GFP_KERNEL);
+       if (!dma_info)
                goto out_free_dst;
 
        pfns = nouveau_pfns_alloc(max);
@@ -737,7 +833,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                        goto out_free_pfns;
 
                if (args.cpages)
-                       nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+                       nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_info,
                                                   pfns);
                args.start = args.end;
        }
@@ -746,7 +842,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 out_free_pfns:
        nouveau_pfns_free(pfns);
 out_free_dma:
-       kfree(dma_addrs);
+       kfree(dma_info);
 out_free_dst:
        kfree(args.dst);
 out_free_src:
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 1fed638b9eba..0693179d0a7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -920,12 +920,14 @@ nouveau_pfns_free(u64 *pfns)
 
 void
 nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
-                unsigned long addr, u64 *pfns, unsigned long npages)
+                unsigned long addr, u64 *pfns, unsigned long npages,
+                unsigned int page_shift)
 {
        struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
 
        args->p.addr = addr;
-       args->p.size = npages << PAGE_SHIFT;
+       args->p.size = npages << page_shift;
+       args->p.page = page_shift;
 
        mutex_lock(&svmm->mutex);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e7d63d7f0c2d..3fd78662f17e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -33,7 +33,8 @@ void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit);
 u64 *nouveau_pfns_alloc(unsigned long npages);
 void nouveau_pfns_free(u64 *pfns);
 void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
-                     unsigned long addr, u64 *pfns, unsigned long npages);
+                     unsigned long addr, u64 *pfns, unsigned long npages,
+                     unsigned int page_shift);
 #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
 static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
-- 
2.48.1
