> Subject: [PATCH] udmabuf: Do not create malformed scatterlists
>
> Using a sg_set_folio() loop for every 4K results in a malformed scatterlist
> because sg_set_folio() has an issue with offsets > PAGE_SIZE and because
> scatterlist expects the creator to build a list which consolidates any
> physical contiguity.
>
> sg_alloc_table_from_pages() creates a valid scatterlist directly from a
> struct page array, so go back to that.
>
> Remove the offsets allocation and just store an array of tail pages as it
> did before the below commit. Everything wants that anyhow.
>
> Fixes: 0c8b91ef5100 ("udmabuf: add back support for mapping hugetlb pages")
> Reported-by: Julian Orth <[email protected]>
> Closes: https://lore.kernel.org/all/[email protected]/
> Signed-off-by: Jason Gunthorpe <[email protected]>
> ---
> drivers/dma-buf/udmabuf.c | 49 +++++++++++----------------------------
> 1 file changed, 13 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> index 94b8ecb892bb17..5d687860445137 100644
> --- a/drivers/dma-buf/udmabuf.c
> +++ b/drivers/dma-buf/udmabuf.c
> @@ -26,10 +26,10 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
>
> struct udmabuf {
> pgoff_t pagecount;
> - struct folio **folios;
> + struct page **pages;
>
> /**
> - * Unlike folios, pinned_folios is only used for unpin.
> + * Unlike pages, pinned_folios is only used for unpin.
> * So, nr_pinned is not the same to pagecount, the pinned_folios
> * only set each folio which already pinned when udmabuf_create.
> * Note that, since a folio may be pinned multiple times, each folio
> @@ -41,7 +41,6 @@ struct udmabuf {
>
> struct sg_table *sg;
> struct miscdevice *device;
> - pgoff_t *offsets;
> };
>
> static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
> @@ -55,8 +54,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
> if (pgoff >= ubuf->pagecount)
> return VM_FAULT_SIGBUS;
>
> - pfn = folio_pfn(ubuf->folios[pgoff]);
> - pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
> + pfn = page_to_pfn(ubuf->pages[pgoff]);
>
> ret = vmf_insert_pfn(vma, vmf->address, pfn);
> if (ret & VM_FAULT_ERROR)
> @@ -73,8 +71,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
> if (WARN_ON(pgoff >= ubuf->pagecount))
> break;
>
> - pfn = folio_pfn(ubuf->folios[pgoff]);
> - pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
> + pfn = page_to_pfn(ubuf->pages[pgoff]);
>
> /**
> * If the below vmf_insert_pfn() fails, we do not return an
> @@ -109,22 +106,11 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
> static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
> {
> struct udmabuf *ubuf = buf->priv;
> - struct page **pages;
> void *vaddr;
> - pgoff_t pg;
>
> dma_resv_assert_held(buf->resv);
>
> - pages = kvmalloc_objs(*pages, ubuf->pagecount);
> - if (!pages)
> - return -ENOMEM;
> -
> - for (pg = 0; pg < ubuf->pagecount; pg++)
> - pages[pg] = folio_page(ubuf->folios[pg],
> - ubuf->offsets[pg] >> PAGE_SHIFT);
> -
> - vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
> - kvfree(pages);
> + vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
> if (!vaddr)
> return -EINVAL;
>
> @@ -146,22 +132,18 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
> {
> struct udmabuf *ubuf = buf->priv;
> struct sg_table *sg;
> - struct scatterlist *sgl;
> - unsigned int i = 0;
> int ret;
>
> sg = kzalloc_obj(*sg);
> if (!sg)
> return ERR_PTR(-ENOMEM);
>
> - ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
> + ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount, 0,
> + ubuf->pagecount << PAGE_SHIFT,
> + GFP_KERNEL);
> if (ret < 0)
> goto err_alloc;
>
> - for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
> - sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
> - ubuf->offsets[i]);
> -
> ret = dma_map_sgtable(dev, sg, direction, 0);
> if (ret < 0)
> goto err_map;
> @@ -207,12 +189,8 @@ static void unpin_all_folios(struct udmabuf *ubuf)
>
> static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
> {
> - ubuf->folios = kvmalloc_objs(*ubuf->folios, pgcnt);
> - if (!ubuf->folios)
> - return -ENOMEM;
> -
> - ubuf->offsets = kvzalloc_objs(*ubuf->offsets, pgcnt);
> - if (!ubuf->offsets)
> + ubuf->pages = kvmalloc_objs(*ubuf->pages, pgcnt);
> + if (!ubuf->pages)
> return -ENOMEM;
>
> ubuf->pinned_folios = kvmalloc_objs(*ubuf->pinned_folios, pgcnt);
> @@ -225,8 +203,7 @@ static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
> static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
> {
> unpin_all_folios(ubuf);
> - kvfree(ubuf->offsets);
> - kvfree(ubuf->folios);
> + kvfree(ubuf->pages);
> }
>
> static void release_udmabuf(struct dma_buf *buf)
> @@ -344,8 +321,8 @@ static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
> ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
>
> for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
> - ubuf->folios[upgcnt] = folios[cur_folio];
> - ubuf->offsets[upgcnt] = subpgoff;
> + ubuf->pages[upgcnt] = folio_page(folios[cur_folio],
> + subpgoff >> PAGE_SHIFT);
> ++upgcnt;
LGTM.
Reviewed-by: Vivek Kasireddy <[email protected]>
Thanks,
Vivek
>
> if (++cur_pgcnt >= pgcnt)
>
> base-commit: 1f318b96cc84d7c2ab792fcc0bfd42a7ca890681
> --
> 2.43.0