amdxdna_get_ubuf() incorrectly accounted mm->pinned_vm using the
requested number of pages before pin_user_pages_fast() completed.
Additionally, the RLIMIT_MEMLOCK check was performed after pinning,
and error handling for partial pinning could lead to inconsistent
cleanup.

Fix this by:
- checking RLIMIT_MEMLOCK before attempting to pin pages
- ensuring partially pinned pages are properly tracked and released
- accounting mm->pinned_vm only after successful pinning
- removing incorrect error-path accounting and double-subtraction

The RLIMIT_MEMLOCK check is performed conservatively against the
requested number of pages to prevent excessive pinning attempts.

This ensures mm->pinned_vm remains consistent and aligns the driver
with expected GUP and memory accounting semantics.

Signed-off-by: Vineet Agarwal <[email protected]>

Changes in v2:
- Move RLIMIT_MEMLOCK check before pinning
- Fix partial pin handling and cleanup paths
- Remove incorrect pinned_vm accounting in error paths
- Ensure symmetric accounting between pin and release
- Drop unused pinned_total variable
---
 drivers/accel/amdxdna/amdxdna_ubuf.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
index fb999aa25318..571708f78cfe 100644
--- a/drivers/accel/amdxdna/amdxdna_ubuf.c
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -129,7 +129,7 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 				 u32 num_entries, void __user *va_entries)
 {
 	struct amdxdna_dev *xdna = to_xdna_dev(dev);
-	unsigned long lock_limit, new_pinned;
+	unsigned long lock_limit;
 	struct amdxdna_drm_va_entry *va_ent;
 	struct amdxdna_ubuf_priv *ubuf;
 	u32 npages, start = 0;
@@ -176,18 +176,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
 
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
-	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
-		XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
-			 new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+
+	if (ubuf->nr_pages + atomic64_read(&ubuf->mm->pinned_vm) > lock_limit &&
+	    !capable(CAP_IPC_LOCK)) {
 		ret = -ENOMEM;
-		goto sub_pin_cnt;
+		goto free_ent;
 	}
 
 	ubuf->pages = kvmalloc_objs(*ubuf->pages, ubuf->nr_pages);
 	if (!ubuf->pages) {
 		ret = -ENOMEM;
-		goto sub_pin_cnt;
+		goto free_ent;
 	}
 
 	for (i = 0; i < num_entries; i++) {
@@ -196,15 +195,17 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 		ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
 					  FOLL_WRITE | FOLL_LONGTERM,
 					  &ubuf->pages[start]);
-		if (ret < 0 || ret != npages) {
+		if (ret < 0)
+			goto destroy_pages;
+		start += ret;
+		if (ret != npages) {
 			ret = -ENOMEM;
-			XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
 			goto destroy_pages;
 		}
-
-		start += ret;
 	}
 
+	atomic64_add(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+
 	exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR | O_CLOEXEC;
@@ -222,8 +223,6 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
 	if (start)
 		unpin_user_pages(ubuf->pages, start);
 	kvfree(ubuf->pages);
-sub_pin_cnt:
-	atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
 free_ent:
 	kvfree(va_ent);
 free_ubuf:
-- 
2.54.0
