Module Name:    src
Committed By:   riastradh
Date:           Sun Jun 23 00:48:56 UTC 2024

Modified Files:
        src/sys/external/bsd/drm2/ttm: ttm_bo_vm.c

Log Message:
ttm: Sync more with Linux.

Add the original copyright and attribution since this is now,
intentionally, a modified copy of the original and not just roughly
the same algorithm.


To generate a diff of this commit:
cvs rdiff -u -r1.24 -r1.25 src/sys/external/bsd/drm2/ttm/ttm_bo_vm.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/ttm/ttm_bo_vm.c
diff -u src/sys/external/bsd/drm2/ttm/ttm_bo_vm.c:1.24 src/sys/external/bsd/drm2/ttm/ttm_bo_vm.c:1.25
--- src/sys/external/bsd/drm2/ttm/ttm_bo_vm.c:1.24	Sun May 19 13:50:04 2024
+++ src/sys/external/bsd/drm2/ttm/ttm_bo_vm.c	Sun Jun 23 00:48:56 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: ttm_bo_vm.c,v 1.24 2024/05/19 13:50:04 riastradh Exp $	*/
+/*	$NetBSD: ttm_bo_vm.c,v 1.25 2024/06/23 00:48:56 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -29,8 +29,38 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.24 2024/05/19 13:50:04 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.25 2024/06/23 00:48:56 riastradh Exp $");
 
 #include <sys/types.h>
 
@@ -44,8 +74,6 @@ __KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,
 
 #include <ttm/ttm_bo_driver.h>
 
-static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
-		    struct uvm_faultinfo *);
 static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
 		    unsigned long, struct ttm_buffer_object **);
 
@@ -67,12 +95,81 @@ ttm_bo_uvm_detach(struct uvm_object *uob
 	ttm_bo_put(bo);
 }
 
-int
-ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
+static int
+ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
+{
+	int err, ret = 0;
+
+	if (__predict_true(!bo->moving))
+		goto out_unlock;
+
+	/*
+	 * Quick non-stalling check for idle.
+	 */
+	if (dma_fence_is_signaled(bo->moving))
+		goto out_clear;
+
+	/*
+	 * If possible, avoid waiting for GPU with mmap_sem
+	 * held.
+	 */
+	if (1) {		/* always retriable in NetBSD */
+		ret = ERESTART;
+
+		ttm_bo_get(bo);
+		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
+		(void) dma_fence_wait(bo->moving, true);
+		dma_resv_unlock(bo->base.resv);
+		ttm_bo_put(bo);
+		goto out_unlock;
+	}
+
+	/*
+	 * Ordinary wait.
+	 */
+	err = dma_fence_wait(bo->moving, true);
+	if (__predict_false(err != 0)) {
+		ret = (err != -ERESTARTSYS) ? EINVAL/*SIGBUS*/ :
+		    0/*retry access in userland*/;
+		goto out_unlock;
+	}
+
+out_clear:
+	dma_fence_put(bo->moving);
+	bo->moving = NULL;
+
+out_unlock:
+	return ret;
+}
+
+static int
+ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
+{
+
+	/*
+	 * Work around locking order reversal in fault / nopfn
+	 * between mmap_sem and bo_reserve: Perform a trylock operation
+	 * for reserve, and if it fails, retry the fault after waiting
+	 * for the buffer to become unreserved.
+	 */
+	if (__predict_false(!dma_resv_trylock(bo->base.resv))) {
+		ttm_bo_get(bo);
+		uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
+		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
+			dma_resv_unlock(bo->base.resv);
+		ttm_bo_put(bo);
+		return ERESTART;
+	}
+
+	return 0;
+}
+
+static int
+ttm_bo_uvm_fault_reserved(struct uvm_faultinfo *vmf, vaddr_t vaddr,
     struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
     int flags)
 {
-	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
+	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
 	struct ttm_buffer_object *const bo = container_of(uobj,
 	    struct ttm_buffer_object, uvmobj);
 	struct ttm_bo_device *const bdev = bo->bdev;
@@ -88,100 +185,89 @@ ttm_bo_uvm_fault(struct uvm_faultinfo *u
 	unsigned i;
 	vm_prot_t vm_prot;	/* VM_PROT_* */
 	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
-	int ret;
-
-	/* Thanks, uvm, but we don't need this lock.  */
-	rw_exit(uobj->vmobjlock);
-
-	/* Copy-on-write mappings make no sense for the graphics aperture.  */
-	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
-		ret = -EIO;
-		goto out0;
-	}
-
-	/* Try to lock the buffer.  */
-	ret = ttm_bo_reserve(bo, true, true, NULL);
-	if (ret) {
-		if (ret != -EBUSY)
-			goto out0;
-		/*
-		 * It's currently locked.  Unlock the fault, wait for
-		 * it, and start over.
-		 */
-		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
-		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
-			dma_resv_unlock(bo->base.resv);
+	int err, ret;
 
-		return ERESTART;
-	}
-
-	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
-	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
-		ret = -EINVAL;
-		goto out1;
-	}
+	/*
+	 * Refuse to fault imported pages. This should be handled
+	 * (if at all) by redirecting mmap to the exporter.
+	 */
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+		return EINVAL;	/* SIGBUS */
 
-	/* Notify the driver of a fault if it wants.  */
 	if (bdev->driver->fault_reserve_notify) {
-		ret = (*bdev->driver->fault_reserve_notify)(bo);
-		if (ret) {
-			if (ret == -ERESTART)
-				ret = -EIO;
-			goto out1;
+		struct dma_fence *moving = dma_fence_get(bo->moving);
+
+		err = bdev->driver->fault_reserve_notify(bo);
+		switch (err) {
+		case 0:
+			break;
+		case -EBUSY:
+		case -ERESTARTSYS:
+			return 0;	/* retry access in userland */
+		default:
+			return EINVAL;	/* SIGBUS */
 		}
-	}
 
-	ret = ttm_bo_uvm_fault_idle(bo, ufi);
-	if (ret) {
-		KASSERT(ret == -ERESTART || ret == -EFAULT);
-		goto out1;
+		if (bo->moving != moving) {
+			spin_lock(&ttm_bo_glob.lru_lock);
+			ttm_bo_move_to_lru_tail(bo, NULL);
+			spin_unlock(&ttm_bo_glob.lru_lock);
+		}
+		dma_fence_put(moving);
 	}
 
-	ret = ttm_mem_io_lock(man, true);
-	if (ret) {
-		ret = -EIO;
-		goto out1;
-	}
-	ret = ttm_mem_io_reserve_vm(bo);
-	if (ret) {
-		ret = -EIO;
-		goto out2;
+	/*
+	 * Wait for buffer data in transit, due to a pipelined
+	 * move.
+	 */
+	ret = ttm_bo_vm_fault_idle(bo, vmf);
+	if (__predict_false(ret != 0))
+		return ret;
+
+	err = ttm_mem_io_lock(man, true);
+	if (__predict_false(err != 0))
+		return 0;	/* retry access in userland */
+	err = ttm_mem_io_reserve_vm(bo);
+	if (__predict_false(err != 0)) {
+		ret = EINVAL;	/* SIGBUS */
+		goto out_io_unlock;
 	}
 
-	vm_prot = ufi->entry->protection;
-	if (bo->mem.bus.is_iomem) {
-		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
-		size = bo->mem.bus.size;
-		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
-	} else {
+	vm_prot = vmf->entry->protection;
+	if (!bo->mem.bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
-			.flags = TTM_OPT_FLAG_FORCE_ALLOC,
+			.flags = TTM_OPT_FLAG_FORCE_ALLOC
 		};
+
 		u.ttm = bo->ttm;
 		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
 		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
 			pgprot = vm_prot;
 		else
 			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
-		if (ttm_tt_populate(u.ttm, &ctx)) {
-			ret = -ENOMEM;
-			goto out2;
+		if (ttm_tt_populate(bo->ttm, &ctx)) {
+			ret = ENOMEM;
+			goto out_io_unlock;
 		}
+	} else {
+		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
+		size = bo->mem.bus.size;
+		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
 	}
 
-	KASSERT(ufi->entry->start <= vaddr);
-	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
-	KASSERT(ufi->entry->offset <= size);
-	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
+	KASSERT(vmf->entry->start <= vaddr);
+	KASSERT((vmf->entry->offset & (PAGE_SIZE - 1)) == 0);
+	KASSERT(vmf->entry->offset <= size);
+	KASSERT((vaddr - vmf->entry->start) <= (size - vmf->entry->offset));
 	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
-		((size - ufi->entry->offset) - (vaddr - ufi->entry->start))),
+		((size - vmf->entry->offset) - (vaddr - vmf->entry->start))),
 	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
 	    " start=%jx offset=%jx",
 	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
-	    (uintmax_t)ufi->entry->start, (uintmax_t)ufi->entry->offset);
-	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
+	    (uintmax_t)vmf->entry->start, (uintmax_t)vmf->entry->offset);
+	uoffset = (vmf->entry->offset + (vaddr - vmf->entry->start));
 	startpage = (uoffset >> PAGE_SHIFT);
 	for (i = 0; i < npages; i++) {
 		paddr_t paddr;
@@ -201,65 +287,55 @@ ttm_bo_uvm_fault(struct uvm_faultinfo *u
 
 			paddr = pmap_phys_address(cookie);
 		}
-		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
+		ret = pmap_enter(vmf->orig_map->pmap, vaddr + i*PAGE_SIZE,
 		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
-		if (ret)
-			goto out3;
+		if (ret) {
+			KASSERT(ret != ERESTART);
+			break;
+		}
 	}
-
-out3:	pmap_update(ufi->orig_map->pmap);
-out2:	ttm_mem_io_unlock(man);
-out1:	ttm_bo_unreserve(bo);
-out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
-	/* XXX errno Linux->NetBSD */
-	return -ret;
+	pmap_update(vmf->orig_map->pmap);
+	ret = 0;		/* retry access in userland */
+out_io_unlock:
+	ttm_mem_io_unlock(man);
+	KASSERT(ret != ERESTART);
+	return ret;
 }
 
-static int
-ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
+int
+ttm_bo_uvm_fault(struct uvm_faultinfo *vmf, vaddr_t vaddr,
+    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
+    int flags)
 {
-	int err, ret = 0;
-
-	if (__predict_true(!bo->moving))
-		goto out_unlock;
-
-	/*
-	 * Quick non-stalling check for idle.
-	 */
-	if (dma_fence_is_signaled(bo->moving))
-		goto out_clear;
+	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
+	struct ttm_buffer_object *const bo = container_of(uobj,
+	    struct ttm_buffer_object, uvmobj);
+	int ret;
 
-	/*
-	 * If possible, avoid waiting for GPU with mmap_sem
-	 * held.
-	 * XXX Need to integrate this properly into caller's
-	 * error branches so it doesn't uvmfault_unlockall twice.
-	 */
-	if (0) {
-		ret = -ERESTART;
+	/* Thanks, uvm, but we don't need this lock.  */
+	rw_exit(uobj->vmobjlock);
 
-		ttm_bo_get(bo);
-		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
-		(void) dma_fence_wait(bo->moving, true);
-		dma_resv_unlock(bo->base.resv);
-		ttm_bo_put(bo);
-		goto out_unlock;
+	/* Copy-on-write mappings make no sense for the graphics aperture.  */
+	if (UVM_ET_ISCOPYONWRITE(vmf->entry)) {
+		ret = EINVAL;	/* SIGBUS */
+		goto out;
 	}
 
-	/*
-	 * Ordinary wait.
-	 */
-	err = dma_fence_wait(bo->moving, true);
-	if (__predict_false(ret != 0)) {
-		ret = (err != -ERESTARTSYS) ? -EFAULT : -ERESTART;
-		goto out_unlock;
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret) {
+		/* ttm_bo_vm_reserve already unlocked on ERESTART */
+		KASSERTMSG(ret == ERESTART, "ret=%d", ret);
+		return ret;
 	}
 
-out_clear:
-	dma_fence_put(bo->moving);
-	bo->moving = NULL;
+	ret = ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
+	    access_type, flags);
+	if (ret == ERESTART)	/* already unlocked on ERESTART */
+		return ret;
 
-out_unlock:
+	dma_resv_unlock(bo->base.resv);
+
+out:	uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
 	return ret;
 }
 
@@ -333,7 +409,8 @@ ttm_bo_uvm_lookup(struct ttm_bo_device *
 	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
 	    npages);
 	if (node != NULL) {
-		bo = container_of(node, struct ttm_buffer_object, base.vma_node);
+		bo = container_of(node, struct ttm_buffer_object,
+		    base.vma_node);
 		if (!kref_get_unless_zero(&bo->kref))
 			bo = NULL;
 	}

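For reference while reading the hunks above, here is the reworked fault entry point reassembled without diff markers, showing how it now delegates to the split-out ttm_bo_vm_reserve() and ttm_bo_uvm_fault_reserved() helpers to mirror Linux's structure.  This is only a condensed reading aid stitched together from the diff in this mail, not a substitute for the committed ttm_bo_vm.c rev 1.25; every identifier in it comes from the hunks above.

int
ttm_bo_uvm_fault(struct uvm_faultinfo *vmf, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = vmf->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	int ret;

	/* uvm's object lock is not needed on this path.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings of the graphics aperture are refused.  */
	if (UVM_ET_ISCOPYONWRITE(vmf->entry)) {
		ret = EINVAL;	/* SIGBUS */
		goto out;
	}

	/* Trylock the reservation; on contention, back out and retry.  */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret) {
		/* ttm_bo_vm_reserve already unlocked on ERESTART */
		KASSERTMSG(ret == ERESTART, "ret=%d", ret);
		return ret;
	}

	/* Do the real work with the reservation held.  */
	ret = ttm_bo_uvm_fault_reserved(vmf, vaddr, pps, npages, centeridx,
	    access_type, flags);
	if (ret == ERESTART)	/* already unlocked on ERESTART */
		return ret;

	dma_resv_unlock(bo->base.resv);

out:	uvmfault_unlockall(vmf, vmf->entry->aref.ar_amap, NULL);
	return ret;
}

Note the error-code convention visible throughout the diff: the new code returns native NetBSD errnos (ERESTART to retry the fault, EINVAL to deliver SIGBUS, 0 to let userland retry the access), rather than negated Linux errnos translated at the end as in rev 1.24.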