Module Name:    src
Committed By:   riastradh
Date:           Sun Dec 19 11:52:38 UTC 2021

Modified Files:
        src/sys/external/bsd/drm2/dist/drm/radeon: radeon.h radeon_device.c
            radeon_fence.c

Log Message:
radeon: Reduce #ifdef __NetBSD__ around condvars.
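
For context on the log message: in the NetBSD drm2 compat layer the
drm_waitqueue_t wait queues are (to my understanding) backed by condvars,
and until now the radeon code carried both that path and the Linux
wait_queue_head_t path under #ifdef __NetBSD__.  A minimal sketch of the
shape of the change, with field names taken from the diff below and
surrounding struct members elided:

	/* Before: both paths carried, selected at compile time. */
	#ifdef __NetBSD__
		spinlock_t			fence_lock;
		drm_waitqueue_t			fence_queue;
		TAILQ_HEAD(, radeon_fence)	fence_check;
	#else
		wait_queue_head_t		fence_queue;
	#endif

	/* After: only the NetBSD condvar-based path remains, so the
	   #ifdef/#else/#endif wrapper is dropped. */
		spinlock_t			fence_lock;
		drm_waitqueue_t			fence_queue;
		TAILQ_HEAD(, radeon_fence)	fence_check;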


To generate a diff of this commit:
cvs rdiff -u -r1.10 -r1.11 src/sys/external/bsd/drm2/dist/drm/radeon/radeon.h
cvs rdiff -u -r1.12 -r1.13 \
    src/sys/external/bsd/drm2/dist/drm/radeon/radeon_device.c
cvs rdiff -u -r1.21 -r1.22 \
    src/sys/external/bsd/drm2/dist/drm/radeon/radeon_fence.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/dist/drm/radeon/radeon.h
diff -u src/sys/external/bsd/drm2/dist/drm/radeon/radeon.h:1.10 src/sys/external/bsd/drm2/dist/drm/radeon/radeon.h:1.11
--- src/sys/external/bsd/drm2/dist/drm/radeon/radeon.h:1.10	Sat Dec 18 23:45:42 2021
+++ src/sys/external/bsd/drm2/dist/drm/radeon/radeon.h	Sun Dec 19 11:52:38 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: radeon.h,v 1.10 2021/12/18 23:45:42 riastradh Exp $	*/
+/*	$NetBSD: radeon.h,v 1.11 2021/12/19 11:52:38 riastradh Exp $	*/
 
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
@@ -392,11 +392,7 @@ struct radeon_fence {
 	unsigned		ring;
 	bool			is_vm_update;
 
-#ifdef __NetBSD__
 	TAILQ_ENTRY(radeon_fence)	fence_check;
-#else
-	wait_queue_entry_t		fence_wake;
-#endif
 };
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -2435,13 +2431,9 @@ struct radeon_device {
 	struct radeon_doorbell		doorbell;
 	struct radeon_mman		mman;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
-#ifdef __NetBSD__
 	spinlock_t			fence_lock;
 	drm_waitqueue_t			fence_queue;
 	TAILQ_HEAD(, radeon_fence)	fence_check;
-#else
-	wait_queue_head_t		fence_queue;
-#endif
 	u64				fence_context;
 	struct mutex			ring_lock;
 	struct radeon_ring		ring[RADEON_NUM_RINGS];

Index: src/sys/external/bsd/drm2/dist/drm/radeon/radeon_device.c
diff -u src/sys/external/bsd/drm2/dist/drm/radeon/radeon_device.c:1.12 src/sys/external/bsd/drm2/dist/drm/radeon/radeon_device.c:1.13
--- src/sys/external/bsd/drm2/dist/drm/radeon/radeon_device.c:1.12	Sun Dec 19 11:26:26 2021
+++ src/sys/external/bsd/drm2/dist/drm/radeon/radeon_device.c	Sun Dec 19 11:52:38 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: radeon_device.c,v 1.12 2021/12/19 11:26:26 riastradh Exp $	*/
+/*	$NetBSD: radeon_device.c,v 1.13 2021/12/19 11:52:38 riastradh Exp $	*/
 
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.12 2021/12/19 11:26:26 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: radeon_device.c,v 1.13 2021/12/19 11:52:38 riastradh Exp $");
 
 #include <linux/console.h>
 #include <linux/efi.h>
@@ -1430,12 +1430,8 @@ int radeon_device_init(struct radeon_dev
 	mutex_init(&rdev->srbm_mutex);
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
-#ifdef __NetBSD__
 	spin_lock_init(&rdev->irq.vblank_lock);
 	DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
-#else
-	init_waitqueue_head(&rdev->irq.vblank_queue);
-#endif
 	r = radeon_gem_init(rdev);
 	if (r)
 		return r;
@@ -1710,12 +1706,10 @@ void radeon_device_fini(struct radeon_de
 	if (rdev->family >= CHIP_BONAIRE)
 		radeon_doorbell_fini(rdev);
 
-#ifdef __NetBSD__
 	DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
 	spin_lock_destroy(&rdev->irq.vblank_lock);
 	destroy_rwsem(&rdev->exclusive_lock);
 	destroy_rwsem(&rdev->pm.mclk_lock);
-#endif
 	mutex_destroy(&rdev->srbm_mutex);
 	mutex_destroy(&rdev->gpu_clock_mutex);
 	mutex_destroy(&rdev->pm.mutex);

Index: src/sys/external/bsd/drm2/dist/drm/radeon/radeon_fence.c
diff -u src/sys/external/bsd/drm2/dist/drm/radeon/radeon_fence.c:1.21 src/sys/external/bsd/drm2/dist/drm/radeon/radeon_fence.c:1.22
--- src/sys/external/bsd/drm2/dist/drm/radeon/radeon_fence.c:1.21	Sun Dec 19 11:08:25 2021
+++ src/sys/external/bsd/drm2/dist/drm/radeon/radeon_fence.c	Sun Dec 19 11:52:38 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: radeon_fence.c,v 1.21 2021/12/19 11:08:25 riastradh Exp $	*/
+/*	$NetBSD: radeon_fence.c,v 1.22 2021/12/19 11:52:38 riastradh Exp $	*/
 
 /*
  * Copyright 2009 Jerome Glisse.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: radeon_fence.c,v 1.21 2021/12/19 11:08:25 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: radeon_fence.c,v 1.22 2021/12/19 11:52:38 riastradh Exp $");
 
 #include <linux/atomic.h>
 #include <linux/firmware.h>
@@ -171,20 +171,10 @@ int radeon_fence_emit(struct radeon_devi
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-#ifdef __NetBSD__
 static int radeon_fence_check_signaled(struct radeon_fence *fence)
-#else
-static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
-#endif
 {
-#ifndef __NetBSD__
-	struct radeon_fence *fence;
-#endif
 	u64 seq;
 
-#ifndef __NetBSD__
-	fence = container_of(wait, struct radeon_fence, fence_wake);
-#endif
 	BUG_ON(!spin_is_locked(&fence->rdev->fence_lock));
 
 	/*
@@ -201,18 +191,13 @@ static int radeon_fence_check_signaled(w
 			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
 
 		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
-#ifdef __NetBSD__
 		TAILQ_REMOVE(&fence->rdev->fence_check, fence, fence_check);
-#else
-		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
-#endif
 		dma_fence_put(&fence->base);
 	} else
 		DMA_FENCE_TRACE(&fence->base, "pending\n");
 	return 0;
 }
 
-#ifdef __NetBSD__
 void
 radeon_fence_wakeup_locked(struct radeon_device *rdev)
 {
@@ -224,7 +209,6 @@ radeon_fence_wakeup_locked(struct radeon
 		radeon_fence_check_signaled(fence);
 	}
 }
-#endif
 
 /**
  * radeon_fence_activity - check for fence activity
@@ -319,16 +303,12 @@ static void radeon_fence_check_lockup(st
 	rdev = fence_drv->rdev;
 	ring = fence_drv - &rdev->fence_drv[0];
 
-#ifdef __NetBSD__
 	spin_lock(&rdev->fence_lock);
-#endif
 
 	if (!down_read_trylock(&rdev->exclusive_lock)) {
 		/* just reschedule the check if a reset is going on */
 		radeon_fence_schedule_check(rdev, ring);
-#ifdef __NetBSD__
 		spin_unlock(&rdev->fence_lock);
-#endif
 		return;
 	}
 
@@ -342,11 +322,7 @@ static void radeon_fence_check_lockup(st
 	}
 
 	if (radeon_fence_activity(rdev, ring))
-#ifdef __NetBSD__
 		radeon_fence_wakeup_locked(rdev);
-#else
-		wake_up_all(&rdev->fence_queue);
-#endif
 
 	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
 
@@ -358,16 +334,10 @@ static void radeon_fence_check_lockup(st
 
 		/* remember that we need an reset */
 		rdev->needs_reset = true;
-#ifdef __NetBSD__
 		radeon_fence_wakeup_locked(rdev);
-#else
-		wake_up_all(&rdev->fence_queue);
-#endif
 	}
 	up_read(&rdev->exclusive_lock);
-#ifdef __NetBSD__
 	spin_unlock(&rdev->fence_lock);
-#endif
 }
 
 /**
@@ -382,11 +352,7 @@ static void radeon_fence_check_lockup(st
 static void radeon_fence_process_locked(struct radeon_device *rdev, int ring)
 {
 	if (radeon_fence_activity(rdev, ring))
-#ifdef __NetBSD__
 		radeon_fence_wakeup_locked(rdev);
-#else
-		wake_up_all(&rdev->fence_queue);
-#endif
 }
 
 void radeon_fence_process(struct radeon_device *rdev, int ring)
@@ -472,11 +438,7 @@ static bool radeon_fence_enable_signalin
 		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 
 		if (radeon_fence_activity(rdev, fence->ring))
-#ifdef __NetBSD__
 			radeon_fence_wakeup_locked(rdev);
-#else
-			wake_up_all_locked(&rdev->fence_queue);
-#endif
 
 		/* did fence get signaled after we enabled the sw irq? */
 		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
@@ -493,14 +455,7 @@ static bool radeon_fence_enable_signalin
 		radeon_fence_schedule_check(rdev, fence->ring);
 	}
 
-#ifdef __NetBSD__
 	TAILQ_INSERT_TAIL(&rdev->fence_check, fence, fence_check);
-#else
-	fence->fence_wake.flags = 0;
-	fence->fence_wake.private = NULL;
-	fence->fence_wake.func = radeon_fence_check_signaled;
-	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
-#endif
 	dma_fence_get(f);
 
 	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
@@ -594,7 +549,6 @@ static long radeon_fence_wait_seq_timeou
 		radeon_irq_kms_sw_irq_get(rdev, i);
 	}
 
-#ifdef __NetBSD__
 	if (intr)
 		DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue,
 		    &rdev->fence_lock, timeout,
@@ -605,17 +559,6 @@ static long radeon_fence_wait_seq_timeou
 		    &rdev->fence_lock, timeout,
 		    (radeon_fence_any_seq_signaled(rdev, target_seq)
 			|| rdev->needs_reset));
-#else
-	if (intr) {
-		r = wait_event_interruptible_timeout(rdev->fence_queue, (
-			radeon_fence_any_seq_signaled(rdev, target_seq)
-			 || rdev->needs_reset), timeout);
-	} else {
-		r = wait_event_timeout(rdev->fence_queue, (
-			radeon_fence_any_seq_signaled(rdev, target_seq)
-			 || rdev->needs_reset), timeout);
-	}
-#endif
 
 	if (rdev->needs_reset)
 		r = -EDEADLK;
@@ -1021,13 +964,9 @@ int radeon_fence_driver_init(struct rade
 {
 	int ring;
 
-#ifdef __NetBSD__
 	spin_lock_init(&rdev->fence_lock);
 	DRM_INIT_WAITQUEUE(&rdev->fence_queue, "radfence");
 	TAILQ_INIT(&rdev->fence_check);
-#else
-	init_waitqueue_head(&rdev->fence_queue);
-#endif
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		radeon_fence_driver_init_ring(rdev, ring);
 	}
@@ -1059,23 +998,17 @@ void radeon_fence_driver_fini(struct rad
 			radeon_fence_driver_force_completion(rdev, ring);
 		}
 		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
-#ifdef __NetBSD__
 		spin_lock(&rdev->fence_lock);
 		radeon_fence_wakeup_locked(rdev);
 		spin_unlock(&rdev->fence_lock);
-#else
-		wake_up_all(&rdev->fence_queue);
-#endif
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 		rdev->fence_drv[ring].initialized = false;
 	}
 	mutex_unlock(&rdev->ring_lock);
 
-#ifdef __NetBSD__
 	BUG_ON(!TAILQ_EMPTY(&rdev->fence_check));
 	DRM_DESTROY_WAITQUEUE(&rdev->fence_queue);
 	spin_lock_destroy(&rdev->fence_lock);
-#endif
 }
 
 /**
@@ -1189,106 +1122,48 @@ static inline bool radeon_test_signaled(
 	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
-#ifdef __NetBSD__
+struct radeon_wait_cb {
+	struct dma_fence_cb base;
+};
 
 static void
-radeon_fence_wakeup_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct radeon_fence *rfence = to_radeon_fence(fence);
 	struct radeon_device *rdev = rfence->rdev;
 
 	BUG_ON(!spin_is_locked(&rdev->fence_lock));
-	cv_broadcast(&rdev->fence_queue);
+	DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
 }
 
-static signed long
-radeon_fence_default_wait(struct dma_fence *f, bool intr, signed long timo)
+static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
+					     signed long t)
 {
-	struct dma_fence_cb fcb;
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
+	struct radeon_wait_cb cb;
 	int r;
 
-	r = dma_fence_add_callback(f, &fcb, radeon_fence_wakeup_cb);
-	if (r)			/* fence is done already */
-		return timo;
+	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+		return t;
 
 	spin_lock(&rdev->fence_lock);
 	if (intr) {
 		DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue,
-		    &rdev->fence_lock, timo,
+		    &rdev->fence_lock, t,
 		    radeon_test_signaled(fence));
 	} else {
 		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(r, &rdev->fence_queue,
-		    &rdev->fence_lock, timo,
+		    &rdev->fence_lock, t,
 		    radeon_test_signaled(fence));
 	}
 	spin_unlock(&rdev->fence_lock);
 
-	(void)dma_fence_remove_callback(f, &fcb);
-
-	return r;
-}
-
-#else
-
-struct radeon_wait_cb {
-	struct dma_fence_cb base;
-	struct task_struct *task;
-};
-
-static void
-radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
-	struct radeon_wait_cb *wait =
-		container_of(cb, struct radeon_wait_cb, base);
-
-	wake_up_process(wait->task);
-}
-
-static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
-					     signed long t)
-{
-	struct radeon_fence *fence = to_radeon_fence(f);
-	struct radeon_device *rdev = fence->rdev;
-	struct radeon_wait_cb cb;
-
-	cb.task = current;
-
-	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
-		return t;
-
-	while (t > 0) {
-		if (intr)
-			set_current_state(TASK_INTERRUPTIBLE);
-		else
-			set_current_state(TASK_UNINTERRUPTIBLE);
-
-		/*
-		 * radeon_test_signaled must be called after
-		 * set_current_state to prevent a race with wake_up_process
-		 */
-		if (radeon_test_signaled(fence))
-			break;
-
-		if (rdev->needs_reset) {
-			t = -EDEADLK;
-			break;
-		}
-
-		t = schedule_timeout(t);
-
-		if (t > 0 && intr && signal_pending(current))
-			t = -ERESTARTSYS;
-	}
-
-	__set_current_state(TASK_RUNNING);
 	dma_fence_remove_callback(f, &cb.base);
 
-	return t;
+	return r;
 }
 
-#endif
 const struct dma_fence_ops radeon_fence_ops = {
 	.get_driver_name = radeon_fence_get_driver_name,
 	.get_timeline_name = radeon_fence_get_timeline_name,
