Module Name:    src
Committed By:   riastradh
Date:           Thu Feb 23 14:55:36 UTC 2023

Modified Files:
        src/sys/arch/hppa/include: cpu.h

Log Message:
hppa: Add missing barriers in cpu_switchto.

PR kern/57240

Not sure hppa has ever had working MULTIPROCESSOR, so maybe no
pullups needed?


To generate a diff of this commit:
cvs rdiff -u -r1.12 -r1.13 src/sys/arch/hppa/include/cpu.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/hppa/include/cpu.h
diff -u src/sys/arch/hppa/include/cpu.h:1.12 src/sys/arch/hppa/include/cpu.h:1.13
--- src/sys/arch/hppa/include/cpu.h:1.12	Tue Nov  2 11:26:04 2021
+++ src/sys/arch/hppa/include/cpu.h	Thu Feb 23 14:55:36 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.12 2021/11/02 11:26:04 ryo Exp $	*/
+/*	$NetBSD: cpu.h,v 1.13 2023/02/23 14:55:36 riastradh Exp $	*/
 
 /*	$OpenBSD: cpu.h,v 1.55 2008/07/23 17:39:35 kettenis Exp $	*/
 
@@ -200,7 +200,26 @@ extern int cpu_revision;
 #define	GET_CURLWP(r)		mfctl CR_CURCPU, r ! ldw CI_CURLWP(r), r
 #define	GET_CURLWP_SPACE(s, r)	mfctl CR_CURCPU, r ! ldw CI_CURLWP(s, r), r
 
-#define	SET_CURLWP(r,t)		mfctl CR_CURCPU, t ! stw r, CI_CURLWP(t)
+/*
+ * Issue barriers to coordinate mutex_exit on this CPU with
+ * mutex_vector_enter on another CPU.
+ *
+ * 1. Any prior mutex_exit by oldlwp must be visible to other
+ *    CPUs before we set ci_curlwp := newlwp on this one,
+ *    requiring a store-before-store barrier.
+ *
+ * 2. ci_curlwp := newlwp must be visible on all other CPUs
+ *    before any subsequent mutex_exit by newlwp can even test
+ *    whether there might be waiters, requiring a
+ *    store-before-load barrier.
+ *
+ * See kern_mutex.c for details -- this is necessary for
+ * adaptive mutexes to detect whether the lwp is on the CPU in
+ * order to safely block without requiring atomic r/m/w in
+ * mutex_exit.
+ */
+#define	SET_CURLWP(r,t)		\
+	sync ! mfctl CR_CURCPU, t ! stw r, CI_CURLWP(t) ! sync
 
 #else /*  MULTIPROCESSOR */
 

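[Editor's note, not part of the commit: a minimal C sketch of the ordering
the new SET_CURLWP enforces, expressed with NetBSD's machine-independent
membar(3) primitives for exposition.  The function name set_curlwp_sketch
is hypothetical; on hppa both barriers happen to be the same `sync'
instruction, which is stronger than the minimum each step requires.]

	/*
	 * Illustrative sketch only, assuming a kernel context where
	 * struct cpu_info exposes ci_curlwp, as on hppa.  Mirrors the
	 * two barrier requirements documented in the comment above
	 * SET_CURLWP.
	 */
	#include <sys/atomic.h>
	#include <sys/cpu.h>
	#include <sys/lwp.h>

	static inline void
	set_curlwp_sketch(struct cpu_info *ci, struct lwp *newlwp)
	{

		/*
		 * 1. Store-before-store: any prior mutex_exit stores by
		 *    oldlwp must be visible to other CPUs before the
		 *    store to ci_curlwp below.
		 */
		membar_producer();	/* first sync in SET_CURLWP */

		ci->ci_curlwp = newlwp;

		/*
		 * 2. Store-before-load: the store to ci_curlwp must be
		 *    visible before any subsequent mutex_exit by newlwp
		 *    loads the waiters state.  This needs a full
		 *    barrier; a store-store barrier is not enough.
		 */
		membar_sync();		/* second sync in SET_CURLWP */
	}

[The point of the ordering: mutex_vector_enter on another CPU reads
ci_curlwp to decide whether the owner is still on-CPU (spin) or not
(block).  With both barriers in place that test is reliable, so
mutex_exit can avoid an atomic read/modify/write; see kern_mutex.c.]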