Module Name:    src
Committed By:   martin
Date:           Mon Jul 31 14:56:20 UTC 2023

Modified Files:
        src/sys/arch/xen/x86 [netbsd-9]: xen_intr.c

Log Message:
Pull up following revision(s) (requested by riastradh in ticket #1679):

        sys/arch/xen/x86/xen_intr.c: revision 1.31

xen_intr.c: Use kpreempt_disable/enable around access to curcpu().

curcpu() is not otherwise guaranteed to be stable at these points.

While here, nix nonsensical membars.  This need only be synchronized
with interrupts on the same CPU.

Proposed on port-xen:
https://mail-index.netbsd.org/port-xen/2022/07/13/msg010250.html
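
For reference, the pattern the revision applies is the usual one for touching
per-CPU state in a preemptible kernel: disable preemption, take the curcpu()
snapshot, update the per-vCPU field, then re-enable preemption. A minimal
sketch of the idea follows; the struct and field names (struct cpu_info,
ci_vcpu, evtchn_upcall_mask) and the kpreempt_*/__insn_barrier calls are taken
from the diff below, while the function name example_mask_events is purely
illustrative and not part of the committed code:

	/* Sketch: keep curcpu() stable while writing the per-vCPU mask. */
	void
	example_mask_events(void)
	{
		struct cpu_info *ci;

		kpreempt_disable();	/* pin this LWP to the current CPU */
		ci = curcpu();		/* now safe to dereference */
		ci->ci_vcpu->evtchn_upcall_mask = 1;
		kpreempt_enable();

		/*
		 * Compiler barrier only: the mask is consumed by interrupts
		 * on the same CPU, so no hardware memory fence (x86_lfence,
		 * xen_rmb) is needed -- hence the membars nixed below.
		 */
		__insn_barrier();
	}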


To generate a diff of this commit:
cvs rdiff -u -r1.17 -r1.17.2.1 src/sys/arch/xen/x86/xen_intr.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/xen/x86/xen_intr.c
diff -u src/sys/arch/xen/x86/xen_intr.c:1.17 src/sys/arch/xen/x86/xen_intr.c:1.17.2.1
--- src/sys/arch/xen/x86/xen_intr.c:1.17	Fri Jun  7 12:43:52 2019
+++ src/sys/arch/xen/x86/xen_intr.c	Mon Jul 31 14:56:19 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_intr.c,v 1.17 2019/06/07 12:43:52 cherry Exp $	*/
+/*	$NetBSD: xen_intr.c,v 1.17.2.1 2023/07/31 14:56:19 martin Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.17 2019/06/07 12:43:52 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.17.2.1 2023/07/31 14:56:19 martin Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -103,19 +103,28 @@ xen_spllower(int nlevel)
 void
 x86_disable_intr(void)
 {
+
+	kpreempt_disable();
 	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
-	x86_lfence();
+	kpreempt_enable();
+
+	__insn_barrier();
 }
 
 void
 x86_enable_intr(void)
 {
-	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
+	struct cpu_info *ci;
+
+	__insn_barrier();
+
+	kpreempt_disable();
+	ci = curcpu();
+	ci->ci_vcpu->evtchn_upcall_mask = 0;
 	__insn_barrier();
-	_vci->evtchn_upcall_mask = 0;
-	x86_lfence(); /* unmask then check (avoid races) */
-	if (__predict_false(_vci->evtchn_upcall_pending))
+	if (__predict_false(ci->ci_vcpu->evtchn_upcall_pending))
 		hypervisor_force_callback();
+	kpreempt_enable();
 }
 
 #endif /* !XENPVHVM */
@@ -123,20 +132,27 @@ x86_enable_intr(void)
 u_long
 xen_read_psl(void)
 {
+	u_long psl;
+
+	kpreempt_disable();
+	psl = curcpu()->ci_vcpu->evtchn_upcall_mask;
+	kpreempt_enable();
 
-	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
+	return psl;
 }
 
 void
 xen_write_psl(u_long psl)
 {
-	struct cpu_info *ci = curcpu();
+	struct cpu_info *ci;
 
+	kpreempt_disable();
+	ci = curcpu();
 	ci->ci_vcpu->evtchn_upcall_mask = psl;
-	xen_rmb();
-	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
+	__insn_barrier();
+	if (__predict_false(ci->ci_vcpu->evtchn_upcall_pending) && psl == 0)
 	    	hypervisor_force_callback();
-	}
+	kpreempt_enable();
 }
 
 void *