Module Name:	src
Committed By:	riastradh
Date:		Sat Feb 25 00:32:13 UTC 2023
Modified Files:
	src/sys/arch/xen/x86: xen_intr.c

Log Message:
xen_intr.c: Use kpreempt_disable/enable around access to curcpu().

curcpu() is not otherwise guaranteed to be stable at these points.
While here, nix nonsensical membars.  This need only be synchronized
with interrupts on the same CPU.

Proposed on port-xen:
https://mail-index.netbsd.org/port-xen/2022/07/13/msg010250.html

XXX pullup-8 (in __sti/__cli, __save/restore_flags in include/xen.h)
XXX pullup-9
XXX pullup-10


To generate a diff of this commit:
cvs rdiff -u -r1.30 -r1.31 src/sys/arch/xen/x86/xen_intr.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:

Index: src/sys/arch/xen/x86/xen_intr.c
diff -u src/sys/arch/xen/x86/xen_intr.c:1.30 src/sys/arch/xen/x86/xen_intr.c:1.31
--- src/sys/arch/xen/x86/xen_intr.c:1.30	Tue May 24 14:00:23 2022
+++ src/sys/arch/xen/x86/xen_intr.c	Sat Feb 25 00:32:13 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_intr.c,v 1.30 2022/05/24 14:00:23 bouyer Exp $	*/
+/*	$NetBSD: xen_intr.c,v 1.31 2023/02/25 00:32:13 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.30 2022/05/24 14:00:23 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.31 2023/02/25 00:32:13 riastradh Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_pci.h"
@@ -83,19 +83,28 @@ static const char *xen_ipi_names[XEN_NIP
 void
 x86_disable_intr(void)
 {
+
+	kpreempt_disable();
 	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
-	x86_lfence();
+	kpreempt_enable();
+
+	__insn_barrier();
 }
 
 void
 x86_enable_intr(void)
 {
-	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
+	struct cpu_info *ci;
+
 	__insn_barrier();
-	_vci->evtchn_upcall_mask = 0;
-	x86_lfence(); /* unmask then check (avoid races) */
-	if (__predict_false(_vci->evtchn_upcall_pending))
+
+	kpreempt_disable();
+	ci = curcpu();
+	ci->ci_vcpu->evtchn_upcall_mask = 0;
+	__insn_barrier();
+	if (__predict_false(ci->ci_vcpu->evtchn_upcall_pending))
 		hypervisor_force_callback();
+	kpreempt_enable();
 }
 #endif /* !XENPVHVM */
 
@@ -103,20 +112,27 @@ x86_enable_intr(void)
 u_long
 xen_read_psl(void)
 {
+	u_long psl;
+
+	kpreempt_disable();
+	psl = curcpu()->ci_vcpu->evtchn_upcall_mask;
+	kpreempt_enable();
 
-	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
+	return psl;
 }
 
 void
 xen_write_psl(u_long psl)
 {
-	struct cpu_info *ci = curcpu();
+	struct cpu_info *ci;
 
+	kpreempt_disable();
+	ci = curcpu();
 	ci->ci_vcpu->evtchn_upcall_mask = psl;
-	xen_rmb();
-	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
+	__insn_barrier();
+	if (__predict_false(ci->ci_vcpu->evtchn_upcall_pending) && psl == 0)
 		hypervisor_force_callback();
-	}
+	kpreempt_enable();
 }
 
 void *
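
For readers following the log message rather than the diff itself, the sketch
below condenses the pattern the change applies: per-CPU state reached through
curcpu() is only stable while kernel preemption is disabled, and ordering
against interrupt handlers on the same CPU needs only a compiler barrier
(__insn_barrier()), not a memory fence such as x86_lfence().  This is an
illustration, not code from the tree: the function name is made up, and the
header paths are approximate; the authoritative versions are the functions in
the diff above.

/*
 * Illustrative sketch (not from the tree): bracket the curcpu() access
 * with kpreempt_disable()/kpreempt_enable() so the thread cannot migrate
 * to another CPU mid-access, and use __insn_barrier() -- a compiler
 * barrier -- where the old code used a memory fence, since the access
 * only has to be ordered against interrupts taken on the same CPU.
 */
#include <sys/cdefs.h>		/* __insn_barrier(), __predict_false() */
#include <sys/cpu.h>		/* curcpu(), kpreempt_disable/enable() (assumed) */

#include <xen/hypervisor.h>	/* hypervisor_force_callback() (approximate path) */

static void
example_unmask_upcalls(void)	/* hypothetical helper, mirrors x86_enable_intr() */
{
	struct cpu_info *ci;

	kpreempt_disable();	/* curcpu() is stable from here on */
	ci = curcpu();
	ci->ci_vcpu->evtchn_upcall_mask = 0;
	__insn_barrier();	/* unmask before checking; same-CPU ordering only */
	if (__predict_false(ci->ci_vcpu->evtchn_upcall_pending))
		hypervisor_force_callback();
	kpreempt_enable();
}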