Module Name:	src
Committed By:	riastradh
Date:		Thu Feb 23 14:55:25 UTC 2023
Modified Files:
	src/sys/arch/arm/arm: armv6_start.S
	src/sys/arch/arm/arm32: cpuswitch.S

Log Message:
arm32: Add missing barriers in cpu_switchto.

Details in comments.

PR kern/57240

XXX pullup-8
XXX pullup-9
XXX pullup-10


To generate a diff of this commit:
cvs rdiff -u -r1.37 -r1.38 src/sys/arch/arm/arm/armv6_start.S
cvs rdiff -u -r1.105 -r1.106 src/sys/arch/arm/arm32/cpuswitch.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
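
For orientation only (not part of the commit): the ordering that the two new
dmb instructions enforce around the ci_curlwp store can be sketched in
portable C11.  The type and function names below (switch_publish_curlwp, and
the trimmed-down struct cpu_info) are hypothetical stand-ins, and
atomic_thread_fence() stands in for dmb; this is a sketch of the required
ordering, not the kernel code.

/*
 * Hypothetical C11 sketch of the ordering in cpu_switchto when
 * publishing the new curlwp; not NetBSD kernel code.
 */
#include <stdatomic.h>

struct lwp;				/* opaque for this sketch */

struct cpu_info {
	struct lwp *_Atomic ci_curlwp;	/* stand-in for the real field */
};

static void
switch_publish_curlwp(struct cpu_info *ci, struct lwp *newlwp)
{
	/*
	 * 1. store-before-store: any store oldlwp already made (e.g. a
	 *    prior mutex_exit) must be visible to other CPUs before they
	 *    can observe ci_curlwp == newlwp.  This is the first new dmb.
	 */
	atomic_thread_fence(memory_order_release);

	/* The "str r6, [r5, #(CI_CURLWP)]" store. */
	atomic_store_explicit(&ci->ci_curlwp, newlwp, memory_order_relaxed);

	/*
	 * 2. store-before-load: ci_curlwp == newlwp must be visible before
	 *    any load a later mutex_exit by newlwp does to test for
	 *    waiters.  This is the second dmb; only a full fence orders a
	 *    store against subsequent loads.
	 */
	atomic_thread_fence(memory_order_seq_cst);
}

On ARMv7 both fences typically compile to a dmb; the release/seq_cst
distinction matters for the C memory model rather than the generated
instruction.
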
Modified files:

Index: src/sys/arch/arm/arm/armv6_start.S
diff -u src/sys/arch/arm/arm/armv6_start.S:1.37 src/sys/arch/arm/arm/armv6_start.S:1.38
--- src/sys/arch/arm/arm/armv6_start.S:1.37	Sun Nov 14 16:56:32 2021
+++ src/sys/arch/arm/arm/armv6_start.S	Thu Feb 23 14:55:24 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: armv6_start.S,v 1.37 2021/11/14 16:56:32 riastradh Exp $	*/
+/*	$NetBSD: armv6_start.S,v 1.38 2023/02/23 14:55:24 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2012, 2017, 2018 The NetBSD Foundation, Inc.
@@ -943,6 +943,11 @@ armv7_mpcontinuation:
 #else
 #error either TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP must be defined
 #endif
+	/*
+	 * No membar needed because we're not switching from a
+	 * previous lwp, and the idle lwp we're switching to can't be
+	 * holding locks already; see cpu_switchto.
+	 */
 	str	r6, [r5, #CI_CURLWP]		// and note we are running on it
 
 	mov	r0, r5				// pass cpu_info

Index: src/sys/arch/arm/arm32/cpuswitch.S
diff -u src/sys/arch/arm/arm32/cpuswitch.S:1.105 src/sys/arch/arm/arm32/cpuswitch.S:1.106
--- src/sys/arch/arm/arm32/cpuswitch.S:1.105	Sun May 30 06:53:15 2021
+++ src/sys/arch/arm/arm32/cpuswitch.S	Thu Feb 23 14:55:25 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpuswitch.S,v 1.105 2021/05/30 06:53:15 dholland Exp $	*/
+/*	$NetBSD: cpuswitch.S,v 1.106 2023/02/23 14:55:25 riastradh Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -87,7 +87,7 @@
 #include <arm/asm.h>
 #include <arm/locore.h>
 
-	RCSID("$NetBSD: cpuswitch.S,v 1.105 2021/05/30 06:53:15 dholland Exp $")
+	RCSID("$NetBSD: cpuswitch.S,v 1.106 2023/02/23 14:55:25 riastradh Exp $")
 
 /* LINTSTUB: include <sys/param.h> */
 
@@ -189,11 +189,32 @@ ENTRY(cpu_switchto)
 	mcr	p15, 0, r6, c13, c0, 4		/* set current lwp */
 #endif
 
+	/*
+	 * Issue barriers to coordinate mutex_exit on this CPU with
+	 * mutex_vector_enter on another CPU.
+	 *
+	 * 1. Any prior mutex_exit by oldlwp must be visible to other
+	 *    CPUs before we set ci_curlwp := newlwp on this one,
+	 *    requiring a store-before-store barrier.
+	 *
+	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
+	 *    before any subsequent mutex_exit by newlwp can even test
+	 *    whether there might be waiters, requiring a
+	 *    store-before-load barrier.
+	 *
+	 * See kern_mutex.c for details -- this is necessary for
+	 * adaptive mutexes to detect whether the lwp is on the CPU in
+	 * order to safely block without requiring atomic r/m/w in
+	 * mutex_exit.
+	 */
+
 	/* We have a new curlwp now so make a note of it */
+#ifdef _ARM_ARCH_7
+	dmb				/* store-before-store */
+#endif
 	str	r6, [r5, #(CI_CURLWP)]
-
 #ifdef _ARM_ARCH_7
-	dmb				/* see comments in kern_mutex.c */
+	dmb				/* store-before-load */
 #endif
 
 	/* Get the new pcb */
@@ -392,9 +413,12 @@ ENTRY_NP(softint_switch)
 #if defined(TPIDRPRW_IS_CURLWP)
 	mcr	p15, 0, r5, c13, c0, 4		/* save new lwp */
 #endif
+#ifdef _ARM_ARCH_7
+	dmb				/* for mutex_enter; see cpu_switchto */
+#endif
 	str	r5, [r7, #(CI_CURLWP)]		/* save new lwp */
 #ifdef _ARM_ARCH_7
-	dmb				/* see comments in kern_mutex.c */
+	dmb				/* for mutex_enter; see cpu_switchto */
 #endif
 
 #ifdef KASAN
@@ -428,7 +452,13 @@ ENTRY_NP(softint_switch)
 #if defined(TPIDRPRW_IS_CURLWP)
 	mcr	p15, 0, r4, c13, c0, 4		/* restore pinned lwp */
 #endif
+#ifdef _ARM_ARCH_7
+	dmb				/* for mutex_enter; see cpu_switchto */
+#endif
 	str	r4, [r7, #(CI_CURLWP)]		/* restore pinned lwp */
+#ifdef _ARM_ARCH_7
+	dmb				/* for mutex_enter; see cpu_switchto */
+#endif
 	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */
 
 	/* At this point we can allow IRQ's again. */
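
Also purely illustrative, under a simplified model of the adaptive mutexes
referenced in the new comment: a waiter on another CPU decides whether to
keep spinning by checking whether the lock's owner is still the curlwp of
the CPU it last ran on.  The names below (kmutex, mtx_owner, l_cpu,
owner_running) are stand-ins modelled on the kernel's; this is not the
actual kern_mutex.c logic.

/*
 * Hypothetical sketch of the owner-on-CPU test in an adaptive mutex's
 * slow path; not NetBSD kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct cpu_info;

struct lwp {
	struct cpu_info *l_cpu;		/* CPU this lwp last ran on */
};

struct cpu_info {
	struct lwp *_Atomic ci_curlwp;
};

struct kmutex {
	struct lwp *_Atomic mtx_owner;
};

/*
 * Spin only while the owner appears to be running, i.e. while it is
 * still the curlwp of its CPU.  The barriers added in cpu_switchto
 * order the ci_curlwp update against the lwp's own mutex_exit stores
 * and loads, so once the owner is observed off-CPU the waiter can
 * safely block.
 */
static bool
owner_running(struct kmutex *mtx)
{
	struct lwp *owner;

	owner = atomic_load_explicit(&mtx->mtx_owner, memory_order_acquire);
	if (owner == NULL)
		return false;		/* released; retry the fast path */
	return atomic_load_explicit(&owner->l_cpu->ci_curlwp,
	    memory_order_relaxed) == owner;
}

This only makes items 1 and 2 of the new comment concrete; the real slow
path in kern_mutex.c does considerably more (waiters flags, turnstiles).
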