Module Name:    src
Committed By:   mrg
Date:           Fri Jan 17 04:11:33 UTC 2025

Modified Files:
        src/sys/kern: kern_cpu.c kern_runq.c sched_4bsd.c
        src/sys/sys: cpu.h

Log Message:
partly prepare the scheduler to support more than 2 CPU speed classes

move most of the checks of SPCF_IDLE and SPCF_1STCLASS behind
functions that can grow support for more than 2 CPU classes.  this
adds 4 new functions, 2 of them simple wrappers around the 1st:

    bool cpu_is_type(struct cpu_info *ci, int wanted);
    bool cpu_is_idle_1stclass(struct cpu_info *ci);
    bool cpu_is_1stclass(struct cpu_info *ci);
    bool cpu_is_better(struct cpu_info *ci1, struct cpu_info *ci2);
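
for example, the idle + 1st-class test in sched_bestcpu() (see the
kern_runq.c hunk below) goes from the open-coded

	if ((curspc->spc_flags & (SPCF_IDLE | SPCF_1STCLASS)) ==
	    (SPCF_IDLE | SPCF_1STCLASS)) {
		return curci;
	}

to

	if (cpu_is_idle_1stclass(curci)) {
		return curci;
	}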

with this in place, we retain the preference for running on 1st-class
CPUs, while leaving room to expand cpu_is_better() to handle multiple
non-1st-class CPU types.  ultimately, i envision a per-CPU priority
number that lets us mark the fastest (e.g. turbo-speed) cores ahead
of the others, where we can detect this.  a rough sketch of that idea
follows.
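
as a hypothetical sketch only (not part of this commit, and assuming
a new spc_class_prio member in struct schedstate_percpu where a
larger value means a faster class of CPU), cpu_is_better() could grow
into something like:

	bool
	cpu_is_better(struct cpu_info *ci1, struct cpu_info *ci2)
	{
		const struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
		const struct schedstate_percpu *spc2 = &ci2->ci_schedstate;

		/* hypothetical per-class priority; higher is faster. */
		if (spc1->spc_class_prio != spc2->spc_class_prio)
			return spc1->spc_class_prio > spc2->spc_class_prio;

		/* otherwise fall back to today's 2-class test. */
		return (spc1->spc_flags & SPCF_1STCLASS) != 0 &&
		    (spc2->spc_flags & SPCF_1STCLASS) == 0;
	}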

XXX: use struct schedstate_percpu instead of cpu_info?
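
if we went that way, the helpers might take the scheduler state
directly and spare callers the ci_schedstate dereference; a
hypothetical variant, not in this commit:

	bool
	spc_is_type(const struct schedstate_percpu *spc, int wanted)
	{

		return (spc->spc_flags & wanted) == wanted;
	}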

NFCI.


To generate a diff of this commit:
cvs rdiff -u -r1.97 -r1.98 src/sys/kern/kern_cpu.c
cvs rdiff -u -r1.70 -r1.71 src/sys/kern/kern_runq.c
cvs rdiff -u -r1.46 -r1.47 src/sys/kern/sched_4bsd.c
cvs rdiff -u -r1.54 -r1.55 src/sys/sys/cpu.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_cpu.c
diff -u src/sys/kern/kern_cpu.c:1.97 src/sys/kern/kern_cpu.c:1.98
--- src/sys/kern/kern_cpu.c:1.97	Sat Sep  2 17:44:59 2023
+++ src/sys/kern/kern_cpu.c	Fri Jan 17 04:11:33 2025
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_cpu.c,v 1.97 2023/09/02 17:44:59 riastradh Exp $	*/
+/*	$NetBSD: kern_cpu.c,v 1.98 2025/01/17 04:11:33 mrg Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
@@ -60,7 +60,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.97 2023/09/02 17:44:59 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.98 2025/01/17 04:11:33 mrg Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_cpu_ucode.h"
@@ -452,6 +452,42 @@ cpu_setstate(struct cpu_info *ci, bool o
 	return 0;
 }
 
+bool
+cpu_is_type(struct cpu_info *ci, int wanted)
+{
+
+	return (ci->ci_schedstate.spc_flags & wanted) == wanted;
+}
+
+bool
+cpu_is_idle_1stclass(struct cpu_info *ci)
+{
+	const int wanted = SPCF_IDLE | SPCF_1STCLASS;
+
+	return cpu_is_type(ci, wanted);
+}
+
+bool
+cpu_is_1stclass(struct cpu_info *ci)
+{
+	const int wanted = SPCF_1STCLASS;
+
+	return cpu_is_type(ci, wanted);
+}
+
+bool
+cpu_is_better(struct cpu_info *ci1, struct cpu_info *ci2)
+{
+	const int ci1_flags = ci1->ci_schedstate.spc_flags;
+	const int ci2_flags = ci2->ci_schedstate.spc_flags;
+
+	if ((ci1_flags & SPCF_1STCLASS) != 0 &&
+	    (ci2_flags & SPCF_1STCLASS) == 0)
+		return true;
+
+	return false;
+}
+
 #if defined(__HAVE_INTR_CONTROL)
 static void
 cpu_xc_intr(struct cpu_info *ci, void *unused)

Index: src/sys/kern/kern_runq.c
diff -u src/sys/kern/kern_runq.c:1.70 src/sys/kern/kern_runq.c:1.71
--- src/sys/kern/kern_runq.c:1.70	Tue Sep 19 22:15:32 2023
+++ src/sys/kern/kern_runq.c	Fri Jan 17 04:11:33 2025
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_runq.c,v 1.70 2023/09/19 22:15:32 ad Exp $	*/
+/*	$NetBSD: kern_runq.c,v 1.71 2025/01/17 04:11:33 mrg Exp $	*/
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.70 2023/09/19 22:15:32 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.71 2025/01/17 04:11:33 mrg Exp $");
 
 #include "opt_dtrace.h"
 
@@ -523,8 +523,7 @@ sched_bestcpu(struct lwp *l, struct cpu_
 			curspc = &curci->ci_schedstate;
 
 			/* If this CPU is idle and 1st class, we're done. */
-			if ((curspc->spc_flags & (SPCF_IDLE | SPCF_1STCLASS)) ==
-			    (SPCF_IDLE | SPCF_1STCLASS)) {
+			if (cpu_is_idle_1stclass(curci)) {
 				return curci;
 			}
 
@@ -536,8 +535,7 @@ sched_bestcpu(struct lwp *l, struct cpu_
 			}
 			if (curpri == bestpri) {
 				/* Prefer first class CPUs over others. */
-				if ((curspc->spc_flags & SPCF_1STCLASS) == 0 &&
-				    (bestspc->spc_flags & SPCF_1STCLASS) != 0) {
+				if (cpu_is_better(bestci, curci)) {
 				    	continue;
 				}
 				/*
@@ -568,7 +566,7 @@ sched_bestcpu(struct lwp *l, struct cpu_
 struct cpu_info *
 sched_takecpu(struct lwp *l)
 {
-	struct schedstate_percpu *spc, *tspc;
+	struct schedstate_percpu *spc;
 	struct cpu_info *ci, *curci, *tci;
 	pri_t eprio;
 	int flags;
@@ -611,9 +609,7 @@ sched_takecpu(struct lwp *l)
 	 */
 	tci = ci;
 	do {
-		tspc = &tci->ci_schedstate;
-		if ((tspc->spc_flags & flags) == flags &&
-		    sched_migratable(l, tci)) {
+		if (cpu_is_type(tci, flags) && sched_migratable(l, tci)) {
 			return tci;
 		}
 		tci = tci->ci_sibling[CPUREL_CORE];
@@ -635,9 +631,7 @@ sched_takecpu(struct lwp *l)
 	curci = curcpu();
 	tci = curci;
 	do {
-		tspc = &tci->ci_schedstate;
-		if ((tspc->spc_flags & flags) == flags &&
-		    sched_migratable(l, tci)) {
+		if (cpu_is_type(tci, flags) && sched_migratable(l, tci)) {
 			return tci;
 		}
 		tci = tci->ci_sibling[CPUREL_CORE];
@@ -670,8 +664,7 @@ sched_catchlwp(struct cpu_info *ci)
 	 * Be more aggressive if this CPU is first class, and the other
 	 * is not.
 	 */
-	gentle = ((curspc->spc_flags & SPCF_1STCLASS) == 0 ||
-	    (spc->spc_flags & SPCF_1STCLASS) != 0);
+	gentle = !cpu_is_better(curci, ci);
 
 	if (atomic_load_relaxed(&spc->spc_mcount) < (gentle ? min_catch : 1) ||
 	    curspc->spc_psid != spc->spc_psid) {
@@ -913,7 +906,6 @@ sched_idle(void)
 void
 sched_preempted(struct lwp *l)
 {
-	const int flags = SPCF_IDLE | SPCF_1STCLASS;
 	struct schedstate_percpu *tspc;
 	struct cpu_info *ci, *tci;
 
@@ -930,8 +922,8 @@ sched_preempted(struct lwp *l)
 	 * - or this LWP is a child of vfork() that has just done execve()
 	 */
 	if (l->l_target_cpu != NULL ||
-	    ((tspc->spc_flags & SPCF_1STCLASS) != 0 &&
-	    (l->l_pflag & LP_TELEPORT) == 0)) {
+	    (cpu_is_1stclass(ci) &&
+	     (l->l_pflag & LP_TELEPORT) == 0)) {
 		return;
 	}
 
@@ -944,8 +936,7 @@ sched_preempted(struct lwp *l)
 	tci = ci->ci_sibling[CPUREL_CORE];
 	while (tci != ci) {
 		tspc = &tci->ci_schedstate;
-		if ((tspc->spc_flags & flags) == flags &&
-		    sched_migratable(l, tci)) {
+		if (cpu_is_idle_1stclass(tci) && sched_migratable(l, tci)) {
 		    	l->l_target_cpu = tci;
 			l->l_pflag &= ~LP_TELEPORT;
 		    	return;
@@ -976,8 +967,7 @@ sched_preempted(struct lwp *l)
 		 * whole system if needed.
 		 */
 		tci = sched_bestcpu(l, l->l_cpu);
-		if (tci != ci &&
-		    (tci->ci_schedstate.spc_flags & flags) == flags) {
+		if (tci != ci && cpu_is_idle_1stclass(tci)) {
 			l->l_target_cpu = tci;
 		}
 	}
@@ -998,8 +988,7 @@ sched_vforkexec(struct lwp *l, bool same
 {
 
 	KASSERT(l == curlwp);
-	if ((samecpu && ncpu > 1) ||
-	    (l->l_cpu->ci_schedstate.spc_flags & SPCF_1STCLASS) == 0) {
+	if ((samecpu && ncpu > 1) || !cpu_is_1stclass(l->l_cpu)) {
 		l->l_pflag |= LP_TELEPORT;
 		preempt();
 	}

Index: src/sys/kern/sched_4bsd.c
diff -u src/sys/kern/sched_4bsd.c:1.46 src/sys/kern/sched_4bsd.c:1.47
--- src/sys/kern/sched_4bsd.c:1.46	Wed Oct 26 23:24:09 2022
+++ src/sys/kern/sched_4bsd.c	Fri Jan 17 04:11:33 2025
@@ -1,4 +1,4 @@
-/*	$NetBSD: sched_4bsd.c,v 1.46 2022/10/26 23:24:09 riastradh Exp $	*/
+/*	$NetBSD: sched_4bsd.c,v 1.47 2025/01/17 04:11:33 mrg Exp $	*/
 
 /*
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019, 2020
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.46 2022/10/26 23:24:09 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.47 2025/01/17 04:11:33 mrg Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -146,7 +146,7 @@ sched_tick(struct cpu_info *ci)
 			 */
 			pri = MAXPRI_KTHREAD;
 			spc->spc_flags |= SPCF_SHOULDYIELD;
-		} else if ((spc->spc_flags & SPCF_1STCLASS) == 0) {
+		} else if (!cpu_is_1stclass(ci)) {
 			/*
 			 * For SMT or asymmetric systems push a little
 			 * harder: if this is not a 1st class CPU, try to

Index: src/sys/sys/cpu.h
diff -u src/sys/sys/cpu.h:1.54 src/sys/sys/cpu.h:1.55
--- src/sys/sys/cpu.h:1.54	Tue Mar  5 20:59:41 2024
+++ src/sys/sys/cpu.h	Fri Jan 17 04:11:33 2025
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.54 2024/03/05 20:59:41 thorpej Exp $	*/
+/*	$NetBSD: cpu.h,v 1.55 2025/01/17 04:11:33 mrg Exp $	*/
 
 /*-
  * Copyright (c) 2007 YAMAMOTO Takashi,
@@ -115,6 +115,12 @@ cpu_name(struct cpu_info *ci)
 	return ci->ci_data.cpu_name;
 }
 
+/* Scheduler helpers */
+bool cpu_is_type(struct cpu_info *, int);
+bool cpu_is_idle_1stclass(struct cpu_info *);
+bool cpu_is_1stclass(struct cpu_info *);
+bool cpu_is_better(struct cpu_info *, struct cpu_info *);
+
 #ifdef CPU_UCODE
 struct cpu_ucode_softc {
 	int loader_version;
