From: "Peter Zijlstra (Intel)" <pet...@infradead.org> Support the preempt= boot option and patch the static call sites accordingly.
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Mel Gorman <mgor...@suse.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Michal Hocko <mho...@kernel.org>
Cc: Paul E. McKenney <paul...@kernel.org>
[remove the mad scientist experiments]
Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
---
 kernel/sched/core.c | 67 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 66 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6715caa17ea7..84ac05d2df3a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -12,6 +12,7 @@
 
 #include "sched.h"
 
+#include <linux/entry-common.h>
 #include <linux/nospec.h>
 
 #include <linux/kcov.h>
@@ -4772,9 +4773,73 @@
 DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func());
 EXPORT_STATIC_CALL(preempt_schedule_notrace);
 #endif
-
 #endif /* CONFIG_PREEMPTION */
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+/*
+ * SC:cond_resched
+ * SC:might_resched
+ * SC:preempt_schedule
+ * SC:preempt_schedule_notrace
+ * SC:irqentry_exit_cond_resched
+ *
+ *
+ * NONE:
+ *   cond_resched               <- __cond_resched
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- NOP
+ *   preempt_schedule_notrace   <- NOP
+ *   irqentry_exit_cond_resched <- NOP
+ *
+ * VOLUNTARY:
+ *   cond_resched               <- __cond_resched
+ *   might_resched              <- __cond_resched
+ *   preempt_schedule           <- NOP
+ *   preempt_schedule_notrace   <- NOP
+ *   irqentry_exit_cond_resched <- NOP
+ *
+ * FULL:
+ *   cond_resched               <- RET0
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- preempt_schedule
+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ */
+static int __init setup_preempt_mode(char *str)
+{
+	if (!strcmp(str, "none")) {
+		static_call_update(cond_resched, __cond_resched);
+		static_call_update(might_resched, __static_call_return0);
+		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
+		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
+		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+		pr_info("Dynamic Preempt: %s\n", str);
+	} else if (!strcmp(str, "voluntary")) {
+		static_call_update(cond_resched, __cond_resched);
+		static_call_update(might_resched, __cond_resched);
+		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
+		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
+		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+		pr_info("Dynamic Preempt: %s\n", str);
+	} else if (!strcmp(str, "full")) {
+		static_call_update(cond_resched, __static_call_return0);
+		static_call_update(might_resched, __static_call_return0);
+		static_call_update(preempt_schedule, __preempt_schedule_func());
+		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func());
+		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+		pr_info("Dynamic Preempt: %s\n", str);
+	} else {
+		pr_warn("Dynamic Preempt: Unsupported preempt mode %s, default to full\n", str);
+		return 1;
+	}
+	return 0;
+}
+__setup("preempt=", setup_preempt_mode);
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+
 /*
  * This is the entry point to schedule() from kernel preemption
  * off of irq context.
-- 
2.25.1
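With CONFIG_PREEMPT_DYNAMIC=y the preemption model then becomes a
boot-time choice, selected on the kernel command line, e.g.:

	preempt=voluntary

The chosen mode is reported in dmesg as "Dynamic Preempt: voluntary"; an
unrecognized value warns and leaves the build-time default (full
preemption) in place.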