> On Feb 18, 2021, at 12:09 AM, Christoph Hellwig <h...@infradead.org> wrote:
> 
> On Tue, Feb 09, 2021 at 02:16:46PM -0800, Nadav Amit wrote:
>> +/*
>> + * Flags to be used as scf_flags argument of smp_call_function_many_cond().
>> + */
>> +#define SCF_WAIT	(1U << 0)	/* Wait until function execution completed */
>> +#define SCF_RUN_LOCAL	(1U << 1)	/* Run also locally if local cpu is set in cpumask */
> 
> Can you move the comments on top of the defines to avoid the crazy
> long lines?
> 
>> +	if (cpu_online(this_cpu) && !oops_in_progress && !early_boot_irqs_disabled)
> 
> Another pointlessly overly long line, with various more following.
> 
>> EXPORT_SYMBOL(on_each_cpu_cond_mask);
> 
> This isn't used by any modular code, so maybe throw in a patch to drop
> the export?
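As for the long lines: moving the comments above the defines would keep them
short, roughly like the sketch below (illustrative only, not the final
wording):

/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond():
 *
 * %SCF_WAIT:		wait until the function execution is completed
 * %SCF_RUN_LOCAL:	run the function also locally if the local CPU is
 *			set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)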
I prefer to export on_each_cpu_cond_mask() and instead turn the users
(on_each_cpu(), on_each_cpu_mask() and on_each_cpu_cond()) into inline
functions in smp.h. Otherwise, the call-chain becomes longer for no reason.
Let me know if you object. So I will add something like:

-- >8 --

Author: Nadav Amit <na...@vmware.com>
Date:   Tue Feb 16 11:04:30 2021 -0800

    smp: inline on_each_cpu_cond() and on_each_cpu_cond_mask()

    Suggested-by: Peter Zijlstra <pet...@infradead.org>
    Signed-off-by: Nadav Amit <na...@vmware.com>

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 70c6f6284dcf..84a0b4828f66 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -50,30 +50,52 @@ extern unsigned int total_cpus;
 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 			     int wait);
 
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask);
+
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
 /*
  * Call a function on all processors
  */
-void on_each_cpu(smp_call_func_t func, void *info, int wait);
+static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
+}
 
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
+/**
+ * on_each_cpu_mask(): Run a function on processors specified by
+ * cpumask, which may include the local processor.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		void *info, bool wait);
+static inline void on_each_cpu_mask(const struct cpumask *mask,
+				    smp_call_func_t func, void *info, bool wait)
+{
+	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
+}
 
 /*
  * Call a function on each processor for which the supplied function
  * cond_func returns a positive value. This may include the local
- * processor.
+ * processor. May be used during early boot while early_boot_irqs_disabled is
+ * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
  */
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait);
-
-void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, const struct cpumask *mask);
-
-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
+				    smp_call_func_t func, void *info, bool wait)
+{
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+}
 
 #ifdef CONFIG_SMP
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 629f1f7b80db..a75f3d1dd1b7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -843,55 +843,6 @@ void __init smp_init(void)
 	smp_cpus_done(setup_max_cpus);
 }
 
-/*
- * Call a function on all processors. May be used during early boot while
- * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
- * of local_irq_disable/enable().
- */
-void on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
-	unsigned long flags;
-
-	preempt_disable();
-	smp_call_function(func, info, wait);
-	local_irq_save(flags);
-	func(info);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu);
-
-/**
- * on_each_cpu_mask(): Run a function on processors specified by
- * cpumask, which may include the local processor.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. The
- * exception is that it may be used during early boot while
- * early_boot_irqs_disabled is set.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
-		      void *info, bool wait)
-{
-	unsigned int scf_flags;
-
-	scf_flags = SCF_RUN_LOCAL;
-	if (wait)
-		scf_flags |= SCF_WAIT;
-
-	preempt_disable();
-	smp_call_function_many_cond(mask, func, info, scf_flags, NULL);
-	preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu_mask);
-
 /*
  * on_each_cpu_cond(): Call a function on each processor for which
  * the supplied function cond_func returns true, optionally waiting
@@ -928,13 +879,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait)
-{
-	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
-}
-EXPORT_SYMBOL(on_each_cpu_cond);
-
 static void do_nothing(void *unused)
 {
 }
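
For completeness, a minimal caller sketch of how the inline wrappers all
funnel into the single exported on_each_cpu_cond_mask(); the names
cpu_needs_flush() and flush_local_tlb() are made up for the example:

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU work; runs on each selected CPU. */
static void flush_local_tlb(void *info)
{
}

/* Hypothetical filter; return true only for CPUs that need the work. */
static bool cpu_needs_flush(int cpu, void *info)
{
	return true;
}

static void example_caller(void *info)
{
	/* Inline wrapper: expands to on_each_cpu_cond_mask(NULL, ..., cpu_online_mask). */
	on_each_cpu(flush_local_tlb, info, 1);

	/* Conditional variant: cond_func selects the target CPUs. */
	on_each_cpu_cond(cpu_needs_flush, flush_local_tlb, info, true);

	/* The exported workhorse that all of the wrappers call into. */
	on_each_cpu_cond_mask(cpu_needs_flush, flush_local_tlb, info, true,
			      cpu_online_mask);
}

With the wrappers inlined, each of these calls compiles straight into the
on_each_cpu_cond_mask() call rather than going through an extra exported
function, which is why only that one export is kept.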