In the current code, the entries on smp_alt_modules are not freed when SMP is enabled; each one lingers until its module is unloaded and alternatives_smp_module_del() releases it. This wastes memory: once the SMP alternatives have been applied, the recorded lock ranges are never needed again. So free the whole list in alternatives_enable_smp(), and let alternatives_smp_module_del() return immediately when !uniproc_patched, avoiding a pointless list traversal.
Signed-off-by: Zhou Chengming <zhouchengmi...@huawei.com>
---
 arch/x86/kernel/alternative.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3344d33..8549269 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -534,6 +534,9 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
 	struct smp_alt_module *item;
 
 	mutex_lock(&smp_alt);
+	if (!uniproc_patched)
+		goto unlock;
+
 	list_for_each_entry(item, &smp_alt_modules, next) {
 		if (mod != item->mod)
 			continue;
@@ -541,12 +544,13 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
 		kfree(item);
 		break;
 	}
+unlock:
 	mutex_unlock(&smp_alt);
 }
 
 void alternatives_enable_smp(void)
 {
-	struct smp_alt_module *mod;
+	struct smp_alt_module *mod, *tmp;
 
 	/* Why bother if there are no other CPUs? */
 	BUG_ON(num_possible_cpus() == 1);
@@ -558,9 +562,12 @@ void alternatives_enable_smp(void)
 		BUG_ON(num_online_cpus() != 1);
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-		list_for_each_entry(mod, &smp_alt_modules, next)
+		list_for_each_entry_safe(mod, tmp, &smp_alt_modules, next) {
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
+			list_del(&mod->next);
+			kfree(mod);
+		}
 		uniproc_patched = false;
 	}
 	mutex_unlock(&smp_alt);
-- 
1.8.3.1
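
[Editorial note, not part of the patch] A minimal userspace sketch of why
the loop in alternatives_enable_smp() must switch to the _safe iterator
when it starts freeing entries: a plain traversal advances via the current
node's next pointer, which is invalid once that node has been kfree'd.
list_for_each_entry_safe() avoids this by caching the next entry in an
extra cursor ("tmp") before the body runs. The tiny hand-rolled list below
is an illustrative stand-in for <linux/list.h> and struct smp_alt_module,
not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct item {
	int id;
	struct item *next;
};

int main(void)
{
	struct item *head = NULL;

	/* Build a small list: 2 -> 1 -> 0 */
	for (int i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));
		it->id = i;
		it->next = head;
		head = it;
	}

	/*
	 * "Safe" traversal: save the next pointer *before* freeing the
	 * current node, which is what list_for_each_entry_safe() does
	 * with its second cursor.
	 */
	for (struct item *it = head, *tmp; it; it = tmp) {
		tmp = it->next;	/* cached before "it" is freed */
		printf("processing and freeing item %d\n", it->id);
		free(it);	/* a plain "it = it->next" here would read freed memory */
	}
	return 0;
}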