The previous two patches ensure that smp_alt_modules is only used while
the kernel is still running the uniprocessor-patched code, so the mutex
protecting the list is no longer needed; disabling preemption while
traversing the list is sufficient. Note that the allocation in
alternatives_smp_module_add() now runs with preemption disabled and
therefore must not sleep, so it is switched from GFP_KERNEL to
GFP_ATOMIC.
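
For reference, a minimal sketch of the resulting access pattern
(walk_smp_alt_modules() is a hypothetical helper for illustration, not
part of this patch):

	/* Sketch only: every user of smp_alt_modules follows this shape. */
	static void walk_smp_alt_modules(void)
	{
		struct smp_alt_module *item;

		/* No task switch can occur on this CPU until preempt_enable() */
		preempt_disable();
		list_for_each_entry(item, &smp_alt_modules, next) {
			/* inspect item; adds/dels also run non-preemptible */
		}
		preempt_enable();
	}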

Signed-off-by: Zhou Chengming <zhouchengmi...@huawei.com>
---
 arch/x86/kernel/alternative.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5c3f593..7eab6f6 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -485,8 +485,7 @@ struct smp_alt_module {
        struct list_head next;
 };
 static LIST_HEAD(smp_alt_modules);
-static DEFINE_MUTEX(smp_alt);
-static bool uniproc_patched = false;   /* protected by smp_alt */
+static bool uniproc_patched = false;
 
 void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
@@ -495,18 +494,18 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
 {
        struct smp_alt_module *smp;
 
-       mutex_lock(&smp_alt);
+       preempt_disable();
        if (!uniproc_patched)
-               goto unlock;
+               goto out;
 
        if (num_possible_cpus() == 1)
                /* Don't bother remembering, we'll never have to undo it. */
-               goto smp_unlock;
+               goto smp_out;
 
-       smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+       smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
        if (NULL == smp)
                /* we'll run the (safe but slow) SMP code then ... */
-               goto unlock;
+               goto out;
 
        smp->mod        = mod;
        smp->name       = name;
@@ -519,19 +518,19 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
                smp->text, smp->text_end, smp->name);
 
        list_add_tail(&smp->next, &smp_alt_modules);
-smp_unlock:
+smp_out:
        alternatives_smp_unlock(locks, locks_end, text, text_end);
-unlock:
-       mutex_unlock(&smp_alt);
+out:
+       preempt_enable();
 }
 
 void __init_or_module alternatives_smp_module_del(struct module *mod)
 {
        struct smp_alt_module *item;
 
-       mutex_lock(&smp_alt);
+       preempt_disable();
        if (!uniproc_patched)
-               goto unlock;
+               goto out;
 
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
@@ -540,8 +539,8 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
                kfree(item);
                break;
        }
-unlock:
-       mutex_unlock(&smp_alt);
+out:
+       preempt_enable();
 }
 
 void alternatives_enable_smp(void)
@@ -551,7 +550,7 @@ void alternatives_enable_smp(void)
        /* Why bother if there are no other CPUs? */
        BUG_ON(num_possible_cpus() == 1);
 
-       mutex_lock(&smp_alt);
+       preempt_disable();
 
        if (uniproc_patched) {
                pr_info("switching to SMP code\n");
@@ -566,7 +565,7 @@ void alternatives_enable_smp(void)
                }
                uniproc_patched = false;
        }
-       mutex_unlock(&smp_alt);
+       preempt_enable();
 }
 
 /* Return 1 if the address range is reserved for smp-alternatives */
-- 
1.8.3.1