In the current code, we don't free smp_alt_modules when SMP is enabled,
so we have to wait for module unload to call alternatives_smp_module_del()
to free its smp_alt_module. This strategy has shortcomings.

First, some modules may never be unloaded after SMP is enabled, so their
smp_alt_modules won't be freed even though they are no longer useful.
Second, every module has to call alternatives_smp_module_del() on unload
to check whether it is in the list, even after SMP is enabled — which is
completely useless work.

More importantly, alternatives_smp_module_del() will not traverse
the list after SMP is enabled, so we don't need a mutex to protect
the list; using preempt_disable() is enough.

We can be sure the smp_alt_modules are useless once SMP is enabled,
so free them all at that point. alternatives_smp_module_del() can then
return directly when !uniproc_patched, avoiding a list traversal.

Signed-off-by: Zhou Chengming <zhouchengmi...@huawei.com>
---
 arch/x86/kernel/alternative.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3344d33..8549269 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -534,6 +534,9 @@ void __init_or_module alternatives_smp_module_del(struct 
module *mod)
        struct smp_alt_module *item;
 
        mutex_lock(&smp_alt);
+       if (!uniproc_patched)
+               goto unlock;
+
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
@@ -541,12 +544,13 @@ void __init_or_module alternatives_smp_module_del(struct 
module *mod)
                kfree(item);
                break;
        }
+unlock:
        mutex_unlock(&smp_alt);
 }
 
 void alternatives_enable_smp(void)
 {
-       struct smp_alt_module *mod;
+       struct smp_alt_module *mod, *tmp;
 
        /* Why bother if there are no other CPUs? */
        BUG_ON(num_possible_cpus() == 1);
@@ -558,9 +562,12 @@ void alternatives_enable_smp(void)
                BUG_ON(num_online_cpus() != 1);
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
-               list_for_each_entry(mod, &smp_alt_modules, next)
+               list_for_each_entry_safe(mod, tmp, &smp_alt_modules, next) {
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
+                       list_del(&mod->next);
+                       kfree(mod);
+               }
                uniproc_patched = false;
        }
        mutex_unlock(&smp_alt);
-- 
1.8.3.1

Reply via email to