Remove the livepatch module notifier in favor of directly enabling and disabling patches to modules in the module loader. Hard-coding the function calls ensures that ftrace_module_enable() is run before klp_module_coming() during module load, and that klp_module_going() is run before ftrace_release_mod() during module unload. This way, ftrace and livepatch code is run in the correct order during the module load/unload sequence without dependence on the module notifier call chain.
This fixes a notifier ordering issue in which the ftrace module notifier
(and hence ftrace_module_enable()) for coming modules was being called
after klp_module_notify(), which caused livepatch modules to initialize
incorrectly.

Signed-off-by: Jessica Yu <j...@redhat.com>
---
 include/linux/livepatch.h |   9 +++
 kernel/livepatch/core.c   | 153 +++++++++++++++++++++++-----------------------
 kernel/module.c           |  20 +++++-
 3 files changed, 103 insertions(+), 79 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index a882865..bd830d5 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -134,6 +134,15 @@ int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);
 int klp_disable_patch(struct klp_patch *);
 
+/* Called from the module loader during module coming/going states */
+int klp_module_coming(struct module *mod);
+void klp_module_going(struct module *mod);
+
+#else /* !CONFIG_LIVEPATCH */
+
+static inline int klp_module_coming(struct module *mod) { return 0; }
+static inline void klp_module_going(struct module *mod) { }
+
 #endif /* CONFIG_LIVEPATCH */
 
 #endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index bc2c85c..1d47f96 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -99,12 +99,12 @@ static void klp_find_object_module(struct klp_object *obj)
 	/*
 	 * We do not want to block removal of patched modules and therefore
 	 * we do not take a reference here. The patches are removed by
-	 * a going module handler instead.
+	 * klp_module_going() instead.
 	 */
 	mod = find_module(obj->name);
 	/*
-	 * Do not mess work of the module coming and going notifiers.
-	 * Note that the patch might still be needed before the going handler
+	 * Do not mess work of klp_module_coming() and klp_module_going().
+	 * Note that the patch might still be needed before klp_module_going()
 	 * is called. Module functions can be called even in the GOING state
 	 * until mod->exit() finishes. This is especially important for
 	 * patches that modify semantic of the functions.
@@ -866,103 +866,112 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
-static int klp_module_notify_coming(struct klp_patch *patch,
-				     struct klp_object *obj)
+int klp_module_coming(struct module *mod)
 {
-	struct module *pmod = patch->mod;
-	struct module *mod = obj->mod;
 	int ret;
+	struct klp_patch *patch;
+	struct klp_object *obj;
 
-	ret = klp_init_object_loaded(patch, obj);
-	if (ret) {
-		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
-			pmod->name, mod->name, ret);
-		return ret;
-	}
+	if (WARN_ON(mod->state != MODULE_STATE_COMING))
+		return -EINVAL;
 
-	if (patch->state == KLP_DISABLED)
-		return 0;
+	mutex_lock(&klp_mutex);
+	/*
+	 * Each module has to know that klp_module_coming()
+	 * has been called. We never know what module will
+	 * get patched by a new patch.
+	 */
+	mod->klp_alive = true;
 
-	pr_notice("applying patch '%s' to loading module '%s'\n",
-		  pmod->name, mod->name);
+	list_for_each_entry(patch, &klp_patches, list) {
+		klp_for_each_object(patch, obj) {
+			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
+				continue;
 
-	ret = klp_enable_object(obj);
-	if (ret)
-		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
-			pmod->name, mod->name, ret);
-	return ret;
-}
+			obj->mod = mod;
 
-static void klp_module_notify_going(struct klp_patch *patch,
-				    struct klp_object *obj)
-{
-	struct module *pmod = patch->mod;
-	struct module *mod = obj->mod;
+			ret = klp_init_object_loaded(patch, obj);
+			if (ret) {
+				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
+					patch->mod->name, obj->mod->name, ret);
+				goto err;
+			}
 
-	if (patch->state == KLP_DISABLED)
-		goto disabled;
+			if (patch->state == KLP_DISABLED)
+				break;
 
-	pr_notice("reverting patch '%s' on unloading module '%s'\n",
-		  pmod->name, mod->name);
+			pr_notice("applying patch '%s' to loading module '%s'\n",
+				  patch->mod->name, obj->mod->name);
 
-	klp_disable_object(obj);
+			ret = klp_enable_object(obj);
+			if (ret) {
+				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+					patch->mod->name, obj->mod->name, ret);
+				goto err;
+			}
 
-disabled:
-	klp_free_object_loaded(obj);
+			break;
+		}
+	}
+
+	mutex_unlock(&klp_mutex);
+
+	return 0;
+
+err:
+	/*
+	 * If a patch is unsuccessfully applied, return
+	 * error to the module loader.
+	 */
+	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
+		patch->mod->name, obj->mod->name, obj->mod->name);
+	obj->mod = NULL;
+	mutex_unlock(&klp_mutex);
+
+	return ret;
 }
 
-static int klp_module_notify(struct notifier_block *nb, unsigned long action,
-			     void *data)
+void klp_module_going(struct module *mod)
 {
-	int ret;
-	struct module *mod = data;
 	struct klp_patch *patch;
 	struct klp_object *obj;
 
-	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
-		return 0;
+	/*
+	 * Module state is normally GOING when this is called, but it could
+	 * still be COMING in cases where we are cleaning up after a module
+	 * that failed to load.
+	 */
+	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
+		    mod->state != MODULE_STATE_COMING))
+		return;
 
 	mutex_lock(&klp_mutex);
-
 	/*
-	 * Each module has to know that the notifier has been called.
-	 * We never know what module will get patched by a new patch.
+	 * Each module has to know that klp_module_going()
+	 * has been called. We never know what module will
+	 * get patched by a new patch.
 	 */
-	if (action == MODULE_STATE_COMING)
-		mod->klp_alive = true;
-	else /* MODULE_STATE_GOING */
-		mod->klp_alive = false;
+	mod->klp_alive = false;
 
 	list_for_each_entry(patch, &klp_patches, list) {
 		klp_for_each_object(patch, obj) {
 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 				continue;
 
-			if (action == MODULE_STATE_COMING) {
-				obj->mod = mod;
-				ret = klp_module_notify_coming(patch, obj);
-				if (ret) {
-					obj->mod = NULL;
-					pr_warn("patch '%s' is in an inconsistent state!\n",
-						patch->mod->name);
-				}
-			} else /* MODULE_STATE_GOING */
-				klp_module_notify_going(patch, obj);
+			if (patch->state != KLP_DISABLED) {
+				pr_notice("reverting patch '%s' on unloading module '%s'\n",
+					  patch->mod->name, obj->mod->name);
+				klp_disable_object(obj);
+			}
 
+			klp_free_object_loaded(obj);
 			break;
 		}
 	}
 
 	mutex_unlock(&klp_mutex);
-
-	return 0;
 }
 
-static struct notifier_block klp_module_nb = {
-	.notifier_call = klp_module_notify,
-	.priority = INT_MIN+1, /* called late but before ftrace notifier */
-};
-
 static int __init klp_init(void)
 {
 	int ret;
@@ -973,21 +982,11 @@ static int __init klp_init(void)
 		return -EINVAL;
 	}
 
-	ret = register_module_notifier(&klp_module_nb);
-	if (ret)
-		return ret;
-
 	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
-	if (!klp_root_kobj) {
-		ret = -ENOMEM;
-		goto unregister;
-	}
+	if (!klp_root_kobj)
+		return -ENOMEM;
 
 	return 0;
-
-unregister:
-	unregister_module_notifier(&klp_module_nb);
-	return ret;
 }
 module_init(klp_init);
 
diff --git a/kernel/module.c b/kernel/module.c
index 794ebe8..0fec949 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -53,6 +53,7 @@
 #include <asm/sections.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
+#include <linux/livepatch.h>
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		mod->exit();
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
 	ftrace_release_mod(mod);
 	async_synchronize_full();
 
@@ -3315,6 +3317,7 @@ fail:
 	module_put(mod);
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
 	ftrace_release_mod(mod);
 	free_module(mod);
 	wake_up_all(&module_wq);
@@ -3377,8 +3380,10 @@ static int complete_formation(struct module *mod, struct load_info *info)
 
 	/* Find duplicate symbols (must be called under lock). */
 	err = verify_export_symbols(mod);
-	if (err < 0)
-		goto out;
+	if (err < 0) {
+		mutex_unlock(&module_mutex);
+		return err;
+	}
 
 	/* This relies on module_mutex for list integrity. */
 	module_bug_finalize(info->hdr, info->sechdrs, mod);
@@ -3393,12 +3398,22 @@ static int complete_formation(struct module *mod, struct load_info *info)
 	mutex_unlock(&module_mutex);
 
 	ftrace_module_enable(mod);
+	err = klp_module_coming(mod);
+	if (err)
+		goto out;
+
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_COMING, mod);
 	return 0;
 
 out:
+	mutex_lock(&module_mutex);
+	module_bug_cleanup(mod);
 	mutex_unlock(&module_mutex);
+
+	module_disable_ro(mod);
+	module_disable_nx(mod);
+
 	return err;
 }
 
@@ -3549,6 +3564,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	klp_module_going(mod);
 
 	/* we can't deallocate the module until we clear memory protection */
 	module_disable_ro(mod);
-- 
2.4.3