From: Jason Baron <jba...@akamai.com>

Currently klp_patch contains a pointer to a statically allocated array of
struct klp_object and struct klp_objects contains a pointer to a statically
allocated array of klp_func. In order to allow for the dynamic allocation
of objects and functions, link klp_patch, klp_object, and klp_func together
via linked lists. This allows us to more easily allocate new objects and
functions, while having the iterator be a simple linked list walk.

The static structures are added to the lists early. This makes it possible
to add dynamically allocated objects before the klp_init_object() and
klp_init_func() calls, and therefore reduces the amount of further changes
needed in the code.

This patch does not change the existing behavior.

Signed-off-by: Jason Baron <jba...@akamai.com>
[pmla...@suse.com: Initialize lists before init calls]
Signed-off-by: Petr Mladek <pmla...@suse.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Jessica Yu <j...@kernel.org>
Cc: Jiri Kosina <ji...@kernel.org>
Cc: Miroslav Benes <mbe...@suse.cz>
---
 include/linux/livepatch.h | 19 +++++++++++++++++--
 kernel/livepatch/core.c   |  9 +++++++--
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 1366dbb159ab..662e4cf664b8 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/ftrace.h>
 #include <linux/completion.h>
+#include <linux/list.h>
 
 #if IS_ENABLED(CONFIG_LIVEPATCH)
 
@@ -42,6 +43,7 @@
  *             can be found (optional)
  * @old_func:  pointer to the function being patched
  * @kobj:      kobject for sysfs resources
+ * @node:      list node for klp_object func_list
  * @stack_node:        list node for klp_ops func_stack list
  * @old_size:  size of the old function
  * @new_size:  size of the new function
@@ -80,6 +82,7 @@ struct klp_func {
        /* internal */
        void *old_func;
        struct kobject kobj;
+       struct list_head node;
        struct list_head stack_node;
        unsigned long old_size, new_size;
        bool kobj_alive;
@@ -117,6 +120,8 @@ struct klp_callbacks {
  * @funcs:     function entries for functions to be patched in the object
  * @callbacks: functions to be executed pre/post (un)patching
  * @kobj:      kobject for sysfs resources
+ * @func_list: dynamic list of the function entries
+ * @node:      list node for klp_patch obj_list
  * @mod:       kernel module associated with the patched object
  *             (NULL for vmlinux)
  * @kobj_alive: @kobj has been added and needs freeing
@@ -130,6 +135,8 @@ struct klp_object {
 
        /* internal */
        struct kobject kobj;
+       struct list_head func_list;
+       struct list_head node;
        struct module *mod;
        bool kobj_alive;
        bool patched;
@@ -141,6 +148,7 @@ struct klp_object {
  * @objs:      object entries for kernel objects to be patched
  * @list:      list node for global list of registered patches
  * @kobj:      kobject for sysfs resources
+ * @obj_list:  dynamic list of the object entries
  * @kobj_alive: @kobj has been added and needs freeing
  * @enabled:   the patch is enabled (but operation may be incomplete)
  * @forced:    was involved in a forced transition
@@ -155,6 +163,7 @@ struct klp_patch {
        /* internal */
        struct list_head list;
        struct kobject kobj;
+       struct list_head obj_list;
        bool kobj_alive;
        bool enabled;
        bool forced;
@@ -162,14 +171,20 @@ struct klp_patch {
        struct completion finish;
 };
 
-#define klp_for_each_object(patch, obj) \
+#define klp_for_each_object_static(patch, obj) \
        for (obj = patch->objs; obj->funcs || obj->name; obj++)
 
-#define klp_for_each_func(obj, func) \
+#define klp_for_each_object(patch, obj)        \
+       list_for_each_entry(obj, &patch->obj_list, node)
+
+#define klp_for_each_func_static(obj, func) \
        for (func = obj->funcs; \
             func->old_name || func->new_func || func->old_sympos; \
             func++)
 
+#define klp_for_each_func(obj, func)   \
+       list_for_each_entry(func, &obj->func_list, node)
+
 int klp_enable_patch(struct klp_patch *);
 
 void arch_klp_init_object_loaded(struct klp_patch *patch,
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index e01dfa3b58d2..c48f34272473 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -662,20 +662,25 @@ static int klp_init_patch_before_free(struct klp_patch *patch)
                return -EINVAL;
 
        INIT_LIST_HEAD(&patch->list);
+       INIT_LIST_HEAD(&patch->obj_list);
        patch->kobj_alive = false;
        patch->enabled = false;
        patch->forced = false;
        INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
        init_completion(&patch->finish);
 
-       klp_for_each_object(patch, obj) {
+       klp_for_each_object_static(patch, obj) {
                if (!obj->funcs)
                        return -EINVAL;
 
+               INIT_LIST_HEAD(&obj->func_list);
                obj->kobj_alive = false;
+               list_add_tail(&obj->node, &patch->obj_list);
 
-               klp_for_each_func(obj, func)
+               klp_for_each_func_static(obj, func) {
                        func->kobj_alive = false;
+                       list_add_tail(&func->node, &obj->func_list);
+               }
        }
 
        if (!try_module_get(patch->mod))
-- 
2.13.7

Reply via email to