Now it is safe to remove the dependency on stop_machine() when patching
code in ftrace.

Signed-off-by: Andy Chiu <andy.c...@sifive.com>
---
 arch/riscv/kernel/ftrace.c | 53 ++++------------------------------------------
 1 file changed, 4 insertions(+), 49 deletions(-)

diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 5ebe412280ef..57a6558e212e 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -13,23 +13,13 @@
 #include <asm/patch.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+void arch_ftrace_update_code(int command)
 {
        mutex_lock(&text_mutex);
-
-       /*
-        * The code sequences we use for ftrace can't be patched while the
-        * kernel is running, so we need to use stop_machine() to modify them
-        * for now.  This doesn't play nice with text_mutex, we use this flag
-        * to elide the check.
-        */
-       riscv_patch_in_stop_machine = true;
-}
-
-void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
-{
-       riscv_patch_in_stop_machine = false;
+       command |= FTRACE_MAY_SLEEP;
+       ftrace_modify_all_code(command);
        mutex_unlock(&text_mutex);
+       flush_icache_all();
 }
 
 static int ftrace_check_current_call(unsigned long hook_pos,
@@ -155,41 +145,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
        return __ftrace_modify_call_site(&ftrace_call_dest, func, true);
 }
 
-struct ftrace_modify_param {
-       int command;
-       atomic_t cpu_count;
-};
-
-static int __ftrace_modify_code(void *data)
-{
-       struct ftrace_modify_param *param = data;
-
-       if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
-               ftrace_modify_all_code(param->command);
-               /*
-                * Make sure the patching store is effective *before* we
-                * increment the counter which releases all waiting CPUs
-                * by using the release variant of atomic increment. The
-                * release pairs with the call to local_flush_icache_all()
-                * on the waiting CPU.
-                */
-               atomic_inc_return_release(&param->cpu_count);
-       } else {
-               while (atomic_read(&param->cpu_count) <= num_online_cpus())
-                       cpu_relax();
-
-               local_flush_icache_all();
-       }
-
-       return 0;
-}
-
-void arch_ftrace_update_code(int command)
-{
-       struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
-
-       stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
-}
 #endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

-- 
2.43.0


Reply via email to