Naveen N. Rao wrote:
+
+unsigned long ftrace_get_traced_func_if_no_stackframe(unsigned long ip, unsigned long *stack)
+{
+       if (!is_ftrace_entry(ip))
+               return 0;
+
+       if (IS_ENABLED(CONFIG_PPC32))
+               return stack[11]; /* see MCOUNT_SAVE_FRAME */
+
+       if (!IS_ENABLED(CONFIG_MPROFILE_KERNEL))
+               return 0;
+
+       return stack[(STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, nip)) / sizeof(unsigned long)];

Looking at Daniel's patch to address KASAN errors in our stack walk code in show_stack() [*], I realized that I am not validating the stack pointer before making the above accesses...

[*] http://lkml.kernel.org/r/20210528074806.1311297-1-...@axtens.net
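Something like the below might do (just a sketch, not what was posted; it assumes this helper only ever walks current's stack, and that STACK_FRAME_OVERHEAD is the right minimum frame size to validate against):

	/* Sketch: bail out if the stack pointer doesn't look sane
	 * before dereferencing it for the reads above. */
	if (!validate_sp((unsigned long)stack, current, STACK_FRAME_OVERHEAD))
		return 0;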

+}
+
+#ifdef CONFIG_STACK_TRACER
+void stack_get_trace(unsigned long traced_ip,
+                    unsigned long *stack_ref __maybe_unused,
+                    unsigned long stack_size __maybe_unused,
+                    int *tracer_frame)
+{
+       unsigned long sp, newsp, top, ip;
+       int ftrace_call_found = 0;
+       unsigned long *stack;
+       int i = 0;
+
+       sp = current_stack_frame();
+       top = (unsigned long)task_stack_page(current) + THREAD_SIZE;
+
+       while (validate_sp(sp, current, STACK_FRAME_OVERHEAD) && i < STACK_TRACE_ENTRIES) {
+               stack = (unsigned long *) sp;
+               newsp = stack[0];
+               ip = stack[STACK_FRAME_LR_SAVE];
+
+               if (ftrace_call_found) {
+                       stack_dump_trace[i] = ip;
+                       stack_trace_index[i++] = top - sp;
+               }

And I need to make the above accesses bypass KASAN as well.
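Something along the lines of Daniel's approach ought to work here too, e.g. (a rough, untested sketch):

		/* Sketch: read the saved back chain and LR without KASAN
		 * instrumentation, similar to the show_stack() fix. */
		newsp = READ_ONCE_NOCHECK(stack[0]);
		ip = READ_ONCE_NOCHECK(stack[STACK_FRAME_LR_SAVE]);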


- Naveen
