On Mon, Jan 12, 2026 at 05:07:57PM -0500, Steven Rostedt wrote:
> On Mon, 12 Jan 2026 22:49:38 +0100
> Jiri Olsa <[email protected]> wrote:
>
> > To recreate the same stack setup for the return probe as we have for
> > the entry probe, we set the instruction pointer to the attached
> > function address, which gets us the same unwind setup and the same
> > stack trace.
> >
> > With the fix, entry probe:
> >
> > # bpftrace -e 'kprobe:__x64_sys_newuname* { print(kstack)}'
> > Attaching 1 probe...
> >
> > __x64_sys_newuname+9
> > do_syscall_64+134
> > entry_SYSCALL_64_after_hwframe+118
> >
> > return probe:
> >
> > # bpftrace -e 'kretprobe:__x64_sys_newuname* { print(kstack)}'
> > Attaching 1 probe...
> >
> > __x64_sys_newuname+4
> > do_syscall_64+134
> > entry_SYSCALL_64_after_hwframe+118
>
> But is this really correct?
>
> The stack trace of the return from __x64_sys_newuname is from offset "+4".
>
> The stack trace from the entry probe is at offset "+9". Isn't it confusing
> that the offset is likely not from the return portion of that function?
right, makes sense.. so a standard kprobe actually skips the attached function
(__x64_sys_newuname) in the return probe stack trace.. perhaps we should do
the same for kprobe_multi
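
fwiw with that the return probe stack for the example above should start at
the caller, I'd expect something like (untested, offsets just copied from the
traces above):

# bpftrace -e 'kretprobe:__x64_sys_newuname* { print(kstack)}'
Attaching 1 probe...

        do_syscall_64+134
        entry_SYSCALL_64_after_hwframe+118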
I managed to get that with the change below, but it's wrong wrt the arch code,
note the x86-specific ftrace_regs_set_stack_pointer(fregs, stack + 8) .. will
try to figure out a better way once we agree on the solution
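
to illustrate the stack + 8 part (my reading, x86_64 only): ret_stack->retp
points to the stack slot that held the return address, and 'ret' pops that
8-byte slot, so the caller's %rsp right after the return is one slot above
retp:

	/*
	 * stack at function entry (right after the 'call' instruction):
	 *
	 *   retp     -> [ return address ]  <- %rsp on entry
	 *   retp + 8 -> [ caller frame   ]  <- %rsp after 'ret'
	 *
	 * setting ip to the return address and sp to retp + 8 makes
	 * fregs look the way the caller sees them once the function
	 * has returned, which is what the unwinder expects:
	 */
	if (fregs) {
		ftrace_regs_set_instruction_pointer(fregs, ret);
		ftrace_regs_set_stack_pointer(fregs, stack + 8);
	}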
thanks,
jirka
---
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index c56e1e63b893..b0e8ce4934e7 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -71,6 +71,9 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
 #define ftrace_regs_set_instruction_pointer(fregs, _ip) \
 	do { arch_ftrace_regs(fregs)->regs.ip = (_ip); } while (0)
 
+#define ftrace_regs_set_stack_pointer(fregs, _sp) \
+	do { arch_ftrace_regs(fregs)->regs.sp = (_sp); } while (0)
+
 static __always_inline unsigned long
 ftrace_regs_get_return_address(struct ftrace_regs *fregs)
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 6279e0a753cf..b1510c412dcb 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -717,7 +717,8 @@ int function_graph_enter_regs(unsigned long ret, unsigned long func,
 
 /* Retrieve a function return address to the trace stack on thread info.*/
 static struct ftrace_ret_stack *
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
-			unsigned long frame_pointer, int *offset)
+			unsigned long *stack, unsigned long frame_pointer,
+			int *offset)
 {
 	struct ftrace_ret_stack *ret_stack;
@@ -762,6 +763,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
 	*offset += FGRAPH_FRAME_OFFSET;
 	*ret = ret_stack->ret;
+	*stack = (unsigned long) ret_stack->retp;
 	trace->func = ret_stack->func;
 	trace->overrun = atomic_read(&current->trace_overrun);
 	trace->depth = current->curr_ret_depth;
@@ -810,12 +812,13 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointe
 	struct ftrace_ret_stack *ret_stack;
 	struct ftrace_graph_ret trace;
 	unsigned long bitmap;
+	unsigned long stack;
 	unsigned long ret;
 	int offset;
 	int bit;
 	int i;
 
-	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
+	ret_stack = ftrace_pop_return_trace(&trace, &ret, &stack, frame_pointer, &offset);
 
 	if (unlikely(!ret_stack)) {
 		ftrace_graph_stop();
@@ -824,8 +827,11 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointe
 		return (unsigned long)panic;
 	}
 
-	if (fregs)
-		ftrace_regs_set_instruction_pointer(fregs, trace.func);
+	if (fregs) {
+		ftrace_regs_set_instruction_pointer(fregs, ret);
+		ftrace_regs_set_stack_pointer(fregs, stack + 8);
+	}
+
 	bit = ftrace_test_recursion_trylock(trace.func, ret);
 
 	/*
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
index e1a9b55e07cb..852830536109 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -74,12 +74,20 @@ static void test_stacktrace_ips_kprobe_multi(bool retprobe)
 
 	load_kallsyms();
 
-	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
-			     ksym_get_addr("bpf_testmod_stacktrace_test"),
-			     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
-			     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
-			     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
-			     ksym_get_addr("bpf_testmod_test_read"));
+	if (retprobe) {
+		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+				     ksym_get_addr("bpf_testmod_test_read"));
+	} else {
+		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
+				     ksym_get_addr("bpf_testmod_stacktrace_test"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+				     ksym_get_addr("bpf_testmod_test_read"));
+	}
 
 cleanup:
 	stacktrace_ips__destroy(skel);
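
btw for the arch problem, one option to keep other arches building could be a
no-op fallback when the arch does not define the macro, roughly (just a
sketch, not tested, and a no-op would of course leave the return probe stack
trace unchanged there):

#ifndef ftrace_regs_set_stack_pointer
# define ftrace_regs_set_stack_pointer(fregs, _sp) do { } while (0)
#endif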