Hi Steve,

On Wed, Mar 22, 2017 at 10:35 AM, Steven Rostedt <rost...@goodmis.org> wrote:
> From: "Steven Rostedt (VMware)" <rost...@goodmis.org>
>
> The function tracing hook code for ftrace is not an entry point from
> userspace and does not belong in the entry_*.S files. It has already been
> moved out of entry_64.S. This moves it out of entry_32.S into its own
> ftrace_32.S file.
>
> Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
> ---
[SNIP]

> diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
> new file mode 100644
> index 000000000000..1889a74823ce
> --- /dev/null
> +++ b/arch/x86/kernel/ftrace_32.S
> @@ -0,0 +1,177 @@
> +/*
> + * linux/arch/x86_64/mcount_64.S

You may want to change this line.. :)
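(Presumably it just needs to name the new file; something like the
following, though the exact replacement text is my guess, not what was
committed:)

    /*
     * linux/arch/x86/kernel/ftrace_32.S
     *
     * Copyright (C) 2017 Steven Rostedt, VMware Inc.
     */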
Thanks,
Namhyung


> + *
> + * Copyright (C) 2017 Steven Rostedt, VMware Inc.
> + */
> +
> +#include <linux/linkage.h>
> +#include <asm/page_types.h>
> +#include <asm/segment.h>
> +#include <asm/export.h>
> +#include <asm/ftrace.h>
> +
> +#ifdef CONFIG_FUNCTION_TRACER
> +#ifdef CONFIG_DYNAMIC_FTRACE
> +
> +ENTRY(mcount)
> +    ret
> +END(mcount)
> +
> +ENTRY(ftrace_caller)
> +    pushl   %eax
> +    pushl   %ecx
> +    pushl   %edx
> +    pushl   $0                      /* Pass NULL as regs pointer */
> +    movl    4*4(%esp), %eax
> +    movl    0x4(%ebp), %edx
> +    movl    function_trace_op, %ecx
> +    subl    $MCOUNT_INSN_SIZE, %eax
> +
> +.globl ftrace_call
> +ftrace_call:
> +    call    ftrace_stub
> +
> +    addl    $4, %esp                /* skip NULL pointer */
> +    popl    %edx
> +    popl    %ecx
> +    popl    %eax
> +.Lftrace_ret:
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +.globl ftrace_graph_call
> +ftrace_graph_call:
> +    jmp     ftrace_stub
> +#endif
> +
> +/* This is weak to keep gas from relaxing the jumps */
> +WEAK(ftrace_stub)
> +    ret
> +END(ftrace_caller)
> +
> +ENTRY(ftrace_regs_caller)
> +    pushf   /* push flags before compare (in cs location) */
> +
> +    /*
> +     * i386 does not save SS and ESP when coming from kernel.
> +     * Instead, to get sp, &regs->sp is used (see ptrace.h).
> +     * Unfortunately, that means eflags must be at the same location
> +     * as the current return ip is. We move the return ip into the
> +     * ip location, and move flags into the return ip location.
> +     */
> +    pushl   4(%esp)                 /* save return ip into ip slot */
> +
> +    pushl   $0                      /* Load 0 into orig_ax */
> +    pushl   %gs
> +    pushl   %fs
> +    pushl   %es
> +    pushl   %ds
> +    pushl   %eax
> +    pushl   %ebp
> +    pushl   %edi
> +    pushl   %esi
> +    pushl   %edx
> +    pushl   %ecx
> +    pushl   %ebx
> +
> +    movl    13*4(%esp), %eax        /* Get the saved flags */
> +    movl    %eax, 14*4(%esp)        /* Move saved flags into regs->flags location */
> +                                    /* clobbering return ip */
> +    movl    $__KERNEL_CS, 13*4(%esp)
> +
> +    movl    12*4(%esp), %eax        /* Load ip (1st parameter) */
> +    subl    $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
> +    movl    0x4(%ebp), %edx         /* Load parent ip (2nd parameter) */
> +    movl    function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
> +    pushl   %esp                    /* Save pt_regs as 4th parameter */
> +
> +GLOBAL(ftrace_regs_call)
> +    call    ftrace_stub
> +
> +    addl    $4, %esp                /* Skip pt_regs */
> +    movl    14*4(%esp), %eax        /* Move flags back into cs */
> +    movl    %eax, 13*4(%esp)        /* Needed to keep addl from modifying flags */
> +    movl    12*4(%esp), %eax        /* Get return ip from regs->ip */
> +    movl    %eax, 14*4(%esp)        /* Put return ip back for ret */
> +
> +    popl    %ebx
> +    popl    %ecx
> +    popl    %edx
> +    popl    %esi
> +    popl    %edi
> +    popl    %ebp
> +    popl    %eax
> +    popl    %ds
> +    popl    %es
> +    popl    %fs
> +    popl    %gs
> +    addl    $8, %esp                /* Skip orig_ax and ip */
> +    popf                            /* Pop flags at end (no addl to corrupt flags) */
> +    jmp     .Lftrace_ret
> +
> +    popf
> +    jmp     ftrace_stub
> +#else /* ! CONFIG_DYNAMIC_FTRACE */
> +
> +ENTRY(mcount)
> +    cmpl    $__PAGE_OFFSET, %esp
> +    jb      ftrace_stub             /* Paging not enabled yet? */
> +
> +    cmpl    $ftrace_stub, ftrace_trace_function
> +    jnz     .Ltrace
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +    cmpl    $ftrace_stub, ftrace_graph_return
> +    jnz     ftrace_graph_caller
> +
> +    cmpl    $ftrace_graph_entry_stub, ftrace_graph_entry
> +    jnz     ftrace_graph_caller
> +#endif
> +.globl ftrace_stub
> +ftrace_stub:
> +    ret
> +
> +    /* taken from glibc */
> +.Ltrace:
> +    pushl   %eax
> +    pushl   %ecx
> +    pushl   %edx
> +    movl    0xc(%esp), %eax
> +    movl    0x4(%ebp), %edx
> +    subl    $MCOUNT_INSN_SIZE, %eax
> +
> +    call    *ftrace_trace_function
> +
> +    popl    %edx
> +    popl    %ecx
> +    popl    %eax
> +    jmp     ftrace_stub
> +END(mcount)
> +#endif /* CONFIG_DYNAMIC_FTRACE */
> +EXPORT_SYMBOL(mcount)
> +#endif /* CONFIG_FUNCTION_TRACER */
> +
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +ENTRY(ftrace_graph_caller)
> +    pushl   %eax
> +    pushl   %ecx
> +    pushl   %edx
> +    movl    0xc(%esp), %eax
> +    lea     0x4(%ebp), %edx
> +    movl    (%ebp), %ecx
> +    subl    $MCOUNT_INSN_SIZE, %eax
> +    call    prepare_ftrace_return
> +    popl    %edx
> +    popl    %ecx
> +    popl    %eax
> +    ret
> +END(ftrace_graph_caller)
> +
> +.globl return_to_handler
> +return_to_handler:
> +    pushl   %eax
> +    pushl   %edx
> +    movl    %ebp, %eax
> +    call    ftrace_return_to_handler
> +    movl    %eax, %ecx
> +    popl    %edx
> +    popl    %eax
> +    jmp     *%ecx
> +#endif
> --
> 2.10.2
>
>
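For anyone reading along in the archive: the two trampolines above differ
in what the registered callback sees. A callback whose ftrace_ops sets
FTRACE_OPS_FL_SAVE_REGS is entered via ftrace_regs_caller, with the full
pt_regs built on the stack as in the patch; without that flag it goes
through the lighter ftrace_caller, which passes a NULL regs pointer. A
minimal sketch of a module exercising the regs path, against the 4.11-era
callback signature (the module and traced function names here are
hypothetical, not from the patch):

    #include <linux/ftrace.h>
    #include <linux/module.h>
    #include <linux/string.h>

    /* Runs via ftrace_regs_caller because of FTRACE_OPS_FL_SAVE_REGS.
     * regs is valid here; on i386, sp is derived from &regs->sp as the
     * comment in ftrace_regs_caller explains. */
    static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs)
    {
    }

    static struct ftrace_ops my_ops = {
            .func   = my_regs_callback,
            .flags  = FTRACE_OPS_FL_SAVE_REGS,
    };

    static int __init my_init(void)
    {
            /* Trace a single function so the callback is easy to observe;
             * a cast may be needed on kernels where the buf parameter is
             * unsigned char *. */
            ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
            return register_ftrace_function(&my_ops);
    }

    static void __exit my_exit(void)
    {
            unregister_ftrace_function(&my_ops);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

Without the ftrace_set_filter() call the callback would fire for every
traceable function in the kernel, which is legal but heavy; filtering to
one function keeps the sketch observable.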