From: "Steven Rostedt (VMware)" <rost...@goodmis.org>

The function tracing hook code for ftrace is not an entry point from
userspace and does not belong in the entry_*.S files. It has already been
moved out of entry_64.S. This moves it out of entry_32.S into its own
ftrace_32.S file.

Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
---
 arch/x86/entry/entry_32.S   | 169 ------------------------------------------
 arch/x86/kernel/Makefile    |   1 +
 arch/x86/kernel/ftrace_32.S | 198 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 199 insertions(+), 169 deletions(-)
 create mode 100644 arch/x86/kernel/ftrace_32.S

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 57f7ec35216e..169b3b0c5ec6 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -35,16 +35,13 @@
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page_types.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>
 #include <asm/frame.h>
 
        .section .entry.text, "ax"
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(mcount)
-       ret
-END(mcount)
-
-ENTRY(ftrace_caller)
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       pushl   $0                              /* Pass NULL as regs pointer */
-       movl    4*4(%esp), %eax
-       movl    0x4(%ebp), %edx
-       movl    function_trace_op, %ecx
-       subl    $MCOUNT_INSN_SIZE, %eax
-
-.globl ftrace_call
-ftrace_call:
-       call    ftrace_stub
-
-       addl    $4, %esp                        /* skip NULL pointer */
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-.Lftrace_ret:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
-       jmp     ftrace_stub
-#endif
-
-/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
-       ret
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-       pushf   /* push flags before compare (in cs location) */
-
-       /*
-        * i386 does not save SS and ESP when coming from kernel.
-        * Instead, to get sp, &regs->sp is used (see ptrace.h).
-        * Unfortunately, that means eflags must be at the same location
-        * as the current return ip is. We move the return ip into the
-        * ip location, and move flags into the return ip location.
-        */
-       pushl   4(%esp)                         /* save return ip into ip slot */
-
-       pushl   $0                              /* Load 0 into orig_ax */
-       pushl   %gs
-       pushl   %fs
-       pushl   %es
-       pushl   %ds
-       pushl   %eax
-       pushl   %ebp
-       pushl   %edi
-       pushl   %esi
-       pushl   %edx
-       pushl   %ecx
-       pushl   %ebx
-
-       movl    13*4(%esp), %eax                /* Get the saved flags */
-       movl    %eax, 14*4(%esp)                /* Move saved flags into regs->flags location */
-                                               /* clobbering return ip */
-       movl    $__KERNEL_CS, 13*4(%esp)
-
-       movl    12*4(%esp), %eax                /* Load ip (1st parameter) */
-       subl    $MCOUNT_INSN_SIZE, %eax         /* Adjust ip */
-       movl    0x4(%ebp), %edx                 /* Load parent ip (2nd parameter) */
-       movl    function_trace_op, %ecx         /* Save ftrace_pos in 3rd parameter */
-       pushl   %esp                            /* Save pt_regs as 4th parameter */
-
-GLOBAL(ftrace_regs_call)
-       call    ftrace_stub
-
-       addl    $4, %esp                        /* Skip pt_regs */
-       movl    14*4(%esp), %eax                /* Move flags back into cs */
-       movl    %eax, 13*4(%esp)                /* Needed to keep addl  from modifying flags */
-       movl    12*4(%esp), %eax                /* Get return ip from regs->ip */
-       movl    %eax, 14*4(%esp)                /* Put return ip back for ret */
-
-       popl    %ebx
-       popl    %ecx
-       popl    %edx
-       popl    %esi
-       popl    %edi
-       popl    %ebp
-       popl    %eax
-       popl    %ds
-       popl    %es
-       popl    %fs
-       popl    %gs
-       addl    $8, %esp                        /* Skip orig_ax and ip */
-       popf                                    /* Pop flags at end (no addl to corrupt flags) */
-       jmp     .Lftrace_ret
-
-       popf
-       jmp     ftrace_stub
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(mcount)
-       cmpl    $__PAGE_OFFSET, %esp
-       jb      ftrace_stub                     /* Paging not enabled yet? */
-
-       cmpl    $ftrace_stub, ftrace_trace_function
-       jnz     .Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       cmpl    $ftrace_stub, ftrace_graph_return
-       jnz     ftrace_graph_caller
-
-       cmpl    $ftrace_graph_entry_stub, ftrace_graph_entry
-       jnz     ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-       ret
-
-       /* taken from glibc */
-.Ltrace:
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       movl    0xc(%esp), %eax
-       movl    0x4(%ebp), %edx
-       subl    $MCOUNT_INSN_SIZE, %eax
-
-       call    *ftrace_trace_function
-
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-       jmp     ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(mcount)
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       movl    0xc(%esp), %eax
-       lea     0x4(%ebp), %edx
-       movl    (%ebp), %ecx
-       subl    $MCOUNT_INSN_SIZE, %eax
-       call    prepare_ftrace_return
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-       ret
-END(ftrace_graph_caller)
-
-.globl return_to_handler
-return_to_handler:
-       pushl   %eax
-       pushl   %edx
-       movl    %ebp, %eax
-       call    ftrace_return_to_handler
-       movl    %eax, %ecx
-       popl    %edx
-       popl    %eax
-       jmp     *%ecx
-#endif
-
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
        ASM_CLAC
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d3743a37e990..55e8902c461f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -47,6 +47,7 @@ obj-y                 += setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y                  += probe_roms.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o ftrace_64.o
+obj-$(CONFIG_X86_32)   += ftrace_32.o
 obj-$(CONFIG_X86_ESPFIX64)     += espfix_64.o
 obj-$(CONFIG_SYSFS)    += ksysfs.o
 obj-y                  += bootflag.o e820.o
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
new file mode 100644
index 000000000000..1889a74823ce
--- /dev/null
+++ b/arch/x86/kernel/ftrace_32.S
@@ -0,0 +1,198 @@
+/*
+ *  linux/arch/x86/kernel/ftrace_32.S
+ *
+ *  Copyright (C) 2017  Steven Rostedt, VMware Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+#include <asm/export.h>
+#include <asm/ftrace.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+
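+/*
+ * With dynamic ftrace, mcount call sites are patched into nops at
+ * boot; calls that arrive here before that conversion simply return.
+ */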
+ENTRY(mcount)
+       ret
+END(mcount)
+
+ENTRY(ftrace_caller)
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       pushl   $0                              /* Pass NULL as regs pointer */
+       movl    4*4(%esp), %eax
+       movl    0x4(%ebp), %edx
+       movl    function_trace_op, %ecx
+       subl    $MCOUNT_INSN_SIZE, %eax
+
+.globl ftrace_call
+ftrace_call:
+       call    ftrace_stub
+
+       addl    $4, %esp                        /* skip NULL pointer */
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+.Lftrace_ret:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+       jmp     ftrace_stub
+#endif
+
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
+       ret
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+       pushf   /* push flags before compare (in cs location) */
+
+       /*
+        * i386 does not save SS and ESP when coming from kernel.
+        * Instead, to get sp, &regs->sp is used (see ptrace.h).
+        * Unfortunately, that means eflags must be at the same location
+        * as the current return ip is. We move the return ip into the
+        * ip location, and move flags into the return ip location.
+        */
+       pushl   4(%esp)                         /* save return ip into ip slot */
+
+       pushl   $0                              /* Load 0 into orig_ax */
+       pushl   %gs
+       pushl   %fs
+       pushl   %es
+       pushl   %ds
+       pushl   %eax
+       pushl   %ebp
+       pushl   %edi
+       pushl   %esi
+       pushl   %edx
+       pushl   %ecx
+       pushl   %ebx
+
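+       /*
+        * The frame now matches struct pt_regs.  Word offsets from %esp:
+        * 0=bx 1=cx 2=dx 3=si 4=di 5=bp 6=ax 7=ds 8=es 9=fs 10=gs
+        * 11=orig_ax 12=ip 13=cs 14=flags.  At this point the cs slot
+        * still holds the saved flags and the flags slot holds a copy
+        * of the return ip; the moves below swap them into place.
+        */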
+       movl    13*4(%esp), %eax                /* Get the saved flags */
+       movl    %eax, 14*4(%esp)                /* Move saved flags into regs->flags location */
+                                               /* clobbering return ip */
+       movl    $__KERNEL_CS, 13*4(%esp)
+
+       movl    12*4(%esp), %eax                /* Load ip (1st parameter) */
+       subl    $MCOUNT_INSN_SIZE, %eax         /* Adjust ip */
+       movl    0x4(%ebp), %edx                 /* Load parent ip (2nd parameter) */
+       movl    function_trace_op, %ecx         /* Save ftrace_pos in 3rd parameter */
+       pushl   %esp                            /* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+       call    ftrace_stub
+
+       addl    $4, %esp                        /* Skip pt_regs */
+       movl    14*4(%esp), %eax                /* Move flags back into cs */
+       movl    %eax, 13*4(%esp)                /* Needed to keep addl  from modifying flags */
+       movl    12*4(%esp), %eax                /* Get return ip from regs->ip */
+       movl    %eax, 14*4(%esp)                /* Put return ip back for ret */
+
+       popl    %ebx
+       popl    %ecx
+       popl    %edx
+       popl    %esi
+       popl    %edi
+       popl    %ebp
+       popl    %eax
+       popl    %ds
+       popl    %es
+       popl    %fs
+       popl    %gs
+       addl    $8, %esp                        /* Skip orig_ax and ip */
+       popf                                    /* Pop flags at end (no addl to corrupt flags) */
+       jmp     .Lftrace_ret
+
+       popf
+       jmp     ftrace_stub
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(mcount)
+       cmpl    $__PAGE_OFFSET, %esp
+       jb      ftrace_stub                     /* Paging not enabled yet? */
+
+       cmpl    $ftrace_stub, ftrace_trace_function
+       jnz     .Ltrace
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpl    $ftrace_stub, ftrace_graph_return
+       jnz     ftrace_graph_caller
+
+       cmpl    $ftrace_graph_entry_stub, ftrace_graph_entry
+       jnz     ftrace_graph_caller
+#endif
+.globl ftrace_stub
+ftrace_stub:
+       ret
+
+       /* taken from glibc */
+.Ltrace:
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       movl    0xc(%esp), %eax
+       movl    0x4(%ebp), %edx
+       subl    $MCOUNT_INSN_SIZE, %eax
+
+       call    *ftrace_trace_function
+
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+       jmp     ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(mcount)
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
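+/*
+ * Calls prepare_ftrace_return(self_ip, &parent_ip, frame_pointer);
+ * the 32-bit kernel is built with -mregparm=3, so the arguments are
+ * passed in %eax, %edx and %ecx.
+ */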
+ENTRY(ftrace_graph_caller)
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       movl    0xc(%esp), %eax
+       lea     0x4(%ebp), %edx
+       movl    (%ebp), %ecx
+       subl    $MCOUNT_INSN_SIZE, %eax
+       call    prepare_ftrace_return
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+       ret
+END(ftrace_graph_caller)
+
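+/*
+ * prepare_ftrace_return() hijacks the traced function's return address
+ * to point here; ftrace_return_to_handler() hands back the original
+ * return address, which we then jump to.
+ */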
+.globl return_to_handler
+return_to_handler:
+       pushl   %eax
+       pushl   %edx
+       movl    %ebp, %eax
+       call    ftrace_return_to_handler
+       movl    %eax, %ecx
+       popl    %edx
+       popl    %eax
+       jmp     *%ecx
+#endif
-- 
2.10.2

