Commit-ID:  ff04b440d2d645fd8a5b3385b1b2e4d19d3fe746
Gitweb:     http://git.kernel.org/tip/ff04b440d2d645fd8a5b3385b1b2e4d19d3fe746
Author:     Steven Rostedt (VMware) <rost...@goodmis.org>
AuthorDate: Thu, 23 Mar 2017 10:33:51 -0400
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Fri, 24 Mar 2017 10:14:07 +0100

x86/ftrace: Clean up ftrace_regs_caller

When ftrace_regs_caller was created, it was designed to preserve flags as
much as possible, as it needed to act just like a breakpoint triggered on
the same location. But the design is overly complicated: it treats all
operations as if they modified flags, while push, mov and lea do not. This
means the code can be simplified by storing flags further down, after those
instructions have executed.
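
For illustration (not part of the patch): pushf/popf move EFLAGS to and
from the stack, while push, mov and lea leave EFLAGS untouched. A minimal
sketch of the trick, assuming flags live in the slot at 8*4(%esp) as in
the i386 pt_regs layout used below:

	pushf				/* save EFLAGS onto the stack */
	popl	%eax			/* popl does not change EFLAGS */
	movl	%eax, 8*4(%esp)		/* movl does not change EFLAGS */

The same property lets the epilogue use lea instead of addl to adjust
%esp after popf, so the just-restored flags are not clobbered.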

Making ftrace_regs_caller simpler will also be useful in implementing fentry
logic.

Suggested-by: Linus Torvalds <torva...@linux-foundation.org>
Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
Reviewed-by: Masami Hiramatsu <mhira...@kernel.org>
Reviewed-by: Josh Poimboeuf <jpoim...@redhat.com>
Reviewed-by: Ingo Molnar <mi...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Andrew Morton <a...@linux-foundation.org>
Link: http://lkml.kernel.org/r/20170316135328.36123...@gandalf.local.home
Link: http://lkml.kernel.org/r/20170323143445.917292...@goodmis.org
Signed-off-by: Thomas Gleixner <t...@linutronix.de>

---
 arch/x86/kernel/ftrace_32.S | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index a4e2872..93e2664 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -54,23 +54,27 @@ WEAK(ftrace_stub)
 END(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
-       pushf   /* push flags before compare (in cs location) */
-
        /*
         * i386 does not save SS and ESP when coming from kernel.
         * Instead, to get sp, &regs->sp is used (see ptrace.h).
         * Unfortunately, that means eflags must be at the same location
         * as the current return ip is. We move the return ip into the
-        * ip location, and move flags into the return ip location.
+        * regs->ip location, and move flags into the return ip location.
         */
-       pushl   4(%esp)                         /* save return ip into ip slot */
-
+       pushl   $__KERNEL_CS
+       pushl   4(%esp)                         /* Save the return ip */
        pushl   $0                              /* Load 0 into orig_ax */
        pushl   %gs
        pushl   %fs
        pushl   %es
        pushl   %ds
        pushl   %eax
+
+       /* Get flags and place them into the return ip slot */
+       pushf
+       popl    %eax
+       movl    %eax, 8*4(%esp)
+
        pushl   %ebp
        pushl   %edi
        pushl   %esi
@@ -78,11 +82,6 @@ ENTRY(ftrace_regs_caller)
        pushl   %ecx
        pushl   %ebx
 
-       movl    13*4(%esp), %eax                /* Get the saved flags */
-       movl    %eax, 14*4(%esp)                /* Move saved flags into regs->flags location */
-                                               /* clobbering return ip */
-       movl    $__KERNEL_CS, 13*4(%esp)
-
        movl    12*4(%esp), %eax                /* Load ip (1st parameter) */
        subl    $MCOUNT_INSN_SIZE, %eax         /* Adjust ip */
        movl    0x4(%ebp), %edx                 /* Load parent ip (2nd parameter) */
@@ -93,10 +92,14 @@ GLOBAL(ftrace_regs_call)
        call    ftrace_stub
 
        addl    $4, %esp                        /* Skip pt_regs */
-       movl    14*4(%esp), %eax                /* Move flags back into cs */
-       movl    %eax, 13*4(%esp)                /* Needed to keep addl from modifying flags */
-       movl    12*4(%esp), %eax                /* Get return ip from regs->ip */
-       movl    %eax, 14*4(%esp)                /* Put return ip back for ret */
+
+       /* restore flags */
+       push    14*4(%esp)
+       popf
+
+       /* Move return ip back to its original location */
+       movl    12*4(%esp), %eax
+       movl    %eax, 14*4(%esp)
 
        popl    %ebx
        popl    %ecx
@@ -109,12 +112,11 @@ GLOBAL(ftrace_regs_call)
        popl    %es
        popl    %fs
        popl    %gs
-       addl    $8, %esp                        /* Skip orig_ax and ip */
-       popf                                    /* Pop flags at end (no addl to corrupt flags) */
-       jmp     .Lftrace_ret
 
-       popf
-       jmp     ftrace_stub
+       /* use lea to not affect flags */
+       lea     3*4(%esp), %esp                 /* Skip orig_ax, ip and cs */
+
+       jmp     .Lftrace_ret
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
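
For reference (a reconstruction from the offsets used above, not part of
the commit), the stack after all of the pushes lines up with struct
pt_regs on i386:

	 0*4(%esp)	regs->bx	(%ebx)
	 1*4(%esp)	regs->cx	(%ecx)
	 2*4(%esp)	regs->dx	(%edx)
	 3*4(%esp)	regs->si	(%esi)
	 4*4(%esp)	regs->di	(%edi)
	 5*4(%esp)	regs->bp	(%ebp)
	 6*4(%esp)	regs->ax	(%eax)
	 7*4(%esp)	regs->ds
	 8*4(%esp)	regs->es
	 9*4(%esp)	regs->fs
	10*4(%esp)	regs->gs
	11*4(%esp)	regs->orig_ax	($0)
	12*4(%esp)	regs->ip	(copy of the return ip)
	13*4(%esp)	regs->cs	($__KERNEL_CS)
	14*4(%esp)	regs->flags	(EFLAGS, written over the original return ip)

This is why the epilogue restores flags with push 14*4(%esp); popf, moves
the return ip from 12*4(%esp) back up to 14*4(%esp), and finally skips
orig_ax, ip and cs with lea, leaving the return ip on top of the stack.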
