Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Thu, 2012-07-19 at 08:52 -0400, Steven Rostedt wrote: > On Thu, 2012-07-19 at 11:20 +0900, Masami Hiramatsu wrote: > > (2012/07/19 0:59), Steven Rostedt wrote: > > > On Fri, 2012-07-13 at 14:47 -0400, Steven Rostedt wrote: > > > > > > Masami, can you give your Reviewed-by tag for this version? Or is there > > > something else needing to be fixed? > > > > No, that is OK for me. I've just missed that... > > > > Reviewed-by: Masami Hiramatsu > > > > Thank you! > > > > Thanks! Except I have one more version. Someone offlist gave me some > ideas. > > Here's the diff from the previous patch. > > diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S > index 46caa56..ca5a146 100644 > --- a/arch/x86/kernel/entry_32.S > +++ b/arch/x86/kernel/entry_32.S > @@ -1169,10 +1169,9 @@ ENTRY(ftrace_regs_caller) > movl $__KERNEL_CS,13*4(%esp) > > movl 12*4(%esp), %eax /* Load ip (1st parameter) */ > - movl 0x4(%ebp), %edx/* Load parent ip (2cd parameter) */ > - lea (%esp), %ecx > - pushl %ecx /* Save pt_regs as 4th parameter */ > + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ > leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ > + pushl %esp /* Save pt_regs as 4th parameter */ > > GLOBAL(ftrace_regs_call) I'm adding this as a separate patch under Uros's name. I'll be posting the full series for pulling either tonight or tomorrow, depending on how it handles all my stress tests. -- Steve -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Thu, 2012-07-19 at 08:52 -0400, Steven Rostedt wrote: > > GLOBAL(ftrace_regs_call) > call ftrace_stub > @@ -1195,8 +1194,8 @@ GLOBAL(ftrace_regs_call) > popl %es > popl %fs > popl %gs > - addl $8, %esp /* Skip orig_ax and ip */ > - popf/* Pop flags at end (no addl to corrupt flags) > */ > + lea 8(%esp), %esp /* Skip orig_ax and ip */ > + popf/* Pop flags at end */ > jmp ftrace_ret > > ftrace_restore_flags: > > > Because we no longer have that 4 byte offset on the stack when we need > to load the 4th parameter, we can just load the current stack pointer > into the stack (pushl %esp), without the save to %ecx step. > > also, because lea is faster than add (and doesn't even modify flags), I > changed the last part to use lea instead of addl. Now I'm told that this is not always the case (at least not for Atom), so I reverted this part and put back the addl. But can you still give you reviewed by for the first part? > > Can you give your reviewed-by tag for this too? I'd like to push this > out today so we can still make 3.6. > > Thanks! 
> > -- Steve > > here's the full patch: > > diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h > index a847501..a6cae0c 100644 > --- a/arch/x86/include/asm/ftrace.h > +++ b/arch/x86/include/asm/ftrace.h > @@ -40,10 +40,8 @@ > > #ifdef CONFIG_DYNAMIC_FTRACE > #define ARCH_SUPPORTS_FTRACE_OPS 1 > -#ifdef CONFIG_X86_64 > #define ARCH_SUPPORTS_FTRACE_SAVE_REGS > #endif > -#endif > > #ifndef __ASSEMBLY__ > extern void mcount(void); > diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S > index 5da11d1..ca5a146 100644 > --- a/arch/x86/kernel/entry_32.S > +++ b/arch/x86/kernel/entry_32.S > @@ -1123,6 +1123,7 @@ ftrace_call: > popl %edx > popl %ecx > popl %eax > +ftrace_ret: > #ifdef CONFIG_FUNCTION_GRAPH_TRACER > .globl ftrace_graph_call > ftrace_graph_call: > @@ -1134,6 +1135,72 @@ ftrace_stub: > ret > END(ftrace_caller) > > +ENTRY(ftrace_regs_caller) > + pushf /* push flags before compare (in cs location) */ > + cmpl $0, function_trace_stop > + jne ftrace_restore_flags > + > + /* > + * i386 does not save SS and ESP when coming from kernel. > + * Instead, to get sp, >sp is used (see ptrace.h). > + * Unfortunately, that means eflags must be at the same location > + * as the current return ip is. We move the return ip into the > + * ip location, and move flags into the return ip location. 
> + */ > + pushl 4(%esp) /* save return ip into ip slot */ > + subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */ > + > + pushl $0/* Load 0 into orig_ax */ > + pushl %gs > + pushl %fs > + pushl %es > + pushl %ds > + pushl %eax > + pushl %ebp > + pushl %edi > + pushl %esi > + pushl %edx > + pushl %ecx > + pushl %ebx > + > + movl 13*4(%esp), %eax /* Get the saved flags */ > + movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ > + /* clobbering return ip */ > + movl $__KERNEL_CS,13*4(%esp) > + > + movl 12*4(%esp), %eax /* Load ip (1st parameter) */ > + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ > + leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ > + pushl %esp /* Save pt_regs as 4th parameter */ > + > +GLOBAL(ftrace_regs_call) > + call ftrace_stub > + > + addl $4, %esp /* Skip pt_regs */ > + movl 14*4(%esp), %eax /* Move flags back into cs */ > + movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ > + movl 12*4(%esp), %eax /* Get return ip from regs->ip */ > + addl $MCOUNT_INSN_SIZE, %eax > + movl %eax, 14*4(%esp) /* Put return ip back for ret */ > + > + popl %ebx > + popl %ecx > + popl %edx > + popl %esi > + popl %edi > + popl %ebp > + popl %eax > + popl %ds > + popl %es > + popl %fs > + popl %gs + addl $8, %esp /* Skip orig_ax and ip */ + popf/* Pop flags at end (no addl to corrupt flags) */ The above has been changed to this again. -- Steve > + jmp ftrace_ret > + > +ftrace_restore_flags: > + popf > + jmp ftrace_stub > #else /* ! 
CONFIG_DYNAMIC_FTRACE */ > > ENTRY(mcount) > diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c > index b90eb1a..1d41402 100644 > --- a/arch/x86/kernel/ftrace.c > +++ b/arch/x86/kernel/ftrace.c > @@ -206,7 +206,6 @@ static int > ftrace_modify_code(unsigned long ip, unsigned const char *old_code, > unsigned const char *new_code); > > -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS > /* > * Should never be called: > * As it is only called by __ftrace_replace_code() which is called by > @@ -221,7 +220,6 @@ int ftrace_modify_call(struct
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Thu, 2012-07-19 at 11:20 +0900, Masami Hiramatsu wrote: > (2012/07/19 0:59), Steven Rostedt wrote: > > On Fri, 2012-07-13 at 14:47 -0400, Steven Rostedt wrote: > > > > Masami, can you give your Reviewed-by tag for this version? Or is there > > something else needing to be fixed? > > No, that is OK for me. I've just missed that... > > Reviewed-by: Masami Hiramatsu > > Thank you! > Thanks! Except I have one more version. Someone offlist gave me some ideas. Here's the diff from the previous patch. diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 46caa56..ca5a146 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1169,10 +1169,9 @@ ENTRY(ftrace_regs_caller) movl $__KERNEL_CS,13*4(%esp) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ - movl 0x4(%ebp), %edx/* Load parent ip (2cd parameter) */ - lea (%esp), %ecx - pushl %ecx /* Save pt_regs as 4th parameter */ + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) call ftrace_stub @@ -1195,8 +1194,8 @@ GLOBAL(ftrace_regs_call) popl %es popl %fs popl %gs - addl $8, %esp /* Skip orig_ax and ip */ - popf/* Pop flags at end (no addl to corrupt flags) */ + lea 8(%esp), %esp /* Skip orig_ax and ip */ + popf/* Pop flags at end */ jmp ftrace_ret ftrace_restore_flags: Because we no longer have that 4 byte offset on the stack when we need to load the 4th parameter, we can just load the current stack pointer into the stack (pushl %esp), without the save to %ecx step. also, because lea is faster than add (and doesn't even modify flags), I changed the last part to use lea instead of addl. Can you give your reviewed-by tag for this too? I'd like to push this out today so we can still make 3.6. Thanks! 
-- Steve here's the full patch: diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index a847501..a6cae0c 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -40,10 +40,8 @@ #ifdef CONFIG_DYNAMIC_FTRACE #define ARCH_SUPPORTS_FTRACE_OPS 1 -#ifdef CONFIG_X86_64 #define ARCH_SUPPORTS_FTRACE_SAVE_REGS #endif -#endif #ifndef __ASSEMBLY__ extern void mcount(void); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5da11d1..ca5a146 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1123,6 +1123,7 @@ ftrace_call: popl %edx popl %ecx popl %eax +ftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: @@ -1134,6 +1135,72 @@ ftrace_stub: ret END(ftrace_caller) +ENTRY(ftrace_regs_caller) + pushf /* push flags before compare (in cs location) */ + cmpl $0, function_trace_stop + jne ftrace_restore_flags + + /* +* i386 does not save SS and ESP when coming from kernel. +* Instead, to get sp, >sp is used (see ptrace.h). +* Unfortunately, that means eflags must be at the same location +* as the current return ip is. We move the return ip into the +* ip location, and move flags into the return ip location. 
+*/ + pushl 4(%esp) /* save return ip into ip slot */ + subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */ + + pushl $0/* Load 0 into orig_ax */ + pushl %gs + pushl %fs + pushl %es + pushl %ds + pushl %eax + pushl %ebp + pushl %edi + pushl %esi + pushl %edx + pushl %ecx + pushl %ebx + + movl 13*4(%esp), %eax /* Get the saved flags */ + movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ + /* clobbering return ip */ + movl $__KERNEL_CS,13*4(%esp) + + movl 12*4(%esp), %eax /* Load ip (1st parameter) */ + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ + leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ + +GLOBAL(ftrace_regs_call) + call ftrace_stub + + addl $4, %esp /* Skip pt_regs */ + movl 14*4(%esp), %eax /* Move flags back into cs */ + movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ + movl 12*4(%esp), %eax /* Get return ip from regs->ip */ + addl $MCOUNT_INSN_SIZE, %eax + movl %eax, 14*4(%esp) /* Put return ip back for ret */ + + popl %ebx + popl %ecx + popl %edx + popl %esi + popl %edi + popl %ebp + popl %eax + popl %ds + popl %es + popl %fs + popl %gs + lea 8(%esp), %esp
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Thu, 2012-07-19 at 11:20 +0900, Masami Hiramatsu wrote: (2012/07/19 0:59), Steven Rostedt wrote: On Fri, 2012-07-13 at 14:47 -0400, Steven Rostedt wrote: Masami, can you give your Reviewed-by tag for this version? Or is there something else needing to be fixed? No, that is OK for me. I've just missed that... Reviewed-by: Masami Hiramatsu masami.hiramatsu...@hitachi.com Thank you! Thanks! Except I have one more version. Someone offlist gave me some ideas. Here's the diff from the previous patch. diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 46caa56..ca5a146 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1169,10 +1169,9 @@ ENTRY(ftrace_regs_caller) movl $__KERNEL_CS,13*4(%esp) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ - movl 0x4(%ebp), %edx/* Load parent ip (2cd parameter) */ - lea (%esp), %ecx - pushl %ecx /* Save pt_regs as 4th parameter */ + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) call ftrace_stub @@ -1195,8 +1194,8 @@ GLOBAL(ftrace_regs_call) popl %es popl %fs popl %gs - addl $8, %esp /* Skip orig_ax and ip */ - popf/* Pop flags at end (no addl to corrupt flags) */ + lea 8(%esp), %esp /* Skip orig_ax and ip */ + popf/* Pop flags at end */ jmp ftrace_ret ftrace_restore_flags: Because we no longer have that 4 byte offset on the stack when we need to load the 4th parameter, we can just load the current stack pointer into the stack (pushl %esp), without the save to %ecx step. also, because lea is faster than add (and doesn't even modify flags), I changed the last part to use lea instead of addl. Can you give your reviewed-by tag for this too? I'd like to push this out today so we can still make 3.6. Thanks! 
-- Steve here's the full patch: diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index a847501..a6cae0c 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -40,10 +40,8 @@ #ifdef CONFIG_DYNAMIC_FTRACE #define ARCH_SUPPORTS_FTRACE_OPS 1 -#ifdef CONFIG_X86_64 #define ARCH_SUPPORTS_FTRACE_SAVE_REGS #endif -#endif #ifndef __ASSEMBLY__ extern void mcount(void); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5da11d1..ca5a146 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1123,6 +1123,7 @@ ftrace_call: popl %edx popl %ecx popl %eax +ftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: @@ -1134,6 +1135,72 @@ ftrace_stub: ret END(ftrace_caller) +ENTRY(ftrace_regs_caller) + pushf /* push flags before compare (in cs location) */ + cmpl $0, function_trace_stop + jne ftrace_restore_flags + + /* +* i386 does not save SS and ESP when coming from kernel. +* Instead, to get sp, regs-sp is used (see ptrace.h). +* Unfortunately, that means eflags must be at the same location +* as the current return ip is. We move the return ip into the +* ip location, and move flags into the return ip location. 
+*/ + pushl 4(%esp) /* save return ip into ip slot */ + subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */ + + pushl $0/* Load 0 into orig_ax */ + pushl %gs + pushl %fs + pushl %es + pushl %ds + pushl %eax + pushl %ebp + pushl %edi + pushl %esi + pushl %edx + pushl %ecx + pushl %ebx + + movl 13*4(%esp), %eax /* Get the saved flags */ + movl %eax, 14*4(%esp) /* Move saved flags into regs-flags location */ + /* clobbering return ip */ + movl $__KERNEL_CS,13*4(%esp) + + movl 12*4(%esp), %eax /* Load ip (1st parameter) */ + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ + leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ + +GLOBAL(ftrace_regs_call) + call ftrace_stub + + addl $4, %esp /* Skip pt_regs */ + movl 14*4(%esp), %eax /* Move flags back into cs */ + movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ + movl 12*4(%esp), %eax /* Get return ip from regs-ip */ + addl $MCOUNT_INSN_SIZE, %eax + movl %eax, 14*4(%esp) /* Put return ip back for ret */ + + popl %ebx + popl %ecx + popl %edx + popl %esi + popl %edi + popl %ebp + popl %eax + popl %ds + popl %es + popl %fs + popl %gs + lea
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Thu, 2012-07-19 at 08:52 -0400, Steven Rostedt wrote: GLOBAL(ftrace_regs_call) call ftrace_stub @@ -1195,8 +1194,8 @@ GLOBAL(ftrace_regs_call) popl %es popl %fs popl %gs - addl $8, %esp /* Skip orig_ax and ip */ - popf/* Pop flags at end (no addl to corrupt flags) */ + lea 8(%esp), %esp /* Skip orig_ax and ip */ + popf/* Pop flags at end */ jmp ftrace_ret ftrace_restore_flags: Because we no longer have that 4 byte offset on the stack when we need to load the 4th parameter, we can just load the current stack pointer into the stack (pushl %esp), without the save to %ecx step. also, because lea is faster than add (and doesn't even modify flags), I changed the last part to use lea instead of addl. Now I'm told that this is not always the case (at least not for Atom), so I reverted this part and put back the addl. But can you still give you reviewed by for the first part? Can you give your reviewed-by tag for this too? I'd like to push this out today so we can still make 3.6. Thanks! -- Steve here's the full patch: diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index a847501..a6cae0c 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -40,10 +40,8 @@ #ifdef CONFIG_DYNAMIC_FTRACE #define ARCH_SUPPORTS_FTRACE_OPS 1 -#ifdef CONFIG_X86_64 #define ARCH_SUPPORTS_FTRACE_SAVE_REGS #endif -#endif #ifndef __ASSEMBLY__ extern void mcount(void); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5da11d1..ca5a146 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1123,6 +1123,7 @@ ftrace_call: popl %edx popl %ecx popl %eax +ftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: @@ -1134,6 +1135,72 @@ ftrace_stub: ret END(ftrace_caller) +ENTRY(ftrace_regs_caller) + pushf /* push flags before compare (in cs location) */ + cmpl $0, function_trace_stop + jne ftrace_restore_flags + + /* + * i386 does not save SS and ESP when coming from 
kernel. + * Instead, to get sp, regs-sp is used (see ptrace.h). + * Unfortunately, that means eflags must be at the same location + * as the current return ip is. We move the return ip into the + * ip location, and move flags into the return ip location. + */ + pushl 4(%esp) /* save return ip into ip slot */ + subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */ + + pushl $0/* Load 0 into orig_ax */ + pushl %gs + pushl %fs + pushl %es + pushl %ds + pushl %eax + pushl %ebp + pushl %edi + pushl %esi + pushl %edx + pushl %ecx + pushl %ebx + + movl 13*4(%esp), %eax /* Get the saved flags */ + movl %eax, 14*4(%esp) /* Move saved flags into regs-flags location */ + /* clobbering return ip */ + movl $__KERNEL_CS,13*4(%esp) + + movl 12*4(%esp), %eax /* Load ip (1st parameter) */ + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ + leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ + +GLOBAL(ftrace_regs_call) + call ftrace_stub + + addl $4, %esp /* Skip pt_regs */ + movl 14*4(%esp), %eax /* Move flags back into cs */ + movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ + movl 12*4(%esp), %eax /* Get return ip from regs-ip */ + addl $MCOUNT_INSN_SIZE, %eax + movl %eax, 14*4(%esp) /* Put return ip back for ret */ + + popl %ebx + popl %ecx + popl %edx + popl %esi + popl %edi + popl %ebp + popl %eax + popl %ds + popl %es + popl %fs + popl %gs + addl $8, %esp /* Skip orig_ax and ip */ + popf/* Pop flags at end (no addl to corrupt flags) */ The above has been changed to this again. -- Steve + jmp ftrace_ret + +ftrace_restore_flags: + popf + jmp ftrace_stub #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index b90eb1a..1d41402 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -206,7 +206,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned const char *old_code, unsigned const char *new_code); -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS /* * Should never be called: * As it is only called by __ftrace_replace_code() which is called by @@ -221,7 +220,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, WARN_ON(1); return -EINVAL; } -#endif int ftrace_update_ftrace_func(ftrace_func_t
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Thu, 2012-07-19 at 08:52 -0400, Steven Rostedt wrote: On Thu, 2012-07-19 at 11:20 +0900, Masami Hiramatsu wrote: (2012/07/19 0:59), Steven Rostedt wrote: On Fri, 2012-07-13 at 14:47 -0400, Steven Rostedt wrote: Masami, can you give your Reviewed-by tag for this version? Or is there something else needing to be fixed? No, that is OK for me. I've just missed that... Reviewed-by: Masami Hiramatsu masami.hiramatsu...@hitachi.com Thank you! Thanks! Except I have one more version. Someone offlist gave me some ideas. Here's the diff from the previous patch. diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 46caa56..ca5a146 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1169,10 +1169,9 @@ ENTRY(ftrace_regs_caller) movl $__KERNEL_CS,13*4(%esp) movl 12*4(%esp), %eax /* Load ip (1st parameter) */ - movl 0x4(%ebp), %edx/* Load parent ip (2cd parameter) */ - lea (%esp), %ecx - pushl %ecx /* Save pt_regs as 4th parameter */ + movl 0x4(%ebp), %edx/* Load parent ip (2nd parameter) */ leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ GLOBAL(ftrace_regs_call) I'm adding this as a separate patch under Uros's name. I'll be posting the full series for pulling either tonight or tomorrow, depending on how it handles all my stress tests. -- Steve -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
(2012/07/19 0:59), Steven Rostedt wrote: > On Fri, 2012-07-13 at 14:47 -0400, Steven Rostedt wrote: > > Masami, can you give your Reviewed-by tag for this version? Or is there > something else needing to be fixed? No, that is OK for me. I've just missed that... Reviewed-by: Masami Hiramatsu Thank you! > > Thanks! > > -- Steve > >> From: Steven Rostedt >> Date: Tue, 5 Jun 2012 20:00:11 -0400 >> Subject: [PATCH] ftrace/x86: Add save_regs for i386 function calls >> >> Add saving full regs for function tracing on i386. >> The saving of regs was influenced by patches sent out by >> Masami Hiramatsu. >> >> Cc: Masami Hiramatsu >> Signed-off-by: Steven Rostedt >> --- >> arch/x86/include/asm/ftrace.h |2 - >> arch/x86/kernel/entry_32.S| 68 >> + >> arch/x86/kernel/ftrace.c |4 -- >> 3 files changed, 68 insertions(+), 6 deletions(-) >> >> diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h >> index a847501..a6cae0c 100644 >> --- a/arch/x86/include/asm/ftrace.h >> +++ b/arch/x86/include/asm/ftrace.h >> @@ -40,10 +40,8 @@ >> >> #ifdef CONFIG_DYNAMIC_FTRACE >> #define ARCH_SUPPORTS_FTRACE_OPS 1 >> -#ifdef CONFIG_X86_64 >> #define ARCH_SUPPORTS_FTRACE_SAVE_REGS >> #endif >> -#endif >> >> #ifndef __ASSEMBLY__ >> extern void mcount(void); >> diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S >> index 5da11d1..46caa56 100644 >> --- a/arch/x86/kernel/entry_32.S >> +++ b/arch/x86/kernel/entry_32.S >> @@ -1123,6 +1123,7 @@ ftrace_call: >> popl %edx >> popl %ecx >> popl %eax >> +ftrace_ret: >> #ifdef CONFIG_FUNCTION_GRAPH_TRACER >> .globl ftrace_graph_call >> ftrace_graph_call: >> @@ -1134,6 +1135,73 @@ ftrace_stub: >> ret >> END(ftrace_caller) >> >> +ENTRY(ftrace_regs_caller) >> +pushf /* push flags before compare (in cs location) */ >> +cmpl $0, function_trace_stop >> +jne ftrace_restore_flags >> + >> +/* >> + * i386 does not save SS and ESP when coming from kernel. >> + * Instead, to get sp, >sp is used (see ptrace.h). 
>> + * Unfortunately, that means eflags must be at the same location >> + * as the current return ip is. We move the return ip into the >> + * ip location, and move flags into the return ip location. >> + */ >> +pushl 4(%esp) /* save return ip into ip slot */ >> +subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */ >> + >> +pushl $0/* Load 0 into orig_ax */ >> +pushl %gs >> +pushl %fs >> +pushl %es >> +pushl %ds >> +pushl %eax >> +pushl %ebp >> +pushl %edi >> +pushl %esi >> +pushl %edx >> +pushl %ecx >> +pushl %ebx >> + >> +movl 13*4(%esp), %eax /* Get the saved flags */ >> +movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ >> +/* clobbering return ip */ >> +movl $__KERNEL_CS,13*4(%esp) >> + >> +movl 12*4(%esp), %eax /* Load ip (1st parameter) */ >> +movl 0x4(%ebp), %edx/* Load parent ip (2cd parameter) */ >> +lea (%esp), %ecx >> +pushl %ecx /* Save pt_regs as 4th parameter */ >> +leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ >> + >> +GLOBAL(ftrace_regs_call) >> +call ftrace_stub >> + >> +addl $4, %esp /* Skip pt_regs */ >> +movl 14*4(%esp), %eax /* Move flags back into cs */ >> +movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ >> +movl 12*4(%esp), %eax /* Get return ip from regs->ip */ >> +addl $MCOUNT_INSN_SIZE, %eax >> +movl %eax, 14*4(%esp) /* Put return ip back for ret */ >> + >> +popl %ebx >> +popl %ecx >> +popl %edx >> +popl %esi >> +popl %edi >> +popl %ebp >> +popl %eax >> +popl %ds >> +popl %es >> +popl %fs >> +popl %gs >> +addl $8, %esp /* Skip orig_ax and ip */ >> +popf/* Pop flags at end (no addl to corrupt flags) >> */ >> +jmp ftrace_ret >> + >> +ftrace_restore_flags: >> +popf >> +jmp ftrace_stub >> #else /* ! 
CONFIG_DYNAMIC_FTRACE */ >> >> ENTRY(mcount) >> diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c >> index b90eb1a..1d41402 100644 >> --- a/arch/x86/kernel/ftrace.c >> +++ b/arch/x86/kernel/ftrace.c >> @@ -206,7 +206,6 @@ static int >> ftrace_modify_code(unsigned long ip, unsigned const char *old_code, >> unsigned const char *new_code); >> >> -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS >> /* >> * Should never be called: >> * As it is only called by __ftrace_replace_code() which is called by >> @@ -221,7 +220,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned >> long old_addr, >> WARN_ON(1); >> return -EINVAL; >> } >> -#endif >> >> int ftrace_update_ftrace_func(ftrace_func_t func) >> { >> @@ -237,7 +235,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) >> >> ret =
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
(2012/07/19 0:59), Steven Rostedt wrote: On Fri, 2012-07-13 at 14:47 -0400, Steven Rostedt wrote: Masami, can you give your Reviewed-by tag for this version? Or is there something else needing to be fixed? No, that is OK for me. I've just missed that... Reviewed-by: Masami Hiramatsu masami.hiramatsu...@hitachi.com Thank you! Thanks! -- Steve From: Steven Rostedt srost...@redhat.com Date: Tue, 5 Jun 2012 20:00:11 -0400 Subject: [PATCH] ftrace/x86: Add save_regs for i386 function calls Add saving full regs for function tracing on i386. The saving of regs was influenced by patches sent out by Masami Hiramatsu. Cc: Masami Hiramatsu masami.hiramatsu...@hitachi.com Signed-off-by: Steven Rostedt rost...@goodmis.org --- arch/x86/include/asm/ftrace.h |2 - arch/x86/kernel/entry_32.S| 68 + arch/x86/kernel/ftrace.c |4 -- 3 files changed, 68 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index a847501..a6cae0c 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -40,10 +40,8 @@ #ifdef CONFIG_DYNAMIC_FTRACE #define ARCH_SUPPORTS_FTRACE_OPS 1 -#ifdef CONFIG_X86_64 #define ARCH_SUPPORTS_FTRACE_SAVE_REGS #endif -#endif #ifndef __ASSEMBLY__ extern void mcount(void); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5da11d1..46caa56 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1123,6 +1123,7 @@ ftrace_call: popl %edx popl %ecx popl %eax +ftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: @@ -1134,6 +1135,73 @@ ftrace_stub: ret END(ftrace_caller) +ENTRY(ftrace_regs_caller) +pushf /* push flags before compare (in cs location) */ +cmpl $0, function_trace_stop +jne ftrace_restore_flags + +/* + * i386 does not save SS and ESP when coming from kernel. + * Instead, to get sp, regs-sp is used (see ptrace.h). + * Unfortunately, that means eflags must be at the same location + * as the current return ip is. 
We move the return ip into the + * ip location, and move flags into the return ip location. + */ +pushl 4(%esp) /* save return ip into ip slot */ +subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */ + +pushl $0/* Load 0 into orig_ax */ +pushl %gs +pushl %fs +pushl %es +pushl %ds +pushl %eax +pushl %ebp +pushl %edi +pushl %esi +pushl %edx +pushl %ecx +pushl %ebx + +movl 13*4(%esp), %eax /* Get the saved flags */ +movl %eax, 14*4(%esp) /* Move saved flags into regs-flags location */ +/* clobbering return ip */ +movl $__KERNEL_CS,13*4(%esp) + +movl 12*4(%esp), %eax /* Load ip (1st parameter) */ +movl 0x4(%ebp), %edx/* Load parent ip (2cd parameter) */ +lea (%esp), %ecx +pushl %ecx /* Save pt_regs as 4th parameter */ +leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + +GLOBAL(ftrace_regs_call) +call ftrace_stub + +addl $4, %esp /* Skip pt_regs */ +movl 14*4(%esp), %eax /* Move flags back into cs */ +movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ +movl 12*4(%esp), %eax /* Get return ip from regs-ip */ +addl $MCOUNT_INSN_SIZE, %eax +movl %eax, 14*4(%esp) /* Put return ip back for ret */ + +popl %ebx +popl %ecx +popl %edx +popl %esi +popl %edi +popl %ebp +popl %eax +popl %ds +popl %es +popl %fs +popl %gs +addl $8, %esp /* Skip orig_ax and ip */ +popf/* Pop flags at end (no addl to corrupt flags) */ +jmp ftrace_ret + +ftrace_restore_flags: +popf +jmp ftrace_stub #else /* ! 
CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index b90eb1a..1d41402 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -206,7 +206,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned const char *old_code, unsigned const char *new_code); -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS /* * Should never be called: * As it is only called by __ftrace_replace_code() which is called by @@ -221,7 +220,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, WARN_ON(1); return -EINVAL; } -#endif int ftrace_update_ftrace_func(ftrace_func_t func) { @@ -237,7 +235,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ret = ftrace_modify_code(ip, old, new); -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(ftrace_regs_call); @@ -245,7
Re: Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
(2012/07/17 12:05), Steven Rostedt wrote: > On Tue, 2012-07-17 at 11:08 +0900, Masami Hiramatsu wrote: > >>> I found that regs_get_register() doesn't honor this either. Thus, >>> kprobes in tracing gets this: >>> >>> # echo 'p:ftrace sys_read+4 s=%sp' > /debug/tracing/kprobe_events >>> # echo 1 > /debug/tracing/events/kprobes/enable >>> # cat trace >>> sshd-1345 [000] d... 489.117168: ftrace: (sys_read+0x4/0x70) >>> s=b7e96768 >>> sshd-1345 [000] d... 489.117191: ftrace: (sys_read+0x4/0x70) >>> s=b7e96768 >>> cat-1447 [000] d... 489.117392: ftrace: (sys_read+0x4/0x70) >>> s=5a7 >>> cat-1447 [001] d... 489.118023: ftrace: (sys_read+0x4/0x70) >>> s=b77ad05f >>> less-1448 [000] d... 489.118079: ftrace: (sys_read+0x4/0x70) >>> s=b7762e06 >>> less-1448 [000] d... 489.118117: ftrace: (sys_read+0x4/0x70) >>> s=b7764970 >>> >> >> Yes, that is by design, since I made it so. :) >> Instead of %sp, kprobe tracer provides $stack special argument >> for stack address, because "sp" is not always means the stack >> address on every arch. > > But is that useful? Wouldn't the actual stack pointer be more > informative? It is just FYI :). I rather like your "%sp" enhancement than current meaningless "%sp" on i386... However, I think "$stack" is more general and informative for users, thus, at least perf probe uses it for getting variables from stack. Thank you, -- Masami HIRAMATSU Software Platform Research Dept. Linux Technology Center Hitachi, Ltd., Yokohama Research Laboratory E-mail: masami.hiramatsu...@hitachi.com -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Tue, 2012-07-17 at 11:08 +0900, Masami Hiramatsu wrote: > > I found that regs_get_register() doesn't honor this either. Thus, > > kprobes in tracing gets this: > > > > # echo 'p:ftrace sys_read+4 s=%sp' > /debug/tracing/kprobe_events > > # echo 1 > /debug/tracing/events/kprobes/enable > > # cat trace > > sshd-1345 [000] d... 489.117168: ftrace: (sys_read+0x4/0x70) > > s=b7e96768 > > sshd-1345 [000] d... 489.117191: ftrace: (sys_read+0x4/0x70) > > s=b7e96768 > > cat-1447 [000] d... 489.117392: ftrace: (sys_read+0x4/0x70) > > s=5a7 > > cat-1447 [001] d... 489.118023: ftrace: (sys_read+0x4/0x70) > > s=b77ad05f > > less-1448 [000] d... 489.118079: ftrace: (sys_read+0x4/0x70) > > s=b7762e06 > > less-1448 [000] d... 489.118117: ftrace: (sys_read+0x4/0x70) > > s=b7764970 > > > > Yes, that is by design, since I made it so. :) > Instead of %sp, kprobe tracer provides $stack special argument > for stack address, because "sp" is not always means the stack > address on every arch. But is that useful? Wouldn't the actual stack pointer be more informative? -- Steve -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
(2012/07/14 3:47), Steven Rostedt wrote: > On Thu, 2012-07-12 at 21:39 +0900, Masami Hiramatsu wrote: > >> /* >> * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode >> * when it traps. The previous stack will be directly underneath the saved >> * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. >> * >> * This is valid only for kernel mode traps. >> */ >> static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) >> { >> #ifdef CONFIG_X86_32 >> return (unsigned long)(&regs->sp); >> #else >> return regs->sp; >> #endif >> } > > I found that regs_get_register() doesn't honor this either. Thus, > kprobes in tracing gets this: > > # echo 'p:ftrace sys_read+4 s=%sp' > /debug/tracing/kprobe_events > # echo 1 > /debug/tracing/events/kprobes/enable > # cat trace > sshd-1345 [000] d... 489.117168: ftrace: (sys_read+0x4/0x70) > s=b7e96768 > sshd-1345 [000] d... 489.117191: ftrace: (sys_read+0x4/0x70) > s=b7e96768 > cat-1447 [000] d... 489.117392: ftrace: (sys_read+0x4/0x70) > s=5a7 > cat-1447 [001] d... 489.118023: ftrace: (sys_read+0x4/0x70) > s=b77ad05f > less-1448 [000] d... 489.118079: ftrace: (sys_read+0x4/0x70) > s=b7762e06 > less-1448 [000] d... 489.118117: ftrace: (sys_read+0x4/0x70) > s=b7764970 > Yes, that is by design, since I made it so. :) Instead of %sp, kprobe tracer provides $stack special argument for stack address, because "sp" is not always means the stack address on every arch. Thanks, -- Masami HIRAMATSU Software Platform Research Dept. Linux Technology Center Hitachi, Ltd., Yokohama Research Laboratory E-mail: masami.hiramatsu...@hitachi.com -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
(2012/07/14 3:47), Steven Rostedt wrote: On Thu, 2012-07-12 at 21:39 +0900, Masami Hiramatsu wrote: /* * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode * when it traps. The previous stack will be directly underneath the saved * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. * * This is valid only for kernel mode traps. */ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) { #ifdef CONFIG_X86_32 return (unsigned long)(&regs->sp); #else return regs->sp; #endif } I found that regs_get_register() doesn't honor this either. Thus, kprobes in tracing gets this: # echo 'p:ftrace sys_read+4 s=%sp' > /debug/tracing/kprobe_events # echo 1 > /debug/tracing/events/kprobes/enable # cat trace sshd-1345 [000] d... 489.117168: ftrace: (sys_read+0x4/0x70) s=b7e96768 sshd-1345 [000] d... 489.117191: ftrace: (sys_read+0x4/0x70) s=b7e96768 cat-1447 [000] d... 489.117392: ftrace: (sys_read+0x4/0x70) s=5a7 cat-1447 [001] d... 489.118023: ftrace: (sys_read+0x4/0x70) s=b77ad05f less-1448 [000] d... 489.118079: ftrace: (sys_read+0x4/0x70) s=b7762e06 less-1448 [000] d... 489.118117: ftrace: (sys_read+0x4/0x70) s=b7764970 Yes, that is by design, since I made it so. :) Instead of %sp, kprobe tracer provides $stack special argument for stack address, because sp is not always means the stack address on every arch. Thanks, -- Masami HIRAMATSU Software Platform Research Dept. Linux Technology Center Hitachi, Ltd., Yokohama Research Laboratory E-mail: masami.hiramatsu...@hitachi.com -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
On Tue, 2012-07-17 at 11:08 +0900, Masami Hiramatsu wrote: I found that regs_get_register() doesn't honor this either. Thus, kprobes in tracing gets this: # echo 'p:ftrace sys_read+4 s=%sp' > /debug/tracing/kprobe_events # echo 1 > /debug/tracing/events/kprobes/enable # cat trace sshd-1345 [000] d... 489.117168: ftrace: (sys_read+0x4/0x70) s=b7e96768 sshd-1345 [000] d... 489.117191: ftrace: (sys_read+0x4/0x70) s=b7e96768 cat-1447 [000] d... 489.117392: ftrace: (sys_read+0x4/0x70) s=5a7 cat-1447 [001] d... 489.118023: ftrace: (sys_read+0x4/0x70) s=b77ad05f less-1448 [000] d... 489.118079: ftrace: (sys_read+0x4/0x70) s=b7762e06 less-1448 [000] d... 489.118117: ftrace: (sys_read+0x4/0x70) s=b7764970 Yes, that is by design, since I made it so. :) Instead of %sp, kprobe tracer provides $stack special argument for stack address, because sp is not always means the stack address on every arch. But is that useful? Wouldn't the actual stack pointer be more informative? -- Steve -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: Re: Re: [RFC][PATCH 2/4 v4] ftrace/x86: Add save_regs for i386 function calls
(2012/07/17 12:05), Steven Rostedt wrote: On Tue, 2012-07-17 at 11:08 +0900, Masami Hiramatsu wrote: I found that regs_get_register() doesn't honor this either. Thus, kprobes in tracing gets this: # echo 'p:ftrace sys_read+4 s=%sp' > /debug/tracing/kprobe_events # echo 1 > /debug/tracing/events/kprobes/enable # cat trace sshd-1345 [000] d... 489.117168: ftrace: (sys_read+0x4/0x70) s=b7e96768 sshd-1345 [000] d... 489.117191: ftrace: (sys_read+0x4/0x70) s=b7e96768 cat-1447 [000] d... 489.117392: ftrace: (sys_read+0x4/0x70) s=5a7 cat-1447 [001] d... 489.118023: ftrace: (sys_read+0x4/0x70) s=b77ad05f less-1448 [000] d... 489.118079: ftrace: (sys_read+0x4/0x70) s=b7762e06 less-1448 [000] d... 489.118117: ftrace: (sys_read+0x4/0x70) s=b7764970 Yes, that is by design, since I made it so. :) Instead of %sp, kprobe tracer provides $stack special argument for stack address, because sp is not always means the stack address on every arch. But is that useful? Wouldn't the actual stack pointer be more informative? It is just FYI :). I rather like your %sp enhancement than current meaningless %sp on i386... However, I think $stack is more general and informative for users, thus, at least perf probe uses it for getting variables from stack. Thank you, -- Masami HIRAMATSU Software Platform Research Dept. Linux Technology Center Hitachi, Ltd., Yokohama Research Laboratory E-mail: masami.hiramatsu...@hitachi.com -- To unsubscribe from this list: send the line unsubscribe linux-kernel in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/