Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/ftrace.h |  2
-rw-r--r--   arch/x86/kernel/entry_32.S    | 68
-rw-r--r--   arch/x86/kernel/ftrace.c      |  4
3 files changed, 68 insertions, 6 deletions
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index a8475014c4a..a6cae0c1720 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -40,10 +40,8 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ARCH_SUPPORTS_FTRACE_OPS 1
-#ifdef CONFIG_X86_64
 #define ARCH_SUPPORTS_FTRACE_SAVE_REGS
 #endif
-#endif
 
 #ifndef __ASSEMBLY__
 extern void mcount(void);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 5da11d1b85a..46caa5649a5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1123,6 +1123,7 @@ ftrace_call:
 	popl %edx
 	popl %ecx
 	popl %eax
+ftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
 ftrace_graph_call:
@@ -1134,6 +1135,73 @@ ftrace_stub:
 	ret
 END(ftrace_caller)
 
+ENTRY(ftrace_regs_caller)
+	pushf	/* push flags before compare (in cs location) */
+	cmpl $0, function_trace_stop
+	jne ftrace_restore_flags
+
+	/*
+	 * i386 does not save SS and ESP when coming from kernel.
+	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
+	 * Unfortunately, that means eflags must be at the same location
+	 * as the current return ip is. We move the return ip into the
+	 * ip location, and move flags into the return ip location.
+	 */
+	pushl 4(%esp)			/* save return ip into ip slot */
+	subl $MCOUNT_INSN_SIZE, (%esp)	/* Adjust ip */
+
+	pushl $0			/* Load 0 into orig_ax */
+	pushl %gs
+	pushl %fs
+	pushl %es
+	pushl %ds
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
+
+	movl 13*4(%esp), %eax	/* Get the saved flags */
+	movl %eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
+				/* clobbering return ip */
+	movl $__KERNEL_CS,13*4(%esp)
+
+	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
+	movl 0x4(%ebp), %edx	/* Load parent ip (2nd parameter) */
+	lea  (%esp), %ecx
+	pushl %ecx		/* Save pt_regs as 4th parameter */
+	leal function_trace_op, %ecx	/* Save ftrace_ops in 3rd parameter */
+
+GLOBAL(ftrace_regs_call)
+	call ftrace_stub
+
+	addl $4, %esp		/* Skip pt_regs */
+	movl 14*4(%esp), %eax	/* Move flags back into cs */
+	movl %eax, 13*4(%esp)	/* Needed to keep addl from modifying flags */
+	movl 12*4(%esp), %eax	/* Get return ip from regs->ip */
+	addl $MCOUNT_INSN_SIZE, %eax
+	movl %eax, 14*4(%esp)	/* Put return ip back for ret */
+
+	popl %ebx
+	popl %ecx
+	popl %edx
+	popl %esi
+	popl %edi
+	popl %ebp
+	popl %eax
+	popl %ds
+	popl %es
+	popl %fs
+	popl %gs
+	addl $8, %esp		/* Skip orig_ax and ip */
+	popf			/* Pop flags at end (no addl to corrupt flags) */
+	jmp ftrace_ret
+
+ftrace_restore_flags:
+	popf
+	jmp ftrace_stub
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b90eb1a1307..1d414029f1d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -206,7 +206,6 @@ static int
 ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
 		   unsigned const char *new_code);
 
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
 /*
  * Should never be called:
  *  As it is only called by __ftrace_replace_code() which is called by
@@ -221,7 +220,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 	WARN_ON(1);
 	return -EINVAL;
 }
-#endif
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
@@ -237,7 +235,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 	ret = ftrace_modify_code(ip, old, new);
 
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
 	/* Also update the regs callback function */
 	if (!ret) {
 		ip = (unsigned long)(&ftrace_regs_call);
@@ -245,7 +242,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 		new = ftrace_call_replace(ip, (unsigned long)func);
 		ret = ftrace_modify_code(ip, old, new);
 	}
-#endif
 
 	atomic_dec(&modifying_ftrace_code);
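
For context (not part of the commit): with ARCH_SUPPORTS_FTRACE_SAVE_REGS now defined on i386 as well, a tracer that sets FTRACE_OPS_FL_SAVE_REGS in its ftrace_ops is routed through the ftrace_regs_caller trampoline added above and receives the pt_regs frame it builds. The following is a minimal sketch of such a caller, assuming the four-argument callback signature of this kernel series; the module itself, the "schedule" filter target, and the pr_info output are illustrative only and do not appear in this patch.

	#include <linux/module.h>
	#include <linux/ftrace.h>
	#include <linux/ptrace.h>
	#include <linux/string.h>

	/* Callback receives the pt_regs frame assembled by ftrace_regs_caller. */
	static void notrace my_regs_callback(unsigned long ip, unsigned long parent_ip,
					     struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* On i386, regs->ip and regs->flags come from the swapped stack slots. */
		if (regs)
			pr_info("traced %pS, flags=%lx\n", (void *)ip, regs->flags);
	}

	static struct ftrace_ops my_ops = {
		.func  = my_regs_callback,
		.flags = FTRACE_OPS_FL_SAVE_REGS,	/* request a full pt_regs snapshot */
	};

	static int __init my_init(void)
	{
		/* Trace a single function (example target) to keep output manageable. */
		ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
		return register_ftrace_function(&my_ops);
	}

	static void __exit my_exit(void)
	{
		unregister_ftrace_function(&my_ops);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");

Callbacks that do not set FTRACE_OPS_FL_SAVE_REGS continue to go through the lighter ftrace_caller path shown earlier in entry_32.S.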