author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>		2013-03-13 20:43:57 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>		2013-05-07 19:51:54 -0700
commit		893238186e35bdd59efde4572bcb6d0653c0ec65 (patch)
tree		4da26b69bc9c3432a7a77c653f882ccd2ced8424 /kernel
parent		a393520aa653a5e1133e14a1984b5e5815e01cf6 (diff)
tracing: Use stack of calling function for stack tracer
commit 87889501d0adfae10e3b0f0e6f2d7536eed9ae84 upstream.
Use the stack of stack_trace_call() instead of check_stack() as
the reference pointer when measuring stack usage against the max
stack size. It makes it a bit cleaner and a little more accurate.
Adding stable, as a later fix depends on this patch.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace_stack.c	12
1 file changed, 7 insertions, 5 deletions
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d4545f49242..54d9a9f8c58 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -40,20 +40,21 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long *stack)
 {
 	unsigned long this_size, flags;
 	unsigned long *p, *top, *start;
 	int i;
 
-	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
 	this_size = THREAD_SIZE - this_size;
 
 	if (this_size <= max_stack_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(&this_size))
+	if (!object_is_on_stack(stack))
 		return;
 
 	local_irq_save(flags);
@@ -74,7 +75,7 @@ static inline void check_stack(void)
 	 * Now find where in the stack these are.
 	 */
 	i = 0;
-	start = &this_size;
+	start = stack;
 	top = (unsigned long *)
 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 
@@ -113,6 +114,7 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
+	unsigned long stack;
 	int cpu;
 
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
@@ -125,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (per_cpu(trace_active, cpu)++ != 0)
 		goto out;
 
-	check_stack();
+	check_stack(&stack);
 
  out:
 	per_cpu(trace_active, cpu)--;
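
For reference, the depth calculation check_stack() applies to the pointer it now receives is plain mask-and-subtract arithmetic against a THREAD_SIZE-aligned, downward-growing stack area. Below is a minimal user-space sketch of that calculation only; the 8 KB THREAD_SIZE value and the stack_depth() helper are illustrative assumptions, not part of the patch, and a user-space stack is not actually THREAD_SIZE-aligned, so the printed number is purely for demonstration.

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed 8 KB stack area, power of two */

/*
 * Given an address on a THREAD_SIZE-aligned, downward-growing stack,
 * return the number of bytes in use between the top of the stack area
 * and this address -- the value check_stack() compares against
 * max_stack_size.
 */
static unsigned long stack_depth(unsigned long *stack)
{
	/* offset of the address within its THREAD_SIZE-aligned area */
	unsigned long this_size = (unsigned long)stack & (THREAD_SIZE - 1);

	/* distance from the address up to the top of the area */
	return THREAD_SIZE - this_size;
}

int main(void)
{
	unsigned long marker;	/* plays the role of the 'stack' local */

	printf("depth estimate: %lu bytes\n", stack_depth(&marker));
	return 0;
}

Passing the address of a local declared in stack_trace_call() itself, as the patch does, anchors this calculation one frame closer to the traced function than check_stack()'s own locals, which is where the extra accuracy comes from.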