| author | Steven Rostedt (Red Hat) <rostedt@goodmis.org> | 2013-03-13 20:43:57 -0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-05-07 19:57:23 -0700 |
| commit | f42aa66c19796fd453f434be29a039707aec435f (patch) | |
| tree | 106307eb63d30b0e2a5495214506cf19b7bb71a7 /kernel | |
| parent | 02f1fef6377f64ae0ea5b542a39eddf1424b505d (diff) | |
tracing: Use stack of calling function for stack tracer
commit 87889501d0adfae10e3b0f0e6f2d7536eed9ae84 upstream.
Use the stack of stack_trace_call() instead of check_stack() as
the test pointer for max stack size. It is a bit cleaner and a
little more accurate, since the measurement starts at the traced
function's frame rather than inside check_stack() itself.
Adding stable, as a later fix depends on this patch.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
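To see why the choice of test pointer matters, here is a minimal userspace sketch of the same mask arithmetic, not kernel code: THREAD_SIZE, the fake stack range, the two sample addresses, and the depth_from() helper are all assumed values invented for illustration, standing in for the computation at the top of check_stack().

```c
/*
 * Userspace sketch of the stack-depth arithmetic the patch relies on.
 * With a THREAD_SIZE-aligned stack that grows downward, masking an
 * on-stack address with (THREAD_SIZE - 1) gives its offset from the
 * stack base, and THREAD_SIZE minus that offset is the depth in use.
 * All constants and addresses below are made up for illustration.
 */
#include <stdio.h>

#define THREAD_SIZE (8UL * 1024)	/* assumed 8K thread stack */

static unsigned long depth_from(unsigned long addr)
{
	/* offset of 'addr' within its THREAD_SIZE-aligned stack area */
	unsigned long offset = addr & (THREAD_SIZE - 1);

	/* the stack grows down, so everything below 'addr' counts as used */
	return THREAD_SIZE - offset;
}

int main(void)
{
	/* pretend the stack spans [0x10000, 0x12000) and grows downward */
	unsigned long caller_local = 0x11f40;	/* local in stack_trace_call() */
	unsigned long callee_local = 0x11f00;	/* local in check_stack() */

	/*
	 * Measuring from the callee's own local over-counts by the size of
	 * check_stack()'s frame; measuring from the caller's local is what
	 * the patch switches to.
	 */
	printf("depth via callee local: %lu bytes\n", depth_from(callee_local));
	printf("depth via caller local: %lu bytes\n", depth_from(caller_local));
	return 0;
}
```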
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/trace/trace_stack.c | 12 |

1 file changed, 7 insertions, 5 deletions
```diff
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index b0b53b8e4c2..d21f8440e03 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -37,20 +37,21 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long *stack)
 {
 	unsigned long this_size, flags;
 	unsigned long *p, *top, *start;
 	int i;
 
-	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
 	this_size = THREAD_SIZE - this_size;
 
 	if (this_size <= max_stack_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(&this_size))
+	if (!object_is_on_stack(stack))
 		return;
 
 	local_irq_save(flags);
@@ -71,7 +72,7 @@ static inline void check_stack(void)
 	 * Now find where in the stack these are.
 	 */
 	i = 0;
-	start = &this_size;
+	start = stack;
 	top = (unsigned long *)
 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 
@@ -110,6 +111,7 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
+	unsigned long stack;
 	int cpu;
 
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
@@ -122,7 +124,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (per_cpu(trace_active, cpu)++ != 0)
 		goto out;
 
-	check_stack();
+	check_stack(&stack);
 
  out:
 	per_cpu(trace_active, cpu)--;
```
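The call site in stack_trace_call() also wraps check_stack() in a re-entrancy guard so the tracer does not recurse into itself while measuring. Below is a minimal, single-threaded sketch of that pattern, assuming a plain int in place of the per_cpu(trace_active, cpu) counter; the names check_stack_stub and stack_trace_call_sketch are invented for the example and are not part of the kernel source.

```c
/*
 * Single-threaded sketch of the recursion guard around check_stack().
 * A plain counter stands in for the per-CPU trace_active variable.
 */
#include <stdio.h>

static int trace_active;	/* stands in for per_cpu(trace_active, cpu) */

static void check_stack_stub(unsigned long *stack)
{
	printf("checking stack depth from %p\n", (void *)stack);
}

static void stack_trace_call_sketch(void)
{
	unsigned long stack;	/* its address marks the caller's frame */

	/* only the outermost invocation performs the check */
	if (trace_active++ != 0)
		goto out;

	check_stack_stub(&stack);
out:
	trace_active--;
}

int main(void)
{
	stack_trace_call_sketch();
	return 0;
}
```

Passing &stack, a local in the caller, is what lets the new check_stack(unsigned long *stack) measure depth from the traced function's frame instead of from its own.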