author     Steven Rostedt (Red Hat) <rostedt@goodmis.org>     2013-03-13 20:43:57 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-05-07 20:33:08 -0700
commit     2fbd7c15d610fcbeee37feb14dcf76104c2b33e8
tree       57e8ca1d3734cdad38945ce1d72ee9227779dd8e
parent     d9d89e45ff101119f7e8faea26f3de08c5999159
tracing: Use stack of calling function for stack tracer
commit 87889501d0adfae10e3b0f0e6f2d7536eed9ae84 upstream.
Use the stack of stack_trace_call() instead of that of check_stack()
as the test pointer for the max stack size. This makes the check a bit
cleaner and a little more accurate.
Adding stable, as a later fix depends on this patch.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  kernel/trace/trace_stack.c | 12
1 file changed, 7 insertions, 5 deletions
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 83a8b5b7bd3..856b407375f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -39,20 +39,21 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long *stack)
 {
         unsigned long this_size, flags;
         unsigned long *p, *top, *start;
         int i;
 
-        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
         this_size = THREAD_SIZE - this_size;
 
         if (this_size <= max_stack_size)
                 return;
 
         /* we do not handle interrupt stacks yet */
-        if (!object_is_on_stack(&this_size))
+        if (!object_is_on_stack(stack))
                 return;
 
         local_irq_save(flags);
@@ -73,7 +74,7 @@ static inline void check_stack(void)
          * Now find where in the stack these are.
          */
         i = 0;
-        start = &this_size;
+        start = stack;
         top = (unsigned long *)
                 (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 
@@ -113,6 +114,7 @@ static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
                  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+        unsigned long stack;
         int cpu;
 
         preempt_disable_notrace();
@@ -122,7 +124,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
         if (per_cpu(trace_active, cpu)++ != 0)
                 goto out;
 
-        check_stack();
+        check_stack(&stack);
 
  out:
         per_cpu(trace_active, cpu)--;
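
For reference, a minimal userspace sketch of the arithmetic this patch relies on: masking a pointer that lives on the stack with THREAD_SIZE-1 gives its offset within a THREAD_SIZE-aligned stack area, and subtracting that offset from THREAD_SIZE gives the amount of stack currently in use. This is not the kernel code; the names check_stack_sketch, caller_sketch and THREAD_SIZE_SKETCH are illustrative stand-ins, and an ordinary userspace stack is not aligned this way, so the printed value only demonstrates the computation.

#include <stdio.h>

#define THREAD_SIZE_SKETCH (8 * 1024)   /* stand-in for the kernel's THREAD_SIZE */

/*
 * Estimate how much stack sits between the given reference pointer and the
 * top of a THREAD_SIZE_SKETCH-aligned stack area (stack assumed to grow down):
 * offset within the area = address & (size - 1); used space = size - offset.
 */
static void check_stack_sketch(unsigned long *stack)
{
        unsigned long this_size;

        this_size = ((unsigned long)stack) & (THREAD_SIZE_SKETCH - 1);
        this_size = THREAD_SIZE_SKETCH - this_size;

        printf("reference %p -> roughly %lu bytes of stack in use\n",
               (void *)stack, this_size);
}

/*
 * The caller passes the address of one of its own locals, so the estimate
 * starts at the caller's frame rather than one frame deeper, in
 * check_stack_sketch() itself.
 */
static void caller_sketch(void)
{
        unsigned long stack;

        check_stack_sketch(&stack);
}

int main(void)
{
        caller_sketch();
        return 0;
}

The caller_sketch/check_stack_sketch pair mirrors the shape of the patch above, where stack_trace_call() declares a local "unsigned long stack" and passes &stack to check_stack(), so the depth test is keyed off the calling function's frame.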