| author | Michal Marek <mmarek@suse.cz> | 2011-03-09 16:15:44 +0100 | 
|---|---|---|
| committer | Michal Marek <mmarek@suse.cz> | 2011-03-09 16:15:44 +0100 | 
| commit | 2d8ad8719591fa803b0d589ed057fa46f49b7155 (patch) | |
| tree | 4ae051577dad1161c91dafbf4207bb10a9dc91bb /kernel/trace/trace_stack.c | |
| parent | 9b4ce7bce5f30712fd926ab4599a803314a07719 (diff) | |
| parent | c56eb8fb6dccb83d9fe62fd4dc00c834de9bc470 (diff) | |
Merge commit 'v2.6.38-rc1' into kbuild/packaging
Diffstat (limited to 'kernel/trace/trace_stack.c')
| -rw-r--r-- | kernel/trace/trace_stack.c | 33 | 
1 file changed, 29 insertions, 4 deletions
```diff
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee3..4c5dead0c23 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -110,12 +110,12 @@ static inline void check_stack(void)
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
-	int cpu, resched;
+	int cpu;
 
 	if (unlikely(!ftrace_enabled || stack_trace_disabled))
 		return;
 
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 
 	cpu = raw_smp_processor_id();
 	/* no atomic needed, we only modify this variable by this cpu */
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
 	per_cpu(trace_active, cpu)--;
 	/* prevent recursion in schedule */
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	unsigned long val, flags;
 	char buf[64];
 	int ret;
+	int cpu;
 
 	if (count >= sizeof(buf))
 		return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
+
+	/*
+	 * In case we trace inside arch_spin_lock() or after (NMI),
+	 * we will cause circular lock, so we also need to increase
+	 * the percpu trace_active here.
+	 */
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
 	arch_spin_unlock(&max_stack_lock);
+
+	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
 
 	return count;
@@ -183,6 +195,7 @@ static const struct file_operations stack_max_size_fops = {
 	.open		= tracing_open_generic,
 	.read		= stack_max_size_read,
 	.write		= stack_max_size_write,
+	.llseek		= default_llseek,
 };
 
 static void *
@@ -206,7 +219,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	int cpu;
+
 	local_irq_disable();
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
@@ -217,7 +236,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+	int cpu;
+
 	arch_spin_unlock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)--;
+
 	local_irq_enable();
 }
 
@@ -225,7 +250,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
 	unsigned long addr = stack_dump_trace[i];
 
-	return seq_printf(m, "%pF\n", (void *)addr);
+	return seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)
```
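
The core of the change above is the recursion guard around max_stack_lock: stack_max_size_write() and the seq_file iterator (t_start()/t_stop()) now raise the per-CPU trace_active counter before taking the lock, so that stack_trace_call(), if it fires inside the locked region (for example from an NMI), bails out instead of trying to take the same lock again. Below is a minimal user-space sketch of that pattern, using a thread-local counter and a pthread spinlock as stand-ins for the per-CPU counter and arch_spin_lock(); the names trace_active, max_stack_lock, stack_trace_call and stack_max_size_write are kept from the kernel code for readability, but everything else is illustrative, not the kernel API.

```c
/*
 * User-space sketch of the recursion guard this patch adds around
 * max_stack_lock.  The kernel uses a per-CPU trace_active counter and
 * arch_spin_lock(); a thread-local counter and a pthread spinlock stand
 * in here purely to illustrate the pattern -- this is not the kernel API.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t max_stack_lock;
static __thread int trace_active;	/* per_cpu(trace_active, cpu) in the kernel */
static long max_stack_size;

/* Stand-in for stack_trace_call(): may fire while max_stack_lock is held. */
static void stack_trace_call(void)
{
	/* Bail out if this context is already inside a guarded region. */
	if (trace_active++ != 0)
		goto out;

	pthread_spin_lock(&max_stack_lock);
	/* ... check_stack() would record a new maximum stack depth here ... */
	pthread_spin_unlock(&max_stack_lock);
 out:
	trace_active--;
}

/* Stand-in for stack_max_size_write(): resets the recorded maximum. */
static void stack_max_size_write(long val)
{
	/*
	 * Raise trace_active before taking the lock, so a tracer callback
	 * that fires inside the locked region returns early instead of
	 * trying to take max_stack_lock a second time (circular lock).
	 */
	trace_active++;
	pthread_spin_lock(&max_stack_lock);
	max_stack_size = val;
	stack_trace_call();	/* simulated re-entry: returns without locking */
	pthread_spin_unlock(&max_stack_lock);
	trace_active--;
}

int main(void)
{
	pthread_spin_init(&max_stack_lock, PTHREAD_PROCESS_PRIVATE);
	stack_max_size_write(0);
	printf("max_stack_size reset to %ld without deadlocking\n", max_stack_size);
	return 0;
}
```

Built with `cc -pthread`, the nested stack_trace_call() returns immediately, which is what the per-CPU counter buys the kernel when the tracer fires under max_stack_lock. In the kernel the guard only stays meaningful because the counter is raised with preemption or interrupts already disabled (preempt_disable_notrace() in the callback, local_irq_save()/local_irq_disable() in the write and seq_file paths), so the task cannot migrate to another CPU between the increment and the decrement.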
