Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
 -rw-r--r--  kernel/trace/trace_irqsoff.c | 85
 1 file changed, 49 insertions(+), 36 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2aefbee93a6..9bb104f748d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -151,16 +151,11 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 
 	atomic_dec(&data->disabled);
 }
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = irqsoff_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	int cpu;
 
@@ -175,7 +170,7 @@ static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
 	for_each_possible_cpu(cpu)
 		per_cpu(tracing_cpu, cpu) = 0;
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
 
 	return start_irqsoff_tracer(irqsoff_trace, set);
@@ -266,7 +261,8 @@ __trace_function(struct trace_array *tr,
 #else
 #define __trace_function trace_function
 
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
 	return -EINVAL;
 }
@@ -301,13 +297,13 @@ static void irqsoff_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
-		if (delta <= tracing_max_latency)
+		if (delta <= tr->max_latency)
 			return 0;
 	}
 	return 1;
@@ -331,13 +327,13 @@ check_critical_timing(struct trace_array *tr,
 
 	pc = preempt_count();
 
-	if (!report_latency(delta))
+	if (!report_latency(tr, delta))
 		goto out;
 
 	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
-	if (!report_latency(delta))
+	if (!report_latency(tr, delta))
 		goto out_unlock;
 
 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
@@ -350,7 +346,7 @@ check_critical_timing(struct trace_array *tr,
 	data->critical_end = parent_ip;
 
 	if (likely(!is_tracing_stopped())) {
-		tracing_max_latency = delta;
+		tr->max_latency = delta;
 		update_max_tr_single(tr, current, cpu);
 	}
 
@@ -498,14 +494,14 @@ void trace_hardirqs_off(void)
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
-void trace_hardirqs_on_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-void trace_hardirqs_off_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -529,7 +525,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-static int register_irqsoff_function(int graph, int set)
+static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
 
@@ -541,7 +537,7 @@ static int register_irqsoff_function(int graph, int set)
 		ret = register_ftrace_graph(&irqsoff_graph_return,
 					    &irqsoff_graph_entry);
 	else
-		ret = register_ftrace_function(&trace_ops);
+		ret = register_ftrace_function(tr->ops);
 
 	if (!ret)
 		function_enabled = true;
@@ -549,7 +545,7 @@ static int register_irqsoff_function(int graph, int set)
 	return ret;
 }
 
-static void unregister_irqsoff_function(int graph)
+static void unregister_irqsoff_function(struct trace_array *tr, int graph)
 {
 	if (!function_enabled)
 		return;
@@ -557,23 +553,25 @@ static void unregister_irqsoff_function(int graph)
 	if (graph)
 		unregister_ftrace_graph();
 	else
-		unregister_ftrace_function(&trace_ops);
+		unregister_ftrace_function(tr->ops);
 
 	function_enabled = false;
 }
 
-static void irqsoff_function_set(int set)
+static void irqsoff_function_set(struct trace_array *tr, int set)
 {
 	if (set)
-		register_irqsoff_function(is_graph(), 1);
+		register_irqsoff_function(tr, is_graph(), 1);
 	else
-		unregister_irqsoff_function(is_graph());
+		unregister_irqsoff_function(tr, is_graph());
 }
 
-static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
+static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
 {
+	struct tracer *tracer = tr->current_trace;
+
 	if (mask & TRACE_ITER_FUNCTION)
-		irqsoff_function_set(set);
+		irqsoff_function_set(tr, set);
 
 	return trace_keep_overwrite(tracer, mask, set);
 }
@@ -582,7 +580,7 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	int ret;
 
-	ret = register_irqsoff_function(graph, 0);
+	ret = register_irqsoff_function(tr, graph, 0);
 
 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
@@ -596,25 +594,37 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;
 
-	unregister_irqsoff_function(graph);
+	unregister_irqsoff_function(tr, graph);
 }
 
-static void __irqsoff_tracer_init(struct trace_array *tr)
+static bool irqsoff_busy;
+
+static int __irqsoff_tracer_init(struct trace_array *tr)
 {
+	if (irqsoff_busy)
+		return -EBUSY;
+
 	save_flags = trace_flags;
 
 	/* non overwrite screws up the latency tracers */
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
 
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	irqsoff_trace = tr;
 	/* make sure that the tracer is visible */
 	smp_wmb();
 	tracing_reset_online_cpus(&tr->trace_buffer);
 
-	if (start_irqsoff_tracer(tr, is_graph()))
+	ftrace_init_array_ops(tr, irqsoff_tracer_call);
+
+	/* Only toplevel instance supports graph tracing */
+	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+				      is_graph())))
 		printk(KERN_ERR "failed to start irqsoff tracer\n");
+
+	irqsoff_busy = true;
+	return 0;
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
@@ -626,6 +636,9 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
 
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	ftrace_reset_array_ops(tr);
+
+	irqsoff_busy = false;
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -643,8 +656,7 @@ static int irqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF;
 
-	__irqsoff_tracer_init(tr);
-	return 0;
+	return __irqsoff_tracer_init(tr);
 }
 static struct tracer irqsoff_tracer __read_mostly =
 {
@@ -664,6 +676,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif
 	.open           = irqsoff_trace_open,
 	.close          = irqsoff_trace_close,
+	.allow_instances = true,
 	.use_max_tr	= true,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
@@ -676,8 +689,7 @@ static int preemptoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_PREEMPT_OFF;
 
-	__irqsoff_tracer_init(tr);
-	return 0;
+	return __irqsoff_tracer_init(tr);
 }
 
 static struct tracer preemptoff_tracer __read_mostly =
@@ -698,6 +710,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
+	.allow_instances = true,
 	.use_max_tr	= true,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
@@ -712,8 +725,7 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
 	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
-	__irqsoff_tracer_init(tr);
-	return 0;
+	return __irqsoff_tracer_init(tr);
 }
 
 static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -734,6 +746,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 #endif
 	.open		= irqsoff_trace_open,
 	.close		= irqsoff_trace_close,
+	.allow_instances = true,
 	.use_max_tr	= true,
 };
 
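The heart of this change is in __irqsoff_tracer_init() and irqsoff_tracer_reset(): the tracer drops its global trace_ops in favor of per-trace-array ops (ftrace_init_array_ops()/tr->ops), moves the max-latency bookkeeping into tr->max_latency, and guards itself with an irqsoff_busy flag so only one trace instance can run an irqsoff-family tracer at a time; a second init attempt fails with -EBUSY. Below is a minimal user-space C sketch of that single-owner pattern, not kernel code: struct trace_array here is a stand-in holding only the one field the sketch touches, and the function bodies are reduced to the guard itself.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct trace_array (illustration only). */
struct trace_array {
	const char *name;
	unsigned long long max_latency;	/* was the global tracing_max_latency */
};

static bool irqsoff_busy;	/* mirrors the static flag the patch adds */

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;	/* another instance already owns the tracer */

	tr->max_latency = 0;	/* per-array now; reset on every (re)init */
	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	(void)tr;
	irqsoff_busy = false;	/* hand the tracer back for other instances */
}

int main(void)
{
	struct trace_array top = { "top", 0 }, inst = { "foo", 0 };

	printf("%d\n", __irqsoff_tracer_init(&top));	/* 0: first owner wins */
	printf("%d\n", __irqsoff_tracer_init(&inst));	/* -16: -EBUSY */
	irqsoff_tracer_reset(&top);
	printf("%d\n", __irqsoff_tracer_init(&inst));	/* 0: tracer was freed */
	return 0;
}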

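Similarly, the patched report_latency() can be read in isolation: with a non-zero tracing_thresh, every delta at or above the threshold is reported; otherwise only a delta that beats the array's current maximum is. A compacted sketch of that decision, reusing the struct trace_array stand-in from the sketch above and assuming cycle_t is an unsigned 64-bit delta as in the kernel:

typedef unsigned long long cycle_t;	/* assumption: u64 timestamp delta */

static cycle_t tracing_thresh;	/* 0 selects "track the maximum" mode */

/* Nonzero return means the latency should be recorded. Same decision as
 * the patched report_latency(), with the early returns flattened. */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh)
		return delta >= tracing_thresh;
	return delta > tr->max_latency;
}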