Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--   kernel/trace/trace_functions_graph.c   209
1 file changed, 201 insertions, 8 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6f233698518..76b05980225 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -15,15 +15,19 @@
 #include "trace.h"
 #include "trace_output.h"
 
+/* When set, irq functions will be ignored */
+static int ftrace_graph_skip_irqs;
+
 struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
+	int		depth_irq;
 	int		ignore;
 	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
 };
 
 struct fgraph_data {
-	struct fgraph_cpu_data		*cpu_data;
+	struct fgraph_cpu_data __percpu *cpu_data;
 
 	/* Place to preserve last processed entry. */
 	struct ftrace_graph_ent_entry	ent;
@@ -41,6 +45,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_PROC		0x8
 #define TRACE_GRAPH_PRINT_DURATION	0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME	0x20
+#define TRACE_GRAPH_PRINT_IRQS		0x40
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */
@@ -55,13 +60,15 @@ static struct tracer_opt trace_opts[] = {
 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
 	/* Display absolute time of an entry */
 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
+	/* Display interrupts */
+	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
 	{ } /* Empty entry */
 };
 
 static struct tracer_flags tracer_flags = {
 	/* Don't display overruns and proc by default */
 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
-	       TRACE_GRAPH_PRINT_DURATION,
+	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
 	.opts = trace_opts
 };
 
@@ -204,6 +211,14 @@ int __trace_graph_entry(struct trace_array *tr,
 	return 1;
 }
 
+static inline int ftrace_graph_ignore_irqs(void)
+{
+	if (!ftrace_graph_skip_irqs)
+		return 0;
+
+	return in_irq();
+}
+
 int trace_graph_entry(struct ftrace_graph_ent *trace)
 {
 	struct trace_array *tr = graph_array;
@@ -218,7 +233,8 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 		return 0;
 
 	/* trace it when it is-nested-in or is a function enabled. */
-	if (!(trace->depth || ftrace_graph_addr(trace->func)))
+	if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
+	      ftrace_graph_ignore_irqs())
 		return 0;
 
 	local_irq_save(flags);
@@ -246,6 +262,34 @@ int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
 		return trace_graph_entry(trace);
 }
 
+static void
+__trace_graph_function(struct trace_array *tr,
+		unsigned long ip, unsigned long flags, int pc)
+{
+	u64 time = trace_clock_local();
+	struct ftrace_graph_ent ent = {
+		.func  = ip,
+		.depth = 0,
+	};
+	struct ftrace_graph_ret ret = {
+		.func     = ip,
+		.depth    = 0,
+		.calltime = time,
+		.rettime  = time,
+	};
+
+	__trace_graph_entry(tr, &ent, flags, pc);
+	__trace_graph_return(tr, &ret, flags, pc);
+}
+
+void
+trace_graph_function(struct trace_array *tr,
+		unsigned long ip, unsigned long parent_ip,
+		unsigned long flags, int pc)
+{
+	__trace_graph_function(tr, ip, flags, pc);
+}
+
 void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
@@ -649,8 +693,9 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 
 	/* Print nsecs (we don't want to exceed 7 numbers) */
 	if (len < 7) {
-		snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
-			 nsecs_rem);
+		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
+
+		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 		ret = trace_seq_printf(s, ".%s", nsecs_str);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
@@ -855,6 +900,108 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 	return 0;
 }
 
+/*
+ * Entry check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just entered irq code
+ *
+ * returns 0 if
+ *  - funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_entry(struct trace_iterator *iter, u32 flags,
+		unsigned long addr, int depth)
+{
+	int cpu = iter->cpu;
+	int *depth_irq;
+	struct fgraph_data *data = iter->private;
+
+	/*
+	 * If we are either displaying irqs, or we got called as
+	 * a graph event and private data does not exist,
+	 * then we bypass the irq check.
+	 */
+	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+	    (!data))
+		return 0;
+
+	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+	/*
+	 * We are inside the irq code
+	 */
+	if (*depth_irq >= 0)
+		return 1;
+
+	if ((addr < (unsigned long)__irqentry_text_start) ||
+	    (addr >= (unsigned long)__irqentry_text_end))
+		return 0;
+
+	/*
+	 * We are entering irq code.
+	 */
+	*depth_irq = depth;
+	return 1;
+}
+
+/*
+ * Return check for irq code
+ *
+ * returns 1 if
+ *  - we are inside irq code
+ *  - we just left irq code
+ *
+ * returns 0 if
+ *  - funcgraph-irqs option is set
+ *  - we are not inside irq code
+ */
+static int
+check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
+{
+	int cpu = iter->cpu;
+	int *depth_irq;
+	struct fgraph_data *data = iter->private;
+
+	/*
+	 * If we are either displaying irqs, or we got called as
+	 * a graph event and private data does not exist,
+	 * then we bypass the irq check.
+	 */
+	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
+	    (!data))
+		return 0;
+
+	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
+	/*
+	 * We are not inside the irq code.
+	 */
+	if (*depth_irq == -1)
+		return 0;
+
+	/*
+	 * We are inside the irq code, and this is the return entry.
+	 * Let's not trace it and clear the entry depth, since
+	 * we are out of irq code.
+	 *
+	 * This condition ensures that we 'leave the irq code' once
+	 * we are out of the entry depth. Thus protecting us from
+	 * the RETURN entry loss.
+	 */
+	if (*depth_irq >= depth) {
+		*depth_irq = -1;
+		return 1;
+	}
+
+	/*
+	 * We are inside the irq code, and this is not the entry.
+	 */
+	return 1;
+}
+
 static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 			struct trace_iterator *iter, u32 flags)
@@ -865,6 +1012,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 	static enum print_line_t ret;
 	int cpu = iter->cpu;
 
+	if (check_irq_entry(iter, flags, call->func, call->depth))
+		return TRACE_TYPE_HANDLED;
+
 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
 		return TRACE_TYPE_PARTIAL_LINE;
 
@@ -902,6 +1052,9 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	int ret;
 	int i;
 
+	if (check_irq_return(iter, flags, trace->depth))
+		return TRACE_TYPE_HANDLED;
+
 	if (data) {
 		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
@@ -1054,7 +1207,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 
 
 enum print_line_t
-print_graph_function_flags(struct trace_iterator *iter, u32 flags)
+__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
 	struct ftrace_graph_ent_entry *field;
 	struct fgraph_data *data = iter->private;
@@ -1117,7 +1270,18 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 static enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-	return print_graph_function_flags(iter, tracer_flags.val);
+	return __print_graph_function_flags(iter, tracer_flags.val);
+}
+
+enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
+					     u32 flags)
+{
+	if (trace_flags & TRACE_ITER_LATENCY_FMT)
+		flags |= TRACE_GRAPH_PRINT_DURATION;
+	else
+		flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+	return __print_graph_function_flags(iter, flags);
 }
 
 static enum print_line_t
@@ -1149,7 +1313,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
 	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
 }
 
-void print_graph_headers_flags(struct seq_file *s, u32 flags)
+static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
 	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
 
@@ -1190,6 +1354,23 @@ void print_graph_headers(struct seq_file *s)
 	print_graph_headers_flags(s, tracer_flags.val);
 }
 
+void print_graph_headers_flags(struct seq_file *s, u32 flags)
+{
+	struct trace_iterator *iter = s->private;
+
+	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+		/* print nothing if the buffers are empty */
+		if (trace_empty(iter))
+			return;
+
+		print_trace_header(s, iter);
+		flags |= TRACE_GRAPH_PRINT_DURATION;
+	} else
+		flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+
+	__print_graph_headers_flags(s, flags);
+}
+
 void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
@@ -1210,9 +1391,12 @@ void graph_trace_open(struct trace_iterator *iter)
 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
+
 		*pid = -1;
 		*depth = 0;
 		*ignore = 0;
+		*depth_irq = -1;
 	}
 
 	iter->private = data;
@@ -1235,6 +1419,14 @@ void graph_trace_close(struct trace_iterator *iter)
 	}
 }
 
+static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
+{
+	if (bit == TRACE_GRAPH_PRINT_IRQS)
+		ftrace_graph_skip_irqs = !set;
+
+	return 0;
+}
+
 static struct trace_event_functions graph_functions = {
 	.trace		= print_graph_function_event,
 };
@@ -1261,6 +1453,7 @@ static struct tracer graph_trace __read_mostly = {
 	.print_line	= print_graph_function,
 	.print_header	= print_graph_headers,
 	.flags		= &tracer_flags,
+	.set_flag	= func_graph_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_function_graph,
 #endif
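
For illustration only (not part of the commit above): a minimal userspace sketch of how the funcgraph-irqs option added by this patch can be exercised once it is applied. It assumes debugfs is mounted at /sys/kernel/debug and the kernel was built with the function graph tracer; the option name comes straight from TRACER_OPT(funcgraph-irqs, ...) in the patch, everything else is plain POSIX I/O.

/*
 * Hypothetical helper, not part of the patch: select the function_graph
 * tracer and hide functions entered from hard irq context.  Writing 0 to
 * options/funcgraph-irqs reaches func_graph_set_flag(), which sets
 * ftrace_graph_skip_irqs, so trace_graph_entry() drops entries for which
 * in_irq() is true.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACING "/sys/kernel/debug/tracing/"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
	return 0;
}

int main(void)
{
	/* Use the function graph tracer. */
	write_str(TRACING "current_tracer", "function_graph");
	/* 1 (the default) shows irq functions; 0 hides them. */
	write_str(TRACING "options/funcgraph-irqs", "0");
	return 0;
}

The option defaults to on because TRACE_GRAPH_PRINT_IRQS is part of tracer_flags.val above. Clearing it acts on both sides of the tracer: new entries from hard irq context are no longer recorded (ftrace_graph_ignore_irqs()), and irq entries already in the ring buffer are filtered out at print time (check_irq_entry()/check_irq_return()).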
