diff options
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
| -rw-r--r-- | kernel/trace/trace_functions_graph.c | 104 | 
1 file changed, 74 insertions, 30 deletions
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index b5c09242683..4de3e57f723 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -38,15 +38,6 @@ struct fgraph_data {  #define TRACE_GRAPH_INDENT	2 -/* Flag options */ -#define TRACE_GRAPH_PRINT_OVERRUN	0x1 -#define TRACE_GRAPH_PRINT_CPU		0x2 -#define TRACE_GRAPH_PRINT_OVERHEAD	0x4 -#define TRACE_GRAPH_PRINT_PROC		0x8 -#define TRACE_GRAPH_PRINT_DURATION	0x10 -#define TRACE_GRAPH_PRINT_ABS_TIME	0x20 -#define TRACE_GRAPH_PRINT_IRQS		0x40 -  static unsigned int max_depth;  static struct tracer_opt trace_opts[] = { @@ -64,11 +55,13 @@ static struct tracer_opt trace_opts[] = {  	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },  	/* Display interrupts */  	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, +	/* Display function name after trailing } */ +	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },  	{ } /* Empty entry */  };  static struct tracer_flags tracer_flags = { -	/* Don't display overruns and proc by default */ +	/* Don't display overruns, proc, or tail by default */  	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |  	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,  	.opts = trace_opts @@ -82,9 +75,9 @@ static struct trace_array *graph_array;   * to fill in space into DURATION column.   */  enum { -	DURATION_FILL_FULL  = -1, -	DURATION_FILL_START = -2, -	DURATION_FILL_END   = -3, +	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT, +	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT, +	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,  };  static enum print_line_t @@ -114,16 +107,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,  		return -EBUSY;  	} +	/* +	 * The curr_ret_stack is an index to ftrace return stack of +	 * current task.  Its value should be in [0, FTRACE_RETFUNC_ +	 * DEPTH) when the function graph tracer is used.  
To support +	 * filtering out specific functions, it makes the index +	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH) +	 * so when it sees a negative index the ftrace will ignore +	 * the record.  And the index gets recovered when returning +	 * from the filtered function by adding the FTRACE_NOTRACE_ +	 * DEPTH and then it'll continue to record functions normally. +	 * +	 * The curr_ret_stack is initialized to -1 and get increased +	 * in this function.  So it can be less than -1 only if it was +	 * filtered out via ftrace_graph_notrace_addr() which can be +	 * set from set_graph_notrace file in debugfs by user. +	 */ +	if (current->curr_ret_stack < -1) +		return -EBUSY; +  	calltime = trace_clock_local();  	index = ++current->curr_ret_stack; +	if (ftrace_graph_notrace_addr(func)) +		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;  	barrier();  	current->ret_stack[index].ret = ret;  	current->ret_stack[index].func = func;  	current->ret_stack[index].calltime = calltime;  	current->ret_stack[index].subtime = 0;  	current->ret_stack[index].fp = frame_pointer; -	*depth = index; +	*depth = current->curr_ret_stack;  	return 0;  } @@ -137,7 +151,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,  	index = current->curr_ret_stack; -	if (unlikely(index < 0)) { +	/* +	 * A negative index here means that it's just returned from a +	 * notrace'd function.  Recover index to get an original +	 * return address.  See ftrace_push_return_trace(). +	 * +	 * TODO: Need to check whether the stack gets corrupted. 
+	 */ +	if (index < 0) +		index += FTRACE_NOTRACE_DEPTH; + +	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {  		ftrace_graph_stop();  		WARN_ON(1);  		/* Might as well panic, otherwise we have no where to go */ @@ -193,6 +217,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)  	trace.rettime = trace_clock_local();  	barrier();  	current->curr_ret_stack--; +	/* +	 * The curr_ret_stack can be less than -1 only if it was +	 * filtered out and it's about to return from the function. +	 * Recover the index and continue to trace normal functions. +	 */ +	if (current->curr_ret_stack < -1) { +		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH; +		return ret; +	}  	/*  	 * The trace should run after decrementing the ret counter @@ -230,7 +263,7 @@ int __trace_graph_entry(struct trace_array *tr,  		return 0;  	entry	= ring_buffer_event_data(event);  	entry->graph_ent			= *trace; -	if (!filter_current_check_discard(buffer, call, entry, event)) +	if (!call_filter_check_discard(call, entry, buffer, event))  		__buffer_unlock_commit(buffer, event);  	return 1; @@ -259,10 +292,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)  	/* trace it when it is-nested-in or is a function enabled. */  	if ((!(trace->depth || ftrace_graph_addr(trace->func)) || -	     ftrace_graph_ignore_irqs()) || +	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||  	    (max_depth && trace->depth >= max_depth))  		return 0; +	/* +	 * Do not trace a function if it's filtered by set_graph_notrace. +	 * Make the index of ret stack negative to indicate that it should +	 * ignore further functions.  But it needs its own ret stack entry +	 * to recover the original index in order to continue tracing after +	 * returning from the function. 
+	 */ +	if (ftrace_graph_notrace_addr(trace->func)) +		return 1; +  	local_irq_save(flags);  	cpu = raw_smp_processor_id();  	data = per_cpu_ptr(tr->trace_buffer.data, cpu); @@ -335,7 +378,7 @@ void __trace_graph_return(struct trace_array *tr,  		return;  	entry	= ring_buffer_event_data(event);  	entry->ret				= *trace; -	if (!filter_current_check_discard(buffer, call, entry, event)) +	if (!call_filter_check_discard(call, entry, buffer, event))  		__buffer_unlock_commit(buffer, event);  } @@ -652,7 +695,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,  	}  	/* No overhead */ -	ret = print_graph_duration(DURATION_FILL_START, s, flags); +	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);  	if (ret != TRACE_TYPE_HANDLED)  		return ret; @@ -664,7 +707,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,  	if (!ret)  		return TRACE_TYPE_PARTIAL_LINE; -	ret = print_graph_duration(DURATION_FILL_END, s, flags); +	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);  	if (ret != TRACE_TYPE_HANDLED)  		return ret; @@ -729,14 +772,14 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s,  			return TRACE_TYPE_HANDLED;  	/* No real adata, just filling the column with spaces */ -	switch (duration) { -	case DURATION_FILL_FULL: +	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { +	case FLAGS_FILL_FULL:  		ret = trace_seq_puts(s, "              |  ");  		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; -	case DURATION_FILL_START: +	case FLAGS_FILL_START:  		ret = trace_seq_puts(s, "  ");  		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; -	case DURATION_FILL_END: +	case FLAGS_FILL_END:  		ret = trace_seq_puts(s, " |");  		return ret ? 
TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;  	} @@ -852,7 +895,7 @@ print_graph_entry_nested(struct trace_iterator *iter,  	}  	/* No time */ -	ret = print_graph_duration(DURATION_FILL_FULL, s, flags); +	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);  	if (ret != TRACE_TYPE_HANDLED)  		return ret; @@ -1126,9 +1169,10 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,  	 * If the return function does not have a matching entry,  	 * then the entry was lost. Instead of just printing  	 * the '}' and letting the user guess what function this -	 * belongs to, write out the function name. +	 * belongs to, write out the function name. Always do +	 * that if the funcgraph-tail option is enabled.  	 */ -	if (func_match) { +	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {  		ret = trace_seq_puts(s, "}\n");  		if (!ret)  			return TRACE_TYPE_PARTIAL_LINE; @@ -1172,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,  		return TRACE_TYPE_PARTIAL_LINE;  	/* No time */ -	ret = print_graph_duration(DURATION_FILL_FULL, s, flags); +	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);  	if (ret != TRACE_TYPE_HANDLED)  		return ret; @@ -1426,7 +1470,8 @@ void graph_trace_close(struct trace_iterator *iter)  	}  } -static int func_graph_set_flag(u32 old_flags, u32 bit, int set) +static int +func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  {  	if (bit == TRACE_GRAPH_PRINT_IRQS)  		ftrace_graph_skip_irqs = !set; @@ -1454,7 +1499,6 @@ static struct tracer graph_trace __tracer_data = {  	.pipe_open	= graph_trace_open,  	.close		= graph_trace_close,  	.pipe_close	= graph_trace_close, -	.wait_pipe	= poll_wait_pipe,  	.init		= graph_trace_init,  	.reset		= graph_trace_reset,  	.print_line	= print_graph_function,  | 
