Diffstat (limited to 'kernel/trace/ftrace.c')

 kernel/trace/ftrace.c | 143
 1 file changed, 120 insertions(+), 23 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c508ff33c6..a6d098c6df3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 			 * so there'll be no new users. We must ensure
 			 * all current users are done before we free
 			 * the control data.
+			 * Note synchronize_sched() is not enough, as we
+			 * use preempt_disable() to do RCU, but the function
+			 * tracer can be called where RCU is not active
+			 * (before user_exit()).
 			 */
-			synchronize_sched();
+			schedule_on_each_cpu(ftrace_sync);
 			control_ops_free(ops);
 		}
 	} else
@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
 	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		synchronize_sched();
+		schedule_on_each_cpu(ftrace_sync);
+
 	return 0;
 }
 
@@ -622,12 +641,18 @@ static int function_stat_show(struct seq_file *m, void *v)
 	if (rec->counter <= 1)
 		stddev = 0;
 	else {
-		stddev = rec->time_squared - rec->counter * avg * avg;
+		/*
+		 * Apply Welford's method:
+		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+		 */
+		stddev = rec->counter * rec->time_squared -
+			 rec->time * rec->time;
+
 		/*
 		 * Divide only 1000 for ns^2 -> us^2 conversion.
 		 * trace_print_graph_duration will divide 1000 again.
 		 */
-		do_div(stddev, (rec->counter - 1) * 1000);
+		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
 	}
 
 	trace_seq_init(&s);
@@ -1416,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
  * the hashes are freed with call_rcu_sched().
  */
 static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	struct ftrace_hash *filter_hash;
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	/*
+	 * There's a small race when adding ops that the ftrace handler
+	 * that wants regs, may be called without them. We can not
+	 * allow that handler to be called if regs is NULL.
+	 */
+	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+		return 0;
+#endif
+
 	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
 
@@ -2134,12 +2169,57 @@ static cycle_t		ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-	struct ftrace_hash *hash;
+	/*
+	 * Filter_hash being empty will default to trace module.
+	 * But notrace hash requires a test of individual module functions.
+	 */
+	return ftrace_hash_empty(ops->filter_hash) &&
+		ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+	/* If ops isn't enabled, ignore it */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
 
-	hash = ops->filter_hash;
-	return ftrace_hash_empty(hash);
+	/* If ops traces all mods, we already accounted for it */
+	if (ops_traces_mod(ops))
+		return 0;
+
+	/* The function must be in the filter */
+	if (!ftrace_hash_empty(ops->filter_hash) &&
+	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+		return 0;
+
+	/* If in notrace hash, we ignore it too */
+	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+		return 0;
+
+	return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
+
+	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+		if (ops_references_rec(ops, rec))
+		    cnt++;
+	}
+
+	return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
@@ -2148,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
 	unsigned long ref = 0;
+	bool test = false;
 	int i;
 
 	/*
@@ -2161,9 +2242,12 @@
 		for (ops = ftrace_ops_list;
 		     ops != &ftrace_list_end; ops = ops->next) {
-			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-			    ops_traces_mod(ops))
-				ref++;
+			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+				if (ops_traces_mod(ops))
+					ref++;
+				else
+					test = true;
+			}
 		}
 	}
 
@@ -2173,12 +2257,16 @@
 	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 		for (i = 0; i < pg->index; i++) {
+			int cnt = ref;
+
 			/* If something went wrong, bail without enabling anything */
 			if (unlikely(ftrace_disabled))
 				return -1;
 
 			p = &pg->records[i];
-			p->flags = ref;
+			if (test)
+				cnt += referenced_filters(p);
+			p->flags = cnt;
 
 			/*
 			 * Do the initial record conversion from mcount jump
@@ -2198,7 +2286,7 @@
 			 * conversion puts the module to the correct state, thus
 			 * passing the ftrace_make_call check.
 			 */
-			if (ftrace_start_up && ref) {
+			if (ftrace_start_up && cnt) {
 				int failed = __ftrace_replace_code(p, 1);
 				if (failed)
 					ftrace_bug(failed, p->ip);
@@ -3349,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 	return add_hash_entry(hash, ip);
 }
 
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 		unsigned long ip, int remove, int reset, int enable)
@@ -3391,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 	mutex_lock(&ftrace_lock);
 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
-	    && ftrace_enabled)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+	if (!ret)
+		ftrace_ops_update_code(ops);
 
 	mutex_unlock(&ftrace_lock);
 
@@ -3512,8 +3605,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
+/* Used by function selftest to not test if filter is set */
+bool ftrace_filter_param __initdata;
+
 static int __init set_ftrace_notrace(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
@@ -3521,6 +3618,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
@@ -3615,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 		mutex_lock(&ftrace_lock);
 		ret = ftrace_hash_move(iter->ops, filter_hash,
 				       orig_hash, iter->hash);
-		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-		    && ftrace_enabled)
-			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+		if (!ret)
+			ftrace_ops_update_code(iter->ops);
 
 		mutex_unlock(&ftrace_lock);
 	}
@@ -4188,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { }
 # define ftrace_shutdown_sysctl()	do { } while (0)
 
 static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
 	return 1;
 }
@@ -4211,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
 		    !ftrace_function_local_disabled(op) &&
-		    ftrace_ops_test(op, ip))
+		    ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4244,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip))
+		if (ftrace_ops_test(op, ip, regs))
 			op->func(ip, parent_ip, op, regs);
 	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
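A note on the stddev hunk above: the old code first computed a truncated integer
average (avg = rec->time / rec->counter) and then
rec->time_squared - rec->counter * avg * avg, so the rounding error in avg was
amplified by n * avg^2. The replacement keeps the raw sums and applies
s^2 = (n * \Sum x_i^2 - (\Sum x_i)^2) / (n * (n - 1)), dividing only once at the
end. A minimal userspace sketch of the same arithmetic (the sample values are
hypothetical, not kernel code):

#include <stdio.h>

int main(void)
{
	/* hypothetical per-call durations */
	unsigned long long x[] = { 1, 2, 3, 4 };
	unsigned long long n = 4, sum = 0, sum_sq = 0;
	unsigned long long i, num, den;

	for (i = 0; i < n; i++) {
		sum += x[i];		/* plays the role of rec->time */
		sum_sq += x[i] * x[i];	/* plays the role of rec->time_squared */
	}

	/* numerator: n * \Sum x^2 - (\Sum x)^2 = 4*30 - 100 = 20 */
	num = n * sum_sq - sum * sum;
	/* denominator: n * (n - 1) = 12, as in the patched do_div() call */
	den = n * (n - 1);

	/* 20/12 = 5/3, the exact unbiased sample variance of {1,2,3,4} */
	printf("s^2 = %llu / %llu\n", num, den);
	return 0;
}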
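The regs == NULL guard added to ftrace_ops_test() exists for callbacks that
require saved registers. For context, here is a minimal sketch of the kind of
user this protects; my_regs_callback and my_ops are hypothetical names, while
FTRACE_OPS_FL_SAVE_REGS and register_ftrace_function() are the real API of this
era. During the small window while such an ops is being added, its handler
could otherwise be invoked through a trampoline that did not save registers;
the new check simply skips the handler until regs are available.

#include <linux/ftrace.h>
#include <linux/module.h>

/*
 * Hypothetical callback that dereferences regs. It is only safe
 * because the ops below sets FTRACE_OPS_FL_SAVE_REGS and, with this
 * patch, ftrace_ops_test() never dispatches to it with regs == NULL.
 */
static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* inspect or modify *regs here */
}

static struct ftrace_ops my_ops = {
	.func	= my_regs_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");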

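The ftrace_filter_param flag introduced above only records that one of the
filter boot parameters was supplied on the kernel command line, for example:

	ftrace_notrace=rcu_read_lock,rcu_read_unlock

Per the comment in the patch, the function selftest consults this flag so it
can skip itself rather than report a spurious failure when the user has
already restricted which functions are traced at boot.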