Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--	kernel/trace/trace_selftest.c	635
1 file changed, 555 insertions, 80 deletions
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 155a415b320..5ef60499dc8 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -21,13 +21,13 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	return 0;
 }
 
-static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
+static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
 	unsigned int loops = 0;
 
-	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
+	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
 		entry = ring_buffer_event_data(event);
 
 		/*
@@ -58,16 +58,16 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
+static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 {
 	unsigned long flags, cnt = 0;
 	int cpu, ret = 0;
 
 	/* Don't allow flipping of max traces now */
 	local_irq_save(flags);
-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&buf->tr->max_lock);
 
-	cnt = ring_buffer_entries(tr->buffer);
+	cnt = ring_buffer_entries(buf->buffer);
 
 	/*
 	 * The trace_test_buffer_cpu runs a while loop to consume all data.
@@ -78,12 +78,12 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	 */
 	tracing_off();
 	for_each_possible_cpu(cpu) {
-		ret = trace_test_buffer_cpu(tr, cpu);
+		ret = trace_test_buffer_cpu(buf, cpu);
 		if (ret)
 			break;
 	}
 	tracing_on();
-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&buf->tr->max_lock);
 	local_irq_restore(flags);
 
 	if (count)
@@ -101,13 +101,230 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+static int trace_selftest_test_probe1_cnt;
+static void trace_selftest_test_probe1_func(unsigned long ip,
+					    unsigned long pip,
+					    struct ftrace_ops *op,
+					    struct pt_regs *pt_regs)
+{
+	trace_selftest_test_probe1_cnt++;
+}
+
+static int trace_selftest_test_probe2_cnt;
+static void trace_selftest_test_probe2_func(unsigned long ip,
+					    unsigned long pip,
+					    struct ftrace_ops *op,
+					    struct pt_regs *pt_regs)
+{
+	trace_selftest_test_probe2_cnt++;
+}
+
+static int trace_selftest_test_probe3_cnt;
+static void trace_selftest_test_probe3_func(unsigned long ip,
+					    unsigned long pip,
+					    struct ftrace_ops *op,
+					    struct pt_regs *pt_regs)
+{
+	trace_selftest_test_probe3_cnt++;
+}
+
+static int trace_selftest_test_global_cnt;
+static void trace_selftest_test_global_func(unsigned long ip,
+					    unsigned long pip,
+					    struct ftrace_ops *op,
+					    struct pt_regs *pt_regs)
+{
+	trace_selftest_test_global_cnt++;
+}
+
+static int trace_selftest_test_dyn_cnt;
+static void trace_selftest_test_dyn_func(unsigned long ip,
+					 unsigned long pip,
+					 struct ftrace_ops *op,
+					 struct pt_regs *pt_regs)
+{
+	trace_selftest_test_dyn_cnt++;
+}
+
+static struct ftrace_ops test_probe1 = {
+	.func			= trace_selftest_test_probe1_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
+static struct ftrace_ops test_probe2 = {
+	.func			= trace_selftest_test_probe2_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
+static struct ftrace_ops test_probe3 = {
+	.func			= trace_selftest_test_probe3_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
+static void print_counts(void)
+{
+	printk("(%d %d %d %d %d) ",
+	       trace_selftest_test_probe1_cnt,
+	       trace_selftest_test_probe2_cnt,
+	       trace_selftest_test_probe3_cnt,
+	       trace_selftest_test_global_cnt,
+	       trace_selftest_test_dyn_cnt);
+}
+
+static void reset_counts(void)
+{
+	trace_selftest_test_probe1_cnt = 0;
+	trace_selftest_test_probe2_cnt = 0;
+	trace_selftest_test_probe3_cnt = 0;
+	trace_selftest_test_global_cnt = 0;
+	trace_selftest_test_dyn_cnt = 0;
+}
+
+static int trace_selftest_ops(struct trace_array *tr, int cnt)
+{
+	int save_ftrace_enabled = ftrace_enabled;
+	struct ftrace_ops *dyn_ops;
+	char *func1_name;
+	char *func2_name;
+	int len1;
+	int len2;
+	int ret = -1;
+
+	printk(KERN_CONT "PASSED\n");
+	pr_info("Testing dynamic ftrace ops #%d: ", cnt);
+
+	ftrace_enabled = 1;
+	reset_counts();
+
+	/* Handle PPC64 '.' name */
+	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
+	len1 = strlen(func1_name);
+	len2 = strlen(func2_name);
+
+	/*
+	 * Probe 1 will trace function 1.
+	 * Probe 2 will trace function 2.
+	 * Probe 3 will trace functions 1 and 2.
+	 */
+	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
+	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
+	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
+	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
+
+	register_ftrace_function(&test_probe1);
+	register_ftrace_function(&test_probe2);
+	register_ftrace_function(&test_probe3);
+	/* First time we are running with main function */
+	if (cnt > 1) {
+		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
+		register_ftrace_function(tr->ops);
+	}
+
+	DYN_FTRACE_TEST_NAME();
+
+	print_counts();
+
+	if (trace_selftest_test_probe1_cnt != 1)
+		goto out;
+	if (trace_selftest_test_probe2_cnt != 0)
+		goto out;
+	if (trace_selftest_test_probe3_cnt != 1)
+		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}
+
+	DYN_FTRACE_TEST_NAME2();
+
+	print_counts();
+
+	if (trace_selftest_test_probe1_cnt != 1)
+		goto out;
+	if (trace_selftest_test_probe2_cnt != 1)
+		goto out;
+	if (trace_selftest_test_probe3_cnt != 2)
+		goto out;
+
+	/* Add a dynamic probe */
+	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
+	if (!dyn_ops) {
+		printk("MEMORY ERROR ");
+		goto out;
+	}
+
+	dyn_ops->func = trace_selftest_test_dyn_func;
+
+	register_ftrace_function(dyn_ops);
+
+	trace_selftest_test_global_cnt = 0;
+
+	DYN_FTRACE_TEST_NAME();
+
+	print_counts();
+
+	if (trace_selftest_test_probe1_cnt != 2)
+		goto out_free;
+	if (trace_selftest_test_probe2_cnt != 1)
+		goto out_free;
+	if (trace_selftest_test_probe3_cnt != 3)
+		goto out_free;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}
+	if (trace_selftest_test_dyn_cnt == 0)
+		goto out_free;
+
+	DYN_FTRACE_TEST_NAME2();
+
+	print_counts();
+
+	if (trace_selftest_test_probe1_cnt != 2)
+		goto out_free;
+	if (trace_selftest_test_probe2_cnt != 2)
+		goto out_free;
+	if (trace_selftest_test_probe3_cnt != 4)
+		goto out_free;
+
+	ret = 0;
+ out_free:
+	unregister_ftrace_function(dyn_ops);
+	kfree(dyn_ops);
+
+ out:
+	/* Purposely unregister in the same order */
+	unregister_ftrace_function(&test_probe1);
+	unregister_ftrace_function(&test_probe2);
+	unregister_ftrace_function(&test_probe3);
+	if (cnt > 1)
+		unregister_ftrace_function(tr->ops);
+	ftrace_reset_array_ops(tr);
+
+	/* Make sure everything is off */
+	reset_counts();
+	DYN_FTRACE_TEST_NAME();
+	DYN_FTRACE_TEST_NAME();
+
+	if (trace_selftest_test_probe1_cnt ||
+	    trace_selftest_test_probe2_cnt ||
+	    trace_selftest_test_probe3_cnt ||
+	    trace_selftest_test_global_cnt ||
+	    trace_selftest_test_dyn_cnt)
+		ret = -1;
+
+	ftrace_enabled = save_ftrace_enabled;
+
+	return ret;
+}
+
 /* Test dynamic code modification and ftrace filters */
-int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
-					   struct trace_array *tr,
-					   int (*func)(void))
+static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
+						  struct trace_array *tr,
+						  int (*func)(void))
 {
 	int save_ftrace_enabled = ftrace_enabled;
-	int save_tracer_enabled = tracer_enabled;
 	unsigned long count;
 	char *func_name;
 	int ret;
@@ -118,7 +335,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 
 	/* enable tracing, and record the filter function */
 	ftrace_enabled = 1;
-	tracer_enabled = 1;
 
 	/* passed in by parameter to fool gcc from optimizing */
 	func();
@@ -131,7 +347,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 
 	/* filter only on our function */
-	ftrace_set_filter(func_name, strlen(func_name), 1);
+	ftrace_set_global_filter(func_name, strlen(func_name), 1);
 
 	/* enable tracing */
 	ret = tracer_init(trace, tr);
@@ -144,7 +360,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	msleep(100);
 
 	/* we should have nothing in the buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	if (ret)
 		goto out;
 
@@ -165,49 +381,289 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
-	trace->reset(tr);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	tracing_start();
 
 	/* we should only have one item */
 	if (!ret && count != 1) {
+		trace->reset(tr);
 		printk(KERN_CONT ".. filter failed count=%ld ..", count);
 		ret = -1;
 		goto out;
 	}
 
+	/* Test the ops with global tracing running */
+	ret = trace_selftest_ops(tr, 1);
+	trace->reset(tr);
+
  out:
 	ftrace_enabled = save_ftrace_enabled;
-	tracer_enabled = save_tracer_enabled;
 
 	/* Enable tracing on all functions again */
-	ftrace_set_filter(NULL, 0, 1);
+	ftrace_set_global_filter(NULL, 0, 1);
+
+	/* Test the ops with global tracing off */
+	if (!ret)
+		ret = trace_selftest_ops(tr, 2);
+
+	return ret;
+}
+
+static int trace_selftest_recursion_cnt;
+static void trace_selftest_test_recursion_func(unsigned long ip,
+					       unsigned long pip,
+					       struct ftrace_ops *op,
+					       struct pt_regs *pt_regs)
+{
+	/*
+	 * This function is registered without the recursion safe flag.
+	 * The ftrace infrastructure should provide the recursion
+	 * protection. If not, this will crash the kernel!
+	 */
+	if (trace_selftest_recursion_cnt++ > 10)
+		return;
+	DYN_FTRACE_TEST_NAME();
+}
+
+static void trace_selftest_test_recursion_safe_func(unsigned long ip,
+						    unsigned long pip,
+						    struct ftrace_ops *op,
+						    struct pt_regs *pt_regs)
+{
+	/*
+	 * We said we would provide our own recursion. By calling
+	 * this function again, we should recurse back into this function
+	 * and count again. But this only happens if the arch supports
+	 * all of ftrace features and nothing else is using the function
+	 * tracing utility.
+	 */
+	if (trace_selftest_recursion_cnt++)
+		return;
+	DYN_FTRACE_TEST_NAME();
+}
+
+static struct ftrace_ops test_rec_probe = {
+	.func			= trace_selftest_test_recursion_func,
+};
+
+static struct ftrace_ops test_recsafe_probe = {
+	.func			= trace_selftest_test_recursion_safe_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
+static int
+trace_selftest_function_recursion(void)
+{
+	int save_ftrace_enabled = ftrace_enabled;
+	char *func_name;
+	int len;
+	int ret;
+
+	/* The previous test PASSED */
+	pr_cont("PASSED\n");
+	pr_info("Testing ftrace recursion: ");
+
+
+	/* enable tracing, and record the filter function */
+	ftrace_enabled = 1;
+
+	/* Handle PPC64 '.' name */
+	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+	len = strlen(func_name);
+
+	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
+	if (ret) {
+		pr_cont("*Could not set filter* ");
+		goto out;
+	}
+
+	ret = register_ftrace_function(&test_rec_probe);
+	if (ret) {
+		pr_cont("*could not register callback* ");
+		goto out;
+	}
+
+	DYN_FTRACE_TEST_NAME();
+
+	unregister_ftrace_function(&test_rec_probe);
+
+	ret = -1;
+	if (trace_selftest_recursion_cnt != 1) {
+		pr_cont("*callback not called once (%d)* ",
+			trace_selftest_recursion_cnt);
+		goto out;
+	}
+
+	trace_selftest_recursion_cnt = 1;
+
+	pr_cont("PASSED\n");
+	pr_info("Testing ftrace recursion safe: ");
+
+	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
+	if (ret) {
+		pr_cont("*Could not set filter* ");
+		goto out;
+	}
+
+	ret = register_ftrace_function(&test_recsafe_probe);
+	if (ret) {
+		pr_cont("*could not register callback* ");
+		goto out;
+	}
+
+	DYN_FTRACE_TEST_NAME();
+
+	unregister_ftrace_function(&test_recsafe_probe);
+
+	ret = -1;
+	if (trace_selftest_recursion_cnt != 2) {
+		pr_cont("*callback not called expected 2 times (%d)* ",
+			trace_selftest_recursion_cnt);
+		goto out;
+	}
+
+	ret = 0;
+out:
+	ftrace_enabled = save_ftrace_enabled;
 
 	return ret;
 }
 #else
 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
+# define trace_selftest_function_recursion() ({ 0; })
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static enum {
+	TRACE_SELFTEST_REGS_START,
+	TRACE_SELFTEST_REGS_FOUND,
+	TRACE_SELFTEST_REGS_NOT_FOUND,
+} trace_selftest_regs_stat;
+
+static void trace_selftest_test_regs_func(unsigned long ip,
+					  unsigned long pip,
+					  struct ftrace_ops *op,
+					  struct pt_regs *pt_regs)
+{
+	if (pt_regs)
+		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
+	else
+		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
+}
+
+static struct ftrace_ops test_regs_probe = {
+	.func		= trace_selftest_test_regs_func,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
+};
+
+static int
+trace_selftest_function_regs(void)
+{
+	int save_ftrace_enabled = ftrace_enabled;
+	char *func_name;
+	int len;
+	int ret;
+	int supported = 0;
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	supported = 1;
+#endif
+
+	/* The previous test PASSED */
+	pr_cont("PASSED\n");
+	pr_info("Testing ftrace regs%s: ",
+		!supported ? "(no arch support)" : "");
+
+	/* enable tracing, and record the filter function */
+	ftrace_enabled = 1;
+
+	/* Handle PPC64 '.' name */
+	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
+	len = strlen(func_name);
+
+	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
+	/*
+	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
+	 * This test really doesn't care.
+	 */
+	if (ret && ret != -ENODEV) {
+		pr_cont("*Could not set filter* ");
+		goto out;
+	}
+
+	ret = register_ftrace_function(&test_regs_probe);
+	/*
+	 * Now if the arch does not support passing regs, then this should
+	 * have failed.
+	 */
+	if (!supported) {
+		if (!ret) {
+			pr_cont("*registered save-regs without arch support* ");
+			goto out;
+		}
+		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
+		ret = register_ftrace_function(&test_regs_probe);
+	}
+	if (ret) {
+		pr_cont("*could not register callback* ");
+		goto out;
+	}
+
+
+	DYN_FTRACE_TEST_NAME();
+
+	unregister_ftrace_function(&test_regs_probe);
+
+	ret = -1;
+
+	switch (trace_selftest_regs_stat) {
+	case TRACE_SELFTEST_REGS_START:
+		pr_cont("*callback never called* ");
+		goto out;
+
+	case TRACE_SELFTEST_REGS_FOUND:
+		if (supported)
+			break;
+		pr_cont("*callback received regs without arch support* ");
+		goto out;
+
+	case TRACE_SELFTEST_REGS_NOT_FOUND:
+		if (!supported)
+			break;
+		pr_cont("*callback received NULL regs* ");
+		goto out;
+	}
+
+	ret = 0;
+out:
+	ftrace_enabled = save_ftrace_enabled;
+
+	return ret;
+}
+
 /*
  * Simple verification test of ftrace function tracer.
  * Enable ftrace, sleep 1/10 second, and then read the trace
  * buffer to see if all is in order.
  */
-int
+__init int
 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 {
 	int save_ftrace_enabled = ftrace_enabled;
-	int save_tracer_enabled = tracer_enabled;
 	unsigned long count;
 	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (ftrace_filter_param) {
+		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
+		return 0;
+	}
+#endif
+
 	/* make sure msleep has been recorded */
 	msleep(1);
 
 	/* start the tracing */
 	ftrace_enabled = 1;
-	tracer_enabled = 1;
 
 	ret = tracer_init(trace, tr);
 	if (ret) {
@@ -222,7 +678,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -234,10 +690,16 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 						     DYN_FTRACE_TEST_NAME);
+	if (ret)
+		goto out;
+	ret = trace_selftest_function_recursion();
+	if (ret)
+		goto out;
+
+	ret = trace_selftest_function_regs();
  out:
 	ftrace_enabled = save_ftrace_enabled;
-	tracer_enabled = save_tracer_enabled;
 
 	/* kill ftrace totally if we failed */
 	if (ret)
@@ -253,8 +715,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 /* Maximum number of functions to trace before diagnosing a hang */
 #define GRAPH_MAX_FUNC_TEST	100000000
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
 static unsigned int graph_hang_thresh;
 
 /* Wrap the real function entry probe to avoid possible hanging */
@@ -264,8 +724,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 		ftrace_graph_stop();
 		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
-		if (ftrace_dump_on_oops)
-			__ftrace_dump(false, DUMP_ALL);
+		if (ftrace_dump_on_oops) {
+			ftrace_dump(DUMP_ALL);
+			/* ftrace_dump() disables tracing */
+			tracing_on();
+		}
 		return 0;
 	}
 
@@ -276,18 +739,25 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
  */
-int
+__init int
 trace_selftest_startup_function_graph(struct tracer *trace,
 					struct trace_array *tr)
 {
 	int ret;
 	unsigned long count;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (ftrace_filter_param) {
+		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
+		return 0;
+	}
+#endif
+
 	/*
 	 * Simulate the init() callback but we attach a watchdog callback
 	 * to detect and recover from possible hangs
 	 */
-	tracing_reset_online_cpus(tr);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				    &trace_graph_entry_watchdog);
@@ -310,7 +780,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	tracing_stop();
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 
 	trace->reset(tr);
 	tracing_start();
@@ -337,7 +807,7 @@ out:
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -349,7 +819,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable interrupts for a bit */
 	local_irq_disable();
 	udelay(100);
@@ -365,9 +835,9 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (!ret)
-		ret = trace_test_buffer(&max_tr, &count);
+		ret = trace_test_buffer(&tr->max_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -376,7 +846,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -386,7 +856,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -411,7 +881,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable preemption for a bit */
 	preempt_disable();
 	udelay(100);
@@ -427,9 +897,9 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (!ret)
-		ret = trace_test_buffer(&max_tr, &count);
+		ret = trace_test_buffer(&tr->max_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -438,7 +908,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -448,7 +918,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
 
@@ -473,7 +943,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
 	/* disable preemption and interrupts for a bit */
 	preempt_disable();
@@ -493,11 +963,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (ret)
 		goto out;
 
-	ret = trace_test_buffer(&max_tr, &count);
+	ret = trace_test_buffer(&tr->max_buffer, &count);
 	if (ret)
 		goto out;
 
@@ -508,7 +978,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	}
 
 	/* do the test by disabling interrupts first this time */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_start();
 	trace->start(tr);
 
@@ -523,11 +993,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (ret)
 		goto out;
 
-	ret = trace_test_buffer(&max_tr, &count);
+	ret = trace_test_buffer(&tr->max_buffer, &count);
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
@@ -539,7 +1009,7 @@ out:
 	tracing_start();
 out_no_start:
 	trace->reset(tr);
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	return ret;
 }
@@ -557,11 +1027,16 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 #ifdef CONFIG_SCHED_TRACER
 static int trace_wakeup_test_thread(void *data)
 {
-	/* Make this a RT thread, doesn't need to be too high */
-	struct sched_param param = { .sched_priority = 5 };
+	/* Make this a -deadline thread */
+	static const struct sched_attr attr = {
+		.sched_policy = SCHED_DEADLINE,
+		.sched_runtime = 100000ULL,
+		.sched_deadline = 10000000ULL,
+		.sched_period = 10000000ULL
+	};
 	struct completion *x = data;
 
-	sched_setscheduler(current, SCHED_FIFO, &param);
+	sched_setattr(current, &attr);
 
 	/* Make it know we have a new prio */
 	complete(x);
@@ -570,11 +1045,13 @@ static int trace_wakeup_test_thread(void *data)
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule();
 
+	complete(x);
+
 	/* we are awake, now wait to disappear */
 	while (!kthread_should_stop()) {
 		/*
-		 * This is an RT task, do short sleeps to let
-		 * others run.
+		 * This will likely be the system top priority
+		 * task, do short sleeps to let others run.
 		 */
 		msleep(100);
 	}
@@ -585,23 +1062,23 @@ static int trace_wakeup_test_thread(void *data)
 int
 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	struct task_struct *p;
-	struct completion isrt;
+	struct completion is_ready;
 	unsigned long count;
 	int ret;
 
-	init_completion(&isrt);
+	init_completion(&is_ready);
 
-	/* create a high prio thread */
-	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
+	/* create a -deadline thread */
+	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
 	if (IS_ERR(p)) {
 		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
 		return -1;
 	}
 
-	/* make sure the thread is running at an RT prio */
-	wait_for_completion(&isrt);
+	/* make sure the thread is running at -deadline policy */
+	wait_for_completion(&is_ready);
 
 	/* start the tracing */
 	ret = tracer_init(trace, tr);
@@ -611,39 +1088,37 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	}
 
 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 
-	/* sleep to let the RT thread sleep too */
-	msleep(100);
+	while (p->on_rq) {
+		/*
+		 * Sleep to make sure the -deadline thread is asleep too.
+		 * On virtual machines we can't rely on timings,
+		 * but we want to make sure this test still works.
+		 */
+		msleep(100);
+	}
 
-	/*
-	 * Yes this is slightly racy. It is possible that for some
-	 * strange reason that the RT thread we created, did not
-	 * call schedule for 100ms after doing the completion,
-	 * and we do a wakeup on a task that already is awake.
-	 * But that is extremely unlikely, and the worst thing that
-	 * happens in such a case, is that we disable tracing.
-	 * Honestly, if this race does happen something is horrible
-	 * wrong with the system.
-	 */
+	init_completion(&is_ready);
 
 	wake_up_process(p);
 
-	/* give a little time to let the thread wake up */
-	msleep(100);
+	/* Wait for the task to wake up */
+	wait_for_completion(&is_ready);
 
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
+	printk("ret = %d\n", ret);
 	if (!ret)
-		ret = trace_test_buffer(&max_tr, &count);
+		ret = trace_test_buffer(&tr->max_buffer, &count);
 
 	trace->reset(tr);
 	tracing_start();
 
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;
 
 	/* kill the thread */
 	kthread_stop(p);
@@ -676,7 +1151,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -708,7 +1183,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
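Reading aid: the trace_selftest_ops() hunks above exercise the per-ops filter/register API (ftrace_set_filter() now takes a struct ftrace_ops, while the old global entry point is renamed ftrace_set_global_filter()). Below is a minimal sketch of that same pattern in hypothetical module form; my_probe, my_probe_cnt and the "schedule" filter string are illustrative and not part of the commit, but the ftrace calls are the ones the selftest uses.

/*
 * Hypothetical module sketch: filter one ops to a single function,
 * then register it, mirroring the order trace_selftest_ops() uses.
 */
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static int my_probe_cnt;

/* Same callback signature as the selftest probes above */
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	my_probe_cnt++;		/* count every hit on the filtered function */
}

static struct ftrace_ops my_probe = {
	.func	= my_probe_func,
	/* this callback cannot recurse, so skip ftrace's recursion guard */
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_probe_init(void)
{
	char *func_name = "schedule";	/* illustrative target */
	int ret;

	/* set the filter before registering, as the selftest does */
	ret = ftrace_set_filter(&my_probe, func_name, strlen(func_name), 1);
	if (ret)
		return ret;
	return register_ftrace_function(&my_probe);
}

static void __exit my_probe_exit(void)
{
	unregister_ftrace_function(&my_probe);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");

Once registered, the callback fires on every entry to the filtered function until unregister_ftrace_function() is called, which is why the selftest re-runs DYN_FTRACE_TEST_NAME() after unregistering and checks that no counter moves.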
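Similarly, trace_selftest_function_regs() above validates FTRACE_OPS_FL_SAVE_REGS and its IF_SUPPORTED fallback. A short sketch of the degrade-gracefully variant it tests, in the same hypothetical module context as the previous sketch (my_regs_func and my_regs_probe are made-up names):

#include <linux/ptrace.h>	/* instruction_pointer() */

static void my_regs_func(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	/* pt_regs is only non-NULL when the arch saved full regs */
	if (pt_regs)
		pr_info("traced %pS, pc=%lx\n", (void *)ip,
			instruction_pointer(pt_regs));
}

static struct ftrace_ops my_regs_probe = {
	.func	= my_regs_func,
	/* IF_SUPPORTED registers successfully even without arch regs support,
	 * passing NULL regs instead of failing like plain SAVE_REGS does */
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
		  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};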
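Finally, the wakeup selftest now runs its helper thread under SCHED_DEADLINE via the in-kernel sched_setattr(). For reference, a userspace sketch with the same 100us/10ms parameters: there is no glibc wrapper for this syscall, so it goes through syscall(2), and the struct layout below is copied from the uapi header (assumes a kernel with SCHED_DEADLINE, i.e. 3.14+).

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* mirrors include/uapi/linux/sched/types.h */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

#define SCHED_DEADLINE	6

int main(void)
{
	/* same budget/period the selftest thread requests: 100us every 10ms */
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 100000ULL,	/* 100 us */
		.sched_deadline	= 10000000ULL,	/* 10 ms */
		.sched_period	= 10000000ULL,	/* 10 ms */
	};

	/* pid 0 means the calling thread; flags are currently 0 */
	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	puts("running as SCHED_DEADLINE");
	return 0;
}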
