From 213cc060797378059a28ebc5c539f3e9a80160bd Mon Sep 17 00:00:00 2001
From: Pekka J Enberg
Date: Fri, 19 Dec 2008 12:08:39 +0200
Subject: ftrace: introduce tracing_reset_online_cpus() helper

Impact: cleanup

This patch factors out common code from multiple tracers into a
tracing_reset_online_cpus() function and converts the tracers to use it.

Signed-off-by: Pekka Enberg
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace.c              | 10 ++++++++++
 kernel/trace/trace.h              |  1 +
 kernel/trace/trace_boot.c         | 12 +-----------
 kernel/trace/trace_functions.c    | 14 ++------------
 kernel/trace/trace_hw_branches.c  | 14 ++------------
 kernel/trace/trace_mmiotrace.c    |  6 +-----
 kernel/trace/trace_sched_switch.c | 14 ++------------
 kernel/trace/trace_sysprof.c      | 12 +-----------
 8 files changed, 20 insertions(+), 63 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0eb6d48347f..79db26e8216 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -679,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
         ftrace_enable_cpu();
 }
 
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+        int cpu;
+
+        tr->time_start = ftrace_now(tr->cpu);
+
+        for_each_online_cpu(cpu)
+                tracing_reset(tr, cpu);
+}
+
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fc75dce7a66..cc7a4f86403 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -374,6 +374,7 @@ struct trace_iterator {
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a4fa2c57e34..3ccebde2848 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,16 +37,6 @@ void disable_boot_trace(void)
         tracing_stop_sched_switch_record();
 }
 
-static void reset_boot_trace(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static int boot_trace_init(struct trace_array *tr)
 {
         int cpu;
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
 {
         .name           = "initcall",
         .init           = boot_trace_init,
-        .reset          = reset_boot_trace,
+        .reset          = tracing_reset_online_cpus,
         .print_line     = initcall_print_line,
 };
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e74f6d0a321..9236d7e25a1 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
 #include "trace.h"
 
-static void function_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void start_function_trace(struct trace_array *tr)
 {
         tr->cpu = get_cpu();
-        function_reset(tr);
+        tracing_reset_online_cpus(tr);
         put_cpu();
 
         tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-        function_reset(tr);
+        tracing_reset_online_cpus(tr);
 }
 
 static struct tracer function_trace __read_mostly =
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ee29e012aa9..b6a3e20a49a 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -25,16 +25,6 @@ static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 
 #define this_buffer per_cpu(buffer, smp_processor_id())
 
-static void bts_trace_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void bts_trace_start_cpu(void *arg)
 {
         if (this_tracer)
@@ -54,7 +44,7 @@ static void bts_trace_start(struct trace_array *tr)
 {
         int cpu;
 
-        bts_trace_reset(tr);
+        tracing_reset_online_cpus(tr);
 
         for_each_cpu_mask(cpu, cpu_possible_map)
                 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
@@ -78,7 +68,7 @@ static void bts_trace_stop(struct trace_array *tr)
 
 static int bts_trace_init(struct trace_array *tr)
 {
-        bts_trace_reset(tr);
+        tracing_reset_online_cpus(tr);
         bts_trace_start(tr);
 
         return 0;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2fb6da6523b..fffcb069f1d 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
-        int cpu;
-
         overrun_detected = false;
         prev_overruns = 0;
-        tr->time_start = ftrace_now(tr->cpu);
 
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
+        tracing_reset_online_cpus(tr);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 781d72ef873..add2c1fdae9 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
         local_irq_restore(flags);
 }
 
-static void sched_switch_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static int tracing_sched_register(void)
 {
         int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
 
 static void start_sched_trace(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
+        tracing_reset_online_cpus(tr);
         tracing_start_sched_switch_record();
 }
 
@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)
 
 static void sched_switch_trace_start(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
+        tracing_reset_online_cpus(tr);
         tracing_start_sched_switch();
 }
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 54960edb96d..01becf1f19f 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
                 stop_stack_timer(cpu);
 }
 
-static void stack_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void start_stack_trace(struct trace_array *tr)
 {
         mutex_lock(&sample_timer_lock);
-        stack_reset(tr);
+        tracing_reset_online_cpus(tr);
         start_stack_timers();
         tracer_enabled = 1;
         mutex_unlock(&sample_timer_lock);
-- cgit v1.2.3-18-g5258


From a8ccf1d6f60e3e6ae63122e02378cd4d40dd4aac Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 23 Dec 2008 11:32:24 -0500
Subject: ring-buffer: fix dangling commit race

Impact: fix stuck trace-buffers

If an interrupt comes in during rb_set_commit_to_write() and pushes the
tail page forward at just the right time, the commit update will miss
the data added by the interrupt. This causes the commit pointer to stop
moving forward.
Thanks to Jiaying Zhang for finding this race.

Reported-by: Jiaying Zhang
Signed-off-by: Steven Rostedt
Cc:
Signed-off-by: Ingo Molnar
---
 kernel/trace/ring_buffer.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
(limited to 'kernel/trace')

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bb6922a931b..d03f4f44a82 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
          * back to us). This allows us to do a simple loop to
          * assign the commit to the tail.
          */
+ again:
         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
                 cpu_buffer->commit_page->page->commit =
                         cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
                         cpu_buffer->commit_page->write;
                 barrier();
         }
+
+        /* again, keep gcc from optimizing */
+        barrier();
+
+        /*
+         * If an interrupt came in just after the first while loop
+         * and pushed the tail page forward, we will be left with
+         * a dangling commit that will never go forward.
+         */
+        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+                goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-- cgit v1.2.3-18-g5258


From 98db8df777438e16ad0f44a0fba05ebbdb73db8d Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Tue, 23 Dec 2008 11:32:25 -0500
Subject: ring-buffer: prevent false positive warning

Impact: eliminate false WARN_ON message

If an interrupt goes off after the setting of the local variable
tail_page and before incrementing the write index of that page, the
interrupt could push the commit forward to the next page.

Later, a check is made to see whether interrupts pushed the write
position all the way around the ring buffer by comparing the next page
to the last committed page. This can produce a false positive if the
interrupt had pushed the commit page forward as stated above.

Thanks to Jiaying Zhang for finding this race.

Reported-by: Jiaying Zhang
Signed-off-by: Steven Rostedt
Cc:
Signed-off-by: Ingo Molnar
---
 kernel/trace/ring_buffer.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d03f4f44a82..76f34c0ef29 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -962,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                   unsigned type, unsigned long length, u64 *ts)
 {
-        struct buffer_page *tail_page, *head_page, *reader_page;
+        struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
         unsigned long tail, write;
         struct ring_buffer *buffer = cpu_buffer->buffer;
         struct ring_buffer_event *event;
         unsigned long flags;
 
+        commit_page = cpu_buffer->commit_page;
+        /* we just need to protect against interrupts */
+        barrier();
         tail_page = cpu_buffer->tail_page;
         write = local_add_return(length, &tail_page->write);
         tail = write - length;
@@ -993,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                  * it all the way around the buffer, bail, and warn
                  * about it.
                  */
-                if (unlikely(next_page == cpu_buffer->commit_page)) {
+                if (unlikely(next_page == commit_page)) {
                         WARN_ON_ONCE(1);
                         goto out_unlock;
                 }
-- cgit v1.2.3-18-g5258


From 468a15bb4cc61694495cc5ed7ffca29e87c79b69 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 16 Dec 2008 08:07:03 +0100
Subject: sched, trace: update trace_sched_wakeup()

Impact: extend the wakeup tracepoint with info on whether the wakeup was real

Add the information needed to distinguish 'real' wakeups from 'false'
wakeups.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_sched_switch.c | 2 +-
 kernel/trace/trace_sched_wakeup.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index add2c1fdae9..df175cb4564 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 }
 
 static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
         struct trace_array_cpu *data;
         unsigned long flags;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0067b49746c..43586b689e3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
 }
 
 static void
-probe_wakeup(struct rq *rq, struct task_struct *p)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 {
         int cpu = smp_processor_id();
         unsigned long flags;
-- cgit v1.2.3-18-g5258
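
For reference, a minimal sketch of how a tracer uses the new tracing_reset_online_cpus()
helper after the first patch above. The "example" tracer name and its init body are
hypothetical, not part of the patches; the .init/.reset hooks, the get_cpu()/put_cpu()
pattern, and using the helper directly as the .reset callback mirror the converted
tracers shown in the diffs.

/* Hypothetical sample tracer; only the helper and the hooks come from the patches above. */
#include "trace.h"

static int example_trace_init(struct trace_array *tr)
{
        tr->cpu = get_cpu();                    /* record the CPU performing the reset */
        tracing_reset_online_cpus(tr);          /* replaces the old per-tracer reset loops */
        put_cpu();
        return 0;
}

static struct tracer example_tracer __read_mostly =
{
        .name           = "example",
        .init           = example_trace_init,
        .reset          = tracing_reset_online_cpus,    /* helper plugs in directly as .reset */
};

A real tracer would additionally be registered with register_tracer(), as the existing
tracers in kernel/trace do.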