Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--  kernel/trace/trace_irqsoff.c  212
1 file changed, 142 insertions(+), 70 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5cf8c602b88..9bb104f748d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
* From code in the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
static DEFINE_PER_CPU(int, tracing_cpu);
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);
enum {
TRACER_IRQS_OFF = (1 << 1),
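max_trace_lock is taken in check_critical_timing() with interrupts already hard-disabled, so it must remain a true spinning lock even on PREEMPT_RT, where a plain spinlock_t becomes sleepable. A minimal sketch of the pattern this conversion enables, with a hypothetical helper name:

static DEFINE_RAW_SPINLOCK(max_trace_lock);

static void example_update_max(void)
{
	unsigned long flags;

	/* Safe in IRQ-disabled context: raw_spinlock_t never sleeps. */
	raw_spin_lock_irqsave(&max_trace_lock, flags);
	/* ... update the shared maximum under the lock ... */
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
}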
@@ -32,7 +32,8 @@ enum {
static int trace_type __read_mostly;
-static int save_lat_flag;
+static int save_flags;
+static bool function_enabled;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -80,7 +81,7 @@ static struct tracer_flags tracer_flags = {
* skip the latency if the sequence has changed - some other section
* did a maximum and could disturb our measurement with serial console
* printouts, etc. Truly coinciding maximum latencies should be rare
- * and what happens together happens separately as well, so this doesnt
+ * and what happens together happens separately as well, so this doesn't
* decrease the validity of the maximum found:
*/
static __cacheline_aligned_in_smp unsigned long max_sequence;
@@ -121,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
if (!irqs_disabled_flags(*flags))
return 0;
- *data = tr->data[cpu];
+ *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1))
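The per-CPU state now hangs off the multi-buffer layout (tr->trace_buffer.data) instead of the old tr->data[cpu] array. A sketch of the access-plus-guard pattern func_prolog_dec() uses, with a hypothetical helper name:

static int example_get_cpu_data(struct trace_array *tr,
				struct trace_array_cpu **data, int cpu)
{
	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	/* The disabled counter fends off recursion and teardown races;
	 * only the first increment wins the right to trace. */
	if (atomic_inc_return(&(*data)->disabled) != 1) {
		atomic_dec(&(*data)->disabled);
		return 0;
	}
	return 1;
}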
@@ -136,7 +137,8 @@ static int func_prolog_dec(struct trace_array *tr,
* irqsoff uses its own tracer function to keep the overhead down:
*/
static void
-irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
+irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *pt_regs)
{
struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data;
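irqsoff_tracer_call() adopts the extended ftrace callback signature: besides ip/parent_ip, callbacks now receive the ftrace_ops they were registered through and, when requested, the saved registers. A sketch of the ABI this change assumes:

static void example_ftrace_callback(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op,
				    struct pt_regs *pt_regs)
{
	/* op identifies the registration, letting one callback serve
	 * several tracer instances; pt_regs is only populated when the
	 * ops was registered with FTRACE_OPS_FL_SAVE_REGS. */
}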
@@ -149,15 +151,11 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
atomic_dec(&data->disabled);
}
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
- .func = irqsoff_tracer_call,
-};
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
int cpu;
@@ -172,8 +170,8 @@ static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
for_each_possible_cpu(cpu)
per_cpu(tracing_cpu, cpu) = 0;
- tracing_max_latency = 0;
- tracing_reset_online_cpus(irqsoff_trace);
+ tr->max_latency = 0;
+ tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
return start_irqsoff_tracer(irqsoff_trace, set);
}
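tracing_max_latency used to be a single global; it becomes per-instance state so each trace_array tracks its own worst case. Roughly, this assumes a field along these lines in struct trace_array (abridged, field placement hypothetical):

struct trace_array {
	/* ... */
	unsigned long	max_latency;	/* worst latency seen so far */
	/* ... */
};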
@@ -225,7 +223,9 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
- TRACE_GRAPH_PRINT_PROC)
+ TRACE_GRAPH_PRINT_PROC | \
+ TRACE_GRAPH_PRINT_ABS_TIME | \
+ TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
@@ -261,7 +261,8 @@ __trace_function(struct trace_array *tr,
#else
#define __trace_function trace_function
-static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+static int
+irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
return -EINVAL;
}
@@ -277,21 +278,32 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
}
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
-static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_default_header(s);
+}
+#else
+static void irqsoff_print_header(struct seq_file *s)
+{
+ trace_latency_header(s);
+}
+#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
* Should this new latency be reported/recorded?
*/
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
return 0;
} else {
- if (delta <= tracing_max_latency)
+ if (delta <= tr->max_latency)
return 0;
}
return 1;
@@ -315,13 +327,13 @@ check_critical_timing(struct trace_array *tr,
pc = preempt_count();
- if (!report_latency(delta))
+ if (!report_latency(tr, delta))
goto out;
- spin_lock_irqsave(&max_trace_lock, flags);
+ raw_spin_lock_irqsave(&max_trace_lock, flags);
/* check if we are still the max latency */
- if (!report_latency(delta))
+ if (!report_latency(tr, delta))
goto out_unlock;
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
@@ -334,14 +346,14 @@ check_critical_timing(struct trace_array *tr,
data->critical_end = parent_ip;
if (likely(!is_tracing_stopped())) {
- tracing_max_latency = delta;
+ tr->max_latency = delta;
update_max_tr_single(tr, current, cpu);
}
max_sequence++;
out_unlock:
- spin_unlock_irqrestore(&max_trace_lock, flags);
+ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
out:
data->critical_sequence = max_sequence;
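check_critical_timing() is a classic double-checked update: the first report_latency() test is lockless and cheap, and after taking max_trace_lock the test is repeated because another CPU may have recorded a bigger maximum in the meantime. Condensed into a hypothetical helper:

static void example_record_max(struct trace_array *tr, int cpu,
			       cycle_t delta)
{
	unsigned long flags;

	if (!report_latency(tr, delta))		/* cheap, lockless */
		return;

	raw_spin_lock_irqsave(&max_trace_lock, flags);
	if (report_latency(tr, delta)) {	/* still the max? */
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
}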
@@ -357,7 +369,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
struct trace_array_cpu *data;
unsigned long flags;
- if (likely(!tracer_enabled))
+ if (!tracer_enabled || !tracing_is_enabled())
return;
cpu = raw_smp_processor_id();
@@ -365,7 +377,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
if (per_cpu(tracing_cpu, cpu))
return;
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (unlikely(!data) || atomic_read(&data->disabled))
return;
@@ -400,10 +412,10 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
else
return;
- if (!tracer_enabled)
+ if (!tracer_enabled || !tracing_is_enabled())
return;
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (unlikely(!data) ||
!data->critical_start || atomic_read(&data->disabled))
@@ -453,14 +465,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
* Stubs:
*/
-void early_boot_irqs_off(void)
-{
-}
-
-void early_boot_irqs_on(void)
-{
-}
-
void trace_softirqs_on(unsigned long ip)
{
}
@@ -490,14 +494,14 @@ void trace_hardirqs_off(void)
}
EXPORT_SYMBOL(trace_hardirqs_off);
-void trace_hardirqs_on_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
-void trace_hardirqs_off_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -510,26 +514,73 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
- if (preempt_trace())
+ if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- if (preempt_trace())
+ if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
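The added !irq_trace() guard keeps the preempt hooks from starting or stopping a measurement the IRQ hooks already own when both tracers are built in (the preemptirqsoff case). The predicates are defined earlier in this file; their approximate shape, keyed off trace_type:

#ifdef CONFIG_IRQSOFF_TRACER
# define irq_trace()	((trace_type & TRACER_IRQS_OFF) && irqs_disabled())
#else
# define irq_trace()	(0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
# define preempt_trace() ((trace_type & TRACER_PREEMPT_OFF) && preempt_count())
#else
# define preempt_trace() (0)
#endif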
-static int start_irqsoff_tracer(struct trace_array *tr, int graph)
+static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
- int ret = 0;
+ int ret;
- if (!graph)
- ret = register_ftrace_function(&trace_ops);
- else
+ /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+ if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+ return 0;
+
+ if (graph)
ret = register_ftrace_graph(&irqsoff_graph_return,
&irqsoff_graph_entry);
+ else
+ ret = register_ftrace_function(tr->ops);
+
+ if (!ret)
+ function_enabled = true;
+
+ return ret;
+}
+
+static void unregister_irqsoff_function(struct trace_array *tr, int graph)
+{
+ if (!function_enabled)
+ return;
+
+ if (graph)
+ unregister_ftrace_graph();
+ else
+ unregister_ftrace_function(tr->ops);
+
+ function_enabled = false;
+}
+
+static void irqsoff_function_set(struct trace_array *tr, int set)
+{
+ if (set)
+ register_irqsoff_function(tr, is_graph(), 1);
+ else
+ unregister_irqsoff_function(tr, is_graph());
+}
+
+static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
+{
+ struct tracer *tracer = tr->current_trace;
+
+ if (mask & TRACE_ITER_FUNCTION)
+ irqsoff_function_set(tr, set);
+
+ return trace_keep_overwrite(tracer, mask, set);
+}
+
+static int start_irqsoff_tracer(struct trace_array *tr, int graph)
+{
+ int ret;
+
+ ret = register_irqsoff_function(tr, graph, 0);
if (!ret && tracing_is_enabled())
tracer_enabled = 1;
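The new .flag_changed hook lets the core consult the tracer before a trace option flips, which is how irqsoff_flag_changed() gets to register or unregister the function tracer when TRACE_ITER_FUNCTION is toggled. A simplified sketch of the assumed call site in kernel/trace/trace.c (hypothetical helper name):

int example_set_tracer_flag(struct trace_array *tr, unsigned int mask,
			    int enabled)
{
	/* Give the tracer a chance to veto or react to the change. */
	if (tr->current_trace->flag_changed &&
	    tr->current_trace->flag_changed(tr, mask, !!enabled))
		return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	return 0;
}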
@@ -543,33 +594,51 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
tracer_enabled = 0;
- if (!graph)
- unregister_ftrace_function(&trace_ops);
- else
- unregister_ftrace_graph();
+ unregister_irqsoff_function(tr, graph);
}
-static void __irqsoff_tracer_init(struct trace_array *tr)
+static bool irqsoff_busy;
+
+static int __irqsoff_tracer_init(struct trace_array *tr)
{
- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
- trace_flags |= TRACE_ITER_LATENCY_FMT;
+ if (irqsoff_busy)
+ return -EBUSY;
+
+ save_flags = trace_flags;
+
+ /* non overwrite screws up the latency tracers */
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
- tracing_max_latency = 0;
+ tr->max_latency = 0;
irqsoff_trace = tr;
/* make sure that the tracer is visible */
smp_wmb();
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
- if (start_irqsoff_tracer(tr, is_graph()))
+ ftrace_init_array_ops(tr, irqsoff_tracer_call);
+
+ /* Only toplevel instance supports graph tracing */
+ if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
+ is_graph())))
printk(KERN_ERR "failed to start irqsoff tracer\n");
+
+ irqsoff_busy = true;
+ return 0;
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
+ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
stop_irqsoff_tracer(tr, is_graph());
- if (!save_lat_flag)
- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+ ftrace_reset_array_ops(tr);
+
+ irqsoff_busy = false;
}
static void irqsoff_tracer_start(struct trace_array *tr)
@@ -587,8 +656,7 @@ static int irqsoff_tracer_init(struct trace_array *tr)
{
trace_type = TRACER_IRQS_OFF;
- __irqsoff_tracer_init(tr);
- return 0;
+ return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
@@ -597,17 +665,19 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
+ .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .allow_instances = true,
+ .use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@@ -619,8 +689,7 @@ static int preemptoff_tracer_init(struct trace_array *tr)
{
trace_type = TRACER_PREEMPT_OFF;
- __irqsoff_tracer_init(tr);
- return 0;
+ return __irqsoff_tracer_init(tr);
}
static struct tracer preemptoff_tracer __read_mostly =
@@ -630,17 +699,19 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
+ .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .allow_instances = true,
+ .use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -654,8 +725,7 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
- __irqsoff_tracer_init(tr);
- return 0;
+ return __irqsoff_tracer_init(tr);
}
static struct tracer preemptirqsoff_tracer __read_mostly =
@@ -665,17 +735,19 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
+ .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .allow_instances = true,
+ .use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
@@ -691,4 +763,4 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
-device_initcall(init_irqsoff_tracer);
+core_initcall(init_irqsoff_tracer);
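Switching from device_initcall to core_initcall moves tracer registration from initcall level 6 to level 1, making the latency tracers available much earlier in boot. For reference, the levels come from include/linux/init.h (abridged):

#define core_initcall(fn)	__define_initcall(fn, 1)
#define device_initcall(fn)	__define_initcall(fn, 6)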