author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2013-07-02 09:01:31 -0700
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2013-07-02 09:01:31 -0700
commit		27eb2c4b3d3e13f376a359e293c212a2e9407af5
tree		556aa7b5cd6eeb4214dec129c789515157187010 /kernel/trace/trace_functions_graph.c
parent		5705b8aca5a80141de5637ff0e23b31b26f2c5bf
parent		67bf12ca50d524f9e225347fe63533562e2004de

Merge branch 'next' into for-linus

Prepare first set of updates for 3.11 merge window.
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
 kernel/trace/trace_functions_graph.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 39ada66389c..8388bc99f2e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -218,7 +218,7 @@ int __trace_graph_entry(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
 
 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
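
[Annotation, not part of the commit: this hunk is the mechanical fallout of a struct reorganization in which the ring buffer pointer that previously lived directly in struct trace_array moved into an embedded struct trace_buffer. A skeletal before/after sketch follows; the _before/_after type names are invented for illustration and the field sets are trimmed to what this diff touches — the real definitions live in kernel/trace/trace.h.]

	#define NR_CPUS 4		/* stand-in for the configured CPU count */
	#define __percpu		/* sparse annotation; a no-op in this sketch */

	struct ring_buffer;		/* opaque in this sketch */
	struct trace_array_cpu;		/* opaque in this sketch */

	/* Before: buffer and per-CPU data hung directly off the trace array. */
	struct trace_array_before {
		struct ring_buffer *buffer;
		struct trace_array_cpu *data[NR_CPUS];
	};

	/* After: both consolidated into an embedded struct trace_buffer, so
	 * tr->buffer becomes tr->trace_buffer.buffer and tr->data[cpu] becomes
	 * per_cpu_ptr(tr->trace_buffer.data, cpu). */
	struct trace_buffer_after {
		struct ring_buffer *buffer;
		struct trace_array_cpu __percpu *data;
	};

	struct trace_array_after {
		struct trace_buffer_after trace_buffer;
	};
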
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
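
[Annotation, not part of the commit: the tr->data[cpu] to per_cpu_ptr(tr->trace_buffer.data, cpu) conversion in this hunk, repeated in trace_graph_return() below, reflects the per-CPU state moving from a fixed array into percpu storage hanging off the embedded trace_buffer. Below is a minimal, runnable userspace analogue of the access pattern; the simplified structs and the per_cpu_ptr_sim() helper are inventions for illustration, not the kernel API.]

	#include <stdio.h>

	#define NR_CPUS 4			/* stand-in CPU count */

	struct trace_array_cpu { int disabled; };	/* trimmed stand-in */
	struct trace_buffer { struct trace_array_cpu *data; };
	struct trace_array { struct trace_buffer trace_buffer; };

	/* Userspace analogue of per_cpu_ptr(): offset the percpu base by CPU id. */
	static struct trace_array_cpu *per_cpu_ptr_sim(struct trace_array_cpu *base,
						       int cpu)
	{
		return &base[cpu];
	}

	int main(void)
	{
		static struct trace_array_cpu cpu_data[NR_CPUS];
		struct trace_array tr = { .trace_buffer = { .data = cpu_data } };

		/* Old form: data = tr->data[cpu];
		 * New form: data = per_cpu_ptr(tr->trace_buffer.data, cpu); */
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			struct trace_array_cpu *data =
				per_cpu_ptr_sim(tr.trace_buffer.data, cpu);
			data->disabled++;
			printf("cpu %d disabled=%d\n", cpu, data->disabled);
		}
		return 0;
	}
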
@@ -323,7 +323,7 @@ void __trace_graph_return(struct trace_array *tr,
 {
 	struct ftrace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
 
 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
@@ -560,9 +560,9 @@ get_return_for_leaf(struct trace_iterator *iter,
 			 * We need to consume the current entry to see
 			 * the next one.
 			 */
-			ring_buffer_consume(iter->tr->buffer, iter->cpu,
+			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 					    NULL, NULL);
-			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 					    NULL, NULL);
 		}
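
[Annotation, not part of the commit: in this last hunk, ring_buffer_consume() removes the next event for a CPU while ring_buffer_peek() returns it without advancing, which is how get_return_for_leaf() looks one entry ahead for a leaf function's matching return record. Below is a tiny runnable sketch of that consume-then-peek step over an invented single-reader queue; none of these types mirror the kernel's ring buffer.]

	#include <stdio.h>

	/* Invented single-CPU event queue; the kernel ring buffer is far richer. */
	struct event { int id; };
	struct ring { struct event ev[8]; int head, tail; };

	/* Analogue of ring_buffer_peek(): look at the next event, don't advance. */
	static struct event *peek(struct ring *r)
	{
		return r->head == r->tail ? NULL : &r->ev[r->head];
	}

	/* Analogue of ring_buffer_consume(): return the next event and advance. */
	static struct event *consume(struct ring *r)
	{
		struct event *e = peek(r);
		if (e)
			r->head = (r->head + 1) % 8;
		return e;
	}

	int main(void)
	{
		struct ring r = { .ev = { {1}, {2}, {3} }, .head = 0, .tail = 3 };

		/* get_return_for_leaf()'s step: consume the current entry,
		 * then peek at the one after it. */
		consume(&r);
		struct event *next = peek(&r);
		if (next)
			printf("next event id=%d\n", next->id);
		return 0;
	}
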