Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                    13
-rw-r--r--  kernel/trace/blktrace.c                 12
-rw-r--r--  kernel/trace/ftrace.c                  107
-rw-r--r--  kernel/trace/kmemtrace.c               149
-rw-r--r--  kernel/trace/ring_buffer.c            1112
-rw-r--r--  kernel/trace/trace.c                   679
-rw-r--r--  kernel/trace/trace.h                    76
-rw-r--r--  kernel/trace/trace_boot.c               16
-rw-r--r--  kernel/trace/trace_events.c            146
-rw-r--r--  kernel/trace/trace_events_filter.c     261
-rw-r--r--  kernel/trace/trace_export.c             28
-rw-r--r--  kernel/trace/trace_functions.c           4
-rw-r--r--  kernel/trace/trace_functions_graph.c   166
-rw-r--r--  kernel/trace/trace_irqsoff.c             3
-rw-r--r--  kernel/trace/trace_mmiotrace.c          10
-rw-r--r--  kernel/trace/trace_power.c              22
-rw-r--r--  kernel/trace/trace_sched_switch.c       59
-rw-r--r--  kernel/trace/trace_sched_wakeup.c        7
-rw-r--r--  kernel/trace/trace_selftest.c            1
-rw-r--r--  kernel/trace/trace_stack.c              43
-rw-r--r--  kernel/trace/trace_stat.c               17
-rw-r--r--  kernel/trace/trace_stat.h                2
-rw-r--r--  kernel/trace/trace_syscalls.c          471
-rw-r--r--  kernel/trace/trace_workqueue.c          32
24 files changed, 2361 insertions(+), 1075 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 019f380fd76..1ea0d1234f4 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -41,7 +41,7 @@ config HAVE_FTRACE_MCOUNT_RECORD
config HAVE_HW_BRANCH_TRACER
bool
-config HAVE_FTRACE_SYSCALLS
+config HAVE_SYSCALL_TRACEPOINTS
bool
config TRACER_MAX_TRACE
@@ -60,9 +60,14 @@ config EVENT_TRACING
bool
config CONTEXT_SWITCH_TRACER
- select MARKERS
bool
+config RING_BUFFER_ALLOW_SWAP
+ bool
+ help
+ Allow the use of ring_buffer_swap_cpu.
+ Adds a very slight overhead to tracing when enabled.
+
# All tracer options should select GENERIC_TRACER. Options that are
# enabled by all tracers (context switch and event tracer) select TRACING.
# This allows those options to appear when no other tracer is selected. But the
@@ -147,6 +152,7 @@ config IRQSOFF_TRACER
select TRACE_IRQFLAGS
select GENERIC_TRACER
select TRACER_MAX_TRACE
+ select RING_BUFFER_ALLOW_SWAP
help
This option measures the time spent in irqs-off critical
sections, with microsecond accuracy.
@@ -168,6 +174,7 @@ config PREEMPT_TRACER
depends on PREEMPT
select GENERIC_TRACER
select TRACER_MAX_TRACE
+ select RING_BUFFER_ALLOW_SWAP
help
This option measures the time spent in preemption off critical
sections, with microsecond accuracy.
@@ -211,7 +218,7 @@ config ENABLE_DEFAULT_TRACERS
config FTRACE_SYSCALLS
bool "Trace syscalls"
- depends on HAVE_FTRACE_SYSCALLS
+ depends on HAVE_SYSCALL_TRACEPOINTS
select GENERIC_TRACER
select KALLSYMS
help
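
The new RING_BUFFER_ALLOW_SWAP symbol means the cost of supporting
ring_buffer_swap_cpu() is only paid when a tracer that needs it (the
irqsoff and preempt latency tracers above) is selected. A minimal
sketch, assuming the symbol simply compiles the swap path in or out
(declaration only; the real implementation lives in ring_buffer.c):

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/* Swap the cpu buffer of two ring buffers, so the latency tracers
 * can snapshot a max-latency trace. Body elided; sketch only. */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu);
#endif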
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7a34cb563fe..3eb159c277c 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -65,13 +65,15 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
{
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
+ struct ring_buffer *buffer = NULL;
int pc = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
if (blk_tracer) {
+ buffer = blk_tr->buffer;
pc = preempt_count();
- event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len,
0, pc);
if (!event)
@@ -96,7 +98,7 @@ record_it:
memcpy((void *) t + sizeof(*t), data, len);
if (blk_tracer)
- trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
}
}
@@ -179,6 +181,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
+ struct ring_buffer *buffer = NULL;
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
@@ -204,8 +207,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
if (blk_tracer) {
tracing_record_cmdline(current);
+ buffer = blk_tr->buffer;
pc = preempt_count();
- event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len,
0, pc);
if (!event)
@@ -252,7 +256,7 @@ record_it:
memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
if (blk_tracer) {
- trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
return;
}
}
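
The blktrace hunks follow an API change made throughout this series:
trace_buffer_lock_reserve() and trace_buffer_unlock_commit() now take
the struct ring_buffer directly instead of the trace_array, so callers
cache tr->buffer once and reuse it. A hedged sketch of the resulting
pattern (the payload handling is illustrative, not verbatim kernel
code):

static void emit_blk_event(void *data, size_t len, int pc)
{
	struct ring_buffer *buffer = blk_tr->buffer; /* cache once */
	struct ring_buffer_event *event;

	event = trace_buffer_lock_reserve(buffer, TRACE_BLK, len, 0, pc);
	if (!event)
		return;
	memcpy(ring_buffer_event_data(event), data, len);
	/* commit against the same buffer that was reserved */
	trace_buffer_unlock_commit(buffer, event, 0, pc);
}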
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 25edd5cc593..8c804e24f96 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1016,71 +1016,35 @@ static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
- unsigned long ip, fl;
+ unsigned long flag = 0UL;
ftrace_addr = (unsigned long)FTRACE_ADDR;
- ip = rec->ip;
-
/*
- * If this record is not to be traced and
- * it is not enabled then do nothing.
+ * If this record is not to be traced or we want to disable it,
+ * then disable it.
*
- * If this record is not to be traced and
- * it is enabled then disable it.
+ * If we want to enable it and filtering is off, then enable it.
*
+ * If we want to enable it and filtering is on, enable it only if
+ * it's filtered.
*/
- if (rec->flags & FTRACE_FL_NOTRACE) {
- if (rec->flags & FTRACE_FL_ENABLED)
- rec->flags &= ~FTRACE_FL_ENABLED;
- else
- return 0;
-
- } else if (ftrace_filtered && enable) {
- /*
- * Filtering is on:
- */
-
- fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
-
- /* Record is filtered and enabled, do nothing */
- if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
- return 0;
-
- /* Record is not filtered or enabled, do nothing */
- if (!fl)
- return 0;
-
- /* Record is not filtered but enabled, disable it */
- if (fl == FTRACE_FL_ENABLED)
- rec->flags &= ~FTRACE_FL_ENABLED;
- else
- /* Otherwise record is filtered but not enabled, enable it */
- rec->flags |= FTRACE_FL_ENABLED;
- } else {
- /* Disable or not filtered */
-
- if (enable) {
- /* if record is enabled, do nothing */
- if (rec->flags & FTRACE_FL_ENABLED)
- return 0;
-
- rec->flags |= FTRACE_FL_ENABLED;
-
- } else {
+ if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+ if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+ flag = FTRACE_FL_ENABLED;
+ }
- /* if record is not enabled, do nothing */
- if (!(rec->flags & FTRACE_FL_ENABLED))
- return 0;
+ /* If the state of this record hasn't changed, then do nothing */
+ if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+ return 0;
- rec->flags &= ~FTRACE_FL_ENABLED;
- }
+ if (flag) {
+ rec->flags |= FTRACE_FL_ENABLED;
+ return ftrace_make_call(rec, ftrace_addr);
}
- if (rec->flags & FTRACE_FL_ENABLED)
- return ftrace_make_call(rec, ftrace_addr);
- else
- return ftrace_make_nop(NULL, rec, ftrace_addr);
+ rec->flags &= ~FTRACE_FL_ENABLED;
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
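
The rewrite collapses the old if/else ladder into a single decision:
compute the desired FTRACE_FL_ENABLED state up front, and only patch
the call site when that state actually changes. A pure-logic
restatement of the decision (want_enabled() is a hypothetical helper;
the FTRACE_FL_* flags come from ftrace.h):

/* Returns nonzero if the record should end up enabled. */
static int want_enabled(int enable, unsigned long flags, int filtered)
{
	if (!enable || (flags & FTRACE_FL_NOTRACE))
		return 0;	/* disabling, or explicitly not traced */
	if (!filtered)
		return 1;	/* filtering off: enable everything */
	return !!(flags & FTRACE_FL_FILTER); /* filtering on: only filtered */
}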
@@ -1375,7 +1339,6 @@ struct ftrace_iterator {
unsigned flags;
unsigned char buffer[FTRACE_BUFF_MAX+1];
unsigned buffer_idx;
- unsigned filtered;
};
static void *
@@ -1438,18 +1401,13 @@ static int t_hash_show(struct seq_file *m, void *v)
{
struct ftrace_func_probe *rec;
struct hlist_node *hnd = v;
- char str[KSYM_SYMBOL_LEN];
rec = hlist_entry(hnd, struct ftrace_func_probe, node);
if (rec->ops->print)
return rec->ops->print(m, rec->ip, rec->ops, rec->data);
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- seq_printf(m, "%s:", str);
-
- kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
- seq_printf(m, "%s", str);
+ seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);
if (rec->data)
seq_printf(m, ":%p", rec->data);
@@ -1547,7 +1505,6 @@ static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
- char str[KSYM_SYMBOL_LEN];
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_show(m, v);
@@ -1560,9 +1517,7 @@ static int t_show(struct seq_file *m, void *v)
if (!rec)
return 0;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pf\n", (void *)rec->ip);
return 0;
}
@@ -1601,17 +1556,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
return ret;
}
-int ftrace_avail_release(struct inode *inode, struct file *file)
-{
- struct seq_file *m = (struct seq_file *)file->private_data;
- struct ftrace_iterator *iter = m->private;
-
- seq_release(inode, file);
- kfree(iter);
-
- return 0;
-}
-
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
@@ -2317,7 +2261,6 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
}
if (isspace(ch)) {
- iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
ret = ftrace_process_regex(iter->buffer,
iter->buffer_idx, enable);
@@ -2448,7 +2391,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
iter = file->private_data;
if (iter->buffer_idx) {
- iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
}
@@ -2479,14 +2421,14 @@ static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = ftrace_avail_release,
+ .release = seq_release_private,
};
static const struct file_operations ftrace_failures_fops = {
.open = ftrace_failures_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = ftrace_avail_release,
+ .release = seq_release_private,
};
static const struct file_operations ftrace_filter_fops = {
@@ -2548,7 +2490,6 @@ static void g_stop(struct seq_file *m, void *p)
static int g_show(struct seq_file *m, void *v)
{
unsigned long *ptr = v;
- char str[KSYM_SYMBOL_LEN];
if (!ptr)
return 0;
@@ -2558,9 +2499,7 @@ static int g_show(struct seq_file *m, void *v)
return 0;
}
- kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
-
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pf\n", v);
return 0;
}
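
Several of the hunks above replace the kallsyms_lookup()-into-a-stack-
buffer pattern with the %pf vsnprintf extension, which resolves a
function address to its symbol name directly. The before/after shape,
taken from the t_show() hunk:

/* Before: manual symbol lookup into a fixed buffer. */
char str[KSYM_SYMBOL_LEN];
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, "%s\n", str);

/* After: the printk core resolves the symbol for us. */
seq_printf(m, "%pf\n", (void *)rec->ip);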
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 1edaa9516e8..81b1645c854 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -183,11 +183,9 @@ static void kmemtrace_stop_probes(void)
static int kmem_trace_init(struct trace_array *tr)
{
- int cpu;
kmemtrace_array = tr;
- for_each_cpu(cpu, cpu_possible_mask)
- tracing_reset(tr, cpu);
+ tracing_reset_online_cpus(tr);
kmemtrace_start_probes();
@@ -239,12 +237,52 @@ struct kmemtrace_user_event_alloc {
};
static enum print_line_t
-kmemtrace_print_alloc_user(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
{
- struct kmemtrace_user_event_alloc *ev_alloc;
struct trace_seq *s = &iter->seq;
+ struct kmemtrace_alloc_entry *entry;
+ int ret;
+
+ trace_assign_type(entry, iter->ent);
+
+ ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
+ "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
+ entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
+ (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
+ (unsigned long)entry->gfp_flags, entry->node);
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_free_entry *entry;
+ int ret;
+
+ trace_assign_type(entry, iter->ent);
+
+ ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
+ entry->type_id, (void *)entry->call_site,
+ (unsigned long)entry->ptr);
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_alloc_entry *entry;
struct kmemtrace_user_event *ev;
+ struct kmemtrace_user_event_alloc *ev_alloc;
+
+ trace_assign_type(entry, iter->ent);
ev = trace_seq_reserve(s, sizeof(*ev));
if (!ev)
@@ -271,12 +309,14 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter,
}
static enum print_line_t
-kmemtrace_print_free_user(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
{
struct trace_seq *s = &iter->seq;
+ struct kmemtrace_free_entry *entry;
struct kmemtrace_user_event *ev;
+ trace_assign_type(entry, iter->ent);
+
ev = trace_seq_reserve(s, sizeof(*ev));
if (!ev)
return TRACE_TYPE_PARTIAL_LINE;
@@ -294,12 +334,14 @@ kmemtrace_print_free_user(struct trace_iterator *iter,
/* The two following functions provide a more minimal output */
static enum print_line_t
-kmemtrace_print_alloc_compress(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
+ struct kmemtrace_alloc_entry *entry;
struct trace_seq *s = &iter->seq;
int ret;
+ trace_assign_type(entry, iter->ent);
+
/* Alloc entry */
ret = trace_seq_printf(s, " + ");
if (!ret)
@@ -345,29 +387,24 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Node */
- ret = trace_seq_printf(s, "%4d ", entry->node);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- /* Call site */
- ret = seq_print_ip_sym(s, entry->call_site, 0);
+ /* Node and call site */
+ ret = trace_seq_printf(s, "%4d %pf\n", entry->node,
+ (void *)entry->call_site);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- if (!trace_seq_printf(s, "\n"))
- return TRACE_TYPE_PARTIAL_LINE;
-
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
-kmemtrace_print_free_compress(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_compress(struct trace_iterator *iter)
{
+ struct kmemtrace_free_entry *entry;
struct trace_seq *s = &iter->seq;
int ret;
+ trace_assign_type(entry, iter->ent);
+
/* Free entry */
ret = trace_seq_printf(s, " - ");
if (!ret)
@@ -401,19 +438,11 @@ kmemtrace_print_free_compress(struct trace_iterator *iter,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Skip node */
- ret = trace_seq_printf(s, " ");
+ /* Skip node and print call site */
+ ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Call site */
- ret = seq_print_ip_sym(s, entry->call_site, 0);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- if (!trace_seq_printf(s, "\n"))
- return TRACE_TYPE_PARTIAL_LINE;
-
return TRACE_TYPE_HANDLED;
}
@@ -421,32 +450,31 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
- switch (entry->type) {
- case TRACE_KMEM_ALLOC: {
- struct kmemtrace_alloc_entry *field;
-
- trace_assign_type(field, entry);
- if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
- return kmemtrace_print_alloc_compress(iter, field);
- else
- return kmemtrace_print_alloc_user(iter, field);
- }
-
- case TRACE_KMEM_FREE: {
- struct kmemtrace_free_entry *field;
-
- trace_assign_type(field, entry);
- if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
- return kmemtrace_print_free_compress(iter, field);
- else
- return kmemtrace_print_free_user(iter, field);
- }
+ if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+ return TRACE_TYPE_UNHANDLED;
+ switch (entry->type) {
+ case TRACE_KMEM_ALLOC:
+ return kmemtrace_print_alloc_compress(iter);
+ case TRACE_KMEM_FREE:
+ return kmemtrace_print_free_compress(iter);
default:
return TRACE_TYPE_UNHANDLED;
}
}
+static struct trace_event kmem_trace_alloc = {
+ .type = TRACE_KMEM_ALLOC,
+ .trace = kmemtrace_print_alloc,
+ .binary = kmemtrace_print_alloc_user,
+};
+
+static struct trace_event kmem_trace_free = {
+ .type = TRACE_KMEM_FREE,
+ .trace = kmemtrace_print_free,
+ .binary = kmemtrace_print_free_user,
+};
+
static struct tracer kmem_tracer __read_mostly = {
.name = "kmemtrace",
.init = kmem_trace_init,
@@ -463,6 +491,21 @@ void kmemtrace_init(void)
static int __init init_kmem_tracer(void)
{
- return register_tracer(&kmem_tracer);
+ if (!register_ftrace_event(&kmem_trace_alloc)) {
+ pr_warning("Warning: could not register kmem events\n");
+ return 1;
+ }
+
+ if (!register_ftrace_event(&kmem_trace_free)) {
+ pr_warning("Warning: could not register kmem events\n");
+ return 1;
+ }
+
+ if (register_tracer(&kmem_tracer) != 0) {
+ pr_warning("Warning: could not register the kmem tracer\n");
+ return 1;
+ }
+
+ return 0;
}
device_initcall(init_kmem_tracer);
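
kmemtrace output is now routed through the generic event framework: a
struct trace_event carries per-format callbacks (.trace for the text
output, .binary for the user-event format), and register_ftrace_event()
returns the assigned event type, or 0 on failure, which is why the init
code above treats a zero return from it as an error. A sketch of the
same pattern for a hypothetical event (TRACE_MYEVENT and
my_event_trace() are made up for illustration):

/* Hypothetical handler; signature matches kmemtrace_print_alloc(). */
static enum print_line_t my_event_trace(struct trace_iterator *iter,
					int flags)
{
	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_event = {
	.type	= TRACE_MYEVENT,
	.trace	= my_event_trace,
};

static int __init my_event_init(void)
{
	if (!register_ftrace_event(&my_event)) /* 0 means failure */
		pr_warning("could not register my_event\n");
	return 0;
}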
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a330513d96c..454e74e718c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -218,17 +218,12 @@ enum {
static inline int rb_null_event(struct ring_buffer_event *event)
{
- return event->type_len == RINGBUF_TYPE_PADDING
- && event->time_delta == 0;
-}
-
-static inline int rb_discarded_event(struct ring_buffer_event *event)
-{
- return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
+ return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}
static void rb_event_set_padding(struct ring_buffer_event *event)
{
+ /* padding has a zero time_delta */
event->type_len = RINGBUF_TYPE_PADDING;
event->time_delta = 0;
}
@@ -322,6 +317,14 @@ struct buffer_data_page {
unsigned char data[]; /* data of buffer page */
};
+/*
+ * Note, the buffer_page list must be first. The buffer pages
+ * are allocated in cache lines, which means that each buffer
+ * page will be at the beginning of a cache line, and thus
+ * the least significant bits will be zero. We use this to
+ * add flags in the list struct pointers, to make the ring buffer
+ * lockless.
+ */
struct buffer_page {
struct list_head list; /* list of buffer pages */
local_t write; /* index for next write */
@@ -330,6 +333,21 @@ struct buffer_page {
struct buffer_data_page *page; /* Actual data page */
};
+/*
+ * The buffer page counters, write and entries, must be reset
+ * atomically when crossing page boundaries. To synchronize this
+ * update, two counters are packed into a single word. One is
+ * the actual counter for the write position or count on the page.
+ *
+ * The other is a counter of updaters. Before an update happens,
+ * the updater portion of the word is incremented. This allows
+ * the updater to update the counter atomically.
+ *
+ * The counter is 20 bits, and the state data is 12.
+ */
+#define RB_WRITE_MASK 0xfffff
+#define RB_WRITE_INTCNT (1 << 20)
+
static void rb_init_page(struct buffer_data_page *bpage)
{
local_set(&bpage->commit, 0);
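
The RB_WRITE_MASK comment describes a split counter: the low 20 bits
hold the write index, and the upper 12 bits count in-flight updaters so
a concurrent reset can be detected. A hedged sketch of how such a
counter is manipulated with local_t operations (the helper names are
illustrative; the real users appear later in this file):

static void write_begin(local_t *counter)
{
	/* announce an updater in the upper 12 bits */
	local_add(RB_WRITE_INTCNT, counter);
}

static unsigned long write_index(local_t *counter)
{
	/* the write position lives in the low 20 bits */
	return local_read(counter) & RB_WRITE_MASK;
}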
@@ -403,21 +421,20 @@ int ring_buffer_print_page_header(struct trace_seq *s)
struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
- spinlock_t reader_lock; /* serialize readers */
+ spinlock_t reader_lock; /* serialize readers */
raw_spinlock_t lock;
struct lock_class_key lock_key;
- struct list_head pages;
+ struct list_head *pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
- unsigned long nmi_dropped;
- unsigned long commit_overrun;
- unsigned long overrun;
- unsigned long read;
+ local_t commit_overrun;
+ local_t overrun;
local_t entries;
local_t committing;
local_t commits;
+ unsigned long read;
u64 write_stamp;
u64 read_stamp;
atomic_t record_disabled;
@@ -450,14 +467,19 @@ struct ring_buffer_iter {
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
-#define RB_WARN_ON(buffer, cond) \
- ({ \
- int _____ret = unlikely(cond); \
- if (_____ret) { \
- atomic_inc(&buffer->record_disabled); \
- WARN_ON(1); \
- } \
- _____ret; \
+#define RB_WARN_ON(b, cond) \
+ ({ \
+ int _____ret = unlikely(cond); \
+ if (_____ret) { \
+ if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
+ struct ring_buffer_per_cpu *__b = \
+ (void *)b; \
+ atomic_inc(&__b->buffer->record_disabled); \
+ } else \
+ atomic_inc(&b->record_disabled); \
+ WARN_ON(1); \
+ } \
+ _____ret; \
})
/* Up this if you want to test the TIME_EXTENTS and normalization */
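
RB_WARN_ON() can now be handed either a struct ring_buffer or a
struct ring_buffer_per_cpu; __same_type() dispatches between them at
compile time. It is defined in include/linux/compiler.h on top of a
GCC builtin:

#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))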
@@ -489,6 +511,390 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
+/*
+ * Making the ring buffer lockless makes things tricky.
+ * Writes only happen on the CPU that they are on, so they
+ * only need to worry about interrupts. Reads, however, can
+ * happen on any CPU.
+ *
+ * The reader page is always off the ring buffer, but when the
+ * reader finishes with a page, it needs to swap its page with
+ * a new one from the buffer. The reader needs to take from
+ * the head (writes go to the tail). But if a writer is in overwrite
+ * mode and wraps, it must push the head page forward.
+ *
+ * Here lies the problem.
+ *
+ * The reader must be careful to replace only the head page, and
+ * not another one. As described at the top of the file in the
+ * ASCII art, the reader sets its old page to point to the next
+ * page after head. It then sets the page after head to point to
+ * the old reader page. But if the writer moves the head page
+ * during this operation, the reader could end up with the tail.
+ *
+ * We use cmpxchg to help prevent this race. We also do something
+ * special with the page before head. We set the LSB to 1.
+ *
+ * When the writer must push the page forward, it will clear the
+ * bit that points to the head page, move the head, and then set
+ * the bit that points to the new head page.
+ *
+ * We also don't want an interrupt coming in and moving the head
+ * page out from under another writer. We use the second LSB to
+ * catch that case too. Thus:
+ *
+ * head->list->prev->next bit 1 bit 0
+ * ------- -------
+ * Normal page 0 0
+ * Points to head page 0 1
+ * New head page 1 0
+ *
+ * Note we cannot trust the prev pointer of the head page, because:
+ *
+ * +----+ +-----+ +-----+
+ * | |------>| T |---X--->| N |
+ * | |<------| | | |
+ * +----+ +-----+ +-----+
+ * ^ ^ |
+ * | +-----+ | |
+ * +----------| R |----------+ |
+ * | |<-----------+
+ * +-----+
+ *
+ * Key: ---X--> HEAD flag set in pointer
+ * T Tail page
+ * R Reader page
+ * N Next page
+ *
+ * (see __rb_reserve_next() to see where this happens)
+ *
+ * What the above shows is that the reader just swapped out
+ * the reader page with a page in the buffer, but before it
+ * could make the new header point back to the new page added
+ * it was preempted by a writer. The writer moved forward onto
+ * the new page added by the reader and is about to move forward
+ * again.
+ *
+ * As you can see, it is legitimate for the previous pointer of
+ * the head (or any page) not to point back to itself, but only
+ * temporarily.
+ */
+
+#define RB_PAGE_NORMAL 0UL
+#define RB_PAGE_HEAD 1UL
+#define RB_PAGE_UPDATE 2UL
+
+
+#define RB_FLAG_MASK 3UL
+
+/* PAGE_MOVED is not part of the mask */
+#define RB_PAGE_MOVED 4UL
+
+/*
+ * rb_list_head - remove any bit
+ */
+static struct list_head *rb_list_head(struct list_head *list)
+{
+ unsigned long val = (unsigned long)list;
+
+ return (struct list_head *)(val & ~RB_FLAG_MASK);
+}
+
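
Because buffer pages are allocated cache-line aligned, the two low bits
of any list pointer are guaranteed zero and are free to carry the
RB_PAGE_HEAD/RB_PAGE_UPDATE flags; rb_list_head() strips them before
the pointer is dereferenced. A self-contained sketch of the tagging
trick (names are illustrative):

#define FLAG_MASK 3UL	/* bits 0-1 are free on aligned pointers */

static inline void *tag_ptr(void *p, unsigned long flag)
{
	return (void *)((unsigned long)p | flag);       /* store flag */
}

static inline void *untag_ptr(void *p)
{
	return (void *)((unsigned long)p & ~FLAG_MASK); /* usable pointer */
}

static inline unsigned long ptr_flags(void *p)
{
	return (unsigned long)p & FLAG_MASK;
}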
+/*
+ * rb_is_head_page - test if the given page is the head page
+ *
+ * Because the reader may move the head_page pointer, we can
+ * not trust what the head page is (it may be pointing to
+ * the reader page). But the pointer to the head page (held in
+ * the previous page's next pointer) will have its flag bits set.
+ */
+static inline int
+rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *page, struct list_head *list)
+{
+ unsigned long val;
+
+ val = (unsigned long)list->next;
+
+ if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
+ return RB_PAGE_MOVED;
+
+ return val & RB_FLAG_MASK;
+}
+
+/*
+ * rb_is_reader_page
+ *
+ * The unique thing about the reader page is that, if the
+ * writer is ever on it, the previous pointer never points
+ * back to the reader page.
+ */
+static int rb_is_reader_page(struct buffer_page *page)
+{
+ struct list_head *list = page->list.prev;
+
+ return rb_list_head(list->next) != &page->list;
+}
+
+/*
+ * rb_set_list_to_head - set a list_head to point to the head page.
+ */
+static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
+ struct list_head *list)
+{
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)&list->next;
+ *ptr |= RB_PAGE_HEAD;
+ *ptr &= ~RB_PAGE_UPDATE;
+}
+
+/*
+ * rb_head_page_activate - sets up head page
+ */
+static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct buffer_page *head;
+
+ head = cpu_buffer->head_page;
+ if (!head)
+ return;
+
+ /*
+ * Set the previous list pointer to have the HEAD flag.
+ */
+ rb_set_list_to_head(cpu_buffer, head->list.prev);
+}
+
+static void rb_list_head_clear(struct list_head *list)
+{
+ unsigned long *ptr = (unsigned long *)&list->next;
+
+ *ptr &= ~RB_FLAG_MASK;
+}
+
+/*
+ * rb_head_page_deactivate - clears head page ptr (for free list)
+ */
+static void
+rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct list_head *hd;
+
+ /* Go through the whole list and clear any pointers found. */
+ rb_list_head_clear(cpu_buffer->pages);
+
+ list_for_each(hd, cpu_buffer->pages)
+ rb_list_head_clear(hd);
+}
+
+static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag, int new_flag)
+{
+ struct list_head *list;
+ unsigned long val = (unsigned long)&head->list;
+ unsigned long ret;
+
+ list = &prev->list;
+
+ val &= ~RB_FLAG_MASK;
+
+ ret = (unsigned long)cmpxchg(&list->next,
+ val | old_flag, val | new_flag);
+
+ /* check if the reader took the page */
+ if ((ret & ~RB_FLAG_MASK) != val)
+ return RB_PAGE_MOVED;
+
+ return ret & RB_FLAG_MASK;
+}
+
+static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag)
+{
+ return rb_head_page_set(cpu_buffer, head, prev,
+ old_flag, RB_PAGE_UPDATE);
+}
+
+static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag)
+{
+ return rb_head_page_set(cpu_buffer, head, prev,
+ old_flag, RB_PAGE_HEAD);
+}
+
+static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag)
+{
+ return rb_head_page_set(cpu_buffer, head, prev,
+ old_flag, RB_PAGE_NORMAL);
+}
+
+static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page **bpage)
+{
+ struct list_head *p = rb_list_head((*bpage)->list.next);
+
+ *bpage = list_entry(p, struct buffer_page, list);
+}
+
+static struct buffer_page *
+rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct buffer_page *head;
+ struct buffer_page *page;
+ struct list_head *list;
+ int i;
+
+ if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
+ return NULL;
+
+ /* sanity check */
+ list = cpu_buffer->pages;
+ if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
+ return NULL;
+
+ page = head = cpu_buffer->head_page;
+ /*
+ * It is possible that the writer moves the head page behind
+ * where we started, and we miss it in one loop.
+ * A second loop should grab the head page, but we'll do
+ * three loops just because I'm paranoid.
+ */
+ for (i = 0; i < 3; i++) {
+ do {
+ if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
+ cpu_buffer->head_page = page;
+ return page;
+ }
+ rb_inc_page(cpu_buffer, &page);
+ } while (page != head);
+ }
+
+ RB_WARN_ON(cpu_buffer, 1);
+
+ return NULL;
+}
+
+static int rb_head_page_replace(struct buffer_page *old,
+ struct buffer_page *new)
+{
+ unsigned long *ptr = (unsigned long *)&old->list.prev->next;
+ unsigned long val;
+ unsigned long ret;
+
+ val = *ptr & ~RB_FLAG_MASK;
+ val |= RB_PAGE_HEAD;
+
+ ret = cmpxchg(ptr, val