-rw-r--r--  include/linux/perf_event.h                 33
-rw-r--r--  kernel/events/Makefile                       2
-rw-r--r--  kernel/events/core.c                       788
-rw-r--r--  kernel/events/internal.h                    97
-rw-r--r--  kernel/events/ring_buffer.c                399
-rw-r--r--  tools/perf/Documentation/perf-script.txt    12
-rw-r--r--  tools/perf/builtin-script.c                108
-rw-r--r--  tools/perf/builtin-stat.c                    9
-rw-r--r--  tools/perf/util/evsel.c                      1
-rw-r--r--  tools/perf/util/evsel.h                      1
-rw-r--r--  tools/perf/util/python.c                    17
-rw-r--r--  tools/perf/util/session.c                   61
-rw-r--r--  tools/perf/util/session.h                    5
13 files changed, 841 insertions(+), 692 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e0786e35f24..e76a41010e1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -680,33 +680,6 @@ enum perf_event_active_state {
};
struct file;
-
-#define PERF_BUFFER_WRITABLE 0x01
-
-struct perf_buffer {
- atomic_t refcount;
- struct rcu_head rcu_head;
-#ifdef CONFIG_PERF_USE_VMALLOC
- struct work_struct work;
- int page_order; /* allocation order */
-#endif
- int nr_pages; /* nr of data pages */
- int writable; /* are we writable */
-
- atomic_t poll; /* POLL_ for wakeups */
-
- local_t head; /* write position */
- local_t nest; /* nested writers */
- local_t events; /* event limit */
- local_t wakeup; /* wakeup stamp */
- local_t lost; /* nr records lost */
-
- long watermark; /* wakeup watermark */
-
- struct perf_event_mmap_page *user_page;
- void *data_pages[0];
-};
-
struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -745,6 +718,8 @@ struct perf_cgroup {
};
#endif
+struct ring_buffer;
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -834,7 +809,7 @@ struct perf_event {
atomic_t mmap_count;
int mmap_locked;
struct user_struct *mmap_user;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
/* poll related */
wait_queue_head_t waitq;
@@ -945,7 +920,7 @@ struct perf_cpu_context {
struct perf_output_handle {
struct perf_event *event;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
void *addr;
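
[Note: the header now only forward-declares struct ring_buffer; the full layout moves out of the public header into kernel/events/internal.h (see the diffstat), so other code can hold event->rb without seeing inside it. A minimal sketch of that opaque-struct split, with illustrative names rather than the actual internal.h contents:

        /* public header: only the type name is visible, callers hold a pointer */
        struct ring_buffer;

        struct some_consumer {                  /* hypothetical user of the handle */
                struct ring_buffer *rb;         /* opaque; layout unknown here */
        };

        /* private header (internal.h in this patch): implementation-only layout */
        struct ring_buffer {
                int refcount;
                int nr_pages;
                /* ... */
        };
]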
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 1ce23d3d839..89e5e8aa4c3 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif
-obj-y := core.o
+obj-y := core.o ring_buffer.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9efe7108cca..5e70f62752a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,6 +36,8 @@
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
+#include "internal.h"
+
#include <asm/irq_regs.h>
struct remote_function_call {
@@ -200,6 +202,22 @@ __get_cpu_context(struct perf_event_context *ctx)
return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
+static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ raw_spin_lock(&cpuctx->ctx.lock);
+ if (ctx)
+ raw_spin_lock(&ctx->lock);
+}
+
+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ if (ctx)
+ raw_spin_unlock(&ctx->lock);
+ raw_spin_unlock(&cpuctx->ctx.lock);
+}
+
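
[Note: perf_ctx_lock()/perf_ctx_unlock() pin the CPU context and, when present, the task context in one fixed order: cpuctx->ctx.lock first, then ctx->lock, released in reverse. A condensed sketch of the pattern the later hunks in this file build on, not literal code from any single hunk:

        perf_ctx_lock(cpuctx, cpuctx->task_ctx);   /* cpu ctx lock, then task ctx lock */
        perf_pmu_disable(cpuctx->ctx.pmu);

        /* ... schedule events out/in with both contexts held ... */

        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx); /* unlock in reverse order */
]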
#ifdef CONFIG_CGROUP_PERF
/*
@@ -340,11 +358,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
-
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
/*
* perf_cgroup_events says at least one
* context on this CPU has cgroup events.
@@ -353,6 +368,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
* events for a context.
*/
if (cpuctx->ctx.nr_cgroups > 0) {
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+ perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
@@ -372,9 +389,9 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
cpuctx->cgrp = perf_cgroup_from_task(task);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
-
- perf_pmu_enable(cpuctx->ctx.pmu);
}
rcu_read_unlock();
@@ -1105,6 +1122,10 @@ static int __perf_remove_from_context(void *info)
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
list_del_event(event, ctx);
+ if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+ ctx->is_active = 0;
+ cpuctx->task_ctx = NULL;
+ }
raw_spin_unlock(&ctx->lock);
return 0;
@@ -1454,8 +1475,24 @@ static void add_event_to_ctx(struct perf_event *event,
event->tstamp_stopped = tstamp;
}
-static void perf_event_context_sched_in(struct perf_event_context *ctx,
- struct task_struct *tsk);
+static void task_ctx_sched_out(struct perf_event_context *ctx);
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type,
+ struct task_struct *task);
+
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx,
+ struct task_struct *task)
+{
+ cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+ if (ctx)
+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+ if (ctx)
+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
/*
* Cross CPU call to install and enable a performance event
@@ -1466,20 +1503,37 @@ static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
- struct perf_event *leader = event->group_leader;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
- int err;
+ struct perf_event_context *task_ctx = cpuctx->task_ctx;
+ struct task_struct *task = current;
+
+ perf_ctx_lock(cpuctx, task_ctx);
+ perf_pmu_disable(cpuctx->ctx.pmu);
/*
- * In case we're installing a new context to an already running task,
- * could also happen before perf_event_task_sched_in() on architectures
- * which do context switches with IRQs enabled.
+ * If there was an active task_ctx schedule it out.
*/
- if (ctx->task && !cpuctx->task_ctx)
- perf_event_context_sched_in(ctx, ctx->task);
+ if (task_ctx)
+ task_ctx_sched_out(task_ctx);
+
+ /*
+ * If the context we're installing events in is not the
+ * active task_ctx, flip them.
+ */
+ if (ctx->task && task_ctx != ctx) {
+ if (task_ctx)
+ raw_spin_unlock(&task_ctx->lock);
+ raw_spin_lock(&ctx->lock);
+ task_ctx = ctx;
+ }
+
+ if (task_ctx) {
+ cpuctx->task_ctx = task_ctx;
+ task = task_ctx->task;
+ }
+
+ cpu_ctx_sched_out(cpuctx, EVENT_ALL);
- raw_spin_lock(&ctx->lock);
- ctx->is_active = 1;
update_context_time(ctx);
/*
* update cgrp time only if current cgrp
@@ -1490,43 +1544,13 @@ static int __perf_install_in_context(void *info)
add_event_to_ctx(event, ctx);
- if (!event_filter_match(event))
- goto unlock;
-
/*
- * Don't put the event on if it is disabled or if
- * it is in a group and the group isn't on.
+ * Schedule everything back in
*/
- if (event->state != PERF_EVENT_STATE_INACTIVE ||
- (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
- goto unlock;
-
- /*
- * An exclusive event can't go on if there are already active
- * hardware events, and no hardware event can go on if there
- * is already an exclusive event on.
- */
- if (!group_can_go_on(event, cpuctx, 1))
- err = -EEXIST;
- else
- err = event_sched_in(event, cpuctx, ctx);
+ perf_event_sched_in(cpuctx, task_ctx, task);
- if (err) {
- /*
- * This event couldn't go on. If it is in a group
- * then we have to pull the whole group off.
- * If the event group is pinned then put it in error state.
- */
- if (leader != event)
- group_sched_out(leader, cpuctx, ctx);
- if (leader->attr.pinned) {
- update_group_times(leader);
- leader->state = PERF_EVENT_STATE_ERROR;
- }
- }
-
-unlock:
- raw_spin_unlock(&ctx->lock);
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, task_ctx);
return 0;
}
@@ -1758,30 +1782,28 @@ static void ctx_sched_out(struct perf_event_context *ctx,
enum event_type_t event_type)
{
struct perf_event *event;
+ int is_active = ctx->is_active;
- raw_spin_lock(&ctx->lock);
- perf_pmu_disable(ctx->pmu);
- ctx->is_active = 0;
+ ctx->is_active &= ~event_type;
if (likely(!ctx->nr_events))
- goto out;
+ return;
+
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
-
if (!ctx->nr_active)
- goto out;
+ return;
- if (event_type & EVENT_PINNED) {
+ perf_pmu_disable(ctx->pmu);
+ if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
- if (event_type & EVENT_FLEXIBLE) {
+ if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
-out:
perf_pmu_enable(ctx->pmu);
- raw_spin_unlock(&ctx->lock);
}
/*
@@ -1929,8 +1951,10 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
rcu_read_unlock();
if (do_switch) {
+ raw_spin_lock(&ctx->lock);
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
+ raw_spin_unlock(&ctx->lock);
}
}
@@ -1965,8 +1989,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
perf_cgroup_sched_out(task);
}
-static void task_ctx_sched_out(struct perf_event_context *ctx,
- enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
@@ -1976,7 +1999,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- ctx_sched_out(ctx, cpuctx, event_type);
+ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
}
@@ -2055,11 +2078,11 @@ ctx_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
u64 now;
+ int is_active = ctx->is_active;
- raw_spin_lock(&ctx->lock);
- ctx->is_active = 1;
+ ctx->is_active |= event_type;
if (likely(!ctx->nr_events))
- goto out;
+ return;
now = perf_clock();
ctx->timestamp = now;
@@ -2068,15 +2091,12 @@ ctx_sched_in(struct perf_event_context *ctx,
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (event_type & EVENT_PINNED)
+ if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
- if (event_type & EVENT_FLEXIBLE)
+ if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
ctx_flexible_sched_in(ctx, cpuctx);
-
-out:
- raw_spin_unlock(&ctx->lock);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
@@ -2088,19 +2108,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
ctx_sched_in(ctx, cpuctx, event_type, task);
}
-static void task_ctx_sched_in(struct perf_event_context *ctx,
- enum event_type_t event_type)
-{
- struct perf_cpu_context *cpuctx;
-
- cpuctx = __get_cpu_context(ctx);
- if (cpuctx->task_ctx == ctx)
- return;
-
- ctx_sched_in(ctx, cpuctx, event_type, NULL);
- cpuctx->task_ctx = ctx;
-}
-
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
@@ -2110,6 +2117,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
if (cpuctx->task_ctx == ctx)
return;
+ perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
@@ -2118,18 +2126,18 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
- ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+ perf_event_sched_in(cpuctx, ctx, task);
cpuctx->task_ctx = ctx;
+ perf_pmu_enable(ctx->pmu);
+ perf_ctx_unlock(cpuctx, ctx);
+
/*
* Since these rotations are per-cpu, we need to ensure the
* cpu-context we got scheduled on is actually rotating.
*/
perf_pmu_rotate_start(ctx->pmu);
- perf_pmu_enable(ctx->pmu);
}
/*
@@ -2269,7 +2277,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
u64 interrupts, now;
s64 delta;
- raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -2301,7 +2308,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
if (delta > 0)
perf_adjust_period(event, period, delta);
}
- raw_spin_unlock(&ctx->lock);
}
/*
@@ -2309,16 +2315,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
*/
static void rotate_ctx(struct perf_event_context *ctx)
{
- raw_spin_lock(&ctx->lock);
-
/*
* Rotate the first entry last of non-pinned groups. Rotation might be
* disabled by the inheritance code.
*/
if (!ctx->rotate_disable)
list_rotate_left(&ctx->flexible_groups);
-
- raw_spin_unlock(&ctx->lock);
}
/*
@@ -2345,6 +2347,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
rotate = 1;
}
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
perf_ctx_adjust_freq(&cpuctx->ctx, interval);
if (ctx)
@@ -2355,21 +2358,20 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
- task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
- if (ctx)
- task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+ perf_event_sched_in(cpuctx, ctx, current);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
void perf_event_task_tick(void)
@@ -2424,9 +2426,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
* in.
*/
perf_cgroup_sched_out(current);
- task_ctx_sched_out(ctx, EVENT_ALL);
raw_spin_lock(&ctx->lock);
+ task_ctx_sched_out(ctx);
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
ret = event_enable_on_exec(event, ctx);
@@ -2835,16 +2837,12 @@ retry:
unclone_ctx(ctx);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- }
-
- if (!ctx) {
+ } else {
ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
if (!ctx)
goto errout;
- get_ctx(ctx);
-
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
@@ -2856,14 +2854,14 @@ retry:
else if (task->perf_event_ctxp[ctxn])
err = -EAGAIN;
else {
+ get_ctx(ctx);
++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
}
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
- put_task_struct(task);
- kfree(ctx);
+ put_ctx(ctx);
if (err == -EAGAIN)
goto retry;
@@ -2890,7 +2888,7 @@ static void free_event_rcu(struct rcu_head *head)
kfree(event);
}
-static void perf_buffer_put(struct perf_buffer *buffer);
+static void ring_buffer_put(struct ring_buffer *rb);
static void free_event(struct perf_event *event)
{
@@ -2913,9 +2911,9 @@ static void free_event(struct perf_event *event)
}
}
- if (event->buffer) {
- perf_buffer_put(event->buffer);
- event->buffer = NULL;
+ if (event->rb) {
+ ring_buffer_put(event->rb);
+ event->rb = NULL;
}
if (is_cgroup_event(event))
@@ -2934,12 +2932,6 @@ int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
- /*
- * Remove from the PMU, can't get re-enabled since we got
- * here because the last ref went.
- */
- perf_event_disable(event);
-
WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
@@ -2956,8 +2948,8 @@ int perf_event_release_kernel(struct perf_event *event)
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
- list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
+ perf_remove_from_context(event);
mutex_unlock(&ctx->mutex);
free_event(event);
@@ -3149,13 +3141,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
unsigned int events = POLL_HUP;
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (buffer)
- events = atomic_xchg(&buffer->poll, 0);
+ rb = rcu_dereference(event->rb);
+ if (rb)
+ events = atomic_xchg(&rb->poll, 0);
rcu_read_unlock();
poll_wait(file, &event->waitq, wait);
@@ -3366,14 +3358,14 @@ static int perf_event_index(struct perf_event *event)
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (!buffer)
+ rb = rcu_dereference(event->rb);
+ if (!rb)
goto unlock;
- userpg = buffer->user_page;
+ userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
@@ -3400,220 +3392,10 @@ unlock:
rcu_read_unlock();
}
-static unsigned long perf_data_size(struct perf_buffer *buffer);
-
-static void
-perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
-{
- long max_size = perf_data_size(buffer);
-
- if (watermark)
- buffer->watermark = min(max_size, watermark);
-
- if (!buffer->watermark)
- buffer->watermark = max_size / 2;
-
- if (flags & PERF_BUFFER_WRITABLE)
- buffer->writable = 1;
-
- atomic_set(&buffer->refcount, 1);
-}
-
-#ifndef CONFIG_PERF_USE_VMALLOC
-
-/*
- * Back perf_mmap() with regular GFP_KERNEL-0 pages.
- */
-
-static struct page *
-perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
-{
- if (pgoff > buffer->nr_pages)
- return NULL;
-
- if (pgoff == 0)
- return virt_to_page(buffer->user_page);
-
- return virt_to_page(buffer->data_pages[pgoff - 1]);
-}
-
-static void *perf_mmap_alloc_page(int cpu)
-{
- struct page *page;
- int node;
-
- node = (cpu == -1) ? cpu : cpu_to_node(cpu);
- page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
- if (!page)
- return NULL;
-
- return page_address(page);
-}
-
-static struct perf_buffer *
-perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
-{
- struct perf_buffer *buffer;
- unsigned long size;
- int i;
-
- size = sizeof(struct perf_buffer);
- size += nr_pages * sizeof(void *);
-
- buffer = kzalloc(size, GFP_KERNEL);
- if (!buffer)
- goto fail;
-
- buffer->user_page = perf_mmap_alloc_page(cpu);
- if (!buffer->user_page)
- goto fail_user_page;
-
- for (i = 0; i < nr_pages; i++) {
- buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
- if (!buffer->data_pages[i])
- goto fail_data_pages;
- }
-
- buffer->nr_pages = nr_pages;
-
- perf_buffer_init(buffer, watermark, flags);
-
- return buffer;
-
-fail_data_pages:
- for (i--; i >= 0; i--)
- free_page((unsigned long)buffer->data_pages[i]);
-
- free_page((unsigned long)buffer->user_page);
-
-fail_user_page:
- kfree(buffer);
-
-fail:
- return NULL;
-}
-
-static void perf_mmap_free_page(unsigned long addr)
-{
- struct page *page = virt_to_page((void *)addr);
-
- page->mapping = NULL;
- __free_page(page);
-}
-
-static void perf_buffer_free(struct perf_buffer *buffer)
-{
- int i;
-
- perf_mmap_free_page((unsigned long)buffer->user_page);
- for (i = 0; i < buffer->nr_pages; i++)
- perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
- kfree(buffer);
-}
-
-static inline int page_order(struct perf_buffer *buffer)
-{
- return 0;
-}
-
-#else
-
-/*
- * Back perf_mmap() with vmalloc memory.
- *
- * Required for architectures that have d-cache aliasing issues.
- */
-
-static inline int page_order(struct perf_buffer *buffer)
-{
- return buffer->page_order;
-}
-
-static struct page *
-perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
-{
- if (pgoff > (1UL << page_order(buffer)))
- return NULL;
-
- return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
-}
-
-static void perf_mmap_unmark_page(void *addr)
-{
- struct page *page = vmalloc_to_page(addr);
-
- page->mapping = NULL;
-}
-
-static void perf_buffer_free_work(struct work_struct *work)
-{
- struct perf_buffer *buffer;
- void *base;
- int i, nr;
-
- buffer = container_of(work, struct perf_buffer, work);
- nr = 1 << page_order(buffer);
-
- base = buffer->user_page;
- for (i = 0; i < nr + 1; i++)
- perf_mmap_unmark_page(base + (i * PAGE_SIZE));
-
- vfree(base);
- kfree(buffer);
-}
-
-static void perf_buffer_free(struct perf_buffer *buffer)
-{
- schedule_work(&buffer->work);
-}
-
-static struct perf_buffer *
-perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
-{
- struct perf_buffer *buffer;
- unsigned long size;
- void *all_buf;
-
- size = sizeof(struct perf_buffer);
- size += sizeof(void *);
-
- buffer = kzalloc(size, GFP_KERNEL);
- if (!buffer)
- goto fail;
-
- INIT_WORK(&buffer->work, perf_buffer_free_work);
-
- all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
- if (!all_buf)
- goto fail_all_buf;
-
- buffer->user_page = all_buf;
- buffer->data_pages[0] = all_buf + PAGE_SIZE;
- buffer->page_order = ilog2(nr_pages);
- buffer->nr_pages = 1;
-
- perf_buffer_init(buffer, watermark, flags);
-
- return buffer;
-
-fail_all_buf:
- kfree(buffer);
-
-fail:
- return NULL;
-}
-
-#endif
-
-static unsigned long perf_data_size(struct perf_buffer *buffer)
-{
- return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
-}
-
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct perf_event *event = vma->vm_file->private_data;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -3623,14 +3405,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (!buffer)
+ rb = rcu_dereference(event->rb);
+ if (!rb)
goto unlock;
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
- vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
+ vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
@@ -3645,35 +3427,35 @@ unlock:
return ret;
}
-static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
+static void rb_free_rcu(struct rcu_head *rcu_head)
{
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
- buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
- perf_buffer_free(buffer);
+ rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+ rb_free(rb);
}
-static struct perf_buffer *perf_buffer_get(struct perf_event *event)
+static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (buffer) {
- if (!atomic_inc_not_zero(&buffer->refcount))
- buffer = NULL;
+ rb = rcu_dereference(event->rb);
+ if (rb) {
+ if (!atomic_inc_not_zero(&rb->refcount))
+ rb = NULL;
}
rcu_read_unlock();
- return buffer;
+ return rb;
}
-static void perf_buffer_put(struct perf_buffer *buffer)
+static void ring_buffer_put(struct ring_buffer *rb)
{
- if (!atomic_dec_and_test(&buffer->refcount))
+ if (!atomic_dec_and_test(&rb->refcount))
return;
- call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
+ call_rcu(&rb->rcu_head, rb_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
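
[Note: ring_buffer_get() only succeeds while some other reference is still live (atomic_inc_not_zero() under rcu_read_lock()), and ring_buffer_put() defers the actual free to rb_free_rcu() after a grace period, so concurrent readers never touch freed pages. A sketch of how a caller would use the pair, assuming the usual lookup-then-hold pattern:

        struct ring_buffer *rb = ring_buffer_get(event); /* NULL if already torn down */
        if (rb) {
                /* rb, rb->user_page and the data pages stay valid while we hold this ref */
                ring_buffer_put(rb);                     /* last put queues rb_free_rcu() */
        }
]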
@@ -3688,16 +3470,16 @@ static void perf_mmap_close(struct vm_area_struct *vma)
struct perf_event *event = vma->vm_file->private_data;
if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
- unsigned long size = perf_data_size(event->buffer);
+ unsigned long size = perf_data_size(event->rb);
struct user_struct *user = event->mmap_user;
- struct perf_buffer *buffer = event->buffer;
+ struct ring_buffer *rb = event->rb;
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->locked_vm -= event->mmap_locked;
- rcu_assign_pointer(event->buffer, NULL);
+ rcu_assign_pointer(event->rb, NULL);
mutex_unlock(&event->mmap_mutex);
- perf_buffer_put(buffer);
+ ring_buffer_put(rb);
free_uid(user);
}
}
@@ -3715,7 +3497,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra, extra;
@@ -3724,7 +3506,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
/*
* Don't allow mmap() of inherited per-task counters. This would
* create a performance issue due to all children writing to the
- * same buffer.
+ * same rb.
*/
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
@@ -3736,7 +3518,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
nr_pages = (vma_size / PAGE_SIZE) - 1;
/*
- * If we have buffer pages ensure they're a power-of-two number, so we
+ * If we have rb pages ensure they're a power-of-two number, so we
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
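
[Note: the power-of-two page-count check exists so the buffer code can wrap offsets with a mask instead of a modulo, as perf_output_space() further down does with mask = perf_data_size(buffer) - 1. A tiny worked example, not code from the patch:

        /* nr_pages = 16, PAGE_SHIFT = 12  ->  size = 65536, a power of two */
        unsigned long size = nr_pages << PAGE_SHIFT;
        unsigned long mask = size - 1;             /* 0xffff */
        unsigned long pos  = head & mask;          /* same as head % size, no division */
]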
@@ -3750,9 +3532,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->mmap_mutex);
- if (event->buffer) {
- if (event->buffer->nr_pages == nr_pages)
- atomic_inc(&event->buffer->refcount);
+ if (event->rb) {
+ if (event->rb->nr_pages == nr_pages)
+ atomic_inc(&event->rb->refcount);
else
ret = -EINVAL;
goto unlock;
@@ -3782,18 +3564,18 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
goto unlock;
}
- WARN_ON(event->buffer);
+ WARN_ON(event->rb);
if (vma->vm_flags & VM_WRITE)
- flags |= PERF_BUFFER_WRITABLE;
+ flags |= RING_BUFFER_WRITABLE;
- buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
+ rb = rb_alloc(nr_pages, event->attr.wakeup_watermark,
event->cpu, flags);
- if (!buffer) {
+ if (!rb) {
ret = -ENOMEM;
goto unlock;
}
- rcu_assign_pointer(event->buffer, buffer);
+ rcu_assign_pointer(event->rb, rb);
atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra;
@@ -3892,117 +3674,6 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
-/*
- * Output
- */
-static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
- unsigned long offset, unsigned long head)
-{
- unsigned long mask;
-
- if (!buffer->writable)
- return true;
-
- mask = perf_data_size(buffer) - 1;
-
- offset = (offset - tail) & mask;
- head = (head - tail) & mask;
-
- if ((int)(head - offset) < 0)
- return false;
-
- return true;
-}
-
-static void perf_output_wakeup(struct perf_output_handle *handle)
-{
- atomic_set(&handle->buffer->poll, POLL_IN);
-
- if (handle->nmi) {
- handle->event->pending_wakeup = 1;
- irq_work_queue(&handle->event->pending);
- } else
- perf_event_wakeup(handle->event);
-}
-
-/*
- * We need to ensure a later event_id doesn't publish a head when a former
- * event isn't done writing. However since we need to deal with NMIs we
- * cannot fully serialize things.
- *
- * We only publish the head (and generate a wakeup) when the outer-most
- * event completes.
- */
-static void perf_output_get_handle(struct perf_output_handle *handle)
-{
- struct perf_buffer *buffer = handle->buffer;
-
- preempt_disable();
- local_inc(&buffer->nest);
- handle->wakeup = local_read(&buffer->wakeup);
-}
-
-static void perf_output_put_handle(struct perf_output_handle *handle)
-{
- struct perf_buffer *buffer = handle->buffer;
- unsigned long head;
-
-again:
- head = local_read(&buffer->head);
-
- /*
- * IRQ/NMI can happen here, which means we can miss a head update.
- */
-
- if (!local_dec_and_test(&buffer->nest))
- goto out;
-
- /*
- * Publish the known good head. Rely on the full barrier implied
- * by atomic_dec_and_test() order the buffer->head read and this
- * write.
- */
- buffer->user_page->data_head = head;
-
- /*
- * Now check if we missed an update, rely on the (compiler)
- * barrier in atomic_dec_and_test() to re-read buffer->head.
- */
- if (unlikely(head != local_read(&buffer->head))) {
- local_inc(&buffer->nest);
- goto again;
- }
-
- if (handle->wakeup != local_read(&buffer->wakeup))
- perf_output_wakeup(handle);
-
-out:
- preempt_enable();
-}
-
-__always_inline void perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len)
-{
- do {
- unsigned long size = min_t(unsigned long, handle->size, len);
-
- memcpy(handle->addr, buf, size);
-
- len -= size;
- handle->addr += size;
- buf += size;
- handle->size -= size;
- if (!handle->size) {
- struct perf_buffer *buffer = handle->buffer;
-
- handle->page++;
- handle