Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c |  70
1 file changed, 42 insertions(+), 28 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9052d6c8c9f..2ae7409bf38 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,9 @@ static void __perf_install_in_context(void *info)
add_event_to_ctx(event, ctx);
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ goto unlock;
+
/*
* Don't put the event on if it is disabled or if
* it is in a group and the group isn't on.
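
This cpu filter recurs throughout the patch: an event with event->cpu != -1 is bound to one CPU, and a per-task-per-cpu event should only be scheduled in or matched while its task is running on that CPU. A minimal userspace sketch of the predicate, with sched_getcpu() standing in for smp_processor_id() and event_cpu_match() being an illustrative name, not kernel API (in the kernel the check runs with preemption disabled, which this sketch ignores):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the kernel check: an event bound to
       event_cpu (-1 means "any CPU") only matches on that CPU. */
    static bool event_cpu_match(int event_cpu)
    {
            return event_cpu == -1 || event_cpu == sched_getcpu();
    }

    int main(void)
    {
            printf("wildcard matches here: %d\n", event_cpu_match(-1));
            printf("cpu 0 matches here:    %d\n", event_cpu_match(0));
            return 0;
    }
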
@@ -925,6 +928,9 @@ static void __perf_event_enable(void *info)
goto unlock;
__perf_event_mark_enabled(event, ctx);
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ goto unlock;
+
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
@@ -1375,6 +1381,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ continue;
+
hwc = &event->hw;
interrupts = hwc->interrupts;
@@ -1595,15 +1604,12 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
unsigned long flags;
int err;
- /*
- * If cpu is not a wildcard then this is a percpu event:
- */
- if (cpu != -1) {
+ if (pid == -1 && cpu != -1) {
/* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
- if (cpu < 0 || cpu > num_possible_cpus())
+ if (cpu < 0 || cpu >= nr_cpumask_bits)
return ERR_PTR(-EINVAL);
/*
@@ -1611,7 +1617,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
- if (!cpu_isset(cpu, cpu_online_map))
+ if (!cpu_online(cpu))
return ERR_PTR(-ENODEV);
cpuctx = &per_cpu(perf_cpu_context, cpu);
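
Two distinct fixes above: the root-only percpu path is now entered only for pure per-cpu events (pid == -1), so a per-task event that happens to be bound to a CPU no longer requires CAP_SYS_ADMIN; and the bounds check compares against nr_cpumask_bits rather than num_possible_cpus(), since the old test was off by one (cpu == num_possible_cpus() passed) and num_possible_cpus() is a count of possible CPUs, not an upper bound on their ids, which differs once the possible mask is sparse. A compact sketch of the fixed checks; validate_percpu_target(), nr_bits, and cpu_is_online() are hypothetical stand-ins for the kernel logic:

    #include <errno.h>

    /* Hypothetical mirror of the fixed checks; nr_bits and cpu_is_online
       stand in for the kernel's nr_cpumask_bits and cpu_online(). */
    static int validate_percpu_target(int pid, int cpu, int nr_bits,
                                      int (*cpu_is_online)(int))
    {
            if (pid != -1 || cpu == -1)
                    return 0;       /* per-task or wildcard: no CPU check */
            if (cpu < 0 || cpu >= nr_bits)
                    return -EINVAL; /* outside the cpumask entirely */
            if (!cpu_is_online(cpu))
                    return -ENODEV; /* can't attach to an offline CPU (yet) */
            return 0;
    }

    static int always_online(int cpu) { (void)cpu; return 1; }

    int main(void)
    {
            return validate_percpu_target(-1, 2, 8, always_online) ? 1 : 0;
    }
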
@@ -3253,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
- task_event->event_id.time = perf_clock();
-
perf_output_put(&handle, task_event->event_id);
perf_output_end(&handle);
@@ -3262,6 +3266,12 @@ static void perf_event_task_output(struct perf_event *event,
static int perf_event_task_match(struct perf_event *event)
{
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
+ return 0;
+
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (event->attr.comm || event->attr.mmap || event->attr.task)
return 1;
@@ -3287,12 +3297,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_task_ctx(&cpuctx->ctx, task_event);
- put_cpu_var(perf_cpu_context);
-
if (!ctx)
- ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+ ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_task_ctx(ctx, task_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_unlock();
}
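
The reordering in this walker (and in the comm and mmap walkers below) pairs with the new cpu filter: get_cpu_var() disables preemption, and the match functions now read smp_processor_id(), so put_cpu_var() has to move to after the task-context walk, otherwise the task could migrate mid-walk and the comparison would be racy. (The task context is also now resolved via current rather than task_event->task.) A runnable userspace analogue, pinning the thread where the kernel disables preemption; cpu_sensitive_walk() is a made-up placeholder for the context walks:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    static void cpu_sensitive_walk(int cpu) { printf("walking on cpu %d\n", cpu); }

    int main(void)
    {
            cpu_set_t old, pin;
            sched_getaffinity(0, sizeof(old), &old);

            CPU_ZERO(&pin);
            CPU_SET(sched_getcpu(), &pin);
            sched_setaffinity(0, sizeof(pin), &pin);  /* "get_cpu_var()" */

            cpu_sensitive_walk(sched_getcpu());       /* CPU id now stable */

            sched_setaffinity(0, sizeof(old), &old);  /* "put_cpu_var()" */
            return 0;
    }
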
@@ -3320,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
/* .ppid */
/* .tid */
/* .ptid */
+ .time = perf_clock(),
},
};
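
The timestamp moves from perf_event_task_output(), which runs once per matching event, into this one-shot initializer, so every listener sees the same .time for a given task event. The same stamp-once idiom in plain C, with now_ns() standing in for perf_clock():

    #include <stdio.h>
    #include <time.h>

    struct task_event { long long time; };

    /* Stand-in for perf_clock(): any monotonic timestamp source. */
    static long long now_ns(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main(void)
    {
            /* Stamp once at construction; every consumer then reads
               the same value instead of re-stamping per consumer. */
            struct task_event ev = { .time = now_ns() };
            printf("event time %lld\n", ev.time);
            return 0;
    }
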
@@ -3369,6 +3379,12 @@ static void perf_event_comm_output(struct perf_event *event,
static int perf_event_comm_match(struct perf_event *event)
{
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
+ return 0;
+
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (event->attr.comm)
return 1;
@@ -3405,15 +3421,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
- put_cpu_var(perf_cpu_context);
-
- /*
- * doesn't really matter which of the child contexts the
- * events ends up in.
- */
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_comm_ctx(ctx, comm_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_unlock();
}
@@ -3488,6 +3499,12 @@ static void perf_event_mmap_output(struct perf_event *event,
static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
+ return 0;
+
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (event->attr.mmap)
return 1;
@@ -3561,15 +3578,10 @@ got_name:
rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
- put_cpu_var(perf_cpu_context);
-
- /*
- * doesn't really matter which of the child contexts the
- * events ends up in.
- */
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_mmap_ctx(ctx, mmap_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_unlock();
kfree(buf);
@@ -3860,6 +3872,9 @@ static int perf_swevent_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (!perf_swevent_is_counting(event))
return 0;
@@ -4564,7 +4579,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->type >= PERF_TYPE_MAX)
return -EINVAL;
- if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+ if (attr->__reserved_1)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
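
The reserved-field check shrinks to __reserved_1 alone, presumably matching a perf_event_attr layout where the other reserved words no longer exist; userspace should still memset() the whole struct so reserved bits stay zero. The sample_type test just above uses the usual reject-unknown-bits idiom, sketched here with illustrative values (SAMPLE_MAX stands in for PERF_SAMPLE_MAX):

    #include <stdint.h>
    #include <stdio.h>

    /* SAMPLE_MAX is one past the highest valid bit, so SAMPLE_MAX - 1
       masks every valid bit at once (illustrative value). */
    #define SAMPLE_MAX      (1ULL << 9)
    #define VALID_MASK      (SAMPLE_MAX - 1)

    static int check_sample_type(uint64_t sample_type)
    {
            return (sample_type & ~VALID_MASK) ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
            printf("known bits   -> %d\n", check_sample_type(0x3));
            printf("unknown bit  -> %d\n", check_sample_type(SAMPLE_MAX));
            return 0;
    }
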
@@ -4717,7 +4732,7 @@ SYSCALL_DEFINE5(perf_event_open,
if (IS_ERR(event))
goto err_put_context;
- err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
+ err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
if (err < 0)
goto err_free_put_context;
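
Passing O_RDWR instead of 0 makes the anon inode's file mode reflect how the fd is actually used (mmap and ioctl as well as read). A small runnable probe, assuming a Linux box where opening a software event is permitted; perf_event_open() below is the raw syscall, which has no glibc wrapper:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            memset(&attr, 0, sizeof(attr));   /* keeps __reserved_1 zero */
            attr.type   = PERF_TYPE_SOFTWARE;
            attr.size   = sizeof(attr);
            attr.config = PERF_COUNT_SW_TASK_CLOCK;

            int fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0) { perror("perf_event_open"); return 1; }

            /* With the fix, the access mode reported here is O_RDWR
               rather than 0 (O_RDONLY). */
            int fl = fcntl(fd, F_GETFL);
            printf("access mode: %s\n",
                   (fl & O_ACCMODE) == O_RDWR ? "O_RDWR" : "not O_RDWR");
            close(fd);
            return 0;
    }
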
@@ -5141,7 +5156,7 @@ int perf_event_init_task(struct task_struct *child)
GFP_KERNEL);
if (!child_ctx) {
ret = -ENOMEM;
- goto exit;
+ break;
}
__perf_event_init_context(child_ctx, child);
@@ -5157,7 +5172,7 @@ int perf_event_init_task(struct task_struct *child)
}
}
- if (inherited_all) {
+ if (child_ctx && inherited_all) {
/*
* Mark the child context as a clone of the parent
* context, or of whatever the parent is a clone of.
@@ -5177,7 +5192,6 @@ int perf_event_init_task(struct task_struct *child)
get_ctx(child_ctx->parent_ctx);
}
-exit:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
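
The child-init fix: on allocation failure the code now breaks out of the inheritance loop instead of jumping past it, and the clone-marking block gains a child_ctx guard, so a NULL child context (allocation failed, or nothing was ever inherited) no longer reaches the parent_ctx bookkeeping while the common unlock/unpin path still runs. The control-flow shape as a generic sketch; inherit_one() and mark_clone() are hypothetical stand-ins for the inheritance steps:

    #include <errno.h>
    #include <stdlib.h>

    struct ctx { int dummy; };

    static int inherit_one(struct ctx *c) { (void)c; return 0; }
    static void mark_clone(struct ctx *c) { (void)c; }

    static int init_task_sketch(int nevents, int inherited_all)
    {
            struct ctx *child_ctx = NULL;
            int ret = 0;

            for (int i = 0; i < nevents; i++) {
                    if (!child_ctx) {
                            child_ctx = calloc(1, sizeof(*child_ctx));
                            if (!child_ctx) {
                                    ret = -ENOMEM;
                                    break;  /* was: goto exit; skip the rest but
                                               still reach the common exit path */
                            }
                    }
                    if (inherit_one(child_ctx)) {
                            inherited_all = 0;
                            break;
                    }
            }

            if (child_ctx && inherited_all)  /* guard added by the patch */
                    mark_clone(child_ctx);

            /* common unlock/unpin path runs unconditionally here */
            free(child_ctx);
            return ret;
    }

    int main(void) { return init_task_sketch(4, 1) ? 1 : 0; }
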