Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--	tools/perf/util/evsel.c	174
1 file changed, 89 insertions(+), 85 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0ce9febf1ba..8606175fe1e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -9,7 +9,7 @@
#include <byteswap.h>
#include <linux/bitops.h>
-#include <lk/debugfs.h>
+#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
@@ -23,6 +23,7 @@
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
+#include "trace-event.h"
static struct {
bool sample_id_all;
@@ -162,13 +163,15 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->idx = idx;
evsel->attr = *attr;
evsel->leader = evsel;
+ evsel->unit = "";
+ evsel->scale = 1.0;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
perf_evsel__calc_id_pos(evsel);
}
-struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
@@ -178,48 +181,7 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
return evsel;
}
-struct event_format *event_format__new(const char *sys, const char *name)
-{
- int fd, n;
- char *filename;
- void *bf = NULL, *nbf;
- size_t size = 0, alloc_size = 0;
- struct event_format *format = NULL;
-
- if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
- goto out;
-
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out_free_filename;
-
- do {
- if (size == alloc_size) {
- alloc_size += BUFSIZ;
- nbf = realloc(bf, alloc_size);
- if (nbf == NULL)
- goto out_free_bf;
- bf = nbf;
- }
-
- n = read(fd, bf + size, alloc_size - size);
- if (n < 0)
- goto out_free_bf;
- size += n;
- } while (n > 0);
-
- pevent_parse_format(&format, bf, size, sys);
-
-out_free_bf:
- free(bf);
- close(fd);
-out_free_filename:
- free(filename);
-out:
- return format;
-}
-
-struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
@@ -233,7 +195,7 @@ struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
goto out_free;
- evsel->tp_format = event_format__new(sys, name);
+ evsel->tp_format = trace_event__tp_format(sys, name);
if (evsel->tp_format == NULL)
goto out_free;
@@ -246,7 +208,7 @@ struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
return evsel;
out_free:
- free(evsel->name);
+ zfree(&evsel->name);
free(evsel);
return NULL;
}
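
The event_format__new() helper removed above read a tracefs format file into a buffer grown in BUFSIZ-sized steps before handing it to pevent_parse_format(); the tracepoint constructor now obtains the parsed format via trace_event__tp_format() instead. Below is a minimal standalone sketch of that read-and-grow pattern only; it is not perf code, and the names (read_all) are illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Read an entire file into a heap buffer, growing it BUFSIZ bytes at a
 * time, the same pattern the removed event_format__new() used. Returns
 * the buffer (caller frees) and stores its length in *sizep. */
static void *read_all(const char *filename, size_t *sizep)
{
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	ssize_t n;
	int fd = open(filename, O_RDONLY);

	if (fd < 0)
		return NULL;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_err;
			bf = nbf;
		}

		n = read(fd, (char *)bf + size, alloc_size - size);
		if (n < 0)
			goto out_err;
		size += n;
	} while (n > 0);

	close(fd);
	*sizep = size;
	return bf;

out_err:
	free(bf);
	close(fd);
	return NULL;
}

int main(int argc, char **argv)
{
	size_t len;
	void *data = argc > 1 ? read_all(argv[1], &len) : NULL;

	if (data) {
		printf("read %zu bytes\n", len);
		free(data);
	}
	return 0;
}
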
@@ -538,6 +500,34 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
return ret;
}
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+ struct record_opts *opts)
+{
+ bool function = perf_evsel__is_function_event(evsel);
+ struct perf_event_attr *attr = &evsel->attr;
+
+ perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
+ if (opts->call_graph == CALLCHAIN_DWARF) {
+ if (!function) {
+ perf_evsel__set_sample_bit(evsel, REGS_USER);
+ perf_evsel__set_sample_bit(evsel, STACK_USER);
+ attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_stack_user = opts->stack_dump_size;
+ attr->exclude_callchain_user = 1;
+ } else {
+ pr_info("Cannot use DWARF unwind for function trace event,"
+ " falling back to framepointers.\n");
+ }
+ }
+
+ if (function) {
+ pr_info("Disabling user space callchains for function trace event.\n");
+ attr->exclude_callchain_user = 1;
+ }
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -566,12 +556,12 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
* enable/disable events specifically, as there's no
* initial traced exec call.
*/
-void perf_evsel__config(struct perf_evsel *evsel,
- struct perf_record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
struct perf_evsel *leader = evsel->leader;
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
+ bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
attr->inherit = !opts->no_inherit;
@@ -599,10 +589,10 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
/*
- * We default some events to a 1 default interval. But keep
+ * We default some events to have a default interval. But keep
* it a weak assumption overridable by the user.
*/
- if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+ if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
opts->user_interval != ULLONG_MAX)) {
if (opts->freq) {
perf_evsel__set_sample_bit(evsel, PERIOD);
@@ -633,19 +623,10 @@ void perf_evsel__config(struct perf_evsel *evsel,
attr->mmap_data = track;
}
- if (opts->call_graph) {
- perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+ if (opts->call_graph_enabled)
+ perf_evsel__config_callgraph(evsel, opts);
- if (opts->call_graph == CALLCHAIN_DWARF) {
- perf_evsel__set_sample_bit(evsel, REGS_USER);
- perf_evsel__set_sample_bit(evsel, STACK_USER);
- attr->sample_regs_user = PERF_REGS_MASK;
- attr->sample_stack_user = opts->stack_dump_size;
- attr->exclude_callchain_user = 1;
- }
- }
-
- if (perf_target__has_cpu(&opts->target))
+ if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period)
@@ -653,7 +634,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
if (!perf_missing_features.sample_id_all &&
(opts->sample_time || !opts->no_inherit ||
- perf_target__has_cpu(&opts->target)))
+ target__has_cpu(&opts->target) || per_cpu))
perf_evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples) {
@@ -663,9 +644,9 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
if (opts->sample_address)
- attr->sample_type |= PERF_SAMPLE_DATA_SRC;
+ perf_evsel__set_sample_bit(evsel, DATA_SRC);
- if (opts->no_delay) {
+ if (opts->no_buffering) {
attr->watermark = 0;
attr->wakeup_events = 1;
}
@@ -675,12 +656,15 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
if (opts->sample_weight)
- attr->sample_type |= PERF_SAMPLE_WEIGHT;
+ perf_evsel__set_sample_bit(evsel, WEIGHT);
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
+ if (opts->sample_transaction)
+ perf_evsel__set_sample_bit(evsel, TRANSACTION);
+
/*
* XXX see the function comment above
*
@@ -694,7 +678,8 @@ void perf_evsel__config(struct perf_evsel *evsel,
* Setting enable_on_exec for independent events and
* group leaders for traced executed by perf.
*/
- if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+ if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
+ !opts->initial_delay)
attr->enable_on_exec = 1;
}
@@ -786,8 +771,7 @@ void perf_evsel__free_id(struct perf_evsel *evsel)
{
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
- free(evsel->id);
- evsel->id = NULL;
+ zfree(&evsel->id);
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -803,7 +787,7 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
- free(evsel->counts);
+ zfree(&evsel->counts);
}
void perf_evsel__exit(struct perf_evsel *evsel)
@@ -817,10 +801,10 @@ void perf_evsel__delete(struct perf_evsel *evsel)
{
perf_evsel__exit(evsel);
close_cgroup(evsel->cgrp);
- free(evsel->group_name);
+ zfree(&evsel->group_name);
if (evsel->tp_format)
pevent_free_format(evsel->tp_format);
- free(evsel->name);
+ zfree(&evsel->name);
free(evsel);
}
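
Several hunks in this patch replace open-coded free()-then-NULL pairs, and bare free() calls on struct members, with zfree(), which clears the pointer as part of freeing it. The following is a rough standalone sketch of that idiom, assuming a statement-expression macro along the lines of the one in perf's util headers; the exact definition lives there, and zfree_sketch below is only a hypothetical stand-in.

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for perf's zfree(): free the object reached
 * through a pointer-to-pointer and reset the pointer to NULL so it
 * cannot be freed or dereferenced again by mistake. */
#define zfree_sketch(pptr) ({ free(*(pptr)); *(pptr) = NULL; })

int main(void)
{
	char *name = strdup("cycles");

	zfree_sketch(&name);	/* frees the string, leaves name == NULL */
	zfree_sketch(&name);	/* now harmless: free(NULL) is a no-op   */
	return 0;
}
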
@@ -983,6 +967,7 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
ret += PRINT_ATTR2(exclude_host, exclude_guest);
ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
"excl.callchain_user", exclude_callchain_user);
+ ret += PRINT_ATTR_U32(mmap2);
ret += PRINT_ATTR_U32(wakeup_events);
ret += PRINT_ATTR_U32(wakeup_watermark);
@@ -1039,7 +1024,7 @@ retry_sample_id:
group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
- pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
pid, cpus->map[cpu], group_fd, flags);
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
@@ -1048,6 +1033,8 @@ retry_open:
group_fd, flags);
if (FD(evsel, cpu, thread) < 0) {
err = -errno;
+ pr_debug2("sys_perf_event_open failed, error %d\n",
+ err);
goto try_fallback;
}
set_rlimit = NO_CHANGE;
@@ -1114,7 +1101,6 @@ void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
perf_evsel__close_fd(evsel, ncpus, nthreads);
perf_evsel__free_fd(evsel);
- evsel->fd = NULL;
}
static struct {
@@ -1214,6 +1200,7 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
sample->pid = u.val32[0];
sample->tid = u.val32[1];
+ array--;
}
return 0;
@@ -1253,7 +1240,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
memset(data, 0, sizeof(*data));
data->cpu = data->pid = data->tid = -1;
data->stream_id = data->id = data->time = -1ULL;
- data->period = 1;
+ data->period = evsel->attr.sample_period;
data->weight = 0;
if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -1429,10 +1416,11 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
if (data->user_regs.abi) {
- u64 regs_user = evsel->attr.sample_regs_user;
+ u64 mask = evsel->attr.sample_regs_user;
- sz = hweight_long(regs_user) * sizeof(u64);
+ sz = hweight_long(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
+ data->user_regs.mask = mask;
data->user_regs.regs = (u64 *)array;
array = (void *)array + sz;
}
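
In the hunk above, the parser derives the size of the user-register dump from the event's own sample_regs_user mask and records that mask in data->user_regs.mask, so later consumers no longer need the mask passed in separately (hence the signature changes to perf_event__sample_event_size() and perf_event__synthesize_sample() further down). The size calculation is simply one u64 per set mask bit; here is a tiny standalone illustration of that arithmetic, with user_regs_size() being an illustrative name only.

#include <stdint.h>
#include <stdio.h>

/* One u64 is dumped per register selected in the sample mask, so the
 * block size is popcount(mask) * sizeof(u64). */
static size_t user_regs_size(uint64_t mask)
{
	return (size_t)__builtin_popcountll(mask) * sizeof(uint64_t);
}

int main(void)
{
	printf("%zu\n", user_regs_size(0x7));	/* 3 registers -> 24 bytes */
	return 0;
}
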
@@ -1453,6 +1441,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = (void *)array + sz;
OVERFLOW_CHECK_u64(array);
data->user_stack.size = *array++;
+ if (WARN_ONCE(data->user_stack.size > sz,
+ "user stack dump failure\n"))
+ return -EFAULT;
}
}
@@ -1470,11 +1461,18 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
+ data->transaction = 0;
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ OVERFLOW_CHECK_u64(array);
+ data->transaction = *array;
+ array++;
+ }
+
return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
- u64 sample_regs_user, u64 read_format)
+ u64 read_format)
{
size_t sz, result = sizeof(struct sample_event);
@@ -1540,7 +1538,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
result += sizeof(u64);
- sz = hweight_long(sample_regs_user) * sizeof(u64);
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -1562,11 +1560,14 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_DATA_SRC)
result += sizeof(u64);
+ if (type & PERF_SAMPLE_TRANSACTION)
+ result += sizeof(u64);
+
return result;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
- u64 sample_regs_user, u64 read_format,
+ u64 read_format,
const struct perf_sample *sample,
bool swapped)
{
@@ -1707,7 +1708,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
*array++ = sample->user_regs.abi;
- sz = hweight_long(sample_regs_user) * sizeof(u64);
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
memcpy(array, sample->user_regs.regs, sz);
array = (void *)array + sz;
} else {
@@ -1735,6 +1736,11 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
array++;
}
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ *array = sample->transaction;
+ array++;
+ }
+
return 0;
}
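
The PERF_SAMPLE_TRANSACTION support added in this file touches the three places every sample field must be handled: perf_evsel__parse_sample() reads the u64, perf_event__sample_event_size() accounts for it, and perf_event__synthesize_sample() writes it back out at the same position. A trivial standalone sketch of that write/size/read symmetry, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t buf[8];
	uint64_t *array = buf;
	uint64_t transaction = 0x42;
	size_t size = 0;

	/* size: the new field costs exactly one extra u64 */
	size += sizeof(uint64_t);

	/* synthesize: append the value at its slot in the record */
	*array = transaction;
	array++;

	/* parse: read fields back in the same order they were written */
	array = buf;
	uint64_t parsed = *array;
	array++;

	printf("size=%zu value=%#llx\n", size, (unsigned long long)parsed);
	return 0;
}
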
@@ -1974,16 +1980,14 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
evsel->attr.type = PERF_TYPE_SOFTWARE;
evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
- free(evsel->name);
- evsel->name = NULL;
+ zfree(&evsel->name);
return true;
}
return false;
}
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
- struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
switch (err) {