Diffstat (limited to 'tools/perf/util')
-rwxr-xr-x  tools/perf/util/PERF-VERSION-GEN  45
-rw-r--r--  tools/perf/util/alias.c  9
-rw-r--r--  tools/perf/util/annotate.c  1412
-rw-r--r--  tools/perf/util/annotate.h  177
-rw-r--r--  tools/perf/util/bitmap.c  10
-rw-r--r--  tools/perf/util/build-id.c  65
-rw-r--r--  tools/perf/util/build-id.h  14
-rw-r--r--  tools/perf/util/cache.h  17
-rw-r--r--  tools/perf/util/callchain.c  506
-rw-r--r--  tools/perf/util/callchain.h  128
-rw-r--r--  tools/perf/util/cgroup.c  177
-rw-r--r--  tools/perf/util/cgroup.h  17
-rw-r--r--  tools/perf/util/color.c  33
-rw-r--r--  tools/perf/util/color.h  3
-rw-r--r--  tools/perf/util/comm.c  122
-rw-r--r--  tools/perf/util/comm.h  21
-rw-r--r--  tools/perf/util/config.c  53
-rw-r--r--  tools/perf/util/cpumap.c  431
-rw-r--r--  tools/perf/util/cpumap.h  81
-rw-r--r--  tools/perf/util/ctype.c  2
-rw-r--r--  tools/perf/util/data.c  133
-rw-r--r--  tools/perf/util/data.h  50
-rw-r--r--  tools/perf/util/debug.c  71
-rw-r--r--  tools/perf/util/debug.h  30
-rw-r--r--  tools/perf/util/debugfs.c  240
-rw-r--r--  tools/perf/util/debugfs.h  25
-rw-r--r--  tools/perf/util/dso.c  900
-rw-r--r--  tools/perf/util/dso.h  232
-rw-r--r--  tools/perf/util/dwarf-aux.c  884
-rw-r--r--  tools/perf/util/dwarf-aux.h  118
-rw-r--r--  tools/perf/util/event.c  1136
-rw-r--r--  tools/perf/util/event.h  225
-rw-r--r--  tools/perf/util/evlist.c  1245
-rw-r--r--  tools/perf/util/evlist.h  265
-rw-r--r--  tools/perf/util/evsel.c  2036
-rw-r--r--  tools/perf/util/evsel.h  365
-rw-r--r--  tools/perf/util/exec_cmd.c  19
-rwxr-xr-x  tools/perf/util/generate-cmdlist.sh  15
-rw-r--r--  tools/perf/util/header.c  3092
-rw-r--r--  tools/perf/util/header.h  176
-rw-r--r--  tools/perf/util/help.c  11
-rw-r--r--  tools/perf/util/hist.c  1894
-rw-r--r--  tools/perf/util/hist.h  343
-rw-r--r--  tools/perf/util/include/asm/alternative-asm.h  8
-rw-r--r--  tools/perf/util/include/asm/bug.h  22
-rw-r--r--  tools/perf/util/include/asm/byteorder.h  2
-rw-r--r--  tools/perf/util/include/asm/cpufeature.h  9
-rw-r--r--  tools/perf/util/include/asm/dwarf2.h  13
-rw-r--r--  tools/perf/util/include/asm/hash.h  6
-rw-r--r--  tools/perf/util/include/asm/unistd_32.h  1
-rw-r--r--  tools/perf/util/include/asm/unistd_64.h  1
-rw-r--r--  tools/perf/util/include/dwarf-regs.h  2
-rw-r--r--  tools/perf/util/include/linux/bitmap.h  14
-rw-r--r--  tools/perf/util/include/linux/bitops.h  133
-rw-r--r--  tools/perf/util/include/linux/compiler.h  12
-rw-r--r--  tools/perf/util/include/linux/const.h  1
-rw-r--r--  tools/perf/util/include/linux/hash.h  5
-rw-r--r--  tools/perf/util/include/linux/kernel.h  33
-rw-r--r--  tools/perf/util/include/linux/linkage.h  13
-rw-r--r--  tools/perf/util/include/linux/list.h  5
-rw-r--r--  tools/perf/util/include/linux/module.h  6
-rw-r--r--  tools/perf/util/include/linux/prefetch.h  6
-rw-r--r--  tools/perf/util/include/linux/rbtree.h  1
-rw-r--r--  tools/perf/util/include/linux/rbtree_augmented.h  2
-rw-r--r--  tools/perf/util/include/linux/string.h  3
-rw-r--r--  tools/perf/util/include/linux/types.h  21
-rw-r--r--  tools/perf/util/intlist.c  146
-rw-r--r--  tools/perf/util/intlist.h  77
-rw-r--r--  tools/perf/util/machine.c  1422
-rw-r--r--  tools/perf/util/machine.h  194
-rw-r--r--  tools/perf/util/map.c  646
-rw-r--r--  tools/perf/util/map.h  189
-rw-r--r--  tools/perf/util/pager.c  8
-rw-r--r--  tools/perf/util/parse-events.c  1475
-rw-r--r--  tools/perf/util/parse-events.h  99
-rw-r--r--  tools/perf/util/parse-events.l  218
-rw-r--r--  tools/perf/util/parse-events.y  454
-rw-r--r--  tools/perf/util/parse-options.c  286
-rw-r--r--  tools/perf/util/parse-options.h  23
-rw-r--r--  tools/perf/util/path.c  10
-rw-r--r--  tools/perf/util/perf_regs.c  27
-rw-r--r--  tools/perf/util/perf_regs.h  29
-rw-r--r--  tools/perf/util/pmu.c  796
-rw-r--r--  tools/perf/util/pmu.h  49
-rw-r--r--  tools/perf/util/pmu.l  43
-rw-r--r--  tools/perf/util/pmu.y  92
-rw-r--r--  tools/perf/util/probe-event.c  1529
-rw-r--r--  tools/perf/util/probe-event.h  32
-rw-r--r--  tools/perf/util/probe-finder.c  1673
-rw-r--r--  tools/perf/util/probe-finder.h  59
-rw-r--r--  tools/perf/util/pstack.c  46
-rw-r--r--  tools/perf/util/pstack.h  10
-rw-r--r--  tools/perf/util/python-ext-sources  22
-rw-r--r--  tools/perf/util/python.c  1074
-rw-r--r--  tools/perf/util/rblist.c  128
-rw-r--r--  tools/perf/util/rblist.h  48
-rw-r--r--  tools/perf/util/record.c  215
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c  127
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  170
-rw-r--r--  tools/perf/util/session.c  1804
-rw-r--r--  tools/perf/util/session.h  174
-rw-r--r--  tools/perf/util/setup.py  48
-rw-r--r--  tools/perf/util/sort.c  1693
-rw-r--r--  tools/perf/util/sort.h  165
-rw-r--r--  tools/perf/util/srcline.c  299
-rw-r--r--  tools/perf/util/stat.c  63
-rw-r--r--  tools/perf/util/stat.h  25
-rw-r--r--  tools/perf/util/strbuf.c  5
-rw-r--r--  tools/perf/util/strfilter.c  199
-rw-r--r--  tools/perf/util/strfilter.h  48
-rw-r--r--  tools/perf/util/string.c  125
-rw-r--r--  tools/perf/util/strlist.c  169
-rw-r--r--  tools/perf/util/strlist.h  47
-rw-r--r--  tools/perf/util/svghelper.c  250
-rw-r--r--  tools/perf/util/svghelper.h  19
-rw-r--r--  tools/perf/util/symbol-elf.c  1625
-rw-r--r--  tools/perf/util/symbol-minimal.c  330
-rw-r--r--  tools/perf/util/symbol.c  2763
-rw-r--r--  tools/perf/util/symbol.h  285
-rw-r--r--  tools/perf/util/target.c  158
-rw-r--r--  tools/perf/util/target.h  79
-rw-r--r--  tools/perf/util/thread.c  253
-rw-r--r--  tools/perf/util/thread.h  80
-rw-r--r--  tools/perf/util/thread_map.c  294
-rw-r--r--  tools/perf/util/thread_map.h  29
-rw-r--r--  tools/perf/util/tool.h  47
-rw-r--r--  tools/perf/util/top.c  117
-rw-r--r--  tools/perf/util/top.h  47
-rw-r--r--  tools/perf/util/trace-event-info.c  615
-rw-r--r--  tools/perf/util/trace-event-parse.c  3206
-rw-r--r--  tools/perf/util/trace-event-read.c  539
-rw-r--r--  tools/perf/util/trace-event-scripting.c  32
-rw-r--r--  tools/perf/util/trace-event.c  82
-rw-r--r--  tools/perf/util/trace-event.h  310
-rw-r--r--  tools/perf/util/types.h  17
-rw-r--r--  tools/perf/util/ui/browser.c  337
-rw-r--r--  tools/perf/util/ui/browser.h  51
-rw-r--r--  tools/perf/util/ui/browsers/annotate.c  237
-rw-r--r--  tools/perf/util/ui/browsers/hists.c  1013
-rw-r--r--  tools/perf/util/ui/browsers/map.c  155
-rw-r--r--  tools/perf/util/ui/browsers/map.h  6
-rw-r--r--  tools/perf/util/ui/helpline.c  69
-rw-r--r--  tools/perf/util/ui/helpline.h  11
-rw-r--r--  tools/perf/util/ui/libslang.h  27
-rw-r--r--  tools/perf/util/ui/progress.c  60
-rw-r--r--  tools/perf/util/ui/progress.h  11
-rw-r--r--  tools/perf/util/ui/setup.c  42
-rw-r--r--  tools/perf/util/ui/util.c  113
-rw-r--r--  tools/perf/util/ui/util.h  10
-rw-r--r--  tools/perf/util/unwind-libdw.c  210
-rw-r--r--  tools/perf/util/unwind-libdw.h  21
-rw-r--r--  tools/perf/util/unwind-libunwind.c  579
-rw-r--r--  tools/perf/util/unwind.h  37
-rw-r--r--  tools/perf/util/usage.c  6
-rw-r--r--  tools/perf/util/util.c  436
-rw-r--r--  tools/perf/util/util.h  135
-rw-r--r--  tools/perf/util/values.c  23
-rw-r--r--  tools/perf/util/values.h  2
-rw-r--r--  tools/perf/util/vdso.c  111
-rw-r--r--  tools/perf/util/vdso.h  18
-rw-r--r--  tools/perf/util/wrapper.c  3
-rw-r--r--  tools/perf/util/xyarray.c  20
-rw-r--r--  tools/perf/util/xyarray.h  20
163 files changed, 33874 insertions, 14493 deletions
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 97d76562a1a..39f17507578 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -9,39 +9,42 @@ GVF=${OUTPUT}PERF-VERSION-FILE
LF='
'
+#
# First check if there is a .git to get the version from git describe
-# otherwise try to get the version from the kernel makefile
-if test -d ../../.git -o -f ../../.git &&
- VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
- case "$VN" in
- *$LF*) (exit 1) ;;
- v[0-9]*)
- git update-index -q --refresh
- test -z "$(git diff-index --name-only HEAD --)" ||
- VN="$VN-dirty" ;;
- esac
+# otherwise try to get the version from the kernel Makefile
+#
+CID=
+TAG=
+if test -d ../../.git -o -f ../../.git
then
- VN=$(echo "$VN" | sed -e 's/-/./g');
-else
- eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '`
- eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '`
- eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '`
- eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '`
-
- VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
+ TAG=$(git describe --abbrev=0 --match "v[0-9].[0-9]*" 2>/dev/null )
+ CID=$(git log -1 --abbrev=4 --pretty=format:"%h" 2>/dev/null) && CID="-g$CID"
+elif test -f ../../PERF-VERSION-FILE
+then
+ TAG=$(cut -d' ' -f3 ../../PERF-VERSION-FILE | sed -e 's/\"//g')
+fi
+if test -z "$TAG"
+then
+ TAG=$(MAKEFLAGS= make -sC ../.. kernelversion)
+fi
+VN="$TAG$CID"
+if test -n "$CID"
+then
+ # format version string, strip trailing zero of sublevel:
+ VN=$(echo "$VN" | sed -e 's/-/./g;s/\([0-9]*[.][0-9]*\)[.]0/\1/')
fi
VN=$(expr "$VN" : v*'\(.*\)')
if test -r $GVF
then
- VC=$(sed -e 's/^PERF_VERSION = //' <$GVF)
+ VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF)
else
VC=unset
fi
test "$VN" = "$VC" || {
- echo >&2 "PERF_VERSION = $VN"
- echo "PERF_VERSION = $VN" >$GVF
+ echo >&2 " PERF_VERSION = $VN"
+ echo "#define PERF_VERSION \"$VN\"" >$GVF
}
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
index b8144e80bb1..c0b43ee40d9 100644
--- a/tools/perf/util/alias.c
+++ b/tools/perf/util/alias.c
@@ -3,7 +3,8 @@
static const char *alias_key;
static char *alias_val;
-static int alias_lookup_cb(const char *k, const char *v, void *cb __used)
+static int alias_lookup_cb(const char *k, const char *v,
+ void *cb __maybe_unused)
{
if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
if (!v)
@@ -54,8 +55,7 @@ int split_cmdline(char *cmdline, const char ***argv)
src++;
c = cmdline[src];
if (!c) {
- free(*argv);
- *argv = NULL;
+ zfree(argv);
return error("cmdline ends with \\");
}
}
@@ -67,8 +67,7 @@ int split_cmdline(char *cmdline, const char ***argv)
cmdline[dst] = 0;
if (quoted) {
- free(*argv);
- *argv = NULL;
+ zfree(argv);
return error("unclosed quote");
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
new file mode 100644
index 00000000000..809b4c50bea
--- /dev/null
+++ b/tools/perf/util/annotate.c
@@ -0,0 +1,1412 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-annotate.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "util.h"
+#include "ui/ui.h"
+#include "sort.h"
+#include "build-id.h"
+#include "color.h"
+#include "cache.h"
+#include "symbol.h"
+#include "debug.h"
+#include "annotate.h"
+#include "evsel.h"
+#include <pthread.h>
+#include <linux/bitops.h>
+
+const char *disassembler_style;
+const char *objdump_path;
+
+static struct ins *ins__find(const char *name);
+static int disasm_line__parse(char *line, char **namep, char **rawp);
+
+static void ins__delete(struct ins_operands *ops)
+{
+ zfree(&ops->source.raw);
+ zfree(&ops->source.name);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
+}
+
+static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
+}
+
+int ins__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ if (ins->ops->scnprintf)
+ return ins->ops->scnprintf(ins, bf, size, ops);
+
+ return ins__raw_scnprintf(ins, bf, size, ops);
+}
+
+static int call__parse(struct ins_operands *ops)
+{
+ char *endptr, *tok, *name;
+
+ ops->target.addr = strtoull(ops->raw, &endptr, 16);
+
+ name = strchr(endptr, '<');
+ if (name == NULL)
+ goto indirect_call;
+
+ name++;
+
+ tok = strchr(name, '>');
+ if (tok == NULL)
+ return -1;
+
+ *tok = '\0';
+ ops->target.name = strdup(name);
+ *tok = '>';
+
+ return ops->target.name == NULL ? -1 : 0;
+
+indirect_call:
+ tok = strchr(endptr, '(');
+ if (tok != NULL) {
+ ops->target.addr = 0;
+ return 0;
+ }
+
+ tok = strchr(endptr, '*');
+ if (tok == NULL)
+ return -1;
+
+ ops->target.addr = strtoull(tok + 1, NULL, 16);
+ return 0;
+}
+
+static int call__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ if (ops->target.name)
+ return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
+
+ if (ops->target.addr == 0)
+ return ins__raw_scnprintf(ins, bf, size, ops);
+
+ return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
+}
+
+static struct ins_ops call_ops = {
+ .parse = call__parse,
+ .scnprintf = call__scnprintf,
+};
+
+bool ins__is_call(const struct ins *ins)
+{
+ return ins->ops == &call_ops;
+}
+
+static int jump__parse(struct ins_operands *ops)
+{
+ const char *s = strchr(ops->raw, '+');
+
+ ops->target.addr = strtoull(ops->raw, NULL, 16);
+
+ if (s++ != NULL)
+ ops->target.offset = strtoull(s, NULL, 16);
+ else
+ ops->target.offset = UINT64_MAX;
+
+ return 0;
+}
+
+static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+}
+
+static struct ins_ops jump_ops = {
+ .parse = jump__parse,
+ .scnprintf = jump__scnprintf,
+};
+
+bool ins__is_jump(const struct ins *ins)
+{
+ return ins->ops == &jump_ops;
+}
+
+static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
+{
+ char *endptr, *name, *t;
+
+ if (strstr(raw, "(%rip)") == NULL)
+ return 0;
+
+ *addrp = strtoull(comment, &endptr, 16);
+ name = strchr(endptr, '<');
+ if (name == NULL)
+ return -1;
+
+ name++;
+
+ t = strchr(name, '>');
+ if (t == NULL)
+ return 0;
+
+ *t = '\0';
+ *namep = strdup(name);
+ *t = '>';
+
+ return 0;
+}
+
+static int lock__parse(struct ins_operands *ops)
+{
+ char *name;
+
+ ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
+ if (ops->locked.ops == NULL)
+ return 0;
+
+ if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0)
+ goto out_free_ops;
+
+ ops->locked.ins = ins__find(name);
+ if (ops->locked.ins == NULL)
+ goto out_free_ops;
+
+ if (!ops->locked.ins->ops)
+ return 0;
+
+ if (ops->locked.ins->ops->parse)
+ ops->locked.ins->ops->parse(ops->locked.ops);
+
+ return 0;
+
+out_free_ops:
+ zfree(&ops->locked.ops);
+ return 0;
+}
+
+static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ int printed;
+
+ if (ops->locked.ins == NULL)
+ return ins__raw_scnprintf(ins, bf, size, ops);
+
+ printed = scnprintf(bf, size, "%-6.6s ", ins->name);
+ return printed + ins__scnprintf(ops->locked.ins, bf + printed,
+ size - printed, ops->locked.ops);
+}
+
+static void lock__delete(struct ins_operands *ops)
+{
+ zfree(&ops->locked.ops);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
+}
+
+static struct ins_ops lock_ops = {
+ .free = lock__delete,
+ .parse = lock__parse,
+ .scnprintf = lock__scnprintf,
+};
+
+static int mov__parse(struct ins_operands *ops)
+{
+ char *s = strchr(ops->raw, ','), *target, *comment, prev;
+
+ if (s == NULL)
+ return -1;
+
+ *s = '\0';
+ ops->source.raw = strdup(ops->raw);
+ *s = ',';
+
+ if (ops->source.raw == NULL)
+ return -1;
+
+ target = ++s;
+
+ while (s[0] != '\0' && !isspace(s[0]))
+ ++s;
+ prev = *s;
+ *s = '\0';
+
+ ops->target.raw = strdup(target);
+ *s = prev;
+
+ if (ops->target.raw == NULL)
+ goto out_free_source;
+
+ comment = strchr(s, '#');
+ if (comment == NULL)
+ return 0;
+
+ while (comment[0] != '\0' && isspace(comment[0]))
+ ++comment;
+
+ comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+
+ return 0;
+
+out_free_source:
+ zfree(&ops->source.raw);
+ return -1;
+}
+
+static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
+ ops->source.name ?: ops->source.raw,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops mov_ops = {
+ .parse = mov__parse,
+ .scnprintf = mov__scnprintf,
+};
+
+static int dec__parse(struct ins_operands *ops)
+{
+ char *target, *comment, *s, prev;
+
+ target = s = ops->raw;
+
+ while (s[0] != '\0' && !isspace(s[0]))
+ ++s;
+ prev = *s;
+ *s = '\0';
+
+ ops->target.raw = strdup(target);
+ *s = prev;
+
+ if (ops->target.raw == NULL)
+ return -1;
+
+ comment = strchr(s, '#');
+ if (comment == NULL)
+ return 0;
+
+ while (comment[0] != '\0' && isspace(comment[0]))
+ ++comment;
+
+ comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+
+ return 0;
+}
+
+static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %s", ins->name,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops dec_ops = {
+ .parse = dec__parse,
+ .scnprintf = dec__scnprintf,
+};
+
+static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
+ struct ins_operands *ops __maybe_unused)
+{
+ return scnprintf(bf, size, "%-6.6s", "nop");
+}
+
+static struct ins_ops nop_ops = {
+ .scnprintf = nop__scnprintf,
+};
+
+/*
+ * Must be sorted by name!
+ */
+static struct ins instructions[] = {
+ { .name = "add", .ops = &mov_ops, },
+ { .name = "addl", .ops = &mov_ops, },
+ { .name = "addq", .ops = &mov_ops, },
+ { .name = "addw", .ops = &mov_ops, },
+ { .name = "and", .ops = &mov_ops, },
+ { .name = "bts", .ops = &mov_ops, },
+ { .name = "call", .ops = &call_ops, },
+ { .name = "callq", .ops = &call_ops, },
+ { .name = "cmp", .ops = &mov_ops, },
+ { .name = "cmpb", .ops = &mov_ops, },
+ { .name = "cmpl", .ops = &mov_ops, },
+ { .name = "cmpq", .ops = &mov_ops, },
+ { .name = "cmpw", .ops = &mov_ops, },
+ { .name = "cmpxch", .ops = &mov_ops, },
+ { .name = "dec", .ops = &dec_ops, },
+ { .name = "decl", .ops = &dec_ops, },
+ { .name = "imul", .ops = &mov_ops, },
+ { .name = "inc", .ops = &dec_ops, },
+ { .name = "incl", .ops = &dec_ops, },
+ { .name = "ja", .ops = &jump_ops, },
+ { .name = "jae", .ops = &jump_ops, },
+ { .name = "jb", .ops = &jump_ops, },
+ { .name = "jbe", .ops = &jump_ops, },
+ { .name = "jc", .ops = &jump_ops, },
+ { .name = "jcxz", .ops = &jump_ops, },
+ { .name = "je", .ops = &jump_ops, },
+ { .name = "jecxz", .ops = &jump_ops, },
+ { .name = "jg", .ops = &jump_ops, },
+ { .name = "jge", .ops = &jump_ops, },
+ { .name = "jl", .ops = &jump_ops, },
+ { .name = "jle", .ops = &jump_ops, },
+ { .name = "jmp", .ops = &jump_ops, },
+ { .name = "jmpq", .ops = &jump_ops, },
+ { .name = "jna", .ops = &jump_ops, },
+ { .name = "jnae", .ops = &jump_ops, },
+ { .name = "jnb", .ops = &jump_ops, },
+ { .name = "jnbe", .ops = &jump_ops, },
+ { .name = "jnc", .ops = &jump_ops, },
+ { .name = "jne", .ops = &jump_ops, },
+ { .name = "jng", .ops = &jump_ops, },
+ { .name = "jnge", .ops = &jump_ops, },
+ { .name = "jnl", .ops = &jump_ops, },
+ { .name = "jnle", .ops = &jump_ops, },
+ { .name = "jno", .ops = &jump_ops, },
+ { .name = "jnp", .ops = &jump_ops, },
+ { .name = "jns", .ops = &jump_ops, },
+ { .name = "jnz", .ops = &jump_ops, },
+ { .name = "jo", .ops = &jump_ops, },
+ { .name = "jp", .ops = &jump_ops, },
+ { .name = "jpe", .ops = &jump_ops, },
+ { .name = "jpo", .ops = &jump_ops, },
+ { .name = "jrcxz", .ops = &jump_ops, },
+ { .name = "js", .ops = &jump_ops, },
+ { .name = "jz", .ops = &jump_ops, },
+ { .name = "lea", .ops = &mov_ops, },
+ { .name = "lock", .ops = &lock_ops, },
+ { .name = "mov", .ops = &mov_ops, },
+ { .name = "movb", .ops = &mov_ops, },
+ { .name = "movdqa",.ops = &mov_ops, },
+ { .name = "movl", .ops = &mov_ops, },
+ { .name = "movq", .ops = &mov_ops, },
+ { .name = "movslq", .ops = &mov_ops, },
+ { .name = "movzbl", .ops = &mov_ops, },
+ { .name = "movzwl", .ops = &mov_ops, },
+ { .name = "nop", .ops = &nop_ops, },
+ { .name = "nopl", .ops = &nop_ops, },
+ { .name = "nopw", .ops = &nop_ops, },
+ { .name = "or", .ops = &mov_ops, },
+ { .name = "orl", .ops = &mov_ops, },
+ { .name = "test", .ops = &mov_ops, },
+ { .name = "testb", .ops = &mov_ops, },
+ { .name = "testl", .ops = &mov_ops, },
+ { .name = "xadd", .ops = &mov_ops, },
+ { .name = "xbeginl", .ops = &jump_ops, },
+ { .name = "xbeginq", .ops = &jump_ops, },
+};
+
+static int ins__cmp(const void *name, const void *insp)
+{
+ const struct ins *ins = insp;
+
+ return strcmp(name, ins->name);
+}
+
+static struct ins *ins__find(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(instructions);
+
+ return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ pthread_mutex_init(&notes->lock, NULL);
+ return 0;
+}
+
+int symbol__alloc_hist(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ const size_t size = symbol__size(sym);
+ size_t sizeof_sym_hist;
+
+ /* Check for overflow when calculating sizeof_sym_hist */
+ if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
+ return -1;
+
+ sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
+
+ /* Check for overflow in zalloc argument */
+ if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
+ / symbol_conf.nr_events)
+ return -1;
+
+ notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
+ if (notes->src == NULL)
+ return -1;
+ notes->src->sizeof_sym_hist = sizeof_sym_hist;
+ notes->src->nr_histograms = symbol_conf.nr_events;
+ INIT_LIST_HEAD(&notes->src->source);
+ return 0;
+}
+
+void symbol__annotate_zero_histograms(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ pthread_mutex_lock(&notes->lock);
+ if (notes->src != NULL)
+ memset(notes->src->histograms, 0,
+ notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+ pthread_mutex_unlock(&notes->lock);
+}
+
+static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ struct annotation *notes, int evidx, u64 addr)
+{
+ unsigned offset;
+ struct sym_hist *h;
+
+ pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
+
+ if (addr < sym->start || addr > sym->end)
+ return -ERANGE;
+
+ offset = addr - sym->start;
+ h = annotation__histogram(notes, evidx);
+ h->sum++;
+ h->addr[offset]++;
+
+ pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
+ ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
+ addr, addr - sym->start, evidx, h->addr[offset]);
+ return 0;
+}
+
+static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr)
+{
+ struct annotation *notes;
+
+ if (sym == NULL)
+ return 0;
+
+ notes = symbol__annotation(sym);
+ if (notes->src == NULL) {
+ if (symbol__alloc_hist(sym) < 0)
+ return -ENOMEM;
+ }
+
+ return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
+}
+
+int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
+{
+ return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
+}
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
+{
+ return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
+}
+
+static void disasm_line__init_ins(struct disasm_line *dl)
+{
+ dl->ins = ins__find(dl->name);
+
+ if (dl->ins == NULL)
+ return;
+
+ if (!dl->ins->ops)
+ return;
+
+ if (dl->ins->ops->parse)
+ dl->ins->ops->parse(&dl->ops);
+}
+
+static int disasm_line__parse(char *line, char **namep, char **rawp)
+{
+ char *name = line, tmp;
+
+ while (isspace(name[0]))
+ ++name;
+
+ if (name[0] == '\0')
+ return -1;
+
+ *rawp = name + 1;
+
+ while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
+ ++*rawp;
+
+ tmp = (*rawp)[0];
+ (*rawp)[0] = '\0';
+ *namep = strdup(name);
+
+ if (*namep == NULL)
+ goto out_free_name;
+
+ (*rawp)[0] = tmp;
+
+ if ((*rawp)[0] != '\0') {
+ (*rawp)++;
+ while (isspace((*rawp)[0]))
+ ++(*rawp);
+ }
+
+ return 0;
+
+out_free_name:
+ zfree(namep);
+ return -1;
+}
+
+static struct disasm_line *disasm_line__new(s64 offset, char *line, size_t privsize)
+{
+ struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
+
+ if (dl != NULL) {
+ dl->offset = offset;
+ dl->line = strdup(line);
+ if (dl->line == NULL)
+ goto out_delete;
+
+ if (offset != -1) {
+ if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0)
+ goto out_free_line;
+
+ disasm_line__init_ins(dl);
+ }
+ }
+
+ return dl;
+
+out_free_line:
+ zfree(&dl->line);
+out_delete:
+ free(dl);
+ return NULL;
+}
+
+void disasm_line__free(struct disasm_line *dl)
+{
+ zfree(&dl->line);
+ zfree(&dl->name);
+ if (dl->ins && dl->ins->ops->free)
+ dl->ins->ops->free(&dl->ops);
+ else
+ ins__delete(&dl->ops);
+ free(dl);
+}
+
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
+{
+ if (raw || !dl->ins)
+ return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw);
+
+ return ins__scnprintf(dl->ins, bf, size, &dl->ops);
+}
+
+static void disasm__add(struct list_head *head, struct disasm_line *line)
+{
+ list_add_tail(&line->node, head);
+}
+
+struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
+{
+ list_for_each_entry_continue(pos, head, node)
+ if (pos->offset >= 0)
+ return pos;
+
+ return NULL;
+}
+
+double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
+ s64 end, const char **path)
+{
+ struct source_line *src_line = notes->src->lines;
+ double percent = 0.0;
+
+ if (src_line) {
+ size_t sizeof_src_line = sizeof(*src_line) +
+ sizeof(src_line->p) * (src_line->nr_pcnt - 1);
+
+ while (offset < end) {
+ src_line = (void *)notes->src->lines +
+ (sizeof_src_line * offset);
+
+ if (*path == NULL)
+ *path = src_line->path;
+
+ percent += src_line->p[evidx].percent;
+ offset++;
+ }
+ } else {
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ unsigned int hits = 0;
+
+ while (offset < end)
+ hits += h->addr[offset++];
+
+ if (h->sum)
+ percent = 100.0 * hits / h->sum;
+ }
+
+ return percent;
+}
+
+static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
+ struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
+ int max_lines, struct disasm_line *queue)
+{
+ static const char *prev_line;
+ static const char *prev_color;
+
+ if (dl->offset != -1) {
+ const char *path = NULL;
+ double percent, max_percent = 0.0;
+ double *ppercents = &percent;
+ int i, nr_percent = 1;
+ const char *color;
+ struct annotation *notes = symbol__annotation(sym);
+ s64 offset = dl->offset;
+ const u64 addr = start + offset;
+ struct disasm_line *next;
+
+ next = disasm__get_next_ip_line(&notes->src->source, dl);
+
+ if (perf_evsel__is_group_event(evsel)) {
+ nr_percent = evsel->nr_members;
+ ppercents = calloc(nr_percent, sizeof(double));
+ if (ppercents == NULL)
+ return -1;
+ }
+
+ for (i = 0; i < nr_percent; i++) {
+ percent = disasm__calc_percent(notes,
+ notes->src->lines ? i : evsel->idx + i,
+ offset,
+ next ? next->offset : (s64) len,
+ &path);
+
+ ppercents[i] = percent;
+ if (percent > max_percent)
+ max_percent = percent;
+ }
+
+ if (max_percent < min_pcnt)
+ return -1;
+
+ if (max_lines && printed >= max_lines)
+ return 1;
+
+ if (queue != NULL) {
+ list_for_each_entry_from(queue, &notes->src->source, node) {
+ if (queue == dl)
+ break;
+ disasm_line__print(queue, sym, start, evsel, len,
+ 0, 0, 1, NULL);
+ }
+ }
+
+ color = get_percent_color(max_percent);
+
+ /*
+ * Also color the filename and line if needed, with
+ * the same color as the percentage. Don't print it
+ * twice for close colored addr with the same filename:line
+ */
+ if (path) {
+ if (!prev_line || strcmp(prev_line, path)
+ || color != prev_color) {
+ color_fprintf(stdout, color, " %s", path);
+ prev_line = path;
+ prev_color = color;
+ }
+ }
+
+ for (i = 0; i < nr_percent; i++) {
+ percent = ppercents[i];
+ color = get_percent_color(percent);
+ color_fprintf(stdout, color, " %7.2f", percent);
+ }
+
+ printf(" : ");
+ color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
+ color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
+
+ if (ppercents != &percent)
+ free(ppercents);
+
+ } else if (max_lines && printed >= max_lines)
+ return 1;
+ else {
+ int width = 8;
+
+ if (queue)
+ return -1;
+
+ if (perf_evsel__is_group_event(evsel))
+ width *= evsel->nr_members;
+
+ if (!*dl->line)
+ printf(" %*s:\n", width, " ");
+ else
+ printf(" %*s: %s\n", width, " ", dl->line);
+ }
+
+ return 0;
+}
+
+/*
+ * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
+ * which looks like the following
+ *
+ * 0000000000415500 <_init>:
+ * 415500: sub $0x8,%rsp
+ * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
+ * 41550b: test %rax,%rax
+ * 41550e: je 415515 <_init+0x15>
+ * 415510: callq 416e70 <__gmon_start__@plt>
+ * 415515: add $0x8,%rsp
+ * 415519: retq
+ *
+ * it will be parsed and saved into struct disasm_line as
+ * <offset> <name> <ops.raw>
+ *
+ * The offset will be a relative offset from the start of the symbol and -1
+ * means that it's not a disassembly line so should be treated differently.
+ * The ops.raw part will be parsed further according to type of the instruction.
+ */
+static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
+ FILE *file, size_t privsize)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *dl;
+ char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
+ size_t line_len;
+ s64 line_ip, offset = -1;
+
+ if (getline(&line, &line_len, file) < 0)
+ return -1;
+
+ if (!line)
+ return -1;
+
+ while (line_len != 0 && isspace(line[line_len - 1]))
+ line[--line_len] = '\0';
+
+ c = strchr(line, '\n');
+ if (c)
+ *c = 0;
+
+ line_ip = -1;
+ parsed_line = line;
+
+ /*
+ * Strip leading spaces:
+ */
+ tmp = line;
+ while (*tmp) {
+ if (*tmp != ' ')
+ break;
+ tmp++;
+ }
+
+ if (*tmp) {
+ /*
+ * Parse hexa addresses followed by ':'
+ */
+ line_ip = strtoull(tmp, &tmp2, 16);
+ if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
+ line_ip = -1;
+ }
+
+ if (line_ip != -1) {
+ u64 start = map__rip_2objdump(map, sym->start),
+ end = map__rip_2objdump(map, sym->end);
+
+ offset = line_ip - start;
+ if ((u64)line_ip < start || (u64)line_ip > end)
+ offset = -1;
+ else
+ parsed_line = tmp2 + 1;
+ }
+
+ dl = disasm_line__new(offset, parsed_line, privsize);
+ free(line);
+
+ if (dl == NULL)
+ return -1;
+
+ if (dl->ops.target.offset == UINT64_MAX)
+ dl->ops.target.offset = dl->ops.target.addr -
+ map__rip_2objdump(map, sym->start);
+
+ /* kcore has no symbols, so add the call target name */
+ if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+ struct addr_map_symbol target = {
+ .map = map,
+ .addr = dl->ops.target.addr,
+ };
+
+ if (!map_groups__find_ams(&target, NULL) &&
+ target.sym->start == target.al_addr)
+ dl->ops.target.name = strdup(target.sym->name);
+ }
+
+ disasm__add(&notes->src->source, dl);
+
+ return 0;
+}
+
+static void delete_last_nop(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct list_head *list = &notes->src->source;
+ struct disasm_line *dl;
+
+ while (!list_empty(list)) {
+ dl = list_entry(list->prev, struct disasm_line, node);
+
+ if (dl->ins && dl->ins->ops) {
+ if (dl->ins->ops != &nop_ops)
+ return;
+ } else {
+ if (!strstr(dl->line, " nop ") &&
+ !strstr(dl->line, " nopl ") &&
+ !strstr(dl->line, " nopw "))
+ return;
+ }
+
+ list_del(&dl->node);
+ disasm_line__free(dl);
+ }
+}
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
+{
+ struct dso *dso = map->dso;
+ char *filename = dso__build_id_filename(dso, NULL, 0);
+ bool free_filename = true;
+ char command[PATH_MAX * 2];
+ FILE *file;
+ int err = 0;
+ char symfs_filename[PATH_MAX];
+ struct kcore_extract kce;
+ bool delete_extract = false;
+
+ if (filename) {
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ }
+
+ if (filename == NULL) {
+ if (dso->has_build_id) {
+ pr_err("Can't annotate %s: not enough memory\n",
+ sym->name);
+ return -ENOMEM;
+ }
+ goto fallback;
+ } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
+ strstr(command, "[kernel.kallsyms]") ||
+ access(symfs_filename, R_OK)) {
+ free(filename);
+fallback:
+ /*
+ * If we don't have build-ids or the build-id file isn't in the
+ * cache, or is just a kallsyms file, well, lets hope that this
+ * DSO is the same as when 'perf record' ran.
+ */
+ filename = (char *)dso->long_name;
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ free_filename = false;
+ }
+
+ if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(dso)) {
+ char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+ char *build_id_msg = NULL;
+
+ if (dso->annotate_warned)
+ goto out_free_filename;
+
+ if (dso->has_build_id) {
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id), bf + 15);
+ build_id_msg = bf;
+ }
+ err = -ENOENT;
+ dso->annotate_warned = 1;
+ pr_err("Can't annotate %s:\n\n"
+ "No vmlinux file%s\nwas found in the path.\n\n"
+ "Please use:\n\n"
+ " perf buildid-cache -vu vmlinux\n\n"
+ "or:\n\n"
+ " --vmlinux vmlinux\n",
+ sym->name, build_id_msg ?: "");
+ goto out_free_filename;
+ }
+
+ pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
+ filename, sym->name, map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
+
+ pr_debug("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso->long_name, sym, sym->name);
+
+ if (dso__is_kcore(dso)) {
+ kce.kcore_filename = symfs_filename;
+ kce.addr = map__rip_2objdump(map, sym->start);
+ kce.offs = sym->start;
+ kce.len = sym->end + 1 - sym->start;
+ if (!kcore_extract__create(&kce)) {
+ delete_extract = true;
+ strlcpy(symfs_filename, kce.extract_filename,
+ sizeof(symfs_filename));
+ if (free_filename) {
+ free(filename);
+ free_filename = false;
+ }
+ filename = symfs_filename;
+ }
+ }
+
+ snprintf(command, sizeof(command),
+ "%s %s%s --start-address=0x%016" PRIx64
+ " --stop-address=0x%016" PRIx64
+ " -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
+ objdump_path ? objdump_path : "objdump",
+ disassembler_style ? "-M " : "",
+ disassembler_style ? disassembler_style : "",
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end+1),
+ symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
+ symbol_conf.annotate_src ? "-S" : "",
+ symfs_filename, filename);
+
+ pr_debug("Executing: %s\n", command);
+
+ file = popen(command, "r");
+ if (!file)
+ goto out_free_filename;
+
+ while (!feof(file))
+ if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
+ break;
+
+ /*
+ * kallsyms does not have symbol sizes, so there may be a nop at the end.
+ * Remove it.
+ */
+ if (dso__is_kcore(dso))
+ delete_last_nop(sym);
+
+ pclose(file);
+out_free_filename:
+ if (delete_extract)
+ kcore_extract__delete(&kce);
+ if (free_filename)
+ free(filename);
+ return err;
+}
+
+static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+{
+ struct source_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ int i, ret;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct source_line, node);
+
+ ret = strcmp(iter->path, src_line->path);
+ if (ret == 0) {
+ for (i = 0; i < src_line->nr_pcnt; i++)
+ iter->p[i].percent_sum += src_line->p[i].percent;
+ return;
+ }
+
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ for (i = 0; i < src_line->nr_pcnt; i++)
+ src_line->p[i].percent_sum = src_line->p[i].percent;
+
+ rb_link_node(&src_line->node, parent, p);
+ rb_insert_color(&src_line->node, root);
+}
+
+static int cmp_source_line(struct source_line *a, struct source_line *b)
+{
+ int i;
+
+ for (i = 0; i < a->nr_pcnt; i++) {
+ if (a->p[i].percent_sum == b->p[i].percent_sum)
+ continue;
+ return a->p[i].percent_sum > b->p[i].percent_sum;
+ }
+
+ return 0;
+}
+
+static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
+{
+ struct source_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct source_line, node);
+
+ if (cmp_source_line(src_line, iter))
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&src_line->node, parent, p);
+ rb_insert_color(&src_line->node, root);
+}
+
+static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
+{
+ struct source_line *src_line;
+ struct rb_node *node;
+
+ node = rb_first(src_root);
+ while (node) {
+ struct rb_node *next;
+
+ src_line = rb_entry(node, struct source_line, node);
+ next = rb_next(node);
+ rb_erase(node, src_root);
+
+ __resort_source_line(dest_root, src_line);
+ node = next;
+ }
+}
+
+static void symbol__free_source_line(struct symbol *sym, int len)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ size_t sizeof_src_line;
+ int i;
+
+ sizeof_src_line = sizeof(*src_line) +
+ (sizeof(src_line->p) * (src_line->nr_pcnt - 1));
+
+ for (i = 0; i < len; i++) {
+ free_srcline(src_line->path);
+ src_line = (void *)src_line + sizeof_src_line;
+ }
+
+ zfree(&notes->src->lines);
+}
+
+/* Get the filename:line for the colored entries */
+static int symbol__get_source_line(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel,
+ struct rb_root *root, int len)
+{
+ u64 start;
+ int i, k;
+ int evidx = evsel->idx;
+ struct source_line *src_line;
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ struct rb_root tmp_root = RB_ROOT;
+ int nr_pcnt = 1;
+ u64 h_sum = h->sum;
+ size_t sizeof_src_line = sizeof(struct source_line);
+
+ if (perf_evsel__is_group_event(evsel)) {
+ for (i = 1; i < evsel->nr_members; i++) {
+ h = annotation__histogram(notes, evidx + i);
+ h_sum += h->sum;
+ }
+ nr_pcnt = evsel->nr_members;
+ sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p);
+ }
+
+ if (!h_sum)
+ return 0;
+
+ src_line = notes->src->lines = calloc(len, sizeof_src_line);
+ if (!notes->src->lines)
+ return -1;
+
+ start = map__rip_2objdump(map, sym->start);
+
+ for (i = 0; i < len; i++) {
+ u64 offset;
+ double percent_max = 0.0;
+
+ src_line->nr_pcnt = nr_pcnt;
+
+ for (k = 0; k < nr_pcnt; k++) {
+ h = annotation__histogram(notes, evidx + k);
+ src_line->p[k].percent = 100.0 * h->addr[i] / h->sum;
+
+ if (src_line->p[k].percent > percent_max)
+ percent_max = src_line->p[k].percent;
+ }
+
+ if (percent_max <= 0.5)
+ goto next;
+
+ offset = start + i;
+ src_line->path = get_srcline(map->dso, offset);
+ insert_source_line(&tmp_root, src_line);
+
+ next:
+ src_line = (void *)src_line + sizeof_src_line;
+ }
+
+ resort_source_line(root, &tmp_root);
+ return 0;
+}
+
+static void print_summary(struct rb_root *root, const char *filename)
+{
+ struct source_line *src_line;
+ struct rb_node *node;
+
+ printf("\nSorted summary for file %s\n", filename);
+ printf("----------------------------------------------\n\n");
+
+ if (RB_EMPTY_ROOT(root)) {
+ printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+ return;
+ }
+
+ node = rb_first(root);
+ while (node) {
+ double percent, percent_max = 0.0;
+ const char *color;
+ char *path;
+ int i;
+
+ src_line = rb_entry(node, struct source_line, node);
+ for (i = 0; i < src_line->nr_pcnt; i++) {
+ percent = src_line->p[i].percent_sum;
+ color = get_percent_color(percent);
+ color_fprintf(stdout, color, " %7.2f", percent);
+
+ if (percent > percent_max)
+ percent_max = percent;
+ }
+
+ path = src_line->path;
+ color = get_percent_color(percent_max);
+ color_fprintf(stdout, color, " %s\n", path);
+
+ node = rb_next(node);
+ }
+}
+
+static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evsel->idx);
+ u64 len = symbol__size(sym), offset;
+
+ for (offset = 0; offset < len; ++offset)
+ if (h->addr[offset] != 0)
+ printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
+ sym->start + offset, h->addr[offset]);
+ printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
+int symbol__annotate_printf(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool full_paths,
+ int min_pcnt, int max_lines, int context)
+{
+ struct dso *dso = map->dso;
+ char *filename;
+ const char *d_filename;
+ const char *evsel_name = perf_evsel__name(evsel);
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *pos, *queue = NULL;
+ u64 start = map__rip_2objdump(map, sym->start);
+ int printed = 2, queue_len = 0;
+ int more = 0;
+ u64 len;
+ int width = 8;
+ int namelen, evsel_name_len, graph_dotted_len;
+
+ filename = strdup(dso->long_name);
+ if (!filename)
+ return -ENOMEM;
+
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
+
+ len = symbol__size(sym);
+ namelen = strlen(d_filename);
+ evsel_name_len = strlen(evsel_name);
+
+ if (perf_evsel__is_group_event(evsel))
+ width *= evsel->nr_members;
+
+ printf(" %-*.*s| Source code & Disassembly of %s for %s\n",
+ width, width, "Percent", d_filename, evsel_name);
+
+ graph_dotted_len = width + namelen + evsel_name_len;
+ printf("-%-*.*s-----------------------------------------\n",
+ graph_dotted_len, graph_dotted_len, graph_dotted_line);
+
+ if (verbose)
+ symbol__annotate_hits(sym, evsel);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ if (context && queue == NULL) {
+ queue = pos;
+ queue_len = 0;
+ }
+
+ switch (disasm_line__print(pos, sym, start, evsel, len,
+ min_pcnt, printed, max_lines,
+ queue)) {
+ case 0:
+ ++printed;
+ if (context) {
+ printed += queue_len;
+ queue = NULL;
+ queue_len = 0;
+ }
+ break;
+ case 1:
+ /* filtered by max_lines */
+ ++more;
+ break;
+ case -1:
+ default:
+ /*
+ * Filtered by min_pcnt or non IP lines when
+ * context != 0
+ */
+ if (!context)
+ break;
+ if (queue_len == context)
+ queue = list_entry(queue->node.next, typeof(*queue), node);
+ else
+ ++queue_len;
+ break;
+ }
+ }
+
+ free(filename);
+
+ return more;
+}
+
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+
+ memset(h, 0, notes->src->sizeof_sym_hist);
+}
+
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ int len = symbol__size(sym), offset;
+
+ h->sum = 0;
+ for (offset = 0; offset < len; ++offset) {
+ h->addr[offset] = h->addr[offset] * 7 / 8;
+ h->sum += h->addr[offset];
+ }
+}
+
+void disasm__purge(struct list_head *head)
+{
+ struct disasm_line *pos, *n;
+
+ list_for_each_entry_safe(pos, n, head, node) {
+ list_del(&pos->node);
+ disasm_line__free(pos);
+ }
+}
+
+static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
+{
+ size_t printed;
+
+ if (dl->offset == -1)
+ return fprintf(fp, "%s\n", dl->line);
+
+ printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name);
+
+ if (dl->ops.raw[0] != '\0') {
+ printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
+ dl->ops.raw);
+ }
+
+ return printed + fprintf(fp, "\n");
+}
+
+size_t disasm__fprintf(struct list_head *head, FILE *fp)
+{
+ struct disasm_line *pos;
+ size_t printed = 0;
+
+ list_for_each_entry(pos, head, node)
+ printed += disasm_line__fprintf(pos, fp);
+
+ return printed;
+}
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool print_lines,
+ bool full_paths, int min_pcnt, int max_lines)
+{
+ struct dso *dso = map->dso;
+ struct rb_root source_line = RB_ROOT;
+ u64 len;
+
+ if (symbol__annotate(sym, map, 0) < 0)
+ return -1;
+
+ len = symbol__size(sym);
+
+ if (print_lines) {
+ symbol__get_source_line(sym, map, evsel, &source_line, len);
+ print_summary(&source_line, dso->long_name);
+ }
+
+ symbol__annotate_printf(sym, map, evsel, full_paths,
+ min_pcnt, max_lines, 0);
+ if (print_lines)
+ symbol__free_source_line(sym, len);
+
+ disasm__purge(&symbol__annotation(sym)->src->source);
+
+ return 0;
+}
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize)
+{
+ return symbol__annotate(he->ms.sym, he->ms.map, privsize);
+}
+
+bool ui__has_annotation(void)
+{
+ return use_browser == 1 && sort__has_sym;
+}
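
For reference, a minimal standalone sketch of the name lookup that ins__find() above performs: a bsearch() over the instructions[] table, which is the reason for the "Must be sorted by name!" comment on that table. The reduced three-entry table and the main() driver are illustrative only, not part of the patch.

/* Reduced illustration of the ins__find()/ins__cmp() lookup above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ins {
	const char *name;
};

static struct ins instructions[] = {	/* must stay sorted by name */
	{ .name = "call" },
	{ .name = "jmp"  },
	{ .name = "mov"  },
};

static int ins__cmp(const void *name, const void *insp)
{
	const struct ins *ins = insp;

	return strcmp(name, ins->name);
}

int main(void)
{
	struct ins *ins = bsearch("jmp", instructions,
				  sizeof(instructions) / sizeof(instructions[0]),
				  sizeof(struct ins), ins__cmp);

	printf("%s\n", ins ? ins->name : "unknown");
	return 0;
}
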
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
new file mode 100644
index 00000000000..112d6e26815
--- /dev/null
+++ b/tools/perf/util/annotate.h
@@ -0,0 +1,177 @@
+#ifndef __PERF_ANNOTATE_H
+#define __PERF_ANNOTATE_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <linux/types.h>
+#include "symbol.h"
+#include "hist.h"
+#include "sort.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+
+struct ins;
+
+struct ins_operands {
+ char *raw;
+ struct {
+ char *raw;
+ char *name;
+ u64 addr;
+ u64 offset;
+ } target;
+ union {
+ struct {
+ char *raw;
+ char *name;
+ u64 addr;
+ } source;
+ struct {
+ struct ins *ins;
+ struct ins_operands *ops;
+ } locked;
+ };
+};
+
+struct ins_ops {
+ void (*free)(struct ins_operands *ops);
+ int (*parse)(struct ins_operands *ops);
+ int (*scnprintf)(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops);
+};
+
+struct ins {
+ const char *name;
+ struct ins_ops *ops;
+};
+
+bool ins__is_jump(const struct ins *ins);
+bool ins__is_call(const struct ins *ins);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
+
+struct annotation;
+
+struct disasm_line {
+ struct list_head node;
+ s64 offset;
+ char *line;
+ char *name;
+ struct ins *ins;
+ struct ins_operands ops;
+};
+
+static inline bool disasm_line__has_offset(const struct disasm_line *dl)
+{
+ return dl->ops.target.offset != UINT64_MAX;
+}
+
+void disasm_line__free(struct disasm_line *dl);
+struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
+size_t disasm__fprintf(struct list_head *head, FILE *fp);
+double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
+ s64 end, const char **path);
+
+struct sym_hist {
+ u64 sum;
+ u64 addr[0];
+};
+
+struct source_line_percent {
+ double percent;
+ double percent_sum;
+};
+
+struct source_line {
+ struct rb_node node;
+ char *path;
+ int nr_pcnt;
+ struct source_line_percent p[1];
+};
+
+/** struct annotated_source - symbols with hits have this attached as in sannotation
+ *
+ * @histogram: Array of addr hit histograms per event being monitored
+ * @lines: If 'print_lines' is specified, per source code line percentages
+ * @source: source parsed from a disassembler like objdump -dS
+ *
+ * lines is allocated, percentages calculated and all sorted by percentage
+ * when the annotation is about to be presented, so the percentages are for
+ * one of the entries in the histogram array, i.e. for the event/counter being
+ * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
+ * returns.
+ */
+struct annotated_source {
+ struct list_head source;
+ struct source_line *lines;
+ int nr_histograms;
+ int sizeof_sym_hist;
+ struct sym_hist histograms[0];
+};
+
+struct annotation {
+ pthread_mutex_t lock;
+ struct annotated_source *src;
+};
+
+struct sannotation {
+ struct annotation annotation;
+ struct symbol symbol;
+};
+
+static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
+{
+ return (((void *)&notes->src->histograms) +
+ (notes->src->sizeof_sym_hist * idx));
+}
+
+static inline struct annotation *symbol__annotation(struct symbol *sym)
+{
+ struct sannotation *a = container_of(sym, struct sannotation, symbol);
+ return &a->annotation;
+}
+
+int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
+
+int symbol__alloc_hist(struct symbol *sym);
+void symbol__annotate_zero_histograms(struct symbol *sym);
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize);
+
+int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym);
+int symbol__annotate_printf(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool full_paths,
+ int min_pcnt, int max_lines, int context);
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+void disasm__purge(struct list_head *head);
+
+bool ui__has_annotation(void);
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool print_lines,
+ bool full_paths, int min_pcnt, int max_lines);
+
+#ifdef HAVE_SLANG_SUPPORT
+int symbol__tui_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+#else
+static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
+ struct map *map __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt
+ __maybe_unused)
+{
+ return 0;
+}
+#endif
+
+extern const char *disassembler_style;
+
+#endif /* __PERF_ANNOTATE_H */
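
A small, self-contained illustration (not part of the patch) of the per-event histogram layout that symbol__alloc_hist() and annotation__histogram() agree on: one sym_hist of sizeof(struct sym_hist) + symbol_size * sizeof(u64) bytes per event, placed back to back in a single allocation and selected by event index. The histogram() helper and the sizes chosen below are assumptions made for the example only.

/* Sketch of the flexible-array histogram layout used above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sym_hist {
	uint64_t sum;
	uint64_t addr[0];	/* GNU C zero-length array, as in the patch */
};

static struct sym_hist *histogram(void *base, size_t sizeof_sym_hist, int idx)
{
	return (struct sym_hist *)((char *)base + sizeof_sym_hist * idx);
}

int main(void)
{
	const int nr_events = 2;	/* e.g. cycles and instructions */
	const size_t sym_size = 64;	/* bytes covered by the symbol */
	size_t sizeof_sym_hist = sizeof(struct sym_hist) + sym_size * sizeof(uint64_t);
	void *base = calloc(nr_events, sizeof_sym_hist);
	struct sym_hist *h = histogram(base, sizeof_sym_hist, 1);

	h->sum++;
	h->addr[10]++;		/* one hit for event 1 at symbol offset 10 */
	printf("event 1: sum=%llu\n", (unsigned long long)h->sum);
	free(base);
	return 0;
}
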
diff --git a/tools/perf/util/bitmap.c b/tools/perf/util/bitmap.c
index 5e230acae1e..0a1adc1111f 100644
--- a/tools/perf/util/bitmap.c
+++ b/tools/perf/util/bitmap.c
@@ -19,3 +19,13 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
return w;
}
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] | bitmap2[k];
+}
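
A short usage sketch of the long-by-long OR the new __bitmap_or() helper performs; bitmap_or_sketch() below restates its loop so the example runs standalone and is not perf code.

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void bitmap_or_sketch(unsigned long *dst, const unsigned long *a,
			     const unsigned long *b, int bits)
{
	int k, nr = BITS_TO_LONGS(bits);

	for (k = 0; k < nr; k++)
		dst[k] = a[k] | b[k];
}

int main(void)
{
	unsigned long a[1] = { 0x5 }, b[1] = { 0x3 }, dst[1];

	bitmap_or_sketch(dst, a, b, 8);
	printf("%lx\n", dst[0]);	/* prints 7 */
	return 0;
}
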
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e437edb7241..a904a4cfe7d 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -13,12 +13,19 @@
#include "symbol.h"
#include <linux/kernel.h>
#include "debug.h"
+#include "session.h"
+#include "tool.h"
-static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
+int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel __maybe_unused,
+ struct machine *machine)
{
struct addr_location al;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = perf_session__findnew(session, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
@@ -26,8 +33,8 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
return -1;
}
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- event->ip.pid, event->ip.ip, &al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ sample->ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
@@ -35,37 +42,61 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
return 0;
}
-static int event__exit_del_thread(event_t *self, struct perf_session *session)
+static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample
+ __maybe_unused,
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, self->fork.tid);
+ struct thread *thread = machine__findnew_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
- dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
- self->fork.ppid, self->fork.ptid);
+ dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
if (thread) {
- rb_erase(&thread->rb_node, &session->threads);
- session->last_match = NULL;
+ rb_erase(&thread->rb_node, &machine->threads);
+ machine->last_match = NULL;
thread__delete(thread);
}
return 0;
}
-struct perf_event_ops build_id__mark_dso_hit_ops = {
+struct perf_tool build_id__mark_dso_hit_ops = {
.sample = build_id__mark_dso_hit,
- .mmap = event__process_mmap,
- .fork = event__process_task,
- .exit = event__exit_del_thread,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .fork = perf_event__process_fork,
+ .exit = perf_event__exit_del_thread,
+ .attr = perf_event__process_attr,
+ .build_id = perf_event__process_build_id,
};
-char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
+int build_id__sprintf(const u8 *build_id, int len, char *bf)
+{
+ char *bid = bf;
+ const u8 *raw = build_id;
+ int i;
+
+ for (i = 0; i < len; ++i) {
+ sprintf(bid, "%02x", *raw);
+ ++raw;
+ bid += 2;
+ }
+
+ return raw - build_id;
+}
+
+char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
{
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
- if (!self->has_build_id)
+ if (!dso->has_build_id)
return NULL;
- build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex);
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex);
if (bf == NULL) {
if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
build_id_hex, build_id_hex + 2) < 0)
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 5dafb00eaa0..ae392561470 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -1,10 +1,18 @@
#ifndef PERF_BUILD_ID_H_
#define PERF_BUILD_ID_H_ 1
-#include "session.h"
+#define BUILD_ID_SIZE 20
-extern struct perf_event_ops build_id__mark_dso_hit_ops;
+#include "tool.h"
+#include <linux/types.h>
-char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
+extern struct perf_tool build_id__mark_dso_hit_ops;
+struct dso;
+int build_id__sprintf(const u8 *build_id, int len, char *bf);
+char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
+
+int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct perf_evsel *evsel,
+ struct machine *machine);
#endif
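
An illustrative, standalone rendering (not part of the patch) of the hex formatting done by the build_id__sprintf() helper declared above: a BUILD_ID_SIZE (20) byte identifier becomes 40 hex digits plus a NUL terminator, which is why callers size their buffers as BUILD_ID_SIZE * 2 + 1. The sprintf_build_id() name is used here only to avoid clashing with the real function.

#include <stdio.h>

#define BUILD_ID_SIZE 20

static int sprintf_build_id(const unsigned char *build_id, int len, char *bf)
{
	char *bid = bf;
	int i;

	for (i = 0; i < len; i++) {
		sprintf(bid, "%02x", build_id[i]);
		bid += 2;
	}
	return bid - bf;
}

int main(void)
{
	unsigned char id[BUILD_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	char hex[BUILD_ID_SIZE * 2 + 1];

	sprintf_build_id(id, sizeof(id), hex);
	printf("%s\n", hex);	/* "deadbeef" followed by 32 '0' digits */
	return 0;
}
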
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index a7729797fd9..7b176dd02e1 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -5,6 +5,7 @@
#include "util.h"
#include "strbuf.h"
#include "../perf.h"
+#include "../ui/ui.h"
#define CMD_EXEC_PATH "--exec-path"
#define CMD_PERF_DIR "--perf-dir="
@@ -31,19 +32,6 @@ extern const char *pager_program;
extern int pager_in_use(void);
extern int pager_use_color;
-extern int use_browser;
-
-#ifdef NO_NEWT_SUPPORT
-static inline void setup_browser(void)
-{
- setup_pager();
-}
-static inline void exit_browser(bool wait_for_ok __used) {}
-#else
-void setup_browser(void);
-void exit_browser(bool wait_for_ok);
-#endif
-
char *alias_lookup(const char *alias);
int split_cmdline(char *cmdline, const char ***argv);
@@ -82,8 +70,7 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
extern char *perf_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
-#ifdef NO_STRLCPY
+/* Matches the libc/libbsd function attribute so we declare this unconditionally: */
extern size_t strlcpy(char *dest, const char *src, size_t size);
-#endif
#endif /* __PERF_CACHE_H */
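
With the NO_STRLCPY guard gone, the declaration above is visible unconditionally; what follows is a minimal sketch of the usual libc/libbsd strlcpy() semantics it relies on (copy at most size - 1 bytes, always NUL-terminate when size is non-zero, return strlen(src) so callers can detect truncation). strlcpy_sketch() is a stand-in for illustration, not perf's implementation.

#include <stdio.h>
#include <string.h>

static size_t strlcpy_sketch(char *dest, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dest, src, copy);
		dest[copy] = '\0';
	}
	return len;
}

int main(void)
{
	char bf[8];

	if (strlcpy_sketch(bf, "tools/perf/util", sizeof(bf)) >= sizeof(bf))
		printf("truncated to '%s'\n", bf);	/* 'tools/p' */
	return 0;
}
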
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index e12d539417b..48b6d3f5001 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
*
* Handle the callchains from the stream in an ad-hoc radix tree and then
* sort them in an rbtree.
@@ -15,21 +15,93 @@
#include <errno.h>
#include <math.h>
+#include "asm/bug.h"
+
+#include "hist.h"
#include "util.h"
+#include "sort.h"
+#include "machine.h"
#include "callchain.h"
-bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
+__thread struct callchain_cursor callchain_cursor;
+
+int
+parse_callchain_report_opt(const char *arg)
{
- unsigned int chain_size = event->header.size;
- chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
- return chain->nr * sizeof(u64) <= chain_size;
-}
+ char *tok, *tok2;
+ char *endptr;
+
+ symbol_conf.use_callchain = true;
+
+ if (!arg)
+ return 0;
+
+ tok = strtok((char *)arg, ",");
+ if (!tok)
+ return -1;
+
+ /* get the output mode */
+ if (!strncmp(tok, "graph", strlen(arg))) {
+ callchain_param.mode = CHAIN_GRAPH_ABS;
+
+ } else if (!strncmp(tok, "flat", strlen(arg))) {
+ callchain_param.mode = CHAIN_FLAT;
+ } else if (!strncmp(tok, "fractal", strlen(arg))) {
+ callchain_param.mode = CHAIN_GRAPH_REL;
+ } else if (!strncmp(tok, "none", strlen(arg))) {
+ callchain_param.mode = CHAIN_NONE;
+ symbol_conf.use_callchain = false;
+ return 0;
+ } else {
+ return -1;
+ }
-#define chain_for_each_child(child, parent) \
- list_for_each_entry(child, &parent->children, brothers)
+ /* get the min percentage */
+ tok = strtok(NULL, ",");
+ if (!tok)
+ goto setup;
-#define chain_for_each_child_safe(child, next, parent) \
- list_for_each_entry_safe(child, next, &parent->children, brothers)
+ callchain_param.min_percent = strtod(tok, &endptr);
+ if (tok == endptr)
+ return -1;
+
+ /* get the print limit */
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+
+ if (tok2[0] != 'c') {
+ callchain_param.print_limit = strtoul(tok2, &endptr, 0);
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+ }
+
+ /* get the call chain order */
+ if (!strncmp(tok2, "caller", strlen("caller")))
+ callchain_param.order = ORDER_CALLER;
+ else if (!strncmp(tok2, "callee", strlen("callee")))
+ callchain_param.order = ORDER_CALLEE;
+ else
+ return -1;
+
+ /* Get the sort key */
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+ if (!strncmp(tok2, "function", strlen("function")))
+ callchain_param.key = CCKEY_FUNCTION;
+ else if (!strncmp(tok2, "address", strlen("address")))
+ callchain_param.key = CCKEY_ADDRESS;
+ else
+ return -1;
+setup:
+ if (callchain_register_param(&callchain_param) < 0) {
+ pr_err("Can't register callchain params\n");
+ return -1;
+ }
+ return 0;
+}
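
To make the token handling above concrete, here is a hypothetical call (not from the patch) showing how a "perf report -g" style value maps onto callchain_param. The argument is tokenized in place by strtok(), so a writable buffer is used rather than a string literal.

	/*
	 * Sketch: "fractal,0.5,callee,function" is expected to yield
	 *   callchain_param.mode        = CHAIN_GRAPH_REL
	 *   callchain_param.min_percent = 0.5
	 *   callchain_param.order       = ORDER_CALLEE
	 *   callchain_param.key         = CCKEY_FUNCTION
	 */
	char arg[] = "fractal,0.5,callee,function";

	if (parse_callchain_report_opt(arg) < 0)
		pr_err("invalid callchain report option\n");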
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
@@ -38,14 +110,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct callchain_node *rnode;
- u64 chain_cumul = cumul_hits(chain);
+ u64 chain_cumul = callchain_cumul_hits(chain);
while (*p) {
u64 rnode_cumul;
parent = *p;
rnode = rb_entry(parent, struct callchain_node, rb_node);
- rnode_cumul = cumul_hits(rnode);
+ rnode_cumul = callchain_cumul_hits(rnode);
switch (mode) {
case CHAIN_FLAT:
@@ -75,10 +147,16 @@ static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
u64 min_hit)
{
+ struct rb_node *n;
struct callchain_node *child;
- chain_for_each_child(child, node)
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
__sort_chain_flat(rb_root, child, min_hit);
+ }
if (node->hit && node->hit >= min_hit)
rb_insert_callchain(rb_root, node, CHAIN_FLAT);
@@ -90,7 +168,7 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
*/
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
- u64 min_hit, struct callchain_param *param __used)
+ u64 min_hit, struct callchain_param *param __maybe_unused)
{
__sort_chain_flat(rb_root, &root->node, min_hit);
}
@@ -98,13 +176,18 @@ sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
static void __sort_chain_graph_abs(struct callchain_node *node,
u64 min_hit)
{
+ struct rb_node *n;
struct callchain_node *child;
node->rb_root = RB_ROOT;
+ n = rb_first(&node->rb_root_in);
+
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
- chain_for_each_child(child, node) {
__sort_chain_graph_abs(child, min_hit);
- if (cumul_hits(child) >= min_hit)
+ if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
CHAIN_GRAPH_ABS);
}
@@ -112,7 +195,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
- u64 min_hit, struct callchain_param *param __used)
+ u64 min_hit, struct callchain_param *param __maybe_unused)
{
__sort_chain_graph_abs(&chain_root->node, min_hit);
rb_root->rb_node = chain_root->node.rb_root.rb_node;
@@ -121,15 +204,20 @@ sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
static void __sort_chain_graph_rel(struct callchain_node *node,
double min_percent)
{
+ struct rb_node *n;
struct callchain_node *child;
u64 min_hit;
node->rb_root = RB_ROOT;
min_hit = ceil(node->children_hit * min_percent);
- chain_for_each_child(child, node) {
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
__sort_chain_graph_rel(child, min_percent);
- if (cumul_hits(child) >= min_hit)
+ if (callchain_cumul_hits(child) >= min_hit)
rb_insert_callchain(&node->rb_root, child,
CHAIN_GRAPH_REL);
}
@@ -137,13 +225,13 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
- u64 min_hit __used, struct callchain_param *param)
+ u64 min_hit __maybe_unused, struct callchain_param *param)
{
__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
rb_root->rb_node = chain_root->node.rb_root.rb_node;
}
-int register_callchain_param(struct callchain_param *param)
+int callchain_register_param(struct callchain_param *param)
{
switch (param->mode) {
case CHAIN_GRAPH_ABS:
@@ -177,44 +265,46 @@ create_child(struct callchain_node *parent, bool inherit_children)
return NULL;
}
new->parent = parent;
- INIT_LIST_HEAD(&new->children);
INIT_LIST_HEAD(&new->val);
if (inherit_children) {
- struct callchain_node *next;
+ struct rb_node *n;
+ struct callchain_node *child;
- list_splice(&parent->children, &new->children);
- INIT_LIST_HEAD(&parent->children);
+ new->rb_root_in = parent->rb_root_in;
+ parent->rb_root_in = RB_ROOT;
- chain_for_each_child(next, new)
- next->parent = new;
+ n = rb_first(&new->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ child->parent = new;
+ n = rb_next(n);
+ }
+
+ /* make it the first child */
+ rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
+ rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
}
- list_add_tail(&new->brothers, &parent->children);
return new;
}
-struct resolved_ip {
- u64 ip;
- struct map_symbol ms;
-};
-
-struct resolved_chain {
- u64 nr;
- struct resolved_ip ips[0];
-};
-
-
/*
* Fill the node with callchain values
*/
static void
-fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
+fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
{
- unsigned int i;
+ struct callchain_cursor_node *cursor_node;
+
+ node->val_nr = cursor->nr - cursor->pos;
+ if (!node->val_nr)
+ pr_warning("Warning: empty node in callchain tree\n");
+
+ cursor_node = callchain_cursor_current(cursor);
- for (i = start; i < chain->nr; i++) {
+ while (cursor_node) {
struct callchain_list *call;
call = zalloc(sizeof(*call));
@@ -222,26 +312,41 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
perror("not enough memory for the code path tree");
return;
}
- call->ip = chain->ips[i].ip;
- call->ms = chain->ips[i].ms;
+ call->ip = cursor_node->ip;
+ call->ms.sym = cursor_node->sym;
+ call->ms.map = cursor_node->map;
list_add_tail(&call->list, &node->val);
+
+ callchain_cursor_advance(cursor);
+ cursor_node = callchain_cursor_current(cursor);
}
- node->val_nr = chain->nr - start;
- if (!node->val_nr)
- pr_warning("Warning: empty node in callchain tree\n");
}
-static void
-add_child(struct callchain_node *parent, struct resolved_chain *chain,
- int start, u64 period)
+static struct callchain_node *
+add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ u64 period)
{
struct callchain_node *new;
new = create_child(parent, false);
- fill_node(new, chain, start);
+ fill_node(new, cursor);
new->children_hit = 0;
new->hit = period;
+ return new;
+}
+
+static s64 match_chain(struct callchain_cursor_node *node,
+ struct callchain_list *cnode)
+{
+ struct symbol *sym = node->sym;
+
+ if (cnode->ms.sym && sym &&
+ callchain_param.key == CCKEY_FUNCTION)
+ return cnode->ms.sym->start - sym->start;
+ else
+ return cnode->ip - node->ip;
}
/*
@@ -250,9 +355,10 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
* Then create another child to host the given callchain of new branch
*/
static void
-split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
- struct callchain_list *to_split, int idx_parents, int idx_local,
- u64 period)
+split_add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ struct callchain_list *to_split,
+ u64 idx_parents, u64 idx_local, u64 period)
{
struct callchain_node *new;
struct list_head *old_tail;
@@ -272,193 +378,297 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
/* split the hits */
new->hit = parent->hit;
new->children_hit = parent->children_hit;
- parent->children_hit = cumul_hits(new);
+ parent->children_hit = callchain_cumul_hits(new);
new->val_nr = parent->val_nr - idx_local;
parent->val_nr = idx_local;
/* create a new child for the new branch if any */
- if (idx_total < chain->nr) {
+ if (idx_total < cursor->nr) {
+ struct callchain_node *first;
+ struct callchain_list *cnode;
+ struct callchain_cursor_node *node;
+ struct rb_node *p, **pp;
+
parent->hit = 0;
- add_child(parent, chain, idx_total, period);
parent->children_hit += period;
+
+ node = callchain_cursor_current(cursor);
+ new = add_child(parent, cursor, period);
+
+ /*
+ * This is the second child since we moved the parent's children
+ * to the new (first) child above.
+ */
+ p = parent->rb_root_in.rb_node;
+ first = rb_entry(p, struct callchain_node, rb_node_in);
+ cnode = list_first_entry(&first->val, struct callchain_list,
+ list);
+
+ if (match_chain(node, cnode) < 0)
+ pp = &p->rb_left;
+ else
+ pp = &p->rb_right;
+
+ rb_link_node(&new->rb_node_in, p, pp);
+ rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
} else {
parent->hit = period;
}
}
static int
-append_chain(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period);
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period);
static void
-append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period)
+append_chain_children(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
{
struct callchain_node *rnode;
+ struct callchain_cursor_node *node;
+ struct rb_node **p = &root->rb_root_in.rb_node;
+ struct rb_node *parent = NULL;
+
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ return;
/* lookup in childrens */
- chain_for_each_child(rnode, root) {
- unsigned int ret = append_chain(rnode, chain, start, period);
+ while (*p) {
+ s64 ret;
+
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node_in);
- if (!ret)
+ /* If at least the first entry matches, relay to the children */
+ ret = append_chain(rnode, cursor, period);
+ if (ret == 0)
goto inc_children_hit;
+
+ if (ret < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
}
/* nothing in children, add to the current node */
- add_child(root, chain, start, period);
+ rnode = add_child(root, cursor, period);
+ rb_link_node(&rnode->rb_node_in, parent, p);
+ rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
inc_children_hit:
root->children_hit += period;
}
static int
-append_chain(struct callchain_node *root, struct resolved_chain *chain,
- unsigned int start, u64 period)
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
{
struct callchain_list *cnode;
- unsigned int i = start;
+ u64 start = cursor->pos;
bool found = false;
+ u64 matches;
+ int cmp = 0;
/*
* Lookup in the current node
* If we have a symbol, then compare the start to match
- * anywhere inside a function.
+ * anywhere inside a function, unless function
+ * mode is disabled.
*/
list_for_each_entry(cnode, &root->val, list) {
- struct symbol *sym;
+ struct callchain_cursor_node *node;
- if (i == chain->nr)
+ node = callchain_cursor_current(cursor);
+ if (!node)
break;
- sym = chain->ips[i].ms.sym;
-
- if (cnode->ms.sym && sym) {
- if (cnode->ms.sym->start != sym->start)
- break;
- } else if (cnode->ip != chain->ips[i].ip)
+ cmp = match_chain(node, cnode);
+ if (cmp)
break;
- if (!found)
- found = true;
- i++;
+ found = true;
+
+ callchain_cursor_advance(cursor);
}
- /* matches not, relay on the parent */
- if (!found)
- return -1;
+ /* no match: relay to the parent */
+ if (!found) {
+ WARN_ONCE(!cmp, "Chain comparison error\n");
+ return cmp;
+ }
+
+ matches = cursor->pos - start;
/* we match only a part of the node. Split it and add the new chain */
- if (i - start < root->val_nr) {
- split_add_child(root, chain, cnode, start, i - start, period);
+ if (matches < root->val_nr) {
+ split_add_child(root, cursor, cnode, start, matches, period);
return 0;
}
/* we match 100% of the path, increment the hit */
- if (i - start == root->val_nr && i == chain->nr) {
+ if (matches == root->val_nr && cursor->pos == cursor->nr) {
root->hit += period;
return 0;
}
/* We match the node and still have a part remaining */
- append_chain_children(root, chain, i, period);
+ append_chain_children(root, cursor, period);
return 0;
}
-static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
- struct map_symbol *syms)
-{
- int i, j = 0;
-
- for (i = 0; i < (int)old->nr; i++) {
- if (old->ips[i] >= PERF_CONTEXT_MAX)
- continue;
-
- new->ips[j].ip = old->ips[i];
- new->ips[j].ms = syms[i];
- j++;
- }
-
- new->nr = j;
-}
-
-
-int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
- struct map_symbol *syms, u64 period)
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period)
{
- struct resolved_chain *filtered;
-
- if (!chain->nr)
+ if (!cursor->nr)
return 0;
- filtered = zalloc(sizeof(*filtered) +
- chain->nr * sizeof(struct resolved_ip));
- if (!filtered)
- return -ENOMEM;
-
- filter_context(chain, filtered, syms);
-
- if (!filtered->nr)
- goto end;
+ callchain_cursor_commit(cursor);
- append_chain_children(&root->node, filtered, 0, period);
+ append_chain_children(&root->node, cursor, period);
- if (filtered->nr > root->max_depth)
- root->max_depth = filtered->nr;
-end:
- free(filtered);
+ if (cursor->nr > root->max_depth)
+ root->max_depth = cursor->nr;
return 0;
}
static int
-merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
- struct resolved_chain *chain)
+merge_chain_branch(struct callchain_cursor *cursor,
+ struct callchain_node *dst, struct callchain_node *src)
{
- struct callchain_node *child, *next_child;
+ struct callchain_cursor_node **old_last = cursor->last;
+ struct callchain_node *child;
struct callchain_list *list, *next_list;
- int old_pos = chain->nr;
+ struct rb_node *n;
+ int old_pos = cursor->nr;
int err = 0;
list_for_each_entry_safe(list, next_list, &src->val, list) {
- chain->ips[chain->nr].ip = list->ip;
- chain->ips[chain->nr].ms = list->ms;
- chain->nr++;
+ callchain_cursor_append(cursor, list->ip,
+ list->ms.map, list->ms.sym);
list_del(&list->list);
free(list);
}
- if (src->hit)
- append_chain_children(dst, chain, 0, src->hit);
+ if (src->hit) {
+ callchain_cursor_commit(cursor);
+ append_chain_children(dst, cursor, src->hit);
+ }
+
+ n = rb_first(&src->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+ rb_erase(&child->rb_node_in, &src->rb_root_in);
- chain_for_each_child_safe(child, next_child, src) {
- err = merge_chain_branch(dst, child, chain);
+ err = merge_chain_branch(cursor, dst, child);
if (err)
break;
- list_del(&child->brothers);
free(child);
}
- chain->nr = old_pos;
+ cursor->nr = old_pos;
+ cursor->last = old_last;
return err;
}
-int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src)
{
- struct resolved_chain *chain;
- int err;
+ return merge_chain_branch(cursor, &dst->node, &src->node);
+}
- chain = malloc(sizeof(*chain) +
- src->max_depth * sizeof(struct resolved_ip));
- if (!chain)
- return -ENOMEM;
+int callchain_cursor_append(struct callchain_cursor *cursor,
+ u64 ip, struct map *map, struct symbol *sym)
+{
+ struct callchain_cursor_node *node = *cursor->last;
- chain->nr = 0;
+ if (!node) {
+ node = calloc(1, sizeof(*node));
+ if (!node)
+ return -ENOMEM;
- err = merge_chain_branch(&dst->node, &src->node, chain);
+ *cursor->last = node;
+ }
- free(chain);
+ node->ip = ip;
+ node->map = map;
+ node->sym = sym;
- return err;
+ cursor->nr++;
+
+ cursor->last = &node->next;
+
+ return 0;
+}
+
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+ struct perf_evsel *evsel, struct addr_location *al,
+ int max_stack)
+{
+ if (sample->callchain == NULL)
+ return 0;
+
+ if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
+ sort__has_parent) {
+ return machine__resolve_callchain(al->machine, evsel, al->thread,
+ sample, parent, al, max_stack);
+ }
+ return 0;
+}
+
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
+{
+ if (!symbol_conf.use_callchain)
+ return 0;
+ return callchain_append(he->callchain, &callchain_cursor, sample->period);
+}
+
+int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+ bool hide_unresolved)
+{
+ al->map = node->map;
+ al->sym = node->sym;
+ if (node->map)
+ al->addr = node->map->map_ip(node->map, node->ip);
+ else
+ al->addr = node->ip;
+
+ if (al->sym == NULL) {
+ if (hide_unresolved)
+ return 0;
+ if (al->map == NULL)
+ goto out;
+ }
+
+ if (al->map->groups == &al->machine->kmaps) {
+ if (machine__is_host(al->machine)) {
+ al->cpumode = PERF_RECORD_MISC_KERNEL;
+ al->level = 'k';
+ } else {
+ al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
+ al->level = 'g';
+ }
+ } else {
+ if (machine__is_host(al->machine)) {
+ al->cpumode = PERF_RECORD_MISC_USER;
+ al->level = '.';
+ } else if (perf_guest) {
+ al->cpumode = PERF_RECORD_MISC_GUEST_USER;
+ al->level = 'u';
+ } else {
+ al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ al->level = 'H';
+ }
+ }
+
+out:
+ return 1;
}
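
Putting the new flow together, the following sketch (not part of the patch) shows how a caller is expected to drive the cursor: reset the thread-local cursor, append resolved frames, then hand the cursor to callchain_append(), which commits it and folds the frames into the radix tree. The addresses are placeholders and error handling is omitted.

	struct callchain_root root;

	callchain_init(&root);
	callchain_cursor_reset(&callchain_cursor);

	/* append resolved frames; map/sym may be NULL for unresolved ips */
	callchain_cursor_append(&callchain_cursor, 0x400512, NULL, NULL);
	callchain_cursor_append(&callchain_cursor, 0x4003f0, NULL, NULL);

	/* commits the cursor internally and accounts 'period' hits */
	callchain_append(&root, &callchain_cursor, 1);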
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index c15fb8c24ad..8f84423a75d 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -7,6 +7,13 @@
#include "event.h"
#include "symbol.h"
+enum perf_call_graph_mode {
+ CALLCHAIN_NONE,
+ CALLCHAIN_FP,
+ CALLCHAIN_DWARF,
+ CALLCHAIN_MAX
+};
+
enum chain_mode {
CHAIN_NONE,
CHAIN_FLAT,
@@ -14,13 +21,18 @@ enum chain_mode {
CHAIN_GRAPH_REL
};
+enum chain_order {
+ ORDER_CALLER,
+ ORDER_CALLEE
+};
+
struct callchain_node {
struct callchain_node *parent;
- struct list_head brothers;
- struct list_head children;
struct list_head val;
- struct rb_node rb_node; /* to sort nodes in an rbtree */
- struct rb_root rb_root; /* sorted tree of children */
+ struct rb_node rb_node_in; /* to insert nodes in an rbtree */
+ struct rb_node rb_node; /* to sort nodes in an output tree */
+ struct rb_root rb_root_in; /* input tree of children */
+ struct rb_root rb_root; /* sorted output tree of children */
unsigned int val_nr;
u64 hit;
u64 children_hit;
@@ -36,11 +48,18 @@ struct callchain_param;
typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
u64, struct callchain_param *);
+enum chain_key {
+ CCKEY_FUNCTION,
+ CCKEY_ADDRESS
+};
+
struct callchain_param {
enum chain_mode mode;
u32 print_limit;
double min_percent;
sort_chain_func_t sort;
+ enum chain_order order;
+ enum chain_key key;
};
struct callchain_list {
@@ -49,27 +68,112 @@ struct callchain_list {
struct list_head list;
};
+/*
+ * A callchain cursor is a singly linked list that
+ * lets one feed a callchain progressively.
+ * It keeps persistently allocated entries to minimize
+ * allocations.
+ */
+struct callchain_cursor_node {
+ u64 ip;
+ struct map *map;
+ struct symbol *sym;
+ struct callchain_cursor_node *next;
+};
+
+struct callchain_cursor {
+ u64 nr;
+ struct callchain_cursor_node *first;
+ struct callchain_cursor_node **last;
+ u64 pos;
+ struct callchain_cursor_node *curr;
+};
+
+extern __thread struct callchain_cursor callchain_cursor;
+
static inline void callchain_init(struct callchain_root *root)
{
- INIT_LIST_HEAD(&root->node.brothers);
- INIT_LIST_HEAD(&root->node.children);
INIT_LIST_HEAD(&root->node.val);
root->node.parent = NULL;
root->node.hit = 0;
root->node.children_hit = 0;
+ root->node.rb_root_in = RB_ROOT;
root->max_depth = 0;
}
-static inline u64 cumul_hits(struct callchain_node *node)
+static inline u64 callchain_cumul_hits(struct callchain_node *node)
{
return node->hit + node->children_hit;
}
-int register_callchain_param(struct callchain_param *param);
-int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
- struct map_symbol *syms, u64 period);
-int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
+int callchain_register_param(struct callchain_param *param);
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period);
+
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src);
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+ struct map *map, struct symbol *sym);
-bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
+/* Close a cursor writing session. Initialize for the reader */
+static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->first;
+ cursor->pos = 0;
+}
+
+/* Cursor reading iteration helpers */
+static inline struct callchain_cursor_node *
+callchain_cursor_current(struct callchain_cursor *cursor)
+{
+ if (cursor->pos == cursor->nr)
+ return NULL;
+
+ return cursor->curr;
+}
+
+static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->curr->next;
+ cursor->pos++;
+}
+
+struct option;
+struct hist_entry;
+
+int record_parse_callchain(const char *arg, struct record_opts *opts);
+int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+ struct perf_evsel *evsel, struct addr_location *al,
+ int max_stack);
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
+int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+ bool hide_unresolved);
+
+extern const char record_callchain_help[];
+int parse_callchain_report_opt(const char *arg);
+
+static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
+ struct callchain_cursor *src)
+{
+ *dest = *src;
+
+ dest->first = src->curr;
+ dest->nr -= src->pos;
+}
#endif /* __PERF_CALLCHAIN_H */
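
As a complementary sketch (again not from the patch), reading a cursor back after a writing session uses the commit/current/advance helpers declared above; the output format is illustrative, and the name field is the one struct symbol exposes in util/symbol.h.

	struct callchain_cursor_node *node;

	callchain_cursor_commit(&callchain_cursor);

	while ((node = callchain_cursor_current(&callchain_cursor)) != NULL) {
		printf("ip=%#llx sym=%s\n", (unsigned long long)node->ip,
		       node->sym ? node->sym->name : "[unknown]");
		callchain_cursor_advance(&callchain_cursor);
	}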
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
new file mode 100644
index 00000000000..88f7be39943
--- /dev/null
+++ b/tools/perf/util/cgroup.c
@@ -0,0 +1,177 @@
+#include "util.h"
+#include "../perf.h"
+#include "parse-options.h"
+#include "evsel.h"
+#include "cgroup.h"
+#include "evlist.h"
+
+int nr_cgroups;
+
+static int
+cgroupfs_find_mountpoint(char *buf, size_t maxlen)
+{
+ FILE *fp;
+ char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
+ char *token, *saved_ptr = NULL;
+ int found = 0;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return -1;
+
+ /*
+ * in order to handle split hierarchies, we need to scan /proc/mounts
+ * and inspect every cgroupfs mount point to find one that has the
+ * perf_event subsystem
+ */
+ while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
+ STR(PATH_MAX)"s %*d %*d\n",
+ mountpoint, type, tokens) == 3) {
+
+ if (!strcmp(type, "cgroup")) {
+
+ token = strtok_r(tokens, ",", &saved_ptr);
+
+ while (token != NULL) {
+ if (!strcmp(token, "perf_event")) {
+ found = 1;
+ break;
+ }
+ token = strtok_r(NULL, ",", &saved_ptr);
+ }
+ }
+ if (found)
+ break;
+ }
+ fclose(fp);
+ if (!found)
+ return -1;
+
+ if (strlen(mountpoint) < maxlen) {
+ strcpy(buf, mountpoint);
+ return 0;
+ }
+ return -1;
+}
+
+static int open_cgroup(char *name)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+ int fd;
+
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
+ return -1;
+
+ snprintf(path, PATH_MAX, "%s/%s", mnt, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ fprintf(stderr, "no access to cgroup %s\n", path);
+
+ return fd;
+}
+
+static int add_cgroup(struct perf_evlist *evlist, char *str)
+{
+ struct perf_evsel *counter;
+ struct cgroup_sel *cgrp = NULL;
+ int n;
+ /*
+ * check if cgrp is already defined, if so we reuse it
+ */
+ evlist__for_each(evlist, counter) {
+ cgrp = counter->cgrp;
+ if (!cgrp)
+ continue;
+ if (!strcmp(cgrp->name, str))
+ break;
+
+ cgrp = NULL;
+ }
+
+ if (!cgrp) {
+ cgrp = zalloc(sizeof(*cgrp));
+ if (!cgrp)
+ return -1;
+
+ cgrp->name = str;
+
+ cgrp->fd = open_cgroup(str);
+ if (cgrp->fd == -1) {
+ free(cgrp);
+ return -1;
+ }
+ }
+
+ /*
+ * find the corresponding event:
+ * if we add cgroup N, we need to find event N
+ */
+ n = 0;
+ evlist__for_each(evlist, counter) {
+ if (n == nr_cgroups)
+ goto found;
+ n++;
+ }
+ if (cgrp->refcnt == 0)
+ free(cgrp);
+
+ return -1;
+found:
+ cgrp->refcnt++;
+ counter->cgrp = cgrp;
+ return 0;
+}
+
+void close_cgroup(struct cgroup_sel *cgrp)
+{
+ if (!cgrp)
+ return;
+
+ /* XXX: not reentrant */
+ if (--cgrp->refcnt == 0) {
+ close(cgrp->fd);
+ zfree(&cgrp->name);
+ free(cgrp);
+ }
+}
+
+int parse_cgroups(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ const char *p, *e, *eos = str + strlen(str);
+ char *s;
+ int ret;
+
+ if (list_empty(&evlist->entries)) {
+ fprintf(stderr, "must define events before cgroups\n");
+ return -1;
+ }
+
+ for (;;) {
+ p = strchr(str, ',');
+ e = p ? p : eos;
+
+ /* allow empty cgroups, i.e., skip */
+ if (e - str) {
+ /* termination added */
+ s = strndup(str, e - str);
+ if (!s)
+ return -1;
+ ret = add_cgroup(evlist, s);
+ if (ret) {
+ free(s);
+ return -1;
+ }
+ }
+ /* nr_cgroups is increased even for empty cgroups */
+ nr_cgroups++;
+ if (!p)
+ break;
+ str = p+1;
+ }
+ return 0;
+}
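
The loop in parse_cgroups() pairs cgroups with events positionally: the Nth (possibly empty) entry in the comma-separated list attaches to the Nth event, so a command along the lines of 'perf stat -e e1,e2,e3 -G foo,,bar' is expected to constrain the first and third events to cgroups foo and bar while the second stays unconstrained. The standalone sketch below (not from the patch) isolates just that splitting behaviour.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: split a -G style list the same way parse_cgroups() does,
 * keeping empty entries so positions still line up with events. */
static void split_cgroup_list(const char *str)
{
	const char *p, *e, *eos = str + strlen(str);
	int pos = 0;

	for (;;) {
		p = strchr(str, ',');
		e = p ? p : eos;

		if (e - str) {
			char *s = strndup(str, e - str);

			printf("event %d -> cgroup '%s'\n", pos, s);
			free(s);
		} else {
			printf("event %d -> no cgroup\n", pos);
		}
		pos++;
		if (!p)
			break;
		str = p + 1;
	}
}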
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
new file mode 100644
index 00000000000..89acd6debdc
--- /dev/null
+++ b/tools/perf/util/cgroup.h
@@ -0,0 +1,17 @@
+#ifndef __CGROUP_H__
+#define __CGROUP_H__
+
+struct option;
+
+struct cgroup_sel {
+ char *name;
+ int fd;
+ int refcnt;
+};
+
+
+extern int nr_cgroups; /* number of explicit cgroups defined */
+extern void close_cgroup(struct cgroup_sel *cgrp);
+extern int parse_cgroups(const struct option *opt, const char *str, int unset);
+
+#endif /* __CGROUP_H__ */
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index e191eb9a667..87b8672eb41 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -1,5 +1,7 @@
+#include <linux/kernel.h>
#include "cache.h"
#include "color.h"
+#include <math.h>
int perf_use_color_default = -1;
@@ -182,12 +184,12 @@ static int __color_vsnprintf(char *bf, size_t size, const char *color,
}
if (perf_use_color_default && *color)
- r += snprintf(bf, size, "%s", color);
- r += vsnprintf(bf + r, size - r, fmt, args);
+ r += scnprintf(bf, size, "%s", color);
+ r += vscnprintf(bf + r, size - r, fmt, args);
if (perf_use_color_default && *color)
- r += snprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
+ r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
if (trail)
- r += snprintf(bf + r, size - r, "%s", trail);
+ r += scnprintf(bf + r, size - r, "%s", trail);
return r;
}
@@ -200,7 +202,7 @@ static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
* Auto-detect:
*/
if (perf_use_color_default < 0) {
- if (isatty(1) || pager_in_use())
+ if (isatty(fileno(fp)) || pager_in_use())
perf_use_color_default = 1;
else
perf_use_color_default = 0;
@@ -297,10 +299,10 @@ const char *get_percent_color(double percent)
* entries in green - and keep the low overhead places
* normal:
*/
- if (percent >= MIN_RED)
+ if (fabs(percent) >= MIN_RED)
color = PERF_COLOR_RED;
else {
- if (percent > MIN_GREEN)
+ if (fabs(percent) > MIN_GREEN)
color = PERF_COLOR_GREEN;
}
return color;
@@ -317,8 +319,19 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
return r;
}
-int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
+int value_color_snprintf(char *bf, size_t size, const char *fmt, double value)
+{
+ const char *color = get_percent_color(value);
+ return color_snprintf(bf, size, color, fmt, value);
+}
+
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
{
- const char *color = get_percent_color(percent);
- return color_snprintf(bf, size, color, fmt, percent);
+ va_list args;
+ double percent;
+
+ va_start(args, fmt);
+ percent = va_arg(args, double);
+ va_end(args);
+ return value_color_snprintf(bf, size, fmt, percent);
}
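
Because the new variadic percent_color_snprintf() pulls exactly one double back out of the argument list, existing call sites keep passing the percentage as before; a hypothetical call (not from the patch) looks like this.

	char bf[32];

	/* colored according to the MIN_RED/MIN_GREEN thresholds */
	percent_color_snprintf(bf, sizeof(bf), "%6.2f%%", 73.5);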
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index dea082b7960..7ff30a62a13 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -39,7 +39,8 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
-int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent);
+int value_color_snprintf(char *bf, size_t size, const char *fmt, double value);
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
const char *get_percent_color(double percent);
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
new file mode 100644
index 00000000000..f9e777629e2
--- /dev/null
+++ b/tools/perf/util/comm.c
@@ -0,0 +1,122 @@
+#include "comm.h"
+#include "util.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+struct comm_str {
+ char *str;
+ struct rb_node rb_node;
+ int ref;
+};
+
+/* Should perhaps be moved to struct machine */
+static struct rb_root comm_str_root;
+
+static void comm_str__get(struct comm_str *cs)
+{
+ cs->ref++;
+}
+
+static void comm_str__put(struct comm_str *cs)
+{
+ if (!--cs->ref) {
+ rb_erase(&cs->rb_node, &comm_str_root);
+ zfree(&cs->str);
+ free(cs);
+ }
+}
+
+static struct comm_str *comm_str__alloc(const char *str)
+{
+ struct comm_str *cs;
+
+ cs = zalloc(sizeof(*cs));
+ if (!cs)
+ return NULL;
+
+ cs->str = strdup(str);
+ if (!cs->str) {
+ free(cs);
+ return NULL;
+ }
+
+ return cs;
+}
+
+static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct comm_str *iter, *new;
+ int cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct comm_str, rb_node);
+
+ cmp = strcmp(str, iter->str);
+ if (!cmp)
+ return iter;
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ new = comm_str__alloc(str);
+ if (!new)
+ return NULL;
+
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, root);
+
+ return new;
+}
+
+struct comm *comm__new(const char *str, u64 timestamp)
+{
+ struct comm *comm = zalloc(sizeof(*comm));
+
+ if (!comm)
+ return NULL;
+
+ comm->start = timestamp;
+
+ comm->comm_str = comm_str__findnew(str, &comm_str_root);
+ if (!comm->comm_str) {
+ free(comm);
+ return NULL;
+ }
+
+ comm_str__get(comm->comm_str);
+
+ return comm;
+}
+
+int comm__override(struct comm *comm, const char *str, u64 timestamp)
+{
+ struct comm_str *new, *old = comm->comm_str;
+
+ new = comm_str__findnew(str, &comm_str_root);
+ if (!new)
+ return -ENOMEM;
+
+ comm_str__get(new);
+ comm_str__put(old);
+ comm->comm_str = new;
+ comm->start = timestamp;
+
+ return 0;
+}
+
+void comm__free(struct comm *comm)
+{
+ comm_str__put(comm->comm_str);
+ free(comm);
+}
+
+const char *comm__str(const struct comm *comm)
+{
+ return comm->comm_str->str;
+}
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
new file mode 100644
index 00000000000..fac5bd51bef
--- /dev/null
+++ b/tools/perf/util/comm.h
@@ -0,0 +1,21 @@
+#ifndef __PERF_COMM_H
+#define __PERF_COMM_H
+
+#include "../perf.h"
+#include <linux/rbtree.h>
+#include <linux/list.h>
+
+struct comm_str;
+
+struct comm {
+ struct comm_str *comm_str;
+ u64 start;
+ struct list_head list;
+};
+
+void comm__free(struct comm *comm);
+struct comm *comm__new(const char *str, u64 timestamp);
+const char *comm__str(const struct comm *comm);
+int comm__override(struct comm *comm, const char *str, u64 timestamp);
+
+#endif /* __PERF_COMM_H */
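
A small usage sketch (not from the patch) of the interning behaviour: two comms built from the same string share one reference-counted comm_str node, and comm__override() swaps the shared string while adjusting the counts. NULL checks are omitted for brevity.

	struct comm *a = comm__new("bash", 1000);
	struct comm *b = comm__new("bash", 2000);

	/* a and b share one interned "bash" entry (refcount 2) */

	comm__override(a, "sh", 3000);	/* drops one "bash" ref, interns "sh" */

	comm__free(a);			/* last "sh" ref gone, entry freed */
	comm__free(b);			/* last "bash" ref gone, entry freed */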
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index e02d78cae70..24519e14ac5 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -1,5 +1,8 @@
/*
- * GIT - The information manager from hell
+ * config.c
+ *
+ * Helper functions for parsing config items.
+ * Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
@@ -8,6 +11,7 @@
#include "util.h"
#include "cache.h"
#include "exec_cmd.h"
+#include "util/hist.h" /* perf_hist_config */
#define MAXNAME (256)
@@ -117,7 +121,7 @@ static char *parse_value(void)
static inline int iskeychar(int c)
{
- return isalnum(c) || c == '-';
+ return isalnum(c) || c == '-' || c == '_';
}
static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
@@ -339,18 +343,23 @@ const char *perf_config_dirname(const char *name, const char *value)
return value;
}
-static int perf_default_core_config(const char *var __used, const char *value __used)
+static int perf_default_core_config(const char *var __maybe_unused,
+ const char *value __maybe_unused)
{
- /* Add other config variables here and to Documentation/config.txt. */
+ /* Add other config variables here. */
return 0;
}
-int perf_default_config(const char *var, const char *value, void *dummy __used)
+int perf_default_config(const char *var, const char *value,
+ void *dummy __maybe_unused)
{
if (!prefixcmp(var, "core."))
return perf_default_core_config(var, value);
- /* Add other config variables here and to Documentation/config.txt. */
+ if (!prefixcmp(var, "hist."))
+ return perf_hist_config(var, value);
+
+ /* Add other config variables here. */
return 0;
}
@@ -399,7 +408,6 @@ static int perf_config_global(void)
int perf_config(config_fn_t fn, void *data)
{
int ret = 0, found = 0;
- char *repo_config = NULL;
const char *home = NULL;
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
@@ -414,19 +422,32 @@ int perf_config(config_fn_t fn, void *data)
home = getenv("HOME");
if (perf_config_global() && home) {
char *user_config = strdup(mkpath("%s/.perfconfig", home));
- if (!access(user_config, R_OK)) {
- ret += perf_config_from_file(fn, user_config, data);
- found += 1;
+ struct stat st;
+
+ if (user_config == NULL) {
+ warning("Not enough memory to process %s/.perfconfig, "
+ "ignoring it.", home);
+ goto out;
}
- free(user_config);
- }
- repo_config = perf_pathdup("config");
- if (!access(repo_config, R_OK)) {
- ret += perf_config_from_file(fn, repo_config, data);
+ if (stat(user_config, &st) < 0)
+ goto out_free;
+
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ warning("File %s not owned by current user or root, "
+ "ignoring it.", user_config);
+ goto out_free;
+ }
+
+ if (!st.st_size)
+ goto out_free;
+
+ ret += perf_config_from_file(fn, user_config, data);
found += 1;
+out_free:
+ free(user_config);
}
- free(repo_config);
+out:
if (found == 0)
return -1;
return ret;
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 0f9b8d7a7d7..c4e55b71010 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -1,49 +1,83 @@
#include "util.h"
+#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <stdio.h>
+#include <stdlib.h>
-int cpumap[MAX_NR_CPUS];
-
-static int default_cpu_map(void)
+static struct cpu_map *cpu_map__default_new(void)
{
- int nr_cpus, i;
+ struct cpu_map *cpus;
+ int nr_cpus;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- assert(nr_cpus <= MAX_NR_CPUS);
- assert((int)nr_cpus >= 0);
+ if (nr_cpus < 0)
+ return NULL;
+
+ cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
+ if (cpus != NULL) {
+ int i;
+ for (i = 0; i < nr_cpus; ++i)
+ cpus->map[i] = i;
- for (i = 0; i < nr_cpus; ++i)
- cpumap[i] = i;
+ cpus->nr = nr_cpus;
+ }
- return nr_cpus;
+ return cpus;
}
-static int read_all_cpu_map(void)
+static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
- FILE *onlnf;
+ size_t payload_size = nr_cpus * sizeof(int);
+ struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+
+ if (cpus != NULL) {
+ cpus->nr = nr_cpus;
+ memcpy(cpus->map, tmp_cpus, payload_size);
+ }
+
+ return cpus;
+}
+
+struct cpu_map *cpu_map__read(FILE *file)
+{
+ struct cpu_map *cpus = NULL;
int nr_cpus = 0;
+ int *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
int n, cpu, prev;
char sep;
- onlnf = fopen("/sys/devices/system/cpu/online", "r");
- if (!onlnf)
- return default_cpu_map();
-
sep = 0;
prev = -1;
for (;;) {
- n = fscanf(onlnf, "%u%c", &cpu, &sep);
+ n = fscanf(file, "%u%c", &cpu, &sep);
if (n <= 0)
break;
if (prev >= 0) {
- assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS);
+ int new_max = nr_cpus + cpu - prev - 1;
+
+ if (new_max >= max_entries) {
+ max_entries = new_max + MAX_NR_CPUS / 2;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
while (++prev < cpu)
- cpumap[nr_cpus++] = prev;
+ tmp_cpus[nr_cpus++] = prev;
}
- assert (nr_cpus < MAX_NR_CPUS);
- cpumap[nr_cpus++] = cpu;
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ tmp_cpus[nr_cpus++] = cpu;
if (n == 2 && sep == '-')
prev = cpu;
else
@@ -51,24 +85,44 @@ static int read_all_cpu_map(void)
if (n == 1 || sep == '\n')
break;
}
- fclose(onlnf);
+
if (nr_cpus > 0)
- return nr_cpus;
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
+out_free_tmp:
+ free(tmp_cpus);
+ return cpus;
+}
+
+static struct cpu_map *cpu_map__read_all_cpu_map(void)
+{
+ struct cpu_map *cpus = NULL;
+ FILE *onlnf;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (!onlnf)
+ return cpu_map__default_new();
- return default_cpu_map();
+ cpus = cpu_map__read(onlnf);
+ fclose(onlnf);
+ return cpus;
}
-int read_cpu_map(const char *cpu_list)
+struct cpu_map *cpu_map__new(const char *cpu_list)
{
+ struct cpu_map *cpus = NULL;
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
+ int *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
if (!cpu_list)
- return read_all_cpu_map();
+ return cpu_map__read_all_cpu_map();
if (!isdigit(*cpu_list))
- goto invalid;
+ goto out;
while (isdigit(*cpu_list)) {
p = NULL;
@@ -94,21 +148,332 @@ int read_cpu_map(const char *cpu_list)
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
for (i = 0; i < nr_cpus; i++)
- if (cpumap[i] == (int)start_cpu)
+ if (tmp_cpus[i] == (int)start_cpu)
goto invalid;
- assert(nr_cpus < MAX_NR_CPUS);
- cpumap[nr_cpus++] = (int)start_cpu;
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto invalid;
+ tmp_cpus = tmp;
+ }
+ tmp_cpus[nr_cpus++] = (int)start_cpu;
}
if (*p)
++p;
cpu_list = p;
}
- if (nr_cpus > 0)
- return nr_cpus;
- return default_cpu_map();
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
invalid:
- return -1;
+ free(tmp_cpus);
+out:
+ return cpus;
+}
+
+size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
+{
+ int i;
+ size_t printed = fprintf(fp, "%d cpu%s: ",
+ map->nr, map->nr > 1 ? "s" : "");
+ for (i = 0; i < map->nr; ++i)
+ printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);
+
+ return printed + fprintf(fp, "\n");
+}
+
+struct cpu_map *cpu_map__dummy_new(void)
+{
+ struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+
+ if (cpus != NULL) {
+ cpus->nr = 1;
+ cpus->map[0] = -1;
+ }
+
+ return cpus;
+}
+
+void cpu_map__delete(struct cpu_map *map)
+{
+ free(map);
+}
+
+int cpu_map__get_socket(struct cpu_map *map, int idx)
+{
+ FILE *fp;
+ const char *mnt;
+ char path[PATH_MAX];
+ int cpu, ret;
+
+ if (idx > map->nr)
+ return -1;
+
+ cpu = map->map[idx];
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/devices/system/cpu/cpu%d/topology/physical_package_id",
+ mnt, cpu);
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return -1;
+ ret = fscanf(fp, "%d", &cpu);
+ fclose(fp);
+ return ret == 1 ? cpu : -1;
+}
+
+static int cmp_ids(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
+ int (*f)(struct cpu_map *map, int cpu))
+{
+ struct cpu_map *c;
+ int nr = cpus->nr;
+ int cpu, s1, s2;
+
+ /* allocate as much as possible */
+ c = calloc(1, sizeof(*c) + nr * sizeof(int));
+ if (!c)
+ return -1;
+
+ for (cpu = 0; cpu < nr; cpu++) {
+ s1 = f(cpus, cpu);
+ for (s2 = 0; s2 < c->nr; s2++) {
+ if (s1 == c->map[s2])
+ break;
+ }
+ if (s2 == c->nr) {
+ c->map[c->nr] = s1;
+ c->nr++;
+ }
+ }
+ /* ensure we process id in increasing order */
+ qsort(c->map, c->nr, sizeof(int), cmp_ids);
+
+ *res = c;
+ return 0;
+}
+
+int cpu_map__get_core(struct cpu_map *map, int idx)
+{
+ FILE *fp;
+ const char *mnt;
+ char path[PATH_MAX];
+ int cpu, ret, s;
+
+ if (idx > map->nr)
+ return -1;
+
+ cpu = map->map[idx];
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/devices/system/cpu/cpu%d/topology/core_id",
+ mnt, cpu);
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return -1;
+ ret = fscanf(fp, "%d", &cpu);
+ fclose(fp);
+ if (ret != 1)
+ return -1;
+
+ s = cpu_map__get_socket(map, idx);
+ if (s == -1)
+ return -1;
+
+ /*
+ * encode the socket id in the upper 16 bits:
+ * core_id is relative to its socket, and
+ * we need a global id, so we combine
+ * socket + core id
+ */
+ return (s << 16) | (cpu & 0xffff);
+}
+
+int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
+{
+ return cpu_map__build_map(cpus, sockp, cpu_map__get_socket);
+}
+
+int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
+{
+ return cpu_map__build_map(cpus, corep, cpu_map__get_core);
+}
+
+/* setup simple routines to easily access node numbers given a cpu number */
+static int get_max_num(char *path, int *max)
+{
+ size_t num;
+ char *buf;
+ int err = 0;
+
+ if (filename__read_str(path, &buf, &num))
+ return -1;
+
+ buf[num] = '\0';
+
+ /* start on the right, to find highest node num */
+ while (--num) {
+ if ((buf[num] == ',') || (buf[num] == '-')) {
+ num++;
+ break;
+ }
+ }
+ if (sscanf(&buf[num], "%d", max) < 1) {
+ err = -1;
+ goto out;
+ }
+
+ /* convert from 0-based to 1-based */
+ (*max)++;
+
+out:
+ free(buf);
+ return err;
+}
+
+/* Determine highest possible cpu in the system for sparse allocation */
+static void set_max_cpu_num(void)
+{
+ const char *mnt;
+ char path[PATH_MAX];
+ int ret = -1;
+
+ /* set up default */
+ max_cpu_num = 4096;
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ goto out;
+
+ /* get the highest possible cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
+ if (ret == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+
+ ret = get_max_num(path, &max_cpu_num);
+
+out:
+ if (ret)
+ pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
+}
+
+/* Determine highest possible node in the system for sparse allocation */
+static void set_max_node_num(void)
+{
+ const char *mnt;
+ char path[PATH_MAX];
+ int ret = -1;
+
+ /* set up default */
+ max_node_num = 8;
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ goto out;
+
+ /* get the highest possible node number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
+ if (ret == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+
+ ret = get_max_num(path, &max_node_num);
+
+out:
+ if (ret)
+ pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
+}
+
+static int init_cpunode_map(void)
+{
+ int i;
+
+ set_max_cpu_num();
+ set_max_node_num();
+
+ cpunode_map = calloc(max_cpu_num, sizeof(int));
+ if (!cpunode_map) {
+ pr_err("%s: calloc failed\n", __func__);
+ return -1;
+ }
+
+ for (i = 0; i < max_cpu_num; i++)
+ cpunode_map[i] = -1;
+
+ return 0;
+}
+
+int cpu__setup_cpunode_map(void)
+{
+ struct dirent *dent1, *dent2;
+ DIR *dir1, *dir2;
+ unsigned int cpu, mem;
+ char buf[PATH_MAX];
+ char path[PATH_MAX];
+ const char *mnt;
+ int n;
+
+ /* initialize globals */
+ if (init_cpunode_map())
+ return -1;
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ return 0;
+
+ n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
+ if (n == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ return -1;
+ }
+
+ dir1 = opendir(path);
+ if (!dir1)
+ return 0;
+
+ /* walk tree and setup map */
+ while ((dent1 = readdir(dir1)) != NULL) {
+ if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
+ continue;
+
+ n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
+ if (n == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ continue;
+ }
+
+ dir2 = opendir(buf);
+ if (!dir2)
+ continue;
+ while ((dent2 = readdir(dir2)) != NULL) {
+ if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
+ continue;
+ cpunode_map[cpu] = mem;
+ }
+ closedir(dir2);
+ }
+ closedir(dir1);
+ return 0;
}
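
To illustrate the parser above (a hypothetical snippet, not part of the patch), a '-C' style list with a range and a single cpu is expected to produce a four-entry map.

	struct cpu_map *map = cpu_map__new("0-2,4");

	if (map) {
		/* map->nr == 4, map->map[] == { 0, 1, 2, 4 } */
		cpu_map__fprintf(map, stdout);	/* "4 cpus: 0, 1, 2, 4" */
		cpu_map__delete(map);
	}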
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 3e60f56e490..61a65484900 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -1,7 +1,84 @@
#ifndef __PERF_CPUMAP_H
#define __PERF_CPUMAP_H
-extern int read_cpu_map(const char *cpu_list);
-extern int cpumap[];
+#include <stdio.h>
+#include <stdbool.h>
+
+#include "perf.h"
+#include "util/debug.h"
+
+struct cpu_map {
+ int nr;
+ int map[];
+};
+
+struct cpu_map *cpu_map__new(const char *cpu_list);
+struct cpu_map *cpu_map__dummy_new(void);
+void cpu_map__delete(struct cpu_map *map);
+struct cpu_map *cpu_map__read(FILE *file);
+size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
+int cpu_map__get_socket(struct cpu_map *map, int idx);
+int cpu_map__get_core(struct cpu_map *map, int idx);
+int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
+int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
+
+static inline int cpu_map__socket(struct cpu_map *sock, int s)
+{
+ if (!sock || s > sock->nr || s < 0)
+ return 0;
+ return sock->map[s];
+}
+
+static inline int cpu_map__id_to_socket(int id)
+{
+ return id >> 16;
+}
+
+static inline int cpu_map__id_to_cpu(int id)
+{
+ return id & 0xffff;
+}
+
+static inline int cpu_map__nr(const struct cpu_map *map)
+{
+ return map ? map->nr : 1;
+}
+
+static inline bool cpu_map__empty(const struct cpu_map *map)
+{
+ return map ? map->map[0] == -1 : true;
+}
+
+int max_cpu_num;
+int max_node_num;
+int *cpunode_map;
+
+int cpu__setup_cpunode_map(void);
+
+static inline int cpu__max_node(void)
+{
+ if (unlikely(!max_node_num))
+ pr_debug("cpu_map not initialized\n");
+
+ return max_node_num;
+}
+
+static inline int cpu__max_cpu(void)
+{
+ if (unlikely(!max_cpu_num))
+ pr_debug("cpu_map not initialized\n");
+
+ return max_cpu_num;
+}
+
+static inline int cpu__get_node(int cpu)
+{
+ if (unlikely(cpunode_map == NULL)) {
+ pr_debug("cpu_map not initialized\n");
+ return -1;
+ }
+
+ return cpunode_map[cpu];
+}
#endif /* __PERF_CPUMAP_H */
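
A tiny sketch (not from the patch) of the packed id convention used by cpu_map__get_core() together with the two decode helpers above; the values are made up.

	int s = 1, core = 3;
	int id = (s << 16) | (core & 0xffff);	/* same packing as cpu_map__get_core() */

	/* decode with the helpers above:          */
	/*   cpu_map__id_to_socket(id) == 1        */
	/*   cpu_map__id_to_cpu(id)    == 3        */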
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c
index 35073621e5d..aada3ac5e89 100644
--- a/tools/perf/util/ctype.c
+++ b/tools/perf/util/ctype.c
@@ -3,7 +3,7 @@
*
* No surprises, and works with signed and unsigned chars.
*/
-#include "cache.h"
+#include "util.h"
enum {
S = GIT_SPACE,
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
new file mode 100644
index 00000000000..55de44ecebe
--- /dev/null
+++ b/tools/perf/util/data.c
@@ -0,0 +1,133 @@
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "data.h"
+#include "util.h"
+
+static bool check_pipe(struct perf_data_file *file)
+{
+ struct stat st;
+ bool is_pipe = false;
+ int fd = perf_data_file__is_read(file) ?
+ STDIN_FILENO : STDOUT_FILENO;
+
+ if (!file->path) {
+ if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
+ is_pipe = true;
+ } else {
+ if (!strcmp(file->path, "-"))
+ is_pipe = true;
+ }
+
+ if (is_pipe)
+ file->fd = fd;
+
+ return file->is_pipe = is_pipe;
+}
+
+static int check_backup(struct perf_data_file *file)
+{
+ struct stat st;
+
+ if (!stat(file->path, &st) && st.st_size) {
+ /* TODO check errors properly */
+ char oldname[PATH_MAX];
+ snprintf(oldname, sizeof(oldname), "%s.old",
+ file->path);
+ unlink(oldname);
+ rename(file->path, oldname);
+ }
+
+ return 0;
+}
+
+static int open_file_read(struct perf_data_file *file)
+{
+ struct stat st;
+ int fd;
+
+ fd = open(file->path, O_RDONLY);
+ if (fd < 0) {
+ int err = errno;
+
+ pr_err("failed to open %s: %s", file->path, strerror(err));
+ if (err == ENOENT && !strcmp(file->path, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -err;
+ }
+
+ if (fstat(fd, &st) < 0)
+ goto out_close;
+
+ if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ file->path);
+ goto out_close;
+ }
+
+ if (!st.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ file->path);
+ goto out_close;
+ }
+
+ file->size = st.st_size;
+ return fd;
+
+ out_close:
+ close(fd);
+ return -1;
+}
+
+static int open_file_write(struct perf_data_file *file)
+{
+ int fd;
+
+ if (check_backup(file))
+ return -1;
+
+ fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+
+ if (fd < 0)
+ pr_err("failed to open %s : %s\n", file->path, strerror(errno));
+
+ return fd;
+}
+
+static int open_file(struct perf_data_file *file)
+{
+ int fd;
+
+ fd = perf_data_file__is_read(file) ?
+ open_file_read(file) : open_file_write(file);
+
+ file->fd = fd;
+ return fd < 0 ? -1 : 0;
+}
+
+int perf_data_file__open(struct perf_data_file *file)
+{
+ if (check_pipe(file))
+ return 0;
+
+ if (!file->path)
+ file->path = "perf.data";
+
+ return open_file(file);
+}
+
+void perf_data_file__close(struct perf_data_file *file)
+{
+ close(file->fd);
+}
+
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size)
+{
+ return writen(file->fd, buf, size);
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
new file mode 100644
index 00000000000..2b15d0c95c7
--- /dev/null
+++ b/tools/perf/util/data.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_DATA_H
+#define __PERF_DATA_H
+
+#include <stdbool.h>
+
+enum perf_data_mode {
+ PERF_DATA_MODE_WRITE,
+ PERF_DATA_MODE_READ,
+};
+
+struct perf_data_file {
+ const char *path;
+ int fd;
+ bool is_pipe;
+ bool force;
+ unsigned long size;
+ enum perf_data_mode mode;
+};
+
+static inline bool perf_data_file__is_read(struct perf_data_file *file)
+{
+ return file->mode == PERF_DATA_MODE_READ;
+}
+
+static inline bool perf_data_file__is_write(struct perf_data_file *file)
+{
+ return file->mode == PERF_DATA_MODE_WRITE;
+}
+
+static inline int perf_data_file__is_pipe(struct perf_data_file *file)
+{
+ return file->is_pipe;
+}
+
+static inline int perf_data_file__fd(struct perf_data_file *file)
+{
+ return file->fd;
+}
+
+static inline unsigned long perf_data_file__size(struct perf_data_file *file)
+{
+ return file->size;
+}
+
+int perf_data_file__open(struct perf_data_file *file);
+void perf_data_file__close(struct perf_data_file *file);
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size);
+
+#endif /* __PERF_DATA_H */
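
A minimal write-side sketch (not from the patch) of the new abstraction: set path and mode, open, write, close. Passing "-" as the path, or no path while stdout is a FIFO, flips it into pipe mode via check_pipe(); error handling is reduced to the success check.

	struct perf_data_file file = {
		.path = "perf.data",
		.mode = PERF_DATA_MODE_WRITE,
	};
	char buf[128] = { 0 };	/* stand-in payload */

	if (perf_data_file__open(&file) == 0) {
		perf_data_file__write(&file, buf, sizeof(buf));
		perf_data_file__close(&file);
	}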
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index c8d81b00089..299b5558650 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -11,57 +11,66 @@
#include "event.h"
#include "debug.h"
#include "util.h"
+#include "target.h"
int verbose;
bool dump_trace = false, quiet = false;
-int eprintf(int level, const char *fmt, ...)
+static int _eprintf(int level, const char *fmt, va_list args)
{
- va_list args;
int ret = 0;
if (verbose >= level) {
- va_start(args, fmt);
- if (use_browser > 0)
- ret = ui_helpline__show_help(fmt, args);
+ if (use_browser >= 1)
+ ui_helpline__vshow(fmt, args);
else
ret = vfprintf(stderr, fmt, args);
- va_end(args);
}
return ret;
}
-int dump_printf(const char *fmt, ...)
+int eprintf(int level, const char *fmt, ...)
{
va_list args;
- int ret = 0;
+ int ret;
- if (dump_trace) {
- va_start(args, fmt);
- ret = vprintf(fmt, args);
- va_end(args);
- }
+ va_start(args, fmt);
+ ret = _eprintf(level, fmt, args);
+ va_end(args);
return ret;
}
-static int dump_printf_color(const char *fmt, const char *color, ...)
+/*
+ * Overloading libtraceevent standard info print
+ * function, display with -v in perf.
+ */
+void pr_stat(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ _eprintf(1, fmt, args);
+ va_end(args);
+ eprintf(1, "\n");
+}
+
+int dump_printf(const char *fmt, ...)
{
va_list args;
int ret = 0;
if (dump_trace) {
- va_start(args, color);
- ret = color_vfprintf(stdout, color, fmt, args);
+ va_start(args, fmt);
+ ret = vprintf(fmt, args);
va_end(args);
}
return ret;
}
-
-void trace_event(event_t *event)
+void trace_event(union perf_event *event)
{
unsigned char *raw_event = (void *)event;
const char *color = PERF_COLOR_BLUE;
@@ -70,29 +79,29 @@ void trace_event(event_t *event)
if (!dump_trace)
return;
- dump_printf(".");
- dump_printf_color("\n. ... raw event: size %d bytes\n", color,
- event->header.size);
+ printf(".");
+ color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
+ event->header.size);
for (i = 0; i < event->header.size; i++) {
if ((i & 15) == 0) {
- dump_printf(".");
- dump_printf_color(" %04x: ", color, i);
+ printf(".");
+ color_fprintf(stdout, color, " %04x: ", i);
}
- dump_printf_color(" %02x", color, raw_event[i]);
+ color_fprintf(stdout, color, " %02x", raw_event[i]);
if (((i & 15) == 15) || i == event->header.size-1) {
- dump_printf_color(" ", color);
+ color_fprintf(stdout, color, " ");
for (j = 0; j < 15-(i & 15); j++)
- dump_printf_color(" ", color);
+ color_fprintf(stdout, color, " ");
for (j = i & ~15; j <= i; j++) {
- dump_printf_color("%c", color,
- isprint(raw_event[j]) ?
- raw_event[j] : '.');
+ color_fprintf(stdout, color, "%c",
+ isprint(raw_event[j]) ?
+ raw_event[j] : '.');
}
- dump_printf_color("\n", color);
+ color_fprintf(stdout, color, "\n");
}
}
- dump_printf(".\n");
+ printf(".\n");
}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 7b514082bba..443694c36b0 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -4,35 +4,19 @@
#include <stdbool.h>
#include "event.h"
+#include "../ui/helpline.h"
+#include "../ui/progress.h"
+#include "../ui/util.h"
extern int verbose;
extern bool quiet, dump_trace;
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
-void trace_event(event_t *event);
+void trace_event(union perf_event *event);
-struct ui_progress;
+int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
-#ifdef NO_NEWT_SUPPORT
-static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
-{
- return 0;
-}
-
-static inline struct ui_progress *ui_progress__new(const char *title __used,
- u64 total __used)
-{
- return (struct ui_progress *)1;
-}
-
-static inline void ui_progress__update(struct ui_progress *self __used,
- u64 curr __used) {}
-
-static inline void ui_progress__delete(struct ui_progress *self __used) {}
-#else
-extern char ui_helpline__last_msg[];
-int ui_helpline__show_help(const char *format, va_list ap);
-#include "ui/progress.h"
-#endif
+void pr_stat(const char *fmt, ...);
#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
deleted file mode 100644
index a88fefc0cc0..00000000000
--- a/tools/perf/util/debugfs.c
+++ /dev/null
@@ -1,240 +0,0 @@
-#include "util.h"
-#include "debugfs.h"
-#include "cache.h"
-
-static int debugfs_premounted;
-static char debugfs_mountpoint[MAX_PATH+1];
-
-static const char *debugfs_known_mountpoints[] = {
- "/sys/kernel/debug/",
- "/debug/",
- 0,
-};
-
-/* use this to force a umount */
-void debugfs_force_cleanup(void)
-{
- debugfs_find_mountpoint();
- debugfs_premounted = 0;
- debugfs_umount();
-}
-
-/* construct a full path to a debugfs element */
-int debugfs_make_path(const char *element, char *buffer, int size)
-{
- int len;
-
- if (strlen(debugfs_mountpoint) == 0) {
- buffer[0] = '\0';
- return -1;
- }
-
- len = strlen(debugfs_mountpoint) + strlen(element) + 1;
- if (len >= size)
- return len+1;
-
- snprintf(buffer, size-1, "%s/%s", debugfs_mountpoint, element);
- return 0;
-}
-
-static int debugfs_found;
-
-/* find the path to the mounted debugfs */
-const char *debugfs_find_mountpoint(void)
-{
- const char **ptr;
- char type[100];
- FILE *fp;
-
- if (debugfs_found)
- return (const char *) debugfs_mountpoint;
-
- ptr = debugfs_known_mountpoints;
- while (*ptr) {
- if (debugfs_valid_mountpoint(*ptr) == 0) {
- debugfs_found = 1;
- strcpy(debugfs_mountpoint, *ptr);
- return debugfs_mountpoint;
- }
- ptr++;
- }
-
- /* give up and parse /proc/mounts */
- fp = fopen("/proc/mounts", "r");
- if (fp == NULL)
- die("Can't open /proc/mounts for read");
-
- while (fscanf(fp, "%*s %"
- STR(MAX_PATH)
- "s %99s %*s %*d %*d\n",
- debugfs_mountpoint, type) == 2) {
- if (strcmp(type, "debugfs") == 0)
- break;
- }
- fclose(fp);
-
- if (strcmp(type, "debugfs") != 0)
- return NULL;
-
- debugfs_found = 1;
-
- return debugfs_mountpoint;
-}
-
-/* verify that a mountpoint is actually a debugfs instance */
-
-int debugfs_valid_mountpoint(const char *debugfs)
-{
- struct statfs st_fs;
-
- if (statfs(debugfs, &st_fs) < 0)
- return -ENOENT;
- else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
- return -ENOENT;
-
- return 0;
-}
-
-
-int debugfs_valid_entry(const char *path)
-{
- struct stat st;
-
- if (stat(path, &st))
- return -errno;
-
- return 0;
-}
-
-/* mount the debugfs somewhere if it's not mounted */
-
-char *debugfs_mount(const char *mountpoint)
-{
- /* see if it's already mounted */
- if (debugfs_find_mountpoint()) {
- debugfs_premounted = 1;
- return debugfs_mountpoint;
- }
-
- /* if not mounted and no argument */
- if (mountpoint == NULL) {
- /* see if environment variable set */
- mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
- /* if no environment variable, use default */
- if (mountpoint == NULL)
- mountpoint = "/sys/kernel/debug";
- }
-
- if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
- return NULL;
-
- /* save the mountpoint */
- strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
- debugfs_found = 1;
-
- return debugfs_mountpoint;
-}
-
-/* umount the debugfs */
-
-int debugfs_umount(void)
-{
- char umountcmd[128];
- int ret;
-
- /* if it was already mounted, leave it */
- if (debugfs_premounted)
- return 0;
-
- /* make sure it's a valid mount point */
- ret = debugfs_valid_mountpoint(debugfs_mountpoint);
- if (ret)
- return ret;
-
- snprintf(umountcmd, sizeof(umountcmd),
- "/bin/umount %s", debugfs_mountpoint);
- return system(umountcmd);
-}
-
-int debugfs_write(const char *entry, const char *value)
-{
- char path[MAX_PATH+1];
- int ret, count;
- int fd;
-
- /* construct the path */
- snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry);
-
- /* verify that it exists */
- ret = debugfs_valid_entry(path);
- if (ret)
- return ret;
-
- /* get how many chars we're going to write */
- count = strlen(value);
-
- /* open the debugfs entry */
- fd = open(path, O_RDWR);
- if (fd < 0)
- return -errno;
-
- while (count > 0) {
- /* write it */
- ret = write(fd, value, count);
- if (ret <= 0) {
- if (ret == EAGAIN)
- continue;
- close(fd);
- return -errno;
- }
- count -= ret;
- }
-
- /* close it */
- close(fd);
-
- /* return success */
- return 0;
-}
-
-/*
- * read a debugfs entry
- * returns the number of chars read or a negative errno
- */
-int debugfs_read(const char *entry, char *buffer, size_t size)
-{
- char path[MAX_PATH+1];
- int ret;
- int fd;
-
- /* construct the path */
- snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry);
-
- /* verify that it exists */
- ret = debugfs_valid_entry(path);
- if (ret)
- return ret;
-
- /* open the debugfs entry */
- fd = open(path, O_RDONLY);
- if (fd < 0)
- return -errno;
-
- do {
- /* read it */
- ret = read(fd, buffer, size);
- if (ret == 0) {
- close(fd);
- return EOF;
- }
- } while (ret < 0 && errno == EAGAIN);
-
- /* close it */
- close(fd);
-
- /* make *sure* there's a null character at the end */
- buffer[ret] = '\0';
-
- /* return the number of chars read */
- return ret;
-}
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
deleted file mode 100644
index 83a02879745..00000000000
--- a/tools/perf/util/debugfs.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __DEBUGFS_H__
-#define __DEBUGFS_H__
-
-#include <sys/mount.h>
-
-#ifndef MAX_PATH
-# define MAX_PATH 256
-#endif
-
-#ifndef STR
-# define _STR(x) #x
-# define STR(x) _STR(x)
-#endif
-
-extern const char *debugfs_find_mountpoint(void);
-extern int debugfs_valid_mountpoint(const char *debugfs);
-extern int debugfs_valid_entry(const char *path);
-extern char *debugfs_mount(const char *mountpoint);
-extern int debugfs_umount(void);
-extern int debugfs_write(const char *entry, const char *value);
-extern int debugfs_read(const char *entry, char *buffer, size_t size);
-extern void debugfs_force_cleanup(void);
-extern int debugfs_make_path(const char *element, char *buffer, int size);
-
-#endif /* __DEBUGFS_H__ */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
new file mode 100644
index 00000000000..819f10414f0
--- /dev/null
+++ b/tools/perf/util/dso.c
@@ -0,0 +1,900 @@
+#include <asm/bug.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include "symbol.h"
+#include "dso.h"
+#include "machine.h"
+#include "util.h"
+#include "debug.h"
+
+char dso__symtab_origin(const struct dso *dso)
+{
+ static const char origin[] = {
+ [DSO_BINARY_TYPE__KALLSYMS] = 'k',
+ [DSO_BINARY_TYPE__VMLINUX] = 'v',
+ [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
+ [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
+ [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
+ [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
+ [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
+ [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
+ [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
+ [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
+ [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
+ [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
+ };
+
+ if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
+ return '!';
+ return origin[dso->symtab_type];
+}
+
+int dso__read_binary_type_filename(const struct dso *dso,
+ enum dso_binary_type type,
+ char *root_dir, char *filename, size_t size)
+{
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+ int ret = 0;
+
+ switch (type) {
+ case DSO_BINARY_TYPE__DEBUGLINK: {
+ char *debuglink;
+
+ strncpy(filename, dso->long_name, size);
+ debuglink = filename + dso->long_name_len;
+ while (debuglink != filename && *debuglink != '/')
+ debuglink--;
+ if (*debuglink == '/')
+ debuglink++;
+ ret = filename__read_debuglink(dso->long_name, debuglink,
+ size - (debuglink - filename));
+ }
+ break;
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+ /* skip the locally configured cache if a symfs is given */
+ if (symbol_conf.symfs[0] ||
+ (dso__build_id_filename(dso, filename, size) == NULL))
+ ret = -1;
+ break;
+
+ case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+ snprintf(filename, size, "%s/usr/lib/debug%s.debug",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ snprintf(filename, size, "%s/usr/lib/debug%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+ {
+ const char *last_slash;
+ size_t len;
+ size_t dir_size;
+
+ last_slash = dso->long_name + dso->long_name_len;
+ while (last_slash != dso->long_name && *last_slash != '/')
+ last_slash--;
+
+ len = scnprintf(filename, size, "%s", symbol_conf.symfs);
+ dir_size = last_slash - dso->long_name + 2;
+ if (dir_size > (size - len)) {
+ ret = -1;
+ break;
+ }
+ len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
+ len += scnprintf(filename + len , size - len, ".debug%s",
+ last_slash);
+ break;
+ }
+
+ case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+ if (!dso->has_build_id) {
+ ret = -1;
+ break;
+ }
+
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id),
+ build_id_hex);
+ snprintf(filename, size,
+ "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
+ symbol_conf.symfs, build_id_hex, build_id_hex + 2);
+ break;
+
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+ snprintf(filename, size, "%s%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__GUEST_KMODULE:
+ snprintf(filename, size, "%s%s%s", symbol_conf.symfs,
+ root_dir, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+ snprintf(filename, size, "%s%s", symbol_conf.symfs,
+ dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__KCORE:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ snprintf(filename, size, "%s", dso->long_name);
+ break;
+
+ default:
+ case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__NOT_FOUND:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Global list of open DSOs and the counter.
+ */
+static LIST_HEAD(dso__data_open);
+static long dso__data_open_cnt;
+
+static void dso__list_add(struct dso *dso)
+{
+ list_add_tail(&dso->data.open_entry, &dso__data_open);
+ dso__data_open_cnt++;
+}
+
+static void dso__list_del(struct dso *dso)
+{
+ list_del(&dso->data.open_entry);
+ WARN_ONCE(dso__data_open_cnt <= 0,
+ "DSO data fd counter out of bounds.");
+ dso__data_open_cnt--;
+}
+
+static void close_first_dso(void);
+
+static int do_open(char *name)
+{
+ int fd;
+
+ do {
+ fd = open(name, O_RDONLY);
+ if (fd >= 0)
+ return fd;
+
+ pr_debug("dso open failed, mmap: %s\n", strerror(errno));
+ if (!dso__data_open_cnt || errno != EMFILE)
+ break;
+
+ close_first_dso();
+ } while (1);
+
+ return -1;
+}
+
+static int __open_dso(struct dso *dso, struct machine *machine)
+{
+ int fd;
+ char *root_dir = (char *)"";
+ char *name = malloc(PATH_MAX);
+
+ if (!name)
+ return -ENOMEM;
+
+ if (machine)
+ root_dir = machine->root_dir;
+
+ if (dso__read_binary_type_filename(dso, dso->binary_type,
+ root_dir, name, PATH_MAX)) {
+ free(name);
+ return -EINVAL;
+ }
+
+ fd = do_open(name);
+ free(name);
+ return fd;
+}
+
+static void check_data_close(void);
+
+/**
+ * open_dso - Open DSO data file
+ * @dso: dso object
+ *
+ * Open @dso's data file descriptor and update the
+ * list/count of open DSO objects.
+ */
+static int open_dso(struct dso *dso, struct machine *machine)
+{
+ int fd = __open_dso(dso, machine);
+
+ if (fd > 0) {
+ dso__list_add(dso);
+ /*
+ * Check if we crossed the allowed number
+ * of opened DSOs and close one if needed.
+ */
+ check_data_close();
+ }
+
+ return fd;
+}
+
+static void close_data_fd(struct dso *dso)
+{
+ if (dso->data.fd >= 0) {
+ close(dso->data.fd);
+ dso->data.fd = -1;
+ dso->data.file_size = 0;
+ dso__list_del(dso);
+ }
+}
+
+/**
+ * close_dso - Close DSO data file
+ * @dso: dso object
+ *
+ * Close @dso's data file descriptor and update the
+ * list/count of open DSO objects.
+ */
+static void close_dso(struct dso *dso)
+{
+ close_data_fd(dso);
+}
+
+static void close_first_dso(void)
+{
+ struct dso *dso;
+
+ dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
+ close_dso(dso);
+}
+
+static rlim_t get_fd_limit(void)
+{
+ struct rlimit l;
+ rlim_t limit = 0;
+
+ /* Allow half of the current open fd limit. */
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (l.rlim_cur == RLIM_INFINITY)
+ limit = l.rlim_cur;
+ else
+ limit = l.rlim_cur / 2;
+ } else {
+ pr_err("failed to get fd limit\n");
+ limit = 1;
+ }
+
+ return limit;
+}
+
+static bool may_cache_fd(void)
+{
+ static rlim_t limit;
+
+ if (!limit)
+ limit = get_fd_limit();
+
+ if (limit == RLIM_INFINITY)
+ return true;
+
+ return limit > (rlim_t) dso__data_open_cnt;
+}
+
+/*
+ * Check and close the LRU dso if we crossed the allowed limit
+ * for opened dso file descriptors. The limit is half
+ * of the RLIMIT_NOFILE files opened.
+ */
+static void check_data_close(void)
+{
+ bool cache_fd = may_cache_fd();
+
+ if (!cache_fd)
+ close_first_dso();
+}
+
+/**
+ * dso__data_close - Close DSO data file
+ * @dso: dso object
+ *
+ * External interface to close @dso's data file descriptor.
+ */
+void dso__data_close(struct dso *dso)
+{
+ close_dso(dso);
+}
+
+/**
+ * dso__data_fd - Get dso's data file descriptor
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * External interface to find the dso's file, open it and
+ * return its file descriptor.
+ */
+int dso__data_fd(struct dso *dso, struct machine *machine)
+{
+ enum dso_binary_type binary_type_data[] = {
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+ };
+ int i = 0;
+
+ if (dso->data.fd >= 0)
+ return dso->data.fd;
+
+ if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
+ dso->data.fd = open_dso(dso, machine);
+ return dso->data.fd;
+ }
+
+ do {
+ int fd;
+
+ dso->binary_type = binary_type_data[i++];
+
+ fd = open_dso(dso, machine);
+ if (fd >= 0)
+ return dso->data.fd = fd;
+
+ } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
+
+ return -EINVAL;
+}
+
+static void
+dso_cache__free(struct rb_root *root)
+{
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct dso_cache *cache;
+
+ cache = rb_entry(next, struct dso_cache, rb_node);
+ next = rb_next(&cache->rb_node);
+ rb_erase(&cache->rb_node, root);
+ free(cache);
+ }
+}
+
+static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
+{
+ struct rb_node * const *p = &root->rb_node;
+ const struct rb_node *parent = NULL;
+ struct dso_cache *cache;
+
+ while (*p != NULL) {
+ u64 end;
+
+ parent = *p;
+ cache = rb_entry(parent, struct dso_cache, rb_node);
+ end = cache->offset + DSO__DATA_CACHE_SIZE;
+
+ if (offset < cache->offset)
+ p = &(*p)->rb_left;
+ else if (offset >= end)
+ p = &(*p)->rb_right;
+ else
+ return cache;
+ }
+ return NULL;
+}
+
+static void
+dso_cache__insert(struct rb_root *root, struct dso_cache *new)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct dso_cache *cache;
+ u64 offset = new->offset;
+
+ while (*p != NULL) {
+ u64 end;
+
+ parent = *p;
+ cache = rb_entry(parent, struct dso_cache, rb_node);
+ end = cache->offset + DSO__DATA_CACHE_SIZE;
+
+ if (offset < cache->offset)
+ p = &(*p)->rb_left;
+ else if (offset >= end)
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, root);
+}
+
+static ssize_t
+dso_cache__memcpy(struct dso_cache *cache, u64 offset,
+ u8 *data, u64 size)
+{
+ u64 cache_offset = offset - cache->offset;
+ u64 cache_size = min(cache->size - cache_offset, size);
+
+ memcpy(data, cache->data + cache_offset, cache_size);
+ return cache_size;
+}
+
+static ssize_t
+dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
+{
+ struct dso_cache *cache;
+ ssize_t ret;
+
+ do {
+ u64 cache_offset;
+
+ ret = -ENOMEM;
+
+ cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
+ if (!cache)
+ break;
+
+ cache_offset = offset & DSO__DATA_CACHE_MASK;
+ ret = -EINVAL;
+
+ if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
+ break;
+
+ ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
+ if (ret <= 0)
+ break;
+
+ cache->offset = cache_offset;
+ cache->size = ret;
+ dso_cache__insert(&dso->data.cache, cache);
+
+ ret = dso_cache__memcpy(cache, offset, data, size);
+
+ } while (0);
+
+ if (ret <= 0)
+ free(cache);
+
+ return ret;
+}
+
+static ssize_t dso_cache_read(struct dso *dso, u64 offset,
+ u8 *data, ssize_t size)
+{
+ struct dso_cache *cache;
+
+ cache = dso_cache__find(&dso->data.cache, offset);
+ if (cache)
+ return dso_cache__memcpy(cache, offset, data, size);
+ else
+ return dso_cache__read(dso, offset, data, size);
+}
+
+/*
+ * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
+ * kept in the rb_tree. Any read of already cached data is served
+ * from the cache.
+ */
+static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
+{
+ ssize_t r = 0;
+ u8 *p = data;
+
+ do {
+ ssize_t ret;
+
+ ret = dso_cache_read(dso, offset, p, size);
+ if (ret < 0)
+ return ret;
+
+ /* Reached EOF, return what we have. */
+ if (!ret)
+ break;
+
+ BUG_ON(ret > size);
+
+ r += ret;
+ p += ret;
+ offset += ret;
+ size -= ret;
+
+ } while (size);
+
+ return r;
+}
+
+static int data_file_size(struct dso *dso)
+{
+ struct stat st;
+
+ if (!dso->data.file_size) {
+ if (fstat(dso->data.fd, &st)) {
+ pr_err("dso mmap failed, fstat: %s\n", strerror(errno));
+ return -1;
+ }
+ dso->data.file_size = st.st_size;
+ }
+
+ return 0;
+}
+
+static ssize_t data_read_offset(struct dso *dso, u64 offset,
+ u8 *data, ssize_t size)
+{
+ if (data_file_size(dso))
+ return -1;
+
+ /* Check the offset sanity. */
+ if (offset > dso->data.file_size)
+ return -1;
+
+ if (offset + size < offset)
+ return -1;
+
+ return cached_read(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_offset - Read data from dso file offset
+ * @dso: dso object
+ * @machine: machine object
+ * @offset: file offset
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from a dso file offset. Opens
+ * the dso data file and uses cached_read() to get the data.
+ */
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size)
+{
+ if (dso__data_fd(dso, machine) < 0)
+ return -1;
+
+ return data_read_offset(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_addr - Read data from dso address
+ * @dso: dso object
+ * @map: the map used to translate @addr to a file offset
+ * @machine: machine object
+ * @addr: virtual memory address
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from dso address.
+ */
+ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ u8 *data, ssize_t size)
+{
+ u64 offset = map->map_ip(map, addr);
+ return dso__data_read_offset(dso, machine, offset, data, size);
+}
+
+struct map *dso__new_map(const char *name)
+{
+ struct map *map = NULL;
+ struct dso *dso = dso__new(name);
+
+ if (dso)
+ map = map__new2(0, dso, MAP__FUNCTION);
+
+ return map;
+}
+
+struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
+ const char *short_name, int dso_type)
+{
+ /*
+ * The kernel dso could be created by build_id processing.
+ */
+ struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name);
+
+ /*
+ * We need to run this in all cases, since during the build_id
+ * processing we had no idea this was the kernel dso.
+ */
+ if (dso != NULL) {
+ dso__set_short_name(dso, short_name, false);
+ dso->kernel = dso_type;
+ }
+
+ return dso;
+}
+
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
+{
+ if (name == NULL)
+ return;
+
+ if (dso->long_name_allocated)
+ free((char *)dso->long_name);
+
+ dso->long_name = name;
+ dso->long_name_len = strlen(name);
+ dso->long_name_allocated = name_allocated;
+}
+
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
+{
+ if (name == NULL)
+ return;
+
+ if (dso->short_name_allocated)
+ free((char *)dso->short_name);
+
+ dso->short_name = name;
+ dso->short_name_len = strlen(name);
+ dso->short_name_allocated = name_allocated;
+}
+
+static void dso__set_basename(struct dso *dso)
+{
+ /*
+ * basename() may modify path buffer, so we must pass
+ * a copy.
+ */
+ char *base, *lname = strdup(dso->long_name);
+
+ if (!lname)
+ return;
+
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
+
+ free(lname);
+
+ if (!base)
+ return;
+
+ dso__set_short_name(dso, base, true);
+}
+
+int dso__name_len(const struct dso *dso)
+{
+ if (!dso)
+ return strlen("[unknown]");
+ if (verbose)
+ return dso->long_name_len;
+
+ return dso->short_name_len;
+}
+
+bool dso__loaded(const struct dso *dso, enum map_type type)
+{
+ return dso->loaded & (1 << type);
+}
+
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
+{
+ return dso->sorted_by_name & (1 << type);
+}
+
+void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
+{
+ dso->sorted_by_name |= (1 << type);
+}
+
+struct dso *dso__new(const char *name)
+{
+ struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
+
+ if (dso != NULL) {
+ int i;
+ strcpy(dso->name, name);
+ dso__set_long_name(dso, dso->name, false);
+ dso__set_short_name(dso, dso->name, false);
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
+ dso->data.cache = RB_ROOT;
+ dso->data.fd = -1;
+ dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
+ dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
+ dso->loaded = 0;
+ dso->rel = 0;
+ dso->sorted_by_name = 0;
+ dso->has_build_id = 0;
+ dso->has_srcline = 1;
+ dso->a2l_fails = 1;
+ dso->kernel = DSO_TYPE_USER;
+ dso->needs_swap = DSO_SWAP__UNSET;
+ INIT_LIST_HEAD(&dso->node);
+ INIT_LIST_HEAD(&dso->data.open_entry);
+ }
+
+ return dso;
+}
+
+void dso__delete(struct dso *dso)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ symbols__delete(&dso->symbols[i]);
+
+ if (dso->short_name_allocated) {
+ zfree((char **)&dso->short_name);
+ dso->short_name_allocated = false;
+ }
+
+ if (dso->long_name_allocated) {
+ zfree((char **)&dso->long_name);
+ dso->long_name_allocated = false;
+ }
+
+ dso__data_close(dso);
+ dso_cache__free(&dso->data.cache);
+ dso__free_a2l(dso);
+ zfree(&dso->symsrc_filename);
+ free(dso);
+}
+
+void dso__set_build_id(struct dso *dso, void *build_id)
+{
+ memcpy(dso->build_id, build_id, sizeof(dso->build_id));
+ dso->has_build_id = 1;
+}
+
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
+{
+ return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
+}
+
+void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
+{
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ return;
+ sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
+ if (sysfs__read_build_id(path, dso->build_id,
+ sizeof(dso->build_id)) == 0)
+ dso->has_build_id = true;
+}
+
+int dso__kernel_module_get_build_id(struct dso *dso,
+ const char *root_dir)
+{
+ char filename[PATH_MAX];
+ /*
+ * kernel module short names are of the form "[module]" and
+ * we need just "module" here.
+ */
+ const char *name = dso->short_name + 1;
+
+ snprintf(filename, sizeof(filename),
+ "%s/sys/module/%.*s/notes/.note.gnu.build-id",
+ root_dir, (int)strlen(name) - 1, name);
+
+ if (sysfs__read_build_id(filename, dso->build_id,
+ sizeof(dso->build_id)) == 0)
+ dso->has_build_id = true;
+
+ return 0;
+}
+
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
+{
+ bool have_build_id = false;
+ struct dso *pos;
+
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
+ if (pos->has_build_id) {
+ have_build_id = true;
+ continue;
+ }
+ if (filename__read_build_id(pos->long_name, pos->build_id,
+ sizeof(pos->build_id)) > 0) {
+ have_build_id = true;
+ pos->has_build_id = true;
+ }
+ }
+
+ return have_build_id;
+}
+
+void dsos__add(struct list_head *head, struct dso *dso)
+{
+ list_add_tail(&dso->node, head);
+}
+
+struct dso *dsos__find(const struct list_head *head, const char *name, bool cmp_short)
+{
+ struct dso *pos;
+
+ if (cmp_short) {
+ list_for_each_entry(pos, head, node)
+ if (strcmp(pos->short_name, name) == 0)
+ return pos;
+ return NULL;
+ }
+ list_for_each_entry(pos, head, node)
+ if (strcmp(pos->long_name, name) == 0)
+ return pos;
+ return NULL;
+}
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name)
+{
+ struct dso *dso = dsos__find(head, name, false);
+
+ if (!dso) {
+ dso = dso__new(name);
+ if (dso != NULL) {
+ dsos__add(head, dso);
+ dso__set_basename(dso);
+ }
+ }
+
+ return dso;
+}
+
+size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ if (skip && skip(pos, parm))
+ continue;
+ ret += dso__fprintf_buildid(pos, fp);
+ ret += fprintf(fp, " %s\n", pos->long_name);
+ }
+ return ret;
+}
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ ret += dso__fprintf(pos, i, fp);
+ }
+
+ return ret;
+}
+
+size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+ return fprintf(fp, "%s", sbuild_id);
+}
+
+size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
+
+ if (dso->short_name != dso->long_name)
+ ret += fprintf(fp, "%s, ", dso->long_name);
+ ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
+ dso__loaded(dso, type) ? "" : "NOT ");
+ ret += dso__fprintf_buildid(dso, fp);
+ ret += fprintf(fp, ")\n");
+ for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ ret += symbol__fprintf(pos, fp);
+ }
+
+ return ret;
+}
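A minimal caller-side sketch of the fd-caching read path added above; it is an illustration, not part of the patch. The helper name and buffer size are hypothetical, and dso/machine/offset are assumed to come from the caller's sample resolution:

static void read_dso_bytes(struct dso *dso, struct machine *machine, u64 offset)
{
	u8 buf[16];
	ssize_t n;

	/* Opens (or reuses) the cached fd and reads through the rb-tree chunk cache. */
	n = dso__data_read_offset(dso, machine, offset, buf, sizeof(buf));
	if (n < 0) {
		pr_debug("no data for %s\n", dso->long_name);
		return;
	}

	/* Optional: release the cached fd early instead of waiting for dso__delete(). */
	dso__data_close(dso);
}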
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
new file mode 100644
index 00000000000..ad553ba257b
--- /dev/null
+++ b/tools/perf/util/dso.h
@@ -0,0 +1,232 @@
+#ifndef __PERF_DSO
+#define __PERF_DSO
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "map.h"
+#include "build-id.h"
+
+enum dso_binary_type {
+ DSO_BINARY_TYPE__KALLSYMS = 0,
+ DSO_BINARY_TYPE__GUEST_KALLSYMS,
+ DSO_BINARY_TYPE__VMLINUX,
+ DSO_BINARY_TYPE__GUEST_VMLINUX,
+ DSO_BINARY_TYPE__JAVA_JIT,
+ DSO_BINARY_TYPE__DEBUGLINK,
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__GUEST_KMODULE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__KCORE,
+ DSO_BINARY_TYPE__GUEST_KCORE,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+enum dso_kernel_type {
+ DSO_TYPE_USER = 0,
+ DSO_TYPE_KERNEL,
+ DSO_TYPE_GUEST_KERNEL
+};
+
+enum dso_swap_type {
+ DSO_SWAP__UNSET,
+ DSO_SWAP__NO,
+ DSO_SWAP__YES,
+};
+
+#define DSO__SWAP(dso, type, val) \
+({ \
+ type ____r = val; \
+ BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \
+ if (dso->needs_swap == DSO_SWAP__YES) { \
+ switch (sizeof(____r)) { \
+ case 2: \
+ ____r = bswap_16(val); \
+ break; \
+ case 4: \
+ ____r = bswap_32(val); \
+ break; \
+ case 8: \
+ ____r = bswap_64(val); \
+ break; \
+ default: \
+ BUG_ON(1); \
+ } \
+ } \
+ ____r; \
+})
+
+#define DSO__DATA_CACHE_SIZE 4096
+#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1)
+
+struct dso_cache {
+ struct rb_node rb_node;
+ u64 offset;
+ u64 size;
+ char data[0];
+};
+
+struct dso {
+ struct list_head node;
+ struct rb_root symbols[MAP__NR_TYPES];
+ struct rb_root symbol_names[MAP__NR_TYPES];
+ void *a2l;
+ char *symsrc_filename;
+ unsigned int a2l_fails;
+ enum dso_kernel_type kernel;
+ enum dso_swap_type needs_swap;
+ enum dso_binary_type symtab_type;
+ enum dso_binary_type binary_type;
+ u8 adjust_symbols:1;
+ u8 has_build_id:1;
+ u8 has_srcline:1;
+ u8 hit:1;
+ u8 annotate_warned:1;
+ u8 short_name_allocated:1;
+ u8 long_name_allocated:1;
+ u8 sorted_by_name;
+ u8 loaded;
+ u8 rel;
+ u8 build_id[BUILD_ID_SIZE];
+ const char *short_name;
+ const char *long_name;
+ u16 long_name_len;
+ u16 short_name_len;
+
+ /* dso data file */
+ struct {
+ struct rb_root cache;
+ int fd;
+ size_t file_size;
+ struct list_head open_entry;
+ } data;
+
+ char name[0];
+};
+
+/* dso__for_each_symbol - iterate over the symbols of given type
+ *
+ * @dso: the 'struct dso *' whose symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as a temporary storage
+ * @type: the 'enum map_type' type of symbols
+ */
+#define dso__for_each_symbol(dso, pos, n, type) \
+ symbols__for_each_entry(&(dso)->symbols[(type)], pos, n)
+
+static inline void dso__set_loaded(struct dso *dso, enum map_type type)
+{
+ dso->loaded |= (1 << type);
+}
+
+struct dso *dso__new(const char *name);
+void dso__delete(struct dso *dso);
+
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
+
+int dso__name_len(const struct dso *dso);
+
+bool dso__loaded(const struct dso *dso, enum map_type type);
+
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
+void dso__set_sorted_by_name(struct dso *dso, enum map_type type);
+void dso__sort_by_name(struct dso *dso, enum map_type type);
+
+void dso__set_build_id(struct dso *dso, void *build_id);
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
+void dso__read_running_kernel_build_id(struct dso *dso,
+ struct machine *machine);
+int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
+
+char dso__symtab_origin(const struct dso *dso);
+int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
+ char *root_dir, char *filename, size_t size);
+
+/*
+ * The dso__data_* external interface provides following functions:
+ * dso__data_fd
+ * dso__data_close
+ * dso__data_read_offset
+ * dso__data_read_addr
+ *
+ * Please refer to the dso.c object code for the documentation of
+ * each function and its arguments. The following text tries to
+ * explain the dso file descriptor caching.
+ *
+ * The dso__data* interface allows caching of opened file descriptors
+ * to speed up the dso data accesses. The idea is to leave the file
+ * descriptor opened ideally for the whole life of the dso object.
+ *
+ * The current usage of the dso__data_* interface is as follows:
+ *
+ * Get DSO's fd:
+ * int fd = dso__data_fd(dso, machine);
+ * USE 'fd' SOMEHOW
+ *
+ * Read DSO's data:
+ * n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE);
+ * n = dso__data_read_addr(dso_0, &machine, 0, buf, BUFSIZE);
+ *
+ * Eventually close DSO's fd:
+ * dso__data_close(dso);
+ *
+ * It is not necessary to close the DSO object data file. Each time a new
+ * DSO data file is opened, the limit (RLIMIT_NOFILE/2) is checked. Once
+ * it is crossed, the oldest opened DSO object is closed.
+ *
+ * The dso__delete function calls close_dso function to ensure the
+ * data file descriptor gets closed/unmapped before the dso object
+ * is freed.
+ *
+ * TODO
+ */
+int dso__data_fd(struct dso *dso, struct machine *machine);
+void dso__data_close(struct dso *dso);
+
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size);
+ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ u8 *data, ssize_t size);
+
+struct map *dso__new_map(const char *name);
+struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
+ const char *short_name, int dso_type);
+
+void dsos__add(struct list_head *head, struct dso *dso);
+struct dso *dsos__find(const struct list_head *head, const char *name,
+ bool cmp_short);
+struct dso *__dsos__findnew(struct list_head *head, const char *name);
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
+
+size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm);
+size_t __dsos__fprintf(struct list_head *head, FILE *fp);
+
+size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ enum map_type type, FILE *fp);
+size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+static inline bool dso__is_vmlinux(struct dso *dso)
+{
+ return dso->binary_type == DSO_BINARY_TYPE__VMLINUX ||
+ dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+}
+
+static inline bool dso__is_kcore(struct dso *dso)
+{
+ return dso->binary_type == DSO_BINARY_TYPE__KCORE ||
+ dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
+}
+
+void dso__free_a2l(struct dso *dso);
+
+#endif /* __PERF_DSO */
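A hedged illustration of the DSO__SWAP() macro defined above; raw_e_type and raw_sh_addr are hypothetical values read from an ELF header, and dso->needs_swap is assumed to have already been set to DSO_SWAP__YES or DSO_SWAP__NO, since the macro BUG()s on DSO_SWAP__UNSET:

/* Byte-swap fields only when the dso and host endianness differ. */
u16 e_type  = DSO__SWAP(dso, u16, raw_e_type);   /* expands to bswap_16() when needed */
u64 sh_addr = DSO__SWAP(dso, u64, raw_sh_addr);  /* expands to bswap_64() when needed */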
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
new file mode 100644
index 00000000000..cc66c4049e0
--- /dev/null
+++ b/tools/perf/util/dwarf-aux.c
@@ -0,0 +1,884 @@
+/*
+ * dwarf-aux.c : libdw auxiliary interfaces
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <stdbool.h>
+#include "util.h"
+#include "debug.h"
+#include "dwarf-aux.h"
+
+/**
+ * cu_find_realpath - Find the realpath of the target file
+ * @cu_die: A DIE (DWARF information entry) of a CU (compilation unit)
+ * @fname: The tail filename of the target file
+ *
+ * Find the real (long) path of @fname in @cu_die.
+ */
+const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
+{
+ Dwarf_Files *files;
+ size_t nfiles, i;
+ const char *src = NULL;
+ int ret;
+
+ if (!fname)
+ return NULL;
+
+ ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
+ if (ret != 0)
+ return NULL;
+
+ for (i = 0; i < nfiles; i++) {
+ src = dwarf_filesrc(files, i, NULL, NULL);
+ if (strtailcmp(src, fname) == 0)
+ break;
+ }
+ if (i == nfiles)
+ return NULL;
+ return src;
+}
+
+/**
+ * cu_get_comp_dir - Get the path of compilation directory
+ * @cu_die: a CU DIE
+ *
+ * Get the path of the compilation directory of the given @cu_die.
+ * Since this depends on DW_AT_comp_dir, older gcc will not have
+ * embedded it. In that case, this returns NULL.
+ */
+const char *cu_get_comp_dir(Dwarf_Die *cu_die)
+{
+ Dwarf_Attribute attr;
+ if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
+ return NULL;
+ return dwarf_formstring(&attr);
+}
+
+/**
+ * cu_find_lineinfo - Get a line number and file name for given address
+ * @cu_die: a CU DIE
+ * @addr: An address
+ * @fname: a pointer which returns the file name string
+ * @lineno: a pointer which returns the line number
+ *
+ * Find a line number and file name for @addr in @cu_die.
+ */
+int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
+ const char **fname, int *lineno)
+{
+ Dwarf_Line *line;
+ Dwarf_Addr laddr;
+
+ line = dwarf_getsrc_die(cu_die, (Dwarf_Addr)addr);
+ if (line && dwarf_lineaddr(line, &laddr) == 0 &&
+ addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) {
+ *fname = dwarf_linesrc(line, NULL, NULL);
+ if (!*fname)
+ /* line number is useless without filename */
+ *lineno = 0;
+ }
+
+ return *lineno ?: -ENOENT;
+}
+
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data);
+
+/**
+ * cu_walk_functions_at - Walk on function DIEs at given address
+ * @cu_die: A CU DIE
+ * @addr: An address
+ * @callback: A callback which is called with found DIEs
+ * @data: A user data
+ *
+ * Walk on function DIEs at the given @addr in @cu_die. The passed DIEs
+ * should be subprograms or inlined subroutines.
+ */
+int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ int (*callback)(Dwarf_Die *, void *), void *data)
+{
+ Dwarf_Die die_mem;
+ Dwarf_Die *sc_die;
+ int ret = -ENOENT;
+
+ /* Inlined function could be recursive. Trace it until fail */
+ for (sc_die = die_find_realfunc(cu_die, addr, &die_mem);
+ sc_die != NULL;
+ sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr,
+ &die_mem)) {
+ ret = callback(sc_die, data);
+ if (ret)
+ break;
+ }
+
+ return ret;
+
+}
+
+/**
+ * die_compare_name - Compare diename and tname
+ * @dw_die: a DIE
+ * @tname: a string of target name
+ *
+ * Compare the name of @dw_die and @tname. Return false if @dw_die has no name.
+ */
+bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
+{
+ const char *name;
+ name = dwarf_diename(dw_die);
+ return name ? (strcmp(tname, name) == 0) : false;
+}
+
+/**
+ * die_get_call_lineno - Get callsite line number of inline-function instance
+ * @in_die: a DIE of an inlined function instance
+ *
+ * Get call-site line number of @in_die. This means from where the inline
+ * function is called.
+ */
+int die_get_call_lineno(Dwarf_Die *in_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
+ return -ENOENT;
+
+ dwarf_formudata(&attr, &ret);
+ return (int)ret;
+}
+
+/**
+ * die_get_type - Get type DIE
+ * @vr_die: a DIE of a variable
+ * @die_mem: where to store a type DIE
+ *
+ * Get a DIE of the type of given variable (@vr_die), and store
+ * it to @die_mem. Return NULL if it fails to get a type DIE.
+ */
+Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
+ dwarf_formref_die(&attr, die_mem))
+ return die_mem;
+ else
+ return NULL;
+}
+
+/* Get a type die, but skip qualifiers */
+static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ int tag;
+
+ do {
+ vr_die = die_get_type(vr_die, die_mem);
+ if (!vr_die)
+ break;
+ tag = dwarf_tag(vr_die);
+ } while (tag == DW_TAG_const_type ||
+ tag == DW_TAG_restrict_type ||
+ tag == DW_TAG_volatile_type ||
+ tag == DW_TAG_shared_type);
+
+ return vr_die;
+}
+
+/**
+ * die_get_real_type - Get a type die, but skip qualifiers and typedef
+ * @vr_die: a DIE of a variable
+ * @die_mem: where to store a type DIE
+ *
+ * Get a DIE of the type of given variable (@vr_die), and store
+ * it to @die_mem. Return NULL if it fails to get a type DIE.
+ * If the type is a qualifier (e.g. const) or a typedef, this skips it
+ * and tries to find the real type (a structure or basic type, e.g. int).
+ */
+Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ do {
+ vr_die = __die_get_real_type(vr_die, die_mem);
+ } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
+
+ return vr_die;
+}
+
+/* Get attribute and translate it as a udata */
+static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Word *result)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formudata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+/* Get attribute and translate it as a sdata */
+static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Sword *result)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formsdata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * die_is_signed_type - Check whether a type DIE is signed or not
+ * @tp_die: a DIE of a type
+ *
+ * Get the encoding of @tp_die and return true if the encoding
+ * is signed.
+ */
+bool die_is_signed_type(Dwarf_Die *tp_die)
+{
+ Dwarf_Word ret;
+
+ if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
+ return false;
+
+ return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
+ ret == DW_ATE_signed_fixed);
+}
+
+/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+ Dwarf_Attribute attr;
+
+ return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+ dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
+/**
+ * die_get_data_member_location - Get the data-member offset
+ * @mb_die: a DIE of a member of a data structure
+ * @offs: The offset of the member in the data structure
+ *
+ * Get the offset of @mb_die in the data structure including @mb_die, and
+ * store the resulting offset in @offs. If any error occurs this returns errno.
+ */
+int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *expr;
+ size_t nexpr;
+ int ret;
+
+ if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
+ return -ENOENT;
+
+ if (dwarf_formudata(&attr, offs) != 0) {
+ /* DW_AT_data_member_location should be DW_OP_plus_uconst */
+ ret = dwarf_getlocation(&attr, &expr, &nexpr);
+ if (ret < 0 || nexpr == 0)
+ return -ENOENT;
+
+ if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
+ pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
+ expr[0].atom, nexpr);
+ return -ENOTSUP;
+ }
+ *offs = (Dwarf_Word)expr[0].number;
+ }
+ return 0;
+}
+
+/* Get the call file index number in CU DIE */
+static int die_get_call_fileno(Dwarf_Die *in_die)
+{
+ Dwarf_Sword idx;
+
+ if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+}
+
+/* Get the declared file index number in CU DIE */
+static int die_get_decl_fileno(Dwarf_Die *pdie)
+{
+ Dwarf_Sword idx;
+
+ if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+}
+
+/**
+ * die_get_call_file - Get callsite file name of inlined function instance
+ * @in_die: a DIE of an inlined function instance
+ *
+ * Get call-site file name of @in_die. This means from which file the inline
+ * function is called.
+ */
+const char *die_get_call_file(Dwarf_Die *in_die)
+{
+ Dwarf_Die cu_die;
+ Dwarf_Files *files;
+ int idx;
+
+ idx = die_get_call_fileno(in_die);
+ if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) ||
+ dwarf_getsrcfiles(&cu_die, &files, NULL) != 0)
+ return NULL;
+
+ return dwarf_filesrc(files, idx, NULL, NULL);
+}
+
+
+/**
+ * die_find_child - Generic DIE search function in DIE tree
+ * @rt_die: a root DIE
+ * @callback: a callback function
+ * @data: a user data passed to the callback function
+ * @die_mem: a buffer for result DIE
+ *
+ * Trace DIE tree from @rt_die and call @callback for each child DIE.
+ * If @callback returns DIE_FIND_CB_END, this stores the DIE into
+ * @die_mem and returns it. If @callback returns DIE_FIND_CB_CONTINUE,
+ * this continues to trace the tree. Optionally, @callback can return
+ * DIE_FIND_CB_CHILD and DIE_FIND_CB_SIBLING, which mean trace only
+ * the children or only the siblings, respectively.
+ * Returns NULL if @callback can't find any appropriate DIE.
+ */
+Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem)
+{
+ Dwarf_Die child_die;
+ int ret;
+
+ ret = dwarf_child(rt_die, die_mem);
+ if (ret != 0)
+ return NULL;
+
+ do {
+ ret = callback(die_mem, data);
+ if (ret == DIE_FIND_CB_END)
+ return die_mem;
+
+ if ((ret & DIE_FIND_CB_CHILD) &&
+ die_find_child(die_mem, callback, data, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while ((ret & DIE_FIND_CB_SIBLING) &&
+ dwarf_siblingof(die_mem, die_mem) == 0);
+
+ return NULL;
+}
+
+struct __addr_die_search_param {
+ Dwarf_Addr addr;
+ Dwarf_Die *die_mem;
+};
+
+/* die_find callback for non-inlined function search */
+static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct __addr_die_search_param *ad = data;
+
+ /*
+ * Since a declaration entry doesn't have a given pc, this always returns
+ * a function definition entry.
+ */
+ if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
+ dwarf_haspc(fn_die, ad->addr)) {
+ memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
+ return DWARF_CB_ABORT;
+ }
+ return DWARF_CB_OK;
+}
+
+/**
+ * die_find_realfunc - Search a non-inlined function at given address
+ * @cu_die: a CU DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search a non-inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ */
+Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ struct __addr_die_search_param ad;
+ ad.addr = addr;
+ ad.die_mem = die_mem;
+ /* dwarf_getscopes can't find subprogram. */
+ if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
+ return NULL;
+ else
+ return die_mem;
+}
+
+/* die_find callback for inline function search */
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
+{
+ Dwarf_Addr *addr = data;
+
+ if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
+ dwarf_haspc(die_mem, *addr))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/**
+ * die_find_top_inlinefunc - Search the top inlined function at given address
+ * @sp_die: a subprogram DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ * Even if several inlined functions are expanded recursively, this
+ * doesn't trace them down, and returns the topmost one.
+ */
+Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
+}
+
+/**
+ * die_find_inlinefunc - Search an inlined function at given address
+ * @sp_die: a subprogram DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ * If several inlined functions are expanded recursively, this traces
+ * them down and returns the deepest one.
+ */
+Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ Dwarf_Die tmp_die;
+
+ sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die);
+ if (!sp_die)
+ return NULL;
+
+ /* Inlined function could be recursive. Trace it until fail */
+ while (sp_die) {
+ memcpy(die_mem, sp_die, sizeof(Dwarf_Die));
+ sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr,
+ &tmp_die);
+ }
+
+ return die_mem;
+}
+
+struct __instance_walk_param {
+ void *addr;
+ int (*callback)(Dwarf_Die *, void *);
+ void *data;
+ int retval;
+};
+
+static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
+{
+ struct __instance_walk_param *iwp = data;
+ Dwarf_Attribute attr_mem;
+ Dwarf_Die origin_mem;
+ Dwarf_Attribute *attr;
+ Dwarf_Die *origin;
+ int tmp;
+
+ attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
+ if (attr == NULL)
+ return DIE_FIND_CB_CONTINUE;
+
+ origin = dwarf_formref_die(attr, &origin_mem);
+ if (origin == NULL || origin->addr != iwp->addr)
+ return DIE_FIND_CB_CONTINUE;
+
+ /* Ignore redundant instances */
+ if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) {
+ dwarf_decl_line(origin, &tmp);
+ if (die_get_call_lineno(inst) == tmp) {
+ tmp = die_get_decl_fileno(origin);
+ if (die_get_call_fileno(inst) == tmp)
+ return DIE_FIND_CB_CONTINUE;
+ }
+ }
+
+ iwp->retval = iwp->callback(inst, iwp->data);
+
+ return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE;
+}
+
+/**
+ * die_walk_instances - Walk on instances of given DIE
+ * @or_die: an abstract original DIE
+ * @callback: a callback function which is called with instance DIE
+ * @data: user data
+ *
+ * Walk on the instances of the given @or_die. @or_die must be an inlined
+ * function declaration. This returns the return value of @callback if it
+ * returns a non-zero value, or -ENOENT if there is no instance.
+ */
+int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
+ void *data)
+{
+ Dwarf_Die cu_die;
+ Dwarf_Die die_mem;
+ struct __instance_walk_param iwp = {
+ .addr = or_die->addr,
+ .callback = callback,
+ .data = data,
+ .retval = -ENOENT,
+ };
+
+ if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL)
+ return -ENOENT;
+
+ die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem);
+
+ return iwp.retval;
+}
+
+/* Line walker internal parameters */
+struct __line_walk_param {
+ bool recursive;
+ line_walk_callback_t callback;
+ void *data;
+ int retval;
+};
+
+static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+ Dwarf_Addr addr = 0;
+ const char *fname;
+ int lineno;
+
+ if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
+ fname = die_get_call_file(in_die);
+ lineno = die_get_call_lineno(in_die);
+ if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
+ }
+ if (!lw->recursive)
+ /* Don't need to search recursively */
+ return DIE_FIND_CB_SIBLING;
+
+ if (addr) {
+ fname = dwarf_decl_file(in_die);
+ if (fname && dwarf_decl_line(in_die, &lineno) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
+ }
+
+ /* Continue to search nested inlined function call-sites */
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/* Walk on lines of blocks included in given DIE */
+static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
+ line_walk_callback_t callback, void *data)
+{
+ struct __line_walk_param lw = {
+ .recursive = recursive,
+ .callback = callback,
+ .data = data,
+ .retval = 0,
+ };
+ Dwarf_Die die_mem;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno;
+
+ /* Handle function declaration line */
+ fname = dwarf_decl_file(sp_die);
+ if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
+ dwarf_entrypc(sp_die, &addr) == 0) {
+ lw.retval = callback(fname, lineno, addr, data);
+ if (lw.retval != 0)
+ goto done;
+ }
+ die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
+done:
+ return lw.retval;
+}
+
+static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+
+ lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
+ if (lw->retval != 0)
+ return DWARF_CB_ABORT;
+
+ return DWARF_CB_OK;
+}
+
+/**
+ * die_walk_lines - Walk on lines inside given DIE
+ * @rt_die: a root DIE (CU, subprogram or inlined_subroutine)
+ * @callback: callback routine
+ * @data: user data
+ *
+ * Walk on all lines inside given @rt_die and call @callback on each line.
+ * If the @rt_die is a function, walk only on the lines inside the function,
+ * otherwise @rt_die must be a CU DIE.
+ * Note that this walks not only the dwarf line list, but also function entries
+ * and inline call-sites.
+ */
+int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno, ret = 0;
+ Dwarf_Die die_mem, *cu_die;
+ size_t nlines, i;
+
+ /* Get the CU die */
+ if (dwarf_tag(rt_die) != DW_TAG_compile_unit)
+ cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL);
+ else
+ cu_die = rt_die;
+ if (!cu_die) {
+ pr_debug2("Failed to get CU from given DIE.\n");
+ return -EINVAL;
+ }
+
+ /* Get lines list in the CU */
+ if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
+ pr_debug2("Failed to get source lines on this CU.\n");
+ return -ENOENT;
+ }
+ pr_debug2("Get %zd lines from this CU\n", nlines);
+
+ /* Walk on the lines on lines list */
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ if (line == NULL ||
+ dwarf_lineno(line, &lineno) != 0 ||
+ dwarf_lineaddr(line, &addr) != 0) {
+ pr_debug2("Failed to get line info. "
+ "Possible error in debuginfo.\n");
+ continue;
+ }
+ /* Filter lines based on address */
+ if (rt_die != cu_die)
+ /*
+ * Address filtering
+ * The line is included in given function, and
+ * no inline block includes it.
+ */
+ if (!dwarf_haspc(rt_die, addr) ||
+ die_find_inlinefunc(rt_die, addr, &die_mem))
+ continue;
+ /* Get source line */
+ fname = dwarf_linesrc(line, NULL, NULL);
+
+ ret = callback(fname, lineno, addr, data);
+ if (ret != 0)
+ return ret;
+ }
+
+ /*
+ * DWARF lines don't include function declarations and inlined
+ * subroutines. We have to check the functions list or the given function.
+ */
+ if (rt_die != cu_die)
+ /*
+ * Don't need to walk functions recursively, because nested
+ * inlined functions don't have lines of the specified DIE.
+ */
+ ret = __die_walk_funclines(rt_die, false, callback, data);
+ else {
+ struct __line_walk_param param = {
+ .callback = callback,
+ .data = data,
+ .retval = 0,
+ };
+ dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0);
+ ret = param.retval;
+ }
+
+ return ret;
+}
+
+struct __find_variable_param {
+ const char *name;
+ Dwarf_Addr addr;
+};
+
+static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct __find_variable_param *fvp = data;
+ Dwarf_Attribute attr;
+ int tag;
+
+ tag = dwarf_tag(die_mem);
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ die_compare_name(die_mem, fvp->name) &&
+ /* Does the DIE have location information or external instance? */
+ (dwarf_attr(die_mem, DW_AT_external, &attr) ||
+ dwarf_attr(die_mem, DW_AT_location, &attr)))
+ return DIE_FIND_CB_END;
+ if (dwarf_haspc(die_mem, fvp->addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_at - Find a given name variable at given address
+ * @sp_die: a function DIE
+ * @name: variable name
+ * @addr: address
+ * @die_mem: a buffer for result DIE
+ *
+ * Find a variable DIE called @name at @addr in @sp_die.
+ */
+Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem)
+{
+ struct __find_variable_param fvp = { .name = name, .addr = addr};
+
+ return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
+ die_mem);
+}
+
+static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *name = data;
+
+ if ((dwarf_tag(die_mem) == DW_TAG_member) &&
+ die_compare_name(die_mem, name))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_member - Find a given name member in a data structure
+ * @st_die: a data structure type DIE
+ * @name: member name
+ * @die_mem: a buffer for result DIE
+ *
+ * Find a member DIE called @name in @st_die.
+ */
+Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(st_die, __die_find_member_cb, (void *)name,
+ die_mem);
+}
+
+/**
+ * die_get_typename - Get the name of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a buffer for result type name
+ * @len: a max-length of @buf
+ *
+ * Get the name of @vr_die and store it in @buf. Return the actual length
+ * of the type name if it succeeds. Return -E2BIG if @len is not long enough,
+ * and -ENOENT if it fails to find the type name.
+ * Note that the result stores the typedef name if possible, and stores
+ * "*(function_type)" if the type is a function pointer.
+ */
+int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+{
+ Dwarf_Die type;
+ int tag, ret, ret2;
+ const char *tmp = "";
+
+ if (__die_get_real_type(vr_die, &type) == NULL)
+ return -ENOENT;
+
+ tag = dwarf_tag(&type);
+ if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+ tmp = "*";
+ else if (tag == DW_TAG_subroutine_type) {
+ /* Function pointer */
+ ret = snprintf(buf, len, "(function_type)");
+ return (ret >= len) ? -E2BIG : ret;
+ } else {
+ if (!dwarf_diename(&type))
+ return -ENOENT;
+ if (tag == DW_TAG_union_type)
+ tmp = "union ";
+ else if (tag == DW_TAG_structure_type)
+ tmp = "struct ";
+ else if (tag == DW_TAG_enumeration_type)
+ tmp = "enum ";
+ /* Write a base name */
+ ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
+ return (ret >= len) ? -E2BIG : ret;
+ }
+ ret = die_get_typename(&type, buf, len);
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
+
+/**
+ * die_get_varname - Get the name and type of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a buffer for type and variable name
+ * @len: the max-length of @buf
+ *
+ * Get the name and type of @vr_die and store them in @buf as "type\tname".
+ */
+int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+{
+ int ret, ret2;
+
+ ret = die_get_typename(vr_die, buf, len);
+ if (ret < 0) {
+ pr_debug("Failed to get type, make it unknown.\n");
+ ret = snprintf(buf, len, "(unknown_type)");
+ }
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "\t%s",
+ dwarf_diename(vr_die));
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
+
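A small, hypothetical callback sketch showing how die_find_child() and the DIE_FIND_CB_* return values (declared in dwarf-aux.h below) are meant to cooperate; it is an illustration, not part of the patch, and mirrors the structure of __die_find_variable_cb above:

/* Find the first lexical block covering 'addr'. */
static int __find_block_cb(Dwarf_Die *die_mem, void *data)
{
	Dwarf_Addr *addr = data;

	if (dwarf_tag(die_mem) == DW_TAG_lexical_block &&
	    dwarf_haspc(die_mem, *addr))
		return DIE_FIND_CB_END;		/* stop; die_find_child() returns this DIE */

	if (dwarf_haspc(die_mem, *addr))
		return DIE_FIND_CB_CONTINUE;	/* search children and siblings */

	return DIE_FIND_CB_SIBLING;		/* subtree can't cover addr; try siblings only */
}

/* Usage: die_find_child(sp_die, __find_block_cb, &addr, &die_mem); */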
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
new file mode 100644
index 00000000000..b4fe90c6cb2
--- /dev/null
+++ b/tools/perf/util/dwarf-aux.h
@@ -0,0 +1,118 @@
+#ifndef _DWARF_AUX_H
+#define _DWARF_AUX_H
+/*
+ * dwarf-aux.h : libdw auxiliary interfaces
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <elfutils/version.h>
+
+/* Find the realpath of the target file */
+extern const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname);
+
+/* Get DW_AT_comp_dir (should be NULL with older gcc) */
+extern const char *cu_get_comp_dir(Dwarf_Die *cu_die);
+
+/* Get a line number and file name for given address */
+extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
+ const char **fname, int *lineno);
+
+/* Walk on functions at given address */
+extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ int (*callback)(Dwarf_Die *, void *), void *data);
+
+/* Ensure that this DIE is a subprogram and definition (not declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
+/* Compare diename and tname */
+extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
+
+/* Get callsite line number of inline-function instance */
+extern int die_get_call_lineno(Dwarf_Die *in_die);
+
+/* Get callsite file name of inlined function instance */
+extern const char *die_get_call_file(Dwarf_Die *in_die);
+
+/* Get type die */
+extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+
+/* Get a type die, but skip qualifiers and typedef */
+extern Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+
+/* Check whether the DIE is signed or not */
+extern bool die_is_signed_type(Dwarf_Die *tp_die);
+
+/* Get data_member_location offset */
+extern int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs);
+
+/* Return values for die_find_child() callbacks */
+enum {
+ DIE_FIND_CB_END = 0, /* End of Search */
+ DIE_FIND_CB_CHILD = 1, /* Search only children */
+ DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
+ DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
+};
+
+/* Search child DIEs */
+extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem);
+
+/* Search a non-inlined function including given address */
+extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Search the top inlined function including given address */
+extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Search the deepest inlined function including given address */
+extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Walk on the instances of given DIE */
+extern int die_walk_instances(Dwarf_Die *in_die,
+ int (*callback)(Dwarf_Die *, void *), void *data);
+
+/* Walker on lines (Note: line numbers will not be sorted) */
+typedef int (* line_walk_callback_t) (const char *fname, int lineno,
+ Dwarf_Addr addr, void *data);
+
+/*
+ * Walk on lines inside given DIE. If the DIE is a subprogram, walk only on
+ * the lines inside the subprogram, otherwise the DIE must be a CU DIE.
+ */
+extern int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback,
+ void *data);
+
+/* Find a variable called 'name' at given address */
+extern Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem);
+
+/* Find a member called 'name' */
+extern Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem);
+
+/* Get the type name of given variable DIE */
+extern int die_get_typename(Dwarf_Die *vr_die, char *buf, int len);
+
+/* Get the name and type of given variable DIE, stored as "type\tname" */
+extern int die_get_varname(Dwarf_Die *vr_die, char *buf, int len);
+#endif
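To make the DIE_FIND_CB_* protocol above concrete, here is a hypothetical die_find_child() callback that stops at the first formal parameter of a subprogram; the tag choice and the sibling-only walk are assumptions of this sketch.

#include <dwarf.h>
#include "dwarf-aux.h"

/* Hypothetical callback: report the first DW_TAG_formal_parameter. */
static int first_param_cb(Dwarf_Die *die_mem, void *data)
{
	(void)data;	/* no callback state needed for this sketch */

	if (dwarf_tag(die_mem) == DW_TAG_formal_parameter)
		return DIE_FIND_CB_END;		/* die_mem holds the result */

	return DIE_FIND_CB_SIBLING;	/* parameters are siblings, don't descend */
}

/* Usage sketch: die_find_child(sp_die, first_param_cb, NULL, &die_mem); */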
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index dab9e754a28..d0281bdfa58 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,119 +1,174 @@
#include <linux/types.h>
+#include <sys/mman.h>
#include "event.h"
#include "debug.h"
-#include "session.h"
+#include "hist.h"
+#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
+#include "thread_map.h"
+#include "symbol/kallsyms.h"
+
+static const char *perf_event__names[] = {
+ [0] = "TOTAL",
+ [PERF_RECORD_MMAP] = "MMAP",
+ [PERF_RECORD_MMAP2] = "MMAP2",
+ [PERF_RECORD_LOST] = "LOST",
+ [PERF_RECORD_COMM] = "COMM",
+ [PERF_RECORD_EXIT] = "EXIT",
+ [PERF_RECORD_THROTTLE] = "THROTTLE",
+ [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
+ [PERF_RECORD_FORK] = "FORK",
+ [PERF_RECORD_READ] = "READ",
+ [PERF_RECORD_SAMPLE] = "SAMPLE",
+ [PERF_RECORD_HEADER_ATTR] = "ATTR",
+ [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
+ [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
+ [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
+ [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
+};
+
+const char *perf_event__name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(perf_event__names))
+ return "INVALID";
+ if (!perf_event__names[id])
+ return "UNKNOWN";
+ return perf_event__names[id];
+}
-const char *event__name[] = {
- [0] = "TOTAL",
- [PERF_RECORD_MMAP] = "MMAP",
- [PERF_RECORD_LOST] = "LOST",
- [PERF_RECORD_COMM] = "COMM",
- [PERF_RECORD_EXIT] = "EXIT",
- [PERF_RECORD_THROTTLE] = "THROTTLE",
- [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
- [PERF_RECORD_FORK] = "FORK",
- [PERF_RECORD_READ] = "READ",
- [PERF_RECORD_SAMPLE] = "SAMPLE",
- [PERF_RECORD_HEADER_ATTR] = "ATTR",
- [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
- [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
- [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
+static struct perf_sample synth_sample = {
+ .pid = -1,
+ .tid = -1,
+ .time = -1,
+ .stream_id = -1,
+ .cpu = -1,
+ .period = 1,
};
-static pid_t event__synthesize_comm(pid_t pid, int full,
- event__handler_t process,
- struct perf_session *session)
+static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
- event_t ev;
char filename[PATH_MAX];
char bf[BUFSIZ];
FILE *fp;
size_t size = 0;
- DIR *tasks;
- struct dirent dirent, *next;
- pid_t tgid = 0;
+ pid_t tgid = -1;
snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
fp = fopen(filename, "r");
if (fp == NULL) {
-out_race:
- /*
- * We raced with a task exiting - just return:
- */
pr_debug("couldn't open %s\n", filename);
return 0;
}
- memset(&ev.comm, 0, sizeof(ev.comm));
- while (!ev.comm.comm[0] || !ev.comm.pid) {
- if (fgets(bf, sizeof(bf), fp) == NULL)
- goto out_failure;
+ while (!comm[0] || (tgid < 0)) {
+ if (fgets(bf, sizeof(bf), fp) == NULL) {
+ pr_warning("couldn't get COMM and tgid, malformed %s\n",
+ filename);
+ break;
+ }
if (memcmp(bf, "Name:", 5) == 0) {
char *name = bf + 5;
while (*name && isspace(*name))
++name;
size = strlen(name) - 1;
- memcpy(ev.comm.comm, name, size++);
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+ comm[size] = '\0';
+
} else if (memcmp(bf, "Tgid:", 5) == 0) {
char *tgids = bf + 5;
while (*tgids && isspace(*tgids))
++tgids;
- tgid = ev.comm.pid = atoi(tgids);
+ tgid = atoi(tgids);
}
}
- ev.comm.header.type = PERF_RECORD_COMM;
- size = ALIGN(size, sizeof(u64));
- ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
+ fclose(fp);
- if (!full) {
- ev.comm.tid = pid;
+ return tgid;
+}
- process(&ev, session);
- goto out_fclose;
- }
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t size;
+ pid_t tgid;
- snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
+ memset(&event->comm, 0, sizeof(event->comm));
- tasks = opendir(filename);
- if (tasks == NULL)
- goto out_race;
+ if (machine__is_host(machine))
+ tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+ else
+ tgid = machine->pid;
- while (!readdir_r(tasks, &dirent, &next) && next) {
- char *end;
- pid = strtol(dirent.d_name, &end, 10);
- if (*end)
- continue;
+ if (tgid < 0)
+ goto out;
- ev.comm.tid = pid;
+ event->comm.pid = tgid;
+ event->comm.header.type = PERF_RECORD_COMM;
- process(&ev, session);
- }
- closedir(tasks);
+ size = strlen(event->comm.comm) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+ event->comm.tid = pid;
-out_fclose:
- fclose(fp);
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
+
+out:
return tgid;
+}
+
+static int perf_event__synthesize_fork(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ pid_t tgid, perf_event__handler_t process,
+ struct machine *machine)
+{
+ memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
+
+ /* this is really a clone event but we use fork to synthesize it */
+ event->fork.ppid = tgid;
+ event->fork.ptid = tgid;
+ event->fork.pid = tgid;
+ event->fork.tid = pid;
+ event->fork.header.type = PERF_RECORD_FORK;
+
+ event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
+
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
-out_failure:
- pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
- return -1;
+ return 0;
}
-static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
- event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
{
char filename[PATH_MAX];
FILE *fp;
+ int rc = 0;
+
+ if (machine__is_default_guest(machine))
+ return 0;
- snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
+ snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
+ machine->root_dir, pid);
fp = fopen(filename, "r");
if (fp == NULL) {
@@ -124,123 +179,315 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
return -1;
}
+ event->header.type = PERF_RECORD_MMAP2;
+
while (1) {
- char bf[BUFSIZ], *pbf = bf;
- event_t ev = {
- .header = {
- .type = PERF_RECORD_MMAP,
- /*
- * Just like the kernel, see __perf_event_mmap
- * in kernel/perf_event.c
- */
- .misc = PERF_RECORD_MISC_USER,
- },
- };
- int n;
+ char bf[BUFSIZ];
+ char prot[5];
+ char execname[PATH_MAX];
+ char anonstr[] = "//anon";
+ unsigned int ino;
size_t size;
+ ssize_t n;
+
if (fgets(bf, sizeof(bf), fp) == NULL)
break;
+ /* ensure null termination since stack will be reused. */
+ strcpy(execname, "");
+
/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
- n = hex2u64(pbf, &ev.mmap.start);
- if (n < 0)
- continue;
- pbf += n + 1;
- n = hex2u64(pbf, &ev.mmap.len);
- if (n < 0)
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+ &event->mmap2.start, &event->mmap2.len, prot,
+ &event->mmap2.pgoff, &event->mmap2.maj,
+ &event->mmap2.min,
+ &ino, execname);
+
+ /*
+ * Anon maps don't have the execname.
+ */
+ if (n < 7)
continue;
- pbf += n + 3;
- if (*pbf == 'x') { /* vm_exec */
- char *execname = strchr(bf, '/');
- /* Catch VDSO */
- if (execname == NULL)
- execname = strstr(bf, "[vdso]");
+ event->mmap2.ino = (u64)ino;
- if (execname == NULL)
- continue;
+ /*
+ * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_USER;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_USER;
+
+ /* map protection and flags bits */
+ event->mmap2.prot = 0;
+ event->mmap2.flags = 0;
+ if (prot[0] == 'r')
+ event->mmap2.prot |= PROT_READ;
+ if (prot[1] == 'w')
+ event->mmap2.prot |= PROT_WRITE;
+ if (prot[2] == 'x')
+ event->mmap2.prot |= PROT_EXEC;
+
+ if (prot[3] == 's')
+ event->mmap2.flags |= MAP_SHARED;
+ else
+ event->mmap2.flags |= MAP_PRIVATE;
- pbf += 3;
- n = hex2u64(pbf, &ev.mmap.pgoff);
+ if (prot[2] != 'x') {
+ if (!mmap_data || prot[0] != 'r')
+ continue;
- size = strlen(execname);
- execname[size - 1] = '\0'; /* Remove \n */
- memcpy(ev.mmap.filename, execname, size);
- size = ALIGN(size, sizeof(u64));
- ev.mmap.len -= ev.mmap.start;
- ev.mmap.header.size = (sizeof(ev.mmap) -
- (sizeof(ev.mmap.filename) - size));
- ev.mmap.pid = tgid;
- ev.mmap.tid = pid;
+ event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
+ }
- process(&ev, session);
+ if (!strcmp(execname, ""))
+ strcpy(execname, anonstr);
+
+ size = strlen(execname) + 1;
+ memcpy(event->mmap2.filename, execname, size);
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap2.len -= event->mmap.start;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+ event->mmap2.header.size += machine->id_hdr_size;
+ event->mmap2.pid = tgid;
+ event->mmap2.tid = pid;
+
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ rc = -1;
+ break;
}
}
fclose(fp);
- return 0;
+ return rc;
}
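For readers unfamiliar with the /proc/<pid>/maps layout parsed above, a stand-alone sketch of the same field order follows; it uses the scanf-side SCNx64 macros and a fixed example line, and only mirrors (rather than reuses) the code above.

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	const char *line = "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat";
	uint64_t start, end, pgoff;
	unsigned int maj, min, ino;
	char prot[5], execname[4096] = "";
	int n;

	n = sscanf(line, "%"SCNx64"-%"SCNx64" %4s %"SCNx64" %x:%x %u %s",
		   &start, &end, prot, &pgoff, &maj, &min, &ino, execname);

	/* n == 7 (no trailing path) is an anonymous map; below 7 is malformed. */
	printf("fields=%d start=%#"PRIx64" len=%"PRIu64" prot=%s file=%s\n",
	       n, start, end - start, prot, execname[0] ? execname : "//anon");
	return 0;
}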
-int event__synthesize_modules(event__handler_t process,
- struct perf_session *session,
- struct machine *machine)
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
{
+ int rc = 0;
struct rb_node *nd;
struct map_groups *kmaps = &machine->kmaps;
- u16 misc;
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP;
/*
* kernel uses 0 for user space maps, see kernel/perf_event.c
* __perf_event_mmap
*/
if (machine__is_host(machine))
- misc = PERF_RECORD_MISC_KERNEL;
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
else
- misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
nd; nd = rb_next(nd)) {
- event_t ev;
size_t size;
struct map *pos = rb_entry(nd, struct map, rb_node);
if (pos->dso->kernel)
continue;
- size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
- memset(&ev, 0, sizeof(ev));
- ev.mmap.header.misc = misc;
- ev.mmap.header.type = PERF_RECORD_MMAP;
- ev.mmap.header.size = (sizeof(ev.mmap) -
- (sizeof(ev.mmap.filename) - size));
- ev.mmap.start = pos->start;
- ev.mmap.len = pos->end - pos->start;
- ev.mmap.pid = machine->pid;
-
- memcpy(ev.mmap.filename, pos->dso->long_name,
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.start = pos->start;
+ event->mmap.len = pos->end - pos->start;
+ event->mmap.pid = machine->pid;
+
+ memcpy(event->mmap.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
- process(&ev, session);
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ rc = -1;
+ break;
+ }
+ }
+
+ free(event);
+ return rc;
+}
+
+static int __event__synthesize_thread(union perf_event *comm_event,
+ union perf_event *mmap_event,
+ union perf_event *fork_event,
+ pid_t pid, int full,
+ perf_event__handler_t process,
+ struct perf_tool *tool,
+ struct machine *machine, bool mmap_data)
+{
+ char filename[PATH_MAX];
+ DIR *tasks;
+ struct dirent dirent, *next;
+ pid_t tgid;
+
+ /* special case: only send one comm event using passed in pid */
+ if (!full) {
+ tgid = perf_event__synthesize_comm(tool, comm_event, pid,
+ process, machine);
+
+ if (tgid == -1)
+ return -1;
+
+ return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ }
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+ machine->root_dir, pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while (!readdir_r(tasks, &dirent, &next) && next) {
+ char *end;
+ int rc = 0;
+ pid_t _pid;
+
+ _pid = strtol(dirent.d_name, &end, 10);
+ if (*end)
+ continue;
+
+ tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
+ process, machine);
+ if (tgid == -1)
+ return -1;
+
+ if (_pid == pid) {
+ /* process the parent's maps too */
+ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ } else {
+ /* only fork the tid's map, to save time */
+ rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
+ process, machine);
+ }
+
+ if (rc)
+ return rc;
}
+ closedir(tasks);
return 0;
}
-int event__synthesize_thread(pid_t pid, event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
{
- pid_t tgid = event__synthesize_comm(pid, 1, process, session);
- if (tgid == -1)
- return -1;
- return event__synthesize_mmap_events(pid, tgid, process, session);
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ int err = -1, thread, j;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ err = 0;
+ for (thread = 0; thread < threads->nr; ++thread) {
+ if (__event__synthesize_thread(comm_event, mmap_event,
+ fork_event,
+ threads->map[thread], 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != threads->map[thread]) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == threads->map[j]) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event, mmap_event,
+ fork_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+ }
+ }
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
}
-void event__synthesize_threads(event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine, bool mmap_data)
{
DIR *proc;
+ char proc_path[PATH_MAX];
struct dirent dirent, *next;
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ int err = -1;
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
+ proc = opendir(proc_path);
- proc = opendir("/proc");
+ if (proc == NULL)
+ goto out_free_fork;
while (!readdir_r(proc, &dirent, &next) && next) {
char *end;
@@ -248,11 +495,24 @@ void event__synthesize_threads(event__handler_t process,
if (*end) /* only interested in proper numerical dirents */
continue;
-
- event__synthesize_thread(pid, process, session);
+ /*
+ * We may race with an exiting thread, so don't stop just because
+ * one thread couldn't be synthesized.
+ */
+ __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
+ 1, process, tool, machine, mmap_data);
}
+ err = 0;
closedir(proc);
+out_free_fork:
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
}
struct process_symbol_args {
@@ -260,7 +520,8 @@ struct process_symbol_args {
u64 start;
};
-static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
+static int find_symbol_cb(void *arg, const char *name, char type,
+ u64 start)
{
struct process_symbol_args *args = arg;
@@ -276,28 +537,39 @@ static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
return 1;
}
-int event__synthesize_kernel_mmap(event__handler_t process,
- struct perf_session *session,
- struct machine *machine,
- const char *symbol_name)
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name)
+{
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+ return 0;
+
+ return args.start;
+}
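A small, hypothetical use of kallsyms__get_function_start() as introduced above; "_stext" is only an example symbol, and a kptr_restrict'ed /proc/kallsyms simply makes it return 0.

#include "util/event.h"	/* kallsyms__get_function_start() */

/* Hypothetical helper: resolve the kernel text start address, 0 on failure. */
static u64 guess_kernel_text_start(void)
{
	return kallsyms__get_function_start("/proc/kallsyms", "_stext");
}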
+
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
{
size_t size;
- const char *filename, *mmap_name;
- char path[PATH_MAX];
+ const char *mmap_name;
char name_buff[PATH_MAX];
struct map *map;
-
- event_t ev = {
- .header = {
- .type = PERF_RECORD_MMAP,
- },
- };
+ struct kmap *kmap;
+ int err;
/*
* We should get this from /sys/kernel/sections/.text, but till that is
* available use this, and after it is use this as a fallback for older
* kernels.
*/
- struct process_symbol_args args = { .name = symbol_name, };
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for the kernel\n");
+ return -1;
+ }
mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
if (machine__is_host(machine)) {
@@ -305,320 +577,191 @@ int event__synthesize_kernel_mmap(event__handler_t process,
* kernel uses PERF_RECORD_MISC_USER for user space maps,
* see kernel/perf_event.c __perf_event_mmap
*/
- ev.header.misc = PERF_RECORD_MISC_KERNEL;
- filename = "/proc/kallsyms";
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
} else {
- ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- if (machine__is_default_guest(machine))
- filename = (char *) symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
- if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
- return -ENOENT;
-
map = machine->vmlinux_maps[MAP__FUNCTION];
- size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
- "%s%s", mmap_name, symbol_name) + 1;
- size = ALIGN(size, sizeof(u64));
- ev.mmap.header.size = (sizeof(ev.mmap) -
- (sizeof(ev.mmap.filename) - size));
- ev.mmap.pgoff = args.start;
- ev.mmap.start = map->start;
- ev.mmap.len = map->end - ev.mmap.start;
- ev.mmap.pid = machine->pid;
-
- return process(&ev, session);
+ kmap = map__kmap(map);
+ size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
+ "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
+ event->mmap.start = map->start;
+ event->mmap.len = map->end - event->mmap.start;
+ event->mmap.pid = machine->pid;
+
+ err = process(tool, event, &synth_sample, machine);
+ free(event);
+
+ return err;
}
-static void thread__comm_adjust(struct thread *self, struct hists *hists)
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
- char *comm = self->comm;
-
- if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
- (!symbol_conf.comm_list ||
- strlist__has_entry(symbol_conf.comm_list, comm))) {
- u16 slen = strlen(comm);
-
- if (hists__new_col_len(hists, HISTC_COMM, slen))
- hists__set_col_len(hists, HISTC_THREAD, slen + 6);
- }
+ return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}
-static int thread__set_comm_adjust(struct thread *self, const char *comm,
- struct hists *hists)
+int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- int ret = thread__set_comm(self, comm);
-
- if (ret)
- return ret;
-
- thread__comm_adjust(self, hists);
-
- return 0;
+ return machine__process_comm_event(machine, event, sample);
}
-int event__process_comm(event_t *self, struct perf_session *session)
+int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- struct thread *thread = perf_session__findnew(session, self->comm.tid);
-
- dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
-
- if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
- &session->hists)) {
- dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
- return -1;
- }
-
- return 0;
+ return machine__process_lost_event(machine, event, sample);
}
-int event__process_lost(event_t *self, struct perf_session *session)
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
- dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
- session->hists.stats.total_lost += self->lost.lost;
- return 0;
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
+ event->mmap.pid, event->mmap.tid, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff,
+ (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
+ event->mmap.filename);
}
-static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
+size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
- maps[MAP__FUNCTION]->start = self->mmap.start;
- maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
- /*
- * Be a bit paranoid here, some perf.data file came with
- * a zero sized synthesized MMAP event for the kernel.
- */
- if (maps[MAP__FUNCTION]->end == 0)
- maps[MAP__FUNCTION]->end = ~0UL;
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
+ " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
+ event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
+ event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
+ event->mmap2.min, event->mmap2.ino,
+ event->mmap2.ino_generation,
+ (event->mmap2.prot & PROT_READ) ? 'r' : '-',
+ (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
+ (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
+ (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
+ event->mmap2.filename);
}
-static int event__process_kernel_mmap(event_t *self,
- struct perf_session *session)
+int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- struct map *map;
- char kmmap_prefix[PATH_MAX];
- struct machine *machine;
- enum dso_kernel_type kernel_type;
- bool is_kernel_mmap;
-
- machine = perf_session__findnew_machine(session, self->mmap.pid);
- if (!machine) {
- pr_err("Can't find id %d's machine\n", self->mmap.pid);
- goto out_problem;
- }
-
- machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
- if (machine__is_host(machine))
- kernel_type = DSO_TYPE_KERNEL;
- else
- kernel_type = DSO_TYPE_GUEST_KERNEL;
-
- is_kernel_mmap = memcmp(self->mmap.filename,
- kmmap_prefix,
- strlen(kmmap_prefix)) == 0;
- if (self->mmap.filename[0] == '/' ||
- (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
-
- char short_module_name[1024];
- char *name, *dot;
-
- if (self->mmap.filename[0] == '/') {
- name = strrchr(self->mmap.filename, '/');
- if (name == NULL)
- goto out_problem;
-
- ++name; /* skip / */
- dot = strrchr(name, '.');
- if (dot == NULL)
- goto out_problem;
- snprintf(short_module_name, sizeof(short_module_name),
- "[%.*s]", (int)(dot - name), name);
- strxfrchar(short_module_name, '-', '_');
- } else
- strcpy(short_module_name, self->mmap.filename);
-
- map = machine__new_module(machine, self->mmap.start,
- self->mmap.filename);
- if (map == NULL)
- goto out_problem;
-
- name = strdup(short_module_name);
- if (name == NULL)
- goto out_problem;
-
- map->dso->short_name = name;
- map->dso->sname_alloc = 1;
- map->end = map->start + self->mmap.len;
- } else if (is_kernel_mmap) {
- const char *symbol_name = (self->mmap.filename +
- strlen(kmmap_prefix));
- /*
- * Should be there already, from the build-id table in
- * the header.
- */
- struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
- kmmap_prefix);
- if (kernel == NULL)
- goto out_problem;
-
- kernel->kernel = kernel_type;
- if (__machine__create_kernel_maps(machine, kernel) < 0)
- goto out_problem;
-
- event_set_kernel_mmap_len(machine->vmlinux_maps, self);
- perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
- symbol_name,
- self->mmap.pgoff);
- if (machine__is_default_guest(machine)) {
- /*
- * preload dso of guest kernel and modules
- */
- dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
- NULL);
- }
- }
- return 0;
-out_problem:
- return -1;
+ return machine__process_mmap_event(machine, event, sample);
}
-int event__process_mmap(event_t *self, struct perf_session *session)
+int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- struct machine *machine;
- struct thread *thread;
- struct map *map;
- u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- int ret = 0;
-
- dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
- self->mmap.pid, self->mmap.tid, self->mmap.start,
- self->mmap.len, self->mmap.pgoff, self->mmap.filename);
-
- if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
- cpumode == PERF_RECORD_MISC_KERNEL) {
- ret = event__process_kernel_mmap(self, session);
- if (ret < 0)
- goto out_problem;
- return 0;
- }
-
- machine = perf_session__find_host_machine(session);
- if (machine == NULL)
- goto out_problem;
- thread = perf_session__findnew(session, self->mmap.pid);
- if (thread == NULL)
- goto out_problem;
- map = map__new(&machine->user_dsos, self->mmap.start,
- self->mmap.len, self->mmap.pgoff,
- self->mmap.pid, self->mmap.filename,
- MAP__FUNCTION);
- if (map == NULL)
- goto out_problem;
-
- thread__insert_map(thread, map);
- return 0;
-
-out_problem:
- dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
- return 0;
+ return machine__process_mmap2_event(machine, event, sample);
}
-int event__process_task(event_t *self, struct perf_session *session)
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
- struct thread *thread = perf_session__findnew(session, self->fork.tid);
- struct thread *parent = perf_session__findnew(session, self->fork.ptid);
-
- dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
- self->fork.ppid, self->fork.ptid);
-
- if (self->header.type == PERF_RECORD_EXIT) {
- perf_session__remove_thread(session, thread);
- return 0;
- }
+ return fprintf(fp, "(%d:%d):(%d:%d)\n",
+ event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+}
- if (thread == NULL || parent == NULL ||
- thread__fork(thread, parent) < 0) {
- dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
- return -1;
- }
+int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_fork_event(machine, event, sample);
+}
- return 0;
+int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_exit_event(machine, event, sample);
}
-int event__process(event_t *event, struct perf_session *session)
+size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
+ size_t ret = fprintf(fp, "PERF_RECORD_%s",
+ perf_event__name(event->header.type));
+
switch (event->header.type) {
case PERF_RECORD_COMM:
- event__process_comm(event, session);
- break;
- case PERF_RECORD_MMAP:
- event__process_mmap(event, session);
+ ret += perf_event__fprintf_comm(event, fp);
break;
case PERF_RECORD_FORK:
case PERF_RECORD_EXIT:
- event__process_task(event, session);
+ ret += perf_event__fprintf_task(event, fp);
break;
- default:
+ case PERF_RECORD_MMAP:
+ ret += perf_event__fprintf_mmap(event, fp);
+ break;
+ case PERF_RECORD_MMAP2:
+ ret += perf_event__fprintf_mmap2(event, fp);
break;
+ default:
+ ret += fprintf(fp, "\n");
}
- return 0;
+ return ret;
}
-void thread__find_addr_map(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
+int perf_event__process(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_event(machine, event, sample);
+}
+
+void thread__find_addr_map(struct thread *thread,
+ struct machine *machine, u8 cpumode,
+ enum map_type type, u64 addr,
struct addr_location *al)
{
- struct map_groups *mg = &self->mg;
- struct machine *machine = NULL;
+ struct map_groups *mg = thread->mg;
+ bool load_map = false;
- al->thread = self;
+ al->machine = machine;
+ al->thread = thread;
al->addr = addr;
al->cpumode = cpumode;
- al->filtered = false;
+ al->filtered = 0;
+
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
- machine = perf_session__find_host_machine(session);
- if (machine == NULL) {
- al->map = NULL;
- return;
- }
mg = &machine->kmaps;
+ load_map = true;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
- machine = perf_session__find_host_machine(session);
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
- machine = perf_session__find_machine(session, pid);
- if (machine == NULL) {
- al->map = NULL;
- return;
- }
mg = &machine->kmaps;
+ load_map = true;
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
+ al->level = 'u';
} else {
- /*
- * 'u' means guest os user space.
- * TODO: We don't support guest user space. Might support late.
- */
- if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
- al->level = 'u';
- else
- al->level = 'H';
+ al->level = 'H';
al->map = NULL;
if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
!perf_guest)
- al->filtered = true;
+ al->filtered |= (1 << HIST_FILTER__GUEST);
if ((cpumode == PERF_RECORD_MISC_USER ||
cpumode == PERF_RECORD_MISC_KERNEL) &&
!perf_host)
- al->filtered = true;
+ al->filtered |= (1 << HIST_FILTER__HOST);
return;
}
@@ -635,201 +778,90 @@ try_again:
* in the whole kernel symbol list.
*/
if ((long long)al->addr < 0 &&
- cpumode == PERF_RECORD_MISC_KERNEL &&
+ cpumode == PERF_RECORD_MISC_USER &&
machine && mg != &machine->kmaps) {
mg = &machine->kmaps;
goto try_again;
}
- } else
+ } else {
+ /*
+ * Kernel maps might be changed when loading symbols so loading
+ * must be done prior to using kernel maps.
+ */
+ if (load_map)
+ map__load(al->map, machine->symbol_filter);
al->addr = al->map->map_ip(al->map, al->addr);
+ }
}
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter)
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al)
{
- thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
+ thread__find_addr_map(thread, machine, cpumode, type, addr, al);
if (al->map != NULL)
- al->sym = map__find_symbol(al->map, al->addr, filter);
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
else
al->sym = NULL;
}
-static void dso__calc_col_width(struct dso *self, struct hists *hists)
-{
- if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
- (!symbol_conf.dso_list ||
- strlist__has_entry(symbol_conf.dso_list, self->name))) {
- u16 slen = dso__name_len(self);
- hists__new_col_len(hists, HISTC_DSO, slen);
- }
-
- self->slen_calculated = 1;
-}
-
-int event__preprocess_sample(const event_t *self, struct perf_session *session,
- struct addr_location *al, struct sample_data *data,
- symbol_filter_t filter)
+int perf_event__preprocess_sample(const union perf_event *event,
+ struct machine *machine,
+ struct addr_location *al,
+ struct perf_sample *sample)
{
- u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
- event__parse_sample(self, session->sample_type, data);
-
- dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
- self->header.misc, data->pid, data->tid, data->ip,
- data->period, data->cpu);
-
- if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
- unsigned int i;
-
- dump_printf("... chain: nr:%Lu\n", data->callchain->nr);
-
- if (!ip_callchain__valid(data->callchain, self)) {
- pr_debug("call-chain problem with event, "
- "skipping it.\n");
- goto out_filtered;
- }
-
- if (dump_trace) {
- for (i = 0; i < data->callchain->nr; i++)
- dump_printf("..... %2d: %016Lx\n",
- i, data->callchain->ips[i]);
- }
- }
- thread = perf_session__findnew(session, self->ip.pid);
if (thread == NULL)
return -1;
- if (symbol_conf.comm_list &&
- !strlist__has_entry(symbol_conf.comm_list, thread->comm))
- goto out_filtered;
-
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
/*
- * Have we already created the kernel maps for the host machine?
+ * Have we already created the kernel maps for this machine?
*
* This should have happened earlier, when we processed the kernel MMAP
* events, but for older perf.data files there was no such thing, so do
* it now.
*/
if (cpumode == PERF_RECORD_MISC_KERNEL &&
- session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
- machine__create_kernel_maps(&session->host_machine);
+ machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(machine);
- thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
- self->ip.pid, self->ip.ip, al);
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ sample->ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
- al->sym = NULL;
- al->cpu = data->cpu;
-
- if (al->map) {
- if (symbol_conf.dso_list &&
- (!al->map || !al->map->dso ||
- !(strlist__has_entry(symbol_conf.dso_list,
- al->map->dso->short_name) ||
- (al->map->dso->short_name != al->map->dso->long_name &&
- strlist__has_entry(symbol_conf.dso_list,
- al->map->dso->long_name)))))
- goto out_filtered;
- /*
- * We have to do this here as we may have a dso with no symbol
- * hit that has a name longer than the ones with symbols
- * sampled.
- */
- if (!sort_dso.elide && !al->map->dso->slen_calculated)
- dso__calc_col_width(al->map->dso, &session->hists);
- al->sym = map__find_symbol(al->map, al->addr, filter);
- } else {
- const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
-
- if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
- !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
- !symbol_conf.dso_list)
- hists__set_col_len(&session->hists, HISTC_DSO,
- unresolved_col_width);
- }
-
- if (symbol_conf.sym_list && al->sym &&
- !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
- goto out_filtered;
-
- return 0;
-
-out_filtered:
- al->filtered = true;
- return 0;
-}
-
-int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
-{
- const u64 *array = event->sample.array;
-
- if (type & PERF_SAMPLE_IP) {
- data->ip = event->ip.ip;
- array++;
- }
-
- if (type & PERF_SAMPLE_TID) {
- u32 *p = (u32 *)array;
- data->pid = p[0];
- data->tid = p[1];
- array++;
- }
+ if (thread__is_filtered(thread))
+ al->filtered |= (1 << HIST_FILTER__THREAD);
- if (type & PERF_SAMPLE_TIME) {
- data->time = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_ADDR) {
- data->addr = *array;
- array++;
- }
-
- data->id = -1ULL;
- if (type & PERF_SAMPLE_ID) {
- data->id = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_STREAM_ID) {
- data->stream_id = *array;
- array++;
- }
-
- if (type & PERF_SAMPLE_CPU) {
- u32 *p = (u32 *)array;
- data->cpu = *p;
- array++;
- } else
- data->cpu = -1;
+ al->sym = NULL;
+ al->cpu = sample->cpu;
- if (type & PERF_SAMPLE_PERIOD) {
- data->period = *array;
- array++;
- }
+ if (al->map) {
+ struct dso *dso = al->map->dso;
- if (type & PERF_SAMPLE_READ) {
- pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
- return -1;
- }
+ if (symbol_conf.dso_list &&
+ (!dso || !(strlist__has_entry(symbol_conf.dso_list,
+ dso->short_name) ||
+ (dso->short_name != dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ dso->long_name))))) {
+ al->filtered |= (1 << HIST_FILTER__DSO);
+ }
- if (type & PERF_SAMPLE_CALLCHAIN) {
- data->callchain = (struct ip_callchain *)array;
- array += 1 + data->callchain->nr;
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
}
- if (type & PERF_SAMPLE_RAW) {
- u32 *p = (u32 *)array;
- data->raw_size = *p;
- p++;
- data->raw_data = p;
+ if (symbol_conf.sym_list &&
+ (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
+ al->sym->name))) {
+ al->filtered |= (1 << HIST_FILTER__SYMBOL);
}
return 0;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 8e790dae702..e5dd40addb3 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -2,26 +2,34 @@
#define __PERF_RECORD_H
#include <limits.h>
+#include <stdio.h>
#include "../perf.h"
#include "map.h"
+#include "build-id.h"
+#include "perf_regs.h"
-/*
- * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
- */
-struct ip_event {
+struct mmap_event {
struct perf_event_header header;
- u64 ip;
u32 pid, tid;
- unsigned char __more_data[];
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ char filename[PATH_MAX];
};
-struct mmap_event {
+struct mmap2_event {
struct perf_event_header header;
u32 pid, tid;
u64 start;
u64 len;
u64 pgoff;
+ u32 maj;
+ u32 min;
+ u64 ino;
+ u64 ino_generation;
+ u32 prot;
+ u32 flags;
char filename[PATH_MAX];
};
@@ -56,12 +64,86 @@ struct read_event {
u64 id;
};
+struct throttle_event {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+};
+
+#define PERF_SAMPLE_MASK \
+ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
+ PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
+ PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
+ PERF_SAMPLE_IDENTIFIER)
+
+/* perf sample has 16 bits size limit */
+#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+
struct sample_event {
struct perf_event_header header;
u64 array[];
};
-struct sample_data {
+struct regs_dump {
+ u64 abi;
+ u64 mask;
+ u64 *regs;
+
+ /* Cached values/mask filled by first register access. */
+ u64 cache_regs[PERF_REGS_MAX];
+ u64 cache_mask;
+};
+
+struct stack_dump {
+ u16 offset;
+ u64 size;
+ char *data;
+};
+
+struct sample_read_value {
+ u64 value;
+ u64 id;
+};
+
+struct sample_read {
+ u64 time_enabled;
+ u64 time_running;
+ union {
+ struct {
+ u64 nr;
+ struct sample_read_value *values;
+ } group;
+ struct sample_read_value one;
+ };
+};
+
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
+};
+
+struct branch_flags {
+ u64 mispred:1;
+ u64 predicted:1;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 reserved:60;
+};
+
+struct branch_entry {
+ u64 from;
+ u64 to;
+ struct branch_flags flags;
+};
+
+struct branch_stack {
+ u64 nr;
+ struct branch_entry entries[0];
+};
+
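Both flexible-array structs above are walked by index up to ->nr; a hypothetical dump of a sample's callchain (struct perf_sample follows below), much like the dump_printf() loop removed from event.c, could look like this.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical dump: print every ip recorded in a sample's callchain. */
static void dump_callchain(const struct perf_sample *sample)
{
	u64 i;

	if (sample->callchain == NULL)
		return;

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2" PRIu64 ": %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}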
+struct perf_sample {
u64 ip;
u32 pid, tid;
u64 time;
@@ -69,24 +151,37 @@ struct sample_data {
u64 id;
u64 stream_id;
u64 period;
+ u64 weight;
+ u64 transaction;
u32 cpu;
u32 raw_size;
+ u64 data_src;
void *raw_data;
struct ip_callchain *callchain;
+ struct branch_stack *branch_stack;
+ struct regs_dump user_regs;
+ struct stack_dump user_stack;
+ struct sample_read read;
};
-#define BUILD_ID_SIZE 20
+#define PERF_MEM_DATA_SRC_NONE \
+ (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA))
struct build_id_event {
struct perf_event_header header;
pid_t pid;
- u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
char filename[];
};
enum perf_user_event_type { /* above any possible kernel type */
+ PERF_RECORD_USER_TYPE_START = 64,
PERF_RECORD_HEADER_ATTR = 64,
- PERF_RECORD_HEADER_EVENT_TYPE = 65,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
PERF_RECORD_HEADER_TRACING_DATA = 66,
PERF_RECORD_HEADER_BUILD_ID = 67,
PERF_RECORD_FINISHED_ROUND = 68,
@@ -116,52 +211,106 @@ struct tracing_data_event {
u32 size;
};
-typedef union event_union {
+union perf_event {
struct perf_event_header header;
- struct ip_event ip;
struct mmap_event mmap;
+ struct mmap2_event mmap2;
struct comm_event comm;
struct fork_event fork;
struct lost_event lost;
struct read_event read;
+ struct throttle_event throttle;
struct sample_event sample;
struct attr_event attr;
struct event_type_event event_type;
struct tracing_data_event tracing_data;
struct build_id_event build_id;
-} event_t;
+};
-void event__print_totals(void);
+void perf_event__print_totals(void);
-struct perf_session;
+struct perf_tool;
+struct thread_map;
-typedef int (*event__handler_t)(event_t *event, struct perf_session *session);
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
-int event__synthesize_thread(pid_t pid, event__handler_t process,
- struct perf_session *session);
-void event__synthesize_threads(event__handler_t process,
- struct perf_session *session);
-int event__synthesize_kernel_mmap(event__handler_t process,
- struct perf_session *session,
- struct machine *machine,
- const char *symbol_name);
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine, bool mmap_data);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine, bool mmap_data);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
-int event__synthesize_modules(event__handler_t process,
- struct perf_session *session,
- struct machine *machine);
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
-int event__process_comm(event_t *self, struct perf_session *session);
-int event__process_lost(event_t *self, struct perf_session *session);
-int event__process_mmap(event_t *self, struct perf_session *session);
-int event__process_task(event_t *self, struct perf_session *session);
-int event__process(event_t *event, struct perf_session *session);
+int perf_event__process_comm(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap2(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_fork(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_exit(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
struct addr_location;
-int event__preprocess_sample(const event_t *self, struct perf_session *session,
- struct addr_location *al, struct sample_data *data,
- symbol_filter_t filter);
-int event__parse_sample(const event_t *event, u64 type, struct sample_data *data);
-extern const char *event__name[];
+int perf_event__preprocess_sample(const union perf_event *event,
+ struct machine *machine,
+ struct addr_location *al,
+ struct perf_sample *sample);
+
+const char *perf_event__name(unsigned int id);
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 read_format);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 read_format,
+ const struct perf_sample *sample,
+ bool swapped);
+
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name);
#endif /* __PERF_RECORD_H */
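perf_event__handler_t, declared above, is the hook every synthesize routine calls back into; a minimal, hypothetical handler that only logs the record type might look like this.

#include "util/event.h"
#include "util/debug.h"	/* pr_debug() */

/* Hypothetical handler: log each synthesized record and keep going. */
static int log_event(struct perf_tool *tool __maybe_unused,
		     union perf_event *event,
		     struct perf_sample *sample __maybe_unused,
		     struct machine *machine __maybe_unused)
{
	pr_debug("synthesized PERF_RECORD_%s\n",
		 perf_event__name(event->header.type));
	return 0;	/* a non-zero return aborts the synthesis loop */
}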
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
new file mode 100644
index 00000000000..59ef2802fcf
--- /dev/null
+++ b/tools/perf/util/evlist.c
@@ -0,0 +1,1245 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "util.h"
+#include <api/fs/debugfs.h>
+#include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
+#include "target.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "debug.h"
+#include <unistd.h>
+
+#include "parse-events.h"
+#include "parse-options.h"
+
+#include <sys/mman.h>
+
+#include <linux/bitops.h>
+#include <linux/hash.h>
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+ INIT_LIST_HEAD(&evlist->entries);
+ perf_evlist__set_maps(evlist, cpus, threads);
+ evlist->workload.pid = -1;
+}
+
+struct perf_evlist *perf_evlist__new(void)
+{
+ struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+ if (evlist != NULL)
+ perf_evlist__init(evlist, NULL, NULL);
+
+ return evlist;
+}
+
+struct perf_evlist *perf_evlist__new_default(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_default(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
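A brief, hypothetical use of the two constructors above; the only assumptions are that the caller wants the default "cycles" event and owns the resulting list.

#include "util/evlist.h"
#include "util/debug.h"

/* Hypothetical setup: allocate an evlist preloaded with "cycles". */
static struct perf_evlist *setup_default_evlist(void)
{
	struct perf_evlist *evlist = perf_evlist__new_default();

	if (evlist == NULL)
		pr_err("failed to allocate the event list\n");

	return evlist;	/* release with perf_evlist__delete() when done */
}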
+
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
+ */
+void perf_evlist__set_id_pos(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ evlist->id_pos = first->id_pos;
+ evlist->is_pos = first->is_pos;
+}
+
+static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel)
+ perf_evsel__calc_id_pos(evsel);
+
+ perf_evlist__set_id_pos(evlist);
+}
+
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *n;
+
+ evlist__for_each_safe(evlist, n, pos) {
+ list_del_init(&pos->node);
+ perf_evsel__delete(pos);
+ }
+
+ evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+ zfree(&evlist->mmap);
+ zfree(&evlist->pollfd);
+}
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ cpu_map__delete(evlist->cpus);
+ thread_map__delete(evlist->threads);
+ evlist->cpus = NULL;
+ evlist->threads = NULL;
+ perf_evlist__purge(evlist);
+ perf_evlist__exit(evlist);
+ free(evlist);
+}
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+{
+ list_add_tail(&entry->node, &evlist->entries);
+ entry->idx = evlist->nr_entries;
+
+ if (!evlist->nr_entries++)
+ perf_evlist__set_id_pos(evlist);
+}
+
+void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries)
+{
+ bool set_id_pos = !evlist->nr_entries;
+
+ list_splice_tail(list, &evlist->entries);
+ evlist->nr_entries += nr_entries;
+ if (set_id_pos)
+ perf_evlist__set_id_pos(evlist);
+}
+
+void __perf_evlist__set_leader(struct list_head *list)
+{
+ struct perf_evsel *evsel, *leader;
+
+ leader = list_entry(list->next, struct perf_evsel, node);
+ evsel = list_entry(list->prev, struct perf_evsel, node);
+
+ leader->nr_members = evsel->idx - leader->idx + 1;
+
+ __evlist__for_each(list, evsel) {
+ evsel->leader = leader;
+ }
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries) {
+ evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
+ __perf_evlist__set_leader(&evlist->entries);
+ }
+}
+
+int perf_evlist__add_default(struct perf_evlist *evlist)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_evsel *evsel;
+
+ event_attr_init(&attr);
+
+ evsel = perf_evsel__new(&attr);
+ if (evsel == NULL)
+ goto error;
+
+ /* use strdup() because free(evsel) assumes name is allocated */
+ evsel->name = strdup("cycles");
+ if (!evsel->name)
+ goto error_free;
+
+ perf_evlist__add(evlist, evsel);
+ return 0;
+error_free:
+ perf_evsel__delete(evsel);
+error:
+ return -ENOMEM;
+}
+
+static int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(head);
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++) {
+ evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->node, &head);
+ }
+
+ perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+
+ return 0;
+
+out_delete_partial_list:
+ __evlist__for_each_safe(&head, n, evsel)
+ perf_evsel__delete(evsel);
+ return -1;
+}
+
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++)
+ event_attr_init(attrs + i);
+
+ return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+}
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+ (int)evsel->attr.config == id)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+ (strcmp(evsel->name, name) == 0))
+ return evsel;
+ }
+
+ return NULL;
+}
+
+int perf_evlist__add_newtp(struct perf_evlist *evlist,
+ const char *sys, const char *name, void *handler)
+{
+ struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
+
+ if (evsel == NULL)
+ return -1;
+
+ evsel->handler = handler;
+ perf_evlist__add(evlist, evsel);
+ return 0;
+}
+
+void perf_evlist__disable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ evlist__for_each(evlist, pos) {
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+ continue;
+ for (thread = 0; thread < nr_threads; thread++)
+ ioctl(FD(pos, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ }
+ }
+}
+
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ evlist__for_each(evlist, pos) {
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+ continue;
+ for (thread = 0; thread < nr_threads; thread++)
+ ioctl(FD(pos, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ }
+ }
+}
+
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return 0;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+ int nfds = nr_cpus * nr_threads * evlist->nr_entries;
+ evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
+ return evlist->pollfd != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ evlist->pollfd[evlist->nr_fds].fd = fd;
+ evlist->pollfd[evlist->nr_fds].events = POLLIN;
+ evlist->nr_fds++;
+}
+
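+/*
+ * Hash each sample id into evlist->heads so that perf_evlist__id2sid() and
+ * perf_evlist__id2evsel() can map an event id back to its evsel.
+ */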
+static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ int hash;
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->id = id;
+ sid->evsel = evsel;
+ hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&sid->node, &evlist->heads[hash]);
+}
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ evsel->id[evsel->ids++] = id;
+}
+
+static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
+{
+ u64 read_data[4] = { 0, };
+ int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+ /* Legacy way to get the event id. All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
+
+ if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+ read(fd, &read_data, sizeof(read_data)) == -1)
+ return -1;
+
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ ++id_idx;
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ ++id_idx;
+
+ id = read_data[id_idx];
+
+ add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ return 0;
+}
+
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
+{
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node)
+ if (sid->id == id)
+ return sid;
+
+ return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct perf_sample_id *sid;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ sid = perf_evlist__id2sid(evlist, id);
+ if (sid)
+ return sid->evsel;
+
+ if (!perf_evlist__sample_id_all(evlist))
+ return perf_evlist__first(evlist);
+
+ return NULL;
+}
+
+static int perf_evlist__event2id(struct perf_evlist *evlist,
+ union perf_event *event, u64 *id)
+{
+ const u64 *array = event->sample.array;
+ ssize_t n;
+
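+ /*
+ * Number of u64 words following the event header: for samples the id
+ * sits id_pos words from the start, otherwise is_pos words from the end.
+ */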
+ n = (event->header.size - sizeof(event->header)) >> 3;
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ if (evlist->id_pos >= n)
+ return -1;
+ *id = array[evlist->id_pos];
+ } else {
+ if (evlist->is_pos > n)
+ return -1;
+ n -= evlist->is_pos;
+ *id = array[n];
+ }
+ return 0;
+}
+
+static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+ union perf_event *event)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+ u64 id;
+
+ if (evlist->nr_entries == 1)
+ return first;
+
+ if (!first->attr.sample_id_all &&
+ event->header.type != PERF_RECORD_SAMPLE)
+ return first;
+
+ if (perf_evlist__event2id(evlist, event, &id))
+ return NULL;
+
+ /* Synthesized events have an id of zero */
+ if (!id)
+ return first;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node) {
+ if (sid->id == id)
+ return sid->evsel;
+ }
+ return NULL;
+}
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int head = perf_mmap__read_head(md);
+ unsigned int old = md->prev;
+ unsigned char *data = md->base + page_size;
+ union perf_event *event = NULL;
+
+ if (evlist->overwrite) {
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the head, we got messed up.
+ *
+ * In either case, truncate and restart at head.
+ */
+ int diff = head - old;
+ if (diff > md->mask / 2 || diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+ /*
+ * head points to a known good entry, start there.
+ */
+ old = head;
+ }
+ }
+
+ if (old != head) {
+ size_t size;
+
+ event = (union perf_event *)&data[old & md->mask];
+ size = event->header.size;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((old & md->mask) + size != ((old + size) & md->mask)) {
+ unsigned int offset = old;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = md->event_copy;
+
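+ /*
+ * Copy the wrapped event piecewise into md->event_copy so
+ * the caller sees one contiguous record.
+ */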
+ do {
+ cpy = min(md->mask + 1 - (offset & md->mask), len);
+ memcpy(dst, &data[offset & md->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = (union perf_event *) md->event_copy;
+ }
+
+ old += size;
+ }
+
+ md->prev = old;
+
+ return event;
+}
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+ if (!evlist->overwrite) {
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int old = md->prev;
+
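+ /*
+ * Moving data_tail up to the last event we consumed tells the
+ * kernel that the space may be reused; in overwrite mode the
+ * tail is not tracked, so there is nothing to do.
+ */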
+ perf_mmap__write_tail(md, old);
+ }
+}
+
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+ if (evlist->mmap[idx].base != NULL) {
+ munmap(evlist->mmap[idx].base, evlist->mmap_len);
+ evlist->mmap[idx].base = NULL;
+ }
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+ int i;
+
+ if (evlist->mmap == NULL)
+ return;
+
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ __perf_evlist__munmap(evlist, i);
+
+ zfree(&evlist->mmap);
+}
+
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+{
+ evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
+ if (cpu_map__empty(evlist->cpus))
+ evlist->nr_mmaps = thread_map__nr(evlist->threads);
+ evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist,
+ int idx, int prot, int mask, int fd)
+{
+ evlist->mmap[idx].prev = 0;
+ evlist->mmap[idx].mask = mask;
+ evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+ MAP_SHARED, fd, 0);
+ if (evlist->mmap[idx].base == MAP_FAILED) {
+ pr_debug2("failed to mmap perf event ring buffer, error %d\n",
+ errno);
+ evlist->mmap[idx].base = NULL;
+ return -1;
+ }
+
+ perf_evlist__add_pollfd(evlist, fd);
+ return 0;
+}
+
+static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
+ int prot, int mask, int cpu, int thread,
+ int *output)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+ if (__perf_evlist__mmap(evlist, idx, prot, mask,
+ *output) < 0)
+ return -1;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
+ int mask)
+{
+ int cpu, thread;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ pr_debug2("perf event ring buffer mmapped per cpu\n");
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ int output = -1;
+
+ for (thread = 0; thread < nr_threads; thread++) {
+ if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
+ cpu, thread, &output))
+ goto out_unmap;
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ __perf_evlist__munmap(evlist, cpu);
+ return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
+ int mask)
+{
+ int thread;
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ pr_debug2("perf event ring buffer mmapped per thread\n");
+ for (thread = 0; thread < nr_threads; thread++) {
+ int output = -1;
+
+ if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
+ thread, &output))
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ for (thread = 0; thread < nr_threads; thread++)
+ __perf_evlist__munmap(evlist, thread);
+ return -1;
+}
+
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+ /* 512 kiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return 0;
+
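+ /* One extra page holds the struct perf_event_mmap_page control header. */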
+ return (pages + 1) * page_size;
+}
+
+static long parse_pages_arg(const char *str, unsigned long min,
+ unsigned long max)
+{
+ unsigned long pages, val;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ if (str == NULL)
+ return -EINVAL;
+
+ val = parse_tag_value(str, tags);
+ if (val != (unsigned long) -1) {
+ /* we got a size value in bytes */
+ pages = PERF_ALIGN(val, page_size) / page_size;
+ } else {
+ /* we got a page count value */
+ char *eptr;
+ pages = strtoul(str, &eptr, 10);
+ if (*eptr != '\0')
+ return -EINVAL;
+ }
+
+ if (pages == 0 && min == 0) {
+ /* leave number of pages at 0 */
+ } else if (!is_power_of_2(pages)) {
+ /* round pages up to next power of 2 */
+ pages = next_pow2_l(pages);
+ if (!pages)
+ return -EINVAL;
+ pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
+ pages * page_size, pages);
+ }
+
+ if (pages > max)
+ return -EINVAL;
+
+ return pages;
+}
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned int *mmap_pages = opt->value;
+ unsigned long max = UINT_MAX;
+ long pages;
+
+ if (max > SIZE_MAX / page_size)
+ max = SIZE_MAX / page_size;
+
+ pages = parse_pages_arg(str, 1, max);
+ if (pages < 0) {
+ pr_err("Invalid argument for --mmap_pages/-m\n");
+ return -1;
+ }
+
+ *mmap_pages = pages;
+ return 0;
+}
+
+/**
+ * perf_evlist__mmap - Create mmaps to receive events.
+ * @evlist: list of events
+ * @pages: map length in pages
+ * @overwrite: overwrite older events?
+ *
+ * If @overwrite is %false the user needs to signal event consumption using
+ * perf_mmap__write_tail(). Using perf_evlist__mmap_consume() does this
+ * automatically.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite)
+{
+ struct perf_evsel *evsel;
+ const struct cpu_map *cpus = evlist->cpus;
+ const struct thread_map *threads = evlist->threads;
+ int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+ if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+ return -ENOMEM;
+
+ if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ return -ENOMEM;
+
+ evlist->overwrite = overwrite;
+ evlist->mmap_len = perf_evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->mmap_len);
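+ /* The data area excludes the control page; mask wraps offsets within it. */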
+ mask = evlist->mmap_len - page_size - 1;
+
+ evlist__for_each(evlist, evsel) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->sample_id == NULL &&
+ perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
+ return -ENOMEM;
+ }
+
+ if (cpu_map__empty(cpus))
+ return perf_evlist__mmap_per_thread(evlist, prot, mask);
+
+ return perf_evlist__mmap_per_cpu(evlist, prot, mask);
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
+{
+ evlist->threads = thread_map__new_str(target->pid, target->tid,
+ target->uid);
+
+ if (evlist->threads == NULL)
+ return -1;
+
+ if (target__uses_dummy_map(target))
+ evlist->cpus = cpu_map__dummy_new();
+ else
+ evlist->cpus = cpu_map__new(target->cpu_list);
+
+ if (evlist->cpus == NULL)
+ goto out_delete_threads;
+
+ return 0;
+
+out_delete_threads:
+ thread_map__delete(evlist->threads);
+ return -1;
+}
+
+int perf_evlist__apply_filters(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int err = 0;
+ const int ncpus = cpu_map__nr(evlist->cpus),
+ nthreads = thread_map__nr(evlist->threads);
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->filter == NULL)
+ continue;
+
+ err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
+{
+ struct perf_evsel *evsel;
+ int err = 0;
+ const int ncpus = cpu_map__nr(evlist->cpus),
+ nthreads = thread_map__nr(evlist->threads);
+
+ evlist__for_each(evlist, evsel) {
+ err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos;
+
+ if (evlist->nr_entries == 1)
+ return true;
+
+ if (evlist->id_pos < 0 || evlist->is_pos < 0)
+ return false;
+
+ evlist__for_each(evlist, pos) {
+ if (pos->id_pos != evlist->id_pos ||
+ pos->is_pos != evlist->is_pos)
+ return false;
+ }
+
+ return true;
+}
+
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->combined_sample_type)
+ return evlist->combined_sample_type;
+
+ evlist__for_each(evlist, evsel)
+ evlist->combined_sample_type |= evsel->attr.sample_type;
+
+ return evlist->combined_sample_type;
+}
+
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ evlist->combined_sample_type = 0;
+ return __perf_evlist__combined_sample_type(evlist);
+}
+
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+ u64 read_format = first->attr.read_format;
+ u64 sample_type = first->attr.sample_type;
+
+ evlist__for_each(evlist, pos) {
+ if (read_format != pos->attr.read_format)
+ return false;
+ }
+
+ /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
+ if ((sample_type & PERF_SAMPLE_READ) &&
+ !(read_format & PERF_FORMAT_ID)) {
+ return false;
+ }
+
+ return true;
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ return first->attr.read_format;
+}
+
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ struct perf_sample *data;
+ u64 sample_type;
+ u16 size = 0;
+
+ if (!first->attr.sample_id_all)
+ goto out;
+
+ sample_type = first->attr.sample_type;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(data->tid) * 2;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(data->time);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(data->id);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(data->stream_id);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(data->cpu) * 2;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
+out:
+ return size;
+}
+
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+
+ evlist__for_each_continue(evlist, pos) {
+ if (first->attr.sample_id_all != pos->attr.sample_id_all)
+ return false;
+ }
+
+ return true;
+}
+
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ return first->attr.sample_id_all;
+}
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ evlist->selected = evsel;
+}
+
+void perf_evlist__close(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int ncpus = cpu_map__nr(evlist->cpus);
+ int nthreads = thread_map__nr(evlist->threads);
+ int n;
+
+ evlist__for_each_reverse(evlist, evsel) {
+ n = evsel->cpus ? evsel->cpus->nr : ncpus;
+ perf_evsel__close(evsel, n, nthreads);
+ }
+}
+
+int perf_evlist__open(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int err;
+
+ perf_evlist__update_id_pos(evlist);
+
+ evlist__for_each(evlist, evsel) {
+ err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
+ if (err < 0)
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ perf_evlist__close(evlist);
+ errno = -err;
+ return err;
+}
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
+ const char *argv[], bool pipe_output,
+ void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
+{
+ int child_ready_pipe[2], go_pipe[2];
+ char bf;
+
+ if (pipe(child_ready_pipe) < 0) {
+ perror("failed to create 'ready' pipe");
+ return -1;
+ }
+
+ if (pipe(go_pipe) < 0) {
+ perror("failed to create 'go' pipe");
+ goto out_close_ready_pipe;
+ }
+
+ evlist->workload.pid = fork();
+ if (evlist->workload.pid < 0) {
+ perror("failed to fork");
+ goto out_close_pipes;
+ }
+
+ if (!evlist->workload.pid) {
+ if (pipe_output)
+ dup2(2, 1);
+
+ signal(SIGTERM, SIG_DFL);
+
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &bf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ if (exec_error) {
+ union sigval val;
+
+ val.sival_int = errno;
+ if (sigqueue(getppid(), SIGUSR1, val))
+ perror(argv[0]);
+ } else
+ perror(argv[0]);
+ exit(-1);
+ }
+
+ if (exec_error) {
+ struct sigaction act = {
+ .sa_flags = SA_SIGINFO,
+ .sa_sigaction = exec_error,
+ };
+ sigaction(SIGUSR1, &act, NULL);
+ }
+
+ if (target__none(target))
+ evlist->threads->map[0] = evlist->workload.pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &bf, 1) == -1) {
+ perror("unable to read pipe");
+ goto out_close_pipes;
+ }
+
+ fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
+ evlist->workload.cork_fd = go_pipe[1];
+ close(child_ready_pipe[0]);
+ return 0;
+
+out_close_pipes:
+ close(go_pipe[0]);
+ close(go_pipe[1]);
+out_close_ready_pipe:
+ close(child_ready_pipe[0]);
+ close(child_ready_pipe[1]);
+ return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+ if (evlist->workload.cork_fd > 0) {
+ char bf = 0;
+ int ret;
+ /*
+ * Remove the cork, let it rip!
+ */
+ ret = write(evlist->workload.cork_fd, &bf, 1);
+ if (ret < 0)
+ perror("enable to write to pipe");
+
+ close(evlist->workload.cork_fd);
+ return ret;
+ }
+
+ return 0;
+}
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
+ return perf_evsel__parse_sample(evsel, event, sample);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
+{
+ struct perf_evsel *evsel;
+ size_t printed = 0;
+
+ evlist__for_each(evlist, evsel) {
+ printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
+ perf_evsel__name(evsel));
+ }
+
+ return printed + fprintf(fp, "\n");
+}
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ char sbuf[128];
+
+ switch (err) {
+ case ENOENT:
+ scnprintf(buf, size, "%s",
+ "Error:\tUnable to find debugfs\n"
+ "Hint:\tWas your kernel was compiled with debugfs support?\n"
+ "Hint:\tIs the debugfs filesystem mounted?\n"
+ "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+ break;
+ case EACCES:
+ scnprintf(buf, size,
+ "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
+ "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+ debugfs_mountpoint, debugfs_mountpoint);
+ break;
+ default:
+ scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+ break;
+ }
+
+ return 0;
+}
+
+int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ int printed, value;
+ char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+
+ switch (err) {
+ case EACCES:
+ case EPERM:
+ printed = scnprintf(buf, size,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
+
+ value = perf_event_paranoid();
+
+ printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
+
+ if (value >= 2) {
+ printed += scnprintf(buf + printed, size - printed,
+ "For your workloads it needs to be <= 1\nHint:\t");
+ }
+ printed += scnprintf(buf + printed, size - printed,
+ "For system wide tracing it needs to be set to -1");
+
+ printed += scnprintf(buf + printed, size - printed,
+ ".\nHint:\tThe current value is %d.", value);
+ break;
+ default:
+ scnprintf(buf, size, "%s", emsg);
+ break;
+ }
+
+ return 0;
+}
+
+void perf_evlist__to_front(struct perf_evlist *evlist,
+ struct perf_evsel *move_evsel)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(move);
+
+ if (move_evsel == perf_evlist__first(evlist))
+ return;
+
+ evlist__for_each_safe(evlist, n, evsel) {
+ if (evsel->leader == move_evsel->leader)
+ list_move_tail(&evsel->node, &move);
+ }
+
+ list_splice(&move, &evlist->entries);
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
new file mode 100644
index 00000000000..f5173cd6369
--- /dev/null
+++ b/tools/perf/util/evlist.h
@@ -0,0 +1,265 @@
+#ifndef __PERF_EVLIST_H
+#define __PERF_EVLIST_H 1
+
+#include <linux/list.h>
+#include <stdio.h>
+#include "../perf.h"
+#include "event.h"
+#include "evsel.h"
+#include "util.h"
+#include <unistd.h>
+
+struct pollfd;
+struct thread_map;
+struct cpu_map;
+struct record_opts;
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+struct perf_mmap {
+ void *base;
+ int mask;
+ unsigned int prev;
+ char event_copy[PERF_SAMPLE_MAX_SIZE];
+};
+
+struct perf_evlist {
+ struct list_head entries;
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ int nr_entries;
+ int nr_groups;
+ int nr_fds;
+ int nr_mmaps;
+ size_t mmap_len;
+ int id_pos;
+ int is_pos;
+ u64 combined_sample_type;
+ struct {
+ int cork_fd;
+ pid_t pid;
+ } workload;
+ bool overwrite;
+ struct perf_mmap *mmap;
+ struct pollfd *pollfd;
+ struct thread_map *threads;
+ struct cpu_map *cpus;
+ struct perf_evsel *selected;
+};
+
+struct perf_evsel_str_handler {
+ const char *name;
+ void *handler;
+};
+
+struct perf_evlist *perf_evlist__new(void);
+struct perf_evlist *perf_evlist__new_default(void);
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evlist__exit(struct perf_evlist *evlist);
+void perf_evlist__delete(struct perf_evlist *evlist);
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
+int perf_evlist__add_default(struct perf_evlist *evlist);
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs);
+
+#define perf_evlist__add_default_attrs(evlist, array) \
+ __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+
+int perf_evlist__add_newtp(struct perf_evlist *evlist,
+ const char *sys, const char *name, void *handler);
+
+int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name);
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, u64 id);
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+
+int perf_evlist__open(struct perf_evlist *evlist);
+void perf_evlist__close(struct perf_evlist *evlist);
+
+void perf_evlist__set_id_pos(struct perf_evlist *evlist);
+bool perf_can_sample_identifier(void);
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
+int record_opts__config(struct record_opts *opts);
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct target *target,
+ const char *argv[], bool pipe_output,
+ void (*exec_error)(int signo, siginfo_t *info,
+ void *ucontext));
+int perf_evlist__start_workload(struct perf_evlist *evlist);
+
+int perf_evlist__parse_mmap_pages(const struct option *opt,
+ const char *str,
+ int unset);
+
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite);
+void perf_evlist__munmap(struct perf_evlist *evlist);
+
+void perf_evlist__disable(struct perf_evlist *evlist);
+void perf_evlist__enable(struct perf_evlist *evlist);
+
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
+static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ evlist->cpus = cpus;
+ evlist->threads = threads;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
+int perf_evlist__apply_filters(struct perf_evlist *evlist);
+
+void __perf_evlist__set_leader(struct list_head *list);
+void perf_evlist__set_leader(struct perf_evlist *evlist);
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample);
+
+bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
+
+void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries);
+
+static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.prev, struct perf_evsel, node);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
+int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
+
+static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+{
+ struct perf_event_mmap_page *pc = mm->base;
+ int head = ACCESS_ONCE(pc->data_head);
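+ /* Make sure the head is read before any of the ring data it covers. */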
+ rmb();
+ return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md,
+ unsigned long tail)
+{
+ struct perf_event_mmap_page *pc = md->base;
+
+ /*
+ * ensure all reads are done before we write the tail out.
+ */
+ mb();
+ pc->data_tail = tail;
+}
+
+bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
+void perf_evlist__to_front(struct perf_evlist *evlist,
+ struct perf_evsel *move_evsel);
+
+/**
+ * __evlist__for_each - iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each(list, evsel) \
+ list_for_each_entry(evsel, list, node)
+
+/**
+ * evlist__for_each - iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each(evlist, evsel) \
+ __evlist__for_each(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_continue - continue iteration thru all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_continue(list, evsel) \
+ list_for_each_entry_continue(evsel, list, node)
+
+/**
+ * evlist__for_each_continue - continue iteration thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each_continue(evlist, evsel) \
+ __evlist__for_each_continue(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_reverse(list, evsel) \
+ list_for_each_entry_reverse(evsel, list, node)
+
+/**
+ * evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each_reverse(evlist, evsel) \
+ __evlist__for_each_reverse(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_safe - safely iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @tmp: struct evsel temp iterator
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_safe(list, tmp, evsel) \
+ list_for_each_entry_safe(evsel, tmp, list, node)
+
+/**
+ * evlist__for_each_safe - safely iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ * @tmp: struct evsel temp iterator
+ */
+#define evlist__for_each_safe(evlist, tmp, evsel) \
+ __evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
+
+#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
new file mode 100644
index 00000000000..8606175fe1e
--- /dev/null
+++ b/tools/perf/util/evsel.c
@@ -0,0 +1,2036 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <byteswap.h>
+#include <linux/bitops.h>
+#include <api/fs/debugfs.h>
+#include <traceevent/event-parse.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <sys/resource.h>
+#include "asm/bug.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "util.h"
+#include "cpumap.h"
+#include "thread_map.h"
+#include "target.h"
+#include "perf_regs.h"
+#include "debug.h"
+#include "trace-event.h"
+
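+/*
+ * Attribute bits the running kernel turned out not to support;
+ * __perf_evsel__open() sets these and retries without them.
+ */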
+static struct {
+ bool sample_id_all;
+ bool exclude_guest;
+ bool mmap2;
+} perf_missing_features;
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+
+int __perf_evsel__sample_size(u64 sample_type)
+{
+ u64 mask = sample_type & PERF_SAMPLE_MASK;
+ int size = 0;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (mask & (1ULL << i))
+ size++;
+ }
+
+ size *= sizeof(u64);
+
+ return size;
+}
+
+/**
+ * __perf_evsel__calc_id_pos - calculate id_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct
+ * sample_event.
+ */
+static int __perf_evsel__calc_id_pos(u64 sample_type)
+{
+ int idx = 0;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 0;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_IP)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_ADDR)
+ idx += 1;
+
+ return idx;
+}
+
+/**
+ * __perf_evsel__calc_is_pos - calculate is_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position (counting backwards) of the event id
+ * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. if
+ * sample_id_all is used, there is an id sample appended to non-sample events.
+ */
+static int __perf_evsel__calc_is_pos(u64 sample_type)
+{
+ int idx = 1;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 1;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ idx += 1;
+
+ return idx;
+}
+
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
+{
+ evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
+ evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
+}
+
+void hists__init(struct hists *hists)
+{
+ memset(hists, 0, sizeof(*hists));
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in = &hists->entries_in_array[0];
+ hists->entries_collapsed = RB_ROOT;
+ hists->entries = RB_ROOT;
+ pthread_mutex_init(&hists->lock, NULL);
+}
+
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit)
+{
+ if (!(evsel->attr.sample_type & bit)) {
+ evsel->attr.sample_type |= bit;
+ evsel->sample_size += sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
+ }
+}
+
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit)
+{
+ if (evsel->attr.sample_type & bit) {
+ evsel->attr.sample_type &= ~bit;
+ evsel->sample_size -= sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
+ }
+}
+
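+/*
+ * PERF_SAMPLE_IDENTIFIER puts the event id at a fixed position in every
+ * record, so events with different sample_types can share a buffer; on
+ * older kernels fall back to PERF_SAMPLE_ID.
+ */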
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool can_sample_identifier)
+{
+ if (can_sample_identifier) {
+ perf_evsel__reset_sample_bit(evsel, ID);
+ perf_evsel__set_sample_bit(evsel, IDENTIFIER);
+ } else {
+ perf_evsel__set_sample_bit(evsel, ID);
+ }
+ evsel->attr.read_format |= PERF_FORMAT_ID;
+}
+
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx)
+{
+ evsel->idx = idx;
+ evsel->attr = *attr;
+ evsel->leader = evsel;
+ evsel->unit = "";
+ evsel->scale = 1.0;
+ INIT_LIST_HEAD(&evsel->node);
+ hists__init(&evsel->hists);
+ evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
+ perf_evsel__calc_id_pos(evsel);
+}
+
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL)
+ perf_evsel__init(evsel, attr, idx);
+
+ return evsel;
+}
+
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL) {
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
+ };
+
+ if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
+ goto out_free;
+
+ evsel->tp_format = trace_event__tp_format(sys, name);
+ if (evsel->tp_format == NULL)
+ goto out_free;
+
+ event_attr_init(&attr);
+ attr.config = evsel->tp_format->id;
+ attr.sample_period = 1;
+ perf_evsel__init(evsel, &attr, idx);
+ }
+
+ return evsel;
+
+out_free:
+ zfree(&evsel->name);
+ free(evsel);
+ return NULL;
+}
+
+const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+ "cycles",
+ "instructions",
+ "cache-references",
+ "cache-misses",
+ "branches",
+ "branch-misses",
+ "bus-cycles",
+ "stalled-cycles-frontend",
+ "stalled-cycles-backend",
+ "ref-cycles",
+};
+
+static const char *__perf_evsel__hw_name(u64 config)
+{
+ if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+ return perf_evsel__hw_names[config];
+
+ return "unknown-hardware";
+}
+
+static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int colon = 0, r = 0;
+ struct perf_event_attr *attr = &evsel->attr;
+ bool exclude_guest_default = false;
+
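+ /*
+ * Print the modifier character for each context that is not excluded,
+ * remembering where the ':' separator has to go.
+ */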
+#define MOD_PRINT(context, mod) do { \
+ if (!attr->exclude_##context) { \
+ if (!colon) colon = ++r; \
+ r += scnprintf(bf + r, size - r, "%c", mod); \
+ } } while(0)
+
+ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+ MOD_PRINT(kernel, 'k');
+ MOD_PRINT(user, 'u');
+ MOD_PRINT(hv, 'h');
+ exclude_guest_default = true;
+ }
+
+ if (attr->precise_ip) {
+ if (!colon)
+ colon = ++r;
+ r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+ exclude_guest_default = true;
+ }
+
+ if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+ MOD_PRINT(host, 'H');
+ MOD_PRINT(guest, 'G');
+ }
+#undef MOD_PRINT
+ if (colon)
+ bf[colon - 1] = ':';
+ return r;
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+ "cpu-clock",
+ "task-clock",
+ "page-faults",
+ "context-switches",
+ "cpu-migrations",
+ "minor-faults",
+ "major-faults",
+ "alignment-faults",
+ "emulation-faults",
+ "dummy",
+};
+
+static const char *__perf_evsel__sw_name(u64 config)
+{
+ if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
+ return perf_evsel__sw_names[config];
+ return "unknown-software";
+}
+
+static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
+{
+ int r;
+
+ r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
+
+ if (type & HW_BREAKPOINT_R)
+ r += scnprintf(bf + r, size - r, "r");
+
+ if (type & HW_BREAKPOINT_W)
+ r += scnprintf(bf + r, size - r, "w");
+
+ if (type & HW_BREAKPOINT_X)
+ r += scnprintf(bf + r, size - r, "x");
+
+ return r;
+}
+
+static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "L1-dcache", "l1-d", "l1d", "L1-data", },
+ { "L1-icache", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2", },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
+ { "node", },
+};
+
+const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
+};
+
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+ [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+};
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
+{
+ if (perf_evsel__hw_cache_stat[type] & COP(op))
+ return true; /* valid */
+ else
+ return false; /* invalid */
+}
+
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+ char *bf, size_t size)
+{
+ if (result) {
+ return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
+ perf_evsel__hw_cache_op[op][0],
+ perf_evsel__hw_cache_result[result][0]);
+ }
+
+ return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
+ perf_evsel__hw_cache_op[op][1]);
+}
+
+static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
+{
+ u8 op, result, type = (config >> 0) & 0xff;
+ const char *err = "unknown-ext-hardware-cache-type";
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX)
+ goto out_err;
+
+ op = (config >> 8) & 0xff;
+ err = "unknown-ext-hardware-cache-op";
+ if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ goto out_err;
+
+ result = (config >> 16) & 0xff;
+ err = "unknown-ext-hardware-cache-result";
+ if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ goto out_err;
+
+ err = "invalid-cache";
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ goto out_err;
+
+ return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
+out_err:
+ return scnprintf(bf, size, "%s", err);
+}
+
+static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
+ return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+ return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+const char *perf_evsel__name(struct perf_evsel *evsel)
+{
+ char bf[128];
+
+ if (evsel->name)
+ return evsel->name;
+
+ switch (evsel->attr.type) {
+ case PERF_TYPE_RAW:
+ perf_evsel__raw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_HARDWARE:
+ perf_evsel__hw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_HW_CACHE:
+ perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_SOFTWARE:
+ perf_evsel__sw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_TRACEPOINT:
+ scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
+ break;
+
+ case PERF_TYPE_BREAKPOINT:
+ perf_evsel__bp_name(evsel, bf, sizeof(bf));
+ break;
+
+ default:
+ scnprintf(bf, sizeof(bf), "unknown attr type: %d",
+ evsel->attr.type);
+ break;
+ }
+
+ evsel->name = strdup(bf);
+
+ return evsel->name ?: "unknown";
+}
+
+const char *perf_evsel__group_name(struct perf_evsel *evsel)
+{
+ return evsel->group_name ?: "anon group";
+}
+
+int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
+{
+ int ret;
+ struct perf_evsel *pos;
+ const char *group_name = perf_evsel__group_name(evsel);
+
+ ret = scnprintf(buf, size, "%s", group_name);
+
+ ret += scnprintf(buf + ret, size - ret, " { %s",
+ perf_evsel__name(evsel));
+
+ for_each_group_member(pos, evsel)
+ ret += scnprintf(buf + ret, size - ret, ", %s",
+ perf_evsel__name(pos));
+
+ ret += scnprintf(buf + ret, size - ret, " }");
+
+ return ret;
+}
+
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+ struct record_opts *opts)
+{
+ bool function = perf_evsel__is_function_event(evsel);
+ struct perf_event_attr *attr = &evsel->attr;
+
+ perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
+ if (opts->call_graph == CALLCHAIN_DWARF) {
+ if (!function) {
+ perf_evsel__set_sample_bit(evsel, REGS_USER);
+ perf_evsel__set_sample_bit(evsel, STACK_USER);
+ attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_stack_user = opts->stack_dump_size;
+ attr->exclude_callchain_user = 1;
+ } else {
+ pr_info("Cannot use DWARF unwind for function trace event,"
+ " falling back to framepointers.\n");
+ }
+ }
+
+ if (function) {
+ pr_info("Disabling user space callchains for function trace event.\n");
+ attr->exclude_callchain_user = 1;
+ }
+}
+
+/*
+ * The enable_on_exec/disabled value strategy:
+ *
+ * 1) For any type of traced program:
+ * - all independent events and group leaders are disabled
+ * - all group members are enabled
+ *
+ * Group members are ruled by group leaders. They need to
+ * be enabled, because the group scheduling relies on that.
+ *
+ * 2) For traced programs executed by perf:
+ * - all independent events and group leaders have
+ * enable_on_exec set
+ * - we don't specifically enable or disable any event during
+ * the record command
+ *
+ * Independent events and group leaders are initially disabled
+ * and get enabled by exec. Group members are ruled by group
+ * leaders as stated in 1).
+ *
+ * 3) For traced programs attached by perf (pid/tid):
+ * - we specifically enable or disable all events during
+ * the record command
+ *
+ * When attaching events to an already running traced program we
+ * enable/disable events explicitly, as there is no
+ * initial exec of the traced program.
+ */
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
+{
+ struct perf_evsel *leader = evsel->leader;
+ struct perf_event_attr *attr = &evsel->attr;
+ int track = !evsel->idx; /* only the first counter needs these */
+ bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
+
+ attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
+ attr->inherit = !opts->no_inherit;
+
+ perf_evsel__set_sample_bit(evsel, IP);
+ perf_evsel__set_sample_bit(evsel, TID);
+
+ if (evsel->sample_read) {
+ perf_evsel__set_sample_bit(evsel, READ);
+
+ /*
+ * We need ID even in the case of a single event, because
+ * PERF_SAMPLE_READ processes ID specific data.
+ */
+ perf_evsel__set_sample_id(evsel, false);
+
+ /*
+ * Apply the group read format only if we belong to a group
+ * with more than one member.
+ */
+ if (leader->nr_members > 1) {
+ attr->read_format |= PERF_FORMAT_GROUP;
+ attr->inherit = 0;
+ }
+ }
+
+ /*
+ * We give some events a default sample interval, but keep it
+ * a weak assumption that the user can override.
+ */
+ if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
+ opts->user_interval != ULLONG_MAX)) {
+ if (opts->freq) {
+ perf_evsel__set_sample_bit(evsel, PERIOD);
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+ }
+
+ /*
+ * Disable sampling for all group members other
+ * than leader in case leader 'leads' the sampling.
+ */
+ if ((leader != evsel) && leader->sample_read) {
+ attr->sample_freq = 0;
+ attr->sample_period = 0;
+ }
+
+ if (opts->no_samples)
+ attr->sample_freq = 0;
+
+ if (opts->inherit_stat)
+ attr->inherit_stat = 1;
+
+ if (opts->sample_address) {
+ perf_evsel__set_sample_bit(evsel, ADDR);
+ attr->mmap_data = track;
+ }
+
+ if (opts->call_graph_enabled)
+ perf_evsel__config_callgraph(evsel, opts);
+
+ if (target__has_cpu(&opts->target))
+ perf_evsel__set_sample_bit(evsel, CPU);
+
+ if (opts->period)
+ perf_evsel__set_sample_bit(evsel, PERIOD);
+
+ if (!perf_missing_features.sample_id_all &&
+ (opts->sample_time || !opts->no_inherit ||
+ target__has_cpu(&opts->target) || per_cpu))
+ perf_evsel__set_sample_bit(evsel, TIME);
+
+ if (opts->raw_samples) {
+ perf_evsel__set_sample_bit(evsel, TIME);
+ perf_evsel__set_sample_bit(evsel, RAW);
+ perf_evsel__set_sample_bit(evsel, CPU);
+ }
+
+ if (opts->sample_address)
+ perf_evsel__set_sample_bit(evsel, DATA_SRC);
+
+ if (opts->no_buffering) {
+ attr->watermark = 0;
+ attr->wakeup_events = 1;
+ }
+ if (opts->branch_stack) {
+ perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+ attr->branch_sample_type = opts->branch_stack;
+ }
+
+ if (opts->sample_weight)
+ perf_evsel__set_sample_bit(evsel, WEIGHT);
+
+ attr->mmap = track;
+ attr->mmap2 = track && !perf_missing_features.mmap2;
+ attr->comm = track;
+
+ if (opts->sample_transaction)
+ perf_evsel__set_sample_bit(evsel, TRANSACTION);
+
+ /*
+ * XXX see the function comment above
+ *
+ * Disabling only independent events or group leaders,
+ * keeping group members enabled.
+ */
+ if (perf_evsel__is_group_leader(evsel))
+ attr->disabled = 1;
+
+ /*
+ * Set enable_on_exec for independent events and group
+ * leaders of programs executed (forked) by perf.
+ */
+ if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
+ !opts->initial_delay)
+ attr->enable_on_exec = 1;
+}
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ int cpu, thread;
+ evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+ if (evsel->fd) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ FD(evsel, cpu, thread) = -1;
+ }
+ }
+ }
+
+ return evsel->fd != NULL ? 0 : -ENOMEM;
+}
+
+static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
+ int ioc, void *arg)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ int fd = FD(evsel, cpu, thread),
+ err = ioctl(fd, ioc, arg);
+
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_SET_FILTER,
+ (void *)filter);
+}
+
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_ENABLE,
+ 0);
+}
+
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+ if (evsel->sample_id == NULL)
+ return -ENOMEM;
+
+ evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
+ if (evsel->id == NULL) {
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
+{
+ memset(evsel->counts, 0, (sizeof(*evsel->counts) +
+ (ncpus * sizeof(struct perf_counts_values))));
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+{
+ evsel->counts = zalloc((sizeof(*evsel->counts) +
+ (ncpus * sizeof(struct perf_counts_values))));
+ return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_fd(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->fd);
+ evsel->fd = NULL;
+}
+
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ zfree(&evsel->id);
+}
+
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < ncpus; cpu++)
+ for (thread = 0; thread < nthreads; ++thread) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+}
+
+void perf_evsel__free_counts(struct perf_evsel *evsel)
+{
+ zfree(&evsel->counts);
+}
+
+void perf_evsel__exit(struct perf_evsel *evsel)
+{
+ assert(list_empty(&evsel->node));
+ perf_evsel__free_fd(evsel);
+ perf_evsel__free_id(evsel);
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+ perf_evsel__exit(evsel);
+ close_cgroup(evsel->cgrp);
+ zfree(&evsel->group_name);
+ if (evsel->tp_format)
+ pevent_free_format(evsel->tp_format);
+ zfree(&evsel->name);
+ free(evsel);
+}
+
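+/*
+ * When prev_raw_counts is set (e.g. by perf stat in interval mode), turn the
+ * cumulative values just read into deltas since the previous read and stash
+ * the new totals for next time.
+ */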
+static inline void compute_deltas(struct perf_evsel *evsel,
+ int cpu,
+ struct perf_counts_values *count)
+{
+ struct perf_counts_values tmp;
+
+ if (!evsel->prev_raw_counts)
+ return;
+
+ if (cpu == -1) {
+ tmp = evsel->prev_raw_counts->aggr;
+ evsel->prev_raw_counts->aggr = *count;
+ } else {
+ tmp = evsel->prev_raw_counts->cpu[cpu];
+ evsel->prev_raw_counts->cpu[cpu] = *count;
+ }
+
+ count->val = count->val - tmp.val;
+ count->ena = count->ena - tmp.ena;
+ count->run = count->run - tmp.run;
+}
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread, bool scale)
+{
+ struct perf_counts_values count;
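+ /* A scaled read returns value, time_enabled and time_running: 3 u64s. */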
+ size_t nv = scale ? 3 : 1;
+
+ if (FD(evsel, cpu, thread) < 0)
+ return -EINVAL;
+
+ if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
+ return -ENOMEM;
+
+ if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
+ return -errno;
+
+ compute_deltas(evsel, cpu, &count);
+
+ if (scale) {
+ if (count.run == 0)
+ count.val = 0;
+ else if (count.run < count.ena)
+ count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
+ } else
+ count.ena = count.run = 0;
+
+ evsel->counts->cpu[cpu] = count;
+ return 0;
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel,
+ int ncpus, int nthreads, bool scale)
+{
+ size_t nv = scale ? 3 : 1;
+ int cpu, thread;
+ struct perf_counts_values *aggr = &evsel->counts->aggr, count;
+
+ aggr->val = aggr->ena = aggr->run = 0;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ if (FD(evsel, cpu, thread) < 0)
+ continue;
+
+ if (readn(FD(evsel, cpu, thread),
+ &count, nv * sizeof(u64)) < 0)
+ return -errno;
+
+ aggr->val += count.val;
+ if (scale) {
+ aggr->ena += count.ena;
+ aggr->run += count.run;
+ }
+ }
+ }
+
+ compute_deltas(evsel, -1, aggr);
+
+ evsel->counts->scaled = 0;
+ if (scale) {
+ if (aggr->run == 0) {
+ evsel->counts->scaled = -1;
+ aggr->val = 0;
+ return 0;
+ }
+
+ if (aggr->run < aggr->ena) {
+ evsel->counts->scaled = 1;
+ aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
+ }
+ } else
+ aggr->ena = aggr->run = 0;
+
+ return 0;
+}
+
+static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int fd;
+
+ if (perf_evsel__is_group_leader(evsel))
+ return -1;
+
+ /*
+ * The leader must already have been processed/opened;
+ * if not, it's a bug.
+ */
+ BUG_ON(!leader->fd);
+
+ fd = FD(leader, cpu, thread);
+ BUG_ON(fd == -1);
+
+ return fd;
+}
+
+#define __PRINT_ATTR(fmt, cast, field) \
+ fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field)
+
+#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field)
+#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field)
+#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field)
+#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field)
+
+#define PRINT_ATTR2N(name1, field1, name2, field2) \
+ fprintf(fp, " %-19s %u %-19s %u\n", \
+ name1, attr->field1, name2, attr->field2)
+
+#define PRINT_ATTR2(field1, field2) \
+ PRINT_ATTR2N(#field1, field1, #field2, field2)
+
+static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
+{
+ size_t ret = 0;
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+ ret += fprintf(fp, "perf_event_attr:\n");
+
+ ret += PRINT_ATTR_U32(type);
+ ret += PRINT_ATTR_U32(size);
+ ret += PRINT_ATTR_X64(config);
+ ret += PRINT_ATTR_U64(sample_period);
+ ret += PRINT_ATTR_U64(sample_freq);
+ ret += PRINT_ATTR_X64(sample_type);
+ ret += PRINT_ATTR_X64(read_format);
+
+ ret += PRINT_ATTR2(disabled, inherit);
+ ret += PRINT_ATTR2(pinned, exclusive);
+ ret += PRINT_ATTR2(exclude_user, exclude_kernel);
+ ret += PRINT_ATTR2(exclude_hv, exclude_idle);
+ ret += PRINT_ATTR2(mmap, comm);
+ ret += PRINT_ATTR2(freq, inherit_stat);
+ ret += PRINT_ATTR2(enable_on_exec, task);
+ ret += PRINT_ATTR2(watermark, precise_ip);
+ ret += PRINT_ATTR2(mmap_data, sample_id_all);
+ ret += PRINT_ATTR2(exclude_host, exclude_guest);
+ ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
+ "excl.callchain_user", exclude_callchain_user);
+ ret += PRINT_ATTR_U32(mmap2);
+
+ ret += PRINT_ATTR_U32(wakeup_events);
+ ret += PRINT_ATTR_U32(wakeup_watermark);
+ ret += PRINT_ATTR_X32(bp_type);
+ ret += PRINT_ATTR_X64(bp_addr);
+ ret += PRINT_ATTR_X64(config1);
+ ret += PRINT_ATTR_U64(bp_len);
+ ret += PRINT_ATTR_X64(config2);
+ ret += PRINT_ATTR_X64(branch_sample_type);
+ ret += PRINT_ATTR_X64(sample_regs_user);
+ ret += PRINT_ATTR_U32(sample_stack_user);
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+
+ return ret;
+}
+
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int cpu, thread;
+ unsigned long flags = 0;
+ int pid = -1, err;
+ enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+
+ if (evsel->fd == NULL &&
+ perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+ return -ENOMEM;
+
+ if (evsel->cgrp) {
+ flags = PERF_FLAG_PID_CGROUP;
+ pid = evsel->cgrp->fd;
+ }
+
+fallback_missing_features:
+ if (perf_missing_features.mmap2)
+ evsel->attr.mmap2 = 0;
+ if (perf_missing_features.exclude_guest)
+ evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+retry_sample_id:
+ if (perf_missing_features.sample_id_all)
+ evsel->attr.sample_id_all = 0;
+
+ if (verbose >= 2)
+ perf_event_attr__fprintf(&evsel->attr, stderr);
+
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+
+ for (thread = 0; thread < threads->nr; thread++) {
+ int group_fd;
+
+ if (!evsel->cgrp)
+ pid = threads->map[thread];
+
+ group_fd = get_group_fd(evsel, cpu, thread);
+retry_open:
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pid, cpus->map[cpu], group_fd, flags);
+
+ FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+ pid,
+ cpus->map[cpu],
+ group_fd, flags);
+ if (FD(evsel, cpu, thread) < 0) {
+ err = -errno;
+ pr_debug2("sys_perf_event_open failed, error %d\n",
+ err);
+ goto try_fallback;
+ }
+ set_rlimit = NO_CHANGE;
+ }
+ }
+
+ return 0;
+
+try_fallback:
+ /*
+ * perf stat needs between 5 and 22 fds per CPU. When we run out
+ * of them try to increase the limits.
+ */
+ if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
+ struct rlimit l;
+ int old_errno = errno;
+
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (set_rlimit == NO_CHANGE)
+ l.rlim_cur = l.rlim_max;
+ else {
+ l.rlim_cur = l.rlim_max + 1000;
+ l.rlim_max = l.rlim_cur;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+ set_rlimit++;
+ errno = old_errno;
+ goto retry_open;
+ }
+ }
+ errno = old_errno;
+ }
+
+ if (err != -EINVAL || cpu > 0 || thread > 0)
+ goto out_close;
+
+ if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
+ perf_missing_features.mmap2 = true;
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.exclude_guest &&
+ (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+ perf_missing_features.exclude_guest = true;
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.sample_id_all) {
+ perf_missing_features.sample_id_all = true;
+ goto retry_sample_id;
+ }
+
+out_close:
+ do {
+ while (--thread >= 0) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+ thread = threads->nr;
+ } while (--cpu >= 0);
+ return err;
+}
+
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ if (evsel->fd == NULL)
+ return;
+
+ perf_evsel__close_fd(evsel, ncpus, nthreads);
+ perf_evsel__free_fd(evsel);
+}
+
+static struct {
+ struct cpu_map map;
+ int cpus[1];
+} empty_cpu_map = {
+ .map.nr = 1,
+ .cpus = { -1, },
+};
+
+static struct {
+ struct thread_map map;
+ int threads[1];
+} empty_thread_map = {
+ .map.nr = 1,
+ .threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ if (cpus == NULL) {
+ /* Work around old compiler warnings about strict aliasing */
+ cpus = &empty_cpu_map.map;
+ }
+
+ if (threads == NULL)
+ threads = &empty_thread_map.map;
+
+ return __perf_evsel__open(evsel, cpus, threads);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus)
+{
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
+
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads)
+{
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
+}
+
+static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
+ const union perf_event *event,
+ struct perf_sample *sample)
+{
+ u64 type = evsel->attr.sample_type;
+ const u64 *array = event->sample.array;
+ bool swapped = evsel->needs_swap;
+ union u64_swap u;
+
+ array += ((event->header.size -
+ sizeof(event->header)) / sizeof(u64)) - 1;
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ sample->id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ }
+
+ sample->cpu = u.val32[0];
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ sample->stream_id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ sample->id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ sample->time = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ sample->pid = u.val32[0];
+ sample->tid = u.val32[1];
+ array--;
+ }
+
+ return 0;
+}
+
+static inline bool overflow(const void *endp, u16 max_size, const void *offset,
+ u64 size)
+{
+ return size > max_size || offset + size > endp;
+}
+
+#define OVERFLOW_CHECK(offset, size, max_size) \
+ do { \
+ if (overflow(endp, (max_size), (offset), (size))) \
+ return -EFAULT; \
+ } while (0)
+
+#define OVERFLOW_CHECK_u64(offset) \
+ OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
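+
+/*
+ * Illustrative note: OVERFLOW_CHECK() rejects a sample whose header.size
+ * is too small for the payload it claims to carry.  For a callchain with
+ * 'nr' entries, for example, the parser below does
+ *
+ *	sz = data->callchain->nr * sizeof(u64);
+ *	OVERFLOW_CHECK(array, sz, max_size);
+ *
+ * before advancing 'array' past the entries.
+ */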
+
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *data)
+{
+ u64 type = evsel->attr.sample_type;
+ bool swapped = evsel->needs_swap;
+ const u64 *array;
+ u16 max_size = event->header.size;
+ const void *endp = (void *)event + max_size;
+ u64 sz;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ memset(data, 0, sizeof(*data));
+ data->cpu = data->pid = data->tid = -1;
+ data->stream_id = data->id = data->time = -1ULL;
+ data->period = evsel->attr.sample_period;
+ data->weight = 0;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ if (!evsel->attr.sample_id_all)
+ return 0;
+ return perf_evsel__parse_id_sample(evsel, event, data);
+ }
+
+ array = event->sample.array;
+
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
+ if (evsel->sample_size + sizeof(event->header) > event->header.size)
+ return -EFAULT;
+
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ data->pid = u.val32[0];
+ data->tid = u.val32[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ data->addr = 0;
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ }
+
+ data->cpu = u.val32[0];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ u64 read_format = evsel->attr.read_format;
+
+ OVERFLOW_CHECK_u64(array);
+ if (read_format & PERF_FORMAT_GROUP)
+ data->read.group.nr = *array;
+ else
+ data->read.one.value = *array;
+
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_enabled = *array;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_running = *array;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ const u64 max_group_nr = UINT64_MAX /
+ sizeof(struct sample_read_value);
+
+ if (data->read.group.nr > max_group_nr)
+ return -EFAULT;
+ sz = data->read.group.nr *
+ sizeof(struct sample_read_value);
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->read.group.values =
+ (struct sample_read_value *)array;
+ array = (void *)array + sz;
+ } else {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.id = *array;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
+
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ if (data->callchain->nr > max_callchain_nr)
+ return -EFAULT;
+ sz = data->callchain->nr * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ OVERFLOW_CHECK_u64(array);
+ u.val64 = *array;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+ data->raw_size = u.val32[0];
+ array = (void *)array + sizeof(u32);
+
+ OVERFLOW_CHECK(array, data->raw_size, max_size);
+ data->raw_data = (void *)array;
+ array = (void *)array + data->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ const u64 max_branch_nr = UINT64_MAX /
+ sizeof(struct branch_entry);
+
+ OVERFLOW_CHECK_u64(array);
+ data->branch_stack = (struct branch_stack *)array++;
+
+ if (data->branch_stack->nr > max_branch_nr)
+ return -EFAULT;
+ sz = data->branch_stack->nr * sizeof(struct branch_entry);
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ OVERFLOW_CHECK_u64(array);
+ data->user_regs.abi = *array;
+ array++;
+
+ if (data->user_regs.abi) {
+ u64 mask = evsel->attr.sample_regs_user;
+
+ sz = hweight_long(mask) * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->user_regs.mask = mask;
+ data->user_regs.regs = (u64 *)array;
+ array = (void *)array + sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ OVERFLOW_CHECK_u64(array);
+ sz = *array++;
+
+ data->user_stack.offset = ((char *)(array - 1)
+ - (char *) event);
+
+ if (!sz) {
+ data->user_stack.size = 0;
+ } else {
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->user_stack.data = (char *)array;
+ array = (void *)array + sz;
+ OVERFLOW_CHECK_u64(array);
+ data->user_stack.size = *array++;
+ if (WARN_ONCE(data->user_stack.size > sz,
+ "user stack dump failure\n"))
+ return -EFAULT;
+ }
+ }
+
+ data->weight = 0;
+ if (type & PERF_SAMPLE_WEIGHT) {
+ OVERFLOW_CHECK_u64(array);
+ data->weight = *array;
+ array++;
+ }
+
+ data->data_src = PERF_MEM_DATA_SRC_NONE;
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ OVERFLOW_CHECK_u64(array);
+ data->data_src = *array;
+ array++;
+ }
+
+ data->transaction = 0;
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ OVERFLOW_CHECK_u64(array);
+ data->transaction = *array;
+ array++;
+ }
+
+ return 0;
+}
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 read_format)
+{
+ size_t sz, result = sizeof(struct sample_event);
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_IP)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TIME)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ADDR)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_CPU)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_READ) {
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ result += sizeof(u64);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ result += sizeof(u32);
+ result += sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ result += sizeof(u64);
+ if (sz) {
+ result += sz;
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TRANSACTION)
+ result += sizeof(u64);
+
+ return result;
+}
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 read_format,
+ const struct perf_sample *sample,
+ bool swapped)
+{
+ u64 *array;
+ size_t sz;
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IP) {
+ *array = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ *array = sample->read.group.nr;
+ else
+ *array = sample->read.one.value;
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ *array = sample->read.time_enabled;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ *array = sample->read.time_running;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ memcpy(array, sample->read.group.values, sz);
+ array = (void *)array + sz;
+ } else {
+ *array = sample->read.one.id;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ memcpy(array, sample->callchain, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u.val32[0] = sample->raw_size;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array = (void *)array + sizeof(u32);
+
+ memcpy(array, sample->raw_data, sample->raw_size);
+ array = (void *)array + sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ memcpy(array, sample->branch_stack, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ *array++ = sample->user_regs.abi;
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ memcpy(array, sample->user_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ *array++ = sz;
+ if (sz) {
+ memcpy(array, sample->user_stack.data, sz);
+ array = (void *)array + sz;
+ *array++ = sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ *array = sample->weight;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ *array = sample->data_src;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ *array = sample->transaction;
+ array++;
+ }
+
+ return 0;
+}
+
+struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
+{
+ return pevent_find_field(evsel->tp_format, name);
+}
+
+void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ int offset;
+
+ if (!field)
+ return NULL;
+
+ offset = field->offset;
+
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(sample->raw_data + field->offset);
+ offset &= 0xffff;
+ }
+
+ return sample->raw_data + offset;
+}
+
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ void *ptr;
+ u64 value;
+
+ if (!field)
+ return 0;
+
+ ptr = sample->raw_data + field->offset;
+
+ switch (field->size) {
+ case 1:
+ return *(u8 *)ptr;
+ case 2:
+ value = *(u16 *)ptr;
+ break;
+ case 4:
+ value = *(u32 *)ptr;
+ break;
+ case 8:
+ value = *(u64 *)ptr;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!evsel->needs_swap)
+ return value;
+
+ switch (field->size) {
+ case 2:
+ return bswap_16(value);
+ case 4:
+ return bswap_32(value);
+ case 8:
+ return bswap_64(value);
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (!*first) {
+ ret += fprintf(fp, ",");
+ } else {
+ ret += fprintf(fp, ":");
+ *first = false;
+ }
+
+ va_start(args, fmt);
+ ret += vfprintf(fp, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
+{
+ if (value == 0)
+ return 0;
+
+ return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
+}
+
+#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
+
+struct bit_names {
+ int bit;
+ const char *name;
+};
+
+static int bits__fprintf(FILE *fp, const char *field, u64 value,
+ struct bit_names *bits, bool *first)
+{
+ int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
+ bool first_bit = true;
+
+ do {
+ if (value & bits[i].bit) {
+ printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
+ first_bit = false;
+ }
+ } while (bits[++i].name != NULL);
+
+ return printed;
+}
+
+static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
+ bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
+ bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
+ bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER),
+ { .name = NULL, }
+ };
+#undef bit_name
+ return bits__fprintf(fp, "sample_type", value, bits, first);
+}
+
+static int read_format__fprintf(FILE *fp, bool *first, u64 value)
+{
+#define bit_name(n) { PERF_FORMAT_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
+ bit_name(ID), bit_name(GROUP),
+ { .name = NULL, }
+ };
+#undef bit_name
+ return bits__fprintf(fp, "read_format", value, bits, first);
+}
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+ struct perf_attr_details *details, FILE *fp)
+{
+ bool first = true;
+ int printed = 0;
+
+ if (details->event_group) {
+ struct perf_evsel *pos;
+
+ if (!perf_evsel__is_group_leader(evsel))
+ return 0;
+
+ if (evsel->nr_members > 1)
+ printed += fprintf(fp, "%s{", evsel->group_name ?: "");
+
+ printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+ for_each_group_member(pos, evsel)
+ printed += fprintf(fp, ",%s", perf_evsel__name(pos));
+
+ if (evsel->nr_members > 1)
+ printed += fprintf(fp, "}");
+ goto out;
+ }
+
+ printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+
+ if (details->verbose || details->freq) {
+ printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
+ (u64)evsel->attr.sample_freq);
+ }
+
+ if (details->verbose) {
+ if_print(type);
+ if_print(config);
+ if_print(config1);
+ if_print(config2);
+ if_print(size);
+ printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
+ if (evsel->attr.read_format)
+ printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
+ if_print(disabled);
+ if_print(inherit);
+ if_print(pinned);
+ if_print(exclusive);
+ if_print(exclude_user);
+ if_print(exclude_kernel);
+ if_print(exclude_hv);
+ if_print(exclude_idle);
+ if_print(mmap);
+ if_print(mmap2);
+ if_print(comm);
+ if_print(freq);
+ if_print(inherit_stat);
+ if_print(enable_on_exec);
+ if_print(task);
+ if_print(watermark);
+ if_print(precise_ip);
+ if_print(mmap_data);
+ if_print(sample_id_all);
+ if_print(exclude_host);
+ if_print(exclude_guest);
+ if_print(__reserved_1);
+ if_print(wakeup_events);
+ if_print(bp_type);
+ if_print(branch_sample_type);
+ }
+out:
+ fputc('\n', fp);
+ return ++printed;
+}
+
+bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+ char *msg, size_t msgsize)
+{
+ if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
+ evsel->attr.type == PERF_TYPE_HARDWARE &&
+ evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ /*
+ * If it's cycles then fall back to hrtimer based
+		 * cpu-clock-tick sw counter, which is always available even
+		 * when there is no PMU support.
+ *
+ * PPC returns ENXIO until 2.6.37 (behavior changed with commit
+ * b0a873e).
+ */
+ scnprintf(msg, msgsize, "%s",
+"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
+
+ evsel->attr.type = PERF_TYPE_SOFTWARE;
+ evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
+
+ zfree(&evsel->name);
+ return true;
+ }
+
+ return false;
+}
+
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
+ int err, char *msg, size_t size)
+{
+ switch (err) {
+ case EPERM:
+ case EACCES:
+ return scnprintf(msg, size,
+ "You may not have permission to collect %sstats.\n"
+ "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
+ " -1 - Not paranoid at all\n"
+ " 0 - Disallow raw tracepoint access for unpriv\n"
+ " 1 - Disallow cpu events for unpriv\n"
+ " 2 - Disallow kernel profiling for unpriv",
+ target->system_wide ? "system-wide " : "");
+ case ENOENT:
+ return scnprintf(msg, size, "The %s event is not supported.",
+ perf_evsel__name(evsel));
+ case EMFILE:
+ return scnprintf(msg, size, "%s",
+ "Too many events are opened.\n"
+ "Try again after reducing the number of events.");
+ case ENODEV:
+ if (target->cpu_list)
+ return scnprintf(msg, size, "%s",
+ "No such device - did you specify an out-of-range profile CPU?\n");
+ break;
+ case EOPNOTSUPP:
+ if (evsel->attr.precise_ip)
+ return scnprintf(msg, size, "%s",
+ "\'precise\' request may not be supported. Try removing 'p' modifier.");
+#if defined(__i386__) || defined(__x86_64__)
+ if (evsel->attr.type == PERF_TYPE_HARDWARE)
+ return scnprintf(msg, size, "%s",
+ "No hardware sampling interrupt available.\n"
+ "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
+#endif
+ break;
+ default:
+ break;
+ }
+
+ return scnprintf(msg, size,
+ "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n"
+ "/bin/dmesg may provide additional information.\n"
+ "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
+ err, strerror(err), perf_evsel__name(evsel));
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
new file mode 100644
index 00000000000..a52e9a5bb2d
--- /dev/null
+++ b/tools/perf/util/evsel.h
@@ -0,0 +1,365 @@
+#ifndef __PERF_EVSEL_H
+#define __PERF_EVSEL_H 1
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include "xyarray.h"
+#include "cgroup.h"
+#include "hist.h"
+#include "symbol.h"
+
+struct perf_counts_values {
+ union {
+ struct {
+ u64 val;
+ u64 ena;
+ u64 run;
+ };
+ u64 values[3];
+ };
+};
+
+struct perf_counts {
+ s8 scaled;
+ struct perf_counts_values aggr;
+ struct perf_counts_values cpu[];
+};
+
+struct perf_evsel;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+ struct hlist_node node;
+ u64 id;
+ struct perf_evsel *evsel;
+
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
+};
+
+/** struct perf_evsel - event selector
+ *
+ * @name - Can be set to retain the original event name passed by the user,
+ * so that when showing results in tools such as 'perf stat', we
+ * show the name used, not some alias.
+ * @id_pos: the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
+ * struct sample_event
+ * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
+ * is used there is an id sample appended to non-sample events
+ */
+struct perf_evsel {
+ struct list_head node;
+ struct perf_event_attr attr;
+ char *filter;
+ struct xyarray *fd;
+ struct xyarray *sample_id;
+ u64 *id;
+ struct perf_counts *counts;
+ struct perf_counts *prev_raw_counts;
+ int idx;
+ u32 ids;
+ struct hists hists;
+ char *name;
+ double scale;
+ const char *unit;
+ struct event_format *tp_format;
+ union {
+ void *priv;
+ off_t id_offset;
+ };
+ struct cgroup_sel *cgrp;
+ void *handler;
+ struct cpu_map *cpus;
+ unsigned int sample_size;
+ int id_pos;
+ int is_pos;
+ bool supported;
+ bool needs_swap;
+ /* parse modifier helper */
+ int exclude_GH;
+ int nr_members;
+ int sample_read;
+ struct perf_evsel *leader;
+ char *group_name;
+};
+
+union u64_swap {
+ u64 val64;
+ u32 val32[2];
+};
+
+#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
+
+struct cpu_map;
+struct thread_map;
+struct perf_evlist;
+struct record_opts;
+
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
+
+static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
+{
+ return perf_evsel__new_idx(attr, 0);
+}
+
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
+
+static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
+{
+ return perf_evsel__newtp_idx(sys, name, 0);
+}
+
+struct event_format *event_format__new(const char *sys, const char *name);
+
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx);
+void perf_evsel__exit(struct perf_evsel *evsel);
+void perf_evsel__delete(struct perf_evsel *evsel);
+
+void perf_evsel__config(struct perf_evsel *evsel,
+ struct record_opts *opts);
+
+int __perf_evsel__sample_size(u64 sample_type);
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
+
+#define PERF_EVSEL__MAX_ALIASES 8
+
+extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
+extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+ char *bf, size_t size);
+const char *perf_evsel__name(struct perf_evsel *evsel);
+
+const char *perf_evsel__group_name(struct perf_evsel *evsel);
+int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__free_id(struct perf_evsel *evsel);
+void perf_evsel__free_counts(struct perf_evsel *evsel);
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit);
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit);
+
+#define perf_evsel__set_sample_bit(evsel, bit) \
+ __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
+
+#define perf_evsel__reset_sample_bit(evsel, bit) \
+ __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
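+
+/*
+ * Illustrative example: the ##bit token pasting lets callers name the
+ * sample format bit without the PERF_SAMPLE_ prefix, e.g.
+ *
+ *	perf_evsel__set_sample_bit(evsel, CPU);
+ *
+ * expands to __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_CPU).
+ */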
+
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool use_sample_identifier);
+
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter);
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads);
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+struct perf_sample;
+
+void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name);
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name);
+
+static inline char *perf_evsel__strval(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *name)
+{
+ return perf_evsel__rawptr(evsel, sample, name);
+}
+
+struct format_field;
+
+struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
+
+#define perf_evsel__match(evsel, t, c) \
+ (evsel->attr.type == PERF_TYPE_##t && \
+ evsel->attr.config == PERF_COUNT_##c)
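+
+/*
+ * Illustrative example: perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)
+ * expands to the type/config comparison against PERF_TYPE_HARDWARE and
+ * PERF_COUNT_HW_CPU_CYCLES, the same test perf_evsel__fallback() performs
+ * before falling back from cycles to cpu-clock.
+ */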
+
+static inline bool perf_evsel__match2(struct perf_evsel *e1,
+ struct perf_evsel *e2)
+{
+ return (e1->attr.type == e2->attr.type) &&
+ (e1->attr.config == e2->attr.config);
+}
+
+#define perf_evsel__cmp(a, b) \
+ ((a) && \
+ (b) && \
+ (a)->attr.type == (b)->attr.type && \
+ (a)->attr.config == (b)->attr.config)
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread, bool scale);
+
+/**
+ * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread)
+{
+ return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+}
+
+/**
+ * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
+ int cpu, int thread)
+{
+ return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+}
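+
+/*
+ * Usage sketch (illustrative, assuming the event is already open on the
+ * given cpu/thread and counts were allocated): read one value with
+ * time_enabled/time_running scaling applied; the per-cpu result is kept
+ * in evsel->counts->cpu[cpu]:
+ *
+ *	if (perf_evsel__read_on_cpu_scaled(evsel, cpu, 0) == 0)
+ *		total += evsel->counts->cpu[cpu].val;
+ */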
+
+int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
+ bool scale);
+
+/**
+ * perf_evsel__read - Read the aggregate results on all CPUs
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
+{
+ return __perf_evsel__read(evsel, ncpus, nthreads, false);
+}
+
+/**
+ * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
+{
+ return __perf_evsel__read(evsel, ncpus, nthreads, true);
+}
+
+void hists__init(struct hists *hists);
+
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample);
+
+static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
+{
+ return list_entry(evsel->node.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
+{
+ return list_entry(evsel->node.prev, struct perf_evsel, node);
+}
+
+/**
+ * perf_evsel__is_group_leader - Return whether given evsel is a leader event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if @evsel is a group leader or a stand-alone event
+ */
+static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
+{
+ return evsel->leader == evsel;
+}
+
+/**
+ * perf_evsel__is_group_event - Return whether given evsel is a group event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true iff event group view is enabled and @evsel is an actual group
+ * leader which has other members in the group
+ */
+static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
+{
+ if (!symbol_conf.event_group)
+ return false;
+
+ return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
+}
+
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if event is function trace event
+ */
+static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
+ return evsel->name &&
+ !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
+struct perf_attr_details {
+ bool freq;
+ bool verbose;
+ bool event_group;
+};
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+ struct perf_attr_details *details, FILE *fp);
+
+bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+ char *msg, size_t msgsize);
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
+ int err, char *msg, size_t size);
+
+static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
+{
+ return evsel->idx - evsel->leader->idx;
+}
+
+#define for_each_group_member(_evsel, _leader) \
+for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
+ (_evsel) && (_evsel)->leader == (_leader); \
+ (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
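+
+/*
+ * Usage sketch (illustrative): walk the non-leader members of an event
+ * group, as perf_evsel__fprintf() does when printing a group:
+ *
+ *	struct perf_evsel *pos;
+ *
+ *	for_each_group_member(pos, leader)
+ *		printed += fprintf(fp, ",%s", perf_evsel__name(pos));
+ */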
+
+#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index 67eeff57156..7adf4ad15d8 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -11,31 +11,12 @@ static const char *argv0_path;
const char *system_path(const char *path)
{
-#ifdef RUNTIME_PREFIX
- static const char *prefix;
-#else
static const char *prefix = PREFIX;
-#endif
struct strbuf d = STRBUF_INIT;
if (is_absolute_path(path))
return path;
-#ifdef RUNTIME_PREFIX
- assert(argv0_path);
- assert(is_absolute_path(argv0_path));
-
- if (!prefix &&
- !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) &&
- !(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
- !(prefix = strip_path_suffix(argv0_path, "perf"))) {
- prefix = PREFIX;
- fprintf(stderr, "RUNTIME_PREFIX requested, "
- "but prefix computation failed. "
- "Using static fallback '%s'.\n", prefix);
- }
-#endif
-
strbuf_addf(&d, "%s/%s", prefix, path);
path = strbuf_detach(&d, NULL);
return path;
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index f06f6fd148f..36a885d2cd2 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -21,4 +21,19 @@ do
p
}' "Documentation/perf-$cmd.txt"
done
+
+echo "#ifdef HAVE_LIBELF_SUPPORT"
+sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "#endif /* HAVE_LIBELF_SUPPORT */"
echo "};"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d7e67b167ea..893f8e2df92 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,5 +1,4 @@
-#define _FILE_OFFSET_BITS 64
-
+#include "util.h"
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
@@ -7,183 +6,169 @@
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <sys/utsname.h>
-#include "util.h"
+#include "evlist.h"
+#include "evsel.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
+#include "cpumap.h"
+#include "pmu.h"
+#include "vdso.h"
+#include "strbuf.h"
+#include "build-id.h"
+#include "data.h"
static bool no_buildid_cache = false;
+static u32 header_argc;
+static const char **header_argv;
+
/*
- * Create new perf.data header attribute:
+ * magic2 = "PERFILE2"
+ * must be a numerical value to let the endianness
+ * determine the memory layout. That way we are able
+ * to detect endianness when reading the perf.data file
+ * back.
+ *
+ * we check for legacy (PERFFILE) format.
*/
-struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
-{
- struct perf_header_attr *self = malloc(sizeof(*self));
-
- if (self != NULL) {
- self->attr = *attr;
- self->ids = 0;
- self->size = 1;
- self->id = malloc(sizeof(u64));
- if (self->id == NULL) {
- free(self);
- self = NULL;
- }
- }
+static const char *__perf_magic1 = "PERFFILE";
+static const u64 __perf_magic2 = 0x32454c4946524550ULL;
+static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
- return self;
-}
+#define PERF_MAGIC __perf_magic2
-void perf_header_attr__delete(struct perf_header_attr *self)
-{
- free(self->id);
- free(self);
-}
+struct perf_file_attr {
+ struct perf_event_attr attr;
+ struct perf_file_section ids;
+};
-int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
+void perf_header__set_feat(struct perf_header *header, int feat)
{
- int pos = self->ids;
-
- self->ids++;
- if (self->ids > self->size) {
- int nsize = self->size * 2;
- u64 *nid = realloc(self->id, nsize * sizeof(u64));
-
- if (nid == NULL)
- return -1;
-
- self->size = nsize;
- self->id = nid;
- }
- self->id[pos] = id;
- return 0;
+ set_bit(feat, header->adds_features);
}
-int perf_header__init(struct perf_header *self)
+void perf_header__clear_feat(struct perf_header *header, int feat)
{
- self->size = 1;
- self->attr = malloc(sizeof(void *));
- return self->attr == NULL ? -ENOMEM : 0;
+ clear_bit(feat, header->adds_features);
}
-void perf_header__exit(struct perf_header *self)
+bool perf_header__has_feat(const struct perf_header *header, int feat)
{
- int i;
- for (i = 0; i < self->attrs; ++i)
- perf_header_attr__delete(self->attr[i]);
- free(self->attr);
+ return test_bit(feat, header->adds_features);
}
-int perf_header__add_attr(struct perf_header *self,
- struct perf_header_attr *attr)
+static int do_write(int fd, const void *buf, size_t size)
{
- if (self->frozen)
- return -1;
-
- if (self->attrs == self->size) {
- int nsize = self->size * 2;
- struct perf_header_attr **nattr;
+ while (size) {
+ int ret = write(fd, buf, size);
- nattr = realloc(self->attr, nsize * sizeof(void *));
- if (nattr == NULL)
- return -1;
+ if (ret < 0)
+ return -errno;
- self->size = nsize;
- self->attr = nattr;
+ size -= ret;
+ buf += ret;
}
- self->attr[self->attrs++] = attr;
return 0;
}
-static int event_count;
-static struct perf_trace_event_type *events;
+#define NAME_ALIGN 64
-int perf_header__push_event(u64 id, const char *name)
+static int write_padded(int fd, const void *bf, size_t count,
+ size_t count_aligned)
{
- if (strlen(name) > MAX_EVENT_NAME)
- pr_warning("Event %s will be truncated\n", name);
+ static const char zero_buf[NAME_ALIGN];
+ int err = do_write(fd, bf, count);
- if (!events) {
- events = malloc(sizeof(struct perf_trace_event_type));
- if (events == NULL)
- return -ENOMEM;
- } else {
- struct perf_trace_event_type *nevents;
+ if (!err)
+ err = do_write(fd, zero_buf, count_aligned - count);
- nevents = realloc(events, (event_count + 1) * sizeof(*events));
- if (nevents == NULL)
- return -ENOMEM;
- events = nevents;
- }
- memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
- events[event_count].event_id = id;
- strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
- event_count++;
- return 0;
+ return err;
}
-char *perf_header__find_event(u64 id)
+static int do_write_string(int fd, const char *str)
{
- int i;
- for (i = 0 ; i < event_count; i++) {
- if (events[i].event_id == id)
- return events[i].name;
- }
- return NULL;
-}
-
-static const char *__perf_magic = "PERFFILE";
+ u32 len, olen;
+ int ret;
-#define PERF_MAGIC (*(u64 *)__perf_magic)
+ olen = strlen(str) + 1;
+ len = PERF_ALIGN(olen, NAME_ALIGN);
-struct perf_file_attr {
- struct perf_event_attr attr;
- struct perf_file_section ids;
-};
+ /* write len, incl. \0 */
+ ret = do_write(fd, &len, sizeof(len));
+ if (ret < 0)
+ return ret;
-void perf_header__set_feat(struct perf_header *self, int feat)
-{
- set_bit(feat, self->adds_features);
+ return write_padded(fd, str, olen, len);
}
-bool perf_header__has_feat(const struct perf_header *self, int feat)
+static char *do_read_string(int fd, struct perf_header *ph)
{
- return test_bit(feat, self->adds_features);
-}
+ ssize_t sz, ret;
+ u32 len;
+ char *buf;
-static int do_write(int fd, const void *buf, size_t size)
-{
- while (size) {
- int ret = write(fd, buf, size);
+ sz = readn(fd, &len, sizeof(len));
+ if (sz < (ssize_t)sizeof(len))
+ return NULL;
- if (ret < 0)
- return -errno;
+ if (ph->needs_swap)
+ len = bswap_32(len);
- size -= ret;
- buf += ret;
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ ret = readn(fd, buf, len);
+ if (ret == (ssize_t)len) {
+ /*
+ * strings are padded by zeroes
+ * thus the actual strlen of buf
+ * may be less than len
+ */
+ return buf;
}
- return 0;
+ free(buf);
+ return NULL;
}
-#define NAME_ALIGN 64
-
-static int write_padded(int fd, const void *bf, size_t count,
- size_t count_aligned)
+int
+perf_header__set_cmdline(int argc, const char **argv)
{
- static const char zero_buf[NAME_ALIGN];
- int err = do_write(fd, bf, count);
+ int i;
- if (!err)
- err = do_write(fd, zero_buf, count_aligned - count);
+ /*
+ * If header_argv has already been set, do not override it.
+ * This allows a command to set the cmdline, parse args and
+ * then call another builtin function that implements a
+	 * command -- e.g., cmd_kvm calling cmd_record.
+ */
+ if (header_argv)
+ return 0;
- return err;
+ header_argc = (u32)argc;
+
+ /* do not include NULL termination */
+ header_argv = calloc(argc, sizeof(char *));
+ if (!header_argv)
+ return -ENOMEM;
+
+ /*
+ * must copy argv contents because it gets moved
+ * around during option parsing
+ */
+ for (i = 0; i < argc ; i++)
+ header_argv[i] = argv[i];
+
+ return 0;
}
#define dsos__for_each_with_build_id(pos, head) \
@@ -192,53 +177,81 @@ static int write_padded(int fd, const void *bf, size_t count,
continue; \
else
-static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
- u16 misc, int fd)
+static int write_buildid(const char *name, size_t name_len, u8 *build_id,
+ pid_t pid, u16 misc, int fd)
{
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ len = name_len + 1;
+ len = PERF_ALIGN(len, NAME_ALIGN);
+
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+
+ return write_padded(fd, name, name_len + 1, len);
+}
+
+static int __dsos__write_buildid_table(struct list_head *head,
+ struct machine *machine,
+ pid_t pid, u16 misc, int fd)
+{
+ char nm[PATH_MAX];
struct dso *pos;
dsos__for_each_with_build_id(pos, head) {
int err;
- struct build_id_event b;
- size_t len;
+ const char *name;
+ size_t name_len;
if (!pos->hit)
continue;
- len = pos->long_name_len + 1;
- len = ALIGN(len, NAME_ALIGN);
- memset(&b, 0, sizeof(b));
- memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
- b.pid = pid;
- b.header.misc = misc;
- b.header.size = sizeof(b) + len;
- err = do_write(fd, &b, sizeof(b));
- if (err < 0)
- return err;
- err = write_padded(fd, pos->long_name,
- pos->long_name_len + 1, len);
- if (err < 0)
+
+ if (is_vdso_map(pos->short_name)) {
+ name = (char *) VDSO__MAP_NAME;
+ name_len = sizeof(VDSO__MAP_NAME) + 1;
+ } else if (dso__is_kcore(pos)) {
+ machine__mmap_name(machine, nm, sizeof(nm));
+ name = nm;
+ name_len = strlen(nm) + 1;
+ } else {
+ name = pos->long_name;
+ name_len = pos->long_name_len + 1;
+ }
+
+ err = write_buildid(name, name_len, pos->build_id,
+ pid, misc, fd);
+ if (err)
return err;
}
return 0;
}
-static int machine__write_buildid_table(struct machine *self, int fd)
+static int machine__write_buildid_table(struct machine *machine, int fd)
{
int err;
u16 kmisc = PERF_RECORD_MISC_KERNEL,
umisc = PERF_RECORD_MISC_USER;
- if (!machine__is_host(self)) {
+ if (!machine__is_host(machine)) {
kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
umisc = PERF_RECORD_MISC_GUEST_USER;
}
- err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
- kmisc, fd);
+ err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+ machine->pid, kmisc, fd);
if (err == 0)
- err = __dsos__write_buildid_table(&self->user_dsos,
- self->pid, umisc, fd);
+ err = __dsos__write_buildid_table(&machine->user_dsos, machine,
+ machine->pid, umisc, fd);
return err;
}
@@ -247,12 +260,12 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd)
struct perf_session *session = container_of(header,
struct perf_session, header);
struct rb_node *nd;
- int err = machine__write_buildid_table(&session->host_machine, fd);
+ int err = machine__write_buildid_table(&session->machines.host, fd);
if (err)
return err;
- for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
@@ -262,32 +275,44 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd)
}
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms)
+ const char *name, bool is_kallsyms, bool is_vdso)
{
const size_t size = PATH_MAX;
- char *filename = malloc(size),
- *linkname = malloc(size), *targetname;
+ char *realname, *filename = zalloc(size),
+ *linkname = zalloc(size), *targetname;
int len, err = -1;
+ bool slash = is_kallsyms || is_vdso;
- if (filename == NULL || linkname == NULL)
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ err = 0;
+ goto out_free;
+ }
+ realname = (char *) name;
+ } else
+ realname = realpath(name, NULL);
+
+ if (realname == NULL || filename == NULL || linkname == NULL)
goto out_free;
- len = snprintf(filename, size, "%s%s%s",
- debugdir, is_kallsyms ? "/" : "", name);
+ len = scnprintf(filename, size, "%s%s%s",
+ debugdir, slash ? "/" : "",
+ is_vdso ? VDSO__MAP_NAME : realname);
if (mkdir_p(filename, 0755))
goto out_free;
- snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+ snprintf(filename + len, size - len, "/%s", sbuild_id);
if (access(filename, F_OK)) {
if (is_kallsyms) {
if (copyfile("/proc/kallsyms", filename))
goto out_free;
- } else if (link(name, filename) && copyfile(name, filename))
+ } else if (link(realname, filename) && copyfile(name, filename))
goto out_free;
}
- len = snprintf(linkname, size, "%s/.build-id/%.2s",
+ len = scnprintf(linkname, size, "%s/.build-id/%.2s",
debugdir, sbuild_id);
if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
@@ -300,6 +325,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
if (symlink(targetname, linkname) == 0)
err = 0;
out_free:
+ if (!is_kallsyms)
+ free(realname);
free(filename);
free(linkname);
return err;
@@ -307,20 +334,21 @@ out_free:
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
const char *name, const char *debugdir,
- bool is_kallsyms)
+ bool is_kallsyms, bool is_vdso)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
build_id__sprintf(build_id, build_id_size, sbuild_id);
- return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+ return build_id_cache__add_s(sbuild_id, debugdir, name,
+ is_kallsyms, is_vdso);
}
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
const size_t size = PATH_MAX;
- char *filename = malloc(size),
- *linkname = malloc(size);
+ char *filename = zalloc(size),
+ *linkname = zalloc(size);
int err = -1;
if (filename == NULL || linkname == NULL)
@@ -332,7 +360,7 @@ int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
if (access(linkname, F_OK))
goto out_free;
- if (readlink(linkname, filename, size) < 0)
+ if (readlink(linkname, filename, size - 1) < 0)
goto out_free;
if (unlink(linkname))
@@ -354,34 +382,45 @@ out_free:
return err;
}
-static int dso__cache_build_id(struct dso *self, const char *debugdir)
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+ const char *debugdir)
{
- bool is_kallsyms = self->kernel && self->long_name[0] != '/';
-
- return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
- self->long_name, debugdir, is_kallsyms);
+ bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+ bool is_vdso = is_vdso_map(dso->short_name);
+ const char *name = dso->long_name;
+ char nm[PATH_MAX];
+
+ if (dso__is_kcore(dso)) {
+ is_kallsyms = true;
+ machine__mmap_name(machine, nm, sizeof(nm));
+ name = nm;
+ }
+ return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+ debugdir, is_kallsyms, is_vdso);
}
-static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+static int __dsos__cache_build_ids(struct list_head *head,
+ struct machine *machine, const char *debugdir)
{
struct dso *pos;
int err = 0;
dsos__for_each_with_build_id(pos, head)
- if (dso__cache_build_id(pos, debugdir))
+ if (dso__cache_build_id(pos, machine, debugdir))
err = -1;
return err;
}
-static int machine__cache_build_ids(struct machine *self, const char *debugdir)
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
- int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
- ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir);
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+ debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
return ret;
}
-static int perf_session__cache_build_ids(struct perf_session *self)
+static int perf_session__cache_build_ids(struct perf_session *session)
{
struct rb_node *nd;
int ret;
@@ -392,28 +431,28 @@ static int perf_session__cache_build_ids(struct perf_session *self)
if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
return -1;
- ret = machine__cache_build_ids(&self->host_machine, debugdir);
+ ret = machine__cache_build_ids(&session->machines.host, debugdir);
- for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos, debugdir);
}
return ret ? -1 : 0;
}
-static bool machine__read_build_ids(struct machine *self, bool with_hits)
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
- bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
- ret |= __dsos__read_build_ids(&self->user_dsos, with_hits);
+ bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
return ret;
}
-static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
struct rb_node *nd;
- bool ret = machine__read_build_ids(&self->host_machine, with_hits);
+ bool ret = machine__read_build_ids(&session->machines.host, with_hits);
- for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
@@ -421,280 +460,1046 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi
return ret;
}
-static int perf_header__adds_write(struct perf_header *self, int fd)
+static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
+{
+ return read_tracing_data(fd, &evlist->entries);
+}
+
+
+static int write_build_id(int fd, struct perf_header *h,
+ struct perf_evlist *evlist __maybe_unused)
{
- int nr_sections;
struct perf_session *session;
- struct perf_file_section *feat_sec;
- int sec_size;
- u64 sec_start;
- int idx = 0, err;
+ int err;
- session = container_of(self, struct perf_session, header);
- if (perf_session__read_build_ids(session, true))
- perf_header__set_feat(self, HEADER_BUILD_ID);
+ session = container_of(h, struct perf_session, header);
- nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
- if (!nr_sections)
- return 0;
+ if (!perf_session__read_build_ids(session, true))
+ return -1;
- feat_sec = calloc(sizeof(*feat_sec), nr_sections);
- if (feat_sec == NULL)
- return -ENOMEM;
+ err = dsos__write_buildid_table(h, fd);
+ if (err < 0) {
+ pr_debug("failed to write buildid table\n");
+ return err;
+ }
+ if (!no_buildid_cache)
+ perf_session__cache_build_ids(session);
- sec_size = sizeof(*feat_sec) * nr_sections;
+ return 0;
+}
- sec_start = self->data_offset + self->data_size;
- lseek(fd, sec_start + sec_size, SEEK_SET);
+static int write_hostname(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct utsname uts;
+ int ret;
- if (perf_header__has_feat(self, HEADER_TRACE_INFO)) {
- struct perf_file_section *trace_sec;
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
- trace_sec = &feat_sec[idx++];
+ return do_write_string(fd, uts.nodename);
+}
- /* Write trace info */
- trace_sec->offset = lseek(fd, 0, SEEK_CUR);
- read_tracing_data(fd, attrs, nr_counters);
- trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
- }
+static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct utsname uts;
+ int ret;
- if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
- struct perf_file_section *buildid_sec;
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
- buildid_sec = &feat_sec[idx++];
+ return do_write_string(fd, uts.release);
+}
- /* Write build-ids */
- buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
- err = dsos__write_buildid_table(self, fd);
- if (err < 0) {
- pr_debug("failed to write buildid table\n");
- goto out_free;
+static int write_arch(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.machine);
+}
+
+static int write_version(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return do_write_string(fd, perf_version_string);
+}
+
+static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+#ifndef CPUINFO_PROC
+#define CPUINFO_PROC NULL
+#endif
+ FILE *file;
+ char *buf = NULL;
+ char *s, *p;
+ const char *search = CPUINFO_PROC;
+ size_t len = 0;
+ int ret = -1;
+
+ if (!search)
+ return -1;
+
+ file = fopen("/proc/cpuinfo", "r");
+ if (!file)
+ return -1;
+
+ while (getline(&buf, &len, file) > 0) {
+ ret = strncmp(buf, search, strlen(search));
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ goto done;
+
+ s = buf;
+
+ p = strchr(buf, ':');
+ if (p && *(p+1) == ' ' && *(p+2))
+ s = p + 2;
+ p = strchr(s, '\n');
+ if (p)
+ *p = '\0';
+
+ /* squash extra space characters (branding string) */
+ p = s;
+ while (*p) {
+ if (isspace(*p)) {
+ char *r = p + 1;
+ char *q = r;
+ *p = ' ';
+ while (*q && isspace(*q))
+ q++;
+ if (q != (p+1))
+ while ((*r++ = *q++));
}
- buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
- buildid_sec->offset;
- if (!no_buildid_cache)
- perf_session__cache_build_ids(session);
+ p++;
}
+ ret = do_write_string(fd, s);
+done:
+ free(buf);
+ fclose(file);
+ return ret;
+}
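The whitespace-squashing loop above is compact but easy to misread. Here is a minimal standalone sketch of the same idea (illustrative only, not part of the patch; it uses memmove() instead of the overlapping byte copy):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Collapse every run of whitespace into a single space, in place. */
static void squash_spaces(char *s)
{
	char *p = s;

	while (*p) {
		if (isspace((unsigned char)*p)) {
			char *q = p + 1;

			*p = ' ';
			while (*q && isspace((unsigned char)*q))
				q++;
			if (q != p + 1)
				memmove(p + 1, q, strlen(q) + 1);
		}
		p++;
	}
}

int main(void)
{
	char brand[] = "Intel(R)  Xeon(R)   CPU  E5-2670";

	squash_spaces(brand);
	printf("%s\n", brand);	/* "Intel(R) Xeon(R) CPU E5-2670" */
	return 0;
}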
- lseek(fd, sec_start, SEEK_SET);
- err = do_write(fd, feat_sec, sec_size);
- if (err < 0)
- pr_debug("failed to write feature section\n");
-out_free:
- free(feat_sec);
- return err;
+static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ long nr;
+ u32 nrc, nra;
+ int ret;
+
+ nr = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr < 0)
+ return -1;
+
+ nrc = (u32)(nr & UINT_MAX);
+
+ nr = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr < 0)
+ return -1;
+
+ nra = (u32)(nr & UINT_MAX);
+
+ ret = do_write(fd, &nrc, sizeof(nrc));
+ if (ret < 0)
+ return ret;
+
+ return do_write(fd, &nra, sizeof(nra));
}
-int perf_header__write_pipe(int fd)
+static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
{
- struct perf_pipe_file_header f_header;
- int err;
+ struct perf_evsel *evsel;
+ u32 nre, nri, sz;
+ int ret;
- f_header = (struct perf_pipe_file_header){
- .magic = PERF_MAGIC,
- .size = sizeof(f_header),
- };
+ nre = evlist->nr_entries;
- err = do_write(fd, &f_header, sizeof(f_header));
- if (err < 0) {
- pr_debug("failed to write perf pipe header\n");
- return err;
+ /*
+ * write number of events
+ */
+ ret = do_write(fd, &nre, sizeof(nre));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * size of perf_event_attr struct
+ */
+ sz = (u32)sizeof(evsel->attr);
+ ret = do_write(fd, &sz, sizeof(sz));
+ if (ret < 0)
+ return ret;
+
+ evlist__for_each(evlist, evsel) {
+ ret = do_write(fd, &evsel->attr, sz);
+ if (ret < 0)
+ return ret;
+ /*
+		 * write number of unique ids per event
+		 * there is one id per instance of an event
+		 *
+		 * copy into nri to be independent of the
+		 * type of ids
+ */
+ nri = evsel->ids;
+ ret = do_write(fd, &nri, sizeof(nri));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * write event string as passed on cmdline
+ */
+ ret = do_write_string(fd, perf_evsel__name(evsel));
+ if (ret < 0)
+ return ret;
+ /*
+ * write unique ids for this event
+ */
+ ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
+ if (ret < 0)
+ return ret;
}
+ return 0;
+}
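Read together with read_event_desc() further down, the writes above yield a self-describing section. A sketch of the resulting on-disk layout (derived from the code above, shown only for orientation):

	u32 nre;                          /* number of events                 */
	u32 sz;                           /* sizeof(struct perf_event_attr)   */
	struct {
		struct perf_event_attr attr;  /* sz bytes                     */
		u32  nri;                     /* number of unique ids         */
		char name[];                  /* written by do_write_string() */
		u64  id[nri];                 /* the unique ids               */
	} events[nre];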
+
+static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char buf[MAXPATHLEN];
+ char proc[32];
+ u32 i, n;
+ int ret;
+
+ /*
+	 * actual path to the perf binary
+ */
+ sprintf(proc, "/proc/%d/exe", getpid());
+ ret = readlink(proc, buf, sizeof(buf));
+ if (ret <= 0)
+ return -1;
+ /* readlink() does not add null termination */
+ buf[ret] = '\0';
+
+ /* account for binary path */
+ n = header_argc + 1;
+
+ ret = do_write(fd, &n, sizeof(n));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(fd, buf);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0 ; i < header_argc; i++) {
+ ret = do_write_string(fd, header_argv[i]);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
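So the HEADER_CMDLINE payload boils down to a u32 count followed by that many strings, each written with do_write_string() (a sketch, derived from the code above):

	u32  n;          /* header_argc + 1                               */
	char argv0[];    /* resolved /proc/<pid>/exe path of perf itself  */
	char argv[][];   /* the remaining header_argv[] entries           */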
-int perf_header__write(struct perf_header *self, int fd, bool at_exit)
+#define CORE_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+
+struct cpu_topo {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
- struct perf_file_header f_header;
- struct perf_file_attr f_attr;
- struct perf_header_attr *attr;
- int i, err;
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ sprintf(filename, CORE_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto try_threads;
+
+ sret = getline(&buf, &len, fp);
+ fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+ ret = 0;
- lseek(fd, sizeof(f_header), SEEK_SET);
+try_threads:
+ sprintf(filename, THRD_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
- for (i = 0; i < self->attrs; i++) {
- attr = self->attr[i];
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
- attr->id_offset = lseek(fd, 0, SEEK_CUR);
- err = do_write(fd, attr->id, attr->ids * sizeof(u64));
- if (err < 0) {
- pr_debug("failed to write perf header\n");
- return err;
- }
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
}
+ ret = 0;
+done:
+	if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+static void free_cpu_topo(struct cpu_topo *tp)
+{
+ u32 i;
- self->attr_offset = lseek(fd, 0, SEEK_CUR);
+ if (!tp)
+ return;
- for (i = 0; i < self->attrs; i++) {
- attr = self->attr[i];
+ for (i = 0 ; i < tp->core_sib; i++)
+ zfree(&tp->core_siblings[i]);
- f_attr = (struct perf_file_attr){
- .attr = attr->attr,
- .ids = {
- .offset = attr->id_offset,
- .size = attr->ids * sizeof(u64),
- }
- };
- err = do_write(fd, &f_attr, sizeof(f_attr));
- if (err < 0) {
- pr_debug("failed to write perf header attribute\n");
- return err;
- }
+ for (i = 0 ; i < tp->thread_sib; i++)
+ zfree(&tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+static struct cpu_topo *build_cpu_topology(void)
+{
+ struct cpu_topo *tp;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+
+ ncpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (ncpus < 0)
+ return NULL;
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ return NULL;
+
+ tp = addr;
+
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ ret = build_cpu_topo(tp, i);
+ if (ret < 0)
+ break;
}
+ if (ret) {
+ free_cpu_topo(tp);
+ tp = NULL;
+ }
+ return tp;
+}
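build_cpu_topology() deliberately makes a single allocation carrying the struct plus both pointer arrays; a sketch of the layout (illustrative):

	addr
	 |
	 v
	 +-----------------+----------------------+------------------------+
	 | struct cpu_topo | core_siblings[nr]    | thread_siblings[nr]    |
	 +-----------------+----------------------+------------------------+
	                   ^                      ^
	                   addr + sizeof(*tp)     addr + sizeof(*tp) + sz
	                                          (sz = nr * sizeof(char *))

One free_cpu_topo() call, after freeing the strings the arrays point to, then releases everything at once.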
- self->event_offset = lseek(fd, 0, SEEK_CUR);
- self->event_size = event_count * sizeof(struct perf_trace_event_type);
- if (events) {
- err = do_write(fd, events, self->event_size);
- if (err < 0) {
- pr_debug("failed to write perf header events\n");
- return err;
- }
+static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct cpu_topo *tp;
+ u32 i;
+ int ret;
+
+ tp = build_cpu_topology();
+ if (!tp)
+ return -1;
+
+ ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < tp->core_sib; i++) {
+ ret = do_write_string(fd, tp->core_siblings[i]);
+ if (ret < 0)
+ goto done;
}
+ ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
+ if (ret < 0)
+ goto done;
- self->data_offset = lseek(fd, 0, SEEK_CUR);
+ for (i = 0; i < tp->thread_sib; i++) {
+ ret = do_write_string(fd, tp->thread_siblings[i]);
+ if (ret < 0)
+ break;
+ }
+done:
+ free_cpu_topo(tp);
+ return ret;
+}
- if (at_exit) {
- err = perf_header__adds_write(self, fd);
- if (err < 0)
- return err;
+
+static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char *buf = NULL;
+ FILE *fp;
+ size_t len = 0;
+ int ret = -1, n;
+ uint64_t mem;
+
+ fp = fopen("/proc/meminfo", "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ ret = strncmp(buf, "MemTotal:", 9);
+ if (!ret)
+ break;
+ }
+ if (!ret) {
+ n = sscanf(buf, "%*s %"PRIu64, &mem);
+ if (n == 1)
+ ret = do_write(fd, &mem, sizeof(mem));
}
+ free(buf);
+ fclose(fp);
+ return ret;
+}
- f_header = (struct perf_file_header){
- .magic = PERF_MAGIC,
- .size = sizeof(f_header),
- .attr_size = sizeof(f_attr),
- .attrs = {
- .offset = self->attr_offset,
- .size = self->attrs * sizeof(f_attr),
- },
- .data = {
- .offset = self->data_offset,
- .size = self->data_size,
- },
- .event_types = {
- .offset = self->event_offset,
- .size = self->event_size,
- },
- };
+static int write_topo_node(int fd, int node)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ FILE *fp;
+ u64 mem_total, mem_free, mem;
+ int ret = -1;
+
+ sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
- memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
+ goto done;
+ if (!strcmp(field, "MemTotal:"))
+ mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ mem_free = mem;
+ }
- lseek(fd, 0, SEEK_SET);
- err = do_write(fd, &f_header, sizeof(f_header));
- if (err < 0) {
- pr_debug("failed to write perf header\n");
- return err;
+ fclose(fp);
+ fp = NULL;
+
+ ret = do_write(fd, &mem_total, sizeof(u64));
+ if (ret)
+ goto done;
+
+ ret = do_write(fd, &mem_free, sizeof(u64));
+ if (ret)
+ goto done;
+
+ ret = -1;
+ sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
+
+ fp = fopen(str, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ ret = do_write_string(fd, buf);
+done:
+ free(buf);
+ if (fp)
+ fclose(fp);
+ return ret;
+}
+
+static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char *buf = NULL;
+ size_t len = 0;
+ FILE *fp;
+ struct cpu_map *node_map = NULL;
+ char *c;
+ u32 nr, i, j;
+ int ret = -1;
+
+ fp = fopen("/sys/devices/system/node/online", "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto done;
+
+ nr = (u32)node_map->nr;
+
+ ret = do_write(fd, &nr, sizeof(nr));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < nr; i++) {
+ j = (u32)node_map->map[i];
+ ret = do_write(fd, &j, sizeof(j));
+ if (ret < 0)
+ break;
+
+ ret = write_topo_node(fd, i);
+ if (ret < 0)
+ break;
+ }
+done:
+ free(buf);
+ fclose(fp);
+ free(node_map);
+ return ret;
+}
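Combining write_numa_topology() and write_topo_node(), the HEADER_NUMA_TOPOLOGY section comes out as (sketch, for orientation only):

	u32 nr_nodes;                 /* from /sys/devices/system/node/online    */
	struct {
		u32  node;                /* node number                           */
		u64  mem_total;           /* kB, from node<N>/meminfo              */
		u64  mem_free;            /* kB                                    */
		char cpulist[];           /* node<N>/cpulist, via do_write_string()*/
	} nodes[nr_nodes];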
+
+/*
+ * File format:
+ *
+ * struct pmu_mappings {
+ * u32 pmu_num;
+ * struct pmu_map {
+ * u32 type;
+ * char name[];
+ * }[pmu_num];
+ * };
+ */
+
+static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *pmu = NULL;
+ off_t offset = lseek(fd, 0, SEEK_CUR);
+ __u32 pmu_num = 0;
+ int ret;
+
+ /* write real pmu_num later */
+ ret = do_write(fd, &pmu_num, sizeof(pmu_num));
+ if (ret < 0)
+ return ret;
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name)
+ continue;
+ pmu_num++;
+
+ ret = do_write(fd, &pmu->type, sizeof(pmu->type));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(fd, pmu->name);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
+ /* discard all */
+ lseek(fd, offset, SEEK_SET);
+ return -1;
}
- lseek(fd, self->data_offset + self->data_size, SEEK_SET);
- self->frozen = 1;
return 0;
}
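For concreteness, a machine exposing two named PMUs might serialize the struct pmu_mappings layout described above roughly as follows (type numbers and names are illustrative; each name is emitted with do_write_string()):

	pmu_num = 2
	  type = 4,  name = "cpu"          /* e.g. PERF_TYPE_RAW        */
	  type = 2,  name = "tracepoint"   /* e.g. PERF_TYPE_TRACEPOINT */

The placeholder pmu_num written first is patched afterwards with pwrite(), since the number of named PMUs is only known after the perf_pmu__scan() walk completes.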
-static int perf_header__getbuffer64(struct perf_header *self,
- int fd, void *buf, size_t size)
+/*
+ * File format:
+ *
+ * struct group_descs {
+ * u32 nr_groups;
+ * struct group_desc {
+ * char name[];
+ * u32 leader_idx;
+ * u32 nr_members;
+ * }[nr_groups];
+ * };
+ */
+static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
{
- if (do_read(fd, buf, size) <= 0)
- return -1;
+ u32 nr_groups = evlist->nr_groups;
+ struct perf_evsel *evsel;
+ int ret;
- if (self->needs_swap)
- mem_bswap_64(buf, size);
+ ret = do_write(fd, &nr_groups, sizeof(nr_groups));
+ if (ret < 0)
+ return ret;
+
+ evlist__for_each(evlist, evsel) {
+ if (perf_evsel__is_group_leader(evsel) &&
+ evsel->nr_members > 1) {
+ const char *name = evsel->group_name ?: "{anon_group}";
+ u32 leader_idx = evsel->idx;
+ u32 nr_members = evsel->nr_members;
+
+ ret = do_write_string(fd, name);
+ if (ret < 0)
+ return ret;
+ ret = do_write(fd, &leader_idx, sizeof(leader_idx));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write(fd, &nr_members, sizeof(nr_members));
+ if (ret < 0)
+ return ret;
+ }
+ }
return 0;
}
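As an example (illustrative), recording with an anonymous group such as perf record -e '{cycles,instructions}' would be described by a single entry:

	nr_groups = 1
	  name       = "{anon_group}"   /* no explicit group name was given */
	  leader_idx = 0                /* evsel->idx of the cycles event   */
	  nr_members = 2

process_group_desc() below uses exactly these fields to re-link members to their leader when the file is read back.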
-int perf_header__process_sections(struct perf_header *self, int fd,
- int (*process)(struct perf_file_section *self,
- struct perf_header *ph,
- int feat, int fd))
+/*
+ * default get_cpuid(): nothing gets recorded
+ * actual implementation must be in arch/$(ARCH)/util/header.c
+ */
+int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
+ size_t sz __maybe_unused)
{
- struct perf_file_section *feat_sec;
- int nr_sections;
- int sec_size;
- int idx = 0;
- int err = -1, feat = 1;
+ return -1;
+}
- nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
- if (!nr_sections)
- return 0;
+static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char buffer[64];
+ int ret;
- feat_sec = calloc(sizeof(*feat_sec), nr_sections);
- if (!feat_sec)
- return -1;
+ ret = get_cpuid(buffer, sizeof(buffer));
+ if (!ret)
+ goto write_it;
- sec_size = sizeof(*feat_sec) * nr_sections;
+ return -1;
+write_it:
+ return do_write_string(fd, buffer);
+}
- lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+static int write_branch_stack(int fd __maybe_unused,
+ struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
- if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
- goto out_free;
+static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# hostname : %s\n", ph->env.hostname);
+}
- err = 0;
- while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
- if (perf_header__has_feat(self, feat)) {
- struct perf_file_section *sec = &feat_sec[idx++];
+static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# os release : %s\n", ph->env.os_release);
+}
+
+static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# arch : %s\n", ph->env.arch);
+}
+
+static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
+}
- err = process(sec, self, feat, fd);
- if (err < 0)
- break;
+static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
+ fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
+}
+
+static void print_version(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# perf version : %s\n", ph->env.version);
+}
+
+static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ int nr, i;
+ char *str;
+
+ nr = ph->env.nr_cmdline;
+ str = ph->env.cmdline;
+
+ fprintf(fp, "# cmdline : ");
+
+ for (i = 0; i < nr; i++) {
+ fprintf(fp, "%s ", str);
+ str += strlen(str) + 1;
+ }
+ fputc('\n', fp);
+}
+
+static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ int nr, i;
+ char *str;
+
+ nr = ph->env.nr_sibling_cores;
+ str = ph->env.sibling_cores;
+
+ for (i = 0; i < nr; i++) {
+ fprintf(fp, "# sibling cores : %s\n", str);
+ str += strlen(str) + 1;
+ }
+
+ nr = ph->env.nr_sibling_threads;
+ str = ph->env.sibling_threads;
+
+ for (i = 0; i < nr; i++) {
+ fprintf(fp, "# sibling threads : %s\n", str);
+ str += strlen(str) + 1;
+ }
+}
+
+static void free_event_desc(struct perf_evsel *events)
+{
+ struct perf_evsel *evsel;
+
+ if (!events)
+ return;
+
+ for (evsel = events; evsel->attr.size; evsel++) {
+ zfree(&evsel->name);
+ zfree(&evsel->id);
+ }
+
+ free(events);
+}
+
+static struct perf_evsel *
+read_event_desc(struct perf_header *ph, int fd)
+{
+ struct perf_evsel *evsel, *events = NULL;
+ u64 *id;
+ void *buf = NULL;
+ u32 nre, sz, nr, i, j;
+ ssize_t ret;
+ size_t msz;
+
+ /* number of events */
+ ret = readn(fd, &nre, sizeof(nre));
+ if (ret != (ssize_t)sizeof(nre))
+ goto error;
+
+ if (ph->needs_swap)
+ nre = bswap_32(nre);
+
+ ret = readn(fd, &sz, sizeof(sz));
+ if (ret != (ssize_t)sizeof(sz))
+ goto error;
+
+ if (ph->needs_swap)
+ sz = bswap_32(sz);
+
+	/* buffer to hold the on-file attr struct */
+ buf = malloc(sz);
+ if (!buf)
+ goto error;
+
+ /* the last event terminates with evsel->attr.size == 0: */
+ events = calloc(nre + 1, sizeof(*events));
+ if (!events)
+ goto error;
+
+ msz = sizeof(evsel->attr);
+ if (sz < msz)
+ msz = sz;
+
+ for (i = 0, evsel = events; i < nre; evsel++, i++) {
+ evsel->idx = i;
+
+ /*
+ * must read entire on-file attr struct to
+ * sync up with layout.
+ */
+ ret = readn(fd, buf, sz);
+ if (ret != (ssize_t)sz)
+ goto error;
+
+ if (ph->needs_swap)
+ perf_event__attr_swap(buf);
+
+ memcpy(&evsel->attr, buf, msz);
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ goto error;
+
+ if (ph->needs_swap) {
+ nr = bswap_32(nr);
+ evsel->needs_swap = true;
+ }
+
+ evsel->name = do_read_string(fd, ph);
+
+ if (!nr)
+ continue;
+
+ id = calloc(nr, sizeof(*id));
+ if (!id)
+ goto error;
+ evsel->ids = nr;
+ evsel->id = id;
+
+ for (j = 0 ; j < nr; j++) {
+ ret = readn(fd, id, sizeof(*id));
+ if (ret != (ssize_t)sizeof(*id))
+ goto error;
+ if (ph->needs_swap)
+ *id = bswap_64(*id);
+ id++;
}
- ++feat;
}
-out_free:
- free(feat_sec);
- return err;
+out:
+ free(buf);
+ return events;
+error:
+ if (events)
+ free_event_desc(events);
+ events = NULL;
+ goto out;
}
-int perf_file_header__read(struct perf_file_header *self,
- struct perf_header *ph, int fd)
+static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
- lseek(fd, 0, SEEK_SET);
+ struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
+ u32 j;
+ u64 *id;
- if (do_read(fd, self, sizeof(*self)) <= 0 ||
- memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
- return -1;
+ if (!events) {
+ fprintf(fp, "# event desc: not available or unable to read\n");
+ return;
+ }
+
+ for (evsel = events; evsel->attr.size; evsel++) {
+ fprintf(fp, "# event : name = %s, ", evsel->name);
+
+ fprintf(fp, "type = %d, config = 0x%"PRIx64
+ ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
+ evsel->attr.type,
+ (u64)evsel->attr.config,
+ (u64)evsel->attr.config1,
+ (u64)evsel->attr.config2);
+
+ fprintf(fp, ", excl_usr = %d, excl_kern = %d",
+ evsel->attr.exclude_user,
+ evsel->attr.exclude_kernel);
+
+ fprintf(fp, ", excl_host = %d, excl_guest = %d",
+ evsel->attr.exclude_host,
+ evsel->attr.exclude_guest);
+
+ fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
+
+ fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2);
+ fprintf(fp, ", attr_mmap = %d", evsel->attr.mmap);
+ fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data);
+ if (evsel->ids) {
+ fprintf(fp, ", id = {");
+ for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
+ if (j)
+ fputc(',', fp);
+ fprintf(fp, " %"PRIu64, *id);
+ }
+ fprintf(fp, " }");
+ }
- if (self->attr_size != sizeof(struct perf_file_attr)) {
- u64 attr_size = bswap_64(self->attr_size);
+ fputc('\n', fp);
+ }
- if (attr_size != sizeof(struct perf_file_attr))
- return -1;
+ free_event_desc(events);
+}
+
+static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
+}
- mem_bswap_64(self, offsetof(struct perf_file_header,
- adds_features));
- ph->needs_swap = true;
+static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ u32 nr, c, i;
+ char *str, *tmp;
+ uint64_t mem_total, mem_free;
+
+ /* nr nodes */
+ nr = ph->env.nr_numa_nodes;
+ str = ph->env.numa_nodes;
+
+ for (i = 0; i < nr; i++) {
+ /* node number */
+ c = strtoul(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ mem_total = strtoull(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ mem_free = strtoull(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
+ " free = %"PRIu64" kB\n",
+ c, mem_total, mem_free);
+
+ str = tmp + 1;
+ fprintf(fp, "# node%u cpu list : %s\n", c, str);
+
+ str += strlen(str) + 1;
}
+ return;
+error:
+ fprintf(fp, "# numa topology : not available\n");
+}
- if (self->size != sizeof(*self)) {
- /* Support the previous format */
- if (self->size == offsetof(typeof(*self), adds_features))
- bitmap_zero(self->adds_features, HEADER_FEAT_BITS);
- else
- return -1;
+static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
+}
+
+static void print_branch_stack(struct perf_header *ph __maybe_unused,
+ int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# contains samples with branch stack\n");
+}
+
+static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ const char *delimiter = "# pmu mappings: ";
+ char *str, *tmp;
+ u32 pmu_num;
+ u32 type;
+
+ pmu_num = ph->env.nr_pmu_mappings;
+ if (!pmu_num) {
+ fprintf(fp, "# pmu mappings: not available\n");
+ return;
}
- memcpy(&ph->adds_features, &self->adds_features,
- sizeof(ph->adds_features));
- /*
- * FIXME: hack that assumes that if we need swap the perf.data file
- * may be coming from an arch with a different word-size, ergo different
- * DEFINE_BITMAP format, investigate more later, but for now its mostly
- * safe to assume that we have a build-id section. Trace files probably
- * have several other issues in this realm anyway...
- */
- if (ph->needs_swap) {
- memset(&ph->adds_features, 0, sizeof(ph->adds_features));
- perf_header__set_feat(ph, HEADER_BUILD_ID);
+ str = ph->env.pmu_mappings;
+
+ while (pmu_num) {
+ type = strtoul(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
+
+ delimiter = ", ";
+ str += strlen(str) + 1;
+ pmu_num--;
}
- ph->event_offset = self->event_types.offset;
- ph->event_size = self->event_types.size;
- ph->data_offset = self->data.offset;
- ph->data_size = self->data.size;
- return 0;
+ fprintf(fp, "\n");
+
+ if (!pmu_num)
+ return;
+error:
+ fprintf(fp, "# pmu mappings: unable to read\n");
+}
+
+static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ struct perf_session *session;
+ struct perf_evsel *evsel;
+ u32 nr = 0;
+
+ session = container_of(ph, struct perf_session, header);
+
+ evlist__for_each(session->evlist, evsel) {
+ if (perf_evsel__is_group_leader(evsel) &&
+ evsel->nr_members > 1) {
+ fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
+ perf_evsel__name(evsel));
+
+ nr = evsel->nr_members - 1;
+ } else if (nr) {
+ fprintf(fp, ",%s", perf_evsel__name(evsel));
+
+ if (--nr == 0)
+ fprintf(fp, "}\n");
+ }
+ }
}
static int __event_process_build_id(struct build_id_event *bev,
@@ -752,28 +1557,91 @@ out:
return err;
}
-static int perf_header__read_build_ids(struct perf_header *self,
- int input, u64 offset, u64 size)
+static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
+ int input, u64 offset, u64 size)
{
- struct perf_session *session = container_of(self,
- struct perf_session, header);
+ struct perf_session *session = container_of(header, struct perf_session, header);
+ struct {
+ struct perf_event_header header;
+ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ char filename[0];
+ } old_bev;
struct build_id_event bev;
char filename[PATH_MAX];
u64 limit = offset + size;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
+ return -1;
+
+ if (header->needs_swap)
+ perf_event_header__bswap(&old_bev.header);
+
+ len = old_bev.header.size - sizeof(old_bev);
+ if (readn(input, filename, len) != len)
+ return -1;
+
+ bev.header = old_bev.header;
+
+ /*
+ * As the pid is the missing value, we need to fill
+		 * it properly. The header.misc value gives us a nice hint.
+ */
+ bev.pid = HOST_KERNEL_ID;
+ if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
+ bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
+ bev.pid = DEFAULT_GUEST_KERNEL_ID;
+
+ memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+
+ return 0;
+}
+
+static int perf_header__read_build_ids(struct perf_header *header,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(header, struct perf_session, header);
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size, orig_offset = offset;
int err = -1;
while (offset < limit) {
ssize_t len;
- if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+ if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
goto out;
- if (self->needs_swap)
+ if (header->needs_swap)
perf_event_header__bswap(&bev.header);
len = bev.header.size - sizeof(bev);
- if (read(input, filename, len) != len)
+ if (readn(input, filename, len) != len)
goto out;
+ /*
+ * The a1645ce1 changeset:
+ *
+ * "perf: 'perf kvm' tool for monitoring guest performance from host"
+ *
+ * Added a field to struct build_id_event that broke the file
+ * format.
+ *
+ * Since the kernel build-id is the first entry, process the
+ * table using the old format if the well known
+ * '[kernel.kallsyms]' string for the kernel build-id has the
+ * first 4 characters chopped off (where the pid_t sits).
+ */
+ if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
+ if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
+ return -1;
+ return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
+ }
__event_process_build_id(&bev, filename, session);
@@ -784,304 +1652,1248 @@ out:
return err;
}
-static int perf_file_section__process(struct perf_file_section *self,
- struct perf_header *ph,
- int feat, int fd)
+static int process_tracing_data(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph __maybe_unused,
+ int fd, void *data)
+{
+ ssize_t ret = trace_report(fd, data, false);
+ return ret < 0 ? -1 : 0;
+}
+
+static int process_build_id(struct perf_file_section *section,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
+ pr_debug("Failed to read buildids, continuing...\n");
+ return 0;
+}
+
+static int process_hostname(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.hostname = do_read_string(fd, ph);
+ return ph->env.hostname ? 0 : -ENOMEM;
+}
+
+static int process_osrelease(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.os_release = do_read_string(fd, ph);
+ return ph->env.os_release ? 0 : -ENOMEM;
+}
+
+static int process_version(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.version = do_read_string(fd, ph);
+ return ph->env.version ? 0 : -ENOMEM;
+}
+
+static int process_arch(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.arch = do_read_string(fd, ph);
+ return ph->env.arch ? 0 : -ENOMEM;
+}
+
+static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ u32 nr;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cpus_online = nr;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cpus_avail = nr;
+ return 0;
+}
+
+static int process_cpudesc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.cpu_desc = do_read_string(fd, ph);
+ return ph->env.cpu_desc ? 0 : -ENOMEM;
+}
+
+static int process_cpuid(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.cpuid = do_read_string(fd, ph);
+ return ph->env.cpuid ? 0 : -ENOMEM;
+}
+
+static int process_total_mem(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
{
- if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
- pr_debug("Failed to lseek to %Ld offset for feature %d, "
- "continuing...\n", self->offset, feat);
+ uint64_t mem;
+ ssize_t ret;
+
+ ret = readn(fd, &mem, sizeof(mem));
+ if (ret != sizeof(mem))
+ return -1;
+
+ if (ph->needs_swap)
+ mem = bswap_64(mem);
+
+ ph->env.total_mem = mem;
+ return 0;
+}
+
+static struct perf_evsel *
+perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->idx == idx)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+static void
+perf_evlist__set_event_name(struct perf_evlist *evlist,
+ struct perf_evsel *event)
+{
+ struct perf_evsel *evsel;
+
+ if (!event->name)
+ return;
+
+ evsel = perf_evlist__find_by_index(evlist, event->idx);
+ if (!evsel)
+ return;
+
+ if (evsel->name)
+ return;
+
+ evsel->name = strdup(event->name);
+}
+
+static int
+process_event_desc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *header, int fd,
+ void *data __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_evsel *evsel, *events = read_event_desc(header, fd);
+
+ if (!events)
return 0;
+
+ session = container_of(header, struct perf_session, header);
+ for (evsel = events; evsel->attr.size; evsel++)
+ perf_evlist__set_event_name(session->evlist, evsel);
+
+ free_event_desc(events);
+
+ return 0;
+}
+
+static int process_cmdline(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ char *str;
+ u32 nr, i;
+ struct strbuf sb;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cmdline = nr;
+ strbuf_init(&sb, 128);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
}
+ ph->env.cmdline = strbuf_detach(&sb, NULL);
+ return 0;
- switch (feat) {
- case HEADER_TRACE_INFO:
- trace_report(fd, false);
- break;
+error:
+ strbuf_release(&sb);
+ return -1;
+}
- case HEADER_BUILD_ID:
- if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
- pr_debug("Failed to read buildids, continuing...\n");
- break;
- default:
- pr_debug("unknown feature %d, continuing...\n", feat);
+static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ u32 nr, i;
+ char *str;
+ struct strbuf sb;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_sibling_cores = nr;
+ strbuf_init(&sb, 128);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
}
+ ph->env.sibling_cores = strbuf_detach(&sb, NULL);
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+ ph->env.nr_sibling_threads = nr;
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.sibling_threads = strbuf_detach(&sb, NULL);
return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
}
-static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
- struct perf_header *ph, int fd,
- bool repipe)
+static int process_numa_topology(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
{
- if (do_read(fd, self, sizeof(*self)) <= 0 ||
- memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
+ ssize_t ret;
+ u32 nr, node, i;
+ char *str;
+ uint64_t mem_total, mem_free;
+ struct strbuf sb;
+
+ /* nr nodes */
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ goto error;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_numa_nodes = nr;
+ strbuf_init(&sb, 256);
+
+ for (i = 0; i < nr; i++) {
+ /* node number */
+ ret = readn(fd, &node, sizeof(node));
+ if (ret != sizeof(node))
+ goto error;
+
+ ret = readn(fd, &mem_total, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ ret = readn(fd, &mem_free, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ if (ph->needs_swap) {
+ node = bswap_32(node);
+ mem_total = bswap_64(mem_total);
+ mem_free = bswap_64(mem_free);
+ }
+
+ strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
+ node, mem_total, mem_free);
+
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.numa_nodes = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ char *name;
+ u32 pmu_num;
+ u32 type;
+ struct strbuf sb;
+
+ ret = readn(fd, &pmu_num, sizeof(pmu_num));
+ if (ret != sizeof(pmu_num))
return -1;
- if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
+ if (ph->needs_swap)
+ pmu_num = bswap_32(pmu_num);
+
+ if (!pmu_num) {
+ pr_debug("pmu mappings not available\n");
+ return 0;
+ }
+
+ ph->env.nr_pmu_mappings = pmu_num;
+ strbuf_init(&sb, 128);
+
+ while (pmu_num) {
+ if (readn(fd, &type, sizeof(type)) != sizeof(type))
+ goto error;
+ if (ph->needs_swap)
+ type = bswap_32(type);
+
+ name = do_read_string(fd, ph);
+ if (!name)
+ goto error;
+
+ strbuf_addf(&sb, "%u:%s", type, name);
+ /* include a NULL character at the end */
+ strbuf_add(&sb, "", 1);
+
+ free(name);
+ pmu_num--;
+ }
+ ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_group_desc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+	int ret = -1;
+ u32 i, nr, nr_groups;
+ struct perf_session *session;
+ struct perf_evsel *evsel, *leader = NULL;
+ struct group_desc {
+ char *name;
+ u32 leader_idx;
+ u32 nr_members;
+ } *desc;
+
+ if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
return -1;
- if (self->size != sizeof(*self)) {
- u64 size = bswap_64(self->size);
+ if (ph->needs_swap)
+ nr_groups = bswap_32(nr_groups);
- if (size != sizeof(*self))
- return -1;
+ ph->env.nr_groups = nr_groups;
+ if (!nr_groups) {
+ pr_debug("group desc not available\n");
+ return 0;
+ }
+
+ desc = calloc(nr_groups, sizeof(*desc));
+ if (!desc)
+ return -1;
- ph->needs_swap = true;
+ for (i = 0; i < nr_groups; i++) {
+ desc[i].name = do_read_string(fd, ph);
+ if (!desc[i].name)
+ goto out_free;
+
+ if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
+ goto out_free;
+
+ if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
+ goto out_free;
+
+ if (ph->needs_swap) {
+ desc[i].leader_idx = bswap_32(desc[i].leader_idx);
+ desc[i].nr_members = bswap_32(desc[i].nr_members);
+ }
}
- return 0;
+ /*
+ * Rebuild group relationship based on the group_desc
+ */
+ session = container_of(ph, struct perf_session, header);
+ session->evlist->nr_groups = nr_groups;
+
+ i = nr = 0;
+ evlist__for_each(session->evlist, evsel) {
+ if (evsel->idx == (int) desc[i].leader_idx) {
+ evsel->leader = evsel;
+ /* {anon_group} is a dummy name */
+ if (strcmp(desc[i].name, "{anon_group}")) {
+ evsel->group_name = desc[i].name;
+ desc[i].name = NULL;
+ }
+ evsel->nr_members = desc[i].nr_members;
+
+ if (i >= nr_groups || nr > 0) {
+ pr_debug("invalid group desc\n");
+ goto out_free;
+ }
+
+ leader = evsel;
+ nr = evsel->nr_members - 1;
+ i++;
+ } else if (nr) {
+ /* This is a group member */
+ evsel->leader = leader;
+
+ nr--;
+ }
+ }
+
+ if (i != nr_groups || nr != 0) {
+ pr_debug("invalid group desc\n");
+ goto out_free;
+ }
+
+ ret = 0;
+out_free:
+ for (i = 0; i < nr_groups; i++)
+ zfree(&desc[i].name);
+ free(desc);
+
+ return ret;
}
-static int perf_header__read_pipe(struct perf_session *session, int fd)
+struct feature_ops {
+ int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
+ void (*print)(struct perf_header *h, int fd, FILE *fp);
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *h, int fd, void *data);
+ const char *name;
+ bool full_only;
+};
+
+#define FEAT_OPA(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func }
+#define FEAT_OPP(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, \
+ .process = process_##func }
+#define FEAT_OPF(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, \
+ .process = process_##func, .full_only = true }
+
+/* feature_ops not implemented: */
+#define print_tracing_data NULL
+#define print_build_id NULL
+
+static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
+ FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
+ FEAT_OPP(HEADER_BUILD_ID, build_id),
+ FEAT_OPP(HEADER_HOSTNAME, hostname),
+ FEAT_OPP(HEADER_OSRELEASE, osrelease),
+ FEAT_OPP(HEADER_VERSION, version),
+ FEAT_OPP(HEADER_ARCH, arch),
+ FEAT_OPP(HEADER_NRCPUS, nrcpus),
+ FEAT_OPP(HEADER_CPUDESC, cpudesc),
+ FEAT_OPP(HEADER_CPUID, cpuid),
+ FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
+ FEAT_OPP(HEADER_EVENT_DESC, event_desc),
+ FEAT_OPP(HEADER_CMDLINE, cmdline),
+ FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
+ FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
+ FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
+ FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
+ FEAT_OPP(HEADER_GROUP_DESC, group_desc),
+};
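To show how this table is meant to grow, here is a hedged sketch of wiring up a hypothetical HEADER_FOO feature; the foo names and the env.foo field are invented for the example and do not exist in the tree:

static int write_foo(int fd, struct perf_header *h __maybe_unused,
		     struct perf_evlist *evlist __maybe_unused)
{
	/* payload of the hypothetical FOO section */
	return do_write_string(fd, "foo payload");
}

static void print_foo(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# foo : %s\n", ph->env.foo);
}

static int process_foo(struct perf_file_section *section __maybe_unused,
		       struct perf_header *ph, int fd,
		       void *data __maybe_unused)
{
	ph->env.foo = do_read_string(fd, ph);
	return ph->env.foo ? 0 : -ENOMEM;
}

plus one extra line in feat_ops[]:

	FEAT_OPP(HEADER_FOO, foo),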
+
+struct header_print_data {
+ FILE *fp;
+ bool full; /* extended list of headers */
+};
+
+static int perf_file_section__fprintf_info(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data)
{
- struct perf_header *self = &session->header;
- struct perf_pipe_file_header f_header;
+ struct header_print_data *hd = data;
- if (perf_file_header__read_pipe(&f_header, self, fd,
- session->repipe) < 0) {
- pr_debug("incompatible file format\n");
- return -EINVAL;
+ if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", section->offset, feat);
+ return 0;
+ }
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_warning("unknown feature %d\n", feat);
+ return 0;
}
+ if (!feat_ops[feat].print)
+ return 0;
- session->fd = fd;
+ if (!feat_ops[feat].full_only || hd->full)
+ feat_ops[feat].print(ph, fd, hd->fp);
+ else
+ fprintf(hd->fp, "# %s info available, use -I to display\n",
+ feat_ops[feat].name);
return 0;
}
-int perf_header__read(struct perf_session *session, int fd)
+int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
- struct perf_header *self = &session->header;
- struct perf_file_header f_header;
- struct perf_file_attr f_attr;
- u64 f_id;
- int nr_attrs, nr_ids, i, j;
+ struct header_print_data hd;
+ struct perf_header *header = &session->header;
+ int fd = perf_data_file__fd(session->file);
+ hd.fp = fp;
+ hd.full = full;
+
+ perf_header__process_sections(header, fd, &hd,
+ perf_file_section__fprintf_info);
+ return 0;
+}
- if (session->fd_pipe)
- return perf_header__read_pipe(session, fd);
+static int do_write_feat(int fd, struct perf_header *h, int type,
+ struct perf_file_section **p,
+ struct perf_evlist *evlist)
+{
+ int err;
+ int ret = 0;
- if (perf_file_header__read(&f_header, self, fd) < 0) {
- pr_debug("incompatible file format\n");
- return -EINVAL;
+ if (perf_header__has_feat(h, type)) {
+ if (!feat_ops[type].write)
+ return -1;
+
+ (*p)->offset = lseek(fd, 0, SEEK_CUR);
+
+ err = feat_ops[type].write(fd, h, evlist);
+ if (err < 0) {
+ pr_debug("failed to write feature %d\n", type);
+
+ /* undo anything written */
+ lseek(fd, (*p)->offset, SEEK_SET);
+
+ return -1;
+ }
+ (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
+ (*p)++;
}
+ return ret;
+}
- nr_attrs = f_header.attrs.size / sizeof(f_attr);
- lseek(fd, f_header.attrs.offset, SEEK_SET);
+static int perf_header__adds_write(struct perf_header *header,
+ struct perf_evlist *evlist, int fd)
+{
+ int nr_sections;
+ struct perf_file_section *feat_sec, *p;
+ int sec_size;
+ u64 sec_start;
+ int feat;
+ int err;
- for (i = 0; i < nr_attrs; i++) {
- struct perf_header_attr *attr;
- off_t tmp;
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
- if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
- goto out_errno;
+ feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
+ if (feat_sec == NULL)
+ return -ENOMEM;
- tmp = lseek(fd, 0, SEEK_CUR);
+ sec_size = sizeof(*feat_sec) * nr_sections;
- attr = perf_header_attr__new(&f_attr.attr);
- if (attr == NULL)
- return -ENOMEM;
+ sec_start = header->feat_offset;
+ lseek(fd, sec_start + sec_size, SEEK_SET);
- nr_ids = f_attr.ids.size / sizeof(u64);
- lseek(fd, f_attr.ids.offset, SEEK_SET);
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (do_write_feat(fd, header, feat, &p, evlist))
+ perf_header__clear_feat(header, feat);
+ }
- for (j = 0; j < nr_ids; j++) {
- if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
- goto out_errno;
+ lseek(fd, sec_start, SEEK_SET);
+ /*
+	 * We may write more than needed due to a dropped feature, but
+	 * this is okay: the reader will skip the missing entries.
+ */
+ err = do_write(fd, feat_sec, sec_size);
+ if (err < 0)
+ pr_debug("failed to write feature section\n");
+ free(feat_sec);
+ return err;
+}
- if (perf_header_attr__add_id(attr, f_id) < 0) {
- perf_header_attr__delete(attr);
- return -ENOMEM;
- }
+int perf_header__write_pipe(int fd)
+{
+ struct perf_pipe_file_header f_header;
+ int err;
+
+ f_header = (struct perf_pipe_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ };
+
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf pipe header\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header *header = &session->header;
+ struct perf_evsel *evsel;
+ u64 attr_offset;
+ int err;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+ evlist__for_each(session->evlist, evsel) {
+ evsel->id_offset = lseek(fd, 0, SEEK_CUR);
+ err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
}
- if (perf_header__add_attr(self, attr) < 0) {
- perf_header_attr__delete(attr);
- return -ENOMEM;
+ }
+
+ attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ evlist__for_each(evlist, evsel) {
+ f_attr = (struct perf_file_attr){
+ .attr = evsel->attr,
+ .ids = {
+ .offset = evsel->id_offset,
+ .size = evsel->ids * sizeof(u64),
+ }
+ };
+ err = do_write(fd, &f_attr, sizeof(f_attr));
+ if (err < 0) {
+ pr_debug("failed to write perf header attribute\n");
+ return err;
}
+ }
- lseek(fd, tmp, SEEK_SET);
+ if (!header->data_offset)
+ header->data_offset = lseek(fd, 0, SEEK_CUR);
+ header->feat_offset = header->data_offset + header->data_size;
+
+ if (at_exit) {
+ err = perf_header__adds_write(header, evlist, fd);
+ if (err < 0)
+ return err;
}
- if (f_header.event_types.size) {
- lseek(fd, f_header.event_types.offset, SEEK_SET);
- events = malloc(f_header.event_types.size);
- if (events == NULL)
- return -ENOMEM;
- if (perf_header__getbuffer64(self, fd, events,
- f_header.event_types.size))
- goto out_errno;
- event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = attr_offset,
+ .size = evlist->nr_entries * sizeof(f_attr),
+ },
+ .data = {
+ .offset = header->data_offset,
+ .size = header->data_size,
+ },
+ /* event_types is ignored, store zeros */
+ };
+
+ memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
+
+ lseek(fd, 0, SEEK_SET);
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
}
+ lseek(fd, header->data_offset + header->data_size, SEEK_SET);
- perf_header__process_sections(self, fd, perf_file_section__process);
+ return 0;
+}
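Putting the writer above and the reader below together, a (non-pipe) perf.data file ends up laid out roughly like this (a simplification based on the code in this patch):

	offset 0          struct perf_file_header (magic, sizes, attrs/data
	                  file sections, adds_features bitmap)
	after f_header    per-event id arrays (u64[]), offsets remembered in
	                  each evsel->id_offset
	attr_offset       array of struct perf_file_attr, one per event
	data_offset       the recorded sample data (data_size bytes)
	feat_offset       array of struct perf_file_section, one per set
	(= data_offset    feature bit, followed by the feature payloads
	   + data_size)   written by perf_header__adds_write()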
- lseek(fd, self->data_offset, SEEK_SET);
+static int perf_header__getbuffer64(struct perf_header *header,
+ int fd, void *buf, size_t size)
+{
+ if (readn(fd, buf, size) <= 0)
+ return -1;
+
+ if (header->needs_swap)
+ mem_bswap_64(buf, size);
- self->frozen = 1;
return 0;
-out_errno:
- return -errno;
}
-u64 perf_header__sample_type(struct perf_header *header)
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data))
{
- u64 type = 0;
- int i;
+ struct perf_file_section *feat_sec, *sec;
+ int nr_sections;
+ int sec_size;
+ int feat;
+ int err;
- for (i = 0; i < header->attrs; i++) {
- struct perf_header_attr *attr = header->attr[i];
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
- if (!type)
- type = attr->attr.sample_type;
- else if (type != attr->attr.sample_type)
- die("non matching sample_type");
+ feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
+ if (!feat_sec)
+ return -1;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ lseek(fd, header->feat_offset, SEEK_SET);
+
+ err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+ if (err < 0)
+ goto out_free;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ err = process(sec++, header, feat, fd, data);
+ if (err < 0)
+ goto out_free;
}
+ err = 0;
+out_free:
+ free(feat_sec);
+ return err;
+}
- return type;
+static const int attr_file_abi_sizes[] = {
+ [0] = PERF_ATTR_SIZE_VER0,
+ [1] = PERF_ATTR_SIZE_VER1,
+ [2] = PERF_ATTR_SIZE_VER2,
+ [3] = PERF_ATTR_SIZE_VER3,
+ 0,
+};
+
+/*
+ * In the legacy file format, the magic number does not encode endianness;
+ * hdr_sz was used for that instead. But given that hdr_sz can vary based
+ * on ABI revisions, we need to try all known sizes in both byte orders to
+ * detect the endianness.
+ */
+static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
+{
+ uint64_t ref_size, attr_size;
+ int i;
+
+ for (i = 0 ; attr_file_abi_sizes[i]; i++) {
+ ref_size = attr_file_abi_sizes[i]
+ + sizeof(struct perf_file_section);
+ if (hdr_sz != ref_size) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != ref_size)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
+ i,
+ ph->needs_swap);
+ return 0;
+ }
+ /* could not determine endianness */
+ return -1;
}
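A worked example of the arithmetic above (sizes as defined by the perf ABI headers, quoted here for illustration): PERF_ATTR_SIZE_VER0 is 64 bytes and struct perf_file_section is two u64s, so for ABI0

	ref_size = 64 + 16 = 80 bytes
	hdr_sz == 80            -> ABI0 file, native endianness
	bswap_64(hdr_sz) == 80  -> ABI0 file, needs_swap = true

and likewise for the later PERF_ATTR_SIZE_VERx values.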
-struct perf_event_attr *
-perf_header__find_attr(u64 id, struct perf_header *header)
+#define PERF_PIPE_HDR_VER0 16
+
+static const size_t attr_pipe_abi_sizes[] = {
+ [0] = PERF_PIPE_HDR_VER0,
+ 0,
+};
+
+/*
+ * In the legacy pipe format, there is an implicit assumption that the
+ * endianness of the host recording the samples and the host parsing them is
+ * the same. This is not always the case, since the pipe output may be
+ * redirected into a file and analyzed on a different machine, possibly with a
+ * different endianness and perf_event ABI revision in the perf tool itself.
+ */
+static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
+ u64 attr_size;
int i;
+ for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
+ if (hdr_sz != attr_pipe_abi_sizes[i]) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != hdr_sz)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("Pipe ABI%d perf.data file detected\n", i);
+ return 0;
+ }
+ return -1;
+}
+
+bool is_perf_magic(u64 magic)
+{
+ if (!memcmp(&magic, __perf_magic1, sizeof(magic))
+ || magic == __perf_magic2
+ || magic == __perf_magic2_sw)
+ return true;
+
+ return false;
+}
+
+static int check_magic_endian(u64 magic, uint64_t hdr_sz,
+ bool is_pipe, struct perf_header *ph)
+{
+ int ret;
+
+ /* check for legacy format */
+ ret = memcmp(&magic, __perf_magic1, sizeof(magic));
+ if (ret == 0) {
+ ph->version = PERF_HEADER_VERSION_1;
+ pr_debug("legacy perf.data format\n");
+ if (is_pipe)
+ return try_all_pipe_abis(hdr_sz, ph);
+
+ return try_all_file_abis(hdr_sz, ph);
+ }
/*
- * We set id to -1 if the data file doesn't contain sample
- * ids. Check for this and avoid walking through the entire
- * list of ids which may be large.
+ * the new magic number serves two purposes:
+ * - unique number to identify actual perf.data files
+ * - encode endianness of file
*/
- if (id == -1ULL)
- return NULL;
- for (i = 0; i < header->attrs; i++) {
- struct perf_header_attr *attr = header->attr[i];
- int j;
+ /* check magic number with one endianness */
+ if (magic == __perf_magic2)
+ return 0;
+
+ /* check magic number with opposite endianness */
+ if (magic != __perf_magic2_sw)
+ return -1;
+
+ ph->needs_swap = true;
+ ph->version = PERF_HEADER_VERSION_2;
+
+ return 0;
+}
+
+int perf_file_header__read(struct perf_file_header *header,
+ struct perf_header *ph, int fd)
+{
+ ssize_t ret;
+
+ lseek(fd, 0, SEEK_SET);
+
+ ret = readn(fd, header, sizeof(*header));
+ if (ret <= 0)
+ return -1;
+
+ if (check_magic_endian(header->magic,
+ header->attr_size, false, ph) < 0) {
+ pr_debug("magic/endian check failed\n");
+ return -1;
+ }
- for (j = 0; j < attr->ids; j++) {
- if (attr->id[j] == id)
- return &attr->attr;
+ if (ph->needs_swap) {
+ mem_bswap_64(header, offsetof(struct perf_file_header,
+ adds_features));
+ }
+
+ if (header->size != sizeof(*header)) {
+ /* Support the previous format */
+ if (header->size == offsetof(typeof(*header), adds_features))
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ else
+ return -1;
+ } else if (ph->needs_swap) {
+ /*
+ * feature bitmap is declared as an array of unsigned longs --
+ * not good since its size can differ between the host that
+ * generated the data file and the host analyzing the file.
+ *
+ * We need to handle endianness, but we don't know the size of
+ * the unsigned long where the file was generated. Take a best
+		 * guess at determining it: try 64-bit swap first (i.e., file
+		 * created on a 64-bit host), and check if the hostname feature
+		 * bit is set (this feature bit is forced on as of fbe96f2).
+		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
+		 * swap. If the hostname bit is still not set (e.g., older data
+		 * file), punt and fall back to the original behavior --
+ * clearing all feature bits and setting buildid.
+ */
+ mem_bswap_64(&header->adds_features,
+ BITS_TO_U64(HEADER_FEAT_BITS));
+
+ if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
+ /* unswap as u64 */
+ mem_bswap_64(&header->adds_features,
+ BITS_TO_U64(HEADER_FEAT_BITS));
+
+ /* unswap as u32 */
+ mem_bswap_32(&header->adds_features,
+ BITS_TO_U32(HEADER_FEAT_BITS));
+ }
+
+ if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ set_bit(HEADER_BUILD_ID, header->adds_features);
}
}
- return NULL;
+ memcpy(&ph->adds_features, &header->adds_features,
+ sizeof(ph->adds_features));
+
+ ph->data_offset = header->data.offset;
+ ph->data_size = header->data.size;
+ ph->feat_offset = header->data.offset + header->data.size;
+ return 0;
}
-int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- event__handler_t process,
- struct perf_session *session)
+static int perf_file_section__process(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data)
{
- event_t *ev;
- size_t size;
- int err;
+ if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", section->offset, feat);
+ return 0;
+ }
- size = sizeof(struct perf_event_attr);
- size = ALIGN(size, sizeof(u64));
- size += sizeof(struct perf_event_header);
- size += ids * sizeof(u64);
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_debug("unknown feature %d, continuing...\n", feat);
+ return 0;
+ }
- ev = malloc(size);
+ if (!feat_ops[feat].process)
+ return 0;
- ev->attr.attr = *attr;
- memcpy(ev->attr.id, id, ids * sizeof(u64));
+ return feat_ops[feat].process(section, ph, fd, data);
+}
- ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
- ev->attr.header.size = size;
+static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
+ struct perf_header *ph, int fd,
+ bool repipe)
+{
+ ssize_t ret;
- err = process(ev, session);
+ ret = readn(fd, header, sizeof(*header));
+ if (ret <= 0)
+ return -1;
- free(ev);
+ if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
+ pr_debug("endian/magic failed\n");
+ return -1;
+ }
- return err;
+ if (ph->needs_swap)
+ header->size = bswap_64(header->size);
+
+ if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
+ return -1;
+
+ return 0;
}
-int event__synthesize_attrs(struct perf_header *self,
- event__handler_t process,
- struct perf_session *session)
+static int perf_header__read_pipe(struct perf_session *session)
{
- struct perf_header_attr *attr;
- int i, err = 0;
+ struct perf_header *header = &session->header;
+ struct perf_pipe_file_header f_header;
- for (i = 0; i < self->attrs; i++) {
- attr = self->attr[i];
+ if (perf_file_header__read_pipe(&f_header, header,
+ perf_data_file__fd(session->file),
+ session->repipe) < 0) {
+ pr_debug("incompatible file format\n");
+ return -EINVAL;
+ }
- err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
- process, session);
- if (err) {
- pr_debug("failed to create perf header attribute\n");
- return err;
- }
+ return 0;
+}
+
+static int read_attr(int fd, struct perf_header *ph,
+ struct perf_file_attr *f_attr)
+{
+ struct perf_event_attr *attr = &f_attr->attr;
+ size_t sz, left;
+ size_t our_sz = sizeof(f_attr->attr);
+ ssize_t ret;
+
+ memset(f_attr, 0, sizeof(*f_attr));
+
+ /* read minimal guaranteed structure */
+ ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
+ if (ret <= 0) {
+ pr_debug("cannot read %d bytes of header attr\n",
+ PERF_ATTR_SIZE_VER0);
+ return -1;
}
- return err;
+ /* on file perf_event_attr size */
+ sz = attr->size;
+
+ if (ph->needs_swap)
+ sz = bswap_32(sz);
+
+ if (sz == 0) {
+ /* assume ABI0 */
+ sz = PERF_ATTR_SIZE_VER0;
+ } else if (sz > our_sz) {
+ pr_debug("file uses a more recent and unsupported ABI"
+ " (%zu bytes extra)\n", sz - our_sz);
+ return -1;
+ }
+ /* what we have not yet read and that we know about */
+ left = sz - PERF_ATTR_SIZE_VER0;
+ if (left) {
+ void *ptr = attr;
+ ptr += PERF_ATTR_SIZE_VER0;
+
+ ret = readn(fd, ptr, left);
+ }
+ /* read perf_file_section, ids are read in caller */
+ ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
+
+ return ret <= 0 ? -1 : 0;
}
-int event__process_attr(event_t *self, struct perf_session *session)
+static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
+ struct pevent *pevent)
{
- struct perf_header_attr *attr;
- unsigned int i, ids, n_ids;
+ struct event_format *event;
+ char bf[128];
- attr = perf_header_attr__new(&self->attr.attr);
- if (attr == NULL)
- return -ENOMEM;
+ /* already prepared */
+ if (evsel->tp_format)
+ return 0;
- ids = self->header.size;
- ids -= (void *)&self->attr.id - (void *)self;
- n_ids = ids / sizeof(u64);
+ if (pevent == NULL) {
+ pr_debug("broken or missing trace data\n");
+ return -1;
+ }
- for (i = 0; i < n_ids; i++) {
- if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
- perf_header_attr__delete(attr);
- return -ENOMEM;
- }
+ event = pevent_find_event(pevent, evsel->attr.config);
+ if (event == NULL)
+ return -1;
+
+ if (!evsel->name) {
+ snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+ evsel->name = strdup(bf);
+ if (evsel->name == NULL)
+ return -1;
}
- if (perf_header__add_attr(&session->header, attr) < 0) {
- perf_header_attr__delete(attr);
+ evsel->tp_format = event;
+ return 0;
+}
+
+static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
+ struct pevent *pevent)
+{
+ struct perf_evsel *pos;
+
+ evlist__for_each(evlist, pos) {
+ if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
+ perf_evsel__prepare_tracepoint_event(pos, pevent))
+ return -1;
+ }
+
+ return 0;
+}
+
+int perf_session__read_header(struct perf_session *session)
+{
+ struct perf_data_file *file = session->file;
+ struct perf_header *header = &session->header;
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+ int nr_attrs, nr_ids, i, j;
+ int fd = perf_data_file__fd(file);
+
+ session->evlist = perf_evlist__new();
+ if (session->evlist == NULL)
return -ENOMEM;
+
+ if (perf_data_file__is_pipe(file))
+ return perf_header__read_pipe(session);
+
+ if (perf_file_header__read(&f_header, header, fd) < 0)
+ return -EINVAL;
+
+ /*
+ * Sanity check that perf.data was written cleanly; data size is
+ * initialized to 0 and updated only if the on_exit function is run.
+	 * If the data size is still 0 then the file contains only partial
+	 * information. Just warn the user and process as much of it as we can.
+ */
+ if (f_header.data.size == 0) {
+ pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
+ "Was the 'perf record' command properly terminated?\n",
+ file->path);
}
- perf_session__update_sample_type(session);
+ nr_attrs = f_header.attrs.size / f_header.attr_size;
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_evsel *evsel;
+ off_t tmp;
+
+ if (read_attr(fd, header, &f_attr) < 0)
+ goto out_errno;
+
+ if (header->needs_swap)
+ perf_event__attr_swap(&f_attr.attr);
+
+ tmp = lseek(fd, 0, SEEK_CUR);
+ evsel = perf_evsel__new(&f_attr.attr);
+
+ if (evsel == NULL)
+ goto out_delete_evlist;
+
+ evsel->needs_swap = header->needs_swap;
+ /*
+		 * Do this before the id allocation so that if perf_evsel__alloc_id
+		 * fails, this entry gets purged too at perf_evlist__delete().
+ */
+ perf_evlist__add(session->evlist, evsel);
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ /*
+		 * We don't have the cpu and thread maps in the header, so
+ * for allocating the perf_sample_id table we fake 1 cpu and
+ * hattr->ids threads.
+ */
+ if (perf_evsel__alloc_id(evsel, 1, nr_ids))
+ goto out_delete_evlist;
+
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
+ goto out_errno;
+
+ perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
+ }
+
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ symbol_conf.nr_events = nr_attrs;
+
+ perf_header__process_sections(header, fd, &session->tevent,
+ perf_file_section__process);
+
+ if (perf_evlist__prepare_tracepoint_events(session->evlist,
+ session->tevent.pevent))
+ goto out_delete_evlist;
return 0;
+out_errno:
+ return -errno;
+
+out_delete_evlist:
+ perf_evlist__delete(session->evlist);
+ session->evlist = NULL;
+ return -ENOMEM;
}
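perf_session__read_header() walks on-disk { offset, size } sections: it remembers its position before following each attribute's id section and seeks back afterwards to continue with the next record. A hedged sketch of that seek-and-restore walk, using hypothetical names rather than the perf structures:

	#include <stdint.h>
	#include <unistd.h>

	struct my_section { uint64_t offset, size; };   /* mirrors an { offset, size } pair */

	static int read_id_section(int fd, const struct my_section *ids,
				   uint64_t *buf, size_t max)
	{
		off_t saved = lseek(fd, 0, SEEK_CUR);   /* position of the next record */
		size_t i, n = ids->size / sizeof(uint64_t);

		if (saved < 0 || n > max)
			return -1;
		if (lseek(fd, ids->offset, SEEK_SET) < 0)
			return -1;
		for (i = 0; i < n; i++)
			if (read(fd, &buf[i], sizeof(buf[i])) != sizeof(buf[i]))
				return -1;
		lseek(fd, saved, SEEK_SET);             /* resume with the next record */
		return (int)n;
	}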
-int event__synthesize_event_type(u64 event_id, char *name,
- event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u32 ids, u64 *id,
+ perf_event__handler_t process)
{
- event_t ev;
- size_t size = 0;
- int err = 0;
+ union perf_event *ev;
+ size_t size;
+ int err;
- memset(&ev, 0, sizeof(ev));
+ size = sizeof(struct perf_event_attr);
+ size = PERF_ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = malloc(size);
+
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
- ev.event_type.event_type.event_id = event_id;
- memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
- strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = (u16)size;
- ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
- size = strlen(name);
- size = ALIGN(size, sizeof(u64));
- ev.event_type.header.size = sizeof(ev.event_type) -
- (sizeof(ev.event_type.event_type.name) - size);
+ if (ev->attr.header.size == size)
+ err = process(tool, ev, NULL, NULL);
+ else
+ err = -E2BIG;
- err = process(&ev, session);
+ free(ev);
return err;
}
-int event__synthesize_event_types(event__handler_t process,
- struct perf_session *session)
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process)
{
- struct perf_trace_event_type *type;
- int i, err = 0;
-
- for (i = 0; i < event_count; i++) {
- type = &events[i];
+ struct perf_evsel *evsel;
+ int err = 0;
- err = event__synthesize_event_type(type->event_id, type->name,
- process, session);
+ evlist__for_each(session->evlist, evsel) {
+ err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
+ evsel->id, process);
if (err) {
- pr_debug("failed to create perf header event type\n");
+ pr_debug("failed to create perf header attribute\n");
return err;
}
}
@@ -1089,79 +2901,138 @@ int event__synthesize_event_types(event__handler_t process,
return err;
}
-int event__process_event_type(event_t *self,
- struct perf_session *session __unused)
+int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_evlist **pevlist)
{
- if (perf_header__push_event(self->event_type.event_type.event_id,
- self->event_type.event_type.name) < 0)
+ u32 i, ids, n_ids;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = *pevlist;
+
+ if (evlist == NULL) {
+ *pevlist = evlist = perf_evlist__new();
+ if (evlist == NULL)
+ return -ENOMEM;
+ }
+
+ evsel = perf_evsel__new(&event->attr.attr);
+ if (evsel == NULL)
+ return -ENOMEM;
+
+ perf_evlist__add(evlist, evsel);
+
+ ids = event->header.size;
+ ids -= (void *)&event->attr.id - (void *)event;
+ n_ids = ids / sizeof(u64);
+ /*
+	 * We don't have the cpu and thread maps in the header, so
+ * for allocating the perf_sample_id table we fake 1 cpu and
+ * hattr->ids threads.
+ */
+ if (perf_evsel__alloc_id(evsel, 1, n_ids))
return -ENOMEM;
+ for (i = 0; i < n_ids; i++) {
+ perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
+ }
+
+ symbol_conf.nr_events = evlist->nr_entries;
+
return 0;
}
-int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
- int nb_events,
- event__handler_t process,
- struct perf_session *session __unused)
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process)
{
- event_t ev;
+ union perf_event ev;
+ struct tracing_data *tdata;
ssize_t size = 0, aligned_size = 0, padding;
- int err = 0;
+ int err __maybe_unused = 0;
+
+ /*
+ * We are going to store the size of the data followed
+	 * by the data contents. Since the file descriptor is a pipe,
+ * we cannot seek back to store the size of the data once
+ * we know it. Instead we:
+ *
+ * - write the tracing data to the temp file
+ * - get/write the data size to pipe
+ * - write the tracing data from the temp file
+ * to the pipe
+ */
+ tdata = tracing_data_get(&evlist->entries, fd, true);
+ if (!tdata)
+ return -1;
memset(&ev, 0, sizeof(ev));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
- size = read_tracing_data_size(fd, pattrs, nb_events);
- if (size <= 0)
- return size;
- aligned_size = ALIGN(size, sizeof(u64));
+ size = tdata->size;
+ aligned_size = PERF_ALIGN(size, sizeof(u64));
padding = aligned_size - size;
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;
- process(&ev, session);
+ process(tool, &ev, NULL, NULL);
+
+ /*
+ * The put function will copy all the tracing data
+ * stored in temp file to the pipe.
+ */
+ tracing_data_put(tdata);
- err = read_tracing_data(fd, pattrs, nb_events);
write_padded(fd, NULL, 0, padding);
return aligned_size;
}
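The comment in perf_event__synthesize_tracing_data() describes why the data is staged: a pipe cannot be rewound to patch in the size once it is known, so the payload is written to a temporary file first, then its size and contents are streamed in order. A minimal stdio-based sketch of that stage-then-stream pattern (hypothetical helper, not the perf code):

	#include <stdio.h>
	#include <stdint.h>

	static int stream_with_size(FILE *out, FILE *tmp)
	{
		char buf[4096];
		uint64_t sz64;
		long size;
		size_t n;

		if (fflush(tmp) != 0 || (size = ftell(tmp)) < 0)
			return -1;                      /* size is known only after staging */

		sz64 = (uint64_t)size;
		if (fwrite(&sz64, sizeof(sz64), 1, out) != 1)   /* write the size first */
			return -1;

		rewind(tmp);                            /* then copy the staged payload */
		while ((n = fread(buf, 1, sizeof(buf), tmp)) > 0)
			if (fwrite(buf, 1, n, out) != n)
				return -1;
		return 0;
	}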
-int event__process_tracing_data(event_t *self,
- struct perf_session *session)
+int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
{
- ssize_t size_read, padding, size = self->tracing_data.size;
- off_t offset = lseek(session->fd, 0, SEEK_CUR);
+ ssize_t size_read, padding, size = event->tracing_data.size;
+ int fd = perf_data_file__fd(session->file);
+ off_t offset = lseek(fd, 0, SEEK_CUR);
char buf[BUFSIZ];
/* setup for reading amidst mmap */
- lseek(session->fd, offset + sizeof(struct tracing_data_event),
+ lseek(fd, offset + sizeof(struct tracing_data_event),
SEEK_SET);
- size_read = trace_report(session->fd, session->repipe);
-
- padding = ALIGN(size_read, sizeof(u64)) - size_read;
+ size_read = trace_report(fd, &session->tevent,
+ session->repipe);
+ padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
- if (read(session->fd, buf, padding) < 0)
- die("reading input file");
+ if (readn(fd, buf, padding) < 0) {
+ pr_err("%s: reading input file", __func__);
+ return -1;
+ }
if (session->repipe) {
int retw = write(STDOUT_FILENO, buf, padding);
- if (retw <= 0 || retw != padding)
- die("repiping tracing data padding");
+ if (retw <= 0 || retw != padding) {
+ pr_err("%s: repiping tracing data padding", __func__);
+ return -1;
+ }
}
- if (size_read + padding != size)
- die("tracing data size mismatch");
+ if (size_read + padding != size) {
+ pr_err("%s: tracing data size mismatch", __func__);
+ return -1;
+ }
+
+ perf_evlist__prepare_tracepoint_events(session->evlist,
+ session->tevent.pevent);
return size_read + padding;
}
-int event__synthesize_build_id(struct dso *pos, u16 misc,
- event__handler_t process,
- struct machine *machine,
- struct perf_session *session)
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine)
{
- event_t ev;
+ union perf_event ev;
size_t len;
int err = 0;
@@ -1171,7 +3042,7 @@ int event__synthesize_build_id(struct dso *pos, u16 misc,
memset(&ev, 0, sizeof(ev));
len = pos->long_name_len + 1;
- len = ALIGN(len, NAME_ALIGN);
+ len = PERF_ALIGN(len, NAME_ALIGN);
memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc;
@@ -1179,16 +3050,17 @@ int event__synthesize_build_id(struct dso *pos, u16 misc,
ev.build_id.header.size = sizeof(ev.build_id) + len;
memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
- err = process(&ev, session);
+ err = process(tool, &ev, NULL, machine);
return err;
}
-int event__process_build_id(event_t *self,
- struct perf_session *session)
+int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
{
- __event_process_build_id(&self->build_id,
- self->build_id.filename,
+ __event_process_build_id(&event->build_id,
+ event->build_id.filename,
session);
return 0;
}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 402ac2454cf..d08cfe49940 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,28 +1,43 @@
#ifndef __PERF_HEADER_H
#define __PERF_HEADER_H
-#include "../../../include/linux/perf_event.h"
+#include <linux/perf_event.h>
#include <sys/types.h>
#include <stdbool.h>
-#include "types.h"
-#include "event.h"
-
#include <linux/bitmap.h>
+#include <linux/types.h>
+#include "event.h"
-struct perf_header_attr {
- struct perf_event_attr attr;
- int ids, size;
- u64 *id;
- off_t id_offset;
-};
enum {
- HEADER_TRACE_INFO = 1,
+ HEADER_RESERVED = 0, /* always cleared */
+ HEADER_FIRST_FEATURE = 1,
+ HEADER_TRACING_DATA = 1,
HEADER_BUILD_ID,
+
+ HEADER_HOSTNAME,
+ HEADER_OSRELEASE,
+ HEADER_VERSION,
+ HEADER_ARCH,
+ HEADER_NRCPUS,
+ HEADER_CPUDESC,
+ HEADER_CPUID,
+ HEADER_TOTAL_MEM,
+ HEADER_CMDLINE,
+ HEADER_EVENT_DESC,
+ HEADER_CPU_TOPOLOGY,
+ HEADER_NUMA_TOPOLOGY,
+ HEADER_BRANCH_STACK,
+ HEADER_PMU_MAPPINGS,
+ HEADER_GROUP_DESC,
HEADER_LAST_FEATURE,
+ HEADER_FEAT_BITS = 256,
};
-#define HEADER_FEAT_BITS 256
+enum perf_header_version {
+ PERF_HEADER_VERSION_1,
+ PERF_HEADER_VERSION_2,
+};
struct perf_file_section {
u64 offset;
@@ -35,6 +50,7 @@ struct perf_file_header {
u64 attr_size;
struct perf_file_section attrs;
struct perf_file_section data;
+ /* event_types is ignored */
struct perf_file_section event_types;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
@@ -46,82 +62,98 @@ struct perf_pipe_file_header {
struct perf_header;
-int perf_file_header__read(struct perf_file_header *self,
+int perf_file_header__read(struct perf_file_header *header,
struct perf_header *ph, int fd);
+struct perf_session_env {
+ char *hostname;
+ char *os_release;
+ char *version;
+ char *arch;
+ int nr_cpus_online;
+ int nr_cpus_avail;
+ char *cpu_desc;
+ char *cpuid;
+ unsigned long long total_mem;
+
+ int nr_cmdline;
+ int nr_sibling_cores;
+ int nr_sibling_threads;
+ int nr_numa_nodes;
+ int nr_pmu_mappings;
+ int nr_groups;
+ char *cmdline;
+ char *sibling_cores;
+ char *sibling_threads;
+ char *numa_nodes;
+ char *pmu_mappings;
+};
+
struct perf_header {
- int frozen;
- int attrs, size;
- bool needs_swap;
- struct perf_header_attr **attr;
- s64 attr_offset;
- u64 data_offset;
- u64 data_size;
- u64 event_offset;
- u64 event_size;
+ enum perf_header_version version;
+ bool needs_swap;
+ u64 data_offset;
+ u64 data_size;
+ u64 feat_offset;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+ struct perf_session_env env;
};
-int perf_header__init(struct perf_header *self);
-void perf_header__exit(struct perf_header *self);
+struct perf_evlist;
+struct perf_session;
-int perf_header__read(struct perf_session *session, int fd);
-int perf_header__write(struct perf_header *self, int fd, bool at_exit);
+int perf_session__read_header(struct perf_session *session);
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit);
int perf_header__write_pipe(int fd);
-int perf_header__add_attr(struct perf_header *self,
- struct perf_header_attr *attr);
-
-int perf_header__push_event(u64 id, const char *name);
-char *perf_header__find_event(u64 id);
+void perf_header__set_feat(struct perf_header *header, int feat);
+void perf_header__clear_feat(struct perf_header *header, int feat);
+bool perf_header__has_feat(const struct perf_header *header, int feat);
-struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr);
-void perf_header_attr__delete(struct perf_header_attr *self);
+int perf_header__set_cmdline(int argc, const char **argv);
-int perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data));
-u64 perf_header__sample_type(struct perf_header *header);
-struct perf_event_attr *
-perf_header__find_attr(u64 id, struct perf_header *header);
-void perf_header__set_feat(struct perf_header *self, int feat);
-bool perf_header__has_feat(const struct perf_header *self, int feat);
-
-int perf_header__process_sections(struct perf_header *self, int fd,
- int (*process)(struct perf_file_section *self,
- struct perf_header *ph,
- int feat, int fd));
+int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms);
+ const char *name, bool is_kallsyms, bool is_vdso);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
-int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
- event__handler_t process,
- struct perf_session *session);
-int event__synthesize_attrs(struct perf_header *self,
- event__handler_t process,
- struct perf_session *session);
-int event__process_attr(event_t *self, struct perf_session *session);
-
-int event__synthesize_event_type(u64 event_id, char *name,
- event__handler_t process,
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u32 ids, u64 *id,
+ perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process);
+int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
+ struct perf_evlist **pevlist);
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+ int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process);
+int perf_event__process_tracing_data(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+ union perf_event *event,
struct perf_session *session);
-int event__synthesize_event_types(event__handler_t process,
- struct perf_session *session);
-int event__process_event_type(event_t *self,
- struct perf_session *session);
-
-int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
- int nb_events,
- event__handler_t process,
- struct perf_session *session);
-int event__process_tracing_data(event_t *self,
- struct perf_session *session);
-
-int event__synthesize_build_id(struct dso *pos, u16 misc,
- event__handler_t process,
- struct machine *machine,
- struct perf_session *session);
-int event__process_build_id(event_t *self, struct perf_session *session);
+bool is_perf_magic(u64 magic);
+
+/*
+ * arch specific callback
+ */
+int get_cpuid(char *buffer, size_t sz);
#endif /* __PERF_HEADER_H */
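perf_header__set_feat()/has_feat() declared above operate on the 256-bit adds_features bitmap, one bit per optional header section. A minimal sketch of the same idea using plain C bit operations instead of the kernel bitmap helpers; all names here are illustrative only:

	#include <stdbool.h>

	#define MY_FEAT_BITS 256

	struct my_header {
		unsigned long adds_features[MY_FEAT_BITS / (8 * sizeof(unsigned long))];
	};

	static void my_set_feat(struct my_header *h, int feat)
	{
		h->adds_features[feat / (8 * sizeof(unsigned long))] |=
			1UL << (feat % (8 * sizeof(unsigned long)));
	}

	static bool my_has_feat(const struct my_header *h, int feat)
	{
		return h->adds_features[feat / (8 * sizeof(unsigned long))] &
			(1UL << (feat % (8 * sizeof(unsigned long))));
	}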
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 6f2975a0035..86c37c47226 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -3,6 +3,7 @@
#include "exec_cmd.h"
#include "levenshtein.h"
#include "help.h"
+#include <termios.h>
void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
{
@@ -21,8 +22,8 @@ static void clean_cmdnames(struct cmdnames *cmds)
unsigned int i;
for (i = 0; i < cmds->cnt; ++i)
- free(cmds->names[i]);
- free(cmds->names);
+ zfree(&cmds->names[i]);
+ zfree(&cmds->names);
cmds->cnt = 0;
cmds->alloc = 0;
}
@@ -262,9 +263,8 @@ static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
for (i = 0; i < old->cnt; i++)
cmds->names[cmds->cnt++] = old->names[i];
- free(old->names);
+ zfree(&old->names);
old->cnt = 0;
- old->names = NULL;
}
const char *help_unknown_cmd(const char *cmd)
@@ -331,7 +331,8 @@ const char *help_unknown_cmd(const char *cmd)
exit(1);
}
-int cmd_version(int argc __used, const char **argv __used, const char *prefix __used)
+int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused,
+ const char *prefix __maybe_unused)
{
printf("perf version %s\n", perf_version_string);
return 0;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 2022e874099..30df6187ee0 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -3,111 +3,348 @@
#include "hist.h"
#include "session.h"
#include "sort.h"
+#include "evsel.h"
+#include "annotate.h"
#include <math.h>
-enum hist_filter {
- HIST_FILTER__DSO,
- HIST_FILTER__THREAD,
- HIST_FILTER__PARENT,
-};
+static bool hists__filter_entry_by_dso(struct hists *hists,
+ struct hist_entry *he);
+static bool hists__filter_entry_by_thread(struct hists *hists,
+ struct hist_entry *he);
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he);
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
- .min_percent = 0.5
+ .min_percent = 0.5,
+ .order = ORDER_CALLEE,
+ .key = CCKEY_FUNCTION
};
-u16 hists__col_len(struct hists *self, enum hist_column col)
+u16 hists__col_len(struct hists *hists, enum hist_column col)
{
- return self->col_len[col];
+ return hists->col_len[col];
}
-void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
+void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
- self->col_len[col] = len;
+ hists->col_len[col] = len;
}
-bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
+bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
- if (len > hists__col_len(self, col)) {
- hists__set_col_len(self, col, len);
+ if (len > hists__col_len(hists, col)) {
+ hists__set_col_len(hists, col, len);
return true;
}
return false;
}
-static void hists__reset_col_len(struct hists *self)
+void hists__reset_col_len(struct hists *hists)
{
enum hist_column col;
for (col = 0; col < HISTC_NR_COLS; ++col)
- hists__set_col_len(self, col, 0);
+ hists__set_col_len(hists, col, 0);
+}
+
+static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
+{
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+
+ if (hists__col_len(hists, dso) < unresolved_col_width &&
+ !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ !symbol_conf.dso_list)
+ hists__set_col_len(hists, dso, unresolved_col_width);
}
-static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
+void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+ int symlen;
u16 len;
- if (h->ms.sym)
- hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
+ /*
+ * +4 accounts for '[x] ' priv level info
+ * +2 accounts for 0x prefix on raw addresses
+ * +3 accounts for ' y ' symtab origin info
+ */
+ if (h->ms.sym) {
+ symlen = h->ms.sym->namelen + 4;
+ if (verbose)
+ symlen += BITS_PER_LONG / 4 + 2 + 3;
+ hists__new_col_len(hists, HISTC_SYMBOL, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO);
+ }
len = thread__comm_len(h->thread);
- if (hists__new_col_len(self, HISTC_COMM, len))
- hists__set_col_len(self, HISTC_THREAD, len + 6);
+ if (hists__new_col_len(hists, HISTC_COMM, len))
+ hists__set_col_len(hists, HISTC_THREAD, len + 6);
if (h->ms.map) {
len = dso__name_len(h->ms.map->dso);
- hists__new_col_len(self, HISTC_DSO, len);
+ hists__new_col_len(hists, HISTC_DSO, len);
}
+
+ if (h->parent)
+ hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
+
+ if (h->branch_info) {
+ if (h->branch_info->from.sym) {
+ symlen = (int)h->branch_info->from.sym->namelen + 4;
+ if (verbose)
+ symlen += BITS_PER_LONG / 4 + 2 + 3;
+ hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+
+ symlen = dso__name_len(h->branch_info->from.map->dso);
+ hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
+ }
+
+ if (h->branch_info->to.sym) {
+ symlen = (int)h->branch_info->to.sym->namelen + 4;
+ if (verbose)
+ symlen += BITS_PER_LONG / 4 + 2 + 3;
+ hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+
+ symlen = dso__name_len(h->branch_info->to.map->dso);
+ hists__new_col_len(hists, HISTC_DSO_TO, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
+ }
+ }
+
+ if (h->mem_info) {
+ if (h->mem_info->daddr.sym) {
+ symlen = (int)h->mem_info->daddr.sym->namelen + 4
+ + unresolved_col_width + 2;
+ hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
+ symlen);
+ hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+ symlen + 1);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
+ symlen);
+ }
+ if (h->mem_info->daddr.map) {
+ symlen = dso__name_len(h->mem_info->daddr.map->dso);
+ hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
+ symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
+ }
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
+ }
+
+ hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
+ hists__new_col_len(hists, HISTC_MEM_TLB, 22);
+ hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
+ hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
+ hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+
+ if (h->transaction)
+ hists__new_col_len(hists, HISTC_TRANSACTION,
+ hist_entry__transaction_len());
}
-static void hist_entry__add_cpumode_period(struct hist_entry *self,
- unsigned int cpumode, u64 period)
+void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+ int row = 0;
+
+ hists__reset_col_len(hists);
+
+ while (next && row++ < max_rows) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ if (!n->filtered)
+ hists__calc_col_len(hists, n);
+ next = rb_next(&n->rb_node);
+ }
+}
+
+static void he_stat__add_cpumode_period(struct he_stat *he_stat,
+ unsigned int cpumode, u64 period)
{
switch (cpumode) {
case PERF_RECORD_MISC_KERNEL:
- self->period_sys += period;
+ he_stat->period_sys += period;
break;
case PERF_RECORD_MISC_USER:
- self->period_us += period;
+ he_stat->period_us += period;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
- self->period_guest_sys += period;
+ he_stat->period_guest_sys += period;
break;
case PERF_RECORD_MISC_GUEST_USER:
- self->period_guest_us += period;
+ he_stat->period_guest_us += period;
break;
default:
break;
}
}
+static void he_stat__add_period(struct he_stat *he_stat, u64 period,
+ u64 weight)
+{
+
+ he_stat->period += period;
+ he_stat->weight += weight;
+ he_stat->nr_events += 1;
+}
+
+static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
+{
+ dest->period += src->period;
+ dest->period_sys += src->period_sys;
+ dest->period_us += src->period_us;
+ dest->period_guest_sys += src->period_guest_sys;
+ dest->period_guest_us += src->period_guest_us;
+ dest->nr_events += src->nr_events;
+ dest->weight += src->weight;
+}
+
+static void he_stat__decay(struct he_stat *he_stat)
+{
+ he_stat->period = (he_stat->period * 7) / 8;
+ he_stat->nr_events = (he_stat->nr_events * 7) / 8;
+ /* XXX need decay for weight too? */
+}
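For reference, he_stat__decay() multiplies the period by 7/8 on every pass, so after k passes a period P has decayed to roughly P*(7/8)^k: a period of 1024 becomes 896, then 784, then 686, and with integer arithmetic it eventually reaches zero, at which point the entry becomes eligible for removal in hists__decay_entries() below, provided no new samples arrive in the meantime.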
+
+static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
+{
+ u64 prev_period = he->stat.period;
+ u64 diff;
+
+ if (prev_period == 0)
+ return true;
+
+ he_stat__decay(&he->stat);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__decay(he->stat_acc);
+
+ diff = prev_period - he->stat.period;
+
+ hists->stats.total_period -= diff;
+ if (!he->filtered)
+ hists->stats.total_non_filtered_period -= diff;
+
+ return he->stat.period == 0;
+}
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+ /*
+		 * We may be annotating this, for instance, so keep it here in
+		 * case it gets new samples; we'll eventually free it when the
+		 * user stops browsing and it again gets fully decayed.
+ */
+ if (((zap_user && n->level == '.') ||
+ (zap_kernel && n->level != '.') ||
+ hists__decay_entry(hists, n)) &&
+ !n->used) {
+ rb_erase(&n->rb_node, &hists->entries);
+
+ if (sort__need_collapse)
+ rb_erase(&n->rb_node_in, &hists->entries_collapsed);
+
+ --hists->nr_entries;
+ if (!n->filtered)
+ --hists->nr_non_filtered_entries;
+
+ hist_entry__free(n);
+ }
+ }
+}
+
/*
* histogram, sorted on item, collects periods
*/
-static struct hist_entry *hist_entry__new(struct hist_entry *template)
+static struct hist_entry *hist_entry__new(struct hist_entry *template,
+ bool sample_self)
{
- size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
- struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
+ size_t callchain_size = 0;
+ struct hist_entry *he;
- if (self != NULL) {
- *self = *template;
- self->nr_events = 1;
- if (self->ms.map)
- self->ms.map->referenced = true;
- if (symbol_conf.use_callchain)
- callchain_init(self->callchain);
- }
+ if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+ callchain_size = sizeof(struct callchain_root);
- return self;
-}
+ he = zalloc(sizeof(*he) + callchain_size);
-static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
-{
- if (!h->filtered) {
- hists__calc_col_len(self, h);
- ++self->nr_entries;
+ if (he != NULL) {
+ *he = *template;
+
+ if (symbol_conf.cumulate_callchain) {
+ he->stat_acc = malloc(sizeof(he->stat));
+ if (he->stat_acc == NULL) {
+ free(he);
+ return NULL;
+ }
+ memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
+ if (!sample_self)
+ memset(&he->stat, 0, sizeof(he->stat));
+ }
+
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+
+ if (he->branch_info) {
+ /*
+			 * This branch info is (part of) an allocation made by
+			 * sample__resolve_bstack() and will be freed after adding
+			 * new entries, so we need to save a copy.
+ */
+ he->branch_info = malloc(sizeof(*he->branch_info));
+ if (he->branch_info == NULL) {
+ free(he->stat_acc);
+ free(he);
+ return NULL;
+ }
+
+ memcpy(he->branch_info, template->branch_info,
+ sizeof(*he->branch_info));
+
+ if (he->branch_info->from.map)
+ he->branch_info->from.map->referenced = true;
+ if (he->branch_info->to.map)
+ he->branch_info->to.map->referenced = true;
+ }
+
+ if (he->mem_info) {
+ if (he->mem_info->iaddr.map)
+ he->mem_info->iaddr.map->referenced = true;
+ if (he->mem_info->daddr.map)
+ he->mem_info->daddr.map->referenced = true;
+ }
+
+ if (symbol_conf.use_callchain)
+ callchain_init(he->callchain);
+
+ INIT_LIST_HEAD(&he->pairs.node);
}
+
+ return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
@@ -117,37 +354,55 @@ static u8 symbol__parent_filter(const struct symbol *parent)
return 0;
}
-struct hist_entry *__hists__add_entry(struct hists *self,
- struct addr_location *al,
- struct symbol *sym_parent, u64 period)
+static struct hist_entry *add_hist_entry(struct hists *hists,
+ struct hist_entry *entry,
+ struct addr_location *al,
+ bool sample_self)
{
- struct rb_node **p = &self->entries.rb_node;
+ struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
- struct hist_entry entry = {
- .thread = al->thread,
- .ms = {
- .map = al->map,
- .sym = al->sym,
- },
- .cpu = al->cpu,
- .ip = al->addr,
- .level = al->level,
- .period = period,
- .parent = sym_parent,
- .filtered = symbol__parent_filter(sym_parent),
- };
- int cmp;
+ int64_t cmp;
+ u64 period = entry->stat.period;
+ u64 weight = entry->stat.weight;
+
+ p = &hists->entries_in->rb_node;
while (*p != NULL) {
parent = *p;
- he = rb_entry(parent, struct hist_entry, rb_node);
+ he = rb_entry(parent, struct hist_entry, rb_node_in);
- cmp = hist_entry__cmp(&entry, he);
+ /*
+		 * Make sure that it receives arguments in the same order as
+		 * hist_entry__collapse() so that we can use an appropriate
+		 * function when searching an entry regardless of which sort
+		 * keys were used.
+ */
+ cmp = hist_entry__cmp(he, entry);
if (!cmp) {
- he->period += period;
- ++he->nr_events;
+ if (sample_self)
+ he_stat__add_period(&he->stat, period, weight);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__add_period(he->stat_acc, period, weight);
+
+ /*
+ * This mem info was allocated from sample__resolve_mem
+ * and will not be used anymore.
+ */
+ zfree(&entry->mem_info);
+
+ /* If the map of an existing hist_entry has
+ * become out-of-date due to an exec() or
+ * similar, update it. Otherwise we will
+ * mis-adjust symbol addresses when computing
+ * the history counter to increment.
+ */
+ if (he->ms.map != entry->ms.map) {
+ he->ms.map = entry->ms.map;
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+ }
goto out;
}
@@ -157,1022 +412,1003 @@ struct hist_entry *__hists__add_entry(struct hists *self,
p = &(*p)->rb_right;
}
- he = hist_entry__new(&entry);
+ he = hist_entry__new(entry, sample_self);
if (!he)
return NULL;
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &self->entries);
- hists__inc_nr_entries(self, he);
+
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
- hist_entry__add_cpumode_period(he, al->cpumode, period);
+ if (sample_self)
+ he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
return he;
}
-int64_t
-hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+struct hist_entry *__hists__add_entry(struct hists *hists,
+ struct addr_location *al,
+ struct symbol *sym_parent,
+ struct branch_info *bi,
+ struct mem_info *mi,
+ u64 period, u64 weight, u64 transaction,
+ bool sample_self)
{
- struct sort_entry *se;
- int64_t cmp = 0;
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- cmp = se->se_cmp(left, right);
- if (cmp)
- break;
- }
+ struct hist_entry entry = {
+ .thread = al->thread,
+ .comm = thread__comm(al->thread),
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
+ .cpu = al->cpu,
+ .cpumode = al->cpumode,
+ .ip = al->addr,
+ .level = al->level,
+ .stat = {
+ .nr_events = 1,
+ .period = period,
+ .weight = weight,
+ },
+ .parent = sym_parent,
+ .filtered = symbol__parent_filter(sym_parent) | al->filtered,
+ .hists = hists,
+ .branch_info = bi,
+ .mem_info = mi,
+ .transaction = transaction,
+ };
- return cmp;
+ return add_hist_entry(hists, &entry, al, sample_self);
}
-int64_t
-hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+static int
+iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
- struct sort_entry *se;
- int64_t cmp = 0;
+ return 0;
+}
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- int64_t (*f)(struct hist_entry *, struct hist_entry *);
+static int
+iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
+{
+ return 0;
+}
- f = se->se_collapse ?: se->se_cmp;
+static int
+iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct perf_sample *sample = iter->sample;
+ struct mem_info *mi;
- cmp = f(left, right);
- if (cmp)
- break;
- }
+ mi = sample__resolve_mem(sample, al);
+ if (mi == NULL)
+ return -ENOMEM;
- return cmp;
+ iter->priv = mi;
+ return 0;
}
-void hist_entry__free(struct hist_entry *he)
+static int
+iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
- free(he);
-}
+ u64 cost;
+ struct mem_info *mi = iter->priv;
+ struct hist_entry *he;
-/*
- * collapse the histogram
- */
+ if (mi == NULL)
+ return -EINVAL;
+
+ cost = iter->sample->weight;
+ if (!cost)
+ cost = 1;
+
+ /*
+	 * We must pass period=weight in order to get the correct
+	 * sorting from hists__collapse_resort(), which is based solely
+	 * on periods. We want sorting to be done on nr_events * weight,
+	 * and this is achieved indirectly by passing period=weight here
+	 * and in the he_stat__add_period() function.
+ */
+ he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
+ cost, cost, 0, true);
+ if (!he)
+ return -ENOMEM;
+
+ iter->he = he;
+ return 0;
+}
-static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
+static int
+iter_finish_mem_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
- int64_t cmp;
+ struct perf_evsel *evsel = iter->evsel;
+ struct hist_entry *he = iter->he;
+ int err = -EINVAL;
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
+ if (he == NULL)
+ goto out;
- cmp = hist_entry__collapse(iter, he);
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
- if (!cmp) {
- iter->period += he->period;
- if (symbol_conf.use_callchain)
- callchain_merge(iter->callchain, he->callchain);
- hist_entry__free(he);
- return false;
- }
+ err = hist_entry__append_callchain(he, iter->sample);
- if (cmp < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
+out:
+ /*
+ * We don't need to free iter->priv (mem_info) here since
+ * the mem info was either already freed in add_hist_entry() or
+ * passed to a new hist entry by hist_entry__new().
+ */
+ iter->priv = NULL;
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
- return true;
+ iter->he = NULL;
+ return err;
}
-void hists__collapse_resort(struct hists *self)
+static int
+iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
- struct rb_root tmp;
- struct rb_node *next;
- struct hist_entry *n;
+ struct branch_info *bi;
+ struct perf_sample *sample = iter->sample;
- if (!sort__need_collapse)
- return;
+ bi = sample__resolve_bstack(sample, al);
+ if (!bi)
+ return -ENOMEM;
- tmp = RB_ROOT;
- next = rb_first(&self->entries);
- self->nr_entries = 0;
- hists__reset_col_len(self);
+ iter->curr = 0;
+ iter->total = sample->branch_stack->nr;
- while (next) {
- n = rb_entry(next, struct hist_entry, rb_node);
- next = rb_next(&n->rb_node);
+ iter->priv = bi;
+ return 0;
+}
- rb_erase(&n->rb_node, &self->entries);
- if (collapse__insert_entry(&tmp, n))
- hists__inc_nr_entries(self, n);
- }
+static int
+iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
+{
+ /* to avoid calling callback function */
+ iter->he = NULL;
- self->entries = tmp;
+ return 0;
}
-/*
- * reverse the map, sort on period.
- */
-
-static void __hists__insert_output_entry(struct rb_root *entries,
- struct hist_entry *he,
- u64 min_callchain_hits)
+static int
+iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
- struct rb_node **p = &entries->rb_node;
- struct rb_node *parent = NULL;
- struct hist_entry *iter;
-
- if (symbol_conf.use_callchain)
- callchain_param.sort(&he->sorted_chain, he->callchain,
- min_callchain_hits, &callchain_param);
+ struct branch_info *bi = iter->priv;
+ int i = iter->curr;
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct hist_entry, rb_node);
+ if (bi == NULL)
+ return 0;
- if (he->period > iter->period)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
+ if (iter->curr >= iter->total)
+ return 0;
- rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, entries);
+ al->map = bi[i].to.map;
+ al->sym = bi[i].to.sym;
+ al->addr = bi[i].to.addr;
+ return 1;
}
-void hists__output_resort(struct hists *self)
+static int
+iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
- struct rb_root tmp;
- struct rb_node *next;
- struct hist_entry *n;
- u64 min_callchain_hits;
+ struct branch_info *bi;
+ struct perf_evsel *evsel = iter->evsel;
+ struct hist_entry *he = NULL;
+ int i = iter->curr;
+ int err = 0;
- min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
+ bi = iter->priv;
- tmp = RB_ROOT;
- next = rb_first(&self->entries);
+ if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
+ goto out;
- self->nr_entries = 0;
- hists__reset_col_len(self);
+ /*
+ * The report shows the percentage of total branches captured
+ * and not events sampled. Thus we use a pseudo period of 1.
+ */
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
+ 1, 1, 0, true);
+ if (he == NULL)
+ return -ENOMEM;
- while (next) {
- n = rb_entry(next, struct hist_entry, rb_node);
- next = rb_next(&n->rb_node);
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
- rb_erase(&n->rb_node, &self->entries);
- __hists__insert_output_entry(&tmp, n, min_callchain_hits);
- hists__inc_nr_entries(self, n);
- }
+out:
+ iter->he = he;
+ iter->curr++;
+ return err;
+}
+
+static int
+iter_finish_branch_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ zfree(&iter->priv);
+ iter->he = NULL;
- self->entries = tmp;
+ return iter->curr >= iter->total ? 0 : -1;
}
-static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
+static int
+iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
- int i;
- int ret = fprintf(fp, " ");
+ return 0;
+}
+
+static int
+iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+ struct hist_entry *he;
- for (i = 0; i < left_margin; i++)
- ret += fprintf(fp, " ");
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+ sample->period, sample->weight,
+ sample->transaction, true);
+ if (he == NULL)
+ return -ENOMEM;
- return ret;
+ iter->he = he;
+ return 0;
}
-static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
- int left_margin)
+static int
+iter_finish_normal_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
{
- int i;
- size_t ret = callchain__fprintf_left_margin(fp, left_margin);
+ struct hist_entry *he = iter->he;
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
- for (i = 0; i < depth; i++)
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "| ");
- else
- ret += fprintf(fp, " ");
+ if (he == NULL)
+ return 0;
+
+ iter->he = NULL;
- ret += fprintf(fp, "\n");
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
- return ret;
+ return hist_entry__append_callchain(he, sample);
}
-static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
- int depth, int depth_mask, int period,
- u64 total_samples, int hits,
- int left_margin)
+static int
+iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
- int i;
- size_t ret = 0;
+ struct hist_entry **he_cache;
- ret += callchain__fprintf_left_margin(fp, left_margin);
- for (i = 0; i < depth; i++) {
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "|");
- else
- ret += fprintf(fp, " ");
- if (!period && i == depth - 1) {
- double percent;
-
- percent = hits * 100.0 / total_samples;
- ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
- } else
- ret += fprintf(fp, "%s", " ");
- }
- if (chain->ms.sym)
- ret += fprintf(fp, "%s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
+ callchain_cursor_commit(&callchain_cursor);
- return ret;
-}
+ /*
+	 * This is for detecting cycles or recursion so that they're
+	 * cumulated only once, preventing entries from showing more
+	 * than 100% overhead.
+ */
+ he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
+ if (he_cache == NULL)
+ return -ENOMEM;
-static struct symbol *rem_sq_bracket;
-static struct callchain_list rem_hits;
+ iter->priv = he_cache;
+ iter->curr = 0;
+
+ return 0;
+}
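The he_cache allocated above is what makes the duplicate check in iter_add_next_cumulative_entry() possible; conceptually it is a per-sample "already seen" list, so a recursive frame is only cumulated once. A hedged sketch of that check with illustrative types (the real code compares full hist entries via hist_entry__cmp(), not just addresses):

	#include <stdbool.h>
	#include <stddef.h>

	struct frame { unsigned long ip; };

	static bool already_cumulated(struct frame **cache, size_t n, struct frame *f)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (cache[i]->ip == f->ip)      /* seen in this sample: skip it */
				return true;
		return false;
	}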
-static void init_rem_hits(void)
+static int
+iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al)
{
- rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
- if (!rem_sq_bracket) {
- fprintf(stderr, "Not enough memory to display remaining hits\n");
- return;
- }
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+ struct hist_entry **he_cache = iter->priv;
+ struct hist_entry *he;
+ int err = 0;
+
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+ sample->period, sample->weight,
+ sample->transaction, true);
+ if (he == NULL)
+ return -ENOMEM;
+
+ iter->he = he;
+ he_cache[iter->curr++] = he;
+
+ callchain_append(he->callchain, &callchain_cursor, sample->period);
+
+ /*
+ * We need to re-initialize the cursor since callchain_append()
+ * advanced the cursor to the end.
+ */
+ callchain_cursor_commit(&callchain_cursor);
+
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
- strcpy(rem_sq_bracket->name, "[...]");
- rem_hits.ms.sym = rem_sq_bracket;
+ return err;
}
-static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
- u64 total_samples, int depth,
- int depth_mask, int left_margin)
+static int
+iter_next_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al)
{
- struct rb_node *node, *next;
- struct callchain_node *child;
- struct callchain_list *chain;
- int new_depth_mask = depth_mask;
- u64 new_total;
- u64 remaining;
- size_t ret = 0;
- int i;
- uint entries_printed = 0;
+ struct callchain_cursor_node *node;
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = self->children_hit;
- else
- new_total = total_samples;
+ node = callchain_cursor_current(&callchain_cursor);
+ if (node == NULL)
+ return 0;
- remaining = new_total;
+ return fill_callchain_info(al, node, iter->hide_unresolved);
+}
- node = rb_first(&self->rb_root);
- while (node) {
- u64 cumul;
+static int
+iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al)
+{
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+ struct hist_entry **he_cache = iter->priv;
+ struct hist_entry *he;
+ struct hist_entry he_tmp = {
+ .cpu = al->cpu,
+ .thread = al->thread,
+ .comm = thread__comm(al->thread),
+ .ip = al->addr,
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
+ .parent = iter->parent,
+ };
+ int i;
+ struct callchain_cursor cursor;
- child = rb_entry(node, struct callchain_node, rb_node);
- cumul = cumul_hits(child);
- remaining -= cumul;
+ callchain_cursor_snapshot(&cursor, &callchain_cursor);
- /*
- * The depth mask manages the output of pipes that show
- * the depth. We don't want to keep the pipes of the current
- * level for the last child of this depth.
- * Except if we have remaining filtered hits. They will
- * supersede the last child
- */
- next = rb_next(node);
- if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
- new_depth_mask &= ~(1 << (depth - 1));
+ callchain_cursor_advance(&callchain_cursor);
- /*
- * But we keep the older depth mask for the line separator
- * to keep the level link until we reach the last child
- */
- ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
- left_margin);
- i = 0;
- list_for_each_entry(chain, &child->val, list) {
- ret += ipchain__fprintf_graph(fp, chain, depth,
- new_depth_mask, i++,
- new_total,
- cumul,
- left_margin);
+ /*
+	 * Check if there are duplicate entries in the callchain.
+	 * It's possible that it has cycles or recursive calls.
+ */
+ for (i = 0; i < iter->curr; i++) {
+ if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
+ /* to avoid calling callback function */
+ iter->he = NULL;
+ return 0;
}
- ret += __callchain__fprintf_graph(fp, child, new_total,
- depth + 1,
- new_depth_mask | (1 << depth),
- left_margin);
- node = next;
- if (++entries_printed == callchain_param.print_limit)
- break;
}
- if (callchain_param.mode == CHAIN_GRAPH_REL &&
- remaining && remaining != new_total) {
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+ sample->period, sample->weight,
+ sample->transaction, false);
+ if (he == NULL)
+ return -ENOMEM;
- if (!rem_sq_bracket)
- return ret;
+ iter->he = he;
+ he_cache[iter->curr++] = he;
- new_depth_mask &= ~(1 << (depth - 1));
+ callchain_append(he->callchain, &cursor, sample->period);
+ return 0;
+}
- ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
- new_depth_mask, 0, new_total,
- remaining, left_margin);
- }
+static int
+iter_finish_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ zfree(&iter->priv);
+ iter->he = NULL;
- return ret;
+ return 0;
}
-static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
- u64 total_samples, int left_margin)
+const struct hist_iter_ops hist_iter_mem = {
+ .prepare_entry = iter_prepare_mem_entry,
+ .add_single_entry = iter_add_single_mem_entry,
+ .next_entry = iter_next_nop_entry,
+ .add_next_entry = iter_add_next_nop_entry,
+ .finish_entry = iter_finish_mem_entry,
+};
+
+const struct hist_iter_ops hist_iter_branch = {
+ .prepare_entry = iter_prepare_branch_entry,
+ .add_single_entry = iter_add_single_branch_entry,
+ .next_entry = iter_next_branch_entry,
+ .add_next_entry = iter_add_next_branch_entry,
+ .finish_entry = iter_finish_branch_entry,
+};
+
+const struct hist_iter_ops hist_iter_normal = {
+ .prepare_entry = iter_prepare_normal_entry,
+ .add_single_entry = iter_add_single_normal_entry,
+ .next_entry = iter_next_nop_entry,
+ .add_next_entry = iter_add_next_nop_entry,
+ .finish_entry = iter_finish_normal_entry,
+};
+
+const struct hist_iter_ops hist_iter_cumulative = {
+ .prepare_entry = iter_prepare_cumulative_entry,
+ .add_single_entry = iter_add_single_cumulative_entry,
+ .next_entry = iter_next_cumulative_entry,
+ .add_next_entry = iter_add_next_cumulative_entry,
+ .finish_entry = iter_finish_cumulative_entry,
+};
+
+int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ struct perf_evsel *evsel, struct perf_sample *sample,
+ int max_stack_depth, void *arg)
{
- struct callchain_list *chain;
- bool printed = false;
- int i = 0;
- int ret = 0;
- u32 entries_printed = 0;
+ int err, err2;
- list_for_each_entry(chain, &self->val, list) {
- if (!i++ && sort__first_dimension == SORT_SYM)
- continue;
+ err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
+ max_stack_depth);
+ if (err)
+ return err;
- if (!printed) {
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "|\n");
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "---");
+ iter->evsel = evsel;
+ iter->sample = sample;
- left_margin += 3;
- printed = true;
- } else
- ret += callchain__fprintf_left_margin(fp, left_margin);
+ err = iter->ops->prepare_entry(iter, al);
+ if (err)
+ goto out;
- if (chain->ms.sym)
- ret += fprintf(fp, " %s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+ err = iter->ops->add_single_entry(iter, al);
+ if (err)
+ goto out;
- if (++entries_printed == callchain_param.print_limit)
+ if (iter->he && iter->add_entry_cb) {
+ err = iter->add_entry_cb(iter, al, true, arg);
+ if (err)
+ goto out;
+ }
+
+ while (iter->ops->next_entry(iter, al)) {
+ err = iter->ops->add_next_entry(iter, al);
+ if (err)
break;
+
+ if (iter->he && iter->add_entry_cb) {
+ err = iter->add_entry_cb(iter, al, false, arg);
+ if (err)
+ goto out;
+ }
}
- ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
+out:
+ err2 = iter->ops->finish_entry(iter, al);
+ if (!err)
+ err = err2;
- return ret;
+ return err;
}
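A hedged usage sketch of the iterator driver above: it assumes the caller has already resolved the sample into an addr_location, that "al", "evsel" and "sample" exist in the surrounding scope, and that the struct fields shown in this diff (ops, hide_unresolved) are sufficient for the simple case:

	struct hist_entry_iter iter = {
		.ops = symbol_conf.cumulate_callchain ?
			&hist_iter_cumulative : &hist_iter_normal,
		.hide_unresolved = false,
	};

	/* drive one resolved sample through prepare/add/next/finish */
	if (hist_entry_iter__add(&iter, &al, evsel, sample,
				 PERF_MAX_STACK_DEPTH, NULL) < 0)
		pr_debug("problem adding hist entry to the histogram\n");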
-static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
- u64 total_samples)
+int64_t
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
- struct callchain_list *chain;
- size_t ret = 0;
-
- if (!self)
- return 0;
-
- ret += callchain__fprintf_flat(fp, self->parent, total_samples);
-
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp = 0;
- list_for_each_entry(chain, &self->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
+ perf_hpp__for_each_sort_list(fmt) {
+ if (perf_hpp__should_skip(fmt))
continue;
- if (chain->ms.sym)
- ret += fprintf(fp, " %s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, " %p\n",
- (void *)(long)chain->ip);
+
+ cmp = fmt->cmp(left, right);
+ if (cmp)
+ break;
}
- return ret;
+ return cmp;
}
-static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
- u64 total_samples, int left_margin)
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
- struct rb_node *rb_node;
- struct callchain_node *chain;
- size_t ret = 0;
- u32 entries_printed = 0;
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp = 0;
- rb_node = rb_first(&self->sorted_chain);
- while (rb_node) {
- double percent;
+ perf_hpp__for_each_sort_list(fmt) {
+ if (perf_hpp__should_skip(fmt))
+ continue;
- chain = rb_entry(rb_node, struct callchain_node, rb_node);
- percent = chain->hit * 100.0 / total_samples;
- switch (callchain_param.mode) {
- case CHAIN_FLAT:
- ret += percent_color_fprintf(fp, " %6.2f%%\n",
- percent);
- ret += callchain__fprintf_flat(fp, chain, total_samples);
- break;
- case CHAIN_GRAPH_ABS: /* Falldown */
- case CHAIN_GRAPH_REL:
- ret += callchain__fprintf_graph(fp, chain, total_samples,
- left_margin);
- case CHAIN_NONE:
- default:
- break;
- }
- ret += fprintf(fp, "\n");
- if (++entries_printed == callchain_param.print_limit)
+ cmp = fmt->collapse(left, right);
+ if (cmp)
break;
- rb_node = rb_next(rb_node);
}
- return ret;
+ return cmp;
}
-int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
- struct hists *hists, struct hists *pair_hists,
- bool show_displacement, long displacement,
- bool color, u64 session_total)
+void hist_entry__free(struct hist_entry *he)
{
- struct sort_entry *se;
- u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
- const char *sep = symbol_conf.field_sep;
- int ret;
+ zfree(&he->branch_info);
+ zfree(&he->mem_info);
+ zfree(&he->stat_acc);
+ free_srcline(he->srcline);
+ free(he);
+}
- if (symbol_conf.exclude_other && !self->parent)
- return 0;
+/*
+ * collapse the histogram
+ */
- if (pair_hists) {
- period = self->pair ? self->pair->period : 0;
- total = pair_hists->stats.total_period;
- period_sys = self->pair ? self->pair->period_sys : 0;
- period_us = self->pair ? self->pair->period_us : 0;
- period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
- period_guest_us = self->pair ? self->pair->period_guest_us : 0;
- } else {
- period = self->period;
- total = session_total;
- period_sys = self->period_sys;
- period_us = self->period_us;
- period_guest_sys = self->period_guest_sys;
- period_guest_us = self->period_guest_us;
- }
+static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
+ struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ int64_t cmp;
- if (total) {
- if (color)
- ret = percent_color_snprintf(s, size,
- sep ? "%.2f" : " %6.2f%%",
- (period * 100.0) / total);
- else
- ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
- (period * 100.0) / total);
- if (symbol_conf.show_cpu_utilization) {
- ret += percent_color_snprintf(s + ret, size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_sys * 100.0) / total);
- ret += percent_color_snprintf(s + ret, size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_us * 100.0) / total);
- if (perf_guest) {
- ret += percent_color_snprintf(s + ret,
- size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_guest_sys * 100.0) /
- total);
- ret += percent_color_snprintf(s + ret,
- size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_guest_us * 100.0) /
- total);
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = hist_entry__collapse(iter, he);
+
+ if (!cmp) {
+ he_stat__add_stat(&iter->stat, &he->stat);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__add_stat(iter->stat_acc, he->stat_acc);
+
+ if (symbol_conf.use_callchain) {
+ callchain_cursor_reset(&callchain_cursor);
+ callchain_merge(&callchain_cursor,
+ iter->callchain,
+ he->callchain);
}
+ hist_entry__free(he);
+ return false;
}
- } else
- ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
- if (symbol_conf.show_nr_samples) {
- if (sep)
- ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
+ if (cmp < 0)
+ p = &(*p)->rb_left;
else
- ret += snprintf(s + ret, size - ret, "%11lld", period);
+ p = &(*p)->rb_right;
}
- if (pair_hists) {
- char bf[32];
- double old_percent = 0, new_percent = 0, diff;
-
- if (total > 0)
- old_percent = (period * 100.0) / total;
- if (session_total > 0)
- new_percent = (self->period * 100.0) / session_total;
-
- diff = new_percent - old_percent;
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, root);
+ return true;
+}
- if (fabs(diff) >= 0.01)
- snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
- else
- snprintf(bf, sizeof(bf), " ");
+static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+{
+ struct rb_root *root;
- if (sep)
- ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
- else
- ret += snprintf(s + ret, size - ret, "%11.11s", bf);
-
- if (show_displacement) {
- if (displacement)
- snprintf(bf, sizeof(bf), "%+4ld", displacement);
- else
- snprintf(bf, sizeof(bf), " ");
-
- if (sep)
- ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
- else
- ret += snprintf(s + ret, size - ret, "%6.6s", bf);
- }
- }
+ pthread_mutex_lock(&hists->lock);
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
- continue;
+ root = hists->entries_in;
+ if (++hists->entries_in > &hists->entries_in_array[1])
+ hists->entries_in = &hists->entries_in_array[0];
- ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
- ret += se->se_snprintf(self, s + ret, size - ret,
- hists__col_len(hists, se->se_width_idx));
- }
+ pthread_mutex_unlock(&hists->lock);
- return ret;
+ return root;
}
-int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
- struct hists *pair_hists, bool show_displacement,
- long displacement, FILE *fp, u64 session_total)
+static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
- char bf[512];
- hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
- show_displacement, displacement,
- true, session_total);
- return fprintf(fp, "%s\n", bf);
+ hists__filter_entry_by_dso(hists, he);
+ hists__filter_entry_by_thread(hists, he);
+ hists__filter_entry_by_symbol(hists, he);
}
-static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
- struct hists *hists, FILE *fp,
- u64 session_total)
+void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
- int left_margin = 0;
+ struct rb_root *root;
+ struct rb_node *next;
+ struct hist_entry *n;
- if (sort__first_dimension == SORT_COMM) {
- struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
- typeof(*se), list);
- left_margin = hists__col_len(hists, se->se_width_idx);
- left_margin -= thread__comm_len(self->thread);
- }
+ if (!sort__need_collapse)
+ return;
+
+ root = hists__get_rotate_entries_in(hists);
+ next = rb_first(root);
- return hist_entry_callchain__fprintf(fp, self, session_total,
- left_margin);
+ while (next) {
+ if (session_done())
+ break;
+ n = rb_entry(next, struct hist_entry, rb_node_in);
+ next = rb_next(&n->rb_node_in);
+
+ rb_erase(&n->rb_node_in, root);
+ if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+ /*
+ * If it wasn't combined with one of the entries already
+ * collapsed, we need to apply the filters that may have
+ * been set by, say, the hist_browser.
+ */
+ hists__apply_filters(hists, n);
+ }
+ if (prog)
+ ui_progress__update(prog, 1);
+ }
}
-size_t hists__fprintf(struct hists *self, struct hists *pair,
- bool show_displacement, FILE *fp)
+static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
- struct sort_entry *se;
- struct rb_node *nd;
- size_t ret = 0;
- unsigned long position = 1;
- long displacement = 0;
- unsigned int width;
- const char *sep = symbol_conf.field_sep;
- const char *col_width = symbol_conf.col_width_list_str;
-
- init_rem_hits();
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp = 0;
- fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
+ perf_hpp__for_each_sort_list(fmt) {
+ if (perf_hpp__should_skip(fmt))
+ continue;
- if (symbol_conf.show_nr_samples) {
- if (sep)
- fprintf(fp, "%cSamples", *sep);
- else
- fputs(" Samples ", fp);
+ cmp = fmt->sort(a, b);
+ if (cmp)
+ break;
}
- if (symbol_conf.show_cpu_utilization) {
- if (sep) {
- ret += fprintf(fp, "%csys", *sep);
- ret += fprintf(fp, "%cus", *sep);
- if (perf_guest) {
- ret += fprintf(fp, "%cguest sys", *sep);
- ret += fprintf(fp, "%cguest us", *sep);
- }
- } else {
- ret += fprintf(fp, " sys ");
- ret += fprintf(fp, " us ");
- if (perf_guest) {
- ret += fprintf(fp, " guest sys ");
- ret += fprintf(fp, " guest us ");
- }
- }
- }
+ return cmp;
+}
- if (pair) {
- if (sep)
- ret += fprintf(fp, "%cDelta", *sep);
- else
- ret += fprintf(fp, " Delta ");
+static void hists__reset_filter_stats(struct hists *hists)
+{
+ hists->nr_non_filtered_entries = 0;
+ hists->stats.total_non_filtered_period = 0;
+}
- if (show_displacement) {
- if (sep)
- ret += fprintf(fp, "%cDisplacement", *sep);
- else
- ret += fprintf(fp, " Displ");
- }
- }
+void hists__reset_stats(struct hists *hists)
+{
+ hists->nr_entries = 0;
+ hists->stats.total_period = 0;
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
- continue;
- if (sep) {
- fprintf(fp, "%c%s", *sep, se->se_header);
- continue;
- }
- width = strlen(se->se_header);
- if (symbol_conf.col_width_list_str) {
- if (col_width) {
- hists__set_col_len(self, se->se_width_idx,
- atoi(col_width));
- col_width = strchr(col_width, ',');
- if (col_width)
- ++col_width;
- }
- }
- if (!hists__new_col_len(self, se->se_width_idx, width))
- width = hists__col_len(self, se->se_width_idx);
- fprintf(fp, " %*s", width, se->se_header);
- }
- fprintf(fp, "\n");
-
- if (sep)
- goto print_entries;
-
- fprintf(fp, "# ........");
- if (symbol_conf.show_nr_samples)
- fprintf(fp, " ..........");
- if (pair) {
- fprintf(fp, " ..........");
- if (show_displacement)
- fprintf(fp, " .....");
- }
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- unsigned int i;
+ hists__reset_filter_stats(hists);
+}
- if (se->elide)
- continue;
+static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
+{
+ hists->nr_non_filtered_entries++;
+ hists->stats.total_non_filtered_period += h->stat.period;
+}
- fprintf(fp, " ");
- width = hists__col_len(self, se->se_width_idx);
- if (width == 0)
- width = strlen(se->se_header);
- for (i = 0; i < width; i++)
- fprintf(fp, ".");
- }
+void hists__inc_stats(struct hists *hists, struct hist_entry *h)
+{
+ if (!h->filtered)
+ hists__inc_filter_stats(hists, h);
- fprintf(fp, "\n#\n");
+ hists->nr_entries++;
+ hists->stats.total_period += h->stat.period;
+}
-print_entries:
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+static void __hists__insert_output_entry(struct rb_root *entries,
+ struct hist_entry *he,
+ u64 min_callchain_hits)
+{
+ struct rb_node **p = &entries->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
- if (show_displacement) {
- if (h->pair != NULL)
- displacement = ((long)h->pair->position -
- (long)position);
- else
- displacement = 0;
- ++position;
- }
- ret += hist_entry__fprintf(h, self, pair, show_displacement,
- displacement, fp, self->stats.total_period);
+ if (symbol_conf.use_callchain)
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
- if (symbol_conf.use_callchain)
- ret += hist_entry__fprintf_callchain(h, self, fp,
- self->stats.total_period);
- if (h->ms.map == NULL && verbose > 1) {
- __map_groups__fprintf_maps(&h->thread->mg,
- MAP__FUNCTION, verbose, fp);
- fprintf(fp, "%.10s end\n", graph_dotted_line);
- }
- }
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
- free(rem_sq_bracket);
+ if (hist_entry__sort(he, iter) > 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
- return ret;
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, entries);
}
-/*
- * See hists__fprintf to match the column widths
- */
-unsigned int hists__sort_list_width(struct hists *self)
+void hists__output_resort(struct hists *hists)
{
- struct sort_entry *se;
- int ret = 9; /* total % */
+ struct rb_root *root;
+ struct rb_node *next;
+ struct hist_entry *n;
+ u64 min_callchain_hits;
- if (symbol_conf.show_cpu_utilization) {
- ret += 7; /* count_sys % */
- ret += 6; /* count_us % */
- if (perf_guest) {
- ret += 13; /* count_guest_sys % */
- ret += 12; /* count_guest_us % */
- }
- }
+ min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
+
+ if (sort__need_collapse)
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
- if (symbol_conf.show_nr_samples)
- ret += 11;
+ next = rb_first(root);
+ hists->entries = RB_ROOT;
- list_for_each_entry(se, &hist_entry__sort_list, list)
- if (!se->elide)
- ret += 2 + hists__col_len(self, se->se_width_idx);
+ hists__reset_stats(hists);
+ hists__reset_col_len(hists);
- if (verbose) /* Addr + origin */
- ret += 3 + BITS_PER_LONG / 4;
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node_in);
+ next = rb_next(&n->rb_node_in);
+
+ __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
+ hists__inc_stats(hists, n);
- return ret;
+ if (!n->filtered)
+ hists__calc_col_len(hists, n);
+ }
}
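
The collapse and output passes above are meant to run back to back. A hypothetical caller sketch (not part of the patch; the wrapper name is made up and the perf util include path is assumed):

#include "hist.h"

static void evsel_hists__resort(struct hists *hists, struct ui_progress *prog)
{
	hists__collapse_resort(hists, prog);	/* merge entries that compare equal */
	hists__output_resort(hists);		/* order the survivors for display */
}
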
-static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
+static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
enum hist_filter filter)
{
h->filtered &= ~(1 << filter);
if (h->filtered)
return;
- ++self->nr_entries;
- if (h->ms.unfolded)
- self->nr_entries += h->nr_rows;
+ /* force fold unfiltered entry for simplicity */
+ h->ms.unfolded = false;
h->row_offset = 0;
- self->stats.total_period += h->period;
- self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
- hists__calc_col_len(self, h);
+ hists->stats.nr_non_filtered_samples += h->stat.nr_events;
+
+ hists__inc_filter_stats(hists, h);
+ hists__calc_col_len(hists, h);
}
-void hists__filter_by_dso(struct hists *self, const struct dso *dso)
+
+static bool hists__filter_entry_by_dso(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->dso_filter != NULL &&
+ (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
+ he->filtered |= (1 << HIST_FILTER__DSO);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_dso(struct hists *hists)
{
struct rb_node *nd;
- self->nr_entries = self->stats.total_period = 0;
- self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
- hists__reset_col_len(self);
+ hists->stats.nr_non_filtered_samples = 0;
+
+ hists__reset_filter_stats(hists);
+ hists__reset_col_len(hists);
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (symbol_conf.exclude_other && !h->parent)
continue;
- if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
- h->filtered |= (1 << HIST_FILTER__DSO);
+ if (hists__filter_entry_by_dso(hists, h))
continue;
- }
- hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
+ hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
+ }
+}
+
+static bool hists__filter_entry_by_thread(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->thread_filter != NULL &&
+ he->thread != hists->thread_filter) {
+ he->filtered |= (1 << HIST_FILTER__THREAD);
+ return true;
}
+
+ return false;
}
-void hists__filter_by_thread(struct hists *self, const struct thread *thread)
+void hists__filter_by_thread(struct hists *hists)
{
struct rb_node *nd;
- self->nr_entries = self->stats.total_period = 0;
- self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
- hists__reset_col_len(self);
+ hists->stats.nr_non_filtered_samples = 0;
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ hists__reset_filter_stats(hists);
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
- if (thread != NULL && h->thread != thread) {
- h->filtered |= (1 << HIST_FILTER__THREAD);
+ if (hists__filter_entry_by_thread(hists, h))
continue;
- }
- hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
+ hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
}
}
-static int symbol__alloc_hist(struct symbol *self)
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he)
{
- struct sym_priv *priv = symbol__priv(self);
- const int size = (sizeof(*priv->hist) +
- (self->end - self->start) * sizeof(u64));
+ if (hists->symbol_filter_str != NULL &&
+ (!he->ms.sym || strstr(he->ms.sym->name,
+ hists->symbol_filter_str) == NULL)) {
+ he->filtered |= (1 << HIST_FILTER__SYMBOL);
+ return true;
+ }
- priv->hist = zalloc(size);
- return priv->hist == NULL ? -1 : 0;
+ return false;
}
-int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
+void hists__filter_by_symbol(struct hists *hists)
{
- unsigned int sym_size, offset;
- struct symbol *sym = self->ms.sym;
- struct sym_priv *priv;
- struct sym_hist *h;
-
- if (!sym || !self->ms.map)
- return 0;
-
- priv = symbol__priv(sym);
- if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
- return -ENOMEM;
-
- sym_size = sym->end - sym->start;
- offset = ip - sym->start;
-
- pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
+ struct rb_node *nd;
- if (offset >= sym_size)
- return 0;
+ hists->stats.nr_non_filtered_samples = 0;
- h = priv->hist;
- h->sum++;
- h->ip[offset]++;
+ hists__reset_filter_stats(hists);
+ hists__reset_col_len(hists);
- pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
- self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
- return 0;
-}
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
-{
- struct objdump_line *self = malloc(sizeof(*self) + privsize);
+ if (hists__filter_entry_by_symbol(hists, h))
+ continue;
- if (self != NULL) {
- self->offset = offset;
- self->line = line;
+ hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
}
-
- return self;
}
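
The three filter helpers are driven by whoever owns the filter fields added to struct hists in the hist.h hunk below. A hypothetical browser-side sketch (not part of the patch):

#include "hist.h"	/* assumes the perf util include path */

/* Hypothetical: narrow the view to one DSO, or clear the filter again. */
static void browser__toggle_dso_filter(struct hists *hists, const struct dso *dso)
{
	hists->dso_filter = (hists->dso_filter == NULL) ? dso : NULL;
	hists__filter_by_dso(hists);
}
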
-void objdump_line__free(struct objdump_line *self)
+void events_stats__inc(struct events_stats *stats, u32 type)
{
- free(self->line);
- free(self);
+ ++stats->nr_events[0];
+ ++stats->nr_events[type];
}
-static void objdump__add_line(struct list_head *head, struct objdump_line *line)
+void hists__inc_nr_events(struct hists *hists, u32 type)
{
- list_add_tail(&line->node, head);
+ events_stats__inc(&hists->stats, type);
}
-struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
- struct objdump_line *pos)
+void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
- list_for_each_entry_continue(pos, head, node)
- if (pos->offset >= 0)
- return pos;
-
- return NULL;
+ events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
+ if (!filtered)
+ hists->stats.nr_non_filtered_samples++;
}
-static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
- struct list_head *head, size_t privsize)
+static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
+ struct hist_entry *pair)
{
- struct symbol *sym = self->ms.sym;
- struct objdump_line *objdump_line;
- char *line = NULL, *tmp, *tmp2, *c;
- size_t line_len;
- s64 line_ip, offset = -1;
+ struct rb_root *root;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct hist_entry *he;
+ int64_t cmp;
- if (getline(&line, &line_len, file) < 0)
- return -1;
+ if (sort__need_collapse)
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
- if (!line)
- return -1;
+ p = &root->rb_node;
- while (line_len != 0 && isspace(line[line_len - 1]))
- line[--line_len] = '\0';
+ while (*p != NULL) {
+ parent = *p;
+ he = rb_entry(parent, struct hist_entry, rb_node_in);
- c = strchr(line, '\n');
- if (c)
- *c = 0;
+ cmp = hist_entry__collapse(he, pair);
- line_ip = -1;
+ if (!cmp)
+ goto out;
- /*
- * Strip leading spaces:
- */
- tmp = line;
- while (*tmp) {
- if (*tmp != ' ')
- break;
- tmp++;
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
}
- if (*tmp) {
- /*
- * Parse hexa addresses followed by ':'
- */
- line_ip = strtoull(tmp, &tmp2, 16);
- if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
- line_ip = -1;
+ he = hist_entry__new(pair, true);
+ if (he) {
+ memset(&he->stat, 0, sizeof(he->stat));
+ he->hists = hists;
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, root);
+ hists__inc_stats(hists, he);
+ he->dummy = true;
}
+out:
+ return he;
+}
- if (line_ip != -1) {
- u64 start = map__rip_2objdump(self->ms.map, sym->start),
- end = map__rip_2objdump(self->ms.map, sym->end);
+static struct hist_entry *hists__find_entry(struct hists *hists,
+ struct hist_entry *he)
+{
+ struct rb_node *n;
- offset = line_ip - start;
- if (offset < 0 || (u64)line_ip > end)
- offset = -1;
- }
+ if (sort__need_collapse)
+ n = hists->entries_collapsed.rb_node;
+ else
+ n = hists->entries_in->rb_node;
- objdump_line = objdump_line__new(offset, line, privsize);
- if (objdump_line == NULL) {
- free(line);
- return -1;
+ while (n) {
+ struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
+ int64_t cmp = hist_entry__collapse(iter, he);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return iter;
}
- objdump__add_line(head, objdump_line);
- return 0;
+ return NULL;
}
-int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
- size_t privsize)
+/*
+ * Look for pairs to link to the leader buckets (hist_entries):
+ */
+void hists__match(struct hists *leader, struct hists *other)
{
- struct symbol *sym = self->ms.sym;
- struct map *map = self->ms.map;
- struct dso *dso = map->dso;
- char *filename = dso__build_id_filename(dso, NULL, 0);
- bool free_filename = true;
- char command[PATH_MAX * 2];
- FILE *file;
- int err = 0;
- u64 len;
-
- if (filename == NULL) {
- if (dso->has_build_id) {
- pr_err("Can't annotate %s: not enough memory\n",
- sym->name);
- return -ENOMEM;
- }
- goto fallback;
- } else if (readlink(filename, command, sizeof(command)) < 0 ||
- strstr(command, "[kernel.kallsyms]") ||
- access(filename, R_OK)) {
- free(filename);
-fallback:
- /*
- * If we don't have build-ids or the build-id file isn't in the
- * cache, or is just a kallsyms file, well, lets hope that this
- * DSO is the same as when 'perf record' ran.
- */
- filename = dso->long_name;
- free_filename = false;
- }
-
- if (dso->origin == DSO__ORIG_KERNEL) {
- if (dso->annotate_warned)
- goto out_free_filename;
- err = -ENOENT;
- dso->annotate_warned = 1;
- pr_err("Can't annotate %s: No vmlinux file was found in the "
- "path\n", sym->name);
- goto out_free_filename;
- }
+ struct rb_root *root;
+ struct rb_node *nd;
+ struct hist_entry *pos, *pair;
- pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
- filename, sym->name, map->unmap_ip(map, sym->start),
- map->unmap_ip(map, sym->end));
+ if (sort__need_collapse)
+ root = &leader->entries_collapsed;
+ else
+ root = leader->entries_in;
- len = sym->end - sym->start;
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct hist_entry, rb_node_in);
+ pair = hists__find_entry(other, pos);
- pr_debug("annotating [%p] %30s : [%p] %30s\n",
- dso, dso->long_name, sym, sym->name);
+ if (pair)
+ hist_entry__add_pair(pair, pos);
+ }
+}
- snprintf(command, sizeof(command),
- "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
- map__rip_2objdump(map, sym->start),
- map__rip_2objdump(map, sym->end),
- filename, filename);
+/*
+ * Look for entries in the other hists that are not present in the leader. If
+ * we find any, add a dummy entry on the leader hists, with period=0 and
+ * nr_events=0, to serve as the list header.
+ */
+int hists__link(struct hists *leader, struct hists *other)
+{
+ struct rb_root *root;
+ struct rb_node *nd;
+ struct hist_entry *pos, *pair;
- pr_debug("Executing: %s\n", command);
+ if (sort__need_collapse)
+ root = &other->entries_collapsed;
+ else
+ root = other->entries_in;
- file = popen(command, "r");
- if (!file)
- goto out_free_filename;
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct hist_entry, rb_node_in);
- while (!feof(file))
- if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
- break;
+ if (!hist_entry__has_pairs(pos)) {
+ pair = hists__add_dummy_entry(leader, pos);
+ if (pair == NULL)
+ return -1;
+ hist_entry__add_pair(pos, pair);
+ }
+ }
- pclose(file);
-out_free_filename:
- if (free_filename)
- free(filename);
- return err;
+ return 0;
}
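
hists__match() and hists__link() are the pairing step a diff-style comparison needs before it can walk two hists side by side. A hypothetical sketch of the calling order (the wrapper name is made up):

#include "hist.h"	/* assumes the perf util include path */

static int hists__pair_with_baseline(struct hists *baseline, struct hists *other)
{
	hists__match(baseline, other);		/* link entries present in both */
	return hists__link(baseline, other);	/* add dummies for the rest */
}
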
-void hists__inc_nr_events(struct hists *self, u32 type)
+u64 hists__total_period(struct hists *hists)
{
- ++self->stats.nr_events[0];
- ++self->stats.nr_events[type];
+ return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
+ hists->stats.total_period;
}
-size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
+int parse_filter_percentage(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
- int i;
- size_t ret = 0;
+ if (!strcmp(arg, "relative"))
+ symbol_conf.filter_relative = true;
+ else if (!strcmp(arg, "absolute"))
+ symbol_conf.filter_relative = false;
+ else
+ return -1;
- for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
- if (!event__name[i])
- continue;
- ret += fprintf(fp, "%10s events: %10d\n",
- event__name[i], self->stats.nr_events[i]);
- }
+ return 0;
+}
+
+int perf_hist_config(const char *var, const char *value)
+{
+ if (!strcmp(var, "hist.percentage"))
+ return parse_filter_percentage(NULL, value, 0);
- return ret;
+ return 0;
}
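
perf_hist_config() routes the hist.percentage config key to the same parser used for the command-line option. An illustrative sketch of how a report-style tool is expected to hook both up (not part of this patch):

/* Hypothetical ~/.perfconfig fragment handled by perf_hist_config():
 *
 *	[hist]
 *		percentage = relative
 *
 * and a sketch of the matching entry in such a tool's options[] array:
 */
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
	     "how to display percentage of filtered entries",
	     parse_filter_percentage),
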
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 587d375d343..742f49a8572 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -2,39 +2,25 @@
#define __PERF_HIST_H
#include <linux/types.h>
+#include <pthread.h>
#include "callchain.h"
+#include "header.h"
+#include "color.h"
+#include "ui/progress.h"
extern struct callchain_param callchain_param;
struct hist_entry;
struct addr_location;
struct symbol;
-struct rb_root;
-struct objdump_line {
- struct list_head node;
- s64 offset;
- char *line;
-};
-
-void objdump_line__free(struct objdump_line *self);
-struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
- struct objdump_line *pos);
-
-struct sym_hist {
- u64 sum;
- u64 ip[0];
-};
-
-struct sym_ext {
- struct rb_node node;
- double percent;
- char *path;
-};
-
-struct sym_priv {
- struct sym_hist *hist;
- struct sym_ext *ext;
+enum hist_filter {
+ HIST_FILTER__DSO,
+ HIST_FILTER__THREAD,
+ HIST_FILTER__PARENT,
+ HIST_FILTER__SYMBOL,
+ HIST_FILTER__GUEST,
+ HIST_FILTER__HOST,
};
/*
@@ -51,9 +37,16 @@ struct sym_priv {
*/
struct events_stats {
u64 total_period;
+ u64 total_non_filtered_period;
u64 total_lost;
+ u64 total_invalid_chains;
u32 nr_events[PERF_RECORD_HEADER_MAX];
+ u32 nr_non_filtered_samples;
+ u32 nr_lost_warned;
u32 nr_unknown_events;
+ u32 nr_invalid_chains;
+ u32 nr_unknown_id;
+ u32 nr_unprocessable_samples;
};
enum hist_column {
@@ -63,86 +56,292 @@ enum hist_column {
HISTC_COMM,
HISTC_PARENT,
HISTC_CPU,
+ HISTC_SRCLINE,
+ HISTC_MISPREDICT,
+ HISTC_IN_TX,
+ HISTC_ABORT,
+ HISTC_SYMBOL_FROM,
+ HISTC_SYMBOL_TO,
+ HISTC_DSO_FROM,
+ HISTC_DSO_TO,
+ HISTC_LOCAL_WEIGHT,
+ HISTC_GLOBAL_WEIGHT,
+ HISTC_MEM_DADDR_SYMBOL,
+ HISTC_MEM_DADDR_DSO,
+ HISTC_MEM_LOCKED,
+ HISTC_MEM_TLB,
+ HISTC_MEM_LVL,
+ HISTC_MEM_SNOOP,
+ HISTC_MEM_DCACHELINE,
+ HISTC_TRANSACTION,
HISTC_NR_COLS, /* Last entry */
};
+struct thread;
+struct dso;
+
struct hists {
- struct rb_node rb_node;
+ struct rb_root entries_in_array[2];
+ struct rb_root *entries_in;
struct rb_root entries;
+ struct rb_root entries_collapsed;
u64 nr_entries;
+ u64 nr_non_filtered_entries;
+ const struct thread *thread_filter;
+ const struct dso *dso_filter;
+ const char *uid_filter_str;
+ const char *symbol_filter_str;
+ pthread_mutex_t lock;
struct events_stats stats;
- u64 config;
u64 event_stream;
- u32 type;
u16 col_len[HISTC_NR_COLS];
};
-struct hist_entry *__hists__add_entry(struct hists *self,
+struct hist_entry_iter;
+
+struct hist_iter_ops {
+ int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*next_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*finish_entry)(struct hist_entry_iter *, struct addr_location *);
+};
+
+struct hist_entry_iter {
+ int total;
+ int curr;
+
+ bool hide_unresolved;
+
+ struct perf_evsel *evsel;
+ struct perf_sample *sample;
+ struct hist_entry *he;
+ struct symbol *parent;
+ void *priv;
+
+ const struct hist_iter_ops *ops;
+ /* user-defined callback function (optional) */
+ int (*add_entry_cb)(struct hist_entry_iter *iter,
+ struct addr_location *al, bool single, void *arg);
+};
+
+extern const struct hist_iter_ops hist_iter_normal;
+extern const struct hist_iter_ops hist_iter_branch;
+extern const struct hist_iter_ops hist_iter_mem;
+extern const struct hist_iter_ops hist_iter_cumulative;
+
+struct hist_entry *__hists__add_entry(struct hists *hists,
struct addr_location *al,
- struct symbol *parent, u64 period);
-extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
-extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
-int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
- struct hists *pair_hists, bool show_displacement,
- long displacement, FILE *fp, u64 total);
-int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
- struct hists *hists, struct hists *pair_hists,
- bool show_displacement, long displacement,
- bool color, u64 total);
+ struct symbol *parent,
+ struct branch_info *bi,
+ struct mem_info *mi, u64 period,
+ u64 weight, u64 transaction,
+ bool sample_self);
+int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ struct perf_evsel *evsel, struct perf_sample *sample,
+ int max_stack_depth, void *arg);
+
+int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
+int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
+int hist_entry__transaction_len(void);
+int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
+ struct hists *hists);
void hist_entry__free(struct hist_entry *);
-void hists__output_resort(struct hists *self);
-void hists__collapse_resort(struct hists *self);
+void hists__output_resort(struct hists *hists);
+void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
+void hists__output_recalc_col_len(struct hists *hists, int max_rows);
+
+u64 hists__total_period(struct hists *hists);
+void hists__reset_stats(struct hists *hists);
+void hists__inc_stats(struct hists *hists, struct hist_entry *h);
+void hists__inc_nr_events(struct hists *hists, u32 type);
+void hists__inc_nr_samples(struct hists *hists, bool filtered);
+void events_stats__inc(struct events_stats *stats, u32 type);
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
+
+size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
+ int max_cols, float min_pcnt, FILE *fp);
+
+void hists__filter_by_dso(struct hists *hists);
+void hists__filter_by_thread(struct hists *hists);
+void hists__filter_by_symbol(struct hists *hists);
+
+static inline bool hists__has_filter(struct hists *hists)
+{
+ return hists->thread_filter || hists->dso_filter ||
+ hists->symbol_filter_str;
+}
+
+u16 hists__col_len(struct hists *hists, enum hist_column col);
+void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len);
+bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len);
+void hists__reset_col_len(struct hists *hists);
+void hists__calc_col_len(struct hists *hists, struct hist_entry *he);
+
+void hists__match(struct hists *leader, struct hists *other);
+int hists__link(struct hists *leader, struct hists *other);
+
+struct perf_hpp {
+ char *buf;
+ size_t size;
+ const char *sep;
+ void *ptr;
+};
+
+struct perf_hpp_fmt {
+ int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel);
+ int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel);
+ int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b);
+ int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b);
+ int64_t (*sort)(struct hist_entry *a, struct hist_entry *b);
+
+ struct list_head list;
+ struct list_head sort_list;
+ bool elide;
+};
+
+extern struct list_head perf_hpp__list;
+extern struct list_head perf_hpp__sort_list;
+
+#define perf_hpp__for_each_format(format) \
+ list_for_each_entry(format, &perf_hpp__list, list)
+
+#define perf_hpp__for_each_format_safe(format, tmp) \
+ list_for_each_entry_safe(format, tmp, &perf_hpp__list, list)
+
+#define perf_hpp__for_each_sort_list(format) \
+ list_for_each_entry(format, &perf_hpp__sort_list, sort_list)
+
+#define perf_hpp__for_each_sort_list_safe(format, tmp) \
+ list_for_each_entry_safe(format, tmp, &perf_hpp__sort_list, sort_list)
+
+extern struct perf_hpp_fmt perf_hpp__format[];
+
+enum {
+ /* Matches perf_hpp__format array. */
+ PERF_HPP__OVERHEAD,
+ PERF_HPP__OVERHEAD_SYS,
+ PERF_HPP__OVERHEAD_US,
+ PERF_HPP__OVERHEAD_GUEST_SYS,
+ PERF_HPP__OVERHEAD_GUEST_US,
+ PERF_HPP__OVERHEAD_ACC,
+ PERF_HPP__SAMPLES,
+ PERF_HPP__PERIOD,
-void hists__inc_nr_events(struct hists *self, u32 type);
-size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
+ PERF_HPP__MAX_INDEX
+};
+
+void perf_hpp__init(void);
+void perf_hpp__column_register(struct perf_hpp_fmt *format);
+void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
+void perf_hpp__column_enable(unsigned col);
+void perf_hpp__column_disable(unsigned col);
+void perf_hpp__cancel_cumulate(void);
+
+void perf_hpp__register_sort_field(struct perf_hpp_fmt *format);
+void perf_hpp__setup_output_field(void);
+void perf_hpp__reset_output_field(void);
+void perf_hpp__append_sort_keys(void);
+
+bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
+bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
-size_t hists__fprintf(struct hists *self, struct hists *pair,
- bool show_displacement, FILE *fp);
+static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format)
+{
+ return format->elide;
+}
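
The perf_hpp__for_each_* macros are how output code is expected to walk the registered columns, skipping elided ones via perf_hpp__should_skip(). A short usage sketch (illustrative, not from the patch; real callers also advance hpp by the return value):

static void render_columns(struct perf_hpp *hpp, struct hist_entry *he)
{
	struct perf_hpp_fmt *fmt;

	perf_hpp__for_each_format(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;
		fmt->entry(fmt, hpp, he);	/* emit one column into hpp->buf */
	}
}
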
-int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
-int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
- size_t privsize);
+void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists);
-void hists__filter_by_dso(struct hists *self, const struct dso *dso);
-void hists__filter_by_thread(struct hists *self, const struct thread *thread);
+typedef u64 (*hpp_field_fn)(struct hist_entry *he);
+typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
+typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
-u16 hists__col_len(struct hists *self, enum hist_column col);
-void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
-bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
+int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, const char *fmt,
+ hpp_snprint_fn print_fn, bool fmt_percent);
+int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, const char *fmt,
+ hpp_snprint_fn print_fn, bool fmt_percent);
-#ifdef NO_NEWT_SUPPORT
-static inline int hists__browse(struct hists *self __used,
- const char *helpline __used,
- const char *ev_name __used)
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
- return 0;
+ hpp->buf += inc;
+ hpp->size -= inc;
}
-static inline int hists__tui_browse_tree(struct rb_root *self __used,
- const char *help __used)
+static inline size_t perf_hpp__use_color(void)
+{
+ return !symbol_conf.field_sep;
+}
+
+static inline size_t perf_hpp__color_overhead(void)
+{
+ return perf_hpp__use_color() ?
+ (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
+ : 0;
+}
+
+struct perf_evlist;
+
+struct hist_browser_timer {
+ void (*timer)(void *arg);
+ void *arg;
+ int refresh;
+};
+
+#ifdef HAVE_SLANG_SUPPORT
+#include "../ui/keysyms.h"
+int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
+ struct hist_browser_timer *hbt,
+ float min_pcnt,
+ struct perf_session_env *env);
+int script_browse(const char *script_opt);
+#else
+static inline
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
+ const char *help __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused,
+ float min_pcnt __maybe_unused,
+ struct perf_session_env *env __maybe_unused)
{
return 0;
}
-static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
+static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused)
{
return 0;
}
-#define KEY_LEFT -1
-#define KEY_RIGHT -2
-#else
-#include <newt.h>
-int hists__browse(struct hists *self, const char *helpline,
- const char *ev_name);
-int hist_entry__tui_annotate(struct hist_entry *self);
-#define KEY_LEFT NEWT_KEY_LEFT
-#define KEY_RIGHT NEWT_KEY_RIGHT
+static inline int script_browse(const char *script_opt __maybe_unused)
+{
+ return 0;
+}
-int hists__tui_browse_tree(struct rb_root *self, const char *help);
+#define K_LEFT -1000
+#define K_RIGHT -2000
+#define K_SWITCH_INPUT_DATA -3000
#endif
-unsigned int hists__sort_list_width(struct hists *self);
+unsigned int hists__sort_list_width(struct hists *hists);
+
+struct option;
+int parse_filter_percentage(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused);
+int perf_hist_config(const char *var, const char *value);
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
new file mode 100644
index 00000000000..6789d788d49
--- /dev/null
+++ b/tools/perf/util/include/asm/alternative-asm.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
+#define _PERF_ASM_ALTERNATIVE_ASM_H
+
+/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
+
+#define altinstruction_entry #
+
+#endif
diff --git a/tools/perf/util/include/asm/bug.h b/tools/perf/util/include/asm/bug.h
deleted file mode 100644
index 7fcc6810adc..00000000000
--- a/tools/perf/util/include/asm/bug.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _PERF_ASM_GENERIC_BUG_H
-#define _PERF_ASM_GENERIC_BUG_H
-
-#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
-
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
- unlikely(__ret_warn_on); \
-})
-
-#define WARN_ONCE(condition, format...) ({ \
- static int __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once)) \
- if (WARN(!__warned, format)) \
- __warned = 1; \
- unlikely(__ret_warn_once); \
-})
-#endif
diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h
index b722abe3a62..2a9bdc06630 100644
--- a/tools/perf/util/include/asm/byteorder.h
+++ b/tools/perf/util/include/asm/byteorder.h
@@ -1,2 +1,2 @@
#include <asm/types.h>
-#include "../../../../include/linux/swab.h"
+#include "../../../../include/uapi/linux/swab.h"
diff --git a/tools/perf/util/include/asm/cpufeature.h b/tools/perf/util/include/asm/cpufeature.h
new file mode 100644
index 00000000000..acffd5e4d1d
--- /dev/null
+++ b/tools/perf/util/include/asm/cpufeature.h
@@ -0,0 +1,9 @@
+
+#ifndef PERF_CPUFEATURE_H
+#define PERF_CPUFEATURE_H
+
+/* cpufeature.h ... dummy header file for including arch/x86/lib/memcpy_64.S */
+
+#define X86_FEATURE_REP_GOOD 0
+
+#endif /* PERF_CPUFEATURE_H */
diff --git a/tools/perf/util/include/asm/dwarf2.h b/tools/perf/util/include/asm/dwarf2.h
new file mode 100644
index 00000000000..afe38199e92
--- /dev/null
+++ b/tools/perf/util/include/asm/dwarf2.h
@@ -0,0 +1,13 @@
+
+#ifndef PERF_DWARF2_H
+#define PERF_DWARF2_H
+
+/* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */
+
+#define CFI_STARTPROC
+#define CFI_ENDPROC
+#define CFI_REMEMBER_STATE
+#define CFI_RESTORE_STATE
+
+#endif /* PERF_DWARF2_H */
+
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 00000000000..d82b170bb21
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/include/asm/unistd_32.h b/tools/perf/util/include/asm/unistd_32.h
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/tools/perf/util/include/asm/unistd_32.h
@@ -0,0 +1 @@
+
diff --git a/tools/perf/util/include/asm/unistd_64.h b/tools/perf/util/include/asm/unistd_64.h
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/tools/perf/util/include/asm/unistd_64.h
@@ -0,0 +1 @@
+
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
index cf6727e99c4..8f149655f49 100644
--- a/tools/perf/util/include/dwarf-regs.h
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -1,7 +1,7 @@
#ifndef _PERF_DWARF_REGS_H_
#define _PERF_DWARF_REGS_H_
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
const char *get_arch_regstr(unsigned int n);
#endif
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
index eda4416efa0..01ffd12dc79 100644
--- a/tools/perf/util/include/linux/bitmap.h
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -4,7 +4,12 @@
#include <string.h>
#include <linux/bitops.h>
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
int __bitmap_weight(const unsigned long *bitmap, int bits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
@@ -32,4 +37,13 @@ static inline int bitmap_weight(const unsigned long *src, int nbits)
return __bitmap_weight(src, nbits);
}
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src1 | *src2;
+ else
+ __bitmap_or(dst, src1, src2, nbits);
+}
+
#endif /* _PERF_BITOPS_H */
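
DECLARE_BITMAP() plus the new bitmap_or() make it easy to merge two fixed-size masks. A small sketch (illustrative only, e.g. combining two CPU masks; the include path is an assumption):

#include <linux/bitmap.h>	/* the perf copy above */

#define NR_MASK_BITS 256

static void merge_cpu_masks(const unsigned long *online, const unsigned long *requested)
{
	DECLARE_BITMAP(wanted, NR_MASK_BITS);

	/* wanted = online | requested, one unsigned long at a time */
	bitmap_or(wanted, online, requested, NR_MASK_BITS);
}
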
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index bb4ac2e0538..dadfa7e5428 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -2,17 +2,41 @@
#define _PERF_LINUX_BITOPS_H_
#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <asm/hweight.h>
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
#define BITS_PER_LONG __WORDSIZE
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit(), but start the search from the current value of @bit */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
static inline void set_bit(int nr, unsigned long *addr)
{
addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
+}
+
static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
@@ -24,4 +48,113 @@ static inline unsigned long hweight_long(unsigned long w)
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ long_alias_t *p = (long_alias_t *) addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
#endif
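
The binary-search __ffs() above can be sanity-checked against a naive loop. A small self-contained test (the reference function is made up; only the expected values come from __ffs()'s contract):

#include <assert.h>

/* Naive reference for "index of the lowest set bit" (word must be non-zero). */
static unsigned long naive_ffs(unsigned long word)
{
	unsigned long n = 0;

	while (!(word & 1UL)) {
		n++;
		word >>= 1;
	}
	return n;
}

int main(void)
{
	assert(naive_ffs(0x1UL) == 0);
	assert(naive_ffs(0x8UL) == 3);
	assert(naive_ffs(0x100UL) == 8);	/* matches what __ffs() must return */
	return 0;
}
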
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
deleted file mode 100644
index 791f9dd27eb..00000000000
--- a/tools/perf/util/include/linux/compiler.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _PERF_LINUX_COMPILER_H_
-#define _PERF_LINUX_COMPILER_H_
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-#define __user
-#define __attribute_const__
-
-#define __used __attribute__((__unused__))
-
-#endif
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
new file mode 100644
index 00000000000..c10a35e1afb
--- /dev/null
+++ b/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
+#include "../../../../include/uapi/linux/const.h"
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
deleted file mode 100644
index 201f5739799..00000000000
--- a/tools/perf/util/include/linux/hash.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "../../../../include/linux/hash.h"
-
-#ifndef PERF_HASH_H
-#define PERF_HASH_H
-#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index 1eb804fd3fb..9844c31b7c2 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -8,8 +8,8 @@
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
-#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
@@ -46,9 +46,22 @@
_min1 < _min2 ? _min1 : _min2; })
#endif
+#ifndef roundup
+#define roundup(x, y) ( \
+{ \
+ const typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#endif
+
#ifndef BUG_ON
+#ifdef NDEBUG
+#define BUG_ON(cond) do { if (cond) {} } while (0)
+#else
#define BUG_ON(cond) assert(!(cond))
#endif
+#endif
/*
* Both need more care to handle endianness
@@ -81,12 +94,6 @@ static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
return (i >= ssize) ? (ssize - 1) : i;
}
-static inline unsigned long
-simple_strtoul(const char *nptr, char **endptr, int base)
-{
- return strtoul(nptr, endptr, base);
-}
-
int eprintf(int level,
const char *fmt, ...) __attribute__((format(printf, 2, 3)));
@@ -108,4 +115,14 @@ int eprintf(int level,
#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
#endif
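
As the comment above says, the cast keeps the mask as wide as x. A quick self-contained check of the intended behaviour (illustrative, not part of the patch; y must be a power of two):

#include <assert.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

int main(void)
{
	assert(round_up(13, 8) == 16);
	assert(round_up(16, 8) == 16);		/* already aligned stays put */
	assert(round_down(13, 8) == 8);
	assert(round_down(0x1234ULL, 0x1000) == 0x1000ULL); /* mask widened to u64 */
	return 0;
}
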
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h
new file mode 100644
index 00000000000..06387cffe12
--- /dev/null
+++ b/tools/perf/util/include/linux/linkage.h
@@ -0,0 +1,13 @@
+
+#ifndef PERF_LINUX_LINKAGE_H_
+#define PERF_LINUX_LINKAGE_H_
+
+/* linkage.h ... for including arch/x86/lib/memcpy_64.S */
+
+#define ENTRY(name) \
+ .globl name; \
+ name:
+
+#define ENDPROC(name)
+
+#endif /* PERF_LINUX_LINKAGE_H_ */
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
index f5ca26e53fb..76ddbc72634 100644
--- a/tools/perf/util/include/linux/list.h
+++ b/tools/perf/util/include/linux/list.h
@@ -1,3 +1,6 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+
#include "../../../../include/linux/list.h"
#ifndef PERF_LIST_H
@@ -22,5 +25,5 @@ static inline void list_del_range(struct list_head *begin,
* @head: the head for your list.
*/
#define list_for_each_from(pos, head) \
- for (; prefetch(pos->next), pos != (head); pos = pos->next)
+ for (; pos != (head); pos = pos->next)
#endif
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/module.h
deleted file mode 100644
index b43e2dc21e0..00000000000
--- a/tools/perf/util/include/linux/module.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef PERF_LINUX_MODULE_H
-#define PERF_LINUX_MODULE_H
-
-#define EXPORT_SYMBOL(name)
-
-#endif
diff --git a/tools/perf/util/include/linux/prefetch.h b/tools/perf/util/include/linux/prefetch.h
deleted file mode 100644
index 7841e485d8c..00000000000
--- a/tools/perf/util/include/linux/prefetch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef PERF_LINUX_PREFETCH_H
-#define PERF_LINUX_PREFETCH_H
-
-static inline void prefetch(void *a __attribute__((unused))) { }
-
-#endif
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
index 7a243a14303..2a030c5af3a 100644
--- a/tools/perf/util/include/linux/rbtree.h
+++ b/tools/perf/util/include/linux/rbtree.h
@@ -1 +1,2 @@
+#include <stdbool.h>
#include "../../../../include/linux/rbtree.h"
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
new file mode 100644
index 00000000000..9d6fcdf1788
--- /dev/null
+++ b/tools/perf/util/include/linux/rbtree_augmented.h
@@ -0,0 +1,2 @@
+#include <stdbool.h>
+#include "../../../../include/linux/rbtree_augmented.h"
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
index 3b2f5900276..97a80073822 100644
--- a/tools/perf/util/include/linux/string.h
+++ b/tools/perf/util/include/linux/string.h
@@ -1 +1,4 @@
#include <string.h>
+
+void *memdup(const void *src, size_t len);
+int str_append(char **s, int *len, const char *a);
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h
deleted file mode 100644
index 12de3b8112f..00000000000
--- a/tools/perf/util/include/linux/types.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef _PERF_LINUX_TYPES_H_
-#define _PERF_LINUX_TYPES_H_
-
-#include <asm/types.h>
-
-#define DECLARE_BITMAP(name,bits) \
- unsigned long name[BITS_TO_LONGS(bits)]
-
-struct list_head {
- struct list_head *next, *prev;
-};
-
-struct hlist_head {
- struct hlist_node *first;
-};
-
-struct hlist_node {
- struct hlist_node *next, **pprev;
-};
-
-#endif
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
new file mode 100644
index 00000000000..89715b64a31
--- /dev/null
+++ b/tools/perf/util/intlist.c
@@ -0,0 +1,146 @@
+/*
+ * Based on intlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+
+#include "intlist.h"
+
+static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused,
+ const void *entry)
+{
+ int i = (int)((long)entry);
+ struct rb_node *rc = NULL;
+ struct int_node *node = malloc(sizeof(*node));
+
+ if (node != NULL) {
+ node->i = i;
+ node->priv = NULL;
+ rc = &node->rb_node;
+ }
+
+ return rc;
+}
+
+static void int_node__delete(struct int_node *ilist)
+{
+ free(ilist);
+}
+
+static void intlist__node_delete(struct rblist *rblist __maybe_unused,
+ struct rb_node *rb_node)
+{
+ struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+ int_node__delete(node);
+}
+
+static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+ int i = (int)((long)entry);
+ struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+ return node->i - i;
+}
+
+int intlist__add(struct intlist *ilist, int i)
+{
+ return rblist__add_node(&ilist->rblist, (void *)((long)i));
+}
+
+void intlist__remove(struct intlist *ilist, struct int_node *node)
+{
+ rblist__remove_node(&ilist->rblist, &node->rb_node);
+}
+
+static struct int_node *__intlist__findnew(struct intlist *ilist,
+ int i, bool create)
+{
+ struct int_node *node = NULL;
+ struct rb_node *rb_node;
+
+ if (ilist == NULL)
+ return NULL;
+
+ if (create)
+ rb_node = rblist__findnew(&ilist->rblist, (void *)((long)i));
+ else
+ rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+
+ if (rb_node)
+ node = container_of(rb_node, struct int_node, rb_node);
+
+ return node;
+}
+
+struct int_node *intlist__find(struct intlist *ilist, int i)
+{
+ return __intlist__findnew(ilist, i, false);
+}
+
+struct int_node *intlist__findnew(struct intlist *ilist, int i)
+{
+ return __intlist__findnew(ilist, i, true);
+}
+
+static int intlist__parse_list(struct intlist *ilist, const char *s)
+{
+ char *sep;
+ int err;
+
+ do {
+ long value = strtol(s, &sep, 10);
+ err = -EINVAL;
+ if (*sep != ',' && *sep != '\0')
+ break;
+ err = intlist__add(ilist, value);
+ if (err)
+ break;
+ s = sep + 1;
+ } while (*sep != '\0');
+
+ return err;
+}
+
+struct intlist *intlist__new(const char *slist)
+{
+ struct intlist *ilist = malloc(sizeof(*ilist));
+
+ if (ilist != NULL) {
+ rblist__init(&ilist->rblist);
+ ilist->rblist.node_cmp = intlist__node_cmp;
+ ilist->rblist.node_new = intlist__node_new;
+ ilist->rblist.node_delete = intlist__node_delete;
+
+ if (slist && intlist__parse_list(ilist, slist))
+ goto out_delete;
+ }
+
+ return ilist;
+out_delete:
+ intlist__delete(ilist);
+ return NULL;
+}
+
+void intlist__delete(struct intlist *ilist)
+{
+ if (ilist != NULL)
+ rblist__delete(&ilist->rblist);
+}
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
+{
+ struct int_node *node = NULL;
+ struct rb_node *rb_node;
+
+ rb_node = rblist__entry(&ilist->rblist, idx);
+ if (rb_node)
+ node = container_of(rb_node, struct int_node, rb_node);
+
+ return node;
+}
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
new file mode 100644
index 00000000000..aa6877d3685
--- /dev/null
+++ b/tools/perf/util/intlist.h
@@ -0,0 +1,77 @@
+#ifndef __PERF_INTLIST_H
+#define __PERF_INTLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+#include "rblist.h"
+
+struct int_node {
+ struct rb_node rb_node;
+ int i;
+ void *priv;
+};
+
+struct intlist {
+ struct rblist rblist;
+};
+
+struct intlist *intlist__new(const char *slist);
+void intlist__delete(struct intlist *ilist);
+
+void intlist__remove(struct intlist *ilist, struct int_node *in);
+int intlist__add(struct intlist *ilist, int i);
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
+struct int_node *intlist__find(struct intlist *ilist, int i);
+struct int_node *intlist__findnew(struct intlist *ilist, int i);
+
+static inline bool intlist__has_entry(struct intlist *ilist, int i)
+{
+ return intlist__find(ilist, i) != NULL;
+}
+
+static inline bool intlist__empty(const struct intlist *ilist)
+{
+ return rblist__empty(&ilist->rblist);
+}
+
+static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
+{
+ return rblist__nr_entries(&ilist->rblist);
+}
+
+/* For intlist iteration */
+static inline struct int_node *intlist__first(struct intlist *ilist)
+{
+ struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+static inline struct int_node *intlist__next(struct int_node *in)
+{
+ struct rb_node *rn;
+ if (!in)
+ return NULL;
+ rn = rb_next(&in->rb_node);
+ return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+
+/**
+ * intlist__for_each - iterate over an intlist
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @ilist: the &struct intlist to iterate over.
+ */
+#define intlist__for_each(pos, ilist) \
+ for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
+
+/**
+ * intlist__for_each_safe - iterate over an intlist, safe against removal of
+ * an int_node
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @n: another &struct int_node to use as temporary storage.
+ * @ilist: the &struct intlist to iterate over.
+ */
+#define intlist__for_each_safe(pos, n, ilist) \
+ for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
+ pos = n, n = intlist__next(n))
+#endif /* __PERF_INTLIST_H */
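
A hypothetical usage sketch of the API declared above, e.g. parsing a comma-separated TID list and walking it (the helper name is made up; only declared functions and macros are used, and the perf util include path is assumed):

#include <stdio.h>
#include "intlist.h"

static void dump_tids(const char *slist)	/* e.g. "1,2,42" */
{
	struct intlist *tids = intlist__new(slist);
	struct int_node *pos;

	if (tids == NULL)
		return;

	intlist__for_each(pos, tids)
		printf("tid %d\n", pos->i);

	intlist__delete(tids);
}
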
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
new file mode 100644
index 00000000000..c73e1fc12e5
--- /dev/null
+++ b/tools/perf/util/machine.c
@@ -0,0 +1,1422 @@
+#include "callchain.h"
+#include "debug.h"
+#include "event.h"
+#include "evsel.h"
+#include "hist.h"
+#include "machine.h"
+#include "map.h"
+#include "sort.h"
+#include "strlist.h"
+#include "thread.h"
+#include <stdbool.h>
+#include <symbol/kallsyms.h>
+#include "unwind.h"
+
+int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
+{
+ map_groups__init(&machine->kmaps);
+ RB_CLEAR_NODE(&machine->rb_node);
+ INIT_LIST_HEAD(&machine->user_dsos);
+ INIT_LIST_HEAD(&machine->kernel_dsos);
+
+ machine->threads = RB_ROOT;
+ INIT_LIST_HEAD(&machine->dead_threads);
+ machine->last_match = NULL;
+
+ machine->kmaps.machine = machine;
+ machine->pid = pid;
+
+ machine->symbol_filter = NULL;
+ machine->id_hdr_size = 0;
+
+ machine->root_dir = strdup(root_dir);
+ if (machine->root_dir == NULL)
+ return -ENOMEM;
+
+ if (pid != HOST_KERNEL_ID) {
+ struct thread *thread = machine__findnew_thread(machine, 0,
+ pid);
+ char comm[64];
+
+ if (thread == NULL)
+ return -ENOMEM;
+
+ snprintf(comm, sizeof(comm), "[guest/%d]", pid);
+ thread__set_comm(thread, comm, 0);
+ }
+
+ return 0;
+}
+
+struct machine *machine__new_host(void)
+{
+ struct machine *machine = malloc(sizeof(*machine));
+
+ if (machine != NULL) {
+ machine__init(machine, "", HOST_KERNEL_ID);
+
+ if (machine__create_kernel_maps(machine) < 0)
+ goto out_delete;
+ }
+
+ return machine;
+out_delete:
+ free(machine);
+ return NULL;
+}
+
+static void dsos__delete(struct list_head *dsos)
+{
+ struct dso *pos, *n;
+
+ list_for_each_entry_safe(pos, n, dsos, node) {
+ list_del(&pos->node);
+ dso__delete(pos);
+ }
+}
+
+void machine__delete_dead_threads(struct machine *machine)
+{
+ struct thread *n, *t;
+
+ list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
+ list_del(&t->node);
+ thread__delete(t);
+ }
+}
+
+void machine__delete_threads(struct machine *machine)
+{
+ struct rb_node *nd = rb_first(&machine->threads);
+
+ while (nd) {
+ struct thread *t = rb_entry(nd, struct thread, rb_node);
+
+ rb_erase(&t->rb_node, &machine->threads);
+ nd = rb_next(nd);
+ thread__delete(t);
+ }
+}
+
+void machine__exit(struct machine *machine)
+{
+ map_groups__exit(&machine->kmaps);
+ dsos__delete(&machine->user_dsos);
+ dsos__delete(&machine->kernel_dsos);
+ zfree(&machine->root_dir);
+}
+
+void machine__delete(struct machine *machine)
+{
+ machine__exit(machine);
+ free(machine);
+}
+
+void machines__init(struct machines *machines)
+{
+ machine__init(&machines->host, "", HOST_KERNEL_ID);
+ machines->guests = RB_ROOT;
+ machines->symbol_filter = NULL;
+}
+
+void machines__exit(struct machines *machines)
+{
+ machine__exit(&machines->host);
+ /* XXX exit guest */
+}
+
+struct machine *machines__add(struct machines *machines, pid_t pid,
+ const char *root_dir)
+{
+ struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *pos, *machine = malloc(sizeof(*machine));
+
+ if (machine == NULL)
+ return NULL;
+
+ if (machine__init(machine, root_dir, pid) != 0) {
+ free(machine);
+ return NULL;
+ }
+
+ machine->symbol_filter = machines->symbol_filter;
+
+ while (*p != NULL) {
+ parent = *p;
+ pos = rb_entry(parent, struct machine, rb_node);
+ if (pid < pos->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&machine->rb_node, parent, p);
+ rb_insert_color(&machine->rb_node, &machines->guests);
+
+ return machine;
+}
+
+void machines__set_symbol_filter(struct machines *machines,
+ symbol_filter_t symbol_filter)
+{
+ struct rb_node *nd;
+
+ machines->symbol_filter = symbol_filter;
+ machines->host.symbol_filter = symbol_filter;
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *machine = rb_entry(nd, struct machine, rb_node);
+
+ machine->symbol_filter = symbol_filter;
+ }
+}
+
+struct machine *machines__find(struct machines *machines, pid_t pid)
+{
+ struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *machine;
+ struct machine *default_machine = NULL;
+
+ if (pid == HOST_KERNEL_ID)
+ return &machines->host;
+
+ while (*p != NULL) {
+ parent = *p;
+ machine = rb_entry(parent, struct machine, rb_node);
+ if (pid < machine->pid)
+ p = &(*p)->rb_left;
+ else if (pid > machine->pid)
+ p = &(*p)->rb_right;
+ else
+ return machine;
+ if (!machine->pid)
+ default_machine = machine;
+ }
+
+ return default_machine;
+}
+
+struct machine *machines__findnew(struct machines *machines, pid_t pid)
+{
+ char path[PATH_MAX];
+ const char *root_dir = "";
+ struct machine *machine = machines__find(machines, pid);
+
+ if (machine && (machine->pid == pid))
+ goto out;
+
+ if ((pid != HOST_KERNEL_ID) &&
+ (pid != DEFAULT_GUEST_KERNEL_ID) &&
+ (symbol_conf.guestmount)) {
+ sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+ if (access(path, R_OK)) {
+ static struct strlist *seen;
+
+ if (!seen)
+ seen = strlist__new(true, NULL);
+
+ if (!strlist__has_entry(seen, path)) {
+ pr_err("Can't access file %s\n", path);
+ strlist__add(seen, path);
+ }
+ machine = NULL;
+ goto out;
+ }
+ root_dir = path;
+ }
+
+ machine = machines__add(machines, pid, root_dir);
+out:
+ return machine;
+}
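+
+/*
+ * Usage sketch (hypothetical caller, mirroring machines__create_kernel_maps()
+ * further below): a guest machine is created on demand the first time its
+ * pid shows up:
+ *
+ *	struct machine *machine = machines__findnew(machines, pid);
+ *
+ *	if (machine == NULL)
+ *		return -1;
+ */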
+
+void machines__process_guests(struct machines *machines,
+ machine__process_t process, void *data)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ process(pos, data);
+ }
+}
+
+char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
+{
+ if (machine__is_host(machine))
+ snprintf(bf, size, "[%s]", "kernel.kallsyms");
+ else if (machine__is_default_guest(machine))
+ snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
+ else {
+ snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
+ machine->pid);
+ }
+
+ return bf;
+}
+
+void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
+{
+ struct rb_node *node;
+ struct machine *machine;
+
+ machines->host.id_hdr_size = id_hdr_size;
+
+ for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
+ machine = rb_entry(node, struct machine, rb_node);
+ machine->id_hdr_size = id_hdr_size;
+ }
+
+ return;
+}
+
+static struct thread *__machine__findnew_thread(struct machine *machine,
+ pid_t pid, pid_t tid,
+ bool create)
+{
+ struct rb_node **p = &machine->threads.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread *th;
+
+ /*
+ * Front-end cache - TID lookups come in blocks,
+	 * so most of the time we don't have to look up
+ * the full rbtree:
+ */
+ if (machine->last_match && machine->last_match->tid == tid) {
+ if (pid && pid != machine->last_match->pid_)
+ machine->last_match->pid_ = pid;
+ return machine->last_match;
+ }
+
+ while (*p != NULL) {
+ parent = *p;
+ th = rb_entry(parent, struct thread, rb_node);
+
+ if (th->tid == tid) {
+ machine->last_match = th;
+ if (pid && pid != th->pid_)
+ th->pid_ = pid;
+ return th;
+ }
+
+ if (tid < th->tid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ if (!create)
+ return NULL;
+
+ th = thread__new(pid, tid);
+ if (th != NULL) {
+ rb_link_node(&th->rb_node, parent, p);
+ rb_insert_color(&th->rb_node, &machine->threads);
+ machine->last_match = th;
+
+ /*
+		 * We have to initialize map_groups separately,
+		 * after the rb tree is updated.
+		 *
+		 * The reason is that we call machine__findnew_thread
+		 * within thread__init_map_groups to find the thread
+		 * leader, and doing that before the insert would corrupt
+		 * the rb tree.
+ */
+ if (thread__init_map_groups(th, machine))
+ return NULL;
+ }
+
+ return th;
+}
+
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
+ pid_t tid)
+{
+ return __machine__findnew_thread(machine, pid, tid, true);
+}
+
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+ pid_t tid)
+{
+ return __machine__findnew_thread(machine, pid, tid, false);
+}
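+
+/*
+ * Illustrative sketch (hypothetical caller): machine__findnew_thread()
+ * inserts a new thread into the rb tree when the tid is not known yet,
+ * while machine__find_thread() only looks it up and may return NULL,
+ * which is what machine__process_fork_event() below relies on:
+ *
+ *	struct thread *thread = machine__find_thread(machine, pid, tid);
+ *
+ *	if (thread != NULL)
+ *		machine__remove_thread(machine, thread);
+ */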
+
+int machine__process_comm_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct thread *thread = machine__findnew_thread(machine,
+ event->comm.pid,
+ event->comm.tid);
+
+ if (dump_trace)
+ perf_event__fprintf_comm(event, stdout);
+
+ if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int machine__process_lost_event(struct machine *machine __maybe_unused,
+ union perf_event *event, struct perf_sample *sample __maybe_unused)
+{
+ dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
+ event->lost.id, event->lost.lost);
+ return 0;
+}
+
+struct map *machine__new_module(struct machine *machine, u64 start,
+ const char *filename)
+{
+ struct map *map;
+ struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
+
+ if (dso == NULL)
+ return NULL;
+
+ map = map__new2(start, dso, MAP__FUNCTION);
+ if (map == NULL)
+ return NULL;
+
+ if (machine__is_host(machine))
+ dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+ else
+ dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+ map_groups__insert(&machine->kmaps, map);
+ return map;
+}
+
+size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
+ __dsos__fprintf(&machines->host.user_dsos, fp);
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += __dsos__fprintf(&pos->kernel_dsos, fp);
+ ret += __dsos__fprintf(&pos->user_dsos, fp);
+ }
+
+ return ret;
+}
+
+size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm)
+{
+ return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
+ __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
+}
+
+size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm)
+{
+ struct rb_node *nd;
+ size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
+ }
+ return ret;
+}
+
+size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
+{
+ int i;
+ size_t printed = 0;
+ struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
+
+ if (kdso->has_build_id) {
+ char filename[PATH_MAX];
+ if (dso__build_id_filename(kdso, filename, sizeof(filename)))
+ printed += fprintf(fp, "[0] %s\n", filename);
+ }
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i)
+ printed += fprintf(fp, "[%d] %s\n",
+ i + kdso->has_build_id, vmlinux_path[i]);
+
+ return printed;
+}
+
+size_t machine__fprintf(struct machine *machine, FILE *fp)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
+ struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+ ret += thread__fprintf(pos, fp);
+ }
+
+ return ret;
+}
+
+static struct dso *machine__get_kernel(struct machine *machine)
+{
+ const char *vmlinux_name = NULL;
+ struct dso *kernel;
+
+ if (machine__is_host(machine)) {
+ vmlinux_name = symbol_conf.vmlinux_name;
+ if (!vmlinux_name)
+ vmlinux_name = "[kernel.kallsyms]";
+
+ kernel = dso__kernel_findnew(machine, vmlinux_name,
+ "[kernel]",
+ DSO_TYPE_KERNEL);
+ } else {
+ char bf[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ vmlinux_name = symbol_conf.default_guest_vmlinux_name;
+ if (!vmlinux_name)
+ vmlinux_name = machine__mmap_name(machine, bf,
+ sizeof(bf));
+
+ kernel = dso__kernel_findnew(machine, vmlinux_name,
+ "[guest.kernel]",
+ DSO_TYPE_GUEST_KERNEL);
+ }
+
+ if (kernel != NULL && (!kernel->has_build_id))
+ dso__read_running_kernel_build_id(kernel, machine);
+
+ return kernel;
+}
+
+struct process_args {
+ u64 start;
+};
+
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+ size_t bufsz)
+{
+ if (machine__is_default_guest(machine))
+ scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+ else
+ scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of the kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass NULL as
+ * symbol_name if the name is not needed.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+ const char **symbol_name)
+{
+ char filename[PATH_MAX];
+ int i;
+ const char *name;
+ u64 addr = 0;
+
+ machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return 0;
+
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr = kallsyms__get_function_start(filename, name);
+ if (addr)
+ break;
+ }
+
+ if (symbol_name)
+ *symbol_name = name;
+
+ return addr;
+}
+
+int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
+{
+ enum map_type type;
+ u64 start = machine__get_kernel_start_addr(machine, NULL);
+
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
+
+ machine->vmlinux_maps[type] = map__new2(start, kernel, type);
+ if (machine->vmlinux_maps[type] == NULL)
+ return -1;
+
+ machine->vmlinux_maps[type]->map_ip =
+ machine->vmlinux_maps[type]->unmap_ip =
+ identity__map_ip;
+ kmap = map__kmap(machine->vmlinux_maps[type]);
+ kmap->kmaps = &machine->kmaps;
+ map_groups__insert(&machine->kmaps,
+ machine->vmlinux_maps[type]);
+ }
+
+ return 0;
+}
+
+void machine__destroy_kernel_maps(struct machine *machine)
+{
+ enum map_type type;
+
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
+
+ if (machine->vmlinux_maps[type] == NULL)
+ continue;
+
+ kmap = map__kmap(machine->vmlinux_maps[type]);
+ map_groups__remove(&machine->kmaps,
+ machine->vmlinux_maps[type]);
+ if (kmap->ref_reloc_sym) {
+ /*
+ * ref_reloc_sym is shared among all maps, so free just
+ * on one of them.
+ */
+ if (type == MAP__FUNCTION) {
+ zfree((char **)&kmap->ref_reloc_sym->name);
+ zfree(&kmap->ref_reloc_sym);
+ } else
+ kmap->ref_reloc_sym = NULL;
+ }
+
+ map__delete(machine->vmlinux_maps[type]);
+ machine->vmlinux_maps[type] = NULL;
+ }
+}
+
+int machines__create_guest_kernel_maps(struct machines *machines)
+{
+ int ret = 0;
+ struct dirent **namelist = NULL;
+ int i, items = 0;
+ char path[PATH_MAX];
+ pid_t pid;
+ char *endp;
+
+ if (symbol_conf.default_guest_vmlinux_name ||
+ symbol_conf.default_guest_modules ||
+ symbol_conf.default_guest_kallsyms) {
+ machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
+ }
+
+ if (symbol_conf.guestmount) {
+ items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
+ if (items <= 0)
+ return -ENOENT;
+ for (i = 0; i < items; i++) {
+ if (!isdigit(namelist[i]->d_name[0])) {
+ /* Filter out . and .. */
+ continue;
+ }
+ pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
+ if ((*endp != '\0') ||
+ (endp == namelist[i]->d_name) ||
+ (errno == ERANGE)) {
+ pr_debug("invalid directory (%s). Skipping.\n",
+ namelist[i]->d_name);
+ continue;
+ }
+ sprintf(path, "%s/%s/proc/kallsyms",
+ symbol_conf.guestmount,
+ namelist[i]->d_name);
+ ret = access(path, R_OK);
+ if (ret) {
+ pr_debug("Can't access file %s\n", path);
+ goto failure;
+ }
+ machines__create_kernel_maps(machines, pid);
+ }
+failure:
+ free(namelist);
+ }
+
+ return ret;
+}
+
+void machines__destroy_kernel_maps(struct machines *machines)
+{
+ struct rb_node *next = rb_first(&machines->guests);
+
+ machine__destroy_kernel_maps(&machines->host);
+
+ while (next) {
+ struct machine *pos = rb_entry(next, struct machine, rb_node);
+
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, &machines->guests);
+ machine__delete(pos);
+ }
+}
+
+int machines__create_kernel_maps(struct machines *machines, pid_t pid)
+{
+ struct machine *machine = machines__findnew(machines, pid);
+
+ if (machine == NULL)
+ return -1;
+
+ return machine__create_kernel_maps(machine);
+}
+
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, symbol_filter_t filter)
+{
+ struct map *map = machine->vmlinux_maps[type];
+ int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ /*
+		 * Since /proc/kallsyms will have multiple sections for the
+ * kernel, with modules between them, fixup the end of all
+ * sections.
+ */
+ __map_groups__fixup_end(&machine->kmaps, type);
+ }
+
+ return ret;
+}
+
+int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
+ symbol_filter_t filter)
+{
+ struct map *map = machine->vmlinux_maps[type];
+ int ret = dso__load_vmlinux_path(map->dso, map, filter);
+
+ if (ret > 0)
+ dso__set_loaded(map->dso, type);
+
+ return ret;
+}
+
+static void map_groups__fixup_end(struct map_groups *mg)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ __map_groups__fixup_end(mg, i);
+}
+
+static char *get_kernel_version(const char *root_dir)
+{
+ char version[PATH_MAX];
+ FILE *file;
+ char *name, *tmp;
+ const char *prefix = "Linux version ";
+
+ sprintf(version, "%s/proc/version", root_dir);
+ file = fopen(version, "r");
+ if (!file)
+ return NULL;
+
+ version[0] = '\0';
+ tmp = fgets(version, sizeof(version), file);
+ fclose(file);
+
+ name = strstr(version, prefix);
+ if (!name)
+ return NULL;
+ name += strlen(prefix);
+ tmp = strchr(name, ' ');
+ if (tmp)
+ *tmp = '\0';
+
+ return strdup(name);
+}
+
+static int map_groups__set_modules_path_dir(struct map_groups *mg,
+ const char *dir_name, int depth)
+{
+ struct dirent *dent;
+ DIR *dir = opendir(dir_name);
+ int ret = 0;
+
+ if (!dir) {
+ pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
+ return -1;
+ }
+
+ while ((dent = readdir(dir)) != NULL) {
+ char path[PATH_MAX];
+ struct stat st;
+
+		/* sshfs might return bad dent->d_type, so we have to stat */
+ snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ /* Do not follow top-level source and build symlinks */
+ if (depth == 0) {
+ if (!strcmp(dent->d_name, "source") ||
+ !strcmp(dent->d_name, "build"))
+ continue;
+ }
+
+ ret = map_groups__set_modules_path_dir(mg, path,
+ depth + 1);
+ if (ret < 0)
+ goto out;
+ } else {
+ char *dot = strrchr(dent->d_name, '.'),
+ dso_name[PATH_MAX];
+ struct map *map;
+ char *long_name;
+
+ if (dot == NULL || strcmp(dot, ".ko"))
+ continue;
+ snprintf(dso_name, sizeof(dso_name), "[%.*s]",
+ (int)(dot - dent->d_name), dent->d_name);
+
+ strxfrchar(dso_name, '-', '_');
+ map = map_groups__find_by_name(mg, MAP__FUNCTION,
+ dso_name);
+ if (map == NULL)
+ continue;
+
+ long_name = strdup(path);
+ if (long_name == NULL) {
+ ret = -1;
+ goto out;
+ }
+ dso__set_long_name(map->dso, long_name, true);
+ dso__kernel_module_get_build_id(map->dso, "");
+ }
+ }
+
+out:
+ closedir(dir);
+ return ret;
+}
+
+static int machine__set_modules_path(struct machine *machine)
+{
+ char *version;
+ char modules_path[PATH_MAX];
+
+ version = get_kernel_version(machine->root_dir);
+ if (!version)
+ return -1;
+
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
+ machine->root_dir, version);
+ free(version);
+
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+}
+
+static int machine__create_module(void *arg, const char *name, u64 start)
+{
+ struct machine *machine = arg;
+ struct map *map;
+
+ map = machine__new_module(machine, start, name);
+ if (map == NULL)
+ return -1;
+
+ dso__kernel_module_get_build_id(map->dso, machine->root_dir);
+
+ return 0;
+}
+
+static int machine__create_modules(struct machine *machine)
+{
+ const char *modules;
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine)) {
+ modules = symbol_conf.default_guest_modules;
+ } else {
+ snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
+ modules = path;
+ }
+
+ if (symbol__restricted_filename(modules, "/proc/modules"))
+ return -1;
+
+ if (modules__parse(modules, machine, machine__create_module))
+ return -1;
+
+ if (!machine__set_modules_path(machine))
+ return 0;
+
+ pr_debug("Problems setting modules path maps, continuing anyway...\n");
+
+ return 0;
+}
+
+int machine__create_kernel_maps(struct machine *machine)
+{
+ struct dso *kernel = machine__get_kernel(machine);
+ const char *name;
+ u64 addr = machine__get_kernel_start_addr(machine, &name);
+ if (!addr)
+ return -1;
+
+ if (kernel == NULL ||
+ __machine__create_kernel_maps(machine, kernel) < 0)
+ return -1;
+
+ if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
+ if (machine__is_host(machine))
+ pr_debug("Problems creating module maps, "
+ "continuing anyway...\n");
+ else
+ pr_debug("Problems creating module maps for guest %d, "
+ "continuing anyway...\n", machine->pid);
+ }
+
+ /*
+ * Now that we have all the maps created, just set the ->end of them:
+ */
+ map_groups__fixup_end(&machine->kmaps);
+
+ if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+ addr)) {
+ machine__destroy_kernel_maps(machine);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void machine__set_kernel_mmap_len(struct machine *machine,
+ union perf_event *event)
+{
+ int i;
+
+ for (i = 0; i < MAP__NR_TYPES; i++) {
+ machine->vmlinux_maps[i]->start = event->mmap.start;
+ machine->vmlinux_maps[i]->end = (event->mmap.start +
+ event->mmap.len);
+ /*
+		 * Be a bit paranoid here: some perf.data files come with
+		 * a zero-sized synthesized MMAP event for the kernel.
+ */
+ if (machine->vmlinux_maps[i]->end == 0)
+ machine->vmlinux_maps[i]->end = ~0ULL;
+ }
+}
+
+static bool machine__uses_kcore(struct machine *machine)
+{
+ struct dso *dso;
+
+ list_for_each_entry(dso, &machine->kernel_dsos, node) {
+ if (dso__is_kcore(dso))
+ return true;
+ }
+
+ return false;
+}
+
+static int machine__process_kernel_mmap_event(struct machine *machine,
+ union perf_event *event)
+{
+ struct map *map;
+ char kmmap_prefix[PATH_MAX];
+ enum dso_kernel_type kernel_type;
+ bool is_kernel_mmap;
+
+ /* If we have maps from kcore then we do not need or want any others */
+ if (machine__uses_kcore(machine))
+ return 0;
+
+ machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
+ if (machine__is_host(machine))
+ kernel_type = DSO_TYPE_KERNEL;
+ else
+ kernel_type = DSO_TYPE_GUEST_KERNEL;
+
+ is_kernel_mmap = memcmp(event->mmap.filename,
+ kmmap_prefix,
+ strlen(kmmap_prefix) - 1) == 0;
+ if (event->mmap.filename[0] == '/' ||
+ (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
+
+ char short_module_name[1024];
+ char *name, *dot;
+
+ if (event->mmap.filename[0] == '/') {
+ name = strrchr(event->mmap.filename, '/');
+ if (name == NULL)
+ goto out_problem;
+
+ ++name; /* skip / */
+ dot = strrchr(name, '.');
+ if (dot == NULL)
+ goto out_problem;
+ snprintf(short_module_name, sizeof(short_module_name),
+ "[%.*s]", (int)(dot - name), name);
+ strxfrchar(short_module_name, '-', '_');
+ } else
+ strcpy(short_module_name, event->mmap.filename);
+
+ map = machine__new_module(machine, event->mmap.start,
+ event->mmap.filename);
+ if (map == NULL)
+ goto out_problem;
+
+ name = strdup(short_module_name);
+ if (name == NULL)
+ goto out_problem;
+
+ dso__set_short_name(map->dso, name, true);
+ map->end = map->start + event->mmap.len;
+ } else if (is_kernel_mmap) {
+ const char *symbol_name = (event->mmap.filename +
+ strlen(kmmap_prefix));
+ /*
+ * Should be there already, from the build-id table in
+ * the header.
+ */
+ struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
+ kmmap_prefix);
+ if (kernel == NULL)
+ goto out_problem;
+
+ kernel->kernel = kernel_type;
+ if (__machine__create_kernel_maps(machine, kernel) < 0)
+ goto out_problem;
+
+ machine__set_kernel_mmap_len(machine, event);
+
+ /*
+ * Avoid using a zero address (kptr_restrict) for the ref reloc
+ * symbol. Effectively having zero here means that at record
+		 * time /proc/sys/kernel/kptr_restrict was non-zero.
+ */
+ if (event->mmap.pgoff != 0) {
+ maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ event->mmap.pgoff);
+ }
+
+ if (machine__is_default_guest(machine)) {
+ /*
+			 * Preload the DSOs of the guest kernel and its modules.
+ */
+ dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
+ NULL);
+ }
+ }
+ return 0;
+out_problem:
+ return -1;
+}
+
+int machine__process_mmap2_event(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread;
+ struct map *map;
+ enum map_type type;
+ int ret = 0;
+
+ if (dump_trace)
+ perf_event__fprintf_mmap2(event, stdout);
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
+ cpumode == PERF_RECORD_MISC_KERNEL) {
+ ret = machine__process_kernel_mmap_event(machine, event);
+ if (ret < 0)
+ goto out_problem;
+ return 0;
+ }
+
+ thread = machine__findnew_thread(machine, event->mmap2.pid,
+ event->mmap2.tid);
+ if (thread == NULL)
+ goto out_problem;
+
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
+ type = MAP__VARIABLE;
+ else
+ type = MAP__FUNCTION;
+
+ map = map__new(&machine->user_dsos, event->mmap2.start,
+ event->mmap2.len, event->mmap2.pgoff,
+ event->mmap2.pid, event->mmap2.maj,
+ event->mmap2.min, event->mmap2.ino,
+ event->mmap2.ino_generation,
+ event->mmap2.prot,
+ event->mmap2.flags,
+ event->mmap2.filename, type);
+
+ if (map == NULL)
+ goto out_problem;
+
+ thread__insert_map(thread, map);
+ return 0;
+
+out_problem:
+ dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
+ return 0;
+}
+
+int machine__process_mmap_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread;
+ struct map *map;
+ enum map_type type;
+ int ret = 0;
+
+ if (dump_trace)
+ perf_event__fprintf_mmap(event, stdout);
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
+ cpumode == PERF_RECORD_MISC_KERNEL) {
+ ret = machine__process_kernel_mmap_event(machine, event);
+ if (ret < 0)
+ goto out_problem;
+ return 0;
+ }
+
+ thread = machine__findnew_thread(machine, event->mmap.pid,
+ event->mmap.tid);
+ if (thread == NULL)
+ goto out_problem;
+
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
+ type = MAP__VARIABLE;
+ else
+ type = MAP__FUNCTION;
+
+ map = map__new(&machine->user_dsos, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff,
+ event->mmap.pid, 0, 0, 0, 0, 0, 0,
+ event->mmap.filename,
+ type);
+
+ if (map == NULL)
+ goto out_problem;
+
+ thread__insert_map(thread, map);
+ return 0;
+
+out_problem:
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
+ return 0;
+}
+
+static void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+ machine->last_match = NULL;
+ rb_erase(&th->rb_node, &machine->threads);
+ /*
+ * We may have references to this thread, for instance in some hist_entry
+	 * instances, so just move it to the dead_threads list.
+ */
+ list_add_tail(&th->node, &machine->dead_threads);
+}
+
+int machine__process_fork_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct thread *thread = machine__find_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
+ struct thread *parent = machine__findnew_thread(machine,
+ event->fork.ppid,
+ event->fork.ptid);
+
+	/* If a thread already exists for this thread id, remove it */
+ if (thread != NULL)
+ machine__remove_thread(machine, thread);
+
+ thread = machine__findnew_thread(machine, event->fork.pid,
+ event->fork.tid);
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+ if (thread == NULL || parent == NULL ||
+ thread__fork(thread, parent, sample->time) < 0) {
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int machine__process_exit_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct thread *thread = machine__find_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
+
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+ if (thread != NULL)
+ thread__exited(thread);
+
+ return 0;
+}
+
+int machine__process_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample)
+{
+ int ret;
+
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ ret = machine__process_comm_event(machine, event, sample); break;
+ case PERF_RECORD_MMAP:
+ ret = machine__process_mmap_event(machine, event, sample); break;
+ case PERF_RECORD_MMAP2:
+ ret = machine__process_mmap2_event(machine, event, sample); break;
+ case PERF_RECORD_FORK:
+ ret = machine__process_fork_event(machine, event, sample); break;
+ case PERF_RECORD_EXIT:
+ ret = machine__process_exit_event(machine, event, sample); break;
+ case PERF_RECORD_LOST:
+ ret = machine__process_lost_event(machine, event, sample); break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
+{
+ if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
+ return 1;
+ return 0;
+}
+
+static void ip__resolve_ams(struct machine *machine, struct thread *thread,
+ struct addr_map_symbol *ams,
+ u64 ip)
+{
+ struct addr_location al;
+
+ memset(&al, 0, sizeof(al));
+ /*
+ * We cannot use the header.misc hint to determine whether a
+ * branch stack address is user, kernel, guest, hypervisor.
+ * Branches may straddle the kernel/user/hypervisor boundaries.
+	 * Thus, we have to try consecutively until we find a match,
+	 * or else the symbol remains unknown.
+ */
+ thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);
+
+ ams->addr = ip;
+ ams->al_addr = al.addr;
+ ams->sym = al.sym;
+ ams->map = al.map;
+}
+
+static void ip__resolve_data(struct machine *machine, struct thread *thread,
+ u8 m, struct addr_map_symbol *ams, u64 addr)
+{
+ struct addr_location al;
+
+ memset(&al, 0, sizeof(al));
+
+ thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
+ &al);
+ ams->addr = addr;
+ ams->al_addr = al.addr;
+ ams->sym = al.sym;
+ ams->map = al.map;
+}
+
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+ struct addr_location *al)
+{
+ struct mem_info *mi = zalloc(sizeof(*mi));
+
+ if (!mi)
+ return NULL;
+
+ ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
+ ip__resolve_data(al->machine, al->thread, al->cpumode,
+ &mi->daddr, sample->addr);
+ mi->data_src.val = sample->data_src;
+
+ return mi;
+}
+
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+ struct addr_location *al)
+{
+ unsigned int i;
+ const struct branch_stack *bs = sample->branch_stack;
+ struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
+
+ if (!bi)
+ return NULL;
+
+ for (i = 0; i < bs->nr; i++) {
+ ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
+ ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
+ bi[i].flags = bs->entries[i].flags;
+ }
+ return bi;
+}
+
+static int machine__resolve_callchain_sample(struct machine *machine,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ int chain_nr = min(max_stack, (int)chain->nr);
+ int i;
+ int err;
+
+ callchain_cursor_reset(&callchain_cursor);
+
+ if (chain->nr > PERF_MAX_STACK_DEPTH) {
+ pr_warning("corrupted callchain. skipping...\n");
+ return 0;
+ }
+
+ for (i = 0; i < chain_nr; i++) {
+ u64 ip;
+ struct addr_location al;
+
+ if (callchain_param.order == ORDER_CALLEE)
+ ip = chain->ips[i];
+ else
+ ip = chain->ips[chain->nr - i - 1];
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER;
+ break;
+ default:
+ pr_debug("invalid callchain context: "
+ "%"PRId64"\n", (s64) ip);
+ /*
+ * It seems the callchain is corrupted.
+ * Discard all.
+ */
+ callchain_cursor_reset(&callchain_cursor);
+ return 0;
+ }
+ continue;
+ }
+
+ al.filtered = 0;
+ thread__find_addr_location(thread, machine, cpumode,
+ MAP__FUNCTION, ip, &al);
+ if (al.sym != NULL) {
+ if (sort__has_parent && !*parent &&
+ symbol__match_regex(al.sym, &parent_regex))
+ *parent = al.sym;
+ else if (have_ignore_callees && root_al &&
+ symbol__match_regex(al.sym, &ignore_callees_regex)) {
+ /* Treat this symbol as the root,
+ forgetting its callees. */
+ *root_al = al;
+ callchain_cursor_reset(&callchain_cursor);
+ }
+ }
+
+ err = callchain_cursor_append(&callchain_cursor,
+ ip, al.map, al.sym);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int unwind_entry(struct unwind_entry *entry, void *arg)
+{
+ struct callchain_cursor *cursor = arg;
+ return callchain_cursor_append(cursor, entry->ip,
+ entry->map, entry->sym);
+}
+
+int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack)
+{
+ int ret;
+
+ ret = machine__resolve_callchain_sample(machine, thread,
+ sample->callchain, parent,
+ root_al, max_stack);
+ if (ret)
+ return ret;
+
+	/* Can we do DWARF post unwind? */
+ if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
+ (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
+ return 0;
+
+ /* Bail out if nothing was captured. */
+ if ((!sample->user_regs.regs) ||
+ (!sample->user_stack.size))
+ return 0;
+
+ return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
+ thread, sample, max_stack);
+
+}
+
+int machine__for_each_thread(struct machine *machine,
+ int (*fn)(struct thread *thread, void *p),
+ void *priv)
+{
+ struct rb_node *nd;
+ struct thread *thread;
+ int rc = 0;
+
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
+ thread = rb_entry(nd, struct thread, rb_node);
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
+
+ list_for_each_entry(thread, &machine->dead_threads, node) {
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
+ return rc;
+}
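+
+/*
+ * Example callback for the iterator above (illustrative sketch only;
+ * count_thread and nr are hypothetical): count every live and dead thread:
+ *
+ *	static int count_thread(struct thread *thread __maybe_unused, void *p)
+ *	{
+ *		(*(int *)p)++;
+ *		return 0;
+ *	}
+ *
+ *	int nr = 0;
+ *	machine__for_each_thread(machine, count_thread, &nr);
+ */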
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct thread_map *threads,
+ perf_event__handler_t process, bool data_mmap)
+{
+ if (target__has_task(target))
+ return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+ else if (target__has_cpu(target))
+ return perf_event__synthesize_threads(tool, process, machine, data_mmap);
+ /* command specified */
+ return 0;
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
new file mode 100644
index 00000000000..c8c74a11939
--- /dev/null
+++ b/tools/perf/util/machine.h
@@ -0,0 +1,194 @@
+#ifndef __PERF_MACHINE_H
+#define __PERF_MACHINE_H
+
+#include <sys/types.h>
+#include <linux/rbtree.h>
+#include "map.h"
+#include "event.h"
+
+struct addr_location;
+struct branch_stack;
+struct perf_evsel;
+struct perf_sample;
+struct symbol;
+struct thread;
+union perf_event;
+
+/* Native host kernel uses -1 as pid index in machine */
+#define HOST_KERNEL_ID (-1)
+#define DEFAULT_GUEST_KERNEL_ID (0)
+
+extern const char *ref_reloc_sym_names[];
+
+struct machine {
+ struct rb_node rb_node;
+ pid_t pid;
+ u16 id_hdr_size;
+ char *root_dir;
+ struct rb_root threads;
+ struct list_head dead_threads;
+ struct thread *last_match;
+ struct list_head user_dsos;
+ struct list_head kernel_dsos;
+ struct map_groups kmaps;
+ struct map *vmlinux_maps[MAP__NR_TYPES];
+ symbol_filter_t symbol_filter;
+};
+
+static inline
+struct map *machine__kernel_map(struct machine *machine, enum map_type type)
+{
+ return machine->vmlinux_maps[type];
+}
+
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+ pid_t tid);
+
+int machine__process_comm_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+int machine__process_exit_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+int machine__process_fork_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+int machine__process_lost_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+int machine__process_mmap_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+int machine__process_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+
+typedef void (*machine__process_t)(struct machine *machine, void *data);
+
+struct machines {
+ struct machine host;
+ struct rb_root guests;
+ symbol_filter_t symbol_filter;
+};
+
+void machines__init(struct machines *machines);
+void machines__exit(struct machines *machines);
+
+void machines__process_guests(struct machines *machines,
+ machine__process_t process, void *data);
+
+struct machine *machines__add(struct machines *machines, pid_t pid,
+ const char *root_dir);
+struct machine *machines__find_host(struct machines *machines);
+struct machine *machines__find(struct machines *machines, pid_t pid);
+struct machine *machines__findnew(struct machines *machines, pid_t pid);
+
+void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
+char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
+
+void machines__set_symbol_filter(struct machines *machines,
+ symbol_filter_t symbol_filter);
+
+struct machine *machine__new_host(void);
+int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
+void machine__exit(struct machine *machine);
+void machine__delete_dead_threads(struct machine *machine);
+void machine__delete_threads(struct machine *machine);
+void machine__delete(struct machine *machine);
+
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+ struct addr_location *al);
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+ struct addr_location *al);
+int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ int max_stack);
+
+/*
+ * The default guest kernel is defined by the --guestkallsyms and
+ * --guestmodules parameters.
+ */
+static inline bool machine__is_default_guest(struct machine *machine)
+{
+ return machine ? machine->pid == DEFAULT_GUEST_KERNEL_ID : false;
+}
+
+static inline bool machine__is_host(struct machine *machine)
+{
+ return machine ? machine->pid == HOST_KERNEL_ID : false;
+}
+
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
+ pid_t tid);
+
+size_t machine__fprintf(struct machine *machine, FILE *fp);
+
+static inline
+struct symbol *machine__find_kernel_symbol(struct machine *machine,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol(&machine->kmaps, type, addr,
+ mapp, filter);
+}
+
+static inline
+struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr,
+ mapp, filter);
+}
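+
+/*
+ * Lookup sketch (hypothetical caller): resolve a kernel address to a symbol
+ * and the map it lives in, without a symbol filter:
+ *
+ *	struct map *map;
+ *	struct symbol *sym = machine__find_kernel_function(machine, addr,
+ *							   &map, NULL);
+ *
+ *	if (sym != NULL)
+ *		printf("%#" PRIx64 ": %s\n", addr, sym->name);
+ */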
+
+static inline
+struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_function_by_name(&machine->kmaps, name, mapp,
+ filter);
+}
+
+struct map *machine__new_module(struct machine *machine, u64 start,
+ const char *filename);
+
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+ enum map_type type, symbol_filter_t filter);
+int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
+ symbol_filter_t filter);
+
+size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm);
+size_t machines__fprintf_dsos(struct machines *machines, FILE *fp);
+size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm);
+
+void machine__destroy_kernel_maps(struct machine *machine);
+int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
+int machine__create_kernel_maps(struct machine *machine);
+
+int machines__create_kernel_maps(struct machines *machines, pid_t pid);
+int machines__create_guest_kernel_maps(struct machines *machines);
+void machines__destroy_kernel_maps(struct machines *machines);
+
+size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
+
+int machine__for_each_thread(struct machine *machine,
+ int (*fn)(struct thread *thread, void *p),
+ void *priv);
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct thread_map *threads,
+ perf_event__handler_t process, bool data_mmap);
+static inline
+int machine__synthesize_threads(struct machine *machine, struct target *target,
+ struct thread_map *threads, bool data_mmap)
+{
+ return __machine__synthesize_threads(machine, NULL, target, threads,
+ perf_event__process, data_mmap);
+}
+
+#endif /* __PERF_MACHINE_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3a7eb6ec0ee..25c571f4cba 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,11 +1,18 @@
#include "symbol.h"
#include <errno.h>
+#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
+#include "thread.h"
+#include "strlist.h"
+#include "vdso.h"
+#include "build-id.h"
+#include "util.h"
+#include <linux/string.h>
const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
@@ -14,104 +21,245 @@ const char *map_type__name[MAP__NR_TYPES] = {
static inline int is_anon_memory(const char *filename)
{
- return strcmp(filename, "//anon") == 0;
+ return !strcmp(filename, "//anon") ||
+ !strcmp(filename, "/dev/zero (deleted)") ||
+ !strcmp(filename, "/anon_hugepage (deleted)");
}
-void map__init(struct map *self, enum map_type type,
+static inline int is_no_dso_memory(const char *filename)
+{
+ return !strncmp(filename, "[stack", 6) ||
+ !strcmp(filename, "[heap]");
+}
+
+static inline int is_android_lib(const char *filename)
+{
+ return !strncmp(filename, "/data/app-lib", 13) ||
+ !strncmp(filename, "/system/lib", 11);
+}
+
+static inline bool replace_android_lib(const char *filename, char *newfilename)
+{
+ const char *libname;
+ char *app_abi;
+ size_t app_abi_length, new_length;
+ size_t lib_length = 0;
+
+ libname = strrchr(filename, '/');
+ if (libname)
+ lib_length = strlen(libname);
+
+ app_abi = getenv("APP_ABI");
+ if (!app_abi)
+ return false;
+
+ app_abi_length = strlen(app_abi);
+
+ if (!strncmp(filename, "/data/app-lib", 13)) {
+ char *apk_path;
+
+ if (!app_abi_length)
+ return false;
+
+ new_length = 7 + app_abi_length + lib_length;
+
+ apk_path = getenv("APK_PATH");
+ if (apk_path) {
+ new_length += strlen(apk_path) + 1;
+ if (new_length > PATH_MAX)
+ return false;
+ snprintf(newfilename, new_length,
+ "%s/libs/%s/%s", apk_path, app_abi, libname);
+ } else {
+ if (new_length > PATH_MAX)
+ return false;
+ snprintf(newfilename, new_length,
+ "libs/%s/%s", app_abi, libname);
+ }
+ return true;
+ }
+
+ if (!strncmp(filename, "/system/lib/", 11)) {
+ char *ndk, *app;
+ const char *arch;
+ size_t ndk_length;
+ size_t app_length;
+
+ ndk = getenv("NDK_ROOT");
+ app = getenv("APP_PLATFORM");
+
+ if (!(ndk && app))
+ return false;
+
+ ndk_length = strlen(ndk);
+ app_length = strlen(app);
+
+ if (!(ndk_length && app_length && app_abi_length))
+ return false;
+
+ arch = !strncmp(app_abi, "arm", 3) ? "arm" :
+ !strncmp(app_abi, "mips", 4) ? "mips" :
+ !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
+
+ if (!arch)
+ return false;
+
+ new_length = 27 + ndk_length +
+ app_length + lib_length
+ + strlen(arch);
+
+ if (new_length > PATH_MAX)
+ return false;
+ snprintf(newfilename, new_length,
+ "%s/platforms/%s/arch-%s/usr/lib/%s",
+ ndk, app, arch, libname);
+
+ return true;
+ }
+ return false;
+}
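+
+/*
+ * Note (descriptive only): the rewrite above is driven entirely by
+ * environment variables; APP_ABI selects the per-ABI library directory,
+ * and either APK_PATH or the NDK_ROOT/APP_PLATFORM pair supplies the
+ * search prefix. A hypothetical invocation would export these before
+ * running the tool, e.g. APP_ABI=arm APK_PATH=/path/to/app.
+ */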
+
+void map__init(struct map *map, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso)
{
- self->type = type;
- self->start = start;
- self->end = end;
- self->pgoff = pgoff;
- self->dso = dso;
- self->map_ip = map__map_ip;
- self->unmap_ip = map__unmap_ip;
- RB_CLEAR_NODE(&self->rb_node);
- self->groups = NULL;
- self->referenced = false;
+ map->type = type;
+ map->start = start;
+ map->end = end;
+ map->pgoff = pgoff;
+ map->reloc = 0;
+ map->dso = dso;
+ map->map_ip = map__map_ip;
+ map->unmap_ip = map__unmap_ip;
+ RB_CLEAR_NODE(&map->rb_node);
+ map->groups = NULL;
+ map->referenced = false;
+ map->erange_warned = false;
}
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
- u64 pgoff, u32 pid, char *filename,
+ u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
+ u64 ino_gen, u32 prot, u32 flags, char *filename,
enum map_type type)
{
- struct map *self = malloc(sizeof(*self));
+ struct map *map = malloc(sizeof(*map));
- if (self != NULL) {
+ if (map != NULL) {
char newfilename[PATH_MAX];
struct dso *dso;
- int anon;
+ int anon, no_dso, vdso, android;
+ android = is_android_lib(filename);
anon = is_anon_memory(filename);
+ vdso = is_vdso_map(filename);
+ no_dso = is_no_dso_memory(filename);
- if (anon) {
+ map->maj = d_maj;
+ map->min = d_min;
+ map->ino = ino;
+ map->ino_generation = ino_gen;
+ map->prot = prot;
+ map->flags = flags;
+
+ if ((anon || no_dso) && type == MAP__FUNCTION) {
snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
filename = newfilename;
}
- dso = __dsos__findnew(dsos__list, filename);
+ if (android) {
+ if (replace_android_lib(filename, newfilename))
+ filename = newfilename;
+ }
+
+ if (vdso) {
+ pgoff = 0;
+ dso = vdso__dso_findnew(dsos__list);
+ } else
+ dso = __dsos__findnew(dsos__list, filename);
+
if (dso == NULL)
goto out_delete;
- map__init(self, type, start, start + len, pgoff, dso);
+ map__init(map, type, start, start + len, pgoff, dso);
- if (anon) {
-set_identity:
- self->map_ip = self->unmap_ip = identity__map_ip;
- } else if (strcmp(filename, "[vdso]") == 0) {
- dso__set_loaded(dso, self->type);
- goto set_identity;
+ if (anon || no_dso) {
+ map->map_ip = map->unmap_ip = identity__map_ip;
+
+ /*
+ * Set memory without DSO as loaded. All map__find_*
+ * functions still return NULL, and we avoid the
+ * unnecessary map__load warning.
+ */
+ if (type != MAP__FUNCTION)
+ dso__set_loaded(dso, map->type);
}
}
- return self;
+ return map;
out_delete:
- free(self);
+ free(map);
return NULL;
}
-void map__delete(struct map *self)
+/*
+ * Constructor variant for modules (where we know from /proc/modules where
+ * they are loaded) and for vmlinux, whose start and end addresses are only
+ * known after all of its symbols have been loaded.
+ */
+struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
+{
+ struct map *map = calloc(1, (sizeof(*map) +
+ (dso->kernel ? sizeof(struct kmap) : 0)));
+ if (map != NULL) {
+ /*
+ * ->end will be filled after we load all the symbols
+ */
+ map__init(map, type, start, 0, 0, dso);
+ }
+
+ return map;
+}
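+
+/*
+ * Usage sketch (hypothetical caller, mirroring machine__new_module() in
+ * machine.c): module maps are created with a zero ->end and fixed up later,
+ * once their symbols are loaded:
+ *
+ *	struct map *map = map__new2(start, dso, MAP__FUNCTION);
+ *
+ *	if (map != NULL)
+ *		map_groups__insert(&machine->kmaps, map);
+ */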
+
+void map__delete(struct map *map)
{
- free(self);
+ free(map);
}
-void map__fixup_start(struct map *self)
+void map__fixup_start(struct map *map)
{
- struct rb_root *symbols = &self->dso->symbols[self->type];
+ struct rb_root *symbols = &map->dso->symbols[map->type];
struct rb_node *nd = rb_first(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
- self->start = sym->start;
+ map->start = sym->start;
}
}
-void map__fixup_end(struct map *self)
+void map__fixup_end(struct map *map)
{
- struct rb_root *symbols = &self->dso->symbols[self->type];
+ struct rb_root *symbols = &map->dso->symbols[map->type];
struct rb_node *nd = rb_last(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
- self->end = sym->end;
+ map->end = sym->end;
}
}
#define DSO__DELETED "(deleted)"
-int map__load(struct map *self, symbol_filter_t filter)
+int map__load(struct map *map, symbol_filter_t filter)
{
- const char *name = self->dso->long_name;
+ const char *name = map->dso->long_name;
int nr;
- if (dso__loaded(self->dso, self->type))
+ if (dso__loaded(map->dso, map->type))
return 0;
- nr = dso__load(self->dso, self, filter);
+ nr = dso__load(map->dso, map, filter);
if (nr < 0) {
- if (self->dso->has_build_id) {
+ if (map->dso->has_build_id) {
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
- build_id__sprintf(self->dso->build_id,
- sizeof(self->dso->build_id),
+ build_id__sprintf(map->dso->build_id,
+ sizeof(map->dso->build_id),
sbuild_id);
pr_warning("%s with build id %s not found",
name, sbuild_id);
@@ -121,62 +269,50 @@ int map__load(struct map *self, symbol_filter_t filter)
pr_warning(", continuing without symbols\n");
return -1;
} else if (nr == 0) {
+#ifdef HAVE_LIBELF_SUPPORT
const size_t len = strlen(name);
const size_t real_len = len - sizeof(DSO__DELETED);
if (len > sizeof(DSO__DELETED) &&
strcmp(name + real_len + 1, DSO__DELETED) == 0) {
- pr_warning("%.*s was updated, restart the long "
- "running apps that use it!\n",
+ pr_warning("%.*s was updated (is prelink enabled?). "
+ "Restart the long running apps that use it!\n",
(int)real_len, name);
} else {
pr_warning("no symbols found in %s, maybe install "
"a debug package?\n", name);
}
-
+#endif
return -1;
}
- /*
- * Only applies to the kernel, as its symtabs aren't relative like the
- * module ones.
- */
- if (self->dso->kernel)
- map__reloc_vmlinux(self);
return 0;
}
-struct symbol *map__find_symbol(struct map *self, u64 addr,
+struct symbol *map__find_symbol(struct map *map, u64 addr,
symbol_filter_t filter)
{
- if (map__load(self, filter) < 0)
+ if (map__load(map, filter) < 0)
return NULL;
- return dso__find_symbol(self->dso, self->type, addr);
+ return dso__find_symbol(map->dso, map->type, addr);
}
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
symbol_filter_t filter)
{
- if (map__load(self, filter) < 0)
+ if (map__load(map, filter) < 0)
return NULL;
- if (!dso__sorted_by_name(self->dso, self->type))
- dso__sort_by_name(self->dso, self->type);
+ if (!dso__sorted_by_name(map->dso, map->type))
+ dso__sort_by_name(map->dso, map->type);
- return dso__find_symbol_by_name(self->dso, self->type, name);
+ return dso__find_symbol_by_name(map->dso, map->type, name);
}
-struct map *map__clone(struct map *self)
+struct map *map__clone(struct map *map)
{
- struct map *map = malloc(sizeof(*self));
-
- if (!map)
- return NULL;
-
- memcpy(map, self, sizeof(*self));
-
- return map;
+ return memdup(map, sizeof(*map));
}
int map__overlap(struct map *l, struct map *r)
@@ -193,81 +329,159 @@ int map__overlap(struct map *l, struct map *r)
return 0;
}
-size_t map__fprintf(struct map *self, FILE *fp)
+size_t map__fprintf(struct map *map, FILE *fp)
{
- return fprintf(fp, " %Lx-%Lx %Lx %s\n",
- self->start, self->end, self->pgoff, self->dso->name);
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
+ map->start, map->end, map->pgoff, map->dso->name);
}
-/*
+size_t map__fprintf_dsoname(struct map *map, FILE *fp)
+{
+ const char *dsoname = "[unknown]";
+
+ if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+ if (symbol_conf.show_kernel_path && map->dso->long_name)
+ dsoname = map->dso->long_name;
+ else if (map->dso->name)
+ dsoname = map->dso->name;
+ }
+
+ return fprintf(fp, "%s", dsoname);
+}
+
+int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
+ FILE *fp)
+{
+ char *srcline;
+ int ret = 0;
+
+ if (map && map->dso) {
+ srcline = get_srcline(map->dso,
+ map__rip_2objdump(map, addr));
+ if (srcline != SRCLINE_UNKNOWN)
+ ret = fprintf(fp, "%s%s", prefix, srcline);
+ free_srcline(srcline);
+ }
+ return ret;
+}
+
+/**
+ * map__rip_2objdump - convert symbol start address to objdump address.
+ * @map: memory map
+ * @rip: symbol start address
+ *
* objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
- * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
+ * relative to section start.
+ *
+ * Return: Address suitable for passing to "objdump --start-address="
*/
u64 map__rip_2objdump(struct map *map, u64 rip)
{
- u64 addr = map->dso->adjust_symbols ?
- map->unmap_ip(map, rip) : /* RIP -> IP */
- rip;
- return addr;
+ if (!map->dso->adjust_symbols)
+ return rip;
+
+ if (map->dso->rel)
+ return rip - map->pgoff;
+
+ return map->unmap_ip(map, rip) - map->reloc;
}
-u64 map__objdump_2ip(struct map *map, u64 addr)
+/**
+ * map__objdump_2mem - convert objdump address to a memory address.
+ * @map: memory map
+ * @ip: objdump address
+ *
+ * Closely related to map__rip_2objdump(), this function takes an address from
+ * objdump and converts it to a memory address. Note this assumes that @map
+ * contains the address. To be sure the result is valid, check it forwards
+ * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
+ *
+ * Return: Memory address.
+ */
+u64 map__objdump_2mem(struct map *map, u64 ip)
{
- u64 ip = map->dso->adjust_symbols ?
- addr :
- map->unmap_ip(map, addr); /* RIP -> IP */
- return ip;
+ if (!map->dso->adjust_symbols)
+ return map->unmap_ip(map, ip);
+
+ if (map->dso->rel)
+ return map->unmap_ip(map, ip + map->pgoff);
+
+ return ip + map->reloc;
}
-void map_groups__init(struct map_groups *self)
+void map_groups__init(struct map_groups *mg)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i) {
- self->maps[i] = RB_ROOT;
- INIT_LIST_HEAD(&self->removed_maps[i]);
+ mg->maps[i] = RB_ROOT;
+ INIT_LIST_HEAD(&mg->removed_maps[i]);
}
- self->machine = NULL;
+ mg->machine = NULL;
+ mg->refcnt = 1;
}
-static void maps__delete(struct rb_root *self)
+static void maps__delete(struct rb_root *maps)
{
- struct rb_node *next = rb_first(self);
+ struct rb_node *next = rb_first(maps);
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, self);
+ rb_erase(&pos->rb_node, maps);
map__delete(pos);
}
}
-static void maps__delete_removed(struct list_head *self)
+static void maps__delete_removed(struct list_head *maps)
{
struct map *pos, *n;
- list_for_each_entry_safe(pos, n, self, node) {
+ list_for_each_entry_safe(pos, n, maps, node) {
list_del(&pos->node);
map__delete(pos);
}
}
-void map_groups__exit(struct map_groups *self)
+void map_groups__exit(struct map_groups *mg)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i) {
- maps__delete(&self->maps[i]);
- maps__delete_removed(&self->removed_maps[i]);
+ maps__delete(&mg->maps[i]);
+ maps__delete_removed(&mg->removed_maps[i]);
}
}
-void map_groups__flush(struct map_groups *self)
+struct map_groups *map_groups__new(void)
+{
+ struct map_groups *mg = malloc(sizeof(*mg));
+
+ if (mg != NULL)
+ map_groups__init(mg);
+
+ return mg;
+}
+
+void map_groups__delete(struct map_groups *mg)
+{
+ map_groups__exit(mg);
+ free(mg);
+}
+
+void map_groups__put(struct map_groups *mg)
+{
+ if (--mg->refcnt == 0)
+ map_groups__delete(mg);
+}
+
+void map_groups__flush(struct map_groups *mg)
{
int type;
for (type = 0; type < MAP__NR_TYPES; type++) {
- struct rb_root *root = &self->maps[type];
+ struct rb_root *root = &mg->maps[type];
struct rb_node *next = rb_first(root);
while (next) {
@@ -279,19 +493,20 @@ void map_groups__flush(struct map_groups *self)
* instance in some hist_entry instances, so
* just move them to a separate list.
*/
- list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+ list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
}
}
}
-struct symbol *map_groups__find_symbol(struct map_groups *self,
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
enum map_type type, u64 addr,
struct map **mapp,
symbol_filter_t filter)
{
- struct map *map = map_groups__find(self, type, addr);
+ struct map *map = map_groups__find(mg, type, addr);
- if (map != NULL) {
+ /* Ensure map is loaded before using map->map_ip */
+ if (map != NULL && map__load(map, filter) >= 0) {
if (mapp != NULL)
*mapp = map;
return map__find_symbol(map, map->map_ip(map, addr), filter);
@@ -300,7 +515,7 @@ struct symbol *map_groups__find_symbol(struct map_groups *self,
return NULL;
}
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
enum map_type type,
const char *name,
struct map **mapp,
@@ -308,7 +523,7 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
{
struct rb_node *nd;
- for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
@@ -322,13 +537,30 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
return NULL;
}
-size_t __map_groups__fprintf_maps(struct map_groups *self,
+int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
+{
+ if (ams->addr < ams->map->start || ams->addr > ams->map->end) {
+ if (ams->map->groups == NULL)
+ return -1;
+ ams->map = map_groups__find(ams->map->groups, ams->map->type,
+ ams->addr);
+ if (ams->map == NULL)
+ return -1;
+ }
+
+ ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
+ ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);
+
+ return ams->sym ? 0 : -1;
+}
+
+size_t __map_groups__fprintf_maps(struct map_groups *mg,
enum map_type type, int verbose, FILE *fp)
{
size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
struct rb_node *nd;
- for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp);
@@ -341,22 +573,22 @@ size_t __map_groups__fprintf_maps(struct map_groups *self,
return printed;
}
-size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
+size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
{
size_t printed = 0, i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __map_groups__fprintf_maps(self, i, verbose, fp);
+ printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
return printed;
}
-static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
enum map_type type,
int verbose, FILE *fp)
{
struct map *pos;
size_t printed = 0;
- list_for_each_entry(pos, &self->removed_maps[type], node) {
+ list_for_each_entry(pos, &mg->removed_maps[type], node) {
printed += fprintf(fp, "Map:");
printed += map__fprintf(pos, fp);
if (verbose > 1) {
@@ -367,26 +599,26 @@ static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
return printed;
}
-static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
+static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
int verbose, FILE *fp)
{
size_t printed = 0, i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
+ printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
return printed;
}
-size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
+size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
- size_t printed = map_groups__fprintf_maps(self, verbose, fp);
+ size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
printed += fprintf(fp, "Removed maps:\n");
- return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
+ return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
}
-int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
int verbose, FILE *fp)
{
- struct rb_root *root = &self->maps[map->type];
+ struct rb_root *root = &mg->maps[map->type];
struct rb_node *next = rb_first(root);
int err = 0;
@@ -417,7 +649,7 @@ int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
}
before->end = map->start - 1;
- map_groups__insert(self, before);
+ map_groups__insert(mg, before);
if (verbose >= 2)
map__fprintf(before, fp);
}
@@ -431,7 +663,7 @@ int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
}
after->start = map->end + 1;
- map_groups__insert(self, after);
+ map_groups__insert(mg, after);
if (verbose >= 2)
map__fprintf(after, fp);
}
@@ -440,7 +672,7 @@ move_map:
* If we have references, just move them to a separate list.
*/
if (pos->referenced)
- list_add_tail(&pos->node, &self->removed_maps[map->type]);
+ list_add_tail(&pos->node, &mg->removed_maps[map->type]);
else
map__delete(pos);
@@ -454,7 +686,7 @@ move_map:
/*
 * XXX This should not really _copy_ the maps, but refcount them.
*/
-int map_groups__clone(struct map_groups *self,
+int map_groups__clone(struct map_groups *mg,
struct map_groups *parent, enum map_type type)
{
struct rb_node *nd;
@@ -463,40 +695,11 @@ int map_groups__clone(struct map_groups *self,
struct map *new = map__clone(map);
if (new == NULL)
return -ENOMEM;
- map_groups__insert(self, new);
+ map_groups__insert(mg, new);
}
return 0;
}
-static u64 map__reloc_map_ip(struct map *map, u64 ip)
-{
- return ip + (s64)map->pgoff;
-}
-
-static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
-{
- return ip - (s64)map->pgoff;
-}
-
-void map__reloc_vmlinux(struct map *self)
-{
- struct kmap *kmap = map__kmap(self);
- s64 reloc;
-
- if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
- return;
-
- reloc = (kmap->ref_reloc_sym->unrelocated_addr -
- kmap->ref_reloc_sym->addr);
-
- if (!reloc)
- return;
-
- self->map_ip = map__reloc_map_ip;
- self->unmap_ip = map__reloc_unmap_ip;
- self->pgoff = reloc;
-}
-
void maps__insert(struct rb_root *maps, struct map *map)
{
struct rb_node **p = &maps->rb_node;
@@ -517,9 +720,9 @@ void maps__insert(struct rb_root *maps, struct map *map)
rb_insert_color(&map->rb_node, maps);
}
-void maps__remove(struct rb_root *self, struct map *map)
+void maps__remove(struct rb_root *maps, struct map *map)
{
- rb_erase(&map->rb_node, self);
+ rb_erase(&map->rb_node, maps);
}
struct map *maps__find(struct rb_root *maps, u64 ip)
@@ -542,141 +745,20 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
return NULL;
}
-int machine__init(struct machine *self, const char *root_dir, pid_t pid)
-{
- map_groups__init(&self->kmaps);
- RB_CLEAR_NODE(&self->rb_node);
- INIT_LIST_HEAD(&self->user_dsos);
- INIT_LIST_HEAD(&self->kernel_dsos);
-
- self->kmaps.machine = self;
- self->pid = pid;
- self->root_dir = strdup(root_dir);
- return self->root_dir == NULL ? -ENOMEM : 0;
-}
-
-static void dsos__delete(struct list_head *self)
-{
- struct dso *pos, *n;
-
- list_for_each_entry_safe(pos, n, self, node) {
- list_del(&pos->node);
- dso__delete(pos);
- }
-}
-
-void machine__exit(struct machine *self)
-{
- map_groups__exit(&self->kmaps);
- dsos__delete(&self->user_dsos);
- dsos__delete(&self->kernel_dsos);
- free(self->root_dir);
- self->root_dir = NULL;
-}
-
-void machine__delete(struct machine *self)
+struct map *maps__first(struct rb_root *maps)
{
- machine__exit(self);
- free(self);
-}
+ struct rb_node *first = rb_first(maps);
-struct machine *machines__add(struct rb_root *self, pid_t pid,
- const char *root_dir)
-{
- struct rb_node **p = &self->rb_node;
- struct rb_node *parent = NULL;
- struct machine *pos, *machine = malloc(sizeof(*machine));
-
- if (!machine)
- return NULL;
-
- if (machine__init(machine, root_dir, pid) != 0) {
- free(machine);
- return NULL;
- }
-
- while (*p != NULL) {
- parent = *p;
- pos = rb_entry(parent, struct machine, rb_node);
- if (pid < pos->pid)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- rb_link_node(&machine->rb_node, parent, p);
- rb_insert_color(&machine->rb_node, self);
-
- return machine;
-}
-
-struct machine *machines__find(struct rb_root *self, pid_t pid)
-{
- struct rb_node **p = &self->rb_node;
- struct rb_node *parent = NULL;
- struct machine *machine;
- struct machine *default_machine = NULL;
-
- while (*p != NULL) {
- parent = *p;
- machine = rb_entry(parent, struct machine, rb_node);
- if (pid < machine->pid)
- p = &(*p)->rb_left;
- else if (pid > machine->pid)
- p = &(*p)->rb_right;
- else
- return machine;
- if (!machine->pid)
- default_machine = machine;
- }
-
- return default_machine;
-}
-
-struct machine *machines__findnew(struct rb_root *self, pid_t pid)
-{
- char path[PATH_MAX];
- const char *root_dir;
- struct machine *machine = machines__find(self, pid);
-
- if (!machine || machine->pid != pid) {
- if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
- root_dir = "";
- else {
- if (!symbol_conf.guestmount)
- goto out;
- sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
- if (access(path, R_OK)) {
- pr_err("Can't access file %s\n", path);
- goto out;
- }
- root_dir = path;
- }
- machine = machines__add(self, pid, root_dir);
- }
-
-out:
- return machine;
-}
-
-void machines__process(struct rb_root *self, machine__process_t process, void *data)
-{
- struct rb_node *nd;
-
- for (nd = rb_first(self); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- process(pos, data);
- }
+ if (first)
+ return rb_entry(first, struct map, rb_node);
+ return NULL;
}
-char *machine__mmap_name(struct machine *self, char *bf, size_t size)
+struct map *maps__next(struct map *map)
{
- if (machine__is_host(self))
- snprintf(bf, size, "[%s]", "kernel.kallsyms");
- else if (machine__is_default_guest(self))
- snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
- else
- snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
+ struct rb_node *next = rb_next(&map->rb_node);
- return bf;
+ if (next)
+ return rb_entry(next, struct map, rb_node);
+ return NULL;
}
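The new map_groups__find_ams() added above resolves a struct addr_map_symbol in place: if the recorded address no longer falls inside the cached map it re-looks the map up through that map's own groups, then converts the address to a dso-relative one with map_ip() and searches for the symbol. A minimal caller sketch, not from this patch; 'hit_map' and 'ip' stand for a map and an absolute address the caller already has, and the NULL filter means no symbol filtering:

	struct addr_map_symbol ams = {
		.map  = hit_map,	/* map the sample was first attributed to (assumed) */
		.addr = ip,		/* absolute sample address (assumed) */
	};

	if (map_groups__find_ams(&ams, NULL) == 0)	/* 0: map, al_addr and sym were filled in */
		pr_debug("%#" PRIx64 " -> %s+%#" PRIx64 "\n",
			 ams.addr, ams.sym->name, ams.al_addr - ams.sym->start);
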
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index b397c038372..7758c72522e 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -6,7 +6,7 @@
#include <linux/rbtree.h>
#include <stdio.h>
#include <stdbool.h>
-#include "types.h"
+#include <linux/types.h>
enum map_type {
MAP__FUNCTION = 0,
@@ -18,9 +18,11 @@ enum map_type {
extern const char *map_type__name[MAP__NR_TYPES];
struct dso;
+struct ip_callchain;
struct ref_reloc_sym;
struct map_groups;
struct machine;
+struct perf_evsel;
struct map {
union {
@@ -31,8 +33,15 @@ struct map {
u64 end;
u8 /* enum map_type */ type;
bool referenced;
+ bool erange_warned;
u32 priv;
+ u32 prot;
+ u32 flags;
u64 pgoff;
+ u64 reloc;
+ u32 maj, min; /* only valid for MMAP2 record */
+ u64 ino; /* only valid for MMAP2 record */
+ u64 ino_generation;/* only valid for MMAP2 record */
/* ip -> dso rip */
u64 (*map_ip)(struct map *, u64);
@@ -52,31 +61,23 @@ struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
struct machine *machine;
+ int refcnt;
};
-/* Native host kernel uses -1 as pid index in machine */
-#define HOST_KERNEL_ID (-1)
-#define DEFAULT_GUEST_KERNEL_ID (0)
-
-struct machine {
- struct rb_node rb_node;
- pid_t pid;
- char *root_dir;
- struct list_head user_dsos;
- struct list_head kernel_dsos;
- struct map_groups kmaps;
- struct map *vmlinux_maps[MAP__NR_TYPES];
-};
+struct map_groups *map_groups__new(void);
+void map_groups__delete(struct map_groups *mg);
-static inline
-struct map *machine__kernel_map(struct machine *self, enum map_type type)
+static inline struct map_groups *map_groups__get(struct map_groups *mg)
{
- return self->vmlinux_maps[type];
+ ++mg->refcnt;
+ return mg;
}
-static inline struct kmap *map__kmap(struct map *self)
+void map_groups__put(struct map_groups *mg);
+
+static inline struct kmap *map__kmap(struct map *map)
{
- return (struct kmap *)(self + 1);
+ return (struct kmap *)(map + 1);
}
static inline u64 map__map_ip(struct map *map, u64 ip)
@@ -89,7 +90,7 @@ static inline u64 map__unmap_ip(struct map *map, u64 ip)
return ip + map->start - map->pgoff;
}
-static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip)
{
return ip;
}
@@ -97,141 +98,123 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip)
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
u64 map__rip_2objdump(struct map *map, u64 rip);
-u64 map__objdump_2ip(struct map *map, u64 addr);
+
+/* objdump address -> memory address */
+u64 map__objdump_2mem(struct map *map, u64 ip);
struct symbol;
+/* map__for_each_symbol - iterate over the symbols in the given map
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as a temporary storage
+ * Note: caller must ensure map->dso is not NULL (map is loaded).
+ */
+#define map__for_each_symbol(map, pos, n) \
+ dso__for_each_symbol(map->dso, pos, n, map->type)
+
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
-void map__init(struct map *self, enum map_type type,
+void map__init(struct map *map, enum map_type type,
u64 start, u64 end, u64 pgoff, struct dso *dso);
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
- u64 pgoff, u32 pid, char *filename,
- enum map_type type);
-void map__delete(struct map *self);
-struct map *map__clone(struct map *self);
+ u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
+ u64 ino_gen, u32 prot, u32 flags,
+ char *filename, enum map_type type);
+struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
+void map__delete(struct map *map);
+struct map *map__clone(struct map *map);
int map__overlap(struct map *l, struct map *r);
-size_t map__fprintf(struct map *self, FILE *fp);
+size_t map__fprintf(struct map *map, FILE *fp);
+size_t map__fprintf_dsoname(struct map *map, FILE *fp);
+int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
+ FILE *fp);
-int map__load(struct map *self, symbol_filter_t filter);
-struct symbol *map__find_symbol(struct map *self,
+int map__load(struct map *map, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *map,
u64 addr, symbol_filter_t filter);
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
symbol_filter_t filter);
-void map__fixup_start(struct map *self);
-void map__fixup_end(struct map *self);
+void map__fixup_start(struct map *map);
+void map__fixup_end(struct map *map);
-void map__reloc_vmlinux(struct map *self);
+void map__reloc_vmlinux(struct map *map);
-size_t __map_groups__fprintf_maps(struct map_groups *self,
+size_t __map_groups__fprintf_maps(struct map_groups *mg,
enum map_type type, int verbose, FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
-void maps__remove(struct rb_root *self, struct map *map);
+void maps__remove(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
-void map_groups__init(struct map_groups *self);
-void map_groups__exit(struct map_groups *self);
-int map_groups__clone(struct map_groups *self,
+struct map *maps__first(struct rb_root *maps);
+struct map *maps__next(struct map *map);
+void map_groups__init(struct map_groups *mg);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct map_groups *mg,
struct map_groups *parent, enum map_type type);
-size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp);
-size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp);
-
-typedef void (*machine__process_t)(struct machine *self, void *data);
-
-void machines__process(struct rb_root *self, machine__process_t process, void *data);
-struct machine *machines__add(struct rb_root *self, pid_t pid,
- const char *root_dir);
-struct machine *machines__find_host(struct rb_root *self);
-struct machine *machines__find(struct rb_root *self, pid_t pid);
-struct machine *machines__findnew(struct rb_root *self, pid_t pid);
-char *machine__mmap_name(struct machine *self, char *bf, size_t size);
-int machine__init(struct machine *self, const char *root_dir, pid_t pid);
-void machine__exit(struct machine *self);
-void machine__delete(struct machine *self);
-
-/*
- * Default guest kernel is defined by parameter --guestkallsyms
- * and --guestmodules
- */
-static inline bool machine__is_default_guest(struct machine *self)
+size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp);
+
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
+ u64 addr);
+
+static inline void map_groups__insert(struct map_groups *mg, struct map *map)
{
- return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false;
+ maps__insert(&mg->maps[map->type], map);
+ map->groups = mg;
}
-static inline bool machine__is_host(struct machine *self)
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
{
- return self ? self->pid == HOST_KERNEL_ID : false;
+ maps__remove(&mg->maps[map->type], map);
}
-static inline void map_groups__insert(struct map_groups *self, struct map *map)
+static inline struct map *map_groups__find(struct map_groups *mg,
+ enum map_type type, u64 addr)
{
- maps__insert(&self->maps[map->type], map);
- map->groups = self;
+ return maps__find(&mg->maps[type], addr);
}
-static inline void map_groups__remove(struct map_groups *self, struct map *map)
+static inline struct map *map_groups__first(struct map_groups *mg,
+ enum map_type type)
{
- maps__remove(&self->maps[map->type], map);
+ return maps__first(&mg->maps[type]);
}
-static inline struct map *map_groups__find(struct map_groups *self,
- enum map_type type, u64 addr)
+static inline struct map *map_groups__next(struct map *map)
{
- return maps__find(&self->maps[type], addr);
+ return maps__next(map);
}
-struct symbol *map_groups__find_symbol(struct map_groups *self,
+struct symbol *map_groups__find_symbol(struct map_groups *mg,
enum map_type type, u64 addr,
struct map **mapp,
symbol_filter_t filter);
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
enum map_type type,
const char *name,
struct map **mapp,
symbol_filter_t filter);
-static inline
-struct symbol *machine__find_kernel_symbol(struct machine *self,
- enum map_type type, u64 addr,
- struct map **mapp,
- symbol_filter_t filter)
-{
- return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter);
-}
+struct addr_map_symbol;
-static inline
-struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
- struct map **mapp,
- symbol_filter_t filter)
-{
- return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter);
-}
+int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter);
static inline
-struct symbol *map_groups__find_function_by_name(struct map_groups *self,
+struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
const char *name, struct map **mapp,
symbol_filter_t filter)
{
- return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
-}
-
-static inline
-struct symbol *machine__find_kernel_function_by_name(struct machine *self,
- const char *name,
- struct map **mapp,
- symbol_filter_t filter)
-{
- return map_groups__find_function_by_name(&self->kmaps, name, mapp,
- filter);
+ return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter);
}
-int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
int verbose, FILE *fp);
-struct map *map_groups__find_by_name(struct map_groups *self,
+struct map *map_groups__find_by_name(struct map_groups *mg,
enum map_type type, const char *name);
-struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
-void map_groups__flush(struct map_groups *self);
+void map_groups__flush(struct map_groups *mg);
#endif /* __PERF_MAP_H */
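Besides the mechanical self -> mg rename, map.h now drops the embedded struct machine declarations (machine handling lives in its own machine.[ch] in this tree) and gains a small refcount on map_groups plus rbtree iteration helpers. A usage sketch, not from this patch, assuming 'some_mg' is an already-populated map_groups held by the caller:

	struct map_groups *mg = map_groups__get(some_mg);	/* take a reference (assumed caller-held some_mg) */
	struct map *pos;

	for (pos = map_groups__first(mg, MAP__FUNCTION); pos;
	     pos = map_groups__next(pos))
		map__fprintf(pos, stdout);	/* declared above in this header */

	map_groups__put(mg);	/* drop the reference taken above */
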
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index 1915de20dca..31ee02d4e98 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -57,9 +57,13 @@ void setup_pager(void)
}
if (!pager)
pager = getenv("PAGER");
+ if (!(pager || access("/usr/bin/pager", X_OK)))
+ pager = "/usr/bin/pager";
+ if (!(pager || access("/usr/bin/less", X_OK)))
+ pager = "/usr/bin/less";
if (!pager)
- pager = "less";
- else if (!*pager || !strcmp(pager, "cat"))
+ pager = "cat";
+ if (!*pager || !strcmp(pager, "cat"))
return;
spawned_pager = 1; /* means we are emitting to terminal */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4af5bd59cfd..1e15df10a88 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1,130 +1,128 @@
-#include "../../../include/linux/hw_breakpoint.h"
+#include <linux/hw_breakpoint.h>
#include "util.h"
#include "../perf.h"
+#include "evlist.h"
+#include "evsel.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
-#include "string.h"
+#include "linux/string.h"
#include "symbol.h"
#include "cache.h"
#include "header.h"
-#include "debugfs.h"
+#include <api/fs/debugfs.h>
+#include "parse-events-bison.h"
+#define YY_EXTRA_TYPE int
+#include "parse-events-flex.h"
+#include "pmu.h"
+#include "thread_map.h"
-int nr_counters;
-
-struct perf_event_attr attrs[MAX_COUNTERS];
-char *filters[MAX_COUNTERS];
+#define MAX_NAME_LEN 100
struct event_symbol {
- u8 type;
- u64 config;
const char *symbol;
const char *alias;
};
-enum event_result {
- EVT_FAILED,
- EVT_HANDLED,
- EVT_HANDLED_ALL
+#ifdef PARSER_DEBUG
+extern int parse_events_debug;
+#endif
+int parse_events_parse(void *data, void *scanner);
+
+static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = {
+ .symbol = "cpu-cycles",
+ .alias = "cycles",
+ },
+ [PERF_COUNT_HW_INSTRUCTIONS] = {
+ .symbol = "instructions",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = {
+ .symbol = "cache-references",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_CACHE_MISSES] = {
+ .symbol = "cache-misses",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
+ .symbol = "branch-instructions",
+ .alias = "branches",
+ },
+ [PERF_COUNT_HW_BRANCH_MISSES] = {
+ .symbol = "branch-misses",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_BUS_CYCLES] = {
+ .symbol = "bus-cycles",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
+ .symbol = "stalled-cycles-frontend",
+ .alias = "idle-cycles-frontend",
+ },
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
+ .symbol = "stalled-cycles-backend",
+ .alias = "idle-cycles-backend",
+ },
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = {
+ .symbol = "ref-cycles",
+ .alias = "",
+ },
};
-char debugfs_path[MAXPATHLEN];
-
-#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
-#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
-
-static struct event_symbol event_symbols[] = {
- { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
- { CHW(INSTRUCTIONS), "instructions", "" },
- { CHW(CACHE_REFERENCES), "cache-references", "" },
- { CHW(CACHE_MISSES), "cache-misses", "" },
- { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
- { CHW(BRANCH_MISSES), "branch-misses", "" },
- { CHW(BUS_CYCLES), "bus-cycles", "" },
-
- { CSW(CPU_CLOCK), "cpu-clock", "" },
- { CSW(TASK_CLOCK), "task-clock", "" },
- { CSW(PAGE_FAULTS), "page-faults", "faults" },
- { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
- { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
- { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
- { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
- { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
- { CSW(EMULATION_FAULTS), "emulation-faults", "" },
+static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
+ [PERF_COUNT_SW_CPU_CLOCK] = {
+ .symbol = "cpu-clock",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_TASK_CLOCK] = {
+ .symbol = "task-clock",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS] = {
+ .symbol = "page-faults",
+ .alias = "faults",
+ },
+ [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
+ .symbol = "context-switches",
+ .alias = "cs",
+ },
+ [PERF_COUNT_SW_CPU_MIGRATIONS] = {
+ .symbol = "cpu-migrations",
+ .alias = "migrations",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
+ .symbol = "minor-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
+ .symbol = "major-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
+ .symbol = "alignment-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_EMULATION_FAULTS] = {
+ .symbol = "emulation-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_DUMMY] = {
+ .symbol = "dummy",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
-#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
+#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
-#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
+#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-static const char *hw_event_names[] = {
- "cycles",
- "instructions",
- "cache-references",
- "cache-misses",
- "branches",
- "branch-misses",
- "bus-cycles",
-};
-
-static const char *sw_event_names[] = {
- "cpu-clock-msecs",
- "task-clock-msecs",
- "page-faults",
- "context-switches",
- "CPU-migrations",
- "minor-faults",
- "major-faults",
- "alignment-faults",
- "emulation-faults",
-};
-
-#define MAX_ALIASES 8
-
-static const char *hw_cache[][MAX_ALIASES] = {
- { "L1-dcache", "l1-d", "l1d", "L1-data", },
- { "L1-icache", "l1-i", "l1i", "L1-instruction", },
- { "LLC", "L2" },
- { "dTLB", "d-tlb", "Data-TLB", },
- { "iTLB", "i-tlb", "Instruction-TLB", },
- { "branch", "branches", "bpu", "btb", "bpc", },
-};
-
-static const char *hw_cache_op[][MAX_ALIASES] = {
- { "load", "loads", "read", },
- { "store", "stores", "write", },
- { "prefetch", "prefetches", "speculative-read", "speculative-load", },
-};
-
-static const char *hw_cache_result[][MAX_ALIASES] = {
- { "refs", "Reference", "ops", "access", },
- { "misses", "miss", },
-};
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-#define CACHE_READ (1 << C(OP_READ))
-#define CACHE_WRITE (1 << C(OP_WRITE))
-#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
-#define COP(x) (1 << x)
-
-/*
- * cache operartion stat
- * L1I : Read and prefetch only
- * ITLB and BPU : Read-only
- */
-static unsigned long hw_cache_stat[C(MAX)] = {
- [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
- [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(ITLB)] = (CACHE_READ),
- [C(BPU)] = (CACHE_READ),
-};
-
#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
if (sys_dirent.d_type == DT_DIR && \
@@ -136,7 +134,7 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
char evt_path[MAXPATHLEN];
int fd;
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
sys_dir->d_name, evt_dir->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
@@ -161,22 +159,22 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
struct tracepoint_path *path = NULL;
DIR *sys_dir, *evt_dir;
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
- char id_buf[4];
+ char id_buf[24];
int fd;
u64 id;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path))
return NULL;
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return NULL;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
@@ -206,7 +204,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
}
path->name = malloc(MAX_EVENT_LENGTH);
if (!path->name) {
- free(path->system);
+ zfree(&path->system);
free(path);
return NULL;
}
@@ -224,99 +222,43 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
return NULL;
}
-#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
-static const char *tracepoint_id_to_name(u64 config)
+struct tracepoint_path *tracepoint_name_to_path(const char *name)
{
- static char buf[TP_PATH_LEN];
- struct tracepoint_path *path;
+ struct tracepoint_path *path = zalloc(sizeof(*path));
+ char *str = strchr(name, ':');
- path = tracepoint_id_to_path(config);
- if (path) {
- snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
- free(path->name);
- free(path->system);
+ if (path == NULL || str == NULL) {
free(path);
- } else
- snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
-
- return buf;
-}
-
-static int is_cache_op_valid(u8 cache_type, u8 cache_op)
-{
- if (hw_cache_stat[cache_type] & COP(cache_op))
- return 1; /* valid */
- else
- return 0; /* invalid */
-}
-
-static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
-{
- static char name[50];
-
- if (cache_result) {
- sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
- hw_cache_op[cache_op][0],
- hw_cache_result[cache_result][0]);
- } else {
- sprintf(name, "%s-%s", hw_cache[cache_type][0],
- hw_cache_op[cache_op][1]);
+ return NULL;
}
- return name;
-}
+ path->system = strndup(name, str - name);
+ path->name = strdup(str+1);
-const char *event_name(int counter)
-{
- u64 config = attrs[counter].config;
- int type = attrs[counter].type;
+ if (path->system == NULL || path->name == NULL) {
+ zfree(&path->system);
+ zfree(&path->name);
+ free(path);
+ path = NULL;
+ }
- return __event_name(type, config);
+ return path;
}
-const char *__event_name(int type, u64 config)
+const char *event_type(int type)
{
- static char buf[32];
-
- if (type == PERF_TYPE_RAW) {
- sprintf(buf, "raw 0x%llx", config);
- return buf;
- }
-
switch (type) {
case PERF_TYPE_HARDWARE:
- if (config < PERF_COUNT_HW_MAX)
- return hw_event_names[config];
- return "unknown-hardware";
-
- case PERF_TYPE_HW_CACHE: {
- u8 cache_type, cache_op, cache_result;
-
- cache_type = (config >> 0) & 0xff;
- if (cache_type > PERF_COUNT_HW_CACHE_MAX)
- return "unknown-ext-hardware-cache-type";
-
- cache_op = (config >> 8) & 0xff;
- if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
- return "unknown-ext-hardware-cache-op";
-
- cache_result = (config >> 16) & 0xff;
- if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
- return "unknown-ext-hardware-cache-result";
-
- if (!is_cache_op_valid(cache_type, cache_op))
- return "invalid-cache";
-
- return event_cache_name(cache_type, cache_op, cache_result);
- }
+ return "hardware";
case PERF_TYPE_SOFTWARE:
- if (config < PERF_COUNT_SW_MAX)
- return sw_event_names[config];
- return "unknown-software";
+ return "software";
case PERF_TYPE_TRACEPOINT:
- return tracepoint_id_to_name(config);
+ return "tracepoint";
+
+ case PERF_TYPE_HW_CACHE:
+ return "hardware-cache";
default:
break;
@@ -325,66 +267,93 @@ const char *__event_name(int type, u64 config)
return "unknown";
}
-static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
+
+
+static struct perf_evsel *
+__add_event(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct cpu_map *cpus)
+{
+ struct perf_evsel *evsel;
+
+ event_attr_init(attr);
+
+ evsel = perf_evsel__new_idx(attr, (*idx)++);
+ if (!evsel)
+ return NULL;
+
+ evsel->cpus = cpus;
+ if (name)
+ evsel->name = strdup(name);
+ list_add_tail(&evsel->node, list);
+ return evsel;
+}
+
+static int add_event(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name)
+{
+ return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM;
+}
+
+static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
{
int i, j;
int n, longest = -1;
for (i = 0; i < size; i++) {
- for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+ for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
n = strlen(names[i][j]);
- if (n > longest && !strncasecmp(*str, names[i][j], n))
+ if (n > longest && !strncasecmp(str, names[i][j], n))
longest = n;
}
- if (longest > 0) {
- *str += longest;
+ if (longest > 0)
return i;
- }
}
return -1;
}
-static enum event_result
-parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2)
{
- const char *s = *str;
+ struct perf_event_attr attr;
+ char name[MAX_NAME_LEN];
int cache_type = -1, cache_op = -1, cache_result = -1;
+ char *op_result[2] = { op_result1, op_result2 };
+ int i, n;
- cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
/*
* No fallback - if we cannot get a clear cache type
* then bail out:
*/
+ cache_type = parse_aliases(type, perf_evsel__hw_cache,
+ PERF_COUNT_HW_CACHE_MAX);
if (cache_type == -1)
- return EVT_FAILED;
+ return -EINVAL;
- while ((cache_op == -1 || cache_result == -1) && *s == '-') {
- ++s;
+ n = snprintf(name, MAX_NAME_LEN, "%s", type);
+
+ for (i = 0; (i < 2) && (op_result[i]); i++) {
+ char *str = op_result[i];
+
+ n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
if (cache_op == -1) {
- cache_op = parse_aliases(&s, hw_cache_op,
- PERF_COUNT_HW_CACHE_OP_MAX);
+ cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
+ PERF_COUNT_HW_CACHE_OP_MAX);
if (cache_op >= 0) {
- if (!is_cache_op_valid(cache_type, cache_op))
- return 0;
+ if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
+ return -EINVAL;
continue;
}
}
if (cache_result == -1) {
- cache_result = parse_aliases(&s, hw_cache_result,
- PERF_COUNT_HW_CACHE_RESULT_MAX);
+ cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
+ PERF_COUNT_HW_CACHE_RESULT_MAX);
if (cache_result >= 0)
continue;
}
-
- /*
- * Can't parse this as a cache op or result, so back up
- * to the '-'.
- */
- --s;
- break;
}
/*
@@ -399,307 +368,361 @@ parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
if (cache_result == -1)
cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
- attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
- attr->type = PERF_TYPE_HW_CACHE;
-
- *str = s;
- return EVT_HANDLED;
+ memset(&attr, 0, sizeof(attr));
+ attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
+ attr.type = PERF_TYPE_HW_CACHE;
+ return add_event(list, idx, &attr, name);
}
-static enum event_result
-parse_single_tracepoint_event(char *sys_name,
- const char *evt_name,
- unsigned int evt_length,
- struct perf_event_attr *attr,
- const char **strp)
+static int add_tracepoint(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
{
- char evt_path[MAXPATHLEN];
- char id_buf[4];
- u64 id;
- int fd;
-
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
- sys_name, evt_name);
-
- fd = open(evt_path, O_RDONLY);
- if (fd < 0)
- return EVT_FAILED;
-
- if (read(fd, id_buf, sizeof(id_buf)) < 0) {
- close(fd);
- return EVT_FAILED;
- }
-
- close(fd);
- id = atoll(id_buf);
- attr->config = id;
- attr->type = PERF_TYPE_TRACEPOINT;
- *strp = evt_name + evt_length;
-
- attr->sample_type |= PERF_SAMPLE_RAW;
- attr->sample_type |= PERF_SAMPLE_TIME;
- attr->sample_type |= PERF_SAMPLE_CPU;
+ struct perf_evsel *evsel;
- attr->sample_period = 1;
+ evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
+ if (!evsel)
+ return -ENOMEM;
+ list_add_tail(&evsel->node, list);
- return EVT_HANDLED;
+ return 0;
}
-/* sys + ':' + event + ':' + flags*/
-#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
-static enum event_result
-parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
- char *flags)
+static int add_tracepoint_multi_event(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
{
char evt_path[MAXPATHLEN];
struct dirent *evt_ent;
DIR *evt_dir;
+ int ret = 0;
- snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
+ snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
evt_dir = opendir(evt_path);
-
if (!evt_dir) {
perror("Can't open event dir");
- return EVT_FAILED;
+ return -1;
}
- while ((evt_ent = readdir(evt_dir))) {
- char event_opt[MAX_EVOPT_LEN + 1];
- int len;
-
+ while (!ret && (evt_ent = readdir(evt_dir))) {
if (!strcmp(evt_ent->d_name, ".")
|| !strcmp(evt_ent->d_name, "..")
|| !strcmp(evt_ent->d_name, "enable")
|| !strcmp(evt_ent->d_name, "filter"))
continue;
- if (!strglobmatch(evt_ent->d_name, evt_exp))
+ if (!strglobmatch(evt_ent->d_name, evt_name))
continue;
- len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
- evt_ent->d_name, flags ? ":" : "",
- flags ?: "");
- if (len < 0)
- return EVT_FAILED;
-
- if (parse_events(NULL, event_opt, 0))
- return EVT_FAILED;
+ ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
}
- return EVT_HANDLED_ALL;
+ closedir(evt_dir);
+ return ret;
}
-
-static enum event_result parse_tracepoint_event(const char **strp,
- struct perf_event_attr *attr)
+static int add_tracepoint_event(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
{
- const char *evt_name;
- char *flags;
- char sys_name[MAX_EVENT_LENGTH];
- unsigned int sys_length, evt_length;
+ return strpbrk(evt_name, "*?") ?
+ add_tracepoint_multi_event(list, idx, sys_name, evt_name) :
+ add_tracepoint(list, idx, sys_name, evt_name);
+}
- if (debugfs_valid_mountpoint(debugfs_path))
- return 0;
+static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
+ char *sys_name, char *evt_name)
+{
+ struct dirent *events_ent;
+ DIR *events_dir;
+ int ret = 0;
- evt_name = strchr(*strp, ':');
- if (!evt_name)
- return EVT_FAILED;
+ events_dir = opendir(tracing_events_path);
+ if (!events_dir) {
+ perror("Can't open event dir");
+ return -1;
+ }
- sys_length = evt_name - *strp;
- if (sys_length >= MAX_EVENT_LENGTH)
- return 0;
+ while (!ret && (events_ent = readdir(events_dir))) {
+ if (!strcmp(events_ent->d_name, ".")
+ || !strcmp(events_ent->d_name, "..")
+ || !strcmp(events_ent->d_name, "enable")
+ || !strcmp(events_ent->d_name, "header_event")
+ || !strcmp(events_ent->d_name, "header_page"))
+ continue;
- strncpy(sys_name, *strp, sys_length);
- sys_name[sys_length] = '\0';
- evt_name = evt_name + 1;
+ if (!strglobmatch(events_ent->d_name, sys_name))
+ continue;
- flags = strchr(evt_name, ':');
- if (flags) {
- /* split it out: */
- evt_name = strndup(evt_name, flags - evt_name);
- flags++;
+ ret = add_tracepoint_event(list, idx, events_ent->d_name,
+ evt_name);
}
- evt_length = strlen(evt_name);
- if (evt_length >= MAX_EVENT_LENGTH)
- return EVT_FAILED;
+ closedir(events_dir);
+ return ret;
+}
+
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event)
+{
+ int ret;
+
+ ret = debugfs_valid_mountpoint(tracing_events_path);
+ if (ret)
+ return ret;
- if (strpbrk(evt_name, "*?")) {
- *strp = evt_name + evt_length;
- return parse_multiple_tracepoint_event(sys_name, evt_name,
- flags);
- } else
- return parse_single_tracepoint_event(sys_name, evt_name,
- evt_length, attr, strp);
+ if (strpbrk(sys, "*?"))
+ return add_tracepoint_multi_sys(list, idx, sys, event);
+ else
+ return add_tracepoint_event(list, idx, sys, event);
}
-static enum event_result
-parse_breakpoint_type(const char *type, const char **strp,
- struct perf_event_attr *attr)
+static int
+parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
int i;
for (i = 0; i < 3; i++) {
- if (!type[i])
+ if (!type || !type[i])
break;
+#define CHECK_SET_TYPE(bit) \
+do { \
+ if (attr->bp_type & bit) \
+ return -EINVAL; \
+ else \
+ attr->bp_type |= bit; \
+} while (0)
+
switch (type[i]) {
case 'r':
- attr->bp_type |= HW_BREAKPOINT_R;
+ CHECK_SET_TYPE(HW_BREAKPOINT_R);
break;
case 'w':
- attr->bp_type |= HW_BREAKPOINT_W;
+ CHECK_SET_TYPE(HW_BREAKPOINT_W);
break;
case 'x':
- attr->bp_type |= HW_BREAKPOINT_X;
+ CHECK_SET_TYPE(HW_BREAKPOINT_X);
break;
default:
- return EVT_FAILED;
+ return -EINVAL;
}
}
+
+#undef CHECK_SET_TYPE
+
if (!attr->bp_type) /* Default */
attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
- *strp = type + i;
-
- return EVT_HANDLED;
+ return 0;
}
-static enum event_result
-parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type)
{
- const char *target;
- const char *type;
- char *endaddr;
- u64 addr;
- enum event_result err;
-
- target = strchr(*strp, ':');
- if (!target)
- return EVT_FAILED;
-
- if (strncmp(*strp, "mem", target - *strp) != 0)
- return EVT_FAILED;
-
- target++;
-
- addr = strtoull(target, &endaddr, 0);
- if (target == endaddr)
- return EVT_FAILED;
-
- attr->bp_addr = addr;
- *strp = endaddr;
+ struct perf_event_attr attr;
- type = strchr(target, ':');
+ memset(&attr, 0, sizeof(attr));
+ attr.bp_addr = (unsigned long) ptr;
- /* If no type is defined, just rw as default */
- if (!type) {
- attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
- } else {
- err = parse_breakpoint_type(++type, strp, attr);
- if (err == EVT_FAILED)
- return EVT_FAILED;
- }
+ if (parse_breakpoint_type(type, &attr))
+ return -EINVAL;
/*
* We should find a nice way to override the access length
* Provide some defaults for now
*/
- if (attr->bp_type == HW_BREAKPOINT_X)
- attr->bp_len = sizeof(long);
+ if (attr.bp_type == HW_BREAKPOINT_X)
+ attr.bp_len = sizeof(long);
else
- attr->bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
- attr->type = PERF_TYPE_BREAKPOINT;
+ attr.type = PERF_TYPE_BREAKPOINT;
+ attr.sample_period = 1;
- return EVT_HANDLED;
+ return add_event(list, idx, &attr, NULL);
}
-static int check_events(const char *str, unsigned int i)
+static int config_term(struct perf_event_attr *attr,
+ struct parse_events_term *term)
{
- int n;
+#define CHECK_TYPE_VAL(type) \
+do { \
+ if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
+ return -EINVAL; \
+} while (0)
+
+ switch (term->type_term) {
+ case PARSE_EVENTS__TERM_TYPE_CONFIG:
+ CHECK_TYPE_VAL(NUM);
+ attr->config = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+ CHECK_TYPE_VAL(NUM);
+ attr->config1 = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+ CHECK_TYPE_VAL(NUM);
+ attr->config2 = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ CHECK_TYPE_VAL(NUM);
+ attr->sample_period = term->val.num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ /*
+ * TODO uncomment when the field is available
+ * attr->branch_sample_type = term->val.num;
+ */
+ break;
+ case PARSE_EVENTS__TERM_TYPE_NAME:
+ CHECK_TYPE_VAL(STR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+#undef CHECK_TYPE_VAL
+}
- n = strlen(event_symbols[i].symbol);
- if (!strncmp(str, event_symbols[i].symbol, n))
- return n;
+static int config_attr(struct perf_event_attr *attr,
+ struct list_head *head, int fail)
+{
+ struct parse_events_term *term;
+
+ list_for_each_entry(term, head, list)
+ if (config_term(attr, term) && fail)
+ return -EINVAL;
- n = strlen(event_symbols[i].alias);
- if (n)
- if (!strncmp(str, event_symbols[i].alias, n))
- return n;
return 0;
}
-static enum event_result
-parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ u32 type, u64 config,
+ struct list_head *head_config)
+{
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = type;
+ attr.config = config;
+
+ if (head_config &&
+ config_attr(&attr, head_config, 1))
+ return -EINVAL;
+
+ return add_event(list, idx, &attr, NULL);
+}
+
+static int parse_events__is_name_term(struct parse_events_term *term)
{
- const char *str = *strp;
- unsigned int i;
- int n;
+ return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
+}
- for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
- n = check_events(str, i);
- if (n > 0) {
- attr->type = event_symbols[i].type;
- attr->config = event_symbols[i].config;
- *strp = str + n;
- return EVT_HANDLED;
- }
- }
- return EVT_FAILED;
+static char *pmu_event_name(struct list_head *head_terms)
+{
+ struct parse_events_term *term;
+
+ list_for_each_entry(term, head_terms, list)
+ if (parse_events__is_name_term(term))
+ return term->val.str;
+
+ return NULL;
}
-static enum event_result
-parse_raw_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *name, struct list_head *head_config)
{
- const char *str = *strp;
- u64 config;
- int n;
+ struct perf_event_attr attr;
+ struct perf_pmu *pmu;
+ struct perf_evsel *evsel;
+ const char *unit;
+ double scale;
+
+ pmu = perf_pmu__find(name);
+ if (!pmu)
+ return -EINVAL;
- if (*str != 'r')
- return EVT_FAILED;
- n = hex2u64(str + 1, &config);
- if (n > 0) {
- *strp = str + n + 1;
- attr->type = PERF_TYPE_RAW;
- attr->config = config;
- return EVT_HANDLED;
+ memset(&attr, 0, sizeof(attr));
+
+ if (perf_pmu__check_alias(pmu, head_config, &unit, &scale))
+ return -EINVAL;
+
+ /*
+ * Configure hardcoded terms first, no need to check
+ * return value when called with fail == 0 ;)
+ */
+ config_attr(&attr, head_config, 0);
+
+ if (perf_pmu__config(pmu, &attr, head_config))
+ return -EINVAL;
+
+ evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
+ pmu->cpus);
+ if (evsel) {
+ evsel->unit = unit;
+ evsel->scale = scale;
}
- return EVT_FAILED;
+
+ return evsel ? 0 : -ENOMEM;
}
-static enum event_result
-parse_numeric_event(const char **strp, struct perf_event_attr *attr)
+int parse_events__modifier_group(struct list_head *list,
+ char *event_mod)
{
- const char *str = *strp;
- char *endp;
- unsigned long type;
- u64 config;
+ return parse_events__modifier_event(list, event_mod, true);
+}
- type = strtoul(str, &endp, 0);
- if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
- str = endp + 1;
- config = strtoul(str, &endp, 0);
- if (endp > str) {
- attr->type = type;
- attr->config = config;
- *strp = endp;
- return EVT_HANDLED;
- }
- }
- return EVT_FAILED;
+void parse_events__set_leader(char *name, struct list_head *list)
+{
+ struct perf_evsel *leader;
+
+ __perf_evlist__set_leader(list);
+ leader = list_entry(list->next, struct perf_evsel, node);
+ leader->group_name = name ? strdup(name) : NULL;
}
-static enum event_result
-parse_event_modifier(const char **strp, struct perf_event_attr *attr)
+/* list_event is assumed to point to malloc'ed memory */
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all)
{
- const char *str = *strp;
- int exclude = 0;
- int eu = 0, ek = 0, eh = 0, precise = 0;
+ /*
+ * Called for single event definition. Update the
+ * 'all event' list, and reinit the 'single event'
+ * list, for next event definition.
+ */
+ list_splice_tail(list_event, list_all);
+ free(list_event);
+}
+
+struct event_modifier {
+ int eu;
+ int ek;
+ int eh;
+ int eH;
+ int eG;
+ int precise;
+ int exclude_GH;
+ int sample_read;
+ int pinned;
+};
+
+static int get_event_modifier(struct event_modifier *mod, char *str,
+ struct perf_evsel *evsel)
+{
+ int eu = evsel ? evsel->attr.exclude_user : 0;
+ int ek = evsel ? evsel->attr.exclude_kernel : 0;
+ int eh = evsel ? evsel->attr.exclude_hv : 0;
+ int eH = evsel ? evsel->attr.exclude_host : 0;
+ int eG = evsel ? evsel->attr.exclude_guest : 0;
+ int precise = evsel ? evsel->attr.precise_ip : 0;
+ int sample_read = 0;
+ int pinned = evsel ? evsel->attr.pinned : 0;
+
+ int exclude = eu | ek | eh;
+ int exclude_GH = evsel ? evsel->exclude_GH : 0;
+
+ memset(mod, 0, sizeof(*mod));
- if (*str++ != ':')
- return 0;
while (*str) {
if (*str == 'u') {
if (!exclude)
@@ -713,148 +736,252 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
if (!exclude)
exclude = eu = ek = eh = 1;
eh = 0;
+ } else if (*str == 'G') {
+ if (!exclude_GH)
+ exclude_GH = eG = eH = 1;
+ eG = 0;
+ } else if (*str == 'H') {
+ if (!exclude_GH)
+ exclude_GH = eG = eH = 1;
+ eH = 0;
} else if (*str == 'p') {
precise++;
+ /* use of precise requires exclude_guest */
+ if (!exclude_GH)
+ eG = 1;
+ } else if (*str == 'S') {
+ sample_read = 1;
+ } else if (*str == 'D') {
+ pinned = 1;
} else
break;
++str;
}
- if (str >= *strp + 2) {
- *strp = str;
- attr->exclude_user = eu;
- attr->exclude_kernel = ek;
- attr->exclude_hv = eh;
- attr->precise_ip = precise;
- return 1;
- }
+
+ /*
+ * precise ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ if (precise > 3)
+ return -EINVAL;
+
+ mod->eu = eu;
+ mod->ek = ek;
+ mod->eh = eh;
+ mod->eH = eH;
+ mod->eG = eG;
+ mod->precise = precise;
+ mod->exclude_GH = exclude_GH;
+ mod->sample_read = sample_read;
+ mod->pinned = pinned;
+
return 0;
}
/*
- * Each event can have multiple symbolic names.
- * Symbolic names are (almost) exactly matched.
+ * Basic modifier sanity check to validate it contains only one
+ * instance of any modifier (apart from 'p') present.
*/
-static enum event_result
-parse_event_symbols(const char **str, struct perf_event_attr *attr)
+static int check_modifier(char *str)
{
- enum event_result ret;
+ char *p = str;
- ret = parse_tracepoint_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ /* The sizeof includes 0 byte as well. */
+ if (strlen(str) > (sizeof("ukhGHpppSD") - 1))
+ return -1;
- ret = parse_raw_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ while (*p) {
+ if (*p != 'p' && strchr(p + 1, *p))
+ return -1;
+ p++;
+ }
- ret = parse_numeric_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ return 0;
+}
- ret = parse_symbolic_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+int parse_events__modifier_event(struct list_head *list, char *str, bool add)
+{
+ struct perf_evsel *evsel;
+ struct event_modifier mod;
- ret = parse_generic_hw_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ if (str == NULL)
+ return 0;
- ret = parse_breakpoint_event(str, attr);
- if (ret != EVT_FAILED)
- goto modifier;
+ if (check_modifier(str))
+ return -EINVAL;
- fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
- fprintf(stderr, "Run 'perf list' for a list of valid events\n");
- return EVT_FAILED;
+ if (!add && get_event_modifier(&mod, str, NULL))
+ return -EINVAL;
-modifier:
- parse_event_modifier(str, attr);
+ __evlist__for_each(list, evsel) {
+ if (add && get_event_modifier(&mod, str, evsel))
+ return -EINVAL;
+
+ evsel->attr.exclude_user = mod.eu;
+ evsel->attr.exclude_kernel = mod.ek;
+ evsel->attr.exclude_hv = mod.eh;
+ evsel->attr.precise_ip = mod.precise;
+ evsel->attr.exclude_host = mod.eH;
+ evsel->attr.exclude_guest = mod.eG;
+ evsel->exclude_GH = mod.exclude_GH;
+ evsel->sample_read = mod.sample_read;
+
+ if (perf_evsel__is_group_leader(evsel))
+ evsel->attr.pinned = mod.pinned;
+ }
- return ret;
+ return 0;
}
-static int store_event_type(const char *orgname)
+int parse_events_name(struct list_head *list, char *name)
{
- char filename[PATH_MAX], *c;
- FILE *file;
- int id, n;
+ struct perf_evsel *evsel;
- sprintf(filename, "%s/", debugfs_path);
- strncat(filename, orgname, strlen(orgname));
- strcat(filename, "/id");
+ __evlist__for_each(list, evsel) {
+ if (!evsel->name)
+ evsel->name = strdup(name);
+ }
- c = strchr(filename, ':');
- if (c)
- *c = '/';
+ return 0;
+}
- file = fopen(filename, "r");
- if (!file)
- return 0;
- n = fscanf(file, "%i", &id);
- fclose(file);
- if (n < 1) {
- pr_err("cannot store event ID\n");
- return -EINVAL;
+static int parse_events__scanner(const char *str, void *data, int start_token);
+
+static int parse_events_fixup(int ret, const char *str, void *data,
+ int start_token)
+{
+ char *o = strdup(str);
+ char *s = NULL;
+ char *t = o;
+ char *p;
+ int len = 0;
+
+ if (!o)
+ return ret;
+ while ((p = strsep(&t, ",")) != NULL) {
+ if (s)
+ str_append(&s, &len, ",");
+ str_append(&s, &len, "cpu/");
+ str_append(&s, &len, p);
+ str_append(&s, &len, "/");
}
- return perf_header__push_event(id, orgname);
+ free(o);
+ if (!s)
+ return -ENOMEM;
+ return parse_events__scanner(s, data, start_token);
}
-int parse_events(const struct option *opt __used, const char *str, int unset __used)
+static int parse_events__scanner(const char *str, void *data, int start_token)
{
- struct perf_event_attr attr;
- enum event_result ret;
+ YY_BUFFER_STATE buffer;
+ void *scanner;
+ int ret;
+
+ ret = parse_events_lex_init_extra(start_token, &scanner);
+ if (ret)
+ return ret;
+
+ buffer = parse_events__scan_string(str, scanner);
+
+#ifdef PARSER_DEBUG
+ parse_events_debug = 1;
+#endif
+ ret = parse_events_parse(data, scanner);
+
+ parse_events__flush_buffer(buffer, scanner);
+ parse_events__delete_buffer(buffer, scanner);
+ parse_events_lex_destroy(scanner);
+ if (ret && !strchr(str, '/'))
+ ret = parse_events_fixup(ret, str, data, start_token);
+ return ret;
+}
- if (strchr(str, ':'))
- if (store_event_type(str) < 0)
- return -1;
+/*
+ * parse event config string, return a list of event terms.
+ */
+int parse_events_terms(struct list_head *terms, const char *str)
+{
+ struct parse_events_terms data = {
+ .terms = NULL,
+ };
+ int ret;
+
+ ret = parse_events__scanner(str, &data, PE_START_TERMS);
+ if (!ret) {
+ list_splice(data.terms, terms);
+ zfree(&data.terms);
+ return 0;
+ }
- for (;;) {
- if (nr_counters == MAX_COUNTERS)
- return -1;
+ if (data.terms)
+ parse_events__free_terms(data.terms);
+ return ret;
+}
- memset(&attr, 0, sizeof(attr));
- ret = parse_event_symbols(&str, &attr);
- if (ret == EVT_FAILED)
- return -1;
+int parse_events(struct perf_evlist *evlist, const char *str)
+{
+ struct parse_events_evlist data = {
+ .list = LIST_HEAD_INIT(data.list),
+ .idx = evlist->nr_entries,
+ };
+ int ret;
+
+ ret = parse_events__scanner(str, &data, PE_START_EVENTS);
+ if (!ret) {
+ int entries = data.idx - evlist->nr_entries;
+ perf_evlist__splice_list_tail(evlist, &data.list, entries);
+ evlist->nr_groups += data.nr_groups;
+ return 0;
+ }
- if (!(*str == 0 || *str == ',' || isspace(*str)))
- return -1;
+ /*
+ * There are 2 users - builtin-record and builtin-test objects.
+ * Both call perf_evlist__delete in case of error, so we don't
+ * need to bother.
+ */
+ return ret;
+}
- if (ret != EVT_HANDLED_ALL) {
- attrs[nr_counters] = attr;
- nr_counters++;
- }
+int parse_events_option(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ int ret = parse_events(evlist, str);
- if (*str == 0)
- break;
- if (*str == ',')
- ++str;
- while (isspace(*str))
- ++str;
+ if (ret) {
+ fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
+ fprintf(stderr, "Run 'perf list' for a list of valid events\n");
}
-
- return 0;
+ return ret;
}
-int parse_filter(const struct option *opt __used, const char *str,
- int unset __used)
+int parse_filter(const struct option *opt, const char *str,
+ int unset __maybe_unused)
{
- int i = nr_counters - 1;
- int len = strlen(str);
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ struct perf_evsel *last = NULL;
+
+ if (evlist->nr_entries > 0)
+ last = perf_evlist__last(evlist);
- if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) {
+ if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
fprintf(stderr,
"-F option should follow a -e tracepoint option\n");
return -1;
}
- filters[i] = malloc(len + 1);
- if (!filters[i]) {
+ last->filter = strdup(str);
+ if (last->filter == NULL) {
fprintf(stderr, "not enough memory to hold filter string\n");
return -1;
}
- strcpy(filters[i], str);
return 0;
}
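With the old global attrs[]/filters[] arrays gone, event parsing now runs through the bison/flex scanner and lands parsed events directly on a perf_evlist. A usage sketch, not from this patch; perf_evlist__new() is the evlist allocator assumed from evlist.h:

	struct perf_evlist *evlist = perf_evlist__new();	/* assumed allocator, see evlist.h */

	if (evlist == NULL || parse_events(evlist, "cycles:u,sched:sched_switch"))
		fprintf(stderr, "failed to set up events\n");
	else
		fprintf(stderr, "parsed %d events\n", evlist->nr_entries);
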
@@ -872,32 +999,47 @@ static const char * const event_type_descriptors[] = {
* Print the events from <debugfs_mount_point>/tracing/events
*/
-static void print_tracepoint_events(void)
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only)
{
DIR *sys_dir, *evt_dir;
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
- if (debugfs_valid_mountpoint(debugfs_path))
+ if (debugfs_valid_mountpoint(tracing_events_path)) {
+ printf(" [ Tracepoints not available: %s ]\n", strerror(errno));
return;
+ }
- sys_dir = opendir(debugfs_path);
+ sys_dir = opendir(tracing_events_path);
if (!sys_dir)
return;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ if (subsys_glob != NULL &&
+ !strglobmatch(sys_dirent.d_name, subsys_glob))
+ continue;
- snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
sys_dirent.d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ if (event_glob != NULL &&
+ !strglobmatch(evt_dirent.d_name, event_glob))
+ continue;
+
+ if (name_only) {
+ printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name);
+ continue;
+ }
+
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent.d_name, evt_dirent.d_name);
- printf(" %-42s [%s]\n", evt_path,
+ printf(" %-50s [%s]\n", evt_path,
event_type_descriptors[PERF_TYPE_TRACEPOINT]);
}
closedir(evt_dir);
@@ -906,60 +1048,307 @@ static void print_tracepoint_events(void)
}
/*
- * Print the help text for the event symbols:
+ * Check whether event is in <debugfs_mount_point>/tracing/events
*/
-void print_events(void)
+
+int is_valid_tracepoint(const char *event_string)
{
- struct event_symbol *syms = event_symbols;
- unsigned int i, type, op, prev_type = -1;
- char name[40];
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
- printf("\n");
- printf("List of pre-defined events (to be used in -e):\n");
+ if (debugfs_valid_mountpoint(tracing_events_path))
+ return 0;
- for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
- type = syms->type;
+ sys_dir = opendir(tracing_events_path);
+ if (!sys_dir)
+ return 0;
- if (type != prev_type)
- printf("\n");
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent.d_name, evt_dirent.d_name);
+ if (!strcmp(evt_path, event_string)) {
+ closedir(evt_dir);
+ closedir(sys_dir);
+ return 1;
+ }
+ }
+ closedir(evt_dir);
+ }
+ closedir(sys_dir);
+ return 0;
+}
+
+static bool is_event_supported(u8 type, unsigned config)
+{
+ bool ret = true;
+ int open_return;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ };
+ struct {
+ struct thread_map map;
+ int threads[1];
+ } tmap = {
+ .map.nr = 1,
+ .threads = { 0 },
+ };
+
+ evsel = perf_evsel__new(&attr);
+ if (evsel) {
+ open_return = perf_evsel__open(evsel, NULL, &tmap.map);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->attr.exclude_kernel = 1;
+ ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ }
+ perf_evsel__delete(evsel);
+ }
+
+ return ret;
+}
+
+static void __print_events_type(u8 type, struct event_symbol *syms,
+ unsigned max)
+{
+ char name[64];
+ unsigned i;
+
+ for (i = 0; i < max ; i++, syms++) {
+ if (!is_event_supported(type, i))
+ continue;
if (strlen(syms->alias))
- sprintf(name, "%s OR %s", syms->symbol, syms->alias);
+ snprintf(name, sizeof(name), "%s OR %s",
+ syms->symbol, syms->alias);
else
- strcpy(name, syms->symbol);
- printf(" %-42s [%s]\n", name,
- event_type_descriptors[type]);
+ snprintf(name, sizeof(name), "%s", syms->symbol);
- prev_type = type;
+ printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
}
+}
+
+void print_events_type(u8 type)
+{
+ if (type == PERF_TYPE_SOFTWARE)
+ __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
+ else
+ __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
+}
+
+int print_hwcache_events(const char *event_glob, bool name_only)
+{
+ unsigned int type, op, i, printed = 0;
+ char name[64];
- printf("\n");
for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
/* skip invalid cache type */
- if (!is_cache_op_valid(type, op))
+ if (!perf_evsel__is_cache_op_valid(type, op))
continue;
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
- printf(" %-42s [%s]\n",
- event_cache_name(type, op, i),
- event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+ name, sizeof(name));
+ if (event_glob != NULL && !strglobmatch(name, event_glob))
+ continue;
+
+ if (!is_event_supported(PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16)))
+ continue;
+
+ if (name_only)
+ printf("%s ", name);
+ else
+ printf(" %-50s [%s]\n", name,
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ ++printed;
}
}
}
- printf("\n");
- printf(" %-42s [%s]\n",
- "rNNN (see 'perf list --help' on how to encode it)",
- event_type_descriptors[PERF_TYPE_RAW]);
- printf("\n");
+ if (printed)
+ printf("\n");
+ return printed;
+}
+
+static void print_symbol_events(const char *event_glob, unsigned type,
+ struct event_symbol *syms, unsigned max,
+ bool name_only)
+{
+ unsigned i, printed = 0;
+ char name[MAX_NAME_LEN];
- printf(" %-42s [%s]\n",
- "mem:<addr>[:access]",
+ for (i = 0; i < max; i++, syms++) {
+
+ if (event_glob != NULL &&
+ !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
+ continue;
+
+ if (!is_event_supported(type, i))
+ continue;
+
+ if (name_only) {
+ printf("%s ", syms->symbol);
+ continue;
+ }
+
+ if (strlen(syms->alias))
+ snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strncpy(name, syms->symbol, MAX_NAME_LEN);
+
+ printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
+
+ printed++;
+ }
+
+ if (printed)
+ printf("\n");
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(const char *event_glob, bool name_only)
+{
+ if (!name_only) {
+ printf("\n");
+ printf("List of pre-defined events (to be used in -e):\n");
+ }
+
+ print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
+
+ print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
+
+ print_hwcache_events(event_glob, name_only);
+
+ print_pmu_events(event_glob, name_only);
+
+ if (event_glob != NULL)
+ return;
+
+ if (!name_only) {
+ printf(" %-50s [%s]\n",
+ "rNNN",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" %-50s [%s]\n",
+ "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" (see 'man perf-list' on how to encode it)\n");
+ printf("\n");
+
+ printf(" %-50s [%s]\n",
+ "mem:<addr>[:access]",
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
- printf("\n");
+ printf("\n");
+ }
+
+ print_tracepoint_events(NULL, NULL, name_only);
+}
+
+int parse_events__is_hardcoded_term(struct parse_events_term *term)
+{
+ return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
+}
+
+static int new_term(struct parse_events_term **_term, int type_val,
+ int type_term, char *config,
+ char *str, u64 num)
+{
+ struct parse_events_term *term;
+
+ term = zalloc(sizeof(*term));
+ if (!term)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&term->list);
+ term->type_val = type_val;
+ term->type_term = type_term;
+ term->config = config;
+
+ switch (type_val) {
+ case PARSE_EVENTS__TERM_TYPE_NUM:
+ term->val.num = num;
+ break;
+ case PARSE_EVENTS__TERM_TYPE_STR:
+ term->val.str = str;
+ break;
+ default:
+ free(term);
+ return -EINVAL;
+ }
+
+ *_term = term;
+ return 0;
+}
+
+int parse_events_term__num(struct parse_events_term **term,
+ int type_term, char *config, u64 num)
+{
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
+ config, NULL, num);
+}
+
+int parse_events_term__str(struct parse_events_term **term,
+ int type_term, char *config, char *str)
+{
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
+ config, str, 0);
+}
- print_tracepoint_events();
+int parse_events_term__sym_hw(struct parse_events_term **term,
+ char *config, unsigned idx)
+{
+ struct event_symbol *sym;
+
+ BUG_ON(idx >= PERF_COUNT_HW_MAX);
+ sym = &event_symbols_hw[idx];
+
+ if (config)
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
+ PARSE_EVENTS__TERM_TYPE_USER, config,
+ (char *) sym->symbol, 0);
+ else
+ return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
+ PARSE_EVENTS__TERM_TYPE_USER,
+ (char *) "event", (char *) sym->symbol, 0);
+}
+
+int parse_events_term__clone(struct parse_events_term **new,
+ struct parse_events_term *term)
+{
+ return new_term(new, term->type_val, term->type_term, term->config,
+ term->val.str, term->val.num);
+}
+
+void parse_events__free_terms(struct list_head *terms)
+{
+ struct parse_events_term *term, *h;
- exit(129);
+ list_for_each_entry_safe(term, h, terms, list)
+ free(term);
}
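
A minimal sketch of how the term constructors above are meant to be used when building a term list by hand; the term types and values are made up, and parse_events_terms() normally produces such a list from a string like "config=0x1b,period=1000":

    LIST_HEAD(terms);
    struct parse_events_term *t;

    if (!parse_events_term__num(&t, PARSE_EVENTS__TERM_TYPE_CONFIG, NULL, 0x1b))
        list_add_tail(&t->list, &terms);
    if (!parse_events_term__num(&t, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD, NULL, 1000))
        list_add_tail(&t->list, &terms);

    /* ... pass 'terms' to e.g. perf_pmu__config_terms() ... */
    parse_events__free_terms(&terms);
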
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index fc4ab3fe877..df094b4ed5e 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -4,6 +4,15 @@
* Parse symbolic events/counts passed in as options:
*/
+#include <linux/list.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include <linux/perf_event.h>
+
+struct list_head;
+struct perf_evsel;
+struct perf_evlist;
+
struct option;
struct tracepoint_path {
@@ -13,25 +22,91 @@ struct tracepoint_path {
};
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
-extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events);
-
-extern int nr_counters;
-
-extern struct perf_event_attr attrs[MAX_COUNTERS];
-extern char *filters[MAX_COUNTERS];
+extern struct tracepoint_path *tracepoint_name_to_path(const char *name);
+extern bool have_tracepoints(struct list_head *evlist);
-extern const char *event_name(int ctr);
-extern const char *__event_name(int type, u64 config);
+const char *event_type(int type);
-extern int parse_events(const struct option *opt, const char *str, int unset);
+extern int parse_events_option(const struct option *opt, const char *str,
+ int unset);
+extern int parse_events(struct perf_evlist *evlist, const char *str);
+extern int parse_events_terms(struct list_head *terms, const char *str);
extern int parse_filter(const struct option *opt, const char *str, int unset);
#define EVENTS_HELP_MAX (128*1024)
-extern void print_events(void);
+enum {
+ PARSE_EVENTS__TERM_TYPE_NUM,
+ PARSE_EVENTS__TERM_TYPE_STR,
+};
-extern char debugfs_path[];
-extern int valid_debugfs_mount(const char *debugfs);
+enum {
+ PARSE_EVENTS__TERM_TYPE_USER,
+ PARSE_EVENTS__TERM_TYPE_CONFIG,
+ PARSE_EVENTS__TERM_TYPE_CONFIG1,
+ PARSE_EVENTS__TERM_TYPE_CONFIG2,
+ PARSE_EVENTS__TERM_TYPE_NAME,
+ PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+ PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+};
+
+struct parse_events_term {
+ char *config;
+ union {
+ char *str;
+ u64 num;
+ } val;
+ int type_val;
+ int type_term;
+ struct list_head list;
+};
+
+struct parse_events_evlist {
+ struct list_head list;
+ int idx;
+ int nr_groups;
+};
+
+struct parse_events_terms {
+ struct list_head *terms;
+};
+
+int parse_events__is_hardcoded_term(struct parse_events_term *term);
+int parse_events_term__num(struct parse_events_term **_term,
+ int type_term, char *config, u64 num);
+int parse_events_term__str(struct parse_events_term **_term,
+ int type_term, char *config, char *str);
+int parse_events_term__sym_hw(struct parse_events_term **term,
+ char *config, unsigned idx);
+int parse_events_term__clone(struct parse_events_term **new,
+ struct parse_events_term *term);
+void parse_events__free_terms(struct list_head *terms);
+int parse_events__modifier_event(struct list_head *list, char *str, bool add);
+int parse_events__modifier_group(struct list_head *list, char *event_mod);
+int parse_events_name(struct list_head *list, char *name);
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ char *sys, char *event);
+int parse_events_add_numeric(struct list_head *list, int *idx,
+ u32 type, u64 config,
+ struct list_head *head_config);
+int parse_events_add_cache(struct list_head *list, int *idx,
+ char *type, char *op_result1, char *op_result2);
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+ void *ptr, char *type);
+int parse_events_add_pmu(struct list_head *list, int *idx,
+ char *pmu , struct list_head *head_config);
+void parse_events__set_leader(char *name, struct list_head *list);
+void parse_events_update_lists(struct list_head *list_event,
+ struct list_head *list_all);
+void parse_events_error(void *data, void *scanner, char const *msg);
+void print_events(const char *event_glob, bool name_only);
+void print_events_type(u8 type);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only);
+int print_hwcache_events(const char *event_glob, bool name_only);
+extern int is_valid_tracepoint(const char *event_string);
+
+extern int valid_debugfs_mount(const char *debugfs);
#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
new file mode 100644
index 00000000000..343299575b3
--- /dev/null
+++ b/tools/perf/util/parse-events.l
@@ -0,0 +1,218 @@
+
+%option reentrant
+%option bison-bridge
+%option prefix="parse_events_"
+%option stack
+
+%{
+#include <errno.h>
+#include "../perf.h"
+#include "parse-events-bison.h"
+#include "parse-events.h"
+
+char *parse_events_get_text(yyscan_t yyscanner);
+YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
+
+static int __value(YYSTYPE *yylval, char *str, int base, int token)
+{
+ u64 num;
+
+ errno = 0;
+ num = strtoull(str, NULL, base);
+ if (errno)
+ return PE_ERROR;
+
+ yylval->num = num;
+ return token;
+}
+
+static int value(yyscan_t scanner, int base)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ return __value(yylval, text, base, PE_VALUE);
+}
+
+static int raw(yyscan_t scanner)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ return __value(yylval, text + 1, 16, PE_RAW);
+}
+
+static int str(yyscan_t scanner, int token)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ yylval->str = strdup(text);
+ return token;
+}
+
+static int sym(yyscan_t scanner, int type, int config)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = (type << 16) + config;
+ return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
+}
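
sym() packs the perf event type and config into one token value so a single %union member can carry both; the grammar's event_legacy_symbol rule unpacks it again. A worked example with the usual enum values:

    u64 packed = (PERF_TYPE_SOFTWARE << 16) + PERF_COUNT_SW_PAGE_FAULTS;
    int type   = packed >> 16;    /* 1 == PERF_TYPE_SOFTWARE        */
    int config = packed & 255;    /* 2 == PERF_COUNT_SW_PAGE_FAULTS */
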
+
+static int term(yyscan_t scanner, int type)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = type;
+ return PE_TERM;
+}
+
+%}
+
+%x mem
+%s config
+%x event
+
+group [^,{}/]*[{][^}]*[}][^,{}/]*
+event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
+event [^,{}/]+
+
+num_dec [0-9]+
+num_hex 0x[a-fA-F0-9]+
+num_raw_hex [a-fA-F0-9]+
+name [a-zA-Z_*?][a-zA-Z0-9_*?]*
+name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
+/* If you add a modifier you need to update check_modifier() */
+modifier_event [ukhpGHSD]+
+modifier_bp [rwx]{1,3}
+
+%%
+
+%{
+ {
+ int start_token;
+
+ start_token = parse_events_get_extra(yyscanner);
+
+ if (start_token == PE_START_TERMS)
+ BEGIN(config);
+ else if (start_token == PE_START_EVENTS)
+ BEGIN(event);
+
+ if (start_token) {
+ parse_events_set_extra(NULL, yyscanner);
+ return start_token;
+ }
+ }
+%}
+
+<event>{
+
+{group} {
+ BEGIN(INITIAL); yyless(0);
+ }
+
+{event_pmu} |
+{event} {
+ str(yyscanner, PE_EVENT_NAME);
+ BEGIN(INITIAL); yyless(0);
+ return PE_EVENT_NAME;
+ }
+
+. |
+<<EOF>> {
+ BEGIN(INITIAL); yyless(0);
+ }
+
+}
+
+<config>{
+config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
+period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+, { return ','; }
+"/" { BEGIN(INITIAL); return '/'; }
+{name_minus} { return str(yyscanner, PE_NAME); }
+}
+
+<mem>{
+{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
+: { return ':'; }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
+ /*
+ * We need a separate 'mem:' scanner state in order to get the specific
+ * modifier bits parsed out. Otherwise we would need to handle PE_NAME
+ * and parse it manually. During the escape from the <mem> state we
+ * need to put the escaping char back, so we don't miss it.
+ */
+. { unput(*yytext); BEGIN(INITIAL); }
+ /*
+ * We destroy the scanner after reaching EOF,
+ * but anyway just to be sure get back to INIT state.
+ */
+<<EOF>> { BEGIN(INITIAL); }
+}
+
+cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+
+L1-dcache|l1-d|l1d|L1-data |
+L1-icache|l1-i|l1i|L1-instruction |
+LLC|L2 |
+dTLB|d-tlb|Data-TLB |
+iTLB|i-tlb|Instruction-TLB |
+branch|branches|bpu|btb|bpc |
+node { return str(yyscanner, PE_NAME_CACHE_TYPE); }
+
+load|loads|read |
+store|stores|write |
+prefetch|prefetches |
+speculative-read|speculative-load |
+refs|Reference|ops|access |
+misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
+
+mem: { BEGIN(mem); return PE_PREFIX_MEM; }
+r{num_raw_hex} { return raw(yyscanner); }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
+
+{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
+{name} { return str(yyscanner, PE_NAME); }
+"/" { BEGIN(config); return '/'; }
+- { return '-'; }
+, { BEGIN(event); return ','; }
+: { return ':'; }
+"{" { BEGIN(event); return '{'; }
+"}" { return '}'; }
+= { return '='; }
+\n { }
+. { }
+
+%%
+
+int parse_events_wrap(void *scanner __maybe_unused)
+{
+ return 1;
+}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
new file mode 100644
index 00000000000..0bc87ba46bf
--- /dev/null
+++ b/tools/perf/util/parse-events.y
@@ -0,0 +1,454 @@
+%pure-parser
+%parse-param {void *_data}
+%parse-param {void *scanner}
+%lex-param {void* scanner}
+
+%{
+
+#define YYDEBUG 1
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include "util.h"
+#include "parse-events.h"
+#include "parse-events-bison.h"
+
+extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+#define ALLOC_LIST(list) \
+do { \
+ list = malloc(sizeof(*list)); \
+ ABORT_ON(!list); \
+ INIT_LIST_HEAD(list); \
+} while (0)
+
+static void inc_group_count(struct list_head *list,
+ struct parse_events_evlist *data)
+{
+ /* Count groups only if they have more than one member */
+ if (!list_is_last(list->next, list))
+ data->nr_groups++;
+}
+
+%}
+
+%token PE_START_EVENTS PE_START_TERMS
+%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
+%token PE_EVENT_NAME
+%token PE_NAME
+%token PE_MODIFIER_EVENT PE_MODIFIER_BP
+%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
+%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
+%token PE_ERROR
+%type <num> PE_VALUE
+%type <num> PE_VALUE_SYM_HW
+%type <num> PE_VALUE_SYM_SW
+%type <num> PE_RAW
+%type <num> PE_TERM
+%type <str> PE_NAME
+%type <str> PE_NAME_CACHE_TYPE
+%type <str> PE_NAME_CACHE_OP_RESULT
+%type <str> PE_MODIFIER_EVENT
+%type <str> PE_MODIFIER_BP
+%type <str> PE_EVENT_NAME
+%type <num> value_sym
+%type <head> event_config
+%type <term> event_term
+%type <head> event_pmu
+%type <head> event_legacy_symbol
+%type <head> event_legacy_cache
+%type <head> event_legacy_mem
+%type <head> event_legacy_tracepoint
+%type <head> event_legacy_numeric
+%type <head> event_legacy_raw
+%type <head> event_def
+%type <head> event_mod
+%type <head> event_name
+%type <head> event
+%type <head> events
+%type <head> group_def
+%type <head> group
+%type <head> groups
+
+%union
+{
+ char *str;
+ u64 num;
+ struct list_head *head;
+ struct parse_events_term *term;
+}
+%%
+
+start:
+PE_START_EVENTS start_events
+|
+PE_START_TERMS start_terms
+
+start_events: groups
+{
+ struct parse_events_evlist *data = _data;
+
+ parse_events_update_lists($1, &data->list);
+}
+
+groups:
+groups ',' group
+{
+ struct list_head *list = $1;
+ struct list_head *group = $3;
+
+ parse_events_update_lists(group, list);
+ $$ = list;
+}
+|
+groups ',' event
+{
+ struct list_head *list = $1;
+ struct list_head *event = $3;
+
+ parse_events_update_lists(event, list);
+ $$ = list;
+}
+|
+group
+|
+event
+
+group:
+group_def ':' PE_MODIFIER_EVENT
+{
+ struct list_head *list = $1;
+
+ ABORT_ON(parse_events__modifier_group(list, $3));
+ $$ = list;
+}
+|
+group_def
+
+group_def:
+PE_NAME '{' events '}'
+{
+ struct list_head *list = $3;
+
+ inc_group_count(list, _data);
+ parse_events__set_leader($1, list);
+ $$ = list;
+}
+|
+'{' events '}'
+{
+ struct list_head *list = $2;
+
+ inc_group_count(list, _data);
+ parse_events__set_leader(NULL, list);
+ $$ = list;
+}
+
+events:
+events ',' event
+{
+ struct list_head *event = $3;
+ struct list_head *list = $1;
+
+ parse_events_update_lists(event, list);
+ $$ = list;
+}
+|
+event
+
+event: event_mod
+
+event_mod:
+event_name PE_MODIFIER_EVENT
+{
+ struct list_head *list = $1;
+
+ /*
+ * Apply the modifier to all events added by a single event definition
+ * (there could be more events added for multiple tracepoint
+ * definitions via '*?').
+ */
+ ABORT_ON(parse_events__modifier_event(list, $2, false));
+ $$ = list;
+}
+|
+event_name
+
+event_name:
+PE_EVENT_NAME event_def
+{
+ ABORT_ON(parse_events_name($2, $1));
+ free($1);
+ $$ = $2;
+}
+|
+event_def
+
+event_def: event_pmu |
+ event_legacy_symbol |
+ event_legacy_cache sep_dc |
+ event_legacy_mem |
+ event_legacy_tracepoint sep_dc |
+ event_legacy_numeric sep_dc |
+ event_legacy_raw sep_dc
+
+event_pmu:
+PE_NAME '/' event_config '/'
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3));
+ parse_events__free_terms($3);
+ $$ = list;
+}
+
+value_sym:
+PE_VALUE_SYM_HW
+|
+PE_VALUE_SYM_SW
+
+event_legacy_symbol:
+value_sym '/' event_config '/'
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+ int type = $1 >> 16;
+ int config = $1 & 255;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
+ type, config, $3));
+ parse_events__free_terms($3);
+ $$ = list;
+}
+|
+value_sym sep_slash_dc
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+ int type = $1 >> 16;
+ int config = $1 & 255;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
+ type, config, NULL));
+ $$ = list;
+}
+
+event_legacy_cache:
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5));
+ $$ = list;
+}
+|
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL));
+ $$ = list;
+}
+|
+PE_NAME_CACHE_TYPE
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL));
+ $$ = list;
+}
+
+event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+ (void *) $2, $4));
+ $$ = list;
+}
+|
+PE_PREFIX_MEM PE_VALUE sep_dc
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+ (void *) $2, NULL));
+ $$ = list;
+}
+
+event_legacy_tracepoint:
+PE_NAME '-' PE_NAME ':' PE_NAME
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+ char sys_name[128];
+ snprintf(sys_name, sizeof(sys_name), "%s-%s", $1, $3);
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_tracepoint(list, &data->idx, sys_name, $5));
+ $$ = list;
+}
+|
+PE_NAME ':' PE_NAME
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3));
+ $$ = list;
+}
+
+event_legacy_numeric:
+PE_VALUE ':' PE_VALUE
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL));
+ $$ = list;
+}
+
+event_legacy_raw:
+PE_RAW
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
+ PERF_TYPE_RAW, $1, NULL));
+ $$ = list;
+}
+
+start_terms: event_config
+{
+ struct parse_events_terms *data = _data;
+ data->terms = $1;
+}
+
+event_config:
+event_config ',' event_term
+{
+ struct list_head *head = $1;
+ struct parse_events_term *term = $3;
+
+ ABORT_ON(!head);
+ list_add_tail(&term->list, head);
+ $$ = $1;
+}
+|
+event_term
+{
+ struct list_head *head = malloc(sizeof(*head));
+ struct parse_events_term *term = $1;
+
+ ABORT_ON(!head);
+ INIT_LIST_HEAD(head);
+ list_add_tail(&term->list, head);
+ $$ = head;
+}
+
+event_term:
+PE_NAME '=' PE_NAME
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $3));
+ $$ = term;
+}
+|
+PE_NAME '=' PE_VALUE
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, $3));
+ $$ = term;
+}
+|
+PE_NAME '=' PE_VALUE_SYM_HW
+{
+ struct parse_events_term *term;
+ int config = $3 & 255;
+
+ ABORT_ON(parse_events_term__sym_hw(&term, $1, config));
+ $$ = term;
+}
+|
+PE_NAME
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+ $1, 1));
+ $$ = term;
+}
+|
+PE_VALUE_SYM_HW
+{
+ struct parse_events_term *term;
+ int config = $1 & 255;
+
+ ABORT_ON(parse_events_term__sym_hw(&term, NULL, config));
+ $$ = term;
+}
+|
+PE_TERM '=' PE_NAME
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3));
+ $$ = term;
+}
+|
+PE_TERM '=' PE_VALUE
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3));
+ $$ = term;
+}
+|
+PE_TERM
+{
+ struct parse_events_term *term;
+
+ ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1));
+ $$ = term;
+}
+
+sep_dc: ':' |
+
+sep_slash_dc: '/' | ':' |
+
+%%
+
+void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused,
+ char const *msg __maybe_unused)
+{
+}
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 99d02aa57db..bf48092983c 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -1,6 +1,7 @@
#include "util.h"
#include "parse-options.h"
#include "cache.h"
+#include "header.h"
#define OPT_SHORT 1
#define OPT_UNSET 2
@@ -77,6 +78,8 @@ static int get_value(struct parse_opt_ctx_t *p,
case OPTION_BOOLEAN:
*(bool *)opt->value = unset ? false : true;
+ if (opt->set)
+ *(bool *)opt->set = true;
return 0;
case OPTION_INCR:
@@ -223,6 +226,24 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
return 0;
}
if (!rest) {
+ if (!prefixcmp(options->long_name, "no-")) {
+ /*
+ * The long name itself starts with "no-", so
+ * accept the option without "no-" so that users
+ * do not have to enter "no-no-" to get the
+ * negation.
+ */
+ rest = skip_prefix(arg, options->long_name + 3);
+ if (rest) {
+ flags |= OPT_UNSET;
+ goto match;
+ }
+ /* Abbreviated case */
+ if (!prefixcmp(options->long_name + 3, arg)) {
+ flags |= OPT_UNSET;
+ goto is_abbreviated;
+ }
+ }
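
For illustration only (the option below is hypothetical), this lets an option whose canonical long name already starts with "no-" be negated by its stripped form instead of a clumsy "no-no-" spelling:

    bool no_inherit = false;
    struct option opts[] = {
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
                    "child tasks do not inherit counters"),
        OPT_END()
    };
    /* "--no-inherit" sets no_inherit to true;
     * "--inherit" is now accepted and sets it back to false. */
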
/* abbreviated? */
if (!strncmp(options->long_name, arg, arg_end - arg)) {
is_abbreviated:
@@ -258,6 +279,7 @@ is_abbreviated:
if (!rest)
continue;
}
+match:
if (*rest) {
if (*rest != '=')
continue;
@@ -338,10 +360,10 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
if (arg[1] != '-') {
ctx->opt = arg + 1;
if (internal_help && *ctx->opt == 'h')
- return parse_options_usage(usagestr, options);
+ return usage_with_options_internal(usagestr, options, 0);
switch (parse_short_opt(ctx, options)) {
case -1:
- return parse_options_usage(usagestr, options);
+ return parse_options_usage(usagestr, options, arg + 1, 1);
case -2:
goto unknown;
default:
@@ -351,10 +373,11 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
check_typos(arg + 1, options);
while (ctx->opt) {
if (internal_help && *ctx->opt == 'h')
- return parse_options_usage(usagestr, options);
+ return usage_with_options_internal(usagestr, options, 0);
+ arg = ctx->opt;
switch (parse_short_opt(ctx, options)) {
case -1:
- return parse_options_usage(usagestr, options);
+ return parse_options_usage(usagestr, options, arg, 1);
case -2:
/* fake a short option thing to hide the fact that we may have
* started to parse aggregated stuff
@@ -382,10 +405,14 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
if (internal_help && !strcmp(arg + 2, "help-all"))
return usage_with_options_internal(usagestr, options, 1);
if (internal_help && !strcmp(arg + 2, "help"))
- return parse_options_usage(usagestr, options);
+ return usage_with_options_internal(usagestr, options, 0);
+ if (!strcmp(arg + 2, "list-opts"))
+ return PARSE_OPT_LIST_OPTS;
+ if (!strcmp(arg + 2, "list-cmds"))
+ return PARSE_OPT_LIST_SUBCMDS;
switch (parse_long_opt(ctx, arg + 2, options)) {
case -1:
- return parse_options_usage(usagestr, options);
+ return parse_options_usage(usagestr, options, arg + 2, 0);
case -2:
goto unknown;
default:
@@ -408,17 +435,45 @@ int parse_options_end(struct parse_opt_ctx_t *ctx)
return ctx->cpidx + ctx->argc;
}
-int parse_options(int argc, const char **argv, const struct option *options,
- const char * const usagestr[], int flags)
+int parse_options_subcommand(int argc, const char **argv, const struct option *options,
+ const char *const subcommands[], const char *usagestr[], int flags)
{
struct parse_opt_ctx_t ctx;
+ perf_header__set_cmdline(argc, argv);
+
+ /* build usage string if it's not provided */
+ if (subcommands && !usagestr[0]) {
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "perf %s [<options>] {", argv[0]);
+ for (int i = 0; subcommands[i]; i++) {
+ if (i)
+ strbuf_addstr(&buf, "|");
+ strbuf_addstr(&buf, subcommands[i]);
+ }
+ strbuf_addstr(&buf, "}");
+
+ usagestr[0] = strdup(buf.buf);
+ strbuf_release(&buf);
+ }
+
parse_options_start(&ctx, argc, argv, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
exit(129);
case PARSE_OPT_DONE:
break;
+ case PARSE_OPT_LIST_OPTS:
+ while (options->type != OPTION_END) {
+ printf("--%s ", options->long_name);
+ options++;
+ }
+ exit(130);
+ case PARSE_OPT_LIST_SUBCMDS:
+ for (int i = 0; subcommands[i]; i++)
+ printf("%s ", subcommands[i]);
+ exit(130);
default: /* PARSE_OPT_UNKNOWN */
if (ctx.argv[0][1] == '-') {
error("unknown option `%s'", ctx.argv[0] + 2);
@@ -431,9 +486,99 @@ int parse_options(int argc, const char **argv, const struct option *options,
return parse_options_end(&ctx);
}
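
A sketch of the auto-built usage line (the subcommand names, options and argv are assumed): when usagestr[0] is NULL and a subcommand table is passed, the strbuf loop above produces a line such as "perf kvm [<options>] {top|record|report}", assuming argv[0] is "kvm":

    static const char * const kvm_subcommands[] = { "top", "record", "report", NULL };
    const char *kvm_usage[] = { NULL, NULL };

    argc = parse_options_subcommand(argc, argv, options, kvm_subcommands,
                                    kvm_usage, PARSE_OPT_STOP_AT_NON_OPTION);
    /* kvm_usage[0] now reads "perf kvm [<options>] {top|record|report}" */
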
+int parse_options(int argc, const char **argv, const struct option *options,
+ const char * const usagestr[], int flags)
+{
+ return parse_options_subcommand(argc, argv, options, NULL,
+ (const char **) usagestr, flags);
+}
+
#define USAGE_OPTS_WIDTH 24
#define USAGE_GAP 2
+static void print_option_help(const struct option *opts, int full)
+{
+ size_t pos;
+ int pad;
+
+ if (opts->type == OPTION_GROUP) {
+ fputc('\n', stderr);
+ if (*opts->help)
+ fprintf(stderr, "%s\n", opts->help);
+ return;
+ }
+ if (!full && (opts->flags & PARSE_OPT_HIDDEN))
+ return;
+
+ pos = fprintf(stderr, " ");
+ if (opts->short_name)
+ pos += fprintf(stderr, "-%c", opts->short_name);
+ else
+ pos += fprintf(stderr, " ");
+
+ if (opts->long_name && opts->short_name)
+ pos += fprintf(stderr, ", ");
+ if (opts->long_name)
+ pos += fprintf(stderr, "--%s", opts->long_name);
+
+ switch (opts->type) {
+ case OPTION_ARGUMENT:
+ break;
+ case OPTION_LONG:
+ case OPTION_U64:
+ case OPTION_INTEGER:
+ case OPTION_UINTEGER:
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=<n>]");
+ else
+ pos += fprintf(stderr, "[<n>]");
+ else
+ pos += fprintf(stderr, " <n>");
+ break;
+ case OPTION_CALLBACK:
+ if (opts->flags & PARSE_OPT_NOARG)
+ break;
+ /* FALLTHROUGH */
+ case OPTION_STRING:
+ if (opts->argh) {
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=<%s>]", opts->argh);
+ else
+ pos += fprintf(stderr, "[<%s>]", opts->argh);
+ else
+ pos += fprintf(stderr, " <%s>", opts->argh);
+ } else {
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=...]");
+ else
+ pos += fprintf(stderr, "[...]");
+ else
+ pos += fprintf(stderr, " ...");
+ }
+ break;
+ default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */
+ case OPTION_END:
+ case OPTION_GROUP:
+ case OPTION_BIT:
+ case OPTION_BOOLEAN:
+ case OPTION_INCR:
+ case OPTION_SET_UINT:
+ case OPTION_SET_PTR:
+ break;
+ }
+
+ if (pos <= USAGE_OPTS_WIDTH)
+ pad = USAGE_OPTS_WIDTH - pos;
+ else {
+ fputc('\n', stderr);
+ pad = USAGE_OPTS_WIDTH;
+ }
+ fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
+}
+
int usage_with_options_internal(const char * const *usagestr,
const struct option *opts, int full)
{
@@ -453,87 +598,9 @@ int usage_with_options_internal(const char * const *usagestr,
if (opts->type != OPTION_GROUP)
fputc('\n', stderr);
- for (; opts->type != OPTION_END; opts++) {
- size_t pos;
- int pad;
+ for ( ; opts->type != OPTION_END; opts++)
+ print_option_help(opts, full);
- if (opts->type == OPTION_GROUP) {
- fputc('\n', stderr);
- if (*opts->help)
- fprintf(stderr, "%s\n", opts->help);
- continue;
- }
- if (!full && (opts->flags & PARSE_OPT_HIDDEN))
- continue;
-
- pos = fprintf(stderr, " ");
- if (opts->short_name)
- pos += fprintf(stderr, "-%c", opts->short_name);
- else
- pos += fprintf(stderr, " ");
-
- if (opts->long_name && opts->short_name)
- pos += fprintf(stderr, ", ");
- if (opts->long_name)
- pos += fprintf(stderr, "--%s", opts->long_name);
-
- switch (opts->type) {
- case OPTION_ARGUMENT:
- break;
- case OPTION_LONG:
- case OPTION_U64:
- case OPTION_INTEGER:
- case OPTION_UINTEGER:
- if (opts->flags & PARSE_OPT_OPTARG)
- if (opts->long_name)
- pos += fprintf(stderr, "[=<n>]");
- else
- pos += fprintf(stderr, "[<n>]");
- else
- pos += fprintf(stderr, " <n>");
- break;
- case OPTION_CALLBACK:
- if (opts->flags & PARSE_OPT_NOARG)
- break;
- /* FALLTHROUGH */
- case OPTION_STRING:
- if (opts->argh) {
- if (opts->flags & PARSE_OPT_OPTARG)
- if (opts->long_name)
- pos += fprintf(stderr, "[=<%s>]", opts->argh);
- else
- pos += fprintf(stderr, "[<%s>]", opts->argh);
- else
- pos += fprintf(stderr, " <%s>", opts->argh);
- } else {
- if (opts->flags & PARSE_OPT_OPTARG)
- if (opts->long_name)
- pos += fprintf(stderr, "[=...]");
- else
- pos += fprintf(stderr, "[...]");
- else
- pos += fprintf(stderr, " ...");
- }
- break;
- default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */
- case OPTION_END:
- case OPTION_GROUP:
- case OPTION_BIT:
- case OPTION_BOOLEAN:
- case OPTION_INCR:
- case OPTION_SET_UINT:
- case OPTION_SET_PTR:
- break;
- }
-
- if (pos <= USAGE_OPTS_WIDTH)
- pad = USAGE_OPTS_WIDTH - pos;
- else {
- fputc('\n', stderr);
- pad = USAGE_OPTS_WIDTH;
- }
- fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
- }
fputc('\n', stderr);
return PARSE_OPT_HELP;
@@ -548,13 +615,50 @@ void usage_with_options(const char * const *usagestr,
}
int parse_options_usage(const char * const *usagestr,
- const struct option *opts)
+ const struct option *opts,
+ const char *optstr, bool short_opt)
{
- return usage_with_options_internal(usagestr, opts, 0);
+ if (!usagestr)
+ goto opt;
+
+ fprintf(stderr, "\n usage: %s\n", *usagestr++);
+ while (*usagestr && **usagestr)
+ fprintf(stderr, " or: %s\n", *usagestr++);
+ while (*usagestr) {
+ fprintf(stderr, "%s%s\n",
+ **usagestr ? " " : "",
+ *usagestr);
+ usagestr++;
+ }
+ fputc('\n', stderr);
+
+opt:
+ for ( ; opts->type != OPTION_END; opts++) {
+ if (short_opt) {
+ if (opts->short_name == *optstr)
+ break;
+ continue;
+ }
+
+ if (opts->long_name == NULL)
+ continue;
+
+ if (!prefixcmp(optstr, opts->long_name))
+ break;
+ if (!prefixcmp(optstr, "no-") &&
+ !prefixcmp(optstr + 3, opts->long_name))
+ break;
+ }
+
+ if (opts->type != OPTION_END)
+ print_option_help(opts, 0);
+
+ return PARSE_OPT_HELP;
}
-int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used,
+int parse_opt_verbosity_cb(const struct option *opt,
+ const char *arg __maybe_unused,
int unset)
{
int *target = opt->value;
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index c7d72dce54b..d8dac8ac5f3 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -82,6 +82,9 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
* OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in
* the value when met.
* CALLBACKS can use it like they want.
+ *
+ * `set`::
+ * whether an option was set by the user
*/
struct option {
enum parse_opt_type type;
@@ -94,6 +97,7 @@ struct option {
int flags;
parse_opt_cb *callback;
intptr_t defval;
+ bool *set;
};
#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
@@ -103,6 +107,10 @@ struct option {
#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
+#define OPT_BOOLEAN_SET(s, l, v, os, h) \
+ { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \
+ .value = check_vtype(v, bool *), .help = (h), \
+ .set = check_vtype(os, bool *)}
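
A sketch of the intended use (names are illustrative): the extra bool lets a command distinguish "user explicitly passed --foo/--no-foo" from "value is still the built-in default":

    bool use_mmap = true, mmap_set = false;
    struct option opts[] = {
        OPT_BOOLEAN_SET('m', "mmap", &use_mmap, &mmap_set,
                        "record via mmap'ed buffers"),
        OPT_END()
    };
    /* after parsing: mmap_set tells whether the user gave the option at all */
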
#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
#define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) }
#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
@@ -119,6 +127,10 @@ struct option {
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
+#define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\
+ .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
+ .flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG}
/* parse_options() will filter out the processed options and leave the
 * non-option arguments in argv[].
@@ -128,6 +140,11 @@ extern int parse_options(int argc, const char **argv,
const struct option *options,
const char * const usagestr[], int flags);
+extern int parse_options_subcommand(int argc, const char **argv,
+ const struct option *options,
+ const char *const subcommands[],
+ const char *usagestr[], int flags);
+
extern NORETURN void usage_with_options(const char * const *usagestr,
const struct option *options);
@@ -136,6 +153,8 @@ extern NORETURN void usage_with_options(const char * const *usagestr,
enum {
PARSE_OPT_HELP = -1,
PARSE_OPT_DONE,
+ PARSE_OPT_LIST_OPTS,
+ PARSE_OPT_LIST_SUBCMDS,
PARSE_OPT_UNKNOWN,
};
@@ -153,7 +172,9 @@ struct parse_opt_ctx_t {
};
extern int parse_options_usage(const char * const *usagestr,
- const struct option *opts);
+ const struct option *opts,
+ const char *optstr,
+ bool short_opt);
extern void parse_options_start(struct parse_opt_ctx_t *ctx,
int argc, const char **argv, int flags);
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index bd749771142..5d13cb45b31 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -22,19 +22,23 @@ static const char *get_perf_dir(void)
return ".";
}
-#ifdef NO_STRLCPY
-size_t strlcpy(char *dest, const char *src, size_t size)
+/*
+ * If libc has strlcpy() then that version will override this
+ * implementation:
+ */
+size_t __weak strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
if (size) {
size_t len = (ret >= size) ? size - 1 : ret;
+
memcpy(dest, src, len);
dest[len] = '\0';
}
+
return ret;
}
-#endif
static char *get_pathname(void)
{
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
new file mode 100644
index 00000000000..43168fb0d9a
--- /dev/null
+++ b/tools/perf/util/perf_regs.c
@@ -0,0 +1,27 @@
+#include <errno.h>
+#include "perf_regs.h"
+#include "event.h"
+
+int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
+{
+ int i, idx = 0;
+ u64 mask = regs->mask;
+
+ if (regs->cache_mask & (1 << id))
+ goto out;
+
+ if (!(mask & (1 << id)))
+ return -EINVAL;
+
+ for (i = 0; i < id; i++) {
+ if (mask & (1 << i))
+ idx++;
+ }
+
+ regs->cache_mask |= (1 << id);
+ regs->cache_regs[id] = regs->regs[idx];
+
+out:
+ *valp = regs->cache_regs[id];
+ return 0;
+}
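
The kernel dumps only the registers selected by regs->mask, so regs->regs[] is a packed array and the loop above maps a register id to its slot by counting the mask bits below it. A worked example with made-up values:

    /* mask = 0xb (registers 0, 1 and 3 sampled) => regs[] = { r0, r1, r3 } */
    u64 mask = 0xb;
    int id = 3, i, idx = 0;

    for (i = 0; i < id; i++)
        if (mask & (1ULL << i))
            idx++;
    /* idx == 2, so register 3 is found in regs->regs[2] */
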
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
new file mode 100644
index 00000000000..980dbf76bc9
--- /dev/null
+++ b/tools/perf/util/perf_regs.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_REGS_H
+#define __PERF_REGS_H
+
+#include <linux/types.h>
+
+struct regs_dump;
+
+#ifdef HAVE_PERF_REGS_SUPPORT
+#include <perf_regs.h>
+
+int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
+
+#else
+#define PERF_REGS_MASK 0
+#define PERF_REGS_MAX 0
+
+static inline const char *perf_reg_name(int id __maybe_unused)
+{
+ return NULL;
+}
+
+static inline int perf_reg_value(u64 *valp __maybe_unused,
+ struct regs_dump *regs __maybe_unused,
+ int id __maybe_unused)
+{
+ return 0;
+}
+#endif /* HAVE_PERF_REGS_SUPPORT */
+#endif /* __PERF_REGS_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
new file mode 100644
index 00000000000..7a811eb61f7
--- /dev/null
+++ b/tools/perf/util/pmu.c
@@ -0,0 +1,796 @@
+#include <linux/list.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <dirent.h>
+#include <api/fs/fs.h>
+#include <locale.h>
+#include "util.h"
+#include "pmu.h"
+#include "parse-events.h"
+#include "cpumap.h"
+
+#define UNIT_MAX_LEN 31 /* max length for event unit name */
+
+struct perf_pmu_alias {
+ char *name;
+ struct list_head terms;
+ struct list_head list;
+ char unit[UNIT_MAX_LEN+1];
+ double scale;
+};
+
+struct perf_pmu_format {
+ char *name;
+ int value;
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+ struct list_head list;
+};
+
+#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
+
+int perf_pmu_parse(struct list_head *list, char *name);
+extern FILE *perf_pmu_in;
+
+static LIST_HEAD(pmus);
+
+/*
+ * Parse & process all the sysfs attributes located under
+ * the directory specified in 'dir' parameter.
+ */
+int perf_pmu__format_parse(char *dir, struct list_head *head)
+{
+ struct dirent *evt_ent;
+ DIR *format_dir;
+ int ret = 0;
+
+ format_dir = opendir(dir);
+ if (!format_dir)
+ return -EINVAL;
+
+ while (!ret && (evt_ent = readdir(format_dir))) {
+ char path[PATH_MAX];
+ char *name = evt_ent->d_name;
+ FILE *file;
+
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+ ret = -EINVAL;
+ file = fopen(path, "r");
+ if (!file)
+ break;
+
+ perf_pmu_in = file;
+ ret = perf_pmu_parse(head, name);
+ fclose(file);
+ }
+
+ closedir(format_dir);
+ return ret;
+}
+
+/*
+ * Reading/parsing the default pmu format definition, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
+ */
+static int pmu_format(const char *name, struct list_head *format)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s" EVENT_SOURCE_DEVICE_PATH "%s/format", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return 0; /* no error if format does not exist */
+
+ if (perf_pmu__format_parse(path, format))
+ return -1;
+
+ return 0;
+}
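
For reference, each file under .../<dev>/format/ holds one line naming the config field and bit range it occupies; on a typical x86 box, for example (exact contents vary per PMU):

    /*
     * $ cat /sys/bus/event_source/devices/cpu/format/umask
     * config:8-15
     *
     * perf_pmu_parse() feeds that line to perf_pmu__new_format(head, "umask",
     * PERF_PMU_FORMAT_VALUE_CONFIG, bits) with bits 8..15 set by
     * perf_pmu__set_format().
     */
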
+
+static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *name)
+{
+ struct stat st;
+ ssize_t sret;
+ char scale[128];
+ int fd, ret = -1;
+ char path[PATH_MAX];
+ const char *lc;
+
+ snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ if (fstat(fd, &st) < 0)
+ goto error;
+
+ sret = read(fd, scale, sizeof(scale)-1);
+ if (sret < 0)
+ goto error;
+
+ scale[sret] = '\0';
+ /*
+ * save current locale
+ */
+ lc = setlocale(LC_NUMERIC, NULL);
+
+ /*
+ * Force the C locale to ensure the kernel's
+ * scale string is converted correctly; the
+ * kernel uses the default C locale.
+ */
+ setlocale(LC_NUMERIC, "C");
+
+ alias->scale = strtod(scale, NULL);
+
+ /* restore locale */
+ setlocale(LC_NUMERIC, lc);
+
+ ret = 0;
+error:
+ close(fd);
+ return ret;
+}
+
+static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *name)
+{
+ char path[PATH_MAX];
+ ssize_t sret;
+ int fd;
+
+ snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ sret = read(fd, alias->unit, UNIT_MAX_LEN);
+ if (sret < 0)
+ goto error;
+
+ close(fd);
+
+ alias->unit[sret] = '\0';
+
+ return 0;
+error:
+ close(fd);
+ alias->unit[0] = '\0';
+ return -1;
+}
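
The .unit and .scale files sit next to the alias file itself; a RAPL-style energy event, for instance, might look like this (values are illustrative):

    /*
     * events/energy-pkg        ->  "event=0x02"
     * events/energy-pkg.unit   ->  "Joules"
     * events/energy-pkg.scale  ->  "2.3283064365386962890625e-10"
     *
     * perf_pmu__parse_unit() copies "Joules" into alias->unit and
     * perf_pmu__parse_scale() converts the scale with strtod() under
     * the C locale.
     */
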
+
+static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file)
+{
+ struct perf_pmu_alias *alias;
+ char buf[256];
+ int ret;
+
+ ret = fread(buf, 1, sizeof(buf), file);
+ if (ret == 0)
+ return -EINVAL;
+ buf[ret] = 0;
+
+ alias = malloc(sizeof(*alias));
+ if (!alias)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&alias->terms);
+ alias->scale = 1.0;
+ alias->unit[0] = '\0';
+
+ ret = parse_events_terms(&alias->terms, buf);
+ if (ret) {
+ free(alias);
+ return ret;
+ }
+
+ alias->name = strdup(name);
+ /*
+ * load unit name and scale if available
+ */
+ perf_pmu__parse_unit(alias, dir, name);
+ perf_pmu__parse_scale(alias, dir, name);
+
+ list_add_tail(&alias->list, list);
+
+ return 0;
+}
+
+/*
+ * Process all the sysfs attributes located under the directory
+ * specified in 'dir' parameter.
+ */
+static int pmu_aliases_parse(char *dir, struct list_head *head)
+{
+ struct dirent *evt_ent;
+ DIR *event_dir;
+ size_t len;
+ int ret = 0;
+
+ event_dir = opendir(dir);
+ if (!event_dir)
+ return -EINVAL;
+
+ while (!ret && (evt_ent = readdir(event_dir))) {
+ char path[PATH_MAX];
+ char *name = evt_ent->d_name;
+ FILE *file;
+
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ continue;
+
+ /*
+ * skip .unit and .scale info files
+ * parsed in perf_pmu__new_alias()
+ */
+ len = strlen(name);
+ if (len > 5 && !strcmp(name + len - 5, ".unit"))
+ continue;
+ if (len > 6 && !strcmp(name + len - 6, ".scale"))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+ ret = -EINVAL;
+ file = fopen(path, "r");
+ if (!file)
+ break;
+
+ ret = perf_pmu__new_alias(head, dir, name, file);
+ fclose(file);
+ }
+
+ closedir(event_dir);
+ return ret;
+}
+
+/*
+ * Reading the pmu event aliases definition, which should be located at:
+ * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
+ */
+static int pmu_aliases(const char *name, struct list_head *head)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/events", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return 0; /* no error if 'events' does not exist */
+
+ if (pmu_aliases_parse(path, head))
+ return -1;
+
+ return 0;
+}
+
+static int pmu_alias_terms(struct perf_pmu_alias *alias,
+ struct list_head *terms)
+{
+ struct parse_events_term *term, *cloned;
+ LIST_HEAD(list);
+ int ret;
+
+ list_for_each_entry(term, &alias->terms, list) {
+ ret = parse_events_term__clone(&cloned, term);
+ if (ret) {
+ parse_events__free_terms(&list);
+ return ret;
+ }
+ list_add_tail(&cloned->list, &list);
+ }
+ list_splice(&list, terms);
+ return 0;
+}
+
+/*
+ * Reading/parsing the default pmu type value, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
+ */
+static int pmu_type(const char *name, __u32 *type)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ FILE *file;
+ int ret = 0;
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s" EVENT_SOURCE_DEVICE_PATH "%s/type", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ file = fopen(path, "r");
+ if (!file)
+ return -EINVAL;
+
+ if (1 != fscanf(file, "%u", type))
+ ret = -1;
+
+ fclose(file);
+ return ret;
+}
+
+/* Add all pmus in sysfs to pmu list: */
+static void pmu_read_sysfs(void)
+{
+ char path[PATH_MAX];
+ DIR *dir;
+ struct dirent *dent;
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return;
+
+ snprintf(path, PATH_MAX,
+ "%s" EVENT_SOURCE_DEVICE_PATH, sysfs);
+
+ dir = opendir(path);
+ if (!dir)
+ return;
+
+ while ((dent = readdir(dir))) {
+ if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
+ continue;
+ /* add to static LIST_HEAD(pmus): */
+ perf_pmu__find(dent->d_name);
+ }
+
+ closedir(dir);
+}
+
+static struct cpu_map *pmu_cpumask(const char *name)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ FILE *file;
+ struct cpu_map *cpus;
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return NULL;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/cpumask", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return NULL;
+
+ file = fopen(path, "r");
+ if (!file)
+ return NULL;
+
+ cpus = cpu_map__read(file);
+ fclose(file);
+ return cpus;
+}
+
+static struct perf_pmu *pmu_lookup(const char *name)
+{
+ struct perf_pmu *pmu;
+ LIST_HEAD(format);
+ LIST_HEAD(aliases);
+ __u32 type;
+
+ /*
+ * The pmu data we store & need consists of the pmu
+ * type value and format definitions. Load both right
+ * now.
+ */
+ if (pmu_format(name, &format))
+ return NULL;
+
+ if (pmu_aliases(name, &aliases))
+ return NULL;
+
+ if (pmu_type(name, &type))
+ return NULL;
+
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return NULL;
+
+ pmu->cpus = pmu_cpumask(name);
+
+ INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->aliases);
+ list_splice(&format, &pmu->format);
+ list_splice(&aliases, &pmu->aliases);
+ pmu->name = strdup(name);
+ pmu->type = type;
+ list_add_tail(&pmu->list, &pmus);
+ return pmu;
+}
+
+static struct perf_pmu *pmu_find(const char *name)
+{
+ struct perf_pmu *pmu;
+
+ list_for_each_entry(pmu, &pmus, list)
+ if (!strcmp(pmu->name, name))
+ return pmu;
+
+ return NULL;
+}
+
+struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
+{
+ /*
+ * PMU iterator: if pmu is NULL, start at the beginning;
+ * otherwise return the next PMU. Returns NULL at the end.
+ */
+ if (!pmu) {
+ pmu_read_sysfs();
+ pmu = list_prepare_entry(pmu, &pmus, list);
+ }
+ list_for_each_entry_continue(pmu, &pmus, list)
+ return pmu;
+ return NULL;
+}
+
+struct perf_pmu *perf_pmu__find(const char *name)
+{
+ struct perf_pmu *pmu;
+
+ /*
+ * Once a PMU is loaded it stays in the list, which keeps us
+ * from reading/parsing the PMU format definitions more
+ * than once.
+ */
+ pmu = pmu_find(name);
+ if (pmu)
+ return pmu;
+
+ return pmu_lookup(name);
+}
+
+static struct perf_pmu_format *
+pmu_find_format(struct list_head *formats, char *name)
+{
+ struct perf_pmu_format *format;
+
+ list_for_each_entry(format, formats, list)
+ if (!strcmp(format->name, name))
+ return format;
+
+ return NULL;
+}
+
+/*
+ * Returns value based on the format definition (format parameter)
+ * and the unformatted value (value parameter).
+ *
+ * TODO maybe optimize a little ;)
+ */
+static __u64 pmu_format_value(unsigned long *format, __u64 value)
+{
+ unsigned long fbit, vbit;
+ __u64 v = 0;
+
+ for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
+
+ if (!test_bit(fbit, format))
+ continue;
+
+ if (!(value & (1llu << vbit++)))
+ continue;
+
+ v |= (1llu << fbit);
+ }
+
+ return v;
+}
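
A worked example of the bit spreading (format and value are hypothetical): with a format declared as "config:1-2,4", only bits 1, 2 and 4 are set in 'format', and the user's value is applied to them from its low bits upwards:

    /*
     * format bits : {1, 2, 4}
     * value       : 0x5  (vbit 0 -> fbit 1, vbit 1 -> fbit 2, vbit 2 -> fbit 4)
     * result      : (1 << 1) | (1 << 4) = 0x12
     */
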
+
+/*
+ * Setup one of config[12] attr members based on the
+ * user input data - term parameter.
+ */
+static int pmu_config_term(struct list_head *formats,
+ struct perf_event_attr *attr,
+ struct parse_events_term *term)
+{
+ struct perf_pmu_format *format;
+ __u64 *vp;
+
+ /*
+ * Support only hardcoded and numerical terms.
+ * Hardcoded terms should already be in place, so there is
+ * nothing to be done for them here.
+ */
+ if (parse_events__is_hardcoded_term(term))
+ return 0;
+
+ if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
+ return -EINVAL;
+
+ format = pmu_find_format(formats, term->config);
+ if (!format)
+ return -EINVAL;
+
+ switch (format->value) {
+ case PERF_PMU_FORMAT_VALUE_CONFIG:
+ vp = &attr->config;
+ break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG1:
+ vp = &attr->config1;
+ break;
+ case PERF_PMU_FORMAT_VALUE_CONFIG2:
+ vp = &attr->config2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * XXX If we ever decide to go with string values for
+ * non-hardcoded terms, here's the place to translate
+ * them into value.
+ */
+ *vp |= pmu_format_value(format->bits, term->val.num);
+ return 0;
+}
+
+int perf_pmu__config_terms(struct list_head *formats,
+ struct perf_event_attr *attr,
+ struct list_head *head_terms)
+{
+ struct parse_events_term *term;
+
+ list_for_each_entry(term, head_terms, list)
+ if (pmu_config_term(formats, attr, term))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Configures the event's 'attr' parameter based on:
+ * 1) the user's input - specified in the terms parameter
+ * 2) the pmu format definitions - specified by the pmu parameter
+ */
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+ struct list_head *head_terms)
+{
+ attr->type = pmu->type;
+ return perf_pmu__config_terms(&pmu->format, attr, head_terms);
+}
+
+static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
+ struct parse_events_term *term)
+{
+ struct perf_pmu_alias *alias;
+ char *name;
+
+ if (parse_events__is_hardcoded_term(term))
+ return NULL;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ if (term->val.num != 1)
+ return NULL;
+ if (pmu_find_format(&pmu->format, term->config))
+ return NULL;
+ name = term->config;
+ } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+ if (strcasecmp(term->config, "event"))
+ return NULL;
+ name = term->val.str;
+ } else {
+ return NULL;
+ }
+
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ if (!strcasecmp(alias->name, name))
+ return alias;
+ }
+ return NULL;
+}
+
+
+static int check_unit_scale(struct perf_pmu_alias *alias,
+ const char **unit, double *scale)
+{
+ /*
+ * Only one term in the event definition can define the
+ * unit and scale; fail if there's more than one.
+ */
+ if ((*unit && alias->unit) ||
+ (*scale && alias->scale))
+ return -EINVAL;
+
+ if (alias->unit)
+ *unit = alias->unit;
+
+ if (alias->scale)
+ *scale = alias->scale;
+
+ return 0;
+}
+
+/*
+ * Find alias in the terms list and replace it with the terms
+ * defined for the alias
+ */
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+ const char **unit, double *scale)
+{
+ struct parse_events_term *term, *h;
+ struct perf_pmu_alias *alias;
+ int ret;
+
+ /*
+ * Mark unit and scale as not set
+ * (different from default values, see below)
+ */
+ *unit = NULL;
+ *scale = 0.0;
+
+ list_for_each_entry_safe(term, h, head_terms, list) {
+ alias = pmu_find_alias(pmu, term);
+ if (!alias)
+ continue;
+ ret = pmu_alias_terms(alias, &term->list);
+ if (ret)
+ return ret;
+
+ ret = check_unit_scale(alias, unit, scale);
+ if (ret)
+ return ret;
+
+ list_del(&term->list);
+ free(term);
+ }
+
+ /*
+ * If no unit or scale was found in the aliases, set the
+ * defaults as for an evsel; the unit cannot be left as NULL.
+ */
+ if (*unit == NULL)
+ *unit = "";
+
+ if (*scale == 0.0)
+ *scale = 1.0;
+
+ return 0;
+}
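
A sketch of the substitution, assuming a hypothetical 'cpu' alias file events/mem-loads containing "event=0xcd,ldlat=3": parsing "cpu/mem-loads/" first yields a single user term named "mem-loads"; perf_pmu__check_alias() then replaces that term with the alias' own terms, as if the user had written "cpu/event=0xcd,ldlat=3/":

    struct perf_pmu *pmu = perf_pmu__find("cpu");
    const char *unit;
    double scale;
    LIST_HEAD(terms);

    if (pmu && !parse_events_terms(&terms, "mem-loads") &&
        !perf_pmu__check_alias(pmu, &terms, &unit, &scale)) {
        /* 'terms' now holds event=0xcd,ldlat=3 (plus unit/scale defaults) */
    }
    parse_events__free_terms(&terms);
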
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+ int config, unsigned long *bits)
+{
+ struct perf_pmu_format *format;
+
+ format = zalloc(sizeof(*format));
+ if (!format)
+ return -ENOMEM;
+
+ format->name = strdup(name);
+ format->value = config;
+ memcpy(format->bits, bits, sizeof(format->bits));
+
+ list_add_tail(&format->list, list);
+ return 0;
+}
+
+void perf_pmu__set_format(unsigned long *bits, long from, long to)
+{
+ long b;
+
+ if (!to)
+ to = from;
+
+ memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
+ for (b = from; b <= to; b++)
+ set_bit(b, bits);
+}
+
+static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias)
+{
+ snprintf(buf, len, "%s/%s/", pmu->name, alias->name);
+ return buf;
+}
+
+static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias)
+{
+ snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name);
+ return buf;
+}
+
+static int cmp_string(const void *a, const void *b)
+{
+ const char * const *as = a;
+ const char * const *bs = b;
+ return strcmp(*as, *bs);
+}
+
+void print_pmu_events(const char *event_glob, bool name_only)
+{
+ struct perf_pmu *pmu;
+ struct perf_pmu_alias *alias;
+ char buf[1024];
+ int printed = 0;
+ int len, j;
+ char **aliases;
+
+ pmu = NULL;
+ len = 0;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL)
+ list_for_each_entry(alias, &pmu->aliases, list)
+ len++;
+ aliases = malloc(sizeof(char *) * len);
+ if (!aliases)
+ return;
+ pmu = NULL;
+ j = 0;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL)
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ char *name = format_alias(buf, sizeof(buf), pmu, alias);
+ bool is_cpu = !strcmp(pmu->name, "cpu");
+
+ if (event_glob != NULL &&
+ !(strglobmatch(name, event_glob) ||
+ (!is_cpu && strglobmatch(alias->name,
+ event_glob))))
+ continue;
+ aliases[j] = name;
+ if (is_cpu && !name_only)
+ aliases[j] = format_alias_or(buf, sizeof(buf),
+ pmu, alias);
+ aliases[j] = strdup(aliases[j]);
+ j++;
+ }
+ len = j;
+ qsort(aliases, len, sizeof(char *), cmp_string);
+ for (j = 0; j < len; j++) {
+ if (name_only) {
+ printf("%s ", aliases[j]);
+ continue;
+ }
+ printf(" %-50s [Kernel PMU event]\n", aliases[j]);
+ zfree(&aliases[j]);
+ printed++;
+ }
+ if (printed)
+ printf("\n");
+ free(aliases);
+}
+
+bool pmu_have_event(const char *pname, const char *name)
+{
+ struct perf_pmu *pmu;
+ struct perf_pmu_alias *alias;
+
+ pmu = NULL;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+ if (strcmp(pname, pmu->name))
+ continue;
+ list_for_each_entry(alias, &pmu->aliases, list)
+ if (!strcmp(alias->name, name))
+ return true;
+ }
+ return false;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
new file mode 100644
index 00000000000..c14a543ce1f
--- /dev/null
+++ b/tools/perf/util/pmu.h
@@ -0,0 +1,49 @@
+#ifndef __PMU_H
+#define __PMU_H
+
+#include <linux/bitmap.h>
+#include <linux/perf_event.h>
+#include <stdbool.h>
+
+enum {
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
+};
+
+#define PERF_PMU_FORMAT_BITS 64
+
+struct perf_pmu {
+ char *name;
+ __u32 type;
+ struct cpu_map *cpus;
+ struct list_head format;
+ struct list_head aliases;
+ struct list_head list;
+};
+
+struct perf_pmu *perf_pmu__find(const char *name);
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+ struct list_head *head_terms);
+int perf_pmu__config_terms(struct list_head *formats,
+ struct perf_event_attr *attr,
+ struct list_head *head_terms);
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+ const char **unit, double *scale);
+struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
+ struct list_head *head_terms);
+int perf_pmu_wrap(void);
+void perf_pmu_error(struct list_head *list, char *name, char const *msg);
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+ int config, unsigned long *bits);
+void perf_pmu__set_format(unsigned long *bits, long from, long to);
+int perf_pmu__format_parse(char *dir, struct list_head *head);
+
+struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
+
+void print_pmu_events(const char *event_glob, bool name_only);
+bool pmu_have_event(const char *pname, const char *name);
+
+int perf_pmu__test(void);
+#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
new file mode 100644
index 00000000000..a15d9fbd7c0
--- /dev/null
+++ b/tools/perf/util/pmu.l
@@ -0,0 +1,43 @@
+%option prefix="perf_pmu_"
+
+%{
+#include <stdlib.h>
+#include <linux/bitops.h>
+#include "pmu.h"
+#include "pmu-bison.h"
+
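+/* Convert the matched numeric token; returns PP_VALUE, or PP_ERROR if strtoul fails */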
+static int value(int base)
+{
+ long num;
+
+ errno = 0;
+ num = strtoul(perf_pmu_text, NULL, base);
+ if (errno)
+ return PP_ERROR;
+
+ perf_pmu_lval.num = num;
+ return PP_VALUE;
+}
+
+%}
+
+num_dec [0-9]+
+
+%%
+
+{num_dec} { return value(10); }
+config { return PP_CONFIG; }
+config1 { return PP_CONFIG1; }
+config2 { return PP_CONFIG2; }
+- { return '-'; }
+: { return ':'; }
+, { return ','; }
+. { ; }
+\n { ; }
+
+%%
+
+int perf_pmu_wrap(void)
+{
+ return 1;
+}
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
new file mode 100644
index 00000000000..bfd7e850986
--- /dev/null
+++ b/tools/perf/util/pmu.y
@@ -0,0 +1,92 @@
+
+%parse-param {struct list_head *format}
+%parse-param {char *name}
+
+%{
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <string.h>
+#include "pmu.h"
+
+extern int perf_pmu_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+ if (val) \
+ YYABORT; \
+} while (0)
+
+%}
+
+%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_VALUE PP_ERROR
+%type <num> PP_VALUE
+%type <bits> bit_term
+%type <bits> bits
+
+%union
+{
+ unsigned long num;
+ DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+}
+
+%%
+
+format:
+format format_term
+|
+format_term
+
+format_term:
+PP_CONFIG ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ $3));
+}
+|
+PP_CONFIG1 ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ $3));
+}
+|
+PP_CONFIG2 ':' bits
+{
+ ABORT_ON(perf_pmu__new_format(format, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
+ $3));
+}
+
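+/* A bit list is a comma-separated set of single bits or 'from-to' ranges, OR'd together */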
+bits:
+bits ',' bit_term
+{
+ bitmap_or($$, $1, $3, 64);
+}
+|
+bit_term
+{
+ memcpy($$, $1, sizeof($1));
+}
+
+bit_term:
+PP_VALUE '-' PP_VALUE
+{
+ perf_pmu__set_format($$, $1, $3);
+}
+|
+PP_VALUE
+{
+ perf_pmu__set_format($$, $1, 0);
+}
+
+%%
+
+void perf_pmu_error(struct list_head *list __maybe_unused,
+ char *name __maybe_unused,
+ char const *msg __maybe_unused)
+{
+}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 3b6a5297bf1..9a0a1839a37 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -19,7 +19,6 @@
*
*/
-#define _GNU_SOURCE
#include <sys/utsname.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -31,24 +30,23 @@
#include <string.h>
#include <stdarg.h>
#include <limits.h>
+#include <elf.h>
-#undef _GNU_SOURCE
#include "util.h"
#include "event.h"
-#include "string.h"
#include "strlist.h"
#include "debug.h"
#include "cache.h"
#include "color.h"
#include "symbol.h"
#include "thread.h"
-#include "debugfs.h"
-#include "trace-event.h" /* For __unused */
+#include <api/fs/debugfs.h>
+#include "trace-event.h" /* For __maybe_unused */
#include "probe-event.h"
#include "probe-finder.h"
+#include "session.h"
#define MAX_CMDLEN 256
-#define MAX_PROBE_ARGS 128
#define PERFPROBE_GROUP "probe"
bool probe_event_dry_run; /* Dry run flag */
@@ -72,31 +70,32 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
}
static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
-static struct machine machine;
+static void clear_probe_trace_event(struct probe_trace_event *tev);
+static struct machine *host_machine;
/* Initialize symbol maps and path of vmlinux/modules */
-static int init_vmlinux(void)
+static int init_symbol_maps(bool user_only)
{
int ret;
symbol_conf.sort_by_name = true;
- if (symbol_conf.vmlinux_name == NULL)
- symbol_conf.try_vmlinux_path = true;
- else
- pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
ret = symbol__init();
if (ret < 0) {
pr_debug("Failed to init symbol map.\n");
goto out;
}
- ret = machine__init(&machine, "", HOST_KERNEL_ID);
- if (ret < 0)
- goto out;
+ if (host_machine || user_only) /* already initialized */
+ return 0;
- if (machine__create_kernel_maps(&machine) < 0) {
- pr_debug("machine__create_kernel_maps ");
- goto out;
+ if (symbol_conf.vmlinux_name)
+ pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+
+ host_machine = machine__new_host();
+ if (!host_machine) {
+ pr_debug("machine__new_host() failed.\n");
+ symbol__exit();
+ ret = -1;
}
out:
if (ret < 0)
@@ -104,106 +103,386 @@ out:
return ret;
}
+static void exit_symbol_maps(void)
+{
+ if (host_machine) {
+ machine__delete(host_machine);
+ host_machine = NULL;
+ }
+ symbol__exit();
+}
+
static struct symbol *__find_kernel_function_by_name(const char *name,
struct map **mapp)
{
- return machine__find_kernel_function_by_name(&machine, name, mapp,
+ return machine__find_kernel_function_by_name(host_machine, name, mapp,
NULL);
}
-const char *kernel_get_module_path(const char *module)
+static struct symbol *__find_kernel_function(u64 addr, struct map **mapp)
+{
+ return machine__find_kernel_function(host_machine, addr, mapp, NULL);
+}
+
+static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
+{
+ /* kmap->ref_reloc_sym should be set if host_machine is initialized */
+ struct kmap *kmap;
+
+ if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0)
+ return NULL;
+
+ kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
+ return kmap->ref_reloc_sym;
+}
+
+static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
+{
+ struct ref_reloc_sym *reloc_sym;
+ struct symbol *sym;
+ struct map *map;
+
+ /* ref_reloc_sym is just a label. Need a special fix */
+ reloc_sym = kernel_get_ref_reloc_sym();
+ if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
+ return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
+ else {
+ sym = __find_kernel_function_by_name(name, &map);
+ if (sym)
+ return map->unmap_ip(map, sym->start) -
+ ((reloc) ? 0 : map->reloc);
+ }
+ return 0;
+}
+
+static struct map *kernel_get_module_map(const char *module)
+{
+ struct rb_node *nd;
+ struct map_groups *grp = &host_machine->kmaps;
+
+ /* A file path -- this is an offline module */
+ if (module && strchr(module, '/'))
+ return machine__new_module(host_machine, 0, module);
+
+ if (!module)
+ module = "kernel";
+
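+ /* Module dsos have a short_name of the form "[name]"; compare skipping the brackets */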
+ for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ if (strncmp(pos->dso->short_name + 1, module,
+ pos->dso->short_name_len - 2) == 0) {
+ return pos;
+ }
+ }
+ return NULL;
+}
+
+static struct dso *kernel_get_module_dso(const char *module)
{
struct dso *dso;
+ struct map *map;
+ const char *vmlinux_name;
if (module) {
- list_for_each_entry(dso, &machine.kernel_dsos, node) {
+ list_for_each_entry(dso, &host_machine->kernel_dsos, node) {
if (strncmp(dso->short_name + 1, module,
dso->short_name_len - 2) == 0)
goto found;
}
pr_debug("Failed to find module %s.\n", module);
return NULL;
+ }
+
+ map = host_machine->vmlinux_maps[MAP__FUNCTION];
+ dso = map->dso;
+
+ vmlinux_name = symbol_conf.vmlinux_name;
+ if (vmlinux_name) {
+ if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0)
+ return NULL;
} else {
- dso = machine.vmlinux_maps[MAP__FUNCTION]->dso;
- if (dso__load_vmlinux_path(dso,
- machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+ if (dso__load_vmlinux_path(dso, map, NULL) <= 0) {
pr_debug("Failed to load kernel map.\n");
return NULL;
}
}
found:
- return dso->long_name;
+ return dso;
}
-#ifdef DWARF_SUPPORT
-static int open_vmlinux(const char *module)
+const char *kernel_get_module_path(const char *module)
{
- const char *path = kernel_get_module_path(module);
- if (!path) {
- pr_err("Failed to find path of %s module", module ?: "kernel");
- return -ENOENT;
+ struct dso *dso = kernel_get_module_dso(module);
+ return (dso) ? dso->long_name : NULL;
+}
+
+static int convert_exec_to_group(const char *exec, char **result)
+{
+ char *ptr1, *ptr2, *exec_copy;
+ char buf[64];
+ int ret;
+
+ exec_copy = strdup(exec);
+ if (!exec_copy)
+ return -ENOMEM;
+
+ ptr1 = basename(exec_copy);
+ if (!ptr1) {
+ ret = -EINVAL;
+ goto out;
}
- pr_debug("Try to open %s\n", path);
- return open(path, O_RDONLY);
+
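+ /* Cut the basename at the first '-', '.' or '_' so the group name stays short */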
+ ptr2 = strpbrk(ptr1, "-._");
+ if (ptr2)
+ *ptr2 = '\0';
+ ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1);
+ if (ret < 0)
+ goto out;
+
+ *result = strdup(buf);
+ ret = *result ? 0 : -ENOMEM;
+
+out:
+ free(exec_copy);
+ return ret;
+}
+
+static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
+{
+ int i;
+
+ for (i = 0; i < ntevs; i++)
+ clear_probe_trace_event(tevs + i);
+}
+
+#ifdef HAVE_DWARF_SUPPORT
+
+/* Open new debuginfo of given module */
+static struct debuginfo *open_debuginfo(const char *module)
+{
+ const char *path = module;
+
+ if (!module || !strchr(module, '/')) {
+ path = kernel_get_module_path(module);
+ if (!path) {
+ pr_err("Failed to find path of %s module.\n",
+ module ?: "kernel");
+ return NULL;
+ }
+ }
+ return debuginfo__new(path);
+}
+
+static int get_text_start_address(const char *exec, unsigned long *address)
+{
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ int fd, ret = -ENOENT;
+
+ fd = open(exec, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return -EINVAL;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out;
+
+ if (!elf_section_by_name(elf, &ehdr, &shdr, ".text", NULL))
+ goto out;
+
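+ /* The gap between .text's load address and its file offset */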
+ *address = shdr.sh_addr - shdr.sh_offset;
+ ret = 0;
+out:
+ elf_end(elf);
+ return ret;
}
/*
* Convert trace point to probe point with debuginfo
- * Currently only handles kprobes.
*/
-static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
+static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
+ struct perf_probe_point *pp,
+ bool is_kprobe)
{
- struct symbol *sym;
- struct map *map;
- u64 addr;
+ struct debuginfo *dinfo = NULL;
+ unsigned long stext = 0;
+ u64 addr = tp->address;
int ret = -ENOENT;
- sym = __find_kernel_function_by_name(tp->symbol, &map);
- if (sym) {
- addr = map->unmap_ip(map, sym->start + tp->offset);
- pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
- tp->offset, addr);
- ret = find_perf_probe_point((unsigned long)addr, pp);
+ /* convert the address to dwarf address */
+ if (!is_kprobe) {
+ if (!addr) {
+ ret = -EINVAL;
+ goto error;
+ }
+ ret = get_text_start_address(tp->module, &stext);
+ if (ret < 0)
+ goto error;
+ addr += stext;
+ } else {
+ addr = kernel_get_symbol_address_by_name(tp->symbol, false);
+ if (addr == 0)
+ goto error;
+ addr += tp->offset;
}
- if (ret <= 0) {
- pr_debug("Failed to find corresponding probes from "
- "debuginfo. Use kprobe event information.\n");
- pp->function = strdup(tp->symbol);
- if (pp->function == NULL)
+
+ pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
+ tp->module ? : "kernel");
+
+ dinfo = open_debuginfo(tp->module);
+ if (dinfo) {
+ ret = debuginfo__find_probe_point(dinfo,
+ (unsigned long)addr, pp);
+ debuginfo__delete(dinfo);
+ } else {
+ pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", addr);
+ ret = -ENOENT;
+ }
+
+ if (ret > 0) {
+ pp->retprobe = tp->retprobe;
+ return 0;
+ }
+error:
+ pr_debug("Failed to find corresponding probes from debuginfo.\n");
+ return ret ? : -ENOENT;
+}
+
+static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *exec)
+{
+ int i, ret = 0;
+ unsigned long stext = 0;
+
+ if (!exec)
+ return 0;
+
+ ret = get_text_start_address(exec, &stext);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ntevs && ret >= 0; i++) {
+ /* point.address is the address of point.symbol + point.offset */
+ tevs[i].point.address -= stext;
+ tevs[i].point.module = strdup(exec);
+ if (!tevs[i].point.module) {
+ ret = -ENOMEM;
+ break;
+ }
+ tevs[i].uprobes = true;
+ }
+
+ return ret;
+}
+
+static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module)
+{
+ int i, ret = 0;
+ char *tmp;
+
+ if (!module)
+ return 0;
+
+ tmp = strrchr(module, '/');
+ if (tmp) {
+ /* This is a module path -- get the module name */
+ module = strdup(tmp + 1);
+ if (!module)
return -ENOMEM;
- pp->offset = tp->offset;
+ tmp = strchr(module, '.');
+ if (tmp)
+ *tmp = '\0';
+ tmp = (char *)module; /* For free() */
}
- pp->retprobe = tp->retprobe;
+ for (i = 0; i < ntevs; i++) {
+ tevs[i].point.module = strdup(module);
+ if (!tevs[i].point.module) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ free(tmp);
+ return ret;
+}
+
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module,
+ bool uprobe)
+{
+ struct ref_reloc_sym *reloc_sym;
+ char *tmp;
+ int i;
+
+ if (uprobe)
+ return add_exec_to_probe_trace_events(tevs, ntevs, module);
+
+ /* Note that currently ref_reloc_sym based probe is not for drivers */
+ if (module)
+ return add_module_to_probe_trace_events(tevs, ntevs, module);
+
+ reloc_sym = kernel_get_ref_reloc_sym();
+ if (!reloc_sym) {
+ pr_warning("Relocated base symbol is not found!\n");
+ return -EINVAL;
+ }
+
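+ /* Rewrite each address-based point as ref_reloc_sym name + offset */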
+ for (i = 0; i < ntevs; i++) {
+ if (tevs[i].point.address) {
+ tmp = strdup(reloc_sym->name);
+ if (!tmp)
+ return -ENOMEM;
+ free(tevs[i].point.symbol);
+ tevs[i].point.symbol = tmp;
+ tevs[i].point.offset = tevs[i].point.address -
+ reloc_sym->unrelocated_addr;
+ }
+ }
return 0;
}
/* Try to find perf_probe_event with debuginfo */
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
- struct probe_trace_event **tevs,
- int max_tevs, const char *module)
+ struct probe_trace_event **tevs,
+ int max_tevs, const char *target)
{
bool need_dwarf = perf_probe_event_need_dwarf(pev);
- int fd, ntevs;
+ struct debuginfo *dinfo;
+ int ntevs, ret = 0;
+
+ dinfo = open_debuginfo(target);
- fd = open_vmlinux(module);
- if (fd < 0) {
+ if (!dinfo) {
if (need_dwarf) {
pr_warning("Failed to open debuginfo file.\n");
- return fd;
+ return -ENOENT;
}
- pr_debug("Could not open vmlinux. Try to use symbols.\n");
+ pr_debug("Could not open debuginfo. Try to use symbols.\n");
return 0;
}
- /* Searching trace events corresponding to probe event */
- ntevs = find_probe_trace_events(fd, pev, tevs, max_tevs);
- close(fd);
+ pr_debug("Try to find probe point from debuginfo.\n");
+ /* Searching trace events corresponding to a probe event */
+ ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
+
+ debuginfo__delete(dinfo);
if (ntevs > 0) { /* Succeeded to find trace events */
- pr_debug("find %d probe_trace_events.\n", ntevs);
- return ntevs;
+ pr_debug("Found %d probe_trace_events.\n", ntevs);
+ ret = post_process_probe_trace_events(*tevs, ntevs,
+ target, pev->uprobes);
+ if (ret < 0) {
+ clear_probe_trace_events(*tevs, ntevs);
+ zfree(tevs);
+ }
+ return ret < 0 ? ret : ntevs;
}
if (ntevs == 0) { /* No error but failed to find probe point. */
@@ -217,7 +496,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
pr_warning("Warning: No dwarf info found in the vmlinux - "
"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
if (!need_dwarf) {
- pr_debug("Trying to use symbols.\nn");
+ pr_debug("Trying to use symbols.\n");
return 0;
}
}
@@ -269,15 +548,13 @@ static int get_real_path(const char *raw_path, const char *comp_dir,
case EFAULT:
raw_path = strchr(++raw_path, '/');
if (!raw_path) {
- free(*new_path);
- *new_path = NULL;
+ zfree(new_path);
return -ENOENT;
}
continue;
default:
- free(*new_path);
- *new_path = NULL;
+ zfree(new_path);
return -errno;
}
}
@@ -286,72 +563,76 @@ static int get_real_path(const char *raw_path, const char *comp_dir,
#define LINEBUF_SIZE 256
#define NR_ADDITIONAL_LINES 2
-static int show_one_line(FILE *fp, int l, bool skip, bool show_num)
+static int __show_one_line(FILE *fp, int l, bool skip, bool show_num)
{
char buf[LINEBUF_SIZE];
- const char *color = PERF_COLOR_BLUE;
-
- if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
- goto error;
- if (!skip) {
- if (show_num)
- fprintf(stdout, "%7d %s", l, buf);
- else
- color_fprintf(stdout, color, " %s", buf);
- }
+ const char *color = show_num ? "" : PERF_COLOR_BLUE;
+ const char *prefix = NULL;
- while (strlen(buf) == LINEBUF_SIZE - 1 &&
- buf[LINEBUF_SIZE - 2] != '\n') {
+ do {
if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
goto error;
- if (!skip) {
- if (show_num)
- fprintf(stdout, "%s", buf);
- else
- color_fprintf(stdout, color, "%s", buf);
+ if (skip)
+ continue;
+ if (!prefix) {
+ prefix = show_num ? "%7d " : " ";
+ color_fprintf(stdout, color, prefix, l);
}
- }
+ color_fprintf(stdout, color, "%s", buf);
- return 0;
+ } while (strchr(buf, '\n') == NULL);
+
+ return 1;
error:
- if (feof(fp))
- pr_warning("Source file is shorter than expected.\n");
- else
+ if (ferror(fp)) {
pr_warning("File read error: %s\n", strerror(errno));
+ return -1;
+ }
+ return 0;
+}
- return -1;
+static int _show_one_line(FILE *fp, int l, bool skip, bool show_num)
+{
+ int rv = __show_one_line(fp, l, skip, show_num);
+ if (rv == 0) {
+ pr_warning("Source file is shorter than expected.\n");
+ rv = -1;
+ }
+ return rv;
}
+#define show_one_line_with_num(f,l) _show_one_line(f,l,false,true)
+#define show_one_line(f,l) _show_one_line(f,l,false,false)
+#define skip_one_line(f,l) _show_one_line(f,l,true,false)
+#define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false)
+
/*
* Show line-range always requires debuginfo to find source file and
* line number.
*/
-int show_line_range(struct line_range *lr, const char *module)
+static int __show_line_range(struct line_range *lr, const char *module)
{
int l = 1;
- struct line_node *ln;
+ struct int_node *ln;
+ struct debuginfo *dinfo;
FILE *fp;
- int fd, ret;
+ int ret;
char *tmp;
/* Search a line range */
- ret = init_vmlinux();
- if (ret < 0)
- return ret;
-
- fd = open_vmlinux(module);
- if (fd < 0) {
+ dinfo = open_debuginfo(module);
+ if (!dinfo) {
pr_warning("Failed to open debuginfo file.\n");
- return fd;
+ return -ENOENT;
}
- ret = find_line_range(fd, lr);
- close(fd);
- if (ret == 0) {
+ ret = debuginfo__find_line_range(dinfo, lr);
+ debuginfo__delete(dinfo);
+ if (ret == 0 || ret == -ENOENT) {
pr_warning("Specified source line is not found.\n");
return -ENOENT;
} else if (ret < 0) {
- pr_warning("Debuginfo analysis failed. (%d)\n", ret);
+ pr_warning("Debuginfo analysis failed.\n");
return ret;
}
@@ -360,17 +641,17 @@ int show_line_range(struct line_range *lr, const char *module)
ret = get_real_path(tmp, lr->comp_dir, &lr->path);
free(tmp); /* Free old path */
if (ret < 0) {
- pr_warning("Failed to find source file. (%d)\n", ret);
+ pr_warning("Failed to find source file path.\n");
return ret;
}
setup_pager();
if (lr->function)
- fprintf(stdout, "<%s:%d>\n", lr->function,
+ fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path,
lr->start - lr->offset);
else
- fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
+ fprintf(stdout, "<%s:%d>\n", lr->path, lr->start);
fp = fopen(lr->path, "r");
if (fp == NULL) {
@@ -379,194 +660,288 @@ int show_line_range(struct line_range *lr, const char *module)
return -errno;
}
/* Skip to starting line number */
- while (l < lr->start && ret >= 0)
- ret = show_one_line(fp, l++, true, false);
- if (ret < 0)
- goto end;
+ while (l < lr->start) {
+ ret = skip_one_line(fp, l++);
+ if (ret < 0)
+ goto end;
+ }
- list_for_each_entry(ln, &lr->line_list, list) {
- while (ln->line > l && ret >= 0)
- ret = show_one_line(fp, (l++) - lr->offset,
- false, false);
- if (ret >= 0)
- ret = show_one_line(fp, (l++) - lr->offset,
- false, true);
+ intlist__for_each(ln, lr->line_list) {
+ for (; ln->i > l; l++) {
+ ret = show_one_line(fp, l - lr->offset);
+ if (ret < 0)
+ goto end;
+ }
+ ret = show_one_line_with_num(fp, l++ - lr->offset);
if (ret < 0)
goto end;
}
if (lr->end == INT_MAX)
lr->end = l + NR_ADDITIONAL_LINES;
- while (l <= lr->end && !feof(fp) && ret >= 0)
- ret = show_one_line(fp, (l++) - lr->offset, false, false);
+ while (l <= lr->end) {
+ ret = show_one_line_or_eof(fp, l++ - lr->offset);
+ if (ret <= 0)
+ break;
+ }
end:
fclose(fp);
return ret;
}
-static int show_available_vars_at(int fd, struct perf_probe_event *pev,
- int max_vls, bool externs)
+int show_line_range(struct line_range *lr, const char *module)
+{
+ int ret;
+
+ ret = init_symbol_maps(false);
+ if (ret < 0)
+ return ret;
+ ret = __show_line_range(lr, module);
+ exit_symbol_maps();
+
+ return ret;
+}
+
+static int show_available_vars_at(struct debuginfo *dinfo,
+ struct perf_probe_event *pev,
+ int max_vls, struct strfilter *_filter,
+ bool externs)
{
char *buf;
- int ret, i;
+ int ret, i, nvars;
struct str_node *node;
struct variable_list *vls = NULL, *vl;
+ const char *var;
buf = synthesize_perf_probe_point(&pev->point);
if (!buf)
return -EINVAL;
pr_debug("Searching variables at %s\n", buf);
- ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
- if (ret > 0) {
- /* Some variables were found */
- fprintf(stdout, "Available variables at %s\n", buf);
- for (i = 0; i < ret; i++) {
- vl = &vls[i];
- /*
- * A probe point might be converted to
- * several trace points.
- */
- fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
- vl->point.offset);
- free(vl->point.symbol);
- if (vl->vars) {
- strlist__for_each(node, vl->vars)
+ ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
+ max_vls, externs);
+ if (ret <= 0) {
+ if (ret == 0 || ret == -ENOENT) {
+ pr_err("Failed to find the address of %s\n", buf);
+ ret = -ENOENT;
+ } else
+ pr_warning("Debuginfo analysis failed.\n");
+ goto end;
+ }
+
+ /* Some variables are found */
+ fprintf(stdout, "Available variables at %s\n", buf);
+ for (i = 0; i < ret; i++) {
+ vl = &vls[i];
+ /*
+ * A probe point might be converted to
+ * several trace points.
+ */
+ fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
+ vl->point.offset);
+ zfree(&vl->point.symbol);
+ nvars = 0;
+ if (vl->vars) {
+ strlist__for_each(node, vl->vars) {
+ var = strchr(node->s, '\t') + 1;
+ if (strfilter__compare(_filter, var)) {
fprintf(stdout, "\t\t%s\n", node->s);
- strlist__delete(vl->vars);
- } else
- fprintf(stdout, "(No variables)\n");
+ nvars++;
+ }
+ }
+ strlist__delete(vl->vars);
}
- free(vls);
- } else
- pr_err("Failed to find variables at %s (%d)\n", buf, ret);
-
+ if (nvars == 0)
+ fprintf(stdout, "\t\t(No matched variables)\n");
+ }
+ free(vls);
+end:
free(buf);
return ret;
}
/* Show available variables on given probe point */
int show_available_vars(struct perf_probe_event *pevs, int npevs,
- int max_vls, const char *module, bool externs)
+ int max_vls, const char *module,
+ struct strfilter *_filter, bool externs)
{
- int i, fd, ret = 0;
+ int i, ret = 0;
+ struct debuginfo *dinfo;
- ret = init_vmlinux();
+ ret = init_symbol_maps(false);
if (ret < 0)
return ret;
- fd = open_vmlinux(module);
- if (fd < 0) {
+ dinfo = open_debuginfo(module);
+ if (!dinfo) {
pr_warning("Failed to open debuginfo file.\n");
- return fd;
+ ret = -ENOENT;
+ goto out;
}
setup_pager();
for (i = 0; i < npevs && ret >= 0; i++)
- ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
+ ret = show_available_vars_at(dinfo, &pevs[i], max_vls, _filter,
+ externs);
- close(fd);
+ debuginfo__delete(dinfo);
+out:
+ exit_symbol_maps();
return ret;
}
-#else /* !DWARF_SUPPORT */
+#else /* !HAVE_DWARF_SUPPORT */
-static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
- struct perf_probe_point *pp)
+static int
+find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
+ struct perf_probe_point *pp __maybe_unused,
+ bool is_kprobe __maybe_unused)
{
- struct symbol *sym;
-
- sym = __find_kernel_function_by_name(tp->symbol, NULL);
- if (!sym) {
- pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
- return -ENOENT;
- }
- pp->function = strdup(tp->symbol);
- if (pp->function == NULL)
- return -ENOMEM;
- pp->offset = tp->offset;
- pp->retprobe = tp->retprobe;
-
- return 0;
+ return -ENOSYS;
}
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
- struct probe_trace_event **tevs __unused,
- int max_tevs __unused, const char *mod __unused)
+ struct probe_trace_event **tevs __maybe_unused,
+ int max_tevs __maybe_unused,
+ const char *target __maybe_unused)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
+
return 0;
}
-int show_line_range(struct line_range *lr __unused, const char *module __unused)
+int show_line_range(struct line_range *lr __maybe_unused,
+ const char *module __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
-int show_available_vars(struct perf_probe_event *pevs __unused,
- int npevs __unused, int max_vls __unused,
- const char *module __unused, bool externs __unused)
+int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
+ int npevs __maybe_unused, int max_vls __maybe_unused,
+ const char *module __maybe_unused,
+ struct strfilter *filter __maybe_unused,
+ bool externs __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
#endif
+void line_range__clear(struct line_range *lr)
+{
+ free(lr->function);
+ free(lr->file);
+ free(lr->path);
+ free(lr->comp_dir);
+ intlist__delete(lr->line_list);
+ memset(lr, 0, sizeof(*lr));
+}
+
+int line_range__init(struct line_range *lr)
+{
+ memset(lr, 0, sizeof(*lr));
+ lr->line_list = intlist__new(NULL);
+ if (!lr->line_list)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+static int parse_line_num(char **ptr, int *val, const char *what)
+{
+ const char *start = *ptr;
+
+ errno = 0;
+ *val = strtol(*ptr, ptr, 0);
+ if (errno || *ptr == start) {
+ semantic_error("'%s' is not a valid number.\n", what);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Stuff 'lr' according to the line range described by 'arg'.
+ * The line range syntax is described by:
+ *
+ * SRC[:SLN[+NUM|-ELN]]
+ * FNC[@SRC][:SLN[+NUM|-ELN]]
+ */
int parse_line_range_desc(const char *arg, struct line_range *lr)
{
- const char *ptr;
- char *tmp;
- /*
- * <Syntax>
- * SRC:SLN[+NUM|-ELN]
- * FUNC[:SLN[+NUM|-ELN]]
- */
- ptr = strchr(arg, ':');
- if (ptr) {
- lr->start = (int)strtoul(ptr + 1, &tmp, 0);
- if (*tmp == '+') {
- lr->end = lr->start + (int)strtoul(tmp + 1, &tmp, 0);
- lr->end--; /*
- * Adjust the number of lines here.
- * If the number of lines == 1, the
- * the end of line should be equal to
- * the start of line.
- */
- } else if (*tmp == '-')
- lr->end = (int)strtoul(tmp + 1, &tmp, 0);
- else
- lr->end = INT_MAX;
+ char *range, *file, *name = strdup(arg);
+ int err;
+
+ if (!name)
+ return -ENOMEM;
+
+ lr->start = 0;
+ lr->end = INT_MAX;
+
+ range = strchr(name, ':');
+ if (range) {
+ *range++ = '\0';
+
+ err = parse_line_num(&range, &lr->start, "start line");
+ if (err)
+ goto err;
+
+ if (*range == '+' || *range == '-') {
+ const char c = *range++;
+
+ err = parse_line_num(&range, &lr->end, "end line");
+ if (err)
+ goto err;
+
+ if (c == '+') {
+ lr->end += lr->start;
+ /*
+ * Adjust the number of lines here.
+ * If the number of lines == 1,
+ * the end of line should be equal to
+ * the start of line.
+ */
+ lr->end--;
+ }
+ }
+
pr_debug("Line range is %d to %d\n", lr->start, lr->end);
+
+ err = -EINVAL;
if (lr->start > lr->end) {
semantic_error("Start line must be smaller"
" than end line.\n");
- return -EINVAL;
+ goto err;
}
- if (*tmp != '\0') {
- semantic_error("Tailing with invalid character '%d'.\n",
- *tmp);
- return -EINVAL;
+ if (*range != '\0') {
+ semantic_error("Trailing with invalid str '%s'.\n", range);
+ goto err;
}
- tmp = strndup(arg, (ptr - arg));
- } else {
- tmp = strdup(arg);
- lr->end = INT_MAX;
}
- if (tmp == NULL)
- return -ENOMEM;
-
- if (strchr(tmp, '.'))
- lr->file = tmp;
+ file = strchr(name, '@');
+ if (file) {
+ *file = '\0';
+ lr->file = strdup(++file);
+ if (lr->file == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+ lr->function = name;
+ } else if (strchr(name, '.'))
+ lr->file = name;
else
- lr->function = tmp;
+ lr->function = name;
return 0;
+err:
+ free(name);
+ return err;
}
/* Check the name is good for event/group */
@@ -690,39 +1065,40 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
/* Exclusion check */
if (pp->lazy_line && pp->line) {
- semantic_error("Lazy pattern can't be used with line number.");
+ semantic_error("Lazy pattern can't be used with"
+ " line number.\n");
return -EINVAL;
}
if (pp->lazy_line && pp->offset) {
- semantic_error("Lazy pattern can't be used with offset.");
+ semantic_error("Lazy pattern can't be used with offset.\n");
return -EINVAL;
}
if (pp->line && pp->offset) {
- semantic_error("Offset can't be used with line number.");
+ semantic_error("Offset can't be used with line number.\n");
return -EINVAL;
}
if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
semantic_error("File always requires line number or "
- "lazy pattern.");
+ "lazy pattern.\n");
return -EINVAL;
}
if (pp->offset && !pp->function) {
- semantic_error("Offset requires an entry function.");
+ semantic_error("Offset requires an entry function.\n");
return -EINVAL;
}
if (pp->retprobe && !pp->function) {
- semantic_error("Return probe requires an entry function.");
+ semantic_error("Return probe requires an entry function.\n");
return -EINVAL;
}
if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
semantic_error("Offset/Line/Lazy pattern can't be used with "
- "return probe.");
+ "return probe.\n");
return -EINVAL;
}
@@ -892,11 +1268,12 @@ bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
/* Parse probe_events event into struct probe_point */
static int parse_probe_trace_command(const char *cmd,
- struct probe_trace_event *tev)
+ struct probe_trace_event *tev)
{
struct probe_trace_point *tp = &tev->point;
char pr;
char *p;
+ char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str;
int ret, i, argc;
char **argv;
@@ -913,23 +1290,54 @@ static int parse_probe_trace_command(const char *cmd,
}
/* Scan event and group name. */
- ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
- &pr, (float *)(void *)&tev->group,
- (float *)(void *)&tev->event);
- if (ret != 3) {
+ argv0_str = strdup(argv[0]);
+ if (argv0_str == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fmt1_str = strtok_r(argv0_str, ":", &fmt);
+ fmt2_str = strtok_r(NULL, "/", &fmt);
+ fmt3_str = strtok_r(NULL, " \t", &fmt);
+ if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL
+ || fmt3_str == NULL) {
semantic_error("Failed to parse event name: %s\n", argv[0]);
ret = -EINVAL;
goto out;
}
+ pr = fmt1_str[0];
+ tev->group = strdup(fmt2_str);
+ tev->event = strdup(fmt3_str);
+ if (tev->group == NULL || tev->event == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
tp->retprobe = (pr == 'r');
- /* Scan function name and offset */
- ret = sscanf(argv[1], "%a[^+]+%lu", (float *)(void *)&tp->symbol,
- &tp->offset);
- if (ret == 1)
- tp->offset = 0;
+ /* Scan module name(if there), function name and offset */
+ p = strchr(argv[1], ':');
+ if (p) {
+ tp->module = strndup(argv[1], p - argv[1]);
+ p++;
+ } else
+ p = argv[1];
+ fmt1_str = strtok_r(p, "+", &fmt);
+ if (fmt1_str[0] == '0') /* only the address started with 0x */
+ tp->address = strtoul(fmt1_str, NULL, 0);
+ else {
+ /* Only the symbol-based probe has offset */
+ tp->symbol = strdup(fmt1_str);
+ if (tp->symbol == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fmt2_str = strtok_r(NULL, "", &fmt);
+ if (fmt2_str == NULL)
+ tp->offset = 0;
+ else
+ tp->offset = strtoul(fmt2_str, NULL, 10);
+ }
tev->nargs = argc - 2;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
@@ -953,6 +1361,7 @@ static int parse_probe_trace_command(const char *cmd,
}
ret = 0;
out:
+ free(argv0_str);
argv_free(argv);
return ret;
}
@@ -996,7 +1405,7 @@ int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
return tmp - buf;
error:
- pr_debug("Failed to synthesize perf probe argument: %s",
+ pr_debug("Failed to synthesize perf probe argument: %s\n",
strerror(-ret));
return ret;
}
@@ -1024,13 +1433,13 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
goto error;
}
if (pp->file) {
- len = strlen(pp->file) - 31;
- if (len < 0)
- len = 0;
- tmp = strchr(pp->file + len, '/');
- if (!tmp)
- tmp = pp->file + len;
- ret = e_snprintf(file, 32, "@%s", tmp + 1);
+ tmp = pp->file;
+ len = strlen(tmp);
+ if (len > 30) {
+ tmp = strchr(pp->file + len - 30, '/');
+ tmp = tmp ? tmp + 1 : pp->file + len - 30;
+ }
+ ret = e_snprintf(file, 32, "@%s", tmp);
if (ret <= 0)
goto error;
}
@@ -1046,10 +1455,9 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
return buf;
error:
- pr_debug("Failed to synthesize perf probe point: %s",
+ pr_debug("Failed to synthesize perf probe point: %s\n",
strerror(-ret));
- if (buf)
- free(buf);
+ free(buf);
return NULL;
}
@@ -1171,13 +1579,28 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
if (buf == NULL)
return NULL;
- len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s+%lu",
- tp->retprobe ? 'r' : 'p',
- tev->group, tev->event,
- tp->symbol, tp->offset);
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event);
if (len <= 0)
goto error;
+ /* Uprobes must have tp->address and tp->module */
+ if (tev->uprobes && (!tp->address || !tp->module))
+ goto error;
+
+ /* Use the tp->address for uprobes */
+ if (tev->uprobes)
+ ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
+ tp->module, tp->address);
+ else
+ ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
+ tp->module ?: "", tp->module ? ":" : "",
+ tp->symbol, tp->offset);
+
+ if (ret <= 0)
+ goto error;
+ len += ret;
+
for (i = 0; i < tev->nargs; i++) {
ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
MAX_CMDLEN - len);
@@ -1192,8 +1615,81 @@ error:
return NULL;
}
+static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
+ struct perf_probe_point *pp,
+ bool is_kprobe)
+{
+ struct symbol *sym = NULL;
+ struct map *map;
+ u64 addr;
+ int ret = -ENOENT;
+
+ if (!is_kprobe) {
+ map = dso__new_map(tp->module);
+ if (!map)
+ goto out;
+ addr = tp->address;
+ sym = map__find_symbol(map, addr, NULL);
+ } else {
+ addr = kernel_get_symbol_address_by_name(tp->symbol, true);
+ if (addr) {
+ addr += tp->offset;
+ sym = __find_kernel_function(addr, &map);
+ }
+ }
+ if (!sym)
+ goto out;
+
+ pp->retprobe = tp->retprobe;
+ pp->offset = addr - map->unmap_ip(map, sym->start);
+ pp->function = strdup(sym->name);
+ ret = pp->function ? 0 : -ENOMEM;
+
+out:
+ if (map && !is_kprobe) {
+ dso__delete(map->dso);
+ map__delete(map);
+ }
+
+ return ret;
+}
+
+static int convert_to_perf_probe_point(struct probe_trace_point *tp,
+ struct perf_probe_point *pp,
+ bool is_kprobe)
+{
+ char buf[128];
+ int ret;
+
+ ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe);
+ if (!ret)
+ return 0;
+ ret = find_perf_probe_point_from_map(tp, pp, is_kprobe);
+ if (!ret)
+ return 0;
+
+ pr_debug("Failed to find probe point from both of dwarf and map.\n");
+
+ if (tp->symbol) {
+ pp->function = strdup(tp->symbol);
+ pp->offset = tp->offset;
+ } else if (!tp->module && !is_kprobe) {
+ ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
+ if (ret < 0)
+ return ret;
+ pp->function = strdup(buf);
+ pp->offset = 0;
+ }
+ if (pp->function == NULL)
+ return -ENOMEM;
+
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
static int convert_to_perf_probe_event(struct probe_trace_event *tev,
- struct perf_probe_event *pev)
+ struct perf_probe_event *pev, bool is_kprobe)
{
char buf[64] = "";
int i, ret;
@@ -1205,7 +1701,7 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev,
return -ENOMEM;
/* Convert trace_point to probe_point */
- ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
+ ret = convert_to_perf_probe_point(&tev->point, &pev->point, is_kprobe);
if (ret < 0)
return ret;
@@ -1238,34 +1734,25 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
struct perf_probe_arg_field *field, *next;
int i;
- if (pev->event)
- free(pev->event);
- if (pev->group)
- free(pev->group);
- if (pp->file)
- free(pp->file);
- if (pp->function)
- free(pp->function);
- if (pp->lazy_line)
- free(pp->lazy_line);
+ free(pev->event);
+ free(pev->group);
+ free(pp->file);
+ free(pp->function);
+ free(pp->lazy_line);
+
for (i = 0; i < pev->nargs; i++) {
- if (pev->args[i].name)
- free(pev->args[i].name);
- if (pev->args[i].var)
- free(pev->args[i].var);
- if (pev->args[i].type)
- free(pev->args[i].type);
+ free(pev->args[i].name);
+ free(pev->args[i].var);
+ free(pev->args[i].type);
field = pev->args[i].field;
while (field) {
next = field->next;
- if (field->name)
- free(field->name);
+ zfree(&field->name);
free(field);
field = next;
}
}
- if (pev->args)
- free(pev->args);
+ free(pev->args);
memset(pev, 0, sizeof(*pev));
}
@@ -1274,19 +1761,14 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
struct probe_trace_arg_ref *ref, *next;
int i;
- if (tev->event)
- free(tev->event);
- if (tev->group)
- free(tev->group);
- if (tev->point.symbol)
- free(tev->point.symbol);
+ free(tev->event);
+ free(tev->group);
+ free(tev->point.symbol);
+ free(tev->point.module);
for (i = 0; i < tev->nargs; i++) {
- if (tev->args[i].name)
- free(tev->args[i].name);
- if (tev->args[i].value)
- free(tev->args[i].value);
- if (tev->args[i].type)
- free(tev->args[i].type);
+ free(tev->args[i].name);
+ free(tev->args[i].value);
+ free(tev->args[i].type);
ref = tev->args[i].ref;
while (ref) {
next = ref->next;
@@ -1294,12 +1776,30 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
ref = next;
}
}
- if (tev->args)
- free(tev->args);
+ free(tev->args);
memset(tev, 0, sizeof(*tev));
}
-static int open_kprobe_events(bool readwrite)
+static void print_warn_msg(const char *file, bool is_kprobe)
+{
+
+ if (errno == ENOENT) {
+ const char *config;
+
+ if (!is_kprobe)
+ config = "CONFIG_UPROBE_EVENTS";
+ else
+ config = "CONFIG_KPROBE_EVENTS";
+
+ pr_warning("%s file does not exist - please rebuild kernel"
+ " with %s.\n", file, config);
+ } else
+ pr_warning("Failed to open %s file: %s\n", file,
+ strerror(errno));
+}
+
+static int open_probe_events(const char *trace_file, bool readwrite,
+ bool is_kprobe)
{
char buf[PATH_MAX];
const char *__debugfs;
@@ -1311,27 +1811,31 @@ static int open_kprobe_events(bool readwrite)
return -ENOENT;
}
- ret = e_snprintf(buf, PATH_MAX, "%stracing/kprobe_events", __debugfs);
+ ret = e_snprintf(buf, PATH_MAX, "%s/%s", __debugfs, trace_file);
if (ret >= 0) {
pr_debug("Opening %s write=%d\n", buf, readwrite);
if (readwrite && !probe_event_dry_run)
ret = open(buf, O_RDWR, O_APPEND);
else
ret = open(buf, O_RDONLY, 0);
- }
- if (ret < 0) {
- if (errno == ENOENT)
- pr_warning("kprobe_events file does not exist - please"
- " rebuild kernel with CONFIG_KPROBE_EVENT.\n");
- else
- pr_warning("Failed to open kprobe_events file: %s\n",
- strerror(errno));
+ if (ret < 0)
+ print_warn_msg(buf, is_kprobe);
}
return ret;
}
-/* Get raw string list of current kprobe_events */
+static int open_kprobe_events(bool readwrite)
+{
+ return open_probe_events("tracing/kprobe_events", readwrite, true);
+}
+
+static int open_uprobe_events(bool readwrite)
+{
+ return open_probe_events("tracing/uprobe_events", readwrite, false);
+}
+
+/* Get raw string list of current kprobe_events or uprobe_events */
static struct strlist *get_probe_trace_command_rawlist(int fd)
{
int ret, idx;
@@ -1364,7 +1868,8 @@ static struct strlist *get_probe_trace_command_rawlist(int fd)
}
/* Show an event */
-static int show_perf_probe_event(struct perf_probe_event *pev)
+static int show_perf_probe_event(struct perf_probe_event *pev,
+ const char *module)
{
int i, ret;
char buf[128];
@@ -1380,6 +1885,8 @@ static int show_perf_probe_event(struct perf_probe_event *pev)
return ret;
printf(" %-20s (on %s", buf, place);
+ if (module)
+ printf(" in %s", module);
if (pev->nargs > 0) {
printf(" with");
@@ -1396,38 +1903,29 @@ static int show_perf_probe_event(struct perf_probe_event *pev)
return ret;
}
-/* List up current perf-probe events */
-int show_perf_probe_events(void)
+static int __show_perf_probe_events(int fd, bool is_kprobe)
{
- int fd, ret;
+ int ret = 0;
struct probe_trace_event tev;
struct perf_probe_event pev;
struct strlist *rawlist;
struct str_node *ent;
- setup_pager();
- ret = init_vmlinux();
- if (ret < 0)
- return ret;
-
memset(&tev, 0, sizeof(tev));
memset(&pev, 0, sizeof(pev));
- fd = open_kprobe_events(false);
- if (fd < 0)
- return fd;
-
rawlist = get_probe_trace_command_rawlist(fd);
- close(fd);
if (!rawlist)
return -ENOENT;
strlist__for_each(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
if (ret >= 0) {
- ret = convert_to_perf_probe_event(&tev, &pev);
+ ret = convert_to_perf_probe_event(&tev, &pev,
+ is_kprobe);
if (ret >= 0)
- ret = show_perf_probe_event(&pev);
+ ret = show_perf_probe_event(&pev,
+ tev.point.module);
}
clear_perf_probe_event(&pev);
clear_probe_trace_event(&tev);
@@ -1439,6 +1937,34 @@ int show_perf_probe_events(void)
return ret;
}
+/* List up current perf-probe events */
+int show_perf_probe_events(void)
+{
+ int fd, ret;
+
+ setup_pager();
+ fd = open_kprobe_events(false);
+
+ if (fd < 0)
+ return fd;
+
+ ret = init_symbol_maps(false);
+ if (ret < 0)
+ return ret;
+
+ ret = __show_perf_probe_events(fd, true);
+ close(fd);
+
+ fd = open_uprobe_events(false);
+ if (fd >= 0) {
+ ret = __show_perf_probe_events(fd, false);
+ close(fd);
+ }
+
+ exit_symbol_maps();
+ return ret;
+}
+
/* Get current perf-probe event names */
static struct strlist *get_probe_trace_event_names(int fd, bool include_group)
{
@@ -1544,7 +2070,11 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
const char *event, *group;
struct strlist *namelist;
- fd = open_kprobe_events(true);
+ if (pev->uprobes)
+ fd = open_uprobe_events(true);
+ else
+ fd = open_kprobe_events(true);
+
if (fd < 0)
return fd;
/* Get current event names */
@@ -1555,7 +2085,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
}
ret = 0;
- printf("Add new event%s\n", (ntevs > 1) ? "s:" : ":");
+ printf("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
for (i = 0; i < ntevs; i++) {
tev = &tevs[i];
if (pev->event)
@@ -1594,7 +2124,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
group = pev->group;
pev->event = tev->event;
pev->group = tev->group;
- show_perf_probe_event(pev);
+ show_perf_probe_event(pev, tev->point.module);
/* Trick here - restore current event/group */
pev->event = (char *)event;
pev->group = (char *)group;
@@ -1610,7 +2140,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
if (ret >= 0) {
/* Show how to use the event. */
- printf("\nYou can now use it on all perf tools, such as:\n\n");
+ printf("\nYou can now use it in all perf tools, such as:\n\n");
printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
tev->event);
}
@@ -1620,78 +2150,175 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
return ret;
}
-static int convert_to_probe_trace_events(struct perf_probe_event *pev,
- struct probe_trace_event **tevs,
- int max_tevs, const char *module)
+static char *looking_function_name;
+static int num_matched_functions;
+
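+/* Keep and count global/local symbols whose name exactly matches the probed function */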
+static int probe_function_filter(struct map *map __maybe_unused,
+ struct symbol *sym)
+{
+ if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
+ strcmp(looking_function_name, sym->name) == 0) {
+ num_matched_functions++;
+ return 0;
+ }
+ return 1;
+}
+
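+/* strdup() the string, or jump to the given error label on allocation failure */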
+#define strdup_or_goto(str, label) \
+ ({ char *__p = strdup(str); if (!__p) goto label; __p; })
+
+/*
+ * Find probe function addresses from map.
+ * Return an error or the number of found probe_trace_event
+ */
+static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs, const char *target)
{
+ struct map *map = NULL;
+ struct kmap *kmap = NULL;
+ struct ref_reloc_sym *reloc_sym = NULL;
struct symbol *sym;
- int ret = 0, i;
+ struct rb_node *nd;
struct probe_trace_event *tev;
+ struct perf_probe_point *pp = &pev->point;
+ struct probe_trace_point *tp;
+ int ret, i;
- /* Convert perf_probe_event with debuginfo */
- ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module);
- if (ret != 0)
- return ret;
+ /* Init maps of given executable or kernel */
+ if (pev->uprobes)
+ map = dso__new_map(target);
+ else
+ map = kernel_get_module_map(target);
+ if (!map) {
+ ret = -EINVAL;
+ goto out;
+ }
- /* Allocate trace event buffer */
- tev = *tevs = zalloc(sizeof(struct probe_trace_event));
- if (tev == NULL)
- return -ENOMEM;
+ /*
+ * Load matched symbols: since different local symbols may have the
+ * same name but different addresses, this lists all the symbols.
+ */
+ num_matched_functions = 0;
+ looking_function_name = pp->function;
+ ret = map__load(map, probe_function_filter);
+ if (ret || num_matched_functions == 0) {
+ pr_err("Failed to find symbol %s in %s\n", pp->function,
+ target ? : "kernel");
+ ret = -ENOENT;
+ goto out;
+ } else if (num_matched_functions > max_tevs) {
+ pr_err("Too many functions matched in %s\n",
+ target ? : "kernel");
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (!pev->uprobes) {
+ kmap = map__kmap(map);
+ reloc_sym = kmap->ref_reloc_sym;
+ if (!reloc_sym) {
+ pr_warning("Relocated base symbol is not found!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
- /* Copy parameters */
- tev->point.symbol = strdup(pev->point.function);
- if (tev->point.symbol == NULL) {
+ /* Setup result trace-probe-events */
+ *tevs = zalloc(sizeof(*tev) * num_matched_functions);
+ if (!*tevs) {
ret = -ENOMEM;
- goto error;
+ goto out;
}
- tev->point.offset = pev->point.offset;
- tev->point.retprobe = pev->point.retprobe;
- tev->nargs = pev->nargs;
- if (tev->nargs) {
- tev->args = zalloc(sizeof(struct probe_trace_arg)
- * tev->nargs);
- if (tev->args == NULL) {
- ret = -ENOMEM;
- goto error;
+
+ ret = 0;
+ map__for_each_symbol(map, sym, nd) {
+ tev = (*tevs) + ret;
+ tp = &tev->point;
+ if (ret == num_matched_functions) {
+ pr_warning("Too many symbols are listed. Skip it.\n");
+ break;
+ }
+ ret++;
+
+ if (pp->offset > sym->end - sym->start) {
+ pr_warning("Offset %ld is bigger than the size of %s\n",
+ pp->offset, sym->name);
+ ret = -ENOENT;
+ goto err_out;
+ }
+ /* Add one probe point */
+ tp->address = map->unmap_ip(map, sym->start) + pp->offset;
+ if (reloc_sym) {
+ tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out);
+ tp->offset = tp->address - reloc_sym->addr;
+ } else {
+ tp->symbol = strdup_or_goto(sym->name, nomem_out);
+ tp->offset = pp->offset;
+ }
+ tp->retprobe = pp->retprobe;
+ if (target)
+ tev->point.module = strdup_or_goto(target, nomem_out);
+ tev->uprobes = pev->uprobes;
+ tev->nargs = pev->nargs;
+ if (tev->nargs) {
+ tev->args = zalloc(sizeof(struct probe_trace_arg) *
+ tev->nargs);
+ if (tev->args == NULL)
+ goto nomem_out;
}
for (i = 0; i < tev->nargs; i++) {
- if (pev->args[i].name) {
- tev->args[i].name = strdup(pev->args[i].name);
- if (tev->args[i].name == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- }
- tev->args[i].value = strdup(pev->args[i].var);
- if (tev->args[i].value == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- if (pev->args[i].type) {
- tev->args[i].type = strdup(pev->args[i].type);
- if (tev->args[i].type == NULL) {
- ret = -ENOMEM;
- goto error;
- }
- }
+ if (pev->args[i].name)
+ tev->args[i].name =
+ strdup_or_goto(pev->args[i].name,
+ nomem_out);
+
+ tev->args[i].value = strdup_or_goto(pev->args[i].var,
+ nomem_out);
+ if (pev->args[i].type)
+ tev->args[i].type =
+ strdup_or_goto(pev->args[i].type,
+ nomem_out);
}
}
- /* Currently just checking function name from symbol map */
- sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
- if (!sym) {
- pr_warning("Kernel symbol \'%s\' not found.\n",
- tev->point.symbol);
- ret = -ENOENT;
- goto error;
+out:
+ if (map && pev->uprobes) {
+ /* Only a uprobe (exec) map needs to be released here */
+ dso__delete(map->dso);
+ map__delete(map);
}
-
- return 1;
-error:
- clear_probe_trace_event(tev);
- free(tev);
- *tevs = NULL;
return ret;
+
+nomem_out:
+ ret = -ENOMEM;
+err_out:
+ clear_probe_trace_events(*tevs, num_matched_functions);
+ zfree(tevs);
+ goto out;
+}
+
+static int convert_to_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs, const char *target)
+{
+ int ret;
+
+ if (pev->uprobes && !pev->group) {
+ /* Replace group name if not given */
+ ret = convert_exec_to_group(target, &pev->group);
+ if (ret != 0) {
+ pr_warning("Failed to make a group name.\n");
+ return ret;
+ }
+ }
+
+ /* Convert perf_probe_event with debuginfo */
+ ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
+ if (ret != 0)
+ return ret; /* Found in debuginfo or got an error */
+
+ return find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
}
struct __event_package {
@@ -1701,17 +2328,18 @@ struct __event_package {
};
int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
- int max_tevs, const char *module, bool force_add)
+ int max_tevs, const char *target, bool force_add)
{
int i, j, ret;
struct __event_package *pkgs;
+ ret = 0;
pkgs = zalloc(sizeof(struct __event_package) * npevs);
+
if (pkgs == NULL)
return -ENOMEM;
- /* Init vmlinux path */
- ret = init_vmlinux();
+ ret = init_symbol_maps(pevs->uprobes);
if (ret < 0) {
free(pkgs);
return ret;
@@ -1724,24 +2352,28 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
ret = convert_to_probe_trace_events(pkgs[i].pev,
&pkgs[i].tevs,
max_tevs,
- module);
+ target);
if (ret < 0)
goto end;
pkgs[i].ntevs = ret;
}
/* Loop 2: add all events */
- for (i = 0; i < npevs && ret >= 0; i++)
+ for (i = 0; i < npevs; i++) {
ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
pkgs[i].ntevs, force_add);
+ if (ret < 0)
+ break;
+ }
end:
/* Loop 3: cleanup and free trace events */
for (i = 0; i < npevs; i++) {
for (j = 0; j < pkgs[i].ntevs; j++)
clear_probe_trace_event(&pkgs[i].tevs[j]);
- free(pkgs[i].tevs);
+ zfree(&pkgs[i].tevs);
}
free(pkgs);
+ exit_symbol_maps();
return ret;
}
@@ -1768,33 +2400,27 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
pr_debug("Writing event: %s\n", buf);
ret = write(fd, buf, strlen(buf));
- if (ret < 0)
+ if (ret < 0) {
+ ret = -errno;
goto error;
+ }
- printf("Remove event: %s\n", ent->s);
+ printf("Removed event: %s\n", ent->s);
return 0;
error:
pr_warning("Failed to delete event: %s\n", strerror(-ret));
return ret;
}
-static int del_trace_probe_event(int fd, const char *group,
- const char *event, struct strlist *namelist)
+static int del_trace_probe_event(int fd, const char *buf,
+ struct strlist *namelist)
{
- char buf[128];
struct str_node *ent, *n;
- int found = 0, ret = 0;
-
- ret = e_snprintf(buf, 128, "%s:%s", group, event);
- if (ret < 0) {
- pr_err("Failed to copy event.");
- return ret;
- }
+ int ret = -1;
if (strpbrk(buf, "*?")) { /* Glob-exp */
strlist__for_each_safe(ent, n, namelist)
if (strglobmatch(ent->s, buf)) {
- found++;
ret = __del_trace_probe_event(fd, ent);
if (ret < 0)
break;
@@ -1803,40 +2429,43 @@ static int del_trace_probe_event(int fd, const char *group,
} else {
ent = strlist__find(namelist, buf);
if (ent) {
- found++;
ret = __del_trace_probe_event(fd, ent);
if (ret >= 0)
strlist__remove(namelist, ent);
}
}
- if (found == 0 && ret >= 0)
- pr_info("Info: Event \"%s\" does not exist.\n", buf);
return ret;
}
int del_perf_probe_events(struct strlist *dellist)
{
- int fd, ret = 0;
+ int ret = -1, ufd = -1, kfd = -1;
+ char buf[128];
const char *group, *event;
char *p, *str;
struct str_node *ent;
- struct strlist *namelist;
-
- fd = open_kprobe_events(true);
- if (fd < 0)
- return fd;
+ struct strlist *namelist = NULL, *unamelist = NULL;
/* Get current event names */
- namelist = get_probe_trace_event_names(fd, true);
- if (namelist == NULL)
- return -EINVAL;
+ kfd = open_kprobe_events(true);
+ if (kfd < 0)
+ return kfd;
+
+ namelist = get_probe_trace_event_names(kfd, true);
+ ufd = open_uprobe_events(true);
+
+ if (ufd >= 0)
+ unamelist = get_probe_trace_event_names(ufd, true);
+
+ if (namelist == NULL && unamelist == NULL)
+ goto error;
strlist__for_each(ent, dellist) {
str = strdup(ent->s);
if (str == NULL) {
ret = -ENOMEM;
- break;
+ goto error;
}
pr_debug("Parsing: %s\n", str);
p = strchr(str, ':');
@@ -1848,14 +2477,96 @@ int del_perf_probe_events(struct strlist *dellist)
group = "*";
event = str;
}
+
+ ret = e_snprintf(buf, 128, "%s:%s", group, event);
+ if (ret < 0) {
+ pr_err("Failed to copy event.\n");
+ free(str);
+ goto error;
+ }
+
pr_debug("Group: %s, Event: %s\n", group, event);
- ret = del_trace_probe_event(fd, group, event, namelist);
+
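+ /* Try the kprobe event list first, then fall back to uprobe events */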
+ if (namelist)
+ ret = del_trace_probe_event(kfd, buf, namelist);
+
+ if (unamelist && ret != 0)
+ ret = del_trace_probe_event(ufd, buf, unamelist);
+
+ if (ret != 0)
+ pr_info("Info: Event \"%s\" does not exist.\n", buf);
+
free(str);
- if (ret < 0)
- break;
}
- strlist__delete(namelist);
- close(fd);
+
+error:
+ if (kfd >= 0) {
+ strlist__delete(namelist);
+ close(kfd);
+ }
+
+ if (ufd >= 0) {
+ strlist__delete(unamelist);
+ close(ufd);
+ }
+
+ return ret;
+}
+
+/* TODO: don't use a global variable for filter ... */
+static struct strfilter *available_func_filter;
+
+/*
+ * If a symbol corresponds to a function with global or local binding
+ * and matches the filter, return 0. For all others return 1.
+ */
+static int filter_available_functions(struct map *map __maybe_unused,
+ struct symbol *sym)
+{
+ if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
+ strfilter__compare(available_func_filter, sym->name))
+ return 0;
+ return 1;
+}
+
+int show_available_funcs(const char *target, struct strfilter *_filter,
+ bool user)
+{
+ struct map *map;
+ int ret;
+
+ ret = init_symbol_maps(user);
+ if (ret < 0)
+ return ret;
+
+ /* Get a symbol map */
+ if (user)
+ map = dso__new_map(target);
+ else
+ map = kernel_get_module_map(target);
+ if (!map) {
+ pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
+ return -EINVAL;
+ }
+
+ /* Load symbols with given filter */
+ available_func_filter = _filter;
+ if (map__load(map, filter_available_functions)) {
+ pr_err("Failed to load symbols in %s\n", (target) ? : "kernel");
+ goto end;
+ }
+ if (!dso__sorted_by_name(map->dso, map->type))
+ dso__sort_by_name(map->dso, map->type);
+
+ /* Show all (filtered) symbols */
+ setup_pager();
+ dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+end:
+ if (user) {
+ dso__delete(map->dso);
+ map__delete(map);
+ }
+ exit_symbol_maps();
return ret;
}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 5accbedfea3..776c9347a3b 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -2,14 +2,18 @@
#define _PROBE_EVENT_H
#include <stdbool.h>
+#include "intlist.h"
#include "strlist.h"
+#include "strfilter.h"
extern bool probe_event_dry_run;
-/* kprobe-tracer tracing point */
+/* kprobe-tracer and uprobe-tracer tracing point */
struct probe_trace_point {
char *symbol; /* Base symbol */
+ char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
+ unsigned long address; /* Actual address of the trace point */
bool retprobe; /* Return probe flag */
};
@@ -19,7 +23,7 @@ struct probe_trace_arg_ref {
long offset; /* Offset value */
};
-/* kprobe-tracer tracing argument */
+/* kprobe-tracer and uprobe-tracer tracing argument */
struct probe_trace_arg {
char *name; /* Argument name */
char *value; /* Base value */
@@ -27,12 +31,13 @@ struct probe_trace_arg {
struct probe_trace_arg_ref *ref; /* Referencing offset */
};
-/* kprobe-tracer tracing event (point + arg) */
+/* kprobe-tracer and uprobe-tracer tracing event (point + arg) */
struct probe_trace_event {
char *event; /* Event name */
char *group; /* Group name */
struct probe_trace_point point; /* Trace point */
int nargs; /* Number of args */
+ bool uprobes; /* uprobes only */
struct probe_trace_arg *args; /* Arguments */
};
@@ -68,16 +73,10 @@ struct perf_probe_event {
char *group; /* Group name */
struct perf_probe_point point; /* Probe point */
int nargs; /* Number of arguments */
+ bool uprobes;
struct perf_probe_arg *args; /* Arguments */
};
-
-/* Line number container */
-struct line_node {
- struct list_head list;
- int line;
-};
-
/* Line range */
struct line_range {
char *file; /* File name */
@@ -87,7 +86,7 @@ struct line_range {
int offset; /* Start line offset */
char *path; /* Real path name */
char *comp_dir; /* Compile directory */
- struct list_head line_list; /* Visible lines */
+ struct intlist *line_list; /* Visible lines */
};
/* List of variables */
@@ -115,6 +114,12 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev);
/* Command string to line-range */
extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
+/* Release line range members */
+extern void line_range__clear(struct line_range *lr);
+
+/* Initialize line range */
+extern int line_range__init(struct line_range *lr);
+
/* Internal use: Return kernel/module path */
extern const char *kernel_get_module_path(const char *module);
@@ -126,8 +131,9 @@ extern int show_perf_probe_events(void);
extern int show_line_range(struct line_range *lr, const char *module);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
- bool externs);
-
+ struct strfilter *filter, bool externs);
+extern int show_available_funcs(const char *module, struct strfilter *filter,
+ bool user);
/* Maximum index number of event-name postfix */
#define MAX_EVENT_INDEX 1024
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 3991d73d1cf..98e30476641 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -30,11 +30,13 @@
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
-#include <ctype.h>
#include <dwarf-regs.h>
+#include <linux/bitops.h>
#include "event.h"
+#include "dso.h"
#include "debug.h"
+#include "intlist.h"
#include "util.h"
#include "symbol.h"
#include "probe-finder.h"
@@ -42,103 +44,7 @@
/* Kprobe tracer basic type is up to u64 */
#define MAX_BASIC_TYPE_BITS 64
-/*
- * Compare the tail of two strings.
- * Return 0 if whole of either string is same as another's tail part.
- */
-static int strtailcmp(const char *s1, const char *s2)
-{
- int i1 = strlen(s1);
- int i2 = strlen(s2);
- while (--i1 >= 0 && --i2 >= 0) {
- if (s1[i1] != s2[i2])
- return s1[i1] - s2[i2];
- }
- return 0;
-}
-
-/* Line number list operations */
-
-/* Add a line to line number list */
-static int line_list__add_line(struct list_head *head, int line)
-{
- struct line_node *ln;
- struct list_head *p;
-
- /* Reverse search, because new line will be the last one */
- list_for_each_entry_reverse(ln, head, list) {
- if (ln->line < line) {
- p = &ln->list;
- goto found;
- } else if (ln->line == line) /* Already exist */
- return 1;
- }
- /* List is empty, or the smallest entry */
- p = head;
-found:
- pr_debug("line list: add a line %u\n", line);
- ln = zalloc(sizeof(struct line_node));
- if (ln == NULL)
- return -ENOMEM;
- ln->line = line;
- INIT_LIST_HEAD(&ln->list);
- list_add(&ln->list, p);
- return 0;
-}
-
-/* Check if the line in line number list */
-static int line_list__has_line(struct list_head *head, int line)
-{
- struct line_node *ln;
-
- /* Reverse search, because new line will be the last one */
- list_for_each_entry(ln, head, list)
- if (ln->line == line)
- return 1;
-
- return 0;
-}
-
-/* Init line number list */
-static void line_list__init(struct list_head *head)
-{
- INIT_LIST_HEAD(head);
-}
-
-/* Free line number list */
-static void line_list__free(struct list_head *head)
-{
- struct line_node *ln;
- while (!list_empty(head)) {
- ln = list_first_entry(head, struct line_node, list);
- list_del(&ln->list);
- free(ln);
- }
-}
-
/* Dwarf FL wrappers */
-
-static int __linux_kernel_find_elf(Dwfl_Module *mod,
- void **userdata,
- const char *module_name,
- Dwarf_Addr base,
- char **file_name, Elf **elfp)
-{
- int fd;
- const char *path = kernel_get_module_path(module_name);
-
- if (path) {
- fd = open(path, O_RDONLY);
- if (fd >= 0) {
- *file_name = strdup(path);
- return fd;
- }
- }
- /* If failed, try to call standard method */
- return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
- file_name, elfp);
-}
-
static char *debuginfo_path; /* Currently dummy */
static const Dwfl_Callbacks offline_callbacks = {
@@ -151,391 +57,94 @@ static const Dwfl_Callbacks offline_callbacks = {
.find_elf = dwfl_build_id_find_elf,
};
-static const Dwfl_Callbacks kernel_callbacks = {
- .find_debuginfo = dwfl_standard_find_debuginfo,
- .debuginfo_path = &debuginfo_path,
-
- .find_elf = __linux_kernel_find_elf,
- .section_address = dwfl_linux_kernel_module_section_address,
-};
-
/* Get a Dwarf from offline image */
-static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias)
+static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
+ const char *path)
{
- Dwfl_Module *mod;
- Dwarf *dbg = NULL;
-
- if (!dwflp)
- return NULL;
+ int fd;
- *dwflp = dwfl_begin(&offline_callbacks);
- if (!*dwflp)
- return NULL;
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
- mod = dwfl_report_offline(*dwflp, "", "", fd);
- if (!mod)
+ dbg->dwfl = dwfl_begin(&offline_callbacks);
+ if (!dbg->dwfl)
goto error;
- dbg = dwfl_module_getdwarf(mod, bias);
- if (!dbg) {
-error:
- dwfl_end(*dwflp);
- *dwflp = NULL;
- }
- return dbg;
-}
-
-/* Get a Dwarf from live kernel image */
-static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp,
- Dwarf_Addr *bias)
-{
- Dwarf *dbg;
-
- if (!dwflp)
- return NULL;
-
- *dwflp = dwfl_begin(&kernel_callbacks);
- if (!*dwflp)
- return NULL;
-
- /* Load the kernel dwarves: Don't care the result here */
- dwfl_linux_kernel_report_kernel(*dwflp);
- dwfl_linux_kernel_report_modules(*dwflp);
-
- dbg = dwfl_addrdwarf(*dwflp, addr, bias);
- /* Here, check whether we could get a real dwarf */
- if (!dbg) {
- dwfl_end(*dwflp);
- *dwflp = NULL;
- }
- return dbg;
-}
-
-/* Dwarf wrappers */
-
-/* Find the realpath of the target file. */
-static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
-{
- Dwarf_Files *files;
- size_t nfiles, i;
- const char *src = NULL;
- int ret;
-
- if (!fname)
- return NULL;
-
- ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
- if (ret != 0)
- return NULL;
-
- for (i = 0; i < nfiles; i++) {
- src = dwarf_filesrc(files, i, NULL, NULL);
- if (strtailcmp(src, fname) == 0)
- break;
- }
- if (i == nfiles)
- return NULL;
- return src;
-}
-
-/* Get DW_AT_comp_dir (should be NULL with older gcc) */
-static const char *cu_get_comp_dir(Dwarf_Die *cu_die)
-{
- Dwarf_Attribute attr;
- if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
- return NULL;
- return dwarf_formstring(&attr);
-}
-
-/* Compare diename and tname */
-static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
-{
- const char *name;
- name = dwarf_diename(dw_die);
- return name ? (strcmp(tname, name) == 0) : false;
-}
+ dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
+ if (!dbg->mod)
+ goto error;
-/* Get type die */
-static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
-{
- Dwarf_Attribute attr;
+ dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias);
+ if (!dbg->dbg)
+ goto error;
- if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
- dwarf_formref_die(&attr, die_mem))
- return die_mem;
+ return 0;
+error:
+ if (dbg->dwfl)
+ dwfl_end(dbg->dwfl);
else
- return NULL;
-}
+ close(fd);
+ memset(dbg, 0, sizeof(*dbg));
-/* Get a type die, but skip qualifiers */
-static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
-{
- int tag;
-
- do {
- vr_die = die_get_type(vr_die, die_mem);
- if (!vr_die)
- break;
- tag = dwarf_tag(vr_die);
- } while (tag == DW_TAG_const_type ||
- tag == DW_TAG_restrict_type ||
- tag == DW_TAG_volatile_type ||
- tag == DW_TAG_shared_type);
-
- return vr_die;
-}
-
-/* Get a type die, but skip qualifiers and typedef */
-static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
-{
- do {
- vr_die = __die_get_real_type(vr_die, die_mem);
- } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
-
- return vr_die;
-}
-
-static bool die_is_signed_type(Dwarf_Die *tp_die)
-{
- Dwarf_Attribute attr;
- Dwarf_Word ret;
-
- if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
- dwarf_formudata(&attr, &ret) != 0)
- return false;
-
- return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
- ret == DW_ATE_signed_fixed);
-}
-
-static int die_get_byte_size(Dwarf_Die *tp_die)
-{
- Dwarf_Attribute attr;
- Dwarf_Word ret;
-
- if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
- dwarf_formudata(&attr, &ret) != 0)
- return 0;
-
- return (int)ret;
+ return -ENOENT;
}
-/* Get data_member_location offset */
-static int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+static struct debuginfo *__debuginfo__new(const char *path)
{
- Dwarf_Attribute attr;
- Dwarf_Op *expr;
- size_t nexpr;
- int ret;
-
- if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
- return -ENOENT;
-
- if (dwarf_formudata(&attr, offs) != 0) {
- /* DW_AT_data_member_location should be DW_OP_plus_uconst */
- ret = dwarf_getlocation(&attr, &expr, &nexpr);
- if (ret < 0 || nexpr == 0)
- return -ENOENT;
-
- if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
- pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
- expr[0].atom, nexpr);
- return -ENOTSUP;
- }
- *offs = (Dwarf_Word)expr[0].number;
- }
- return 0;
-}
-
-/* Return values for die_find callbacks */
-enum {
- DIE_FIND_CB_FOUND = 0, /* End of Search */
- DIE_FIND_CB_CHILD = 1, /* Search only children */
- DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
- DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
-};
-
-/* Search a child die */
-static Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
- int (*callback)(Dwarf_Die *, void *),
- void *data, Dwarf_Die *die_mem)
-{
- Dwarf_Die child_die;
- int ret;
-
- ret = dwarf_child(rt_die, die_mem);
- if (ret != 0)
+ struct debuginfo *dbg = zalloc(sizeof(*dbg));
+ if (!dbg)
return NULL;
- do {
- ret = callback(die_mem, data);
- if (ret == DIE_FIND_CB_FOUND)
- return die_mem;
-
- if ((ret & DIE_FIND_CB_CHILD) &&
- die_find_child(die_mem, callback, data, &child_die)) {
- memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
- return die_mem;
- }
- } while ((ret & DIE_FIND_CB_SIBLING) &&
- dwarf_siblingof(die_mem, die_mem) == 0);
-
- return NULL;
+ if (debuginfo__init_offline_dwarf(dbg, path) < 0)
+ zfree(&dbg);
+ if (dbg)
+ pr_debug("Open Debuginfo file: %s\n", path);
+ return dbg;
}
-struct __addr_die_search_param {
- Dwarf_Addr addr;
- Dwarf_Die *die_mem;
+enum dso_binary_type distro_dwarf_types[] = {
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
};
-static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
+struct debuginfo *debuginfo__new(const char *path)
{
- struct __addr_die_search_param *ad = data;
+ enum dso_binary_type *type;
+ char buf[PATH_MAX], nil = '\0';
+ struct dso *dso;
+ struct debuginfo *dinfo = NULL;
+
+ /* Try to open distro debuginfo files */
+ dso = dso__new(path);
+ if (!dso)
+ goto out;
- if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
- dwarf_haspc(fn_die, ad->addr)) {
- memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
- return DWARF_CB_ABORT;
+ for (type = distro_dwarf_types;
+ !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
+ type++) {
+ if (dso__read_binary_type_filename(dso, *type, &nil,
+ buf, PATH_MAX) < 0)
+ continue;
+ dinfo = __debuginfo__new(buf);
}
- return DWARF_CB_OK;
-}
-
-/* Search a real subprogram including this line, */
-static Dwarf_Die *die_find_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
- Dwarf_Die *die_mem)
-{
- struct __addr_die_search_param ad;
- ad.addr = addr;
- ad.die_mem = die_mem;
- /* dwarf_getscopes can't find subprogram. */
- if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
- return NULL;
- else
- return die_mem;
-}
-
-/* die_find callback for inline function search */
-static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
-{
- Dwarf_Addr *addr = data;
+ dso__delete(dso);
- if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
- dwarf_haspc(die_mem, *addr))
- return DIE_FIND_CB_FOUND;
-
- return DIE_FIND_CB_CONTINUE;
-}
-
-/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */
-static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
- Dwarf_Die *die_mem)
-{
- return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
-}
-
-struct __find_variable_param {
- const char *name;
- Dwarf_Addr addr;
-};
-
-static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
-{
- struct __find_variable_param *fvp = data;
- int tag;
-
- tag = dwarf_tag(die_mem);
- if ((tag == DW_TAG_formal_parameter ||
- tag == DW_TAG_variable) &&
- die_compare_name(die_mem, fvp->name))
- return DIE_FIND_CB_FOUND;
-
- if (dwarf_haspc(die_mem, fvp->addr))
- return DIE_FIND_CB_CONTINUE;
- else
- return DIE_FIND_CB_SIBLING;
-}
-
-/* Find a variable called 'name' at given address */
-static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
- Dwarf_Addr addr, Dwarf_Die *die_mem)
-{
- struct __find_variable_param fvp = { .name = name, .addr = addr};
-
- return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
- die_mem);
-}
-
-static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
-{
- const char *name = data;
-
- if ((dwarf_tag(die_mem) == DW_TAG_member) &&
- die_compare_name(die_mem, name))
- return DIE_FIND_CB_FOUND;
-
- return DIE_FIND_CB_SIBLING;
-}
-
-/* Find a member called 'name' */
-static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
- Dwarf_Die *die_mem)
-{
- return die_find_child(st_die, __die_find_member_cb, (void *)name,
- die_mem);
-}
-
-/* Get the name of given variable DIE */
-static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
-{
- Dwarf_Die type;
- int tag, ret, ret2;
- const char *tmp = "";
-
- if (__die_get_real_type(vr_die, &type) == NULL)
- return -ENOENT;
-
- tag = dwarf_tag(&type);
- if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
- tmp = "*";
- else if (tag == DW_TAG_subroutine_type) {
- /* Function pointer */
- ret = snprintf(buf, len, "(function_type)");
- return (ret >= len) ? -E2BIG : ret;
- } else {
- if (!dwarf_diename(&type))
- return -ENOENT;
- if (tag == DW_TAG_union_type)
- tmp = "union ";
- else if (tag == DW_TAG_structure_type)
- tmp = "struct ";
- /* Write a base name */
- ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
- return (ret >= len) ? -E2BIG : ret;
- }
- ret = die_get_typename(&type, buf, len);
- if (ret > 0) {
- ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
- ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
- }
- return ret;
+out:
+	/* If all distro debuginfo files failed to open, open the given binary */
+ return dinfo ? : __debuginfo__new(path);
}
-/* Get the name and type of given variable DIE, stored as "type\tname" */
-static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+void debuginfo__delete(struct debuginfo *dbg)
{
- int ret, ret2;
-
- ret = die_get_typename(vr_die, buf, len);
- if (ret < 0) {
- pr_debug("Failed to get type, make it unknown.\n");
- ret = snprintf(buf, len, "(unknown_type)");
+ if (dbg) {
+ if (dbg->dwfl)
+ dwfl_end(dbg->dwfl);
+ free(dbg);
}
- if (ret > 0) {
- ret2 = snprintf(buf + ret, len - ret, "\t%s",
- dwarf_diename(vr_die));
- ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
- }
- return ret;
}
/*
@@ -554,12 +163,15 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
/*
* Convert a location into trace_arg.
* If tvar == NULL, this just checks variable can be converted.
+ * If vr_die is a parameter at the function entry, do a heuristic search
+ * for the location fuzzed by function-entry mcount.
*/
static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
- Dwarf_Op *fb_ops,
+ Dwarf_Op *fb_ops, Dwarf_Die *sp_die,
struct probe_trace_arg *tvar)
{
Dwarf_Attribute attr;
+ Dwarf_Addr tmp = 0;
Dwarf_Op *op;
size_t nops;
unsigned int regn;
@@ -572,12 +184,29 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
goto static_var;
/* TODO: handle more than 1 exprs */
- if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
- dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 ||
- nops == 0) {
- /* TODO: Support const_value */
+ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
+ return -EINVAL; /* Broken DIE ? */
+ if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) {
+ ret = dwarf_entrypc(sp_die, &tmp);
+ if (ret || addr != tmp ||
+ dwarf_tag(vr_die) != DW_TAG_formal_parameter ||
+ dwarf_highpc(sp_die, &tmp))
+ return -ENOENT;
+ /*
+ * This is fuzzed by fentry mcount. We try to find the
+ * parameter location at the earliest address.
+ */
+ for (addr += 1; addr <= tmp; addr++) {
+ if (dwarf_getlocation_addr(&attr, addr, &op,
+ &nops, 1) > 0)
+ goto found;
+ }
return -ENOENT;
}
+found:
+ if (nops == 0)
+ /* TODO: Support const_value */
+ return -ENOENT;
if (op->atom == DW_OP_addr) {
static_var:
@@ -627,8 +256,8 @@ static_var:
regs = get_arch_regstr(regn);
if (!regs) {
/* This should be a bug in DWARF or this tool */
- pr_warning("Mapping for DWARF register number %u "
- "missing on this architecture.", regn);
+ pr_warning("Mapping for the register number %u "
+ "missing on this architecture.\n", regn);
return -ERANGE;
}
@@ -644,6 +273,8 @@ static_var:
return 0;
}
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
+
static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg *tvar,
const char *cast)
@@ -651,6 +282,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg_ref **ref_ptr = &tvar->ref;
Dwarf_Die type;
char buf[16];
+ int bsize, boffs, total;
int ret;
/* TODO: check all types */
@@ -660,6 +292,18 @@ static int convert_variable_type(Dwarf_Die *vr_die,
return (tvar->type == NULL) ? -ENOMEM : 0;
}
+ bsize = dwarf_bitsize(vr_die);
+ if (bsize > 0) {
+ /* This is a bitfield */
+ boffs = dwarf_bitoffset(vr_die);
+ total = dwarf_bytesize(vr_die);
+ if (boffs < 0 || total < 0)
+ return -ENOENT;
+ ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs,
+ BYTES_TO_BITS(total));
+ goto formatted;
+ }
+
if (die_get_real_type(vr_die, &type) == NULL) {
pr_warning("Failed to get a type information of %s.\n",
dwarf_diename(vr_die));
@@ -674,15 +318,16 @@ static int convert_variable_type(Dwarf_Die *vr_die,
if (ret != DW_TAG_pointer_type &&
ret != DW_TAG_array_type) {
pr_warning("Failed to cast into string: "
- "%s(%s) is not a pointer nor array.",
+ "%s(%s) is not a pointer nor array.\n",
dwarf_diename(vr_die), dwarf_diename(&type));
return -EINVAL;
}
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get a type"
+ " information.\n");
+ return -ENOENT;
+ }
if (ret == DW_TAG_pointer_type) {
- if (die_get_real_type(&type, &type) == NULL) {
- pr_warning("Failed to get a type information.");
- return -ENOENT;
- }
while (*ref_ptr)
ref_ptr = &(*ref_ptr)->next;
/* Add new reference with offset +0 */
@@ -695,7 +340,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
if (!die_compare_name(&type, "char") &&
!die_compare_name(&type, "unsigned char")) {
pr_warning("Failed to cast into string: "
- "%s is not (unsigned) char *.",
+ "%s is not (unsigned) char *.\n",
dwarf_diename(vr_die));
return -EINVAL;
}
@@ -703,29 +348,32 @@ static int convert_variable_type(Dwarf_Die *vr_die,
return (tvar->type == NULL) ? -ENOMEM : 0;
}
- ret = die_get_byte_size(&type) * 8;
- if (ret) {
- /* Check the bitwidth */
- if (ret > MAX_BASIC_TYPE_BITS) {
- pr_info("%s exceeds max-bitwidth."
- " Cut down to %d bits.\n",
- dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
- ret = MAX_BASIC_TYPE_BITS;
- }
-
- ret = snprintf(buf, 16, "%c%d",
- die_is_signed_type(&type) ? 's' : 'u', ret);
- if (ret < 0 || ret >= 16) {
- if (ret >= 16)
- ret = -E2BIG;
- pr_warning("Failed to convert variable type: %s\n",
- strerror(-ret));
- return ret;
- }
- tvar->type = strdup(buf);
- if (tvar->type == NULL)
- return -ENOMEM;
+ ret = dwarf_bytesize(&type);
+ if (ret <= 0)
+ /* No size ... try to use default type */
+ return 0;
+ ret = BYTES_TO_BITS(ret);
+
+ /* Check the bitwidth */
+ if (ret > MAX_BASIC_TYPE_BITS) {
+ pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n",
+ dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
+ ret = MAX_BASIC_TYPE_BITS;
+ }
+ ret = snprintf(buf, 16, "%c%d",
+ die_is_signed_type(&type) ? 's' : 'u', ret);
+
+formatted:
+ if (ret < 0 || ret >= 16) {
+ if (ret >= 16)
+ ret = -E2BIG;
+ pr_warning("Failed to convert variable type: %s\n",
+ strerror(-ret));
+ return ret;
}
+ tvar->type = strdup(buf);
+ if (tvar->type == NULL)
+ return -ENOMEM;
return 0;
}
@@ -768,7 +416,7 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
else
*ref_ptr = ref;
}
- ref->offset += die_get_byte_size(&type) * field->index;
+ ref->offset += dwarf_bytesize(&type) * field->index;
if (!field->next)
/* Save vr_die for converting types */
memcpy(die_mem, vr_die, sizeof(*die_mem));
@@ -786,8 +434,10 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
return -ENOENT;
}
/* Verify it is a data structure */
- if (dwarf_tag(&type) != DW_TAG_structure_type) {
- pr_warning("%s is not a data structure.\n", varname);
+ tag = dwarf_tag(&type);
+ if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
+		pr_warning("%s is not a data structure nor a union.\n",
+			   varname);
return -EINVAL;
}
@@ -800,13 +450,14 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
*ref_ptr = ref;
} else {
/* Verify it is a data structure */
- if (tag != DW_TAG_structure_type) {
- pr_warning("%s is not a data structure.\n", varname);
+ if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
+ pr_warning("%s is not a data structure nor an union.\n",
+ varname);
return -EINVAL;
}
if (field->name[0] == '[') {
- pr_err("Semantic error: %s is not a pointor nor array.",
- varname);
+			pr_err("Semantic error: %s is not a pointer"
+			       " nor an array.\n", varname);
return -EINVAL;
}
if (field->ref) {
@@ -822,16 +473,21 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
}
if (die_find_member(&type, field->name, die_mem) == NULL) {
- pr_warning("%s(tyep:%s) has no member %s.\n", varname,
+ pr_warning("%s(type:%s) has no member %s.\n", varname,
dwarf_diename(&type), field->name);
return -EINVAL;
}
/* Get the offset of the field */
- ret = die_get_data_member_location(die_mem, &offs);
- if (ret < 0) {
- pr_warning("Failed to get the offset of %s.\n", field->name);
- return ret;
+ if (tag == DW_TAG_union_type) {
+ offs = 0;
+ } else {
+ ret = die_get_data_member_location(die_mem, &offs);
+ if (ret < 0) {
+ pr_warning("Failed to get the offset of %s.\n",
+ field->name);
+ return ret;
+ }
}
ref->offset += (long)offs;
@@ -854,13 +510,13 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
dwarf_diename(vr_die));
ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
- pf->tvar);
- if (ret == -ENOENT)
+ &pf->sp_die, pf->tvar);
+ if (ret == -ENOENT || ret == -EINVAL)
pr_err("Failed to find the location of %s at this address.\n"
" Perhaps, it has been optimized out.\n", pf->pvar->var);
else if (ret == -ENOTSUP)
pr_err("Sorry, we don't support this variable location yet.\n");
- else if (pf->pvar->field) {
+ else if (ret == 0 && pf->pvar->field) {
ret = convert_variable_fields(vr_die, pf->pvar->var,
pf->pvar->field, &pf->tvar->ref,
&die_mem);
@@ -872,12 +528,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
return ret;
}
-/* Find a variable in a subprogram die */
-static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
+/* Find a variable in a scope DIE */
+static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
{
- Dwarf_Die vr_die, *scopes;
+ Dwarf_Die vr_die;
char buf[32], *ptr;
- int ret, nscopes;
+ int ret = 0;
if (!is_c_varname(pf->pvar->var)) {
/* Copy raw parameters */
@@ -912,64 +568,65 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
if (pf->tvar->name == NULL)
return -ENOMEM;
- pr_debug("Searching '%s' variable in context.\n",
- pf->pvar->var);
+ pr_debug("Searching '%s' variable in context.\n", pf->pvar->var);
/* Search child die for local variables and parameters. */
- if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die))
- ret = convert_variable(&vr_die, pf);
- else {
- /* Search upper class */
- nscopes = dwarf_getscopes_die(sp_die, &scopes);
- while (nscopes-- > 1) {
- pr_debug("Searching variables in %s\n",
- dwarf_diename(&scopes[nscopes]));
- /* We should check this scope, so give dummy address */
- if (die_find_variable_at(&scopes[nscopes],
- pf->pvar->var, 0,
- &vr_die)) {
- ret = convert_variable(&vr_die, pf);
- goto found;
- }
- }
- if (scopes)
- free(scopes);
- ret = -ENOENT;
+ if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
+ /* Search again in global variables */
+ if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
+ pr_warning("Failed to find '%s' in this function.\n",
+ pf->pvar->var);
+ ret = -ENOENT;
}
-found:
- if (ret < 0)
- pr_warning("Failed to find '%s' in this function.\n",
- pf->pvar->var);
+ if (ret >= 0)
+ ret = convert_variable(&vr_die, pf);
+
return ret;
}
/* Convert subprogram DIE to trace point */
-static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
- bool retprobe, struct probe_trace_point *tp)
+static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
+ Dwarf_Addr paddr, bool retprobe,
+ struct probe_trace_point *tp)
{
- Dwarf_Addr eaddr;
- const char *name;
-
- /* Copy the name of probe point */
- name = dwarf_diename(sp_die);
- if (name) {
- if (dwarf_entrypc(sp_die, &eaddr) != 0) {
- pr_warning("Failed to get entry pc of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- tp->symbol = strdup(name);
- if (tp->symbol == NULL)
- return -ENOMEM;
- tp->offset = (unsigned long)(paddr - eaddr);
- } else
- /* This function has no name. */
- tp->offset = (unsigned long)paddr;
+ Dwarf_Addr eaddr, highaddr;
+ GElf_Sym sym;
+ const char *symbol;
+
+ /* Verify the address is correct */
+ if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+ pr_warning("Failed to get entry address of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ if (dwarf_highpc(sp_die, &highaddr) != 0) {
+ pr_warning("Failed to get end address of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ if (paddr > highaddr) {
+ pr_warning("Offset specified is greater than size of %s\n",
+ dwarf_diename(sp_die));
+ return -EINVAL;
+ }
+
+ /* Get an appropriate symbol from symtab */
+ symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+ if (!symbol) {
+ pr_warning("Failed to find symbol at 0x%lx\n",
+ (unsigned long)paddr);
+ return -ENOENT;
+ }
+ tp->offset = (unsigned long)(paddr - sym.st_value);
+ tp->address = (unsigned long)paddr;
+ tp->symbol = strdup(symbol);
+ if (!tp->symbol)
+ return -ENOMEM;
/* Return probe must be on the head of a subprogram */
if (retprobe) {
if (eaddr != paddr) {
pr_warning("Return probe must be on the head of"
- " a real function\n");
+ " a real function.\n");
return -EINVAL;
}
tp->retprobe = true;
@@ -978,27 +635,30 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr,
return 0;
}
-/* Call probe_finder callback with real subprogram DIE */
-static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
+/* Call probe_finder callback with scope DIE */
+static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
{
- Dwarf_Die die_mem;
Dwarf_Attribute fb_attr;
size_t nops;
int ret;
- /* If no real subprogram, find a real one */
- if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
- sp_die = die_find_real_subprogram(&pf->cu_die,
- pf->addr, &die_mem);
- if (!sp_die) {
+ if (!sc_die) {
+ pr_err("Caller must pass a scope DIE. Program error.\n");
+ return -EINVAL;
+ }
+
+ /* If not a real subprogram, find a real one */
+ if (!die_is_func_def(sc_die)) {
+ if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
pr_warning("Failed to find probe point in any "
"functions.\n");
return -ENOENT;
}
- }
+ } else
+ memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die));
- /* Get the frame base attribute/ops */
- dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
+ /* Get the frame base attribute/ops from subprogram */
+ dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr);
ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
if (ret <= 0 || nops == 0) {
pf->fb_ops = NULL;
@@ -1008,7 +668,7 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
Dwarf_Frame *frame;
if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
- pr_warning("Failed to get CFA on 0x%jx\n",
+ pr_warning("Failed to get call frame on 0x%jx\n",
(uintmax_t)pf->addr);
return -ENOENT;
}
@@ -1016,7 +676,7 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
}
/* Call finder's callback handler */
- ret = pf->callback(sp_die, pf);
+ ret = pf->callback(sc_die, pf);
/* *pf->fb_ops will be cached in libdw. Don't free it. */
pf->fb_ops = NULL;
@@ -1024,195 +684,210 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
return ret;
}
-/* Find probe point from its line number */
-static int find_probe_point_by_line(struct probe_finder *pf)
-{
- Dwarf_Lines *lines;
- Dwarf_Line *line;
- size_t nlines, i;
- Dwarf_Addr addr;
- int lineno;
- int ret = 0;
+struct find_scope_param {
+ const char *function;
+ const char *file;
+ int line;
+ int diff;
+ Dwarf_Die *die_mem;
+ bool found;
+};
- if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
- pr_warning("No source lines found in this CU.\n");
- return -ENOENT;
+static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct find_scope_param *fsp = data;
+ const char *file;
+ int lno;
+
+ /* Skip if declared file name does not match */
+ if (fsp->file) {
+ file = dwarf_decl_file(fn_die);
+ if (!file || strcmp(fsp->file, file) != 0)
+ return 0;
+ }
+	/* If the function name is given, that's what the user expects */
+ if (fsp->function) {
+ if (die_compare_name(fn_die, fsp->function)) {
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ return 1;
+ }
+ } else {
+ /* With the line number, find the nearest declared DIE */
+ dwarf_decl_line(fn_die, &lno);
+ if (lno < fsp->line && fsp->diff > fsp->line - lno) {
+ /* Keep a candidate and continue */
+ fsp->diff = fsp->line - lno;
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ }
}
+ return 0;
+}
- for (i = 0; i < nlines && ret == 0; i++) {
- line = dwarf_onesrcline(lines, i);
- if (dwarf_lineno(line, &lineno) != 0 ||
- lineno != pf->lno)
- continue;
+/* Find an appropriate scope that fits the given conditions */
+static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
+{
+ struct find_scope_param fsp = {
+ .function = pf->pev->point.function,
+ .file = pf->fname,
+ .line = pf->lno,
+ .diff = INT_MAX,
+ .die_mem = die_mem,
+ .found = false,
+ };
+
+ cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
+
+ return fsp.found ? die_mem : NULL;
+}
- /* TODO: Get fileno from line, but how? */
- if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
- continue;
+static int probe_point_line_walker(const char *fname, int lineno,
+ Dwarf_Addr addr, void *data)
+{
+ struct probe_finder *pf = data;
+ Dwarf_Die *sc_die, die_mem;
+ int ret;
- if (dwarf_lineaddr(line, &addr) != 0) {
- pr_warning("Failed to get the address of the line.\n");
- return -ENOENT;
- }
- pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
- (int)i, lineno, (uintmax_t)addr);
- pf->addr = addr;
+ if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
+ return 0;
- ret = call_probe_finder(NULL, pf);
- /* Continuing, because target line might be inlined. */
+ pf->addr = addr;
+ sc_die = find_best_scope(pf, &die_mem);
+ if (!sc_die) {
+ pr_warning("Failed to find scope of probe point.\n");
+ return -ENOENT;
}
- return ret;
+
+ ret = call_probe_finder(sc_die, pf);
+
+	/* Continue if no error, because the line may also be in an inline function */
+ return ret < 0 ? ret : 0;
+}
+
+/* Find probe point from its line number */
+static int find_probe_point_by_line(struct probe_finder *pf)
+{
+ return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf);
}
/* Find lines which match lazy pattern */
-static int find_lazy_match_lines(struct list_head *head,
+static int find_lazy_match_lines(struct intlist *list,
const char *fname, const char *pat)
{
- char *fbuf, *p1, *p2;
- int fd, line, nlines = -1;
- struct stat st;
-
- fd = open(fname, O_RDONLY);
- if (fd < 0) {
- pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
+ FILE *fp;
+ char *line = NULL;
+ size_t line_len;
+ ssize_t len;
+ int count = 0, linenum = 1;
+
+ fp = fopen(fname, "r");
+ if (!fp) {
+ pr_warning("Failed to open %s: %s\n", fname, strerror(errno));
return -errno;
}
- if (fstat(fd, &st) < 0) {
- pr_warning("Failed to get the size of %s: %s\n",
- fname, strerror(errno));
- nlines = -errno;
- goto out_close;
- }
+ while ((len = getline(&line, &line_len, fp)) > 0) {
- nlines = -ENOMEM;
- fbuf = malloc(st.st_size + 2);
- if (fbuf == NULL)
- goto out_close;
- if (read(fd, fbuf, st.st_size) < 0) {
- pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
- nlines = -errno;
- goto out_free_fbuf;
- }
- fbuf[st.st_size] = '\n'; /* Dummy line */
- fbuf[st.st_size + 1] = '\0';
- p1 = fbuf;
- line = 1;
- nlines = 0;
- while ((p2 = strchr(p1, '\n')) != NULL) {
- *p2 = '\0';
- if (strlazymatch(p1, pat)) {
- line_list__add_line(head, line);
- nlines++;
+ if (line[len - 1] == '\n')
+ line[len - 1] = '\0';
+
+ if (strlazymatch(line, pat)) {
+ intlist__add(list, linenum);
+ count++;
}
- line++;
- p1 = p2 + 1;
+ linenum++;
}
-out_free_fbuf:
- free(fbuf);
-out_close:
- close(fd);
- return nlines;
+
+ if (ferror(fp))
+ count = -errno;
+ free(line);
+ fclose(fp);
+
+ if (count == 0)
+ pr_debug("No matched lines found in %s.\n", fname);
+ return count;
}
-/* Find probe points from lazy pattern */
-static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
+static int probe_point_lazy_walker(const char *fname, int lineno,
+ Dwarf_Addr addr, void *data)
{
- Dwarf_Lines *lines;
- Dwarf_Line *line;
- size_t nlines, i;
- Dwarf_Addr addr;
- Dwarf_Die die_mem;
- int lineno;
- int ret = 0;
+ struct probe_finder *pf = data;
+ Dwarf_Die *sc_die, die_mem;
+ int ret;
- if (list_empty(&pf->lcache)) {
- /* Matching lazy line pattern */
- ret = find_lazy_match_lines(&pf->lcache, pf->fname,
- pf->pev->point.lazy_line);
- if (ret == 0) {
- pr_debug("No matched lines found in %s.\n", pf->fname);
- return 0;
- } else if (ret < 0)
- return ret;
- }
+ if (!intlist__has_entry(pf->lcache, lineno) ||
+ strtailcmp(fname, pf->fname) != 0)
+ return 0;
- if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
- pr_warning("No source lines found in this CU.\n");
+ pr_debug("Probe line found: line:%d addr:0x%llx\n",
+ lineno, (unsigned long long)addr);
+ pf->addr = addr;
+ pf->lno = lineno;
+ sc_die = find_best_scope(pf, &die_mem);
+ if (!sc_die) {
+ pr_warning("Failed to find scope of probe point.\n");
return -ENOENT;
}
- for (i = 0; i < nlines && ret >= 0; i++) {
- line = dwarf_onesrcline(lines, i);
+ ret = call_probe_finder(sc_die, pf);
- if (dwarf_lineno(line, &lineno) != 0 ||
- !line_list__has_line(&pf->lcache, lineno))
- continue;
-
- /* TODO: Get fileno from line, but how? */
- if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
- continue;
-
- if (dwarf_lineaddr(line, &addr) != 0) {
- pr_debug("Failed to get the address of line %d.\n",
- lineno);
- continue;
- }
- if (sp_die) {
- /* Address filtering 1: does sp_die include addr? */
- if (!dwarf_haspc(sp_die, addr))
- continue;
- /* Address filtering 2: No child include addr? */
- if (die_find_inlinefunc(sp_die, addr, &die_mem))
- continue;
- }
+ /*
+	 * Continue if no error, because the lazy pattern may match
+	 * other lines as well.
+ */
+ return ret < 0 ? ret : 0;
+}
- pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
- (int)i, lineno, (unsigned long long)addr);
- pf->addr = addr;
+/* Find probe points from lazy pattern */
+static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ int ret = 0;
- ret = call_probe_finder(sp_die, pf);
- /* Continuing, because target line might be inlined. */
+ if (intlist__empty(pf->lcache)) {
+ /* Matching lazy line pattern */
+ ret = find_lazy_match_lines(pf->lcache, pf->fname,
+ pf->pev->point.lazy_line);
+ if (ret <= 0)
+ return ret;
}
- /* TODO: deallocate lines, but how? */
- return ret;
-}
-/* Callback parameter with return value */
-struct dwarf_callback_param {
- void *data;
- int retval;
-};
+ return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
+}
static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
{
- struct dwarf_callback_param *param = data;
- struct probe_finder *pf = param->data;
+ struct probe_finder *pf = data;
struct perf_probe_point *pp = &pf->pev->point;
Dwarf_Addr addr;
+ int ret;
if (pp->lazy_line)
- param->retval = find_probe_point_lazy(in_die, pf);
+ ret = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
if (dwarf_entrypc(in_die, &addr) != 0) {
- pr_warning("Failed to get entry pc of %s.\n",
+ pr_warning("Failed to get entry address of %s.\n",
dwarf_diename(in_die));
- param->retval = -ENOENT;
- return DWARF_CB_ABORT;
+ return -ENOENT;
}
pf->addr = addr;
pf->addr += pp->offset;
pr_debug("found inline addr: 0x%jx\n",
(uintmax_t)pf->addr);
- param->retval = call_probe_finder(in_die, pf);
- if (param->retval < 0)
- return DWARF_CB_ABORT;
+ ret = call_probe_finder(in_die, pf);
}
- return DWARF_CB_OK;
+ return ret;
}
+/* Callback parameter with return value for libdw */
+struct dwarf_callback_param {
+ void *data;
+ int retval;
+};
+
/* Search function from function name */
static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
{
@@ -1221,10 +896,14 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
struct perf_probe_point *pp = &pf->pev->point;
/* Check tag and diename */
- if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
+ if (!die_is_func_def(sp_die) ||
!die_compare_name(sp_die, pp->function))
return DWARF_CB_OK;
+ /* Check declared file */
+ if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
+ return DWARF_CB_OK;
+
pf->fname = dwarf_decl_file(sp_die);
if (pp->line) { /* Function relative line */
dwarf_decl_line(sp_die, &pf->lno);
@@ -1236,8 +915,8 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
param->retval = find_probe_point_lazy(sp_die, pf);
else {
if (dwarf_entrypc(sp_die, &pf->addr) != 0) {
- pr_warning("Failed to get entry pc of %s.\n",
- dwarf_diename(sp_die));
+ pr_warning("Failed to get entry address of "
+ "%s.\n", dwarf_diename(sp_die));
param->retval = -ENOENT;
return DWARF_CB_ABORT;
}
@@ -1245,14 +924,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
/* TODO: Check the address in this function */
param->retval = call_probe_finder(sp_die, pf);
}
- } else {
- struct dwarf_callback_param _param = {.data = (void *)pf,
- .retval = 0};
+ } else
/* Inlined function: search instances */
- dwarf_func_inline_instances(sp_die, probe_point_inline_cb,
- &_param);
- param->retval = _param.retval;
- }
+ param->retval = die_walk_instances(sp_die,
+ probe_point_inline_cb, (void *)pf);
return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
}
@@ -1265,37 +940,84 @@ static int find_probe_point_by_func(struct probe_finder *pf)
return _param.retval;
}
+struct pubname_callback_param {
+ char *function;
+ char *file;
+ Dwarf_Die *cu_die;
+ Dwarf_Die *sp_die;
+ int found;
+};
+
+static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
+{
+ struct pubname_callback_param *param = data;
+
+ if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) {
+ if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
+ return DWARF_CB_OK;
+
+ if (die_compare_name(param->sp_die, param->function)) {
+ if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
+ return DWARF_CB_OK;
+
+ if (param->file &&
+ strtailcmp(param->file, dwarf_decl_file(param->sp_die)))
+ return DWARF_CB_OK;
+
+ param->found = 1;
+ return DWARF_CB_ABORT;
+ }
+ }
+
+ return DWARF_CB_OK;
+}
+
/* Find probe points from debuginfo */
-static int find_probes(int fd, struct probe_finder *pf)
+static int debuginfo__find_probes(struct debuginfo *dbg,
+ struct probe_finder *pf)
{
struct perf_probe_point *pp = &pf->pev->point;
Dwarf_Off off, noff;
size_t cuhl;
Dwarf_Die *diep;
- Dwarf *dbg = NULL;
- Dwfl *dwfl;
- Dwarf_Addr bias; /* Currently ignored */
int ret = 0;
- dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
- if (!dbg) {
- pr_warning("No dwarf info found in the vmlinux - "
- "please rebuild with CONFIG_DEBUG_INFO=y.\n");
- return -EBADF;
- }
-
#if _ELFUTILS_PREREQ(0, 142)
/* Get the call frame information from this dwarf */
- pf->cfi = dwarf_getcfi(dbg);
+ pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
#endif
off = 0;
- line_list__init(&pf->lcache);
+ pf->lcache = intlist__new(NULL);
+ if (!pf->lcache)
+ return -ENOMEM;
+
+ /* Fastpath: lookup by function name from .debug_pubnames section */
+ if (pp->function) {
+ struct pubname_callback_param pubname_param = {
+ .function = pp->function,
+ .file = pp->file,
+ .cu_die = &pf->cu_die,
+ .sp_die = &pf->sp_die,
+ .found = 0,
+ };
+ struct dwarf_callback_param probe_param = {
+ .data = pf,
+ };
+
+ dwarf_getpubnames(dbg->dbg, pubname_search_cb,
+ &pubname_param, 0);
+ if (pubname_param.found) {
+ ret = probe_point_search_cb(&pf->sp_die, &probe_param);
+ if (ret)
+ goto found;
+ }
+ }
+
/* Loop on CUs (Compilation Unit) */
- while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
- ret >= 0) {
+ while (!dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
/* Get the DIE(Debugging Information Entry) of this CU */
- diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
+ diep = dwarf_offdie(dbg->dbg, off + cuhl, &pf->cu_die);
if (!diep)
continue;
@@ -1314,22 +1036,93 @@ static int find_probes(int fd, struct probe_finder *pf)
pf->lno = pp->line;
ret = find_probe_point_by_line(pf);
}
+ if (ret < 0)
+ break;
}
off = noff;
}
- line_list__free(&pf->lcache);
- if (dwfl)
- dwfl_end(dwfl);
+
+found:
+ intlist__delete(pf->lcache);
+ pf->lcache = NULL;
return ret;
}
+struct local_vars_finder {
+ struct probe_finder *pf;
+ struct perf_probe_arg *args;
+ int max_args;
+ int nargs;
+ int ret;
+};
+
+/* Collect available variables in this scope */
+static int copy_variables_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct local_vars_finder *vf = data;
+ struct probe_finder *pf = vf->pf;
+ int tag;
+
+ tag = dwarf_tag(die_mem);
+ if (tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) {
+ if (convert_variable_location(die_mem, vf->pf->addr,
+ vf->pf->fb_ops, &pf->sp_die,
+ NULL) == 0) {
+ vf->args[vf->nargs].var = (char *)dwarf_diename(die_mem);
+ if (vf->args[vf->nargs].var == NULL) {
+ vf->ret = -ENOMEM;
+ return DIE_FIND_CB_END;
+ }
+ pr_debug(" %s", vf->args[vf->nargs].var);
+ vf->nargs++;
+ }
+ }
+
+ if (dwarf_haspc(die_mem, vf->pf->addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
+ struct perf_probe_arg *args)
+{
+ Dwarf_Die die_mem;
+ int i;
+ int n = 0;
+ struct local_vars_finder vf = {.pf = pf, .args = args,
+ .max_args = MAX_PROBE_ARGS, .ret = 0};
+
+ for (i = 0; i < pf->pev->nargs; i++) {
+		/* var can never be NULL */
+ if (strcmp(pf->pev->args[i].var, "$vars") == 0) {
+ pr_debug("Expanding $vars into:");
+ vf.nargs = n;
+ /* Special local variables */
+ die_find_child(sc_die, copy_variables_cb, (void *)&vf,
+ &die_mem);
+ pr_debug(" (%d)\n", vf.nargs - n);
+ if (vf.ret < 0)
+ return vf.ret;
+ n = vf.nargs;
+ } else {
+ /* Copy normal argument */
+ args[n] = pf->pev->args[i];
+ n++;
+ }
+ }
+ return n;
+}
+
/* Add a found probe point into trace event list */
-static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
+static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct trace_event_finder *tf =
container_of(pf, struct trace_event_finder, pf);
struct probe_trace_event *tev;
+ struct perf_probe_arg *args;
int ret, i;
/* Check number of tevs */
@@ -1340,37 +1133,54 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf)
}
tev = &tf->tevs[tf->ntevs++];
- ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
- &tev->point);
+ /* Trace point should be converted from subprogram DIE */
+ ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
+ pf->pev->point.retprobe, &tev->point);
if (ret < 0)
return ret;
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
- /* Find each argument */
- tev->nargs = pf->pev->nargs;
- tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
- if (tev->args == NULL)
+	/* Expand special probe arguments if any exist */
+ args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
+ if (args == NULL)
return -ENOMEM;
- for (i = 0; i < pf->pev->nargs; i++) {
- pf->pvar = &pf->pev->args[i];
+
+ ret = expand_probe_args(sc_die, pf, args);
+ if (ret < 0)
+ goto end;
+
+ tev->nargs = ret;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ /* Find each argument */
+ for (i = 0; i < tev->nargs; i++) {
+ pf->pvar = &args[i];
pf->tvar = &tev->args[i];
- ret = find_variable(sp_die, pf);
+ /* Variable should be found from scope DIE */
+ ret = find_variable(sc_die, pf);
if (ret != 0)
- return ret;
+ break;
}
- return 0;
+end:
+ free(args);
+ return ret;
}
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
-int find_probe_trace_events(int fd, struct perf_probe_event *pev,
- struct probe_trace_event **tevs, int max_tevs)
+int debuginfo__find_trace_events(struct debuginfo *dbg,
+ struct perf_probe_event *pev,
+ struct probe_trace_event **tevs, int max_tevs)
{
struct trace_event_finder tf = {
.pf = {.pev = pev, .callback = add_probe_trace_event},
- .max_tevs = max_tevs};
+ .mod = dbg->mod, .max_tevs = max_tevs};
int ret;
/* Allocate result tevs array */
@@ -1381,10 +1191,9 @@ int find_probe_trace_events(int fd, struct perf_probe_event *pev,
tf.tevs = *tevs;
tf.ntevs = 0;
- ret = find_probes(fd, &tf.pf);
+ ret = debuginfo__find_probes(dbg, &tf.pf);
if (ret < 0) {
- free(*tevs);
- *tevs = NULL;
+ zfree(tevs);
return ret;
}
@@ -1407,7 +1216,8 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
if (tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) {
ret = convert_variable_location(die_mem, af->pf.addr,
- af->pf.fb_ops, NULL);
+ af->pf.fb_ops, &af->pf.sp_die,
+ NULL);
if (ret == 0) {
ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
pr_debug2("Add new var: %s\n", buf);
@@ -1423,13 +1233,13 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
}
/* Add a found vars into available variables list */
-static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
+static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct available_var_finder *af =
container_of(pf, struct available_var_finder, pf);
struct variable_list *vl;
- Dwarf_Die die_mem, *scopes = NULL;
- int ret, nscopes;
+ Dwarf_Die die_mem;
+ int ret;
/* Check number of tevs */
if (af->nvls == af->max_vls) {
@@ -1438,8 +1248,9 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
}
vl = &af->vls[af->nvls++];
- ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe,
- &vl->point);
+ /* Trace point should be converted from subprogram DIE */
+ ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
+ pf->pev->point.retprobe, &vl->point);
if (ret < 0)
return ret;
@@ -1451,19 +1262,14 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf)
if (vl->vars == NULL)
return -ENOMEM;
af->child = true;
- die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem);
+ die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
/* Find external variables */
if (!af->externs)
goto out;
/* Don't need to search child DIE for externs. */
af->child = false;
- nscopes = dwarf_getscopes_die(sp_die, &scopes);
- while (nscopes-- > 1)
- die_find_child(&scopes[nscopes], collect_variables_cb,
- (void *)af, &die_mem);
- if (scopes)
- free(scopes);
+ die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
out:
if (strlist__empty(vl->vars)) {
@@ -1474,13 +1280,19 @@ out:
return ret;
}
-/* Find available variables at given probe point */
-int find_available_vars_at(int fd, struct perf_probe_event *pev,
- struct variable_list **vls, int max_vls,
- bool externs)
+/*
+ * Find available variables at the given probe point.
+ * Return the number of found probe points. Return 0 if there is no
+ * matching probe point. Return <0 if an error occurs.
+ */
+int debuginfo__find_available_vars_at(struct debuginfo *dbg,
+ struct perf_probe_event *pev,
+ struct variable_list **vls,
+ int max_vls, bool externs)
{
struct available_var_finder af = {
.pf = {.pev = pev, .callback = add_available_vars},
+ .mod = dbg->mod,
.max_vls = max_vls, .externs = externs};
int ret;
@@ -1492,17 +1304,14 @@ int find_available_vars_at(int fd, struct perf_probe_event *pev,
af.vls = *vls;
af.nvls = 0;
- ret = find_probes(fd, &af.pf);
+ ret = debuginfo__find_probes(dbg, &af.pf);
if (ret < 0) {
/* Free vlist for error */
while (af.nvls--) {
- if (af.vls[af.nvls].point.symbol)
- free(af.vls[af.nvls].point.symbol);
- if (af.vls[af.nvls].vars)
- strlist__delete(af.vls[af.nvls].vars);
+ zfree(&af.vls[af.nvls].point.symbol);
+ strlist__delete(af.vls[af.nvls].vars);
}
- free(af.vls);
- *vls = NULL;
+ zfree(vls);
return ret;
}
@@ -1510,97 +1319,110 @@ int find_available_vars_at(int fd, struct perf_probe_event *pev,
}
/* Reverse search */
-int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt)
+int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+ struct perf_probe_point *ppt)
{
Dwarf_Die cudie, spdie, indie;
- Dwarf *dbg = NULL;
- Dwfl *dwfl = NULL;
- Dwarf_Line *line;
- Dwarf_Addr laddr, eaddr, bias = 0;
- const char *tmp;
- int lineno, ret = 0;
- bool found = false;
-
- /* Open the live linux kernel */
- dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias);
- if (!dbg) {
- pr_warning("No dwarf info found in the vmlinux - "
- "please rebuild with CONFIG_DEBUG_INFO=y.\n");
- ret = -EINVAL;
- goto end;
- }
+ Dwarf_Addr _addr = 0, baseaddr = 0;
+ const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
+ int baseline = 0, lineno = 0, ret = 0;
/* Adjust address with bias */
- addr += bias;
+ addr += dbg->bias;
+
/* Find cu die */
- if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) {
- pr_warning("No CU DIE is found at %lx\n", addr);
+ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr - dbg->bias, &cudie)) {
+ pr_warning("Failed to find debug information for address %lx\n",
+ addr);
ret = -EINVAL;
goto end;
}
- /* Find a corresponding line */
- line = dwarf_getsrc_die(&cudie, (Dwarf_Addr)addr);
- if (line) {
- if (dwarf_lineaddr(line, &laddr) == 0 &&
- (Dwarf_Addr)addr == laddr &&
- dwarf_lineno(line, &lineno) == 0) {
- tmp = dwarf_linesrc(line, NULL, NULL);
- if (tmp) {
- ppt->line = lineno;
- ppt->file = strdup(tmp);
- if (ppt->file == NULL) {
- ret = -ENOMEM;
- goto end;
- }
- found = true;
- }
+ /* Find a corresponding line (filename and lineno) */
+ cu_find_lineinfo(&cudie, addr, &fname, &lineno);
+ /* Don't care whether it failed or not */
+
+ /* Find a corresponding function (name, baseline and baseaddr) */
+ if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
+ /* Get function entry information */
+ func = basefunc = dwarf_diename(&spdie);
+ if (!func ||
+ dwarf_entrypc(&spdie, &baseaddr) != 0 ||
+ dwarf_decl_line(&spdie, &baseline) != 0) {
+ lineno = 0;
+ goto post;
}
- }
- /* Find a corresponding function */
- if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) {
- tmp = dwarf_diename(&spdie);
- if (!tmp || dwarf_entrypc(&spdie, &eaddr) != 0)
- goto end;
+ fname = dwarf_decl_file(&spdie);
+ if (addr == (unsigned long)baseaddr) {
+ /* Function entry - Relative line number is 0 */
+ lineno = baseline;
+ goto post;
+ }
- if (ppt->line) {
- if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
+ /* Track down the inline functions step by step */
+ while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
&indie)) {
- /* addr in an inline function */
- tmp = dwarf_diename(&indie);
- if (!tmp)
- goto end;
- ret = dwarf_decl_line(&indie, &lineno);
+ /* There is an inline function */
+ if (dwarf_entrypc(&indie, &_addr) == 0 &&
+ _addr == addr) {
+ /*
+ * addr is at an inline function entry.
+ * In this case, lineno should be the call-site
+ * line number. (overwrite lineinfo)
+ */
+ lineno = die_get_call_lineno(&indie);
+ fname = die_get_call_file(&indie);
+ break;
} else {
- if (eaddr == addr) { /* Function entry */
- lineno = ppt->line;
- ret = 0;
- } else
- ret = dwarf_decl_line(&spdie, &lineno);
- }
- if (ret == 0) {
- /* Make a relative line number */
- ppt->line -= lineno;
- goto found;
+ /*
+ * addr is in an inline function body.
+				 * Since lineno points to one of the lines
+ * of the inline function, baseline should
+ * be the entry line of the inline function.
+ */
+ tmp = dwarf_diename(&indie);
+ if (!tmp ||
+ dwarf_decl_line(&indie, &baseline) != 0)
+ break;
+ func = tmp;
+ spdie = indie;
}
}
- /* We don't have a line number, let's use offset */
- ppt->offset = addr - (unsigned long)eaddr;
-found:
- ppt->function = strdup(tmp);
+ /* Verify the lineno and baseline are in a same file */
+ tmp = dwarf_decl_file(&spdie);
+ if (!tmp || strcmp(tmp, fname) != 0)
+ lineno = 0;
+ }
+
+post:
+ /* Make a relative line number or an offset */
+ if (lineno)
+ ppt->line = lineno - baseline;
+ else if (basefunc) {
+ ppt->offset = addr - (unsigned long)baseaddr;
+ func = basefunc;
+ }
+
+ /* Duplicate strings */
+ if (func) {
+ ppt->function = strdup(func);
if (ppt->function == NULL) {
ret = -ENOMEM;
goto end;
}
- found = true;
}
-
+ if (fname) {
+ ppt->file = strdup(fname);
+ if (ppt->file == NULL) {
+ zfree(&ppt->function);
+ ret = -ENOMEM;
+ goto end;
+ }
+ }
end:
- if (dwfl)
- dwfl_end(dwfl);
- if (ret >= 0)
- ret = found ? 1 : 0;
+ if (ret == 0 && (fname || func))
+ ret = 1; /* Found a point */
return ret;
}
@@ -1614,127 +1436,71 @@ static int line_range_add_line(const char *src, unsigned int lineno,
if (lr->path == NULL)
return -ENOMEM;
}
- return line_list__add_line(&lr->line_list, lineno);
+ return intlist__add(lr->line_list, lineno);
}
-/* Search function declaration lines */
-static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
+static int line_range_walk_cb(const char *fname, int lineno,
+ Dwarf_Addr addr __maybe_unused,
+ void *data)
{
- struct dwarf_callback_param *param = data;
- struct line_finder *lf = param->data;
- const char *src;
- int lineno;
-
- src = dwarf_decl_file(sp_die);
- if (src && strtailcmp(src, lf->fname) != 0)
- return DWARF_CB_OK;
+ struct line_finder *lf = data;
+ int err;
- if (dwarf_decl_line(sp_die, &lineno) != 0 ||
+ if ((strtailcmp(fname, lf->fname) != 0) ||
(lf->lno_s > lineno || lf->lno_e < lineno))
- return DWARF_CB_OK;
+ return 0;
- param->retval = line_range_add_line(src, lineno, lf->lr);
- if (param->retval < 0)
- return DWARF_CB_ABORT;
- return DWARF_CB_OK;
-}
+ err = line_range_add_line(fname, lineno, lf->lr);
+ if (err < 0 && err != -EEXIST)
+ return err;
-static int find_line_range_func_decl_lines(struct line_finder *lf)
-{
- struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
- dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, &param, 0);
- return param.retval;
+ return 0;
}
/* Find line range from its line number */
static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
{
- Dwarf_Lines *lines;
- Dwarf_Line *line;
- size_t nlines, i;
- Dwarf_Addr addr;
- int lineno, ret = 0;
- const char *src;
- Dwarf_Die die_mem;
-
- line_list__init(&lf->lr->line_list);
- if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
- pr_warning("No source lines found in this CU.\n");
- return -ENOENT;
- }
-
- /* Search probable lines on lines list */
- for (i = 0; i < nlines; i++) {
- line = dwarf_onesrcline(lines, i);
- if (dwarf_lineno(line, &lineno) != 0 ||
- (lf->lno_s > lineno || lf->lno_e < lineno))
- continue;
-
- if (sp_die) {
- /* Address filtering 1: does sp_die include addr? */
- if (dwarf_lineaddr(line, &addr) != 0 ||
- !dwarf_haspc(sp_die, addr))
- continue;
-
- /* Address filtering 2: No child include addr? */
- if (die_find_inlinefunc(sp_die, addr, &die_mem))
- continue;
- }
-
- /* TODO: Get fileno from line, but how? */
- src = dwarf_linesrc(line, NULL, NULL);
- if (strtailcmp(src, lf->fname) != 0)
- continue;
-
- ret = line_range_add_line(src, lineno, lf->lr);
- if (ret < 0)
- return ret;
- }
+ int ret;
- /*
- * Dwarf lines doesn't include function declarations. We have to
- * check functions list or given function.
- */
- if (sp_die) {
- src = dwarf_decl_file(sp_die);
- if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
- (lf->lno_s <= lineno && lf->lno_e >= lineno))
- ret = line_range_add_line(src, lineno, lf->lr);
- } else
- ret = find_line_range_func_decl_lines(lf);
+ ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf);
/* Update status */
if (ret >= 0)
- if (!list_empty(&lf->lr->line_list))
+ if (!intlist__empty(lf->lr->line_list))
ret = lf->found = 1;
else
ret = 0; /* Lines are not found */
else {
- free(lf->lr->path);
- lf->lr->path = NULL;
+ zfree(&lf->lr->path);
}
return ret;
}
static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
{
- struct dwarf_callback_param *param = data;
+ int ret = find_line_range_by_line(in_die, data);
- param->retval = find_line_range_by_line(in_die, param->data);
- return DWARF_CB_ABORT; /* No need to find other instances */
+ /*
+	 * We have to check all instances of an inlined function, because
+	 * some execution paths can be optimized out depending on the
+	 * function arguments of each instance. However, if an error occurs,
+ * it should be handled by the caller.
+ */
+ return ret < 0 ? ret : 0;
}
-/* Search function from function name */
+/* Search function definition from function name */
static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
{
struct dwarf_callback_param *param = data;
struct line_finder *lf = param->data;
struct line_range *lr = lf->lr;
- pr_debug("find (%llx) %s\n",
- (unsigned long long)dwarf_dieoffset(sp_die),
- dwarf_diename(sp_die));
- if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+ /* Check declared file */
+ if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
+ return DWARF_CB_OK;
+
+ if (die_is_func_def(sp_die) &&
die_compare_name(sp_die, lr->function)) {
lf->fname = dwarf_decl_file(sp_die);
dwarf_decl_line(sp_die, &lr->offset);
@@ -1748,15 +1514,10 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
lr->start = lf->lno_s;
lr->end = lf->lno_e;
- if (dwarf_func_inline(sp_die)) {
- struct dwarf_callback_param _param;
- _param.data = (void *)lf;
- _param.retval = 0;
- dwarf_func_inline_instances(sp_die,
- line_range_inline_cb,
- &_param);
- param->retval = _param.retval;
- } else
+ if (dwarf_func_inline(sp_die))
+ param->retval = die_walk_instances(sp_die,
+ line_range_inline_cb, lf);
+ else
param->retval = find_line_range_by_line(sp_die, lf);
return DWARF_CB_ABORT;
}
@@ -1770,32 +1531,40 @@ static int find_line_range_by_func(struct line_finder *lf)
return param.retval;
}
-int find_line_range(int fd, struct line_range *lr)
+int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr)
{
struct line_finder lf = {.lr = lr, .found = 0};
int ret = 0;
Dwarf_Off off = 0, noff;
size_t cuhl;
Dwarf_Die *diep;
- Dwarf *dbg = NULL;
- Dwfl *dwfl;
- Dwarf_Addr bias; /* Currently ignored */
const char *comp_dir;
- dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias);
- if (!dbg) {
- pr_warning("No dwarf info found in the vmlinux - "
- "please rebuild with CONFIG_DEBUG_INFO=y.\n");
- return -EBADF;
+ /* Fastpath: lookup by function name from .debug_pubnames section */
+ if (lr->function) {
+ struct pubname_callback_param pubname_param = {
+ .function = lr->function, .file = lr->file,
+ .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0};
+ struct dwarf_callback_param line_range_param = {
+ .data = (void *)&lf, .retval = 0};
+
+ dwarf_getpubnames(dbg->dbg, pubname_search_cb,
+ &pubname_param, 0);
+ if (pubname_param.found) {
+ line_range_search_cb(&lf.sp_die, &line_range_param);
+ if (lf.found)
+ goto found;
+ }
}
/* Loop on CUs (Compilation Unit) */
while (!lf.found && ret >= 0) {
- if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0)
+ if (dwarf_nextcu(dbg->dbg, off, &noff, &cuhl,
+ NULL, NULL, NULL) != 0)
break;
/* Get the DIE(Debugging Information Entry) of this CU */
- diep = dwarf_offdie(dbg, off + cuhl, &lf.cu_die);
+ diep = dwarf_offdie(dbg->dbg, off + cuhl, &lf.cu_die);
if (!diep)
continue;
@@ -1817,6 +1586,7 @@ int find_line_range(int fd, struct line_range *lr)
off = noff;
}
+found:
/* Store comp_dir */
if (lf.found) {
comp_dir = cu_get_comp_dir(&lf.cu_die);
@@ -1828,7 +1598,6 @@ int find_line_range(int fd, struct line_range *lr)
}
pr_debug("path: %s\n", lr->path);
- dwfl_end(dwfl);
return (ret < 0) ? ret : lf.found;
}
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index bba69d45569..92590b2c7e1 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -3,11 +3,12 @@
#include <stdbool.h>
#include "util.h"
+#include "intlist.h"
#include "probe-event.h"
-#define MAX_PATH_LEN 256
#define MAX_PROBE_BUFFER 1024
#define MAX_PROBES 128
+#define MAX_PROBE_ARGS 128
static inline int is_c_varname(const char *name)
{
@@ -15,41 +16,58 @@ static inline int is_c_varname(const char *name)
return isalpha(name[0]) || name[0] == '_';
}
-#ifdef DWARF_SUPPORT
+#ifdef HAVE_DWARF_SUPPORT
+
+#include "dwarf-aux.h"
+
+/* TODO: export debuginfo data structure even if no dwarf support */
+
+/* debug information structure */
+struct debuginfo {
+ Dwarf *dbg;
+ Dwfl_Module *mod;
+ Dwfl *dwfl;
+ Dwarf_Addr bias;
+};
+
+/* This also tries to open distro debuginfo */
+extern struct debuginfo *debuginfo__new(const char *path);
+extern void debuginfo__delete(struct debuginfo *dbg);
+
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
-extern int find_probe_trace_events(int fd, struct perf_probe_event *pev,
- struct probe_trace_event **tevs,
- int max_tevs);
+extern int debuginfo__find_trace_events(struct debuginfo *dbg,
+ struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs);
/* Find a perf_probe_point from debuginfo */
-extern int find_perf_probe_point(unsigned long addr,
- struct perf_probe_point *ppt);
+extern int debuginfo__find_probe_point(struct debuginfo *dbg,
+ unsigned long addr,
+ struct perf_probe_point *ppt);
/* Find a line range */
-extern int find_line_range(int fd, struct line_range *lr);
+extern int debuginfo__find_line_range(struct debuginfo *dbg,
+ struct line_range *lr);
/* Find available variables */
-extern int find_available_vars_at(int fd, struct perf_probe_event *pev,
- struct variable_list **vls, int max_points,
- bool externs);
-
-#include <dwarf.h>
-#include <libdw.h>
-#include <libdwfl.h>
-#include <version.h>
+extern int debuginfo__find_available_vars_at(struct debuginfo *dbg,
+ struct perf_probe_event *pev,
+ struct variable_list **vls,
+ int max_points, bool externs);
struct probe_finder {
struct perf_probe_event *pev; /* Target probe event */
/* Callback when a probe point is found */
- int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf);
+ int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf);
/* For function searching */
int lno; /* Line number */
Dwarf_Addr addr; /* Address */
const char *fname; /* Real file name */
Dwarf_Die cu_die; /* Current CU */
- struct list_head lcache; /* Line cache for lazy match */
+ Dwarf_Die sp_die;
+ struct intlist *lcache; /* Line cache for lazy match */
/* For variable searching */
#if _ELFUTILS_PREREQ(0, 142)
@@ -62,6 +80,7 @@ struct probe_finder {
struct trace_event_finder {
struct probe_finder pf;
+ Dwfl_Module *mod; /* For solving symbols */
struct probe_trace_event *tevs; /* Found trace events */
int ntevs; /* Number of trace events */
int max_tevs; /* Max number of trace events */
@@ -69,6 +88,7 @@ struct trace_event_finder {
struct available_var_finder {
struct probe_finder pf;
+ Dwfl_Module *mod; /* For solving symbols */
struct variable_list *vls; /* Found variable lists */
int nvls; /* Number of variable lists */
int max_vls; /* Max no. of variable lists */
@@ -83,9 +103,10 @@ struct line_finder {
int lno_s; /* Start line number */
int lno_e; /* End line number */
Dwarf_Die cu_die; /* Current CU */
+ Dwarf_Die sp_die;
int found;
};
-#endif /* DWARF_SUPPORT */
+#endif /* HAVE_DWARF_SUPPORT */
#endif /*_PROBE_FINDER_H */
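For orientation, the renamed debuginfo API above is typically driven as in the sketch below. The helper name (show_line_range_of), its error handling and the whole-function bounds are illustrative assumptions; only the declarations in probe-finder.h, struct line_range from probe-event.h and intlist__new()/intlist__delete() from intlist.h are taken from the tree.

/* Hypothetical caller: find the source lines of a function in a binary. */
#include <errno.h>
#include <limits.h>
#include "intlist.h"
#include "probe-event.h"
#include "probe-finder.h"

static int show_line_range_of(const char *binary, char *function)
{
	struct line_range lr = {
		.function  = function,
		.end       = INT_MAX,	/* assumed "whole function" bound */
		.line_list = intlist__new(NULL),
	};
	struct debuginfo *dbg;
	int ret;

	if (lr.line_list == NULL)
		return -ENOMEM;

	dbg = debuginfo__new(binary);	/* also tries distro debuginfo */
	if (dbg == NULL) {
		intlist__delete(lr.line_list);
		return -ENOENT;
	}

	ret = debuginfo__find_line_range(dbg, &lr);	/* > 0 when found */
	debuginfo__delete(dbg);
	intlist__delete(lr.line_list);	/* lr.path/comp_dir cleanup omitted */
	return ret;
}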
diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c
index 13d36faf64e..daa17aeb6c6 100644
--- a/tools/perf/util/pstack.c
+++ b/tools/perf/util/pstack.c
@@ -17,59 +17,59 @@ struct pstack {
struct pstack *pstack__new(unsigned short max_nr_entries)
{
- struct pstack *self = zalloc((sizeof(*self) +
- max_nr_entries * sizeof(void *)));
- if (self != NULL)
- self->max_nr_entries = max_nr_entries;
- return self;
+ struct pstack *pstack = zalloc((sizeof(*pstack) +
+ max_nr_entries * sizeof(void *)));
+ if (pstack != NULL)
+ pstack->max_nr_entries = max_nr_entries;
+ return pstack;
}
-void pstack__delete(struct pstack *self)
+void pstack__delete(struct pstack *pstack)
{
- free(self);
+ free(pstack);
}
-bool pstack__empty(const struct pstack *self)
+bool pstack__empty(const struct pstack *pstack)
{
- return self->top == 0;
+ return pstack->top == 0;
}
-void pstack__remove(struct pstack *self, void *key)
+void pstack__remove(struct pstack *pstack, void *key)
{
- unsigned short i = self->top, last_index = self->top - 1;
+ unsigned short i = pstack->top, last_index = pstack->top - 1;
while (i-- != 0) {
- if (self->entries[i] == key) {
+ if (pstack->entries[i] == key) {
if (i < last_index)
- memmove(self->entries + i,
- self->entries + i + 1,
+ memmove(pstack->entries + i,
+ pstack->entries + i + 1,
(last_index - i) * sizeof(void *));
- --self->top;
+ --pstack->top;
return;
}
}
pr_err("%s: %p not on the pstack!\n", __func__, key);
}
-void pstack__push(struct pstack *self, void *key)
+void pstack__push(struct pstack *pstack, void *key)
{
- if (self->top == self->max_nr_entries) {
- pr_err("%s: top=%d, overflow!\n", __func__, self->top);
+ if (pstack->top == pstack->max_nr_entries) {
+ pr_err("%s: top=%d, overflow!\n", __func__, pstack->top);
return;
}
- self->entries[self->top++] = key;
+ pstack->entries[pstack->top++] = key;
}
-void *pstack__pop(struct pstack *self)
+void *pstack__pop(struct pstack *pstack)
{
void *ret;
- if (self->top == 0) {
+ if (pstack->top == 0) {
pr_err("%s: underflow!\n", __func__);
return NULL;
}
- ret = self->entries[--self->top];
- self->entries[self->top] = NULL;
+ ret = pstack->entries[--pstack->top];
+ pstack->entries[pstack->top] = NULL;
return ret;
}
diff --git a/tools/perf/util/pstack.h b/tools/perf/util/pstack.h
index 4cedea59f51..c3cb6584d52 100644
--- a/tools/perf/util/pstack.h
+++ b/tools/perf/util/pstack.h
@@ -5,10 +5,10 @@
struct pstack;
struct pstack *pstack__new(unsigned short max_nr_entries);
-void pstack__delete(struct pstack *self);
-bool pstack__empty(const struct pstack *self);
-void pstack__remove(struct pstack *self, void *key);
-void pstack__push(struct pstack *self, void *key);
-void *pstack__pop(struct pstack *self);
+void pstack__delete(struct pstack *pstack);
+bool pstack__empty(const struct pstack *pstack);
+void pstack__remove(struct pstack *pstack, void *key);
+void pstack__push(struct pstack *pstack, void *key);
+void *pstack__pop(struct pstack *pstack);
#endif /* _PERF_PSTACK_ */
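The pstack API itself is unchanged by the rename above; a minimal usage sketch follows (the function name is hypothetical, only the declarations above are assumed):

#include "pstack.h"

static void pstack_example(void)
{
	struct pstack *stack = pstack__new(2);	/* room for two entries */
	int first = 1, second = 2;

	if (stack == NULL)
		return;

	pstack__push(stack, &first);
	pstack__push(stack, &second);

	if (!pstack__empty(stack))
		pstack__pop(stack);		/* LIFO: yields &second */

	pstack__remove(stack, &first);		/* drop a specific key */
	pstack__delete(stack);
}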
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
new file mode 100644
index 00000000000..16a475a7d49
--- /dev/null
+++ b/tools/perf/util/python-ext-sources
@@ -0,0 +1,22 @@
+#
+# List of files needed by perf python extension
+#
+# Each source file must be placed on its own line so that it can be
+# processed by Makefile and util/setup.py accordingly.
+#
+
+util/python.c
+util/ctype.c
+util/evlist.c
+util/evsel.c
+util/cpumap.c
+util/hweight.c
+util/thread_map.c
+util/util.c
+util/xyarray.c
+util/cgroup.c
+util/rblist.c
+util/strlist.c
+../lib/api/fs/fs.c
+util/trace-event.c
+../../lib/rbtree.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
new file mode 100644
index 00000000000..122669c18ff
--- /dev/null
+++ b/tools/perf/util/python.c
@@ -0,0 +1,1074 @@
+#include <Python.h>
+#include <structmember.h>
+#include <inttypes.h>
+#include <poll.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "event.h"
+#include "cpumap.h"
+#include "thread_map.h"
+
+/*
+ * Support debug printing even though util/debug.c is not linked. That means
+ * implementing 'verbose' and 'eprintf'.
+ */
+int verbose;
+
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (verbose >= level) {
+ va_start(args, fmt);
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+/* Define PyVarObject_HEAD_INIT for python 2.5 */
+#ifndef PyVarObject_HEAD_INIT
+# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
+#endif
+
+PyMODINIT_FUNC initperf(void);
+
+#define member_def(type, member, ptype, help) \
+ { #member, ptype, \
+ offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
+ 0, help }
+
+#define sample_member_def(name, member, ptype, help) \
+ { #name, ptype, \
+ offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
+ 0, help }
+
+struct pyrf_event {
+ PyObject_HEAD
+ struct perf_sample sample;
+ union perf_event event;
+};
+
+#define sample_members \
+	sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"),		 \
+ sample_member_def(sample_pid, pid, T_INT, "event pid"), \
+ sample_member_def(sample_tid, tid, T_INT, "event tid"), \
+ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
+ sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
+ sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
+ sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
+ sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
+ sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
+
+static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
+
+static PyMemberDef pyrf_mmap_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(mmap_event, pid, T_UINT, "event pid"),
+ member_def(mmap_event, tid, T_UINT, "event tid"),
+ member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
+ member_def(mmap_event, len, T_ULONGLONG, "map length"),
+ member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"),
+ member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", "
+ "length: %#" PRIx64 ", offset: %#" PRIx64 ", "
+ "filename: %s }",
+ pevent->event.mmap.pid, pevent->event.mmap.tid,
+ pevent->event.mmap.start, pevent->event.mmap.len,
+ pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_mmap_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.mmap_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_mmap_event__doc,
+ .tp_members = pyrf_mmap_event__members,
+ .tp_repr = (reprfunc)pyrf_mmap_event__repr,
+};
+
+static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
+
+static PyMemberDef pyrf_task_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(fork_event, pid, T_UINT, "event pid"),
+ member_def(fork_event, ppid, T_UINT, "event ppid"),
+ member_def(fork_event, tid, T_UINT, "event tid"),
+ member_def(fork_event, ptid, T_UINT, "event ptid"),
+ member_def(fork_event, time, T_ULONGLONG, "timestamp"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
+ "ptid: %u, time: %" PRIu64 "}",
+ pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
+ pevent->event.fork.pid,
+ pevent->event.fork.ppid,
+ pevent->event.fork.tid,
+ pevent->event.fork.ptid,
+ pevent->event.fork.time);
+}
+
+static PyTypeObject pyrf_task_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.task_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_task_event__doc,
+ .tp_members = pyrf_task_event__members,
+ .tp_repr = (reprfunc)pyrf_task_event__repr,
+};
+
+static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
+
+static PyMemberDef pyrf_comm_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(comm_event, pid, T_UINT, "event pid"),
+ member_def(comm_event, tid, T_UINT, "event tid"),
+ member_def(comm_event, comm, T_STRING_INPLACE, "process name"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
+ pevent->event.comm.pid,
+ pevent->event.comm.tid,
+ pevent->event.comm.comm);
+}
+
+static PyTypeObject pyrf_comm_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.comm_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_comm_event__doc,
+ .tp_members = pyrf_comm_event__members,
+ .tp_repr = (reprfunc)pyrf_comm_event__repr,
+};
+
+static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
+
+static PyMemberDef pyrf_throttle_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ member_def(throttle_event, time, T_ULONGLONG, "timestamp"),
+ member_def(throttle_event, id, T_ULONGLONG, "event id"),
+ member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
+{
+ struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1);
+
+ return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
+ ", stream_id: %" PRIu64 " }",
+ pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
+ te->time, te->id, te->stream_id);
+}
+
+static PyTypeObject pyrf_throttle_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.throttle_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_throttle_event__doc,
+ .tp_members = pyrf_throttle_event__members,
+ .tp_repr = (reprfunc)pyrf_throttle_event__repr,
+};
+
+static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");
+
+static PyMemberDef pyrf_lost_event__members[] = {
+ sample_members
+ member_def(lost_event, id, T_ULONGLONG, "event id"),
+ member_def(lost_event, lost, T_ULONGLONG, "number of lost events"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: lost, id: %#" PRIx64 ", "
+ "lost: %#" PRIx64 " }",
+ pevent->event.lost.id, pevent->event.lost.lost) < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_lost_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.lost_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_lost_event__doc,
+ .tp_members = pyrf_lost_event__members,
+ .tp_repr = (reprfunc)pyrf_lost_event__repr,
+};
+
+static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");
+
+static PyMemberDef pyrf_read_event__members[] = {
+ sample_members
+ member_def(read_event, pid, T_UINT, "event pid"),
+ member_def(read_event, tid, T_UINT, "event tid"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
+{
+ return PyString_FromFormat("{ type: read, pid: %u, tid: %u }",
+ pevent->event.read.pid,
+ pevent->event.read.tid);
+ /*
+ * FIXME: return the array of read values,
+ * making this method useful ;-)
+ */
+}
+
+static PyTypeObject pyrf_read_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.read_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_read_event__doc,
+ .tp_members = pyrf_read_event__members,
+ .tp_repr = (reprfunc)pyrf_read_event__repr,
+};
+
+static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");
+
+static PyMemberDef pyrf_sample_event__members[] = {
+ sample_members
+ member_def(perf_event_header, type, T_UINT, "event type"),
+ { .name = NULL, },
+};
+
+static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
+{
+ PyObject *ret;
+ char *s;
+
+ if (asprintf(&s, "{ type: sample }") < 0) {
+ ret = PyErr_NoMemory();
+ } else {
+ ret = PyString_FromString(s);
+ free(s);
+ }
+ return ret;
+}
+
+static PyTypeObject pyrf_sample_event__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.sample_event",
+ .tp_basicsize = sizeof(struct pyrf_event),
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_sample_event__doc,
+ .tp_members = pyrf_sample_event__members,
+ .tp_repr = (reprfunc)pyrf_sample_event__repr,
+};
+
+static int pyrf_event__setup_types(void)
+{
+ int err;
+ pyrf_mmap_event__type.tp_new =
+ pyrf_task_event__type.tp_new =
+ pyrf_comm_event__type.tp_new =
+ pyrf_lost_event__type.tp_new =
+ pyrf_read_event__type.tp_new =
+ pyrf_sample_event__type.tp_new =
+ pyrf_throttle_event__type.tp_new = PyType_GenericNew;
+ err = PyType_Ready(&pyrf_mmap_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_lost_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_task_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_comm_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_throttle_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_read_event__type);
+ if (err < 0)
+ goto out;
+ err = PyType_Ready(&pyrf_sample_event__type);
+ if (err < 0)
+ goto out;
+out:
+ return err;
+}
+
+static PyTypeObject *pyrf_event__type[] = {
+ [PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
+ [PERF_RECORD_LOST] = &pyrf_lost_event__type,
+ [PERF_RECORD_COMM] = &pyrf_comm_event__type,
+ [PERF_RECORD_EXIT] = &pyrf_task_event__type,
+ [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
+ [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
+ [PERF_RECORD_FORK] = &pyrf_task_event__type,
+ [PERF_RECORD_READ] = &pyrf_read_event__type,
+ [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type,
+};
+
+static PyObject *pyrf_event__new(union perf_event *event)
+{
+ struct pyrf_event *pevent;
+ PyTypeObject *ptype;
+
+ if (event->header.type < PERF_RECORD_MMAP ||
+ event->header.type > PERF_RECORD_SAMPLE)
+ return NULL;
+
+ ptype = pyrf_event__type[event->header.type];
+ pevent = PyObject_New(struct pyrf_event, ptype);
+ if (pevent != NULL)
+ memcpy(&pevent->event, event, event->header.size);
+ return (PyObject *)pevent;
+}
+
+struct pyrf_cpu_map {
+ PyObject_HEAD
+
+ struct cpu_map *cpus;
+};
+
+static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
+ PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "cpustr", NULL };
+ char *cpustr = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
+ kwlist, &cpustr))
+ return -1;
+
+ pcpus->cpus = cpu_map__new(cpustr);
+ if (pcpus->cpus == NULL)
+ return -1;
+ return 0;
+}
+
+static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
+{
+ cpu_map__delete(pcpus->cpus);
+ pcpus->ob_type->tp_free((PyObject*)pcpus);
+}
+
+static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
+{
+ struct pyrf_cpu_map *pcpus = (void *)obj;
+
+ return pcpus->cpus->nr;
+}
+
+static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_cpu_map *pcpus = (void *)obj;
+
+ if (i >= pcpus->cpus->nr)
+ return NULL;
+
+ return Py_BuildValue("i", pcpus->cpus->map[i]);
+}
+
+static PySequenceMethods pyrf_cpu_map__sequence_methods = {
+ .sq_length = pyrf_cpu_map__length,
+ .sq_item = pyrf_cpu_map__item,
+};
+
+static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
+
+static PyTypeObject pyrf_cpu_map__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.cpu_map",
+ .tp_basicsize = sizeof(struct pyrf_cpu_map),
+ .tp_dealloc = (destructor)pyrf_cpu_map__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_cpu_map__doc,
+ .tp_as_sequence = &pyrf_cpu_map__sequence_methods,
+ .tp_init = (initproc)pyrf_cpu_map__init,
+};
+
+static int pyrf_cpu_map__setup_types(void)
+{
+ pyrf_cpu_map__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_cpu_map__type);
+}
+
+struct pyrf_thread_map {
+ PyObject_HEAD
+
+ struct thread_map *threads;
+};
+
+static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
+ PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "pid", "tid", "uid", NULL };
+ int pid = -1, tid = -1, uid = UINT_MAX;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
+ kwlist, &pid, &tid, &uid))
+ return -1;
+
+ pthreads->threads = thread_map__new(pid, tid, uid);
+ if (pthreads->threads == NULL)
+ return -1;
+ return 0;
+}
+
+static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
+{
+ thread_map__delete(pthreads->threads);
+ pthreads->ob_type->tp_free((PyObject*)pthreads);
+}
+
+static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
+{
+ struct pyrf_thread_map *pthreads = (void *)obj;
+
+ return pthreads->threads->nr;
+}
+
+static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_thread_map *pthreads = (void *)obj;
+
+ if (i >= pthreads->threads->nr)
+ return NULL;
+
+ return Py_BuildValue("i", pthreads->threads->map[i]);
+}
+
+static PySequenceMethods pyrf_thread_map__sequence_methods = {
+ .sq_length = pyrf_thread_map__length,
+ .sq_item = pyrf_thread_map__item,
+};
+
+static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
+
+static PyTypeObject pyrf_thread_map__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.thread_map",
+ .tp_basicsize = sizeof(struct pyrf_thread_map),
+ .tp_dealloc = (destructor)pyrf_thread_map__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_thread_map__doc,
+ .tp_as_sequence = &pyrf_thread_map__sequence_methods,
+ .tp_init = (initproc)pyrf_thread_map__init,
+};
+
+static int pyrf_thread_map__setup_types(void)
+{
+ pyrf_thread_map__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_thread_map__type);
+}
+
+struct pyrf_evsel {
+ PyObject_HEAD
+
+ struct perf_evsel evsel;
+};
+
+static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
+ };
+ static char *kwlist[] = {
+ "type",
+ "config",
+ "sample_freq",
+ "sample_period",
+ "sample_type",
+ "read_format",
+ "disabled",
+ "inherit",
+ "pinned",
+ "exclusive",
+ "exclude_user",
+ "exclude_kernel",
+ "exclude_hv",
+ "exclude_idle",
+ "mmap",
+ "comm",
+ "freq",
+ "inherit_stat",
+ "enable_on_exec",
+ "task",
+ "watermark",
+ "precise_ip",
+ "mmap_data",
+ "sample_id_all",
+ "wakeup_events",
+ "bp_type",
+ "bp_addr",
+ "bp_len",
+ NULL
+ };
+ u64 sample_period = 0;
+ u32 disabled = 0,
+ inherit = 0,
+ pinned = 0,
+ exclusive = 0,
+ exclude_user = 0,
+ exclude_kernel = 0,
+ exclude_hv = 0,
+ exclude_idle = 0,
+ mmap = 0,
+ comm = 0,
+ freq = 1,
+ inherit_stat = 0,
+ enable_on_exec = 0,
+ task = 0,
+ watermark = 0,
+ precise_ip = 0,
+ mmap_data = 0,
+ sample_id_all = 1;
+ int idx = 0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs,
+ "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
+ &attr.type, &attr.config, &attr.sample_freq,
+ &sample_period, &attr.sample_type,
+ &attr.read_format, &disabled, &inherit,
+ &pinned, &exclusive, &exclude_user,
+ &exclude_kernel, &exclude_hv, &exclude_idle,
+ &mmap, &comm, &freq, &inherit_stat,
+ &enable_on_exec, &task, &watermark,
+ &precise_ip, &mmap_data, &sample_id_all,
+ &attr.wakeup_events, &attr.bp_type,
+ &attr.bp_addr, &attr.bp_len, &idx))
+ return -1;
+
+ /* union... */
+ if (sample_period != 0) {
+ if (attr.sample_freq != 0)
+ return -1; /* FIXME: throw right exception */
+ attr.sample_period = sample_period;
+ }
+
+ /* Bitfields */
+ attr.disabled = disabled;
+ attr.inherit = inherit;
+ attr.pinned = pinned;
+ attr.exclusive = exclusive;
+ attr.exclude_user = exclude_user;
+ attr.exclude_kernel = exclude_kernel;
+ attr.exclude_hv = exclude_hv;
+ attr.exclude_idle = exclude_idle;
+ attr.mmap = mmap;
+ attr.comm = comm;
+ attr.freq = freq;
+ attr.inherit_stat = inherit_stat;
+ attr.enable_on_exec = enable_on_exec;
+ attr.task = task;
+ attr.watermark = watermark;
+ attr.precise_ip = precise_ip;
+ attr.mmap_data = mmap_data;
+ attr.sample_id_all = sample_id_all;
+
+ perf_evsel__init(&pevsel->evsel, &attr, idx);
+ return 0;
+}
+
+static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
+{
+ perf_evsel__exit(&pevsel->evsel);
+ pevsel->ob_type->tp_free((PyObject*)pevsel);
+}
+
+static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evsel *evsel = &pevsel->evsel;
+ struct cpu_map *cpus = NULL;
+ struct thread_map *threads = NULL;
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ int group = 0, inherit = 0;
+ static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
+ &pcpus, &pthreads, &group, &inherit))
+ return NULL;
+
+ if (pthreads != NULL)
+ threads = ((struct pyrf_thread_map *)pthreads)->threads;
+
+ if (pcpus != NULL)
+ cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+
+ evsel->attr.inherit = inherit;
+ /*
+	 * This will group just the fds for this single evsel; to group
+	 * multiple events, use evlist.open().
+ */
+ if (perf_evsel__open(evsel, cpus, threads) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef pyrf_evsel__methods[] = {
+ {
+ .ml_name = "open",
+ .ml_meth = (PyCFunction)pyrf_evsel__open,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("open the event selector file descriptor table.")
+ },
+ { .ml_name = NULL, }
+};
+
+static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evsel__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.evsel",
+ .tp_basicsize = sizeof(struct pyrf_evsel),
+ .tp_dealloc = (destructor)pyrf_evsel__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_doc = pyrf_evsel__doc,
+ .tp_methods = pyrf_evsel__methods,
+ .tp_init = (initproc)pyrf_evsel__init,
+};
+
+static int pyrf_evsel__setup_types(void)
+{
+ pyrf_evsel__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_evsel__type);
+}
+
+struct pyrf_evlist {
+ PyObject_HEAD
+
+ struct perf_evlist evlist;
+};
+
+static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs __maybe_unused)
+{
+ PyObject *pcpus = NULL, *pthreads = NULL;
+ struct cpu_map *cpus;
+ struct thread_map *threads;
+
+ if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
+ return -1;
+
+ threads = ((struct pyrf_thread_map *)pthreads)->threads;
+ cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
+ perf_evlist__init(&pevlist->evlist, cpus, threads);
+ return 0;
+}
+
+static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
+{
+ perf_evlist__exit(&pevlist->evlist);
+ pevlist->ob_type->tp_free((PyObject*)pevlist);
+}
+
+static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ static char *kwlist[] = { "pages", "overwrite", NULL };
+ int pages = 128, overwrite = false;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
+ &pages, &overwrite))
+ return NULL;
+
+ if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ static char *kwlist[] = { "timeout", NULL };
+ int timeout = -1, n;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
+ return NULL;
+
+ n = poll(evlist->pollfd, evlist->nr_fds, timeout);
+ if (n < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ return Py_BuildValue("i", n);
+}
+
+static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
+ PyObject *args __maybe_unused,
+ PyObject *kwargs __maybe_unused)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ PyObject *list = PyList_New(0);
+ int i;
+
+ for (i = 0; i < evlist->nr_fds; ++i) {
+ PyObject *file;
+ FILE *fp = fdopen(evlist->pollfd[i].fd, "r");
+
+ if (fp == NULL)
+ goto free_list;
+
+ file = PyFile_FromFile(fp, "perf", "r", NULL);
+ if (file == NULL)
+ goto free_list;
+
+ if (PyList_Append(list, file) != 0) {
+ Py_DECREF(file);
+ goto free_list;
+ }
+
+ Py_DECREF(file);
+ }
+
+ return list;
+free_list:
+ return PyErr_NoMemory();
+}
+
+
+static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
+ PyObject *args,
+ PyObject *kwargs __maybe_unused)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ PyObject *pevsel;
+ struct perf_evsel *evsel;
+
+ if (!PyArg_ParseTuple(args, "O", &pevsel))
+ return NULL;
+
+ Py_INCREF(pevsel);
+ evsel = &((struct pyrf_evsel *)pevsel)->evsel;
+ evsel->idx = evlist->nr_entries;
+ perf_evlist__add(evlist, evsel);
+
+ return Py_BuildValue("i", evlist->nr_entries);
+}
+
+static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ union perf_event *event;
+ int sample_id_all = 1, cpu;
+ static char *kwlist[] = { "cpu", "sample_id_all", NULL };
+ int err;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
+ &cpu, &sample_id_all))
+ return NULL;
+
+ event = perf_evlist__mmap_read(evlist, cpu);
+ if (event != NULL) {
+ PyObject *pyevent = pyrf_event__new(event);
+ struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
+
+ perf_evlist__mmap_consume(evlist, cpu);
+
+ if (pyevent == NULL)
+ return PyErr_NoMemory();
+
+ err = perf_evlist__parse_sample(evlist, event, &pevent->sample);
+ if (err)
+ return PyErr_Format(PyExc_OSError,
+ "perf: can't parse sample, err=%d", err);
+ return pyevent;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
+ PyObject *args, PyObject *kwargs)
+{
+ struct perf_evlist *evlist = &pevlist->evlist;
+ int group = 0;
+ static char *kwlist[] = { "group", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group))
+ return NULL;
+
+ if (group)
+ perf_evlist__set_leader(evlist);
+
+ if (perf_evlist__open(evlist) < 0) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+static PyMethodDef pyrf_evlist__methods[] = {
+ {
+ .ml_name = "mmap",
+ .ml_meth = (PyCFunction)pyrf_evlist__mmap,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("mmap the file descriptor table.")
+ },
+ {
+ .ml_name = "open",
+ .ml_meth = (PyCFunction)pyrf_evlist__open,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("open the file descriptors.")
+ },
+ {
+ .ml_name = "poll",
+ .ml_meth = (PyCFunction)pyrf_evlist__poll,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("poll the file descriptor table.")
+ },
+ {
+ .ml_name = "get_pollfd",
+ .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("get the poll file descriptor table.")
+ },
+ {
+ .ml_name = "add",
+ .ml_meth = (PyCFunction)pyrf_evlist__add,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("adds an event selector to the list.")
+ },
+ {
+ .ml_name = "read_on_cpu",
+ .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
+ .ml_flags = METH_VARARGS | METH_KEYWORDS,
+ .ml_doc = PyDoc_STR("reads an event.")
+ },
+ { .ml_name = NULL, }
+};
+
+static Py_ssize_t pyrf_evlist__length(PyObject *obj)
+{
+ struct pyrf_evlist *pevlist = (void *)obj;
+
+ return pevlist->evlist.nr_entries;
+}
+
+static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
+{
+ struct pyrf_evlist *pevlist = (void *)obj;
+ struct perf_evsel *pos;
+
+ if (i >= pevlist->evlist.nr_entries)
+ return NULL;
+
+ evlist__for_each(&pevlist->evlist, pos) {
+ if (i-- == 0)
+ break;
+ }
+
+ return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
+}
+
+static PySequenceMethods pyrf_evlist__sequence_methods = {
+ .sq_length = pyrf_evlist__length,
+ .sq_item = pyrf_evlist__item,
+};
+
+static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
+
+static PyTypeObject pyrf_evlist__type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "perf.evlist",
+ .tp_basicsize = sizeof(struct pyrf_evlist),
+ .tp_dealloc = (destructor)pyrf_evlist__delete,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+ .tp_as_sequence = &pyrf_evlist__sequence_methods,
+ .tp_doc = pyrf_evlist__doc,
+ .tp_methods = pyrf_evlist__methods,
+ .tp_init = (initproc)pyrf_evlist__init,
+};
+
+static int pyrf_evlist__setup_types(void)
+{
+ pyrf_evlist__type.tp_new = PyType_GenericNew;
+ return PyType_Ready(&pyrf_evlist__type);
+}
+
+static struct {
+ const char *name;
+ int value;
+} perf__constants[] = {
+ { "TYPE_HARDWARE", PERF_TYPE_HARDWARE },
+ { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE },
+ { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT },
+ { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE },
+ { "TYPE_RAW", PERF_TYPE_RAW },
+ { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT },
+
+ { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES },
+ { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS },
+ { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES },
+ { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES },
+ { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES },
+ { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES },
+ { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D },
+ { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I },
+ { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL },
+ { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB },
+ { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB },
+ { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU },
+ { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ },
+ { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE },
+ { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH },
+ { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
+ { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS },
+
+ { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+ { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+
+ { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK },
+ { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK },
+ { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS },
+ { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES },
+ { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS },
+ { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN },
+ { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ },
+ { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
+ { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
+ { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY },
+
+ { "SAMPLE_IP", PERF_SAMPLE_IP },
+ { "SAMPLE_TID", PERF_SAMPLE_TID },
+ { "SAMPLE_TIME", PERF_SAMPLE_TIME },
+ { "SAMPLE_ADDR", PERF_SAMPLE_ADDR },
+ { "SAMPLE_READ", PERF_SAMPLE_READ },
+ { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN },
+ { "SAMPLE_ID", PERF_SAMPLE_ID },
+ { "SAMPLE_CPU", PERF_SAMPLE_CPU },
+ { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD },
+ { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID },
+ { "SAMPLE_RAW", PERF_SAMPLE_RAW },
+
+ { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED },
+ { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING },
+ { "FORMAT_ID", PERF_FORMAT_ID },
+ { "FORMAT_GROUP", PERF_FORMAT_GROUP },
+
+ { "RECORD_MMAP", PERF_RECORD_MMAP },
+ { "RECORD_LOST", PERF_RECORD_LOST },
+ { "RECORD_COMM", PERF_RECORD_COMM },
+ { "RECORD_EXIT", PERF_RECORD_EXIT },
+ { "RECORD_THROTTLE", PERF_RECORD_THROTTLE },
+ { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE },
+ { "RECORD_FORK", PERF_RECORD_FORK },
+ { "RECORD_READ", PERF_RECORD_READ },
+ { "RECORD_SAMPLE", PERF_RECORD_SAMPLE },
+ { .name = NULL, },
+};
+
+static PyMethodDef perf__methods[] = {
+ { .ml_name = NULL, }
+};
+
+PyMODINIT_FUNC initperf(void)
+{
+ PyObject *obj;
+ int i;
+ PyObject *dict, *module = Py_InitModule("perf", perf__methods);
+
+ if (module == NULL ||
+ pyrf_event__setup_types() < 0 ||
+ pyrf_evlist__setup_types() < 0 ||
+ pyrf_evsel__setup_types() < 0 ||
+ pyrf_thread_map__setup_types() < 0 ||
+ pyrf_cpu_map__setup_types() < 0)
+ return;
+
+	/* The page_size is placed in the util object. */
+ page_size = sysconf(_SC_PAGE_SIZE);
+
+ Py_INCREF(&pyrf_evlist__type);
+ PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
+
+ Py_INCREF(&pyrf_evsel__type);
+ PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
+
+ Py_INCREF(&pyrf_thread_map__type);
+ PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
+
+ Py_INCREF(&pyrf_cpu_map__type);
+ PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
+
+ dict = PyModule_GetDict(module);
+ if (dict == NULL)
+ goto error;
+
+ for (i = 0; perf__constants[i].name != NULL; i++) {
+ obj = PyInt_FromLong(perf__constants[i].value);
+ if (obj == NULL)
+ goto error;
+ PyDict_SetItemString(dict, perf__constants[i].name, obj);
+ Py_DECREF(obj);
+ }
+
+error:
+ if (PyErr_Occurred())
+ PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
+}
+
+/*
+ * Dummy, to avoid dragging all the test_attr infrastructure into the python
+ * binding.
+ */
+void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
+ int fd, int group_fd, unsigned long flags)
+{
+}
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
new file mode 100644
index 00000000000..0dfe27d9945
--- /dev/null
+++ b/tools/perf/util/rblist.c
@@ -0,0 +1,128 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "rblist.h"
+
+int rblist__add_node(struct rblist *rblist, const void *new_entry)
+{
+ struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node *parent = NULL, *new_node;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+
+ rc = rblist->node_cmp(parent, new_entry);
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ new_node = rblist->node_new(rblist, new_entry);
+ if (new_node == NULL)
+ return -ENOMEM;
+
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &rblist->entries);
+ ++rblist->nr_entries;
+
+ return 0;
+}
+
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
+{
+ rb_erase(rb_node, &rblist->entries);
+ --rblist->nr_entries;
+ rblist->node_delete(rblist, rb_node);
+}
+
+static struct rb_node *__rblist__findnew(struct rblist *rblist,
+ const void *entry,
+ bool create)
+{
+ struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node *parent = NULL, *new_node = NULL;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+
+ rc = rblist->node_cmp(parent, entry);
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return parent;
+ }
+
+ if (create) {
+ new_node = rblist->node_new(rblist, entry);
+ if (new_node) {
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &rblist->entries);
+ ++rblist->nr_entries;
+ }
+ }
+
+ return new_node;
+}
+
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+{
+ return __rblist__findnew(rblist, entry, false);
+}
+
+struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
+{
+ return __rblist__findnew(rblist, entry, true);
+}
+
+void rblist__init(struct rblist *rblist)
+{
+ if (rblist != NULL) {
+ rblist->entries = RB_ROOT;
+ rblist->nr_entries = 0;
+ }
+
+ return;
+}
+
+void rblist__delete(struct rblist *rblist)
+{
+ if (rblist != NULL) {
+ struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+ while (next) {
+ pos = next;
+ next = rb_next(pos);
+ rblist__remove_node(rblist, pos);
+ }
+ free(rblist);
+ }
+}
+
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
+{
+ struct rb_node *node;
+
+ for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ if (!idx--)
+ return node;
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
new file mode 100644
index 00000000000..ff9913b994c
--- /dev/null
+++ b/tools/perf/util/rblist.h
@@ -0,0 +1,48 @@
+#ifndef __PERF_RBLIST_H
+#define __PERF_RBLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+/*
+ * create node structs of the form:
+ * struct my_node {
+ * struct rb_node rb_node;
+ * ... my data ...
+ * };
+ *
+ * create list structs of the form:
+ * struct mylist {
+ * struct rblist rblist;
+ * ... my data ...
+ * };
+ */
+
+struct rblist {
+ struct rb_root entries;
+ unsigned int nr_entries;
+
+ int (*node_cmp)(struct rb_node *rbn, const void *entry);
+ struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
+ void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node);
+};
+
+void rblist__init(struct rblist *rblist);
+void rblist__delete(struct rblist *rblist);
+int rblist__add_node(struct rblist *rblist, const void *new_entry);
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
+
+static inline bool rblist__empty(const struct rblist *rblist)
+{
+ return rblist->nr_entries == 0;
+}
+
+static inline unsigned int rblist__nr_entries(const struct rblist *rblist)
+{
+ return rblist->nr_entries;
+}
+
+#endif /* __PERF_RBLIST_H */
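Following the usage comment at the top of this header, here is a hedged sketch of what an embedding node type and its callbacks could look like. The my_* names are purely illustrative; strlist and intlist are the real in-tree users of this pattern.

#include <stdlib.h>
#include "rblist.h"

struct my_node {
	struct rb_node rb_node;		/* embedded so rb_entry() works */
	int key;
};

static int my_node_cmp(struct rb_node *rbn, const void *entry)
{
	struct my_node *node = rb_entry(rbn, struct my_node, rb_node);

	return node->key - *(const int *)entry;
}

static struct rb_node *my_node_new(struct rblist *rblist, const void *entry)
{
	struct my_node *node = calloc(1, sizeof(*node));

	(void)rblist;
	if (node == NULL)
		return NULL;
	node->key = *(const int *)entry;
	return &node->rb_node;
}

static void my_node_delete(struct rblist *rblist, struct rb_node *rb_node)
{
	(void)rblist;
	free(rb_entry(rb_node, struct my_node, rb_node));
}

static void my_list_example(void)
{
	struct rblist *rblist = calloc(1, sizeof(*rblist));
	int key = 42;

	if (rblist == NULL)
		return;

	rblist__init(rblist);
	rblist->node_cmp    = my_node_cmp;
	rblist->node_new    = my_node_new;
	rblist->node_delete = my_node_delete;

	rblist__add_node(rblist, &key);	/* returns -EEXIST on duplicates */
	rblist__delete(rblist);		/* removes every node, then frees rblist */
}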
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
new file mode 100644
index 00000000000..049e0a09ccd
--- /dev/null
+++ b/tools/perf/util/record.c
@@ -0,0 +1,215 @@
+#include "evlist.h"
+#include "evsel.h"
+#include "cpumap.h"
+#include "parse-events.h"
+#include <api/fs/fs.h>
+#include "util.h"
+
+typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
+
+static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ int err = -EAGAIN, fd;
+
+ evlist = perf_evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ if (parse_events(evlist, str))
+ goto out_delete;
+
+ evsel = perf_evlist__first(evlist);
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd < 0)
+ goto out_delete;
+ close(fd);
+
+ fn(evsel);
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd < 0) {
+ if (errno == EINVAL)
+ err = -EINVAL;
+ goto out_delete;
+ }
+ close(fd);
+ err = 0;
+
+out_delete:
+ perf_evlist__delete(evlist);
+ return err;
+}
+
+static bool perf_probe_api(setup_probe_fn_t fn)
+{
+ const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL};
+ struct cpu_map *cpus;
+ int cpu, ret, i = 0;
+
+ cpus = cpu_map__new(NULL);
+ if (!cpus)
+ return false;
+ cpu = cpus->map[0];
+ cpu_map__delete(cpus);
+
+ do {
+ ret = perf_do_probe_api(fn, cpu, try[i++]);
+ if (!ret)
+ return true;
+ } while (ret == -EAGAIN && try[i]);
+
+ return false;
+}
+
+static void perf_probe_sample_identifier(struct perf_evsel *evsel)
+{
+ evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
+}
+
+bool perf_can_sample_identifier(void)
+{
+ return perf_probe_api(perf_probe_sample_identifier);
+}
+
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
+{
+ struct perf_evsel *evsel;
+ bool use_sample_identifier = false;
+
+ /*
+ * Set the evsel leader links before we configure attributes,
+ * since some might depend on this info.
+ */
+ if (opts->group)
+ perf_evlist__set_leader(evlist);
+
+ if (evlist->cpus->map[0] < 0)
+ opts->no_inherit = true;
+
+ evlist__for_each(evlist, evsel)
+ perf_evsel__config(evsel, opts);
+
+ if (evlist->nr_entries > 1) {
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.sample_type == first->attr.sample_type)
+ continue;
+ use_sample_identifier = perf_can_sample_identifier();
+ break;
+ }
+ evlist__for_each(evlist, evsel)
+ perf_evsel__set_sample_id(evsel, use_sample_identifier);
+ }
+
+ perf_evlist__set_id_pos(evlist);
+}
+
+static int get_max_rate(unsigned int *rate)
+{
+ char path[PATH_MAX];
+ const char *procfs = procfs__mountpoint();
+
+ if (!procfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/sys/kernel/perf_event_max_sample_rate", procfs);
+
+ return filename__read_int(path, (int *) rate);
+}
+
+static int record_opts__config_freq(struct record_opts *opts)
+{
+ bool user_freq = opts->user_freq != UINT_MAX;
+ unsigned int max_rate;
+
+ if (opts->user_interval != ULLONG_MAX)
+ opts->default_interval = opts->user_interval;
+ if (user_freq)
+ opts->freq = opts->user_freq;
+
+ /*
+ * User specified count overrides default frequency.
+ */
+ if (opts->default_interval)
+ opts->freq = 0;
+ else if (opts->freq) {
+ opts->default_interval = opts->freq;
+ } else {
+ pr_err("frequency and count are zero, aborting\n");
+ return -1;
+ }
+
+ if (get_max_rate(&max_rate))
+ return 0;
+
+ /*
+ * User specified frequency is over current maximum.
+ */
+ if (user_freq && (max_rate < opts->freq)) {
+ pr_err("Maximum frequency rate (%u) reached.\n"
+ "Please use -F freq option with lower value or consider\n"
+ "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
+ max_rate);
+ return -1;
+ }
+
+ /*
+ * Default frequency is over current maximum.
+ */
+ if (max_rate < opts->freq) {
+ pr_warning("Lowering default frequency rate to %u.\n"
+ "Please consider tweaking "
+ "/proc/sys/kernel/perf_event_max_sample_rate.\n",
+ max_rate);
+ opts->freq = max_rate;
+ }
+
+ return 0;
+}
+
+int record_opts__config(struct record_opts *opts)
+{
+ return record_opts__config_freq(opts);
+}
+
+bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
+{
+ struct perf_evlist *temp_evlist;
+ struct perf_evsel *evsel;
+ int err, fd, cpu;
+ bool ret = false;
+
+ temp_evlist = perf_evlist__new();
+ if (!temp_evlist)
+ return false;
+
+ err = parse_events(temp_evlist, str);
+ if (err)
+ goto out_delete;
+
+ evsel = perf_evlist__last(temp_evlist);
+
+ if (!evlist || cpu_map__empty(evlist->cpus)) {
+ struct cpu_map *cpus = cpu_map__new(NULL);
+
+ cpu = cpus ? cpus->map[0] : 0;
+ cpu_map__delete(cpus);
+ } else {
+ cpu = evlist->cpus->map[0];
+ }
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd >= 0) {
+ close(fd);
+ ret = true;
+ }
+
+out_delete:
+ perf_evlist__delete(temp_evlist);
+ return ret;
+}
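To illustrate the precedence applied by record_opts__config_freq() above (a user-given interval wins over a user-given frequency, which wins over the built-in default, and the result is capped at perf_event_max_sample_rate), a hypothetical caller could look like the sketch below; the perf.h include is an assumption about where struct record_opts and the record_opts__config() prototype live.

#include <limits.h>
#include "perf.h"	/* assumed: struct record_opts, record_opts__config() */

/* Hypothetical helper: sample by frequency only, never by period. */
static int setup_freq_sampling(struct record_opts *opts, unsigned int hz)
{
	opts->user_interval = ULLONG_MAX;	/* "not given": stay on the freq path */
	opts->user_freq = hz;			/* UINT_MAX would mean "not given" */

	/* May lower opts->freq to perf_event_max_sample_rate. */
	return record_opts__config(opts);
}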
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b059dc50cc2..af7da565a75 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -1,5 +1,5 @@
/*
- * trace-event-perl. Feed perf trace events to an embedded Perl interpreter.
+ * trace-event-perl. Feed perf script events to an embedded Perl interpreter.
*
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
*
@@ -25,13 +25,16 @@
#include <ctype.h>
#include <errno.h>
-#include "../../perf.h"
#include "../util.h"
-#include "../trace-event.h"
-
#include <EXTERN.h>
#include <perl.h>
+#include "../../perf.h"
+#include "../thread.h"
+#include "../event.h"
+#include "../trace-event.h"
+#include "../evsel.h"
+
void boot_Perf__Trace__Context(pTHX_ CV *cv);
void boot_DynaLoader(pTHX_ CV *cv);
typedef PerlInterpreter * INTERP;
@@ -53,7 +56,7 @@ INTERP my_perl;
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
-struct event *events[FTRACE_MAX_EVENT];
+struct event_format *events[FTRACE_MAX_EVENT];
extern struct scripting_context *scripting_context;
@@ -178,7 +181,7 @@ static void define_flag_field(const char *ev_name,
LEAVE;
}
-static void define_event_symbols(struct event *event,
+static void define_event_symbols(struct event_format *event,
const char *ev_name,
struct print_arg *args)
{
@@ -191,8 +194,7 @@ static void define_event_symbols(struct event *event,
zero_flag_atom = 0;
break;
case PRINT_FIELD:
- if (cur_field_name)
- free(cur_field_name);
+ free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case PRINT_FLAGS:
@@ -206,7 +208,14 @@ static void define_event_symbols(struct event *event,
define_symbolic_values(args->symbol.symbols, ev_name,
cur_field_name);
break;
+ case PRINT_HEX:
+ define_event_symbols(event, ev_name, args->hex.field);
+ define_event_symbols(event, ev_name, args->hex.size);
+ break;
+ case PRINT_BSTRING:
+ case PRINT_DYNAMIC_ARRAY:
case PRINT_STRING:
+ case PRINT_BITMASK:
break;
case PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
@@ -217,7 +226,9 @@ static void define_event_symbols(struct event *event,
define_event_symbols(event, ev_name, args->op.left);
define_event_symbols(event, ev_name, args->op.right);
break;
+ case PRINT_FUNC:
default:
+ pr_err("Unsupported print arg type\n");
/* we should warn... */
return;
}
@@ -226,15 +237,16 @@ static void define_event_symbols(struct event *event,
define_event_symbols(event, ev_name, args->next);
}
-static inline struct event *find_cache_event(int type)
+static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
- struct event *event;
+ struct event_format *event;
+ int type = evsel->attr.config;
if (events[type])
return events[type];
- events[type] = event = trace_find_event(type);
+ events[type] = event = evsel->tp_format;
if (!event)
return NULL;
@@ -245,27 +257,31 @@ static inline struct event *find_cache_event(int type)
return event;
}
-static void perl_process_event(int cpu, void *data,
- int size __unused,
- unsigned long long nsecs, char *comm)
+static void perl_process_tracepoint(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread)
{
struct format_field *field;
static char handler[256];
unsigned long long val;
unsigned long s, ns;
- struct event *event;
- int type;
+ struct event_format *event;
int pid;
+ int cpu = sample->cpu;
+ void *data = sample->raw_data;
+ unsigned long long nsecs = sample->time;
+ const char *comm = thread__comm_str(thread);
dSP;
- type = trace_parse_common_type(data);
+ if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
+ return;
- event = find_cache_event(type);
+ event = find_cache_event(evsel);
if (!event)
- die("ug! no event found for type %d", type);
+ die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
- pid = trace_parse_common_pid(data);
+ pid = raw_field_value(event, "common_pid", data);
sprintf(handler, "%s::%s", event->system, event->name);
@@ -273,6 +289,7 @@ static void perl_process_event(int cpu, void *data,
ns = nsecs - s * NSECS_PER_SEC;
scripting_context->event_data = data;
+ scripting_context->pevent = evsel->tp_format->pevent;
ENTER;
SAVETMPS;
@@ -298,7 +315,8 @@ static void perl_process_event(int cpu, void *data,
offset = field->offset;
XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
} else { /* FIELD_IS_NUMERIC */
- val = read_size(data + field->offset, field->size);
+ val = read_size(event, data + field->offset,
+ field->size);
if (field->flags & FIELD_IS_SIGNED) {
XPUSHs(sv_2mortal(newSViv(val)));
} else {
@@ -326,6 +344,40 @@ static void perl_process_event(int cpu, void *data,
LEAVE;
}
+static void perl_process_event_generic(union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel)
+{
+ dSP;
+
+ if (!get_cv("process_event", 0))
+ return;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+ XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size)));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
+ PUTBACK;
+ call_pv("process_event", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void perl_process_event(union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al __maybe_unused)
+{
+ perl_process_tracepoint(sample, evsel, thread);
+ perl_process_event_generic(event, sample, evsel);
+}
+
static void run_start_sub(void)
{
dSP; /* access to Perl stack */
@@ -396,9 +448,9 @@ static int perl_stop_script(void)
return 0;
}
-static int perl_generate_script(const char *outfile)
+static int perl_generate_script(struct pevent *pevent, const char *outfile)
{
- struct event *event = NULL;
+ struct event_format *event = NULL;
struct format_field *f;
char fname[PATH_MAX];
int not_first, count;
@@ -411,8 +463,8 @@ static int perl_generate_script(const char *outfile)
return -1;
}
- fprintf(ofp, "# perf trace event handlers, "
- "generated by perf trace -g perl\n");
+ fprintf(ofp, "# perf script event handlers, "
+ "generated by perf script -g perl\n");
fprintf(ofp, "# Licensed under the terms of the GNU GPL"
" License version 2\n\n");
@@ -443,7 +495,7 @@ static int perl_generate_script(const char *outfile)
fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
- while ((event = trace_find_next_event(event))) {
+ while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
fprintf(ofp, "\tmy (");
@@ -547,7 +599,28 @@ static int perl_generate_script(const char *outfile)
fprintf(ofp, "sub print_header\n{\n"
"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
"\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t "
- "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}");
+ "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n");
+
+ fprintf(ofp,
+ "\n# Packed byte string args of process_event():\n"
+ "#\n"
+ "# $event:\tunion perf_event\tutil/event.h\n"
+ "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n"
+ "# $sample:\tstruct perf_sample\tutil/event.h\n"
+ "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n"
+ "\n"
+ "sub process_event\n"
+ "{\n"
+ "\tmy ($event, $attr, $sample, $raw_data) = @_;\n"
+ "\n"
+ "\tmy @event\t= unpack(\"LSS\", $event);\n"
+ "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n"
+ "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n"
+ "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n"
+ "\n"
+ "\tuse Data::Dumper;\n"
+ "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n"
+ "}\n");
fclose(ofp);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 33a63252374..1c419321f70 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -24,11 +24,13 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <ctype.h>
#include <errno.h>
#include "../../perf.h"
+#include "../evsel.h"
#include "../util.h"
+#include "../event.h"
+#include "../thread.h"
#include "../trace-event.h"
PyMODINIT_FUNC initperf_trace_context(void);
@@ -36,7 +38,7 @@ PyMODINIT_FUNC initperf_trace_context(void);
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
-struct event *events[FTRACE_MAX_EVENT];
+struct event_format *events[FTRACE_MAX_EVENT];
#define MAX_FIELDS 64
#define N_COMMON_FIELDS 7
@@ -54,6 +56,17 @@ static void handler_call_die(const char *handler_name)
Py_FatalError("problem in Python trace event handler");
}
+/*
+ * Insert val into the dictionary and decrement the reference counter.
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * steal a reference, as opposed to PyTuple_SetItem().
+ */
+static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
+{
+ PyDict_SetItemString(dict, key, val);
+ Py_DECREF(val);
+}
+
static void define_value(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
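
The helper above exists because PyDict_SetItemString() keeps its own reference to the value rather than stealing the caller's, so a freshly created object must be released after insertion or it leaks. A minimal stand-alone sketch of the same pattern (illustrative only; assumes a Python 2 build, matching the PyInt_*/PyString_* calls used in this file):

#include <Python.h>

/* Mirrors the helper above: insert, then drop the caller's temporary reference. */
static void pydict_set_item_string_decref(PyObject *dict, const char *key,
					  PyObject *val)
{
	PyDict_SetItemString(dict, key, val);	/* the dict takes its own reference */
	Py_DECREF(val);				/* release the temporary one */
}

int main(void)
{
	Py_Initialize();

	PyObject *dict = PyDict_New();
	if (!dict)
		return 1;

	/* Without the Py_DECREF in the helper, the PyInt object would keep a
	 * refcount of 2 and never be freed once the dict lets go of it. */
	pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(3));
	PyObject_Print(dict, stdout, 0);
	printf("\n");

	Py_DECREF(dict);
	Py_Finalize();
	return 0;
}
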
@@ -135,7 +148,7 @@ static void define_field(enum print_arg_type field_type,
Py_DECREF(t);
}
-static void define_event_symbols(struct event *event,
+static void define_event_symbols(struct event_format *event,
const char *ev_name,
struct print_arg *args)
{
@@ -148,8 +161,7 @@ static void define_event_symbols(struct event *event,
zero_flag_atom = 0;
break;
case PRINT_FIELD:
- if (cur_field_name)
- free(cur_field_name);
+ free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
case PRINT_FLAGS:
@@ -165,6 +177,10 @@ static void define_event_symbols(struct event *event,
define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
cur_field_name);
break;
+ case PRINT_HEX:
+ define_event_symbols(event, ev_name, args->hex.field);
+ define_event_symbols(event, ev_name, args->hex.size);
+ break;
case PRINT_STRING:
break;
case PRINT_TYPE:
@@ -177,6 +193,11 @@ static void define_event_symbols(struct event *event,
define_event_symbols(event, ev_name, args->op.right);
break;
default:
+ /* gcc warns for these? */
+ case PRINT_BSTRING:
+ case PRINT_DYNAMIC_ARRAY:
+ case PRINT_FUNC:
+ case PRINT_BITMASK:
/* we should warn... */
return;
}
@@ -185,15 +206,21 @@ static void define_event_symbols(struct event *event,
define_event_symbols(event, ev_name, args->next);
}
-static inline struct event *find_cache_event(int type)
+static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
- struct event *event;
-
+ struct event_format *event;
+ int type = evsel->attr.config;
+
+ /*
+ * XXX: Do we really need to cache this now that we have evsel->tp_format
+ * cached already? Need to re-read this "cache" routine that also calls
+ * define_event_symbols() :-\
+ */
if (events[type])
return events[type];
- events[type] = event = trace_find_event(type);
+ events[type] = event = evsel->tp_format;
if (!event)
return NULL;
@@ -204,31 +231,33 @@ static inline struct event *find_cache_event(int type)
return event;
}
-static void python_process_event(int cpu, void *data,
- int size __unused,
- unsigned long long nsecs, char *comm)
+static void python_process_tracepoint(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
static char handler_name[256];
struct format_field *field;
unsigned long long val;
unsigned long s, ns;
- struct event *event;
+ struct event_format *event;
unsigned n = 0;
- int type;
int pid;
+ int cpu = sample->cpu;
+ void *data = sample->raw_data;
+ unsigned long long nsecs = sample->time;
+ const char *comm = thread__comm_str(thread);
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
- type = trace_parse_common_type(data);
-
- event = find_cache_event(type);
+ event = find_cache_event(evsel);
if (!event)
- die("ug! no event found for type %d", type);
+ die("ug! no event found for type %d", (int)evsel->attr.config);
- pid = trace_parse_common_pid(data);
+ pid = raw_field_value(event, "common_pid", data);
sprintf(handler_name, "%s__%s", event->system, event->name);
@@ -244,12 +273,12 @@ static void python_process_event(int cpu, void *data,
ns = nsecs - s * NSECS_PER_SEC;
scripting_context->event_data = data;
+ scripting_context->pevent = evsel->tp_format->pevent;
context = PyCObject_FromVoidPtr(scripting_context, NULL);
PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
- PyTuple_SetItem(t, n++,
- PyCObject_FromVoidPtr(scripting_context, NULL));
+ PyTuple_SetItem(t, n++, context);
if (handler) {
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
@@ -258,11 +287,11 @@ static void python_process_event(int cpu, void *data,
PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
PyTuple_SetItem(t, n++, PyString_FromString(comm));
} else {
- PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
- PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
- PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
- PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
- PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+ pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
+ pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
+ pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
+ pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
+ pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
}
for (field = event->format.fields; field; field = field->next) {
if (field->flags & FIELD_IS_STRING) {
@@ -274,7 +303,8 @@ static void python_process_event(int cpu, void *data,
offset = field->offset;
obj = PyString_FromString((char *)data + offset);
} else { /* FIELD_IS_NUMERIC */
- val = read_size(data + field->offset, field->size);
+ val = read_size(event, data + field->offset,
+ field->size);
if (field->flags & FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
(long long)val <= LONG_MAX)
@@ -291,7 +321,7 @@ static void python_process_event(int cpu, void *data,
if (handler)
PyTuple_SetItem(t, n++, obj);
else
- PyDict_SetItemString(dict, field->name, obj);
+ pydict_set_item_string_decref(dict, field->name, obj);
}
if (!handler)
@@ -318,6 +348,79 @@ static void python_process_event(int cpu, void *data,
Py_DECREF(t);
}
+static void python_process_general_event(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al)
+{
+ PyObject *handler, *retval, *t, *dict;
+ static char handler_name[64];
+ unsigned n = 0;
+
+ /*
+ * Use the MAX_FIELDS to make the function expandable, though
+ * currently there is only one item for the tuple.
+ */
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ dict = PyDict_New();
+ if (!dict)
+ Py_FatalError("couldn't create Python dictionary");
+
+ snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (!handler || !PyCallable_Check(handler))
+ goto exit;
+
+ pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
+ pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
+ (const char *)&evsel->attr, sizeof(evsel->attr)));
+ pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
+ (const char *)sample, sizeof(*sample)));
+ pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
+ (const char *)sample->raw_data, sample->raw_size));
+ pydict_set_item_string_decref(dict, "comm",
+ PyString_FromString(thread__comm_str(thread)));
+ if (al->map) {
+ pydict_set_item_string_decref(dict, "dso",
+ PyString_FromString(al->map->dso->name));
+ }
+ if (al->sym) {
+ pydict_set_item_string_decref(dict, "symbol",
+ PyString_FromString(al->sym->name));
+ }
+
+ PyTuple_SetItem(t, n++, dict);
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+exit:
+ Py_DECREF(dict);
+ Py_DECREF(t);
+}
+
+static void python_process_event(union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al)
+{
+ switch (evsel->attr.type) {
+ case PERF_TYPE_TRACEPOINT:
+ python_process_tracepoint(sample, evsel, thread, al);
+ break;
+ /* Reserve for future process_hw/sw/raw APIs */
+ default:
+ python_process_general_event(sample, evsel, thread, al);
+ }
+}
+
static int run_start_sub(void)
{
PyObject *handler, *retval;
@@ -428,9 +531,9 @@ out:
return err;
}
-static int python_generate_script(const char *outfile)
+static int python_generate_script(struct pevent *pevent, const char *outfile)
{
- struct event *event = NULL;
+ struct event_format *event = NULL;
struct format_field *f;
char fname[PATH_MAX];
int not_first, count;
@@ -442,8 +545,8 @@ static int python_generate_script(const char *outfile)
fprintf(stderr, "couldn't open %s\n", fname);
return -1;
}
- fprintf(ofp, "# perf trace event handlers, "
- "generated by perf trace -g python\n");
+ fprintf(ofp, "# perf script event handlers, "
+ "generated by perf script -g python\n");
fprintf(ofp, "# Licensed under the terms of the GNU GPL"
" License version 2\n\n");
@@ -477,7 +580,7 @@ static int python_generate_script(const char *outfile)
fprintf(ofp, "def trace_end():\n");
fprintf(ofp, "\tprint \"in trace_end\"\n\n");
- while ((event = trace_find_next_event(event))) {
+ while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "def %s__%s(", event->system, event->name);
fprintf(ofp, "event_name, ");
fprintf(ofp, "context, ");
@@ -520,6 +623,7 @@ static int python_generate_script(const char *outfile)
fprintf(ofp, "%s=", f->name);
if (f->flags & FIELD_IS_STRING ||
f->flags & FIELD_IS_FLAG ||
+ f->flags & FIELD_IS_ARRAY ||
f->flags & FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
else if (f->flags & FIELD_IS_SIGNED)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index fa9d652c2dc..64a186edc7b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1,419 +1,531 @@
-#define _FILE_OFFSET_BITS 64
-
#include <linux/kernel.h>
+#include <traceevent/event-parse.h>
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
+#include "evlist.h"
+#include "evsel.h"
#include "session.h"
+#include "tool.h"
#include "sort.h"
#include "util.h"
+#include "cpumap.h"
+#include "perf_regs.h"
+#include "vdso.h"
-static int perf_session__open(struct perf_session *self, bool force)
+static int perf_session__open(struct perf_session *session)
{
- struct stat input_stat;
-
- if (!strcmp(self->filename, "-")) {
- self->fd_pipe = true;
- self->fd = STDIN_FILENO;
-
- if (perf_header__read(self, self->fd) < 0)
- pr_err("incompatible file format");
-
- return 0;
- }
-
- self->fd = open(self->filename, O_RDONLY);
- if (self->fd < 0) {
- int err = errno;
+ struct perf_data_file *file = session->file;
- pr_err("failed to open %s: %s", self->filename, strerror(err));
- if (err == ENOENT && !strcmp(self->filename, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- return -errno;
+ if (perf_session__read_header(session) < 0) {
+ pr_err("incompatible file format (rerun with -v to learn more)");
+ return -1;
}
- if (fstat(self->fd, &input_stat) < 0)
- goto out_close;
+ if (perf_data_file__is_pipe(file))
+ return 0;
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file %s not owned by current user or root\n",
- self->filename);
- goto out_close;
+ if (!perf_evlist__valid_sample_type(session->evlist)) {
+ pr_err("non matching sample_type");
+ return -1;
}
- if (!input_stat.st_size) {
- pr_info("zero-sized file (%s), nothing to do!\n",
- self->filename);
- goto out_close;
+ if (!perf_evlist__valid_sample_id_all(session->evlist)) {
+ pr_err("non matching sample_id_all");
+ return -1;
}
- if (perf_header__read(self, self->fd) < 0) {
- pr_err("incompatible file format");
- goto out_close;
+ if (!perf_evlist__valid_read_format(session->evlist)) {
+ pr_err("non matching read_format");
+ return -1;
}
- self->size = input_stat.st_size;
return 0;
-
-out_close:
- close(self->fd);
- self->fd = -1;
- return -1;
}
-void perf_session__update_sample_type(struct perf_session *self)
+void perf_session__set_id_hdr_size(struct perf_session *session)
{
- self->sample_type = perf_header__sample_type(&self->header);
+ u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
+
+ machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
-int perf_session__create_kernel_maps(struct perf_session *self)
+int perf_session__create_kernel_maps(struct perf_session *session)
{
- int ret = machine__create_kernel_maps(&self->host_machine);
+ int ret = machine__create_kernel_maps(&session->machines.host);
if (ret >= 0)
- ret = machines__create_guest_kernel_maps(&self->machines);
+ ret = machines__create_guest_kernel_maps(&session->machines);
return ret;
}
-static void perf_session__destroy_kernel_maps(struct perf_session *self)
+static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
- machine__destroy_kernel_maps(&self->host_machine);
- machines__destroy_guest_kernel_maps(&self->machines);
+ machines__destroy_kernel_maps(&session->machines);
}
-struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
+struct perf_session *perf_session__new(struct perf_data_file *file,
+ bool repipe, struct perf_tool *tool)
{
- size_t len = filename ? strlen(filename) + 1 : 0;
- struct perf_session *self = zalloc(sizeof(*self) + len);
+ struct perf_session *session = zalloc(sizeof(*session));
- if (self == NULL)
+ if (!session)
goto out;
- if (perf_header__init(&self->header) < 0)
- goto out_free;
-
- memcpy(self->filename, filename, len);
- self->threads = RB_ROOT;
- INIT_LIST_HEAD(&self->dead_threads);
- self->hists_tree = RB_ROOT;
- self->last_match = NULL;
- self->mmap_window = 32;
- self->machines = RB_ROOT;
- self->repipe = repipe;
- INIT_LIST_HEAD(&self->ordered_samples.samples_head);
- machine__init(&self->host_machine, "", HOST_KERNEL_ID);
-
- if (mode == O_RDONLY) {
- if (perf_session__open(self, force) < 0)
+ session->repipe = repipe;
+ INIT_LIST_HEAD(&session->ordered_samples.samples);
+ INIT_LIST_HEAD(&session->ordered_samples.sample_cache);
+ INIT_LIST_HEAD(&session->ordered_samples.to_free);
+ machines__init(&session->machines);
+
+ if (file) {
+ if (perf_data_file__open(file))
goto out_delete;
- } else if (mode == O_WRONLY) {
+
+ session->file = file;
+
+ if (perf_data_file__is_read(file)) {
+ if (perf_session__open(session) < 0)
+ goto out_close;
+
+ perf_session__set_id_hdr_size(session);
+ }
+ }
+
+ if (!file || perf_data_file__is_write(file)) {
/*
* In O_RDONLY mode this will be performed when reading the
- * kernel MMAP event, in event__process_mmap().
+ * kernel MMAP event, in perf_event__process_mmap().
*/
- if (perf_session__create_kernel_maps(self) < 0)
+ if (perf_session__create_kernel_maps(session) < 0)
goto out_delete;
}
- perf_session__update_sample_type(self);
-out:
- return self;
-out_free:
- free(self);
- return NULL;
-out_delete:
- perf_session__delete(self);
+ if (tool && tool->ordering_requires_timestamps &&
+ tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) {
+ dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
+ tool->ordered_samples = false;
+ }
+
+ return session;
+
+ out_close:
+ perf_data_file__close(file);
+ out_delete:
+ perf_session__delete(session);
+ out:
return NULL;
}
-static void perf_session__delete_dead_threads(struct perf_session *self)
+static void perf_session__delete_dead_threads(struct perf_session *session)
{
- struct thread *n, *t;
-
- list_for_each_entry_safe(t, n, &self->dead_threads, node) {
- list_del(&t->node);
- thread__delete(t);
- }
+ machine__delete_dead_threads(&session->machines.host);
}
-static void perf_session__delete_threads(struct perf_session *self)
+static void perf_session__delete_threads(struct perf_session *session)
{
- struct rb_node *nd = rb_first(&self->threads);
-
- while (nd) {
- struct thread *t = rb_entry(nd, struct thread, rb_node);
-
- rb_erase(&t->rb_node, &self->threads);
- nd = rb_next(nd);
- thread__delete(t);
- }
+ machine__delete_threads(&session->machines.host);
}
-void perf_session__delete(struct perf_session *self)
+static void perf_session_env__delete(struct perf_session_env *env)
{
- perf_header__exit(&self->header);
- perf_session__destroy_kernel_maps(self);
- perf_session__delete_dead_threads(self);
- perf_session__delete_threads(self);
- machine__exit(&self->host_machine);
- close(self->fd);
- free(self);
+ zfree(&env->hostname);
+ zfree(&env->os_release);
+ zfree(&env->version);
+ zfree(&env->arch);
+ zfree(&env->cpu_desc);
+ zfree(&env->cpuid);
+
+ zfree(&env->cmdline);
+ zfree(&env->sibling_cores);
+ zfree(&env->sibling_threads);
+ zfree(&env->numa_nodes);
+ zfree(&env->pmu_mappings);
}
-void perf_session__remove_thread(struct perf_session *self, struct thread *th)
+void perf_session__delete(struct perf_session *session)
{
- self->last_match = NULL;
- rb_erase(&th->rb_node, &self->threads);
- /*
- * We may have references to this thread, for instance in some hist_entry
- * instances, so just move them to a separate list.
- */
- list_add_tail(&th->node, &self->dead_threads);
+ perf_session__destroy_kernel_maps(session);
+ perf_session__delete_dead_threads(session);
+ perf_session__delete_threads(session);
+ perf_session_env__delete(&session->header.env);
+ machines__exit(&session->machines);
+ if (session->file)
+ perf_data_file__close(session->file);
+ free(session);
+ vdso__exit();
}
-static bool symbol__match_parent_regex(struct symbol *sym)
+static int process_event_synth_tracing_data_stub(struct perf_tool *tool
+ __maybe_unused,
+ union perf_event *event
+ __maybe_unused,
+ struct perf_session *session
+ __maybe_unused)
{
- if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
- return 1;
-
+ dump_printf(": unhandled!\n");
return 0;
}
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
+static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_evlist **pevlist
+ __maybe_unused)
{
- u8 cpumode = PERF_RECORD_MISC_USER;
- unsigned int i;
- struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
-
- if (!syms)
- return NULL;
-
- for (i = 0; i < chain->nr; i++) {
- u64 ip = chain->ips[i];
- struct addr_location al;
-
- if (ip >= PERF_CONTEXT_MAX) {
- switch (ip) {
- case PERF_CONTEXT_HV:
- cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
- case PERF_CONTEXT_KERNEL:
- cpumode = PERF_RECORD_MISC_KERNEL; break;
- case PERF_CONTEXT_USER:
- cpumode = PERF_RECORD_MISC_USER; break;
- default:
- break;
- }
- continue;
- }
-
- al.filtered = false;
- thread__find_addr_location(thread, self, cpumode,
- MAP__FUNCTION, thread->pid, ip, &al, NULL);
- if (al.sym != NULL) {
- if (sort__has_parent && !*parent &&
- symbol__match_parent_regex(al.sym))
- *parent = al.sym;
- if (!symbol_conf.use_callchain)
- break;
- syms[i].map = al.map;
- syms[i].sym = al.sym;
- }
- }
+ dump_printf(": unhandled!\n");
+ return 0;
+}
- return syms;
+static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
}
-static int process_event_stub(event_t *event __used,
- struct perf_session *session __used)
+static int process_event_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round_stub(event_t *event __used,
- struct perf_session *session __used,
- struct perf_event_ops *ops __used)
+static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *perf_session
+ __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round(event_t *event,
- struct perf_session *session,
- struct perf_event_ops *ops);
-
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
-{
- if (handler->sample == NULL)
- handler->sample = process_event_stub;
- if (handler->mmap == NULL)
- handler->mmap = process_event_stub;
- if (handler->comm == NULL)
- handler->comm = process_event_stub;
- if (handler->fork == NULL)
- handler->fork = process_event_stub;
- if (handler->exit == NULL)
- handler->exit = process_event_stub;
- if (handler->lost == NULL)
- handler->lost = process_event_stub;
- if (handler->read == NULL)
- handler->read = process_event_stub;
- if (handler->throttle == NULL)
- handler->throttle = process_event_stub;
- if (handler->unthrottle == NULL)
- handler->unthrottle = process_event_stub;
- if (handler->attr == NULL)
- handler->attr = process_event_stub;
- if (handler->event_type == NULL)
- handler->event_type = process_event_stub;
- if (handler->tracing_data == NULL)
- handler->tracing_data = process_event_stub;
- if (handler->build_id == NULL)
- handler->build_id = process_event_stub;
- if (handler->finished_round == NULL) {
- if (handler->ordered_samples)
- handler->finished_round = process_finished_round;
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+void perf_tool__fill_defaults(struct perf_tool *tool)
+{
+ if (tool->sample == NULL)
+ tool->sample = process_event_sample_stub;
+ if (tool->mmap == NULL)
+ tool->mmap = process_event_stub;
+ if (tool->mmap2 == NULL)
+ tool->mmap2 = process_event_stub;
+ if (tool->comm == NULL)
+ tool->comm = process_event_stub;
+ if (tool->fork == NULL)
+ tool->fork = process_event_stub;
+ if (tool->exit == NULL)
+ tool->exit = process_event_stub;
+ if (tool->lost == NULL)
+ tool->lost = perf_event__process_lost;
+ if (tool->read == NULL)
+ tool->read = process_event_sample_stub;
+ if (tool->throttle == NULL)
+ tool->throttle = process_event_stub;
+ if (tool->unthrottle == NULL)
+ tool->unthrottle = process_event_stub;
+ if (tool->attr == NULL)
+ tool->attr = process_event_synth_attr_stub;
+ if (tool->tracing_data == NULL)
+ tool->tracing_data = process_event_synth_tracing_data_stub;
+ if (tool->build_id == NULL)
+ tool->build_id = process_finished_round_stub;
+ if (tool->finished_round == NULL) {
+ if (tool->ordered_samples)
+ tool->finished_round = process_finished_round;
else
- handler->finished_round = process_finished_round_stub;
+ tool->finished_round = process_finished_round_stub;
}
}
+
+static void swap_sample_id_all(union perf_event *event, void *data)
+{
+ void *end = (void *) event + event->header.size;
+ int size = end - data;
+
+ BUG_ON(size % sizeof(u64));
+ mem_bswap_64(data, size);
+}
-void mem_bswap_64(void *src, int byte_size)
+static void perf_event__all64_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
- u64 *m = src;
+ struct perf_event_header *hdr = &event->header;
+ mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
+}
+
+static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
+{
+ event->comm.pid = bswap_32(event->comm.pid);
+ event->comm.tid = bswap_32(event->comm.tid);
- while (byte_size > 0) {
- *m = bswap_64(*m);
- byte_size -= sizeof(u64);
- ++m;
+ if (sample_id_all) {
+ void *data = &event->comm.comm;
+
+ data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ swap_sample_id_all(event, data);
}
}
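
The swap_sample_id_all() callers above locate the trailing sample_id_all block by skipping past the variable-length string to the next u64 boundary; that is what PERF_ALIGN(strlen(data) + 1, sizeof(u64)) computes. A stand-alone sketch of that offset arithmetic (ALIGN_UP below is only assumed to behave like perf's PERF_ALIGN):

#include <stdio.h>
#include <string.h>

/* Round x up to the next multiple of a (a must be a power of two);
 * assumed to match what PERF_ALIGN() does in the perf sources. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	const char *comm = "kworker/0:1";
	unsigned long len = strlen(comm) + 1;	/* 12 bytes, including the NUL */

	printf("string uses %lu bytes, sample_id_all data starts at offset %lu\n",
	       len, (unsigned long)ALIGN_UP(len, sizeof(unsigned long long)));	/* 16 */
	return 0;
}
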
-static void event__all64_swap(event_t *self)
+static void perf_event__mmap_swap(union perf_event *event,
+ bool sample_id_all)
{
- struct perf_event_header *hdr = &self->header;
- mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
+ event->mmap.pid = bswap_32(event->mmap.pid);
+ event->mmap.tid = bswap_32(event->mmap.tid);
+ event->mmap.start = bswap_64(event->mmap.start);
+ event->mmap.len = bswap_64(event->mmap.len);
+ event->mmap.pgoff = bswap_64(event->mmap.pgoff);
+
+ if (sample_id_all) {
+ void *data = &event->mmap.filename;
+
+ data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ swap_sample_id_all(event, data);
+ }
}
-static void event__comm_swap(event_t *self)
+static void perf_event__mmap2_swap(union perf_event *event,
+ bool sample_id_all)
+{
+ event->mmap2.pid = bswap_32(event->mmap2.pid);
+ event->mmap2.tid = bswap_32(event->mmap2.tid);
+ event->mmap2.start = bswap_64(event->mmap2.start);
+ event->mmap2.len = bswap_64(event->mmap2.len);
+ event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
+ event->mmap2.maj = bswap_32(event->mmap2.maj);
+ event->mmap2.min = bswap_32(event->mmap2.min);
+ event->mmap2.ino = bswap_64(event->mmap2.ino);
+
+ if (sample_id_all) {
+ void *data = &event->mmap2.filename;
+
+ data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
+ swap_sample_id_all(event, data);
+ }
+}
+static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
- self->comm.pid = bswap_32(self->comm.pid);
- self->comm.tid = bswap_32(self->comm.tid);
+ event->fork.pid = bswap_32(event->fork.pid);
+ event->fork.tid = bswap_32(event->fork.tid);
+ event->fork.ppid = bswap_32(event->fork.ppid);
+ event->fork.ptid = bswap_32(event->fork.ptid);
+ event->fork.time = bswap_64(event->fork.time);
+
+ if (sample_id_all)
+ swap_sample_id_all(event, &event->fork + 1);
}
-static void event__mmap_swap(event_t *self)
+static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
- self->mmap.pid = bswap_32(self->mmap.pid);
- self->mmap.tid = bswap_32(self->mmap.tid);
- self->mmap.start = bswap_64(self->mmap.start);
- self->mmap.len = bswap_64(self->mmap.len);
- self->mmap.pgoff = bswap_64(self->mmap.pgoff);
+ event->read.pid = bswap_32(event->read.pid);
+ event->read.tid = bswap_32(event->read.tid);
+ event->read.value = bswap_64(event->read.value);
+ event->read.time_enabled = bswap_64(event->read.time_enabled);
+ event->read.time_running = bswap_64(event->read.time_running);
+ event->read.id = bswap_64(event->read.id);
+
+ if (sample_id_all)
+ swap_sample_id_all(event, &event->read + 1);
}
-static void event__task_swap(event_t *self)
+static void perf_event__throttle_swap(union perf_event *event,
+ bool sample_id_all)
{
- self->fork.pid = bswap_32(self->fork.pid);
- self->fork.tid = bswap_32(self->fork.tid);
- self->fork.ppid = bswap_32(self->fork.ppid);
- self->fork.ptid = bswap_32(self->fork.ptid);
- self->fork.time = bswap_64(self->fork.time);
+ event->throttle.time = bswap_64(event->throttle.time);
+ event->throttle.id = bswap_64(event->throttle.id);
+ event->throttle.stream_id = bswap_64(event->throttle.stream_id);
+
+ if (sample_id_all)
+ swap_sample_id_all(event, &event->throttle + 1);
}
-static void event__read_swap(event_t *self)
+static u8 revbyte(u8 b)
{
- self->read.pid = bswap_32(self->read.pid);
- self->read.tid = bswap_32(self->read.tid);
- self->read.value = bswap_64(self->read.value);
- self->read.time_enabled = bswap_64(self->read.time_enabled);
- self->read.time_running = bswap_64(self->read.time_running);
- self->read.id = bswap_64(self->read.id);
+ int rev = (b >> 4) | ((b & 0xf) << 4);
+ rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
+ rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
+ return (u8) rev;
}
-static void event__attr_swap(event_t *self)
+/*
+ * XXX this is a hack in an attempt to carry the flags bitfield
+ * through the endian village. The ABI says:
+ *
+ * Bit-fields are allocated from right to left (least to most significant)
+ * on little-endian implementations and from left to right (most to least
+ * significant) on big-endian implementations.
+ *
+ * The above seems to be byte specific, so we need to reverse each
+ * byte of the bitfield. 'Internet' also says this might be implementation
+ * specific and we probably need a proper fix and to carry the perf_event_attr
+ * bitfield flags in a separate data file FEAT_ section. Though this seems
+ * to work for now.
+ */
+static void swap_bitfield(u8 *p, unsigned len)
{
- size_t size;
+ unsigned i;
- self->attr.attr.type = bswap_32(self->attr.attr.type);
- self->attr.attr.size = bswap_32(self->attr.attr.size);
- self->attr.attr.config = bswap_64(self->attr.attr.config);
- self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
- self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
- self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
- self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
- self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
- self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
- self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
+ for (i = 0; i < len; i++) {
+ *p = revbyte(*p);
+ p++;
+ }
+}
- size = self->header.size;
- size -= (void *)&self->attr.id - (void *)self;
- mem_bswap_64(self->attr.id, size);
+/* exported for swapping attributes in file header */
+void perf_event__attr_swap(struct perf_event_attr *attr)
+{
+ attr->type = bswap_32(attr->type);
+ attr->size = bswap_32(attr->size);
+ attr->config = bswap_64(attr->config);
+ attr->sample_period = bswap_64(attr->sample_period);
+ attr->sample_type = bswap_64(attr->sample_type);
+ attr->read_format = bswap_64(attr->read_format);
+ attr->wakeup_events = bswap_32(attr->wakeup_events);
+ attr->bp_type = bswap_32(attr->bp_type);
+ attr->bp_addr = bswap_64(attr->bp_addr);
+ attr->bp_len = bswap_64(attr->bp_len);
+ attr->branch_sample_type = bswap_64(attr->branch_sample_type);
+ attr->sample_regs_user = bswap_64(attr->sample_regs_user);
+ attr->sample_stack_user = bswap_32(attr->sample_stack_user);
+
+ swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
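
To make the per-byte bit reversal above easy to check by hand, here is a stand-alone sketch that applies the same revbyte() steps, swapping nibbles, then bit pairs, then neighbouring bits:

#include <stdio.h>

typedef unsigned char u8;

/* Same swap-nibbles / swap-pairs / swap-neighbours trick as revbyte() above. */
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);

	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8)rev;
}

int main(void)
{
	printf("0x01 -> 0x%02x\n", revbyte(0x01));	/* 0x80: lowest bit becomes highest */
	printf("0xa5 -> 0x%02x\n", revbyte(0xa5));	/* 0xa5: the bit pattern is a palindrome */
	return 0;
}
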
-static void event__event_type_swap(event_t *self)
+static void perf_event__hdr_attr_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
- self->event_type.event_type.event_id =
- bswap_64(self->event_type.event_type.event_id);
+ size_t size;
+
+ perf_event__attr_swap(&event->attr.attr);
+
+ size = event->header.size;
+ size -= (void *)&event->attr.id - (void *)event;
+ mem_bswap_64(event->attr.id, size);
}
-static void event__tracing_data_swap(event_t *self)
+static void perf_event__event_type_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
{
- self->tracing_data.size = bswap_32(self->tracing_data.size);
+ event->event_type.event_type.event_id =
+ bswap_64(event->event_type.event_type.event_id);
}
-typedef void (*event__swap_op)(event_t *self);
+static void perf_event__tracing_data_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->tracing_data.size = bswap_32(event->tracing_data.size);
+}
-static event__swap_op event__swap_ops[] = {
- [PERF_RECORD_MMAP] = event__mmap_swap,
- [PERF_RECORD_COMM] = event__comm_swap,
- [PERF_RECORD_FORK] = event__task_swap,
- [PERF_RECORD_EXIT] = event__task_swap,
- [PERF_RECORD_LOST] = event__all64_swap,
- [PERF_RECORD_READ] = event__read_swap,
- [PERF_RECORD_SAMPLE] = event__all64_swap,
- [PERF_RECORD_HEADER_ATTR] = event__attr_swap,
- [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
- [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
- [PERF_RECORD_HEADER_BUILD_ID] = NULL,
- [PERF_RECORD_HEADER_MAX] = NULL,
+typedef void (*perf_event__swap_op)(union perf_event *event,
+ bool sample_id_all);
+
+static perf_event__swap_op perf_event__swap_ops[] = {
+ [PERF_RECORD_MMAP] = perf_event__mmap_swap,
+ [PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
+ [PERF_RECORD_COMM] = perf_event__comm_swap,
+ [PERF_RECORD_FORK] = perf_event__task_swap,
+ [PERF_RECORD_EXIT] = perf_event__task_swap,
+ [PERF_RECORD_LOST] = perf_event__all64_swap,
+ [PERF_RECORD_READ] = perf_event__read_swap,
+ [PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
+ [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
+ [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
+ [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
+ [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
+ [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
+ [PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_MAX] = NULL,
};
struct sample_queue {
u64 timestamp;
- struct sample_event *event;
+ u64 file_offset;
+ union perf_event *event;
struct list_head list;
};
-static void flush_sample_queue(struct perf_session *s,
- struct perf_event_ops *ops)
+static void perf_session_free_sample_buffers(struct perf_session *session)
+{
+ struct ordered_samples *os = &session->ordered_samples;
+
+ while (!list_empty(&os->to_free)) {
+ struct sample_queue *sq;
+
+ sq = list_entry(os->to_free.next, struct sample_queue, list);
+ list_del(&sq->list);
+ free(sq);
+ }
+}
+
+static int perf_session_deliver_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool,
+ u64 file_offset);
+
+static int flush_sample_queue(struct perf_session *s,
+ struct perf_tool *tool)
{
- struct list_head *head = &s->ordered_samples.samples_head;
- u64 limit = s->ordered_samples.next_flush;
+ struct ordered_samples *os = &s->ordered_samples;
+ struct list_head *head = &os->samples;
struct sample_queue *tmp, *iter;
+ struct perf_sample sample;
+ u64 limit = os->next_flush;
+ u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
+ bool show_progress = limit == ULLONG_MAX;
+ struct ui_progress prog;
+ int ret;
+
+ if (!tool->ordered_samples || !limit)
+ return 0;
- if (!ops->ordered_samples || !limit)
- return;
+ if (show_progress)
+ ui_progress__init(&prog, os->nr_samples, "Processing time ordered events...");
list_for_each_entry_safe(iter, tmp, head, list) {
+ if (session_done())
+ return 0;
+
if (iter->timestamp > limit)
- return;
+ break;
+
+ ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
+ if (ret)
+ pr_err("Can't parse sample, err = %d\n", ret);
+ else {
+ ret = perf_session_deliver_event(s, iter->event, &sample, tool,
+ iter->file_offset);
+ if (ret)
+ return ret;
+ }
- if (iter == s->ordered_samples.last_inserted)
- s->ordered_samples.last_inserted = NULL;
+ os->last_flush = iter->timestamp;
+ list_del(&iter->list);
+ list_add(&iter->list, &os->sample_cache);
- ops->sample((event_t *)iter->event, s);
+ if (show_progress)
+ ui_progress__update(&prog, 1);
+ }
- s->ordered_samples.last_flush = iter->timestamp;
- list_del(&iter->list);
- free(iter->event);
- free(iter);
+ if (list_empty(head)) {
+ os->last_sample = NULL;
+ } else if (last_ts <= limit) {
+ os->last_sample =
+ list_entry(head->prev, struct sample_queue, list);
}
+
+ os->nr_samples = 0;
+
+ return 0;
}
/*
@@ -455,200 +567,530 @@ static void flush_sample_queue(struct perf_session *s,
* Flush every events below timestamp 7
* etc...
*/
-static int process_finished_round(event_t *event __used,
- struct perf_session *session,
- struct perf_event_ops *ops)
+static int process_finished_round(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session)
{
- flush_sample_queue(session, ops);
- session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
+ int ret = flush_sample_queue(session, tool);
+ if (!ret)
+ session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
- return 0;
+ return ret;
}
-static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
+/* The queue is ordered by time */
+static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
- struct sample_queue *iter;
+ struct ordered_samples *os = &s->ordered_samples;
+ struct sample_queue *sample = os->last_sample;
+ u64 timestamp = new->timestamp;
+ struct list_head *p;
- list_for_each_entry_reverse(iter, head, list) {
- if (iter->timestamp < new->timestamp) {
- list_add(&new->list, &iter->list);
- return;
- }
+ ++os->nr_samples;
+ os->last_sample = new;
+
+ if (!sample) {
+ list_add(&new->list, &os->samples);
+ os->max_timestamp = timestamp;
+ return;
}
- list_add(&new->list, head);
+ /*
+ * last_sample might point to some random place in the list as it's
+ * the last queued event. We expect that the new event is close to
+ * this.
+ */
+ if (sample->timestamp <= timestamp) {
+ while (sample->timestamp <= timestamp) {
+ p = sample->list.next;
+ if (p == &os->samples) {
+ list_add_tail(&new->list, &os->samples);
+ os->max_timestamp = timestamp;
+ return;
+ }
+ sample = list_entry(p, struct sample_queue, list);
+ }
+ list_add_tail(&new->list, &sample->list);
+ } else {
+ while (sample->timestamp > timestamp) {
+ p = sample->list.prev;
+ if (p == &os->samples) {
+ list_add(&new->list, &os->samples);
+ return;
+ }
+ sample = list_entry(p, struct sample_queue, list);
+ }
+ list_add(&new->list, &sample->list);
+ }
}
-static void __queue_sample_before(struct sample_queue *new,
- struct sample_queue *iter,
- struct list_head *head)
+#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
+
+int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset)
{
- list_for_each_entry_continue_reverse(iter, head, list) {
- if (iter->timestamp < new->timestamp) {
- list_add(&new->list, &iter->list);
- return;
- }
+ struct ordered_samples *os = &s->ordered_samples;
+ struct list_head *sc = &os->sample_cache;
+ u64 timestamp = sample->time;
+ struct sample_queue *new;
+
+ if (!timestamp || timestamp == ~0ULL)
+ return -ETIME;
+
+ if (timestamp < s->ordered_samples.last_flush) {
+ printf("Warning: Timestamp below last timeslice flush\n");
+ return -EINVAL;
+ }
+
+ if (!list_empty(sc)) {
+ new = list_entry(sc->next, struct sample_queue, list);
+ list_del(&new->list);
+ } else if (os->sample_buffer) {
+ new = os->sample_buffer + os->sample_buffer_idx;
+ if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
+ os->sample_buffer = NULL;
+ } else {
+ os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
+ if (!os->sample_buffer)
+ return -ENOMEM;
+ list_add(&os->sample_buffer->list, &os->to_free);
+ os->sample_buffer_idx = 2;
+ new = os->sample_buffer + 1;
}
- list_add(&new->list, head);
+ new->timestamp = timestamp;
+ new->file_offset = file_offset;
+ new->event = event;
+
+ __queue_event(new, s);
+
+ return 0;
}
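
perf_session_queue_event() above amounts to a small pool allocator: queue entries are carved out of large chunks, recycled through the sample_cache list once flushed, and the chunks themselves are remembered on to_free for release at session teardown. A simplified stand-alone sketch of that pattern (hypothetical types and sizes, not the perf structures; the to_free bookkeeping is omitted for brevity):

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_ENTRIES 4			/* stands in for 64*1024 / sizeof(struct sample_queue) */

struct entry {
	unsigned long long timestamp;
	struct entry *next;		/* free-list linkage once recycled */
};

static struct entry *chunk;		/* current partially used chunk */
static int chunk_idx;
static struct entry *cache;		/* entries handed back after a flush */

static struct entry *get_entry(void)
{
	struct entry *e;

	if (cache) {				/* 1) reuse a recycled entry */
		e = cache;
		cache = e->next;
	} else if (chunk && chunk_idx < CHUNK_ENTRIES) {
		e = &chunk[chunk_idx++];	/* 2) carve one out of the current chunk */
	} else {
		chunk = calloc(CHUNK_ENTRIES, sizeof(*chunk));
		if (!chunk)
			return NULL;		/* 3) otherwise start a fresh chunk */
		chunk_idx = 1;
		e = &chunk[0];
	}
	return e;
}

static void put_entry(struct entry *e)		/* what flushing does, in spirit */
{
	e->next = cache;
	cache = e;
}

int main(void)
{
	struct entry *a = get_entry();

	put_entry(a);
	printf("recycled the same slot: %s\n", get_entry() == a ? "yes" : "no");
	return 0;
}
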
-static void __queue_sample_after(struct sample_queue *new,
- struct sample_queue *iter,
- struct list_head *head)
+static void callchain__printf(struct perf_sample *sample)
{
- list_for_each_entry_continue(iter, head, list) {
- if (iter->timestamp > new->timestamp) {
- list_add_tail(&new->list, &iter->list);
- return;
- }
+ unsigned int i;
+
+ printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
+
+ for (i = 0; i < sample->callchain->nr; i++)
+ printf("..... %2d: %016" PRIx64 "\n",
+ i, sample->callchain->ips[i]);
+}
+
+static void branch_stack__printf(struct perf_sample *sample)
+{
+ uint64_t i;
+
+ printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
+
+ for (i = 0; i < sample->branch_stack->nr; i++)
+ printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
+ i, sample->branch_stack->entries[i].from,
+ sample->branch_stack->entries[i].to);
+}
+
+static void regs_dump__printf(u64 mask, u64 *regs)
+{
+ unsigned rid, i = 0;
+
+ for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
+ u64 val = regs[i++];
+
+ printf(".... %-5s 0x%" PRIx64 "\n",
+ perf_reg_name(rid), val);
}
- list_add_tail(&new->list, head);
}
-/* The queue is ordered by time */
-static void __queue_sample_event(struct sample_queue *new,
- struct perf_session *s)
+static void regs_user__printf(struct perf_sample *sample)
+{
+ struct regs_dump *user_regs = &sample->user_regs;
+
+ if (user_regs->regs) {
+ u64 mask = user_regs->mask;
+ printf("... user regs: mask 0x%" PRIx64 "\n", mask);
+ regs_dump__printf(mask, user_regs->regs);
+ }
+}
+
+static void stack_user__printf(struct stack_dump *dump)
{
- struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
- struct list_head *head = &s->ordered_samples.samples_head;
+ printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
+ dump->size, dump->offset);
+}
+static void perf_session__print_tstamp(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);
- if (!last_inserted) {
- __queue_sample_end(new, head);
+ if (event->header.type != PERF_RECORD_SAMPLE &&
+ !perf_evlist__sample_id_all(session->evlist)) {
+ fputs("-1 -1 ", stdout);
return;
}
- /*
- * Most of the time the current event has a timestamp
- * very close to the last event inserted, unless we just switched
- * to another event buffer. Having a sorting based on a list and
- * on the last inserted event that is close to the current one is
- * probably more efficient than an rbtree based sorting.
- */
- if (last_inserted->timestamp >= new->timestamp)
- __queue_sample_before(new, last_inserted, head);
- else
- __queue_sample_after(new, last_inserted, head);
+ if ((sample_type & PERF_SAMPLE_CPU))
+ printf("%u ", sample->cpu);
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ printf("%" PRIu64 " ", sample->time);
}
-static int queue_sample_event(event_t *event, struct sample_data *data,
- struct perf_session *s)
+static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
- u64 timestamp = data->time;
- struct sample_queue *new;
+ printf("... sample_read:\n");
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ printf("...... time enabled %016" PRIx64 "\n",
+ sample->read.time_enabled);
- if (timestamp < s->ordered_samples.last_flush) {
- printf("Warning: Timestamp below last timeslice flush\n");
- return -EINVAL;
- }
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ printf("...... time running %016" PRIx64 "\n",
+ sample->read.time_running);
- new = malloc(sizeof(*new));
- if (!new)
- return -ENOMEM;
+ if (read_format & PERF_FORMAT_GROUP) {
+ u64 i;
- new->timestamp = timestamp;
+ printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
- new->event = malloc(event->header.size);
- if (!new->event) {
- free(new);
- return -ENOMEM;
- }
+ for (i = 0; i < sample->read.group.nr; i++) {
+ struct sample_read_value *value;
- memcpy(new->event, event, event->header.size);
+ value = &sample->read.group.values[i];
+ printf("..... id %016" PRIx64
+ ", value %016" PRIx64 "\n",
+ value->id, value->value);
+ }
+ } else
+ printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
+ sample->read.one.id, sample->read.one.value);
+}
- __queue_sample_event(new, s);
- s->ordered_samples.last_inserted = new;
+static void dump_event(struct perf_session *session, union perf_event *event,
+ u64 file_offset, struct perf_sample *sample)
+{
+ if (!dump_trace)
+ return;
- if (new->timestamp > s->ordered_samples.max_timestamp)
- s->ordered_samples.max_timestamp = new->timestamp;
+ printf("\n%#" PRIx64 " [%#x]: event: %d\n",
+ file_offset, event->header.size, event->header.type);
- return 0;
+ trace_event(event);
+
+ if (sample)
+ perf_session__print_tstamp(session, event, sample);
+
+ printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
+ event->header.size, perf_event__name(event->header.type));
}
-static int perf_session__process_sample(event_t *event, struct perf_session *s,
- struct perf_event_ops *ops)
+static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample)
{
- struct sample_data data;
+ u64 sample_type;
- if (!ops->ordered_samples)
- return ops->sample(event, s);
+ if (!dump_trace)
+ return;
- bzero(&data, sizeof(struct sample_data));
- event__parse_sample(event, s->sample_type, &data);
+ printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
+ event->header.misc, sample->pid, sample->tid, sample->ip,
+ sample->period, sample->addr);
- queue_sample_event(event, &data, s);
+ sample_type = evsel->attr.sample_type;
- return 0;
+ if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ callchain__printf(sample);
+
+ if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ branch_stack__printf(sample);
+
+ if (sample_type & PERF_SAMPLE_REGS_USER)
+ regs_user__printf(sample);
+
+ if (sample_type & PERF_SAMPLE_STACK_USER)
+ stack_user__printf(&sample->user_stack);
+
+ if (sample_type & PERF_SAMPLE_WEIGHT)
+ printf("... weight: %" PRIu64 "\n", sample->weight);
+
+ if (sample_type & PERF_SAMPLE_DATA_SRC)
+ printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
+
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
+ printf("... transaction: %" PRIx64 "\n", sample->transaction);
+
+ if (sample_type & PERF_SAMPLE_READ)
+ sample_read__printf(sample, evsel->attr.read_format);
}
-static int perf_session__process_event(struct perf_session *self,
- event_t *event,
- struct perf_event_ops *ops,
- u64 offset, u64 head)
+static struct machine *
+ perf_session__find_machine_for_cpumode(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample)
{
- trace_event(event);
+ const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct machine *machine;
+
+ if (perf_guest &&
+ ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
+ (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
+ u32 pid;
+
+ if (event->header.type == PERF_RECORD_MMAP
+ || event->header.type == PERF_RECORD_MMAP2)
+ pid = event->mmap.pid;
+ else
+ pid = sample->pid;
+
+ machine = perf_session__find_machine(session, pid);
+ if (!machine)
+ machine = perf_session__findnew_machine(session,
+ DEFAULT_GUEST_KERNEL_ID);
+ return machine;
+ }
+
+ return &session->machines.host;
+}
+
+static int deliver_sample_value(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct sample_read_value *v,
+ struct machine *machine)
+{
+ struct perf_sample_id *sid;
+
+ sid = perf_evlist__id2sid(session->evlist, v->id);
+ if (sid) {
+ sample->id = v->id;
+ sample->period = v->value - sid->period;
+ sid->period = v->value;
+ }
+
+ if (!sid || sid->evsel == NULL) {
+ ++session->stats.nr_unknown_id;
+ return 0;
+ }
+
+ return tool->sample(tool, event, sample, sid->evsel, machine);
+}
+
+static int deliver_sample_group(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ int ret = -EINVAL;
+ u64 i;
+
+ for (i = 0; i < sample->read.group.nr; i++) {
+ ret = deliver_sample_value(session, tool, event, sample,
+ &sample->read.group.values[i],
+ machine);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
- if (event->header.type < PERF_RECORD_HEADER_MAX) {
- dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
- offset + head, event->header.size,
- event__name[event->header.type]);
- hists__inc_nr_events(&self->hists, event->header.type);
+static int
+perf_session__deliver_sample(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ /* We know evsel != NULL. */
+ u64 sample_type = evsel->attr.sample_type;
+ u64 read_format = evsel->attr.read_format;
+
+	/* Standard sample delivery. */
+ if (!(sample_type & PERF_SAMPLE_READ))
+ return tool->sample(tool, event, sample, evsel, machine);
+
+ /* For PERF_SAMPLE_READ we have either single or group mode. */
+ if (read_format & PERF_FORMAT_GROUP)
+ return deliver_sample_group(session, tool, event, sample,
+ machine);
+ else
+ return deliver_sample_value(session, tool, event, sample,
+ &sample->read.one, machine);
+}
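
A detail worth noting in deliver_sample_value() above: with PERF_SAMPLE_READ the sample carries the running counter value, so the per-event period is reconstructed as the delta against the previous value remembered in the perf_sample_id. A tiny sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical running counter values as they would arrive in samples. */
	unsigned long long values[] = { 1000, 2500, 2600 };
	unsigned long long last = 0;	/* plays the role of sid->period */

	for (int i = 0; i < 3; i++) {
		unsigned long long period = values[i] - last;

		last = values[i];
		printf("value %llu -> period %llu\n", values[i], period);
	}
	return 0;
}
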
+
+static int perf_session_deliver_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool,
+ u64 file_offset)
+{
+ struct perf_evsel *evsel;
+ struct machine *machine;
+
+ dump_event(session, event, file_offset, sample);
+
+ evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
+ /*
+ * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
+ * because the tools right now may apply filters, discarding
+ * some of the samples. For consistency, in the future we
+ * should have something like nr_filtered_samples and remove
+ * the sample->period from total_sample_period, etc, KISS for
+ * now tho.
+ *
+ * Also testing against NULL allows us to handle files without
+ * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
+ * future probably it'll be a good idea to restrict event
+ * processing via perf_session to files with both set.
+ */
+ hists__inc_nr_events(&evsel->hists, event->header.type);
}
- if (self->header.needs_swap && event__swap_ops[event->header.type])
- event__swap_ops[event->header.type](event);
+ machine = perf_session__find_machine_for_cpumode(session, event,
+ sample);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
- return perf_session__process_sample(event, self, ops);
+ dump_sample(evsel, event, sample);
+ if (evsel == NULL) {
+ ++session->stats.nr_unknown_id;
+ return 0;
+ }
+ if (machine == NULL) {
+ ++session->stats.nr_unprocessable_samples;
+ return 0;
+ }
+ return perf_session__deliver_sample(session, tool, event,
+ sample, evsel, machine);
case PERF_RECORD_MMAP:
- return ops->mmap(event, self);
+ return tool->mmap(tool, event, sample, machine);
+ case PERF_RECORD_MMAP2:
+ return tool->mmap2(tool, event, sample, machine);
case PERF_RECORD_COMM:
- return ops->comm(event, self);
+ return tool->comm(tool, event, sample, machine);
case PERF_RECORD_FORK:
- return ops->fork(event, self);
+ return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
- return ops->exit(event, self);
+ return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
- return ops->lost(event, self);
+ if (tool->lost == perf_event__process_lost)
+ session->stats.total_lost += event->lost.lost;
+ return tool->lost(tool, event, sample, machine);
case PERF_RECORD_READ:
- return ops->read(event, self);
+ return tool->read(tool, event, sample, evsel, machine);
case PERF_RECORD_THROTTLE:
- return ops->throttle(event, self);
+ return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
- return ops->unthrottle(event, self);
+ return tool->unthrottle(tool, event, sample, machine);
+ default:
+ ++session->stats.nr_unknown_events;
+ return -1;
+ }
+}
+
+static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
+ struct perf_tool *tool, u64 file_offset)
+{
+ int fd = perf_data_file__fd(session->file);
+ int err;
+
+ dump_event(session, event, file_offset, NULL);
+
+ /* These events are processed right away */
+ switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
- return ops->attr(event, self);
+ err = tool->attr(tool, event, &session->evlist);
+ if (err == 0)
+ perf_session__set_id_hdr_size(session);
+ return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
- return ops->event_type(event, self);
+ /*
+ * Deprecated, but we need to handle it for the sake
+ * of old data files created in pipe mode.
+ */
+ return 0;
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
- lseek(self->fd, offset + head, SEEK_SET);
- return ops->tracing_data(event, self);
+ lseek(fd, file_offset, SEEK_SET);
+ return tool->tracing_data(tool, event, session);
case PERF_RECORD_HEADER_BUILD_ID:
- return ops->build_id(event, self);
+ return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
- return ops->finished_round(event, self, ops);
+ return tool->finished_round(tool, event, session);
default:
- ++self->hists.stats.nr_unknown_events;
- return -1;
+ return -EINVAL;
+ }
+}
+
+static void event_swap(union perf_event *event, bool sample_id_all)
+{
+ perf_event__swap_op swap;
+
+ swap = perf_event__swap_ops[event->header.type];
+ if (swap)
+ swap(event, sample_id_all);
+}
+
+static int perf_session__process_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool,
+ u64 file_offset)
+{
+ struct perf_sample sample;
+ int ret;
+
+ if (session->header.needs_swap)
+ event_swap(event, perf_evlist__sample_id_all(session->evlist));
+
+ if (event->header.type >= PERF_RECORD_HEADER_MAX)
+ return -EINVAL;
+
+ events_stats__inc(&session->stats, event->header.type);
+
+ if (event->header.type >= PERF_RECORD_USER_TYPE_START)
+ return perf_session__process_user_event(session, event, tool, file_offset);
+
+ /*
+ * For all kernel events we get the sample data
+ */
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample);
+ if (ret)
+ return ret;
+
+ if (tool->ordered_samples) {
+ ret = perf_session_queue_event(session, event, &sample,
+ file_offset);
+ if (ret != -ETIME)
+ return ret;
}
+
+ return perf_session_deliver_event(session, event, &sample, tool,
+ file_offset);
+}
+
+void perf_event_header__bswap(struct perf_event_header *hdr)
+{
+ hdr->type = bswap_32(hdr->type);
+ hdr->misc = bswap_16(hdr->misc);
+ hdr->size = bswap_16(hdr->size);
}
-void perf_event_header__bswap(struct perf_event_header *self)
+struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
- self->type = bswap_32(self->type);
- self->misc = bswap_16(self->misc);
- self->size = bswap_16(self->size);
+ return machine__findnew_thread(&session->machines.host, 0, pid);
}
-static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
- struct thread *thread = perf_session__findnew(self, 0);
+ struct thread *thread = perf_session__findnew(session, 0);
- if (thread == NULL || thread__set_comm(thread, "swapper")) {
+ if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
pr_err("problem inserting idle task.\n");
thread = NULL;
}
@@ -656,41 +1098,71 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
return thread;
}
-int do_read(int fd, void *buf, size_t size)
+static void perf_session__warn_about_errors(const struct perf_session *session,
+ const struct perf_tool *tool)
{
- void *buf_start = buf;
-
- while (size) {
- int ret = read(fd, buf, size);
+ if (tool->lost == perf_event__process_lost &&
+ session->stats.nr_events[PERF_RECORD_LOST] != 0) {
+ ui__warning("Processed %d events and lost %d chunks!\n\n"
+ "Check IO/CPU overload!\n\n",
+ session->stats.nr_events[0],
+ session->stats.nr_events[PERF_RECORD_LOST]);
+ }
- if (ret <= 0)
- return ret;
+ if (session->stats.nr_unknown_events != 0) {
+ ui__warning("Found %u unknown events!\n\n"
+ "Is this an older tool processing a perf.data "
+ "file generated by a more recent tool?\n\n"
+ "If that is not the case, consider "
+ "reporting to linux-kernel@vger.kernel.org.\n\n",
+ session->stats.nr_unknown_events);
+ }
- size -= ret;
- buf += ret;
+ if (session->stats.nr_unknown_id != 0) {
+ ui__warning("%u samples with id not present in the header\n",
+ session->stats.nr_unknown_id);
}
- return buf - buf_start;
+ if (session->stats.nr_invalid_chains != 0) {
+ ui__warning("Found invalid callchains!\n\n"
+ "%u out of %u events were discarded for this reason.\n\n"
+ "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
+ session->stats.nr_invalid_chains,
+ session->stats.nr_events[PERF_RECORD_SAMPLE]);
+ }
+
+ if (session->stats.nr_unprocessable_samples != 0) {
+ ui__warning("%u unprocessable samples recorded.\n"
+ "Do you have a KVM guest running and not using 'perf kvm'?\n",
+ session->stats.nr_unprocessable_samples);
+ }
}
-#define session_done() (*(volatile int *)(&session_done))
volatile int session_done;
-static int __perf_session__process_pipe_events(struct perf_session *self,
- struct perf_event_ops *ops)
+static int __perf_session__process_pipe_events(struct perf_session *session,
+ struct perf_tool *tool)
{
- event_t event;
- uint32_t size;
+ int fd = perf_data_file__fd(session->file);
+ union perf_event *event;
+ uint32_t size, cur_size = 0;
+ void *buf = NULL;
int skip = 0;
u64 head;
- int err;
+ ssize_t err;
void *p;
- perf_event_ops__fill_defaults(ops);
+ perf_tool__fill_defaults(tool);
head = 0;
+ cur_size = sizeof(union perf_event);
+
+ buf = malloc(cur_size);
+ if (!buf)
+ return -errno;
more:
- err = do_read(self->fd, &event, sizeof(struct perf_event_header));
+ event = buf;
+ err = readn(fd, event, sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0)
goto done;
@@ -699,19 +1171,30 @@ more:
goto out_err;
}
- if (self->header.needs_swap)
- perf_event_header__bswap(&event.header);
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
- size = event.header.size;
- if (size == 0)
- size = 8;
+ size = event->header.size;
+ if (size < sizeof(struct perf_event_header)) {
+ pr_err("bad event header size\n");
+ goto out_err;
+ }
- p = &event;
+ if (size > cur_size) {
+ void *new = realloc(buf, size);
+ if (!new) {
+ pr_err("failed to allocate memory to read event\n");
+ goto out_err;
+ }
+ buf = new;
+ cur_size = size;
+ event = buf;
+ }
+ p = event;
p += sizeof(struct perf_event_header);
if (size - sizeof(struct perf_event_header)) {
- err = do_read(self->fd, p,
- size - sizeof(struct perf_event_header));
+ err = readn(fd, p, size - sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0) {
pr_err("unexpected end of event stream\n");
@@ -723,170 +1206,202 @@ more:
}
}
- if (size == 0 ||
- (skip = perf_session__process_event(self, &event, ops,
- 0, head)) < 0) {
- dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
- head, event.header.size, event.header.type);
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
- if (unlikely(head & 7))
- head &= ~7ULL;
-
- size = 8;
+ if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
+ pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+ head, event->header.size, event->header.type);
+ err = -EINVAL;
+ goto out_err;
}
head += size;
- dump_printf("\n%#Lx [%#x]: event: %d\n",
- head, event.header.size, event.header.type);
-
if (skip > 0)
head += skip;
if (!session_done())
goto more;
done:
- err = 0;
+ /* do the final flush for ordered samples */
+ session->ordered_samples.next_flush = ULLONG_MAX;
+ err = flush_sample_queue(session, tool);
out_err:
+ free(buf);
+ perf_session__warn_about_errors(session, tool);
+ perf_session_free_sample_buffers(session);
return err;
}
-int __perf_session__process_events(struct perf_session *self,
+static union perf_event *
+fetch_mmaped_event(struct perf_session *session,
+ u64 head, size_t mmap_size, char *buf)
+{
+ union perf_event *event;
+
+ /*
+ * Ensure we have enough space remaining to read
+ * the size of the event in the headers.
+ */
+ if (head + sizeof(event->header) > mmap_size)
+ return NULL;
+
+ event = (union perf_event *)(buf + head);
+
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
+
+ if (head + event->header.size > mmap_size) {
+ /* We're not fetching the event so swap back again */
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
+ return NULL;
+ }
+
+ return event;
+}
+
+/*
+ * On 64bit we can mmap the data file in one go. No need for tiny mmap
+ * slices. On 32bit we use 32MB.
+ */
+#if BITS_PER_LONG == 64
+#define MMAP_SIZE ULLONG_MAX
+#define NUM_MMAPS 1
+#else
+#define MMAP_SIZE (32 * 1024 * 1024ULL)
+#define NUM_MMAPS 128
+#endif
+
+int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
- u64 file_size, struct perf_event_ops *ops)
+ u64 file_size, struct perf_tool *tool)
{
- int err, mmap_prot, mmap_flags;
- u64 head, shift;
- u64 offset = 0;
- size_t page_size;
- event_t *event;
+ int fd = perf_data_file__fd(session->file);
+ u64 head, page_offset, file_offset, file_pos;
+ int err, mmap_prot, mmap_flags, map_idx = 0;
+ size_t mmap_size;
+ char *buf, *mmaps[NUM_MMAPS];
+ union perf_event *event;
uint32_t size;
- char *buf;
- struct ui_progress *progress = ui_progress__new("Processing events...",
- self->size);
- if (progress == NULL)
- return -1;
+ struct ui_progress prog;
+
+ perf_tool__fill_defaults(tool);
- perf_event_ops__fill_defaults(ops);
+ page_offset = page_size * (data_offset / page_size);
+ file_offset = page_offset;
+ head = data_offset - page_offset;
- page_size = sysconf(_SC_PAGESIZE);
+ if (data_size && (data_offset + data_size < file_size))
+ file_size = data_offset + data_size;
- head = data_offset;
- shift = page_size * (head / page_size);
- offset += shift;
- head -= shift;
+ ui_progress__init(&prog, file_size, "Processing events...");
+
+ mmap_size = MMAP_SIZE;
+ if (mmap_size > file_size)
+ mmap_size = file_size;
+
+ memset(mmaps, 0, sizeof(mmaps));
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
- if (self->header.needs_swap) {
+ if (session->header.needs_swap) {
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
remap:
- buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
- mmap_flags, self->fd, offset);
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
+ file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
goto out_err;
}
+ mmaps[map_idx] = buf;
+ map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
+ file_pos = file_offset + head;
more:
- event = (event_t *)(buf + head);
- ui_progress__update(progress, offset);
-
- if (self->header.needs_swap)
- perf_event_header__bswap(&event->header);
- size = event->header.size;
- if (size == 0)
- size = 8;
-
- if (head + event->header.size >= page_size * self->mmap_window) {
- int munmap_ret;
-
- shift = page_size * (head / page_size);
-
- munmap_ret = munmap(buf, page_size * self->mmap_window);
- assert(munmap_ret == 0);
+ event = fetch_mmaped_event(session, head, mmap_size, buf);
+ if (!event) {
+ if (mmaps[map_idx]) {
+ munmap(mmaps[map_idx], mmap_size);
+ mmaps[map_idx] = NULL;
+ }
- offset += shift;
- head -= shift;
+ page_offset = page_size * (head / page_size);
+ file_offset += page_offset;
+ head -= page_offset;
goto remap;
}
size = event->header.size;
- dump_printf("\n%#Lx [%#x]: event: %d\n",
- offset + head, event->header.size, event->header.type);
-
- if (size == 0 ||
- perf_session__process_event(self, event, ops, offset, head) < 0) {
- dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
- offset + head, event->header.size,
- event->header.type);
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
- if (unlikely(head & 7))
- head &= ~7ULL;
-
- size = 8;
+ if (size < sizeof(struct perf_event_header) ||
+ perf_session__process_event(session, event, tool, file_pos) < 0) {
+ pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+ file_offset + head, event->header.size,
+ event->header.type);
+ err = -EINVAL;
+ goto out_err;
}
head += size;
+ file_pos += size;
- if (offset + head >= data_offset + data_size)
- goto done;
+ ui_progress__update(&prog, size);
+
+ if (session_done())
+ goto out;
- if (offset + head < file_size)
+ if (file_pos < file_size)
goto more;
-done:
- err = 0;
+
+out:
/* do the final flush for ordered samples */
- self->ordered_samples.next_flush = ULLONG_MAX;
- flush_sample_queue(self, ops);
+ session->ordered_samples.next_flush = ULLONG_MAX;
+ err = flush_sample_queue(session, tool);
out_err:
- ui_progress__delete(progress);
+ ui_progress__finish();
+ perf_session__warn_about_errors(session, tool);
+ perf_session_free_sample_buffers(session);
return err;
}
-int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *ops)
+int perf_session__process_events(struct perf_session *session,
+ struct perf_tool *tool)
{
+ u64 size = perf_data_file__size(session->file);
int err;
- if (perf_session__register_idle_thread(self) == NULL)
+ if (perf_session__register_idle_thread(session) == NULL)
return -ENOMEM;
- if (!self->fd_pipe)
- err = __perf_session__process_events(self,
- self->header.data_offset,
- self->header.data_size,
- self->size, ops);
+ if (!perf_data_file__is_pipe(session->file))
+ err = __perf_session__process_events(session,
+ session->header.data_offset,
+ session->header.data_size,
+ size, tool);
else
- err = __perf_session__process_pipe_events(self, ops);
+ err = __perf_session__process_pipe_events(session, tool);
return err;
}
-bool perf_session__has_traces(struct perf_session *self, const char *msg)
+bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
- if (!(self->sample_type & PERF_SAMPLE_RAW)) {
- pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
- return false;
+ struct perf_evsel *evsel;
+
+ evlist__for_each(session->evlist, evsel) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
+ return true;
}
- return true;
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
}
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
- const char *symbol_name,
- u64 addr)
+int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
+ const char *symbol_name, u64 addr)
{
char *bracket;
enum map_type i;
@@ -916,16 +1431,243 @@ int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
return 0;
}
-size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
+size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
+{
+ return machines__fprintf_dsos(&session->machines, fp);
+}
+
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm)
+{
+ return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
+}
+
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+{
+ struct perf_evsel *pos;
+ size_t ret = fprintf(fp, "Aggregated stats:\n");
+
+ ret += events_stats__fprintf(&session->stats, fp);
+
+ evlist__for_each(session->evlist, pos) {
+ ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
+ ret += events_stats__fprintf(&pos->hists.stats, fp);
+ }
+
+ return ret;
+}
+
+size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
+{
+ /*
+ * FIXME: Here we have to actually print all the machines in this
+ * session, not just the host...
+ */
+ return machine__fprintf(&session->machines.host, fp);
+}
+
+struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
+ unsigned int type)
+{
+ struct perf_evsel *pos;
+
+ evlist__for_each(session->evlist, pos) {
+ if (pos->attr.type == type)
+ return pos;
+ }
+ return NULL;
+}
+
+void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct addr_location *al,
+ unsigned int print_opts, unsigned int stack_depth)
+{
+ struct callchain_cursor_node *node;
+ int print_ip = print_opts & PRINT_IP_OPT_IP;
+ int print_sym = print_opts & PRINT_IP_OPT_SYM;
+ int print_dso = print_opts & PRINT_IP_OPT_DSO;
+ int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
+ int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
+ int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
+ char s = print_oneline ? ' ' : '\t';
+
+ if (symbol_conf.use_callchain && sample->callchain) {
+ struct addr_location node_al;
+
+ if (machine__resolve_callchain(al->machine, evsel, al->thread,
+ sample, NULL, NULL,
+ PERF_MAX_STACK_DEPTH) != 0) {
+ if (verbose)
+ error("Failed to resolve callchain. Skipping\n");
+ return;
+ }
+ callchain_cursor_commit(&callchain_cursor);
+
+ if (print_symoffset)
+ node_al = *al;
+
+ while (stack_depth) {
+ u64 addr = 0;
+
+ node = callchain_cursor_current(&callchain_cursor);
+ if (!node)
+ break;
+
+ if (node->sym && node->sym->ignore)
+ goto next;
+
+ if (print_ip)
+ printf("%c%16" PRIx64, s, node->ip);
+
+ if (node->map)
+ addr = node->map->map_ip(node->map, node->ip);
+
+ if (print_sym) {
+ printf(" ");
+ if (print_symoffset) {
+ node_al.addr = addr;
+ node_al.map = node->map;
+ symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
+ } else
+ symbol__fprintf_symname(node->sym, stdout);
+ }
+
+ if (print_dso) {
+ printf(" (");
+ map__fprintf_dsoname(node->map, stdout);
+ printf(")");
+ }
+
+ if (print_srcline)
+ map__fprintf_srcline(node->map, addr, "\n ",
+ stdout);
+
+ if (!print_oneline)
+ printf("\n");
+
+ stack_depth--;
+next:
+ callchain_cursor_advance(&callchain_cursor);
+ }
+
+ } else {
+ if (al->sym && al->sym->ignore)
+ return;
+
+ if (print_ip)
+ printf("%16" PRIx64, sample->ip);
+
+ if (print_sym) {
+ printf(" ");
+ if (print_symoffset)
+ symbol__fprintf_symname_offs(al->sym, al,
+ stdout);
+ else
+ symbol__fprintf_symname(al->sym, stdout);
+ }
+
+ if (print_dso) {
+ printf(" (");
+ map__fprintf_dsoname(al->map, stdout);
+ printf(")");
+ }
+
+ if (print_srcline)
+ map__fprintf_srcline(al->map, al->addr, "\n ", stdout);
+ }
+}
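
The print_opts argument takes the PRINT_IP_OPT_* bits introduced in session.h further down in this patch; a caller simply ORs together whatever it wants printed. An illustrative fragment only (the local variable names are placeholders, not part of the patch):

	unsigned int opts = PRINT_IP_OPT_IP | PRINT_IP_OPT_SYM | PRINT_IP_OPT_DSO;

	perf_evsel__print_ip(evsel, sample, al, opts, PERF_MAX_STACK_DEPTH);
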
+
+int perf_session__cpu_bitmap(struct perf_session *session,
+ const char *cpu_list, unsigned long *cpu_bitmap)
{
- return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
- __dsos__fprintf(&self->host_machine.user_dsos, fp) +
- machines__fprintf_dsos(&self->machines, fp);
+ int i, err = -1;
+ struct cpu_map *map;
+
+ for (i = 0; i < PERF_TYPE_MAX; ++i) {
+ struct perf_evsel *evsel;
+
+ evsel = perf_session__find_first_evtype(session, i);
+ if (!evsel)
+ continue;
+
+ if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
+ pr_err("File does not contain CPU events. "
+ "Remove -c option to proceed.\n");
+ return -1;
+ }
+ }
+
+ map = cpu_map__new(cpu_list);
+ if (map == NULL) {
+ pr_err("Invalid cpu_list\n");
+ return -1;
+ }
+
+ for (i = 0; i < map->nr; i++) {
+ int cpu = map->map[i];
+
+ if (cpu >= MAX_NR_CPUS) {
+ pr_err("Requested CPU %d too large. "
+ "Consider raising MAX_NR_CPUS\n", cpu);
+ goto out_delete_map;
+ }
+
+ set_bit(cpu, cpu_bitmap);
+ }
+
+ err = 0;
+
+out_delete_map:
+ cpu_map__delete(map);
+ return err;
}
-size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
- bool with_hits)
+void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
+ bool full)
{
- size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
- return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
+ struct stat st;
+ int fd, ret;
+
+ if (session == NULL || fp == NULL)
+ return;
+
+ fd = perf_data_file__fd(session->file);
+
+ ret = fstat(fd, &st);
+ if (ret == -1)
+ return;
+
+ fprintf(fp, "# ========\n");
+ fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
+ perf_header__fprintf_info(session, fp, full);
+ fprintf(fp, "# ========\n#\n");
+}
+
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs)
+{
+ struct perf_evsel *evsel;
+ size_t i;
+ int err;
+
+ for (i = 0; i < nr_assocs; i++) {
+ /*
+		 * If we are adding a handler for an event that is not in
+		 * the session, just ignore it.
+ */
+ evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
+ if (evsel == NULL)
+ continue;
+
+ err = -EEXIST;
+ if (evsel->handler != NULL)
+ goto out;
+ evsel->handler = assocs[i].handler;
+ }
+
+ err = 0;
+out:
+ return err;
}
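
Taken together, the reworked processing path in session.c is driven entirely through struct perf_tool callbacks rather than the old perf_event_ops. The fragment below is a minimal illustrative sketch, not part of the patch: it assumes a struct perf_data_file describing the input has already been filled in (see data.h, not shown here) and that struct perf_tool declares the callback members dispatched in the switch above. Handlers left NULL are supplied by perf_tool__fill_defaults() before either processing loop starts.

static int count_comms(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	/* invoked once per PERF_RECORD_COMM event */
	return 0;
}

static int count_all_comms(struct perf_data_file *file)
{
	struct perf_tool tool = {
		.comm		 = count_comms,
		.ordered_samples = true,	/* queue events, flush in timestamp order */
	};
	struct perf_session *session = perf_session__new(file, false, &tool);

	if (session == NULL)
		return -1;

	perf_session__process_events(session, &tool);
	perf_session__delete(session);
	return 0;
}

perf_session__process_events() then walks the mmap'ed (or piped) data and dispatches each record through the switch shown above.
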
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 9fa0fc2a863..3140f8ae614 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -1,13 +1,16 @@
#ifndef __PERF_SESSION_H
#define __PERF_SESSION_H
+#include "trace-event.h"
#include "hist.h"
#include "event.h"
#include "header.h"
+#include "machine.h"
#include "symbol.h"
#include "thread.h"
+#include "data.h"
#include <linux/rbtree.h>
-#include "../../../include/linux/perf_event.h"
+#include <linux/perf_event.h>
struct sample_queue;
struct ip_callchain;
@@ -17,129 +20,110 @@ struct ordered_samples {
u64 last_flush;
u64 next_flush;
u64 max_timestamp;
- struct list_head samples_head;
- struct sample_queue *last_inserted;
+ struct list_head samples;
+ struct list_head sample_cache;
+ struct list_head to_free;
+ struct sample_queue *sample_buffer;
+ struct sample_queue *last_sample;
+ int sample_buffer_idx;
+ unsigned int nr_samples;
};
struct perf_session {
struct perf_header header;
- unsigned long size;
- unsigned long mmap_window;
- struct rb_root threads;
- struct list_head dead_threads;
- struct thread *last_match;
- struct machine host_machine;
- struct rb_root machines;
- struct rb_root hists_tree;
- /*
- * FIXME: should point to the first entry in hists_tree and
- * be a hists instance. Right now its only 'report'
- * that is using ->hists_tree while all the rest use
- * ->hists.
- */
- struct hists hists;
- u64 sample_type;
- int fd;
- bool fd_pipe;
+ struct machines machines;
+ struct perf_evlist *evlist;
+ struct trace_event tevent;
+ struct events_stats stats;
bool repipe;
- int cwdlen;
- char *cwd;
struct ordered_samples ordered_samples;
- char filename[0];
+ struct perf_data_file *file;
};
-struct perf_event_ops;
-
-typedef int (*event_op)(event_t *self, struct perf_session *session);
-typedef int (*event_op2)(event_t *self, struct perf_session *session,
- struct perf_event_ops *ops);
-
-struct perf_event_ops {
- event_op sample,
- mmap,
- comm,
- fork,
- exit,
- lost,
- read,
- throttle,
- unthrottle,
- attr,
- event_type,
- tracing_data,
- build_id;
- event_op2 finished_round;
- bool ordered_samples;
-};
+#define PRINT_IP_OPT_IP (1<<0)
+#define PRINT_IP_OPT_SYM (1<<1)
+#define PRINT_IP_OPT_DSO (1<<2)
+#define PRINT_IP_OPT_SYMOFFSET (1<<3)
+#define PRINT_IP_OPT_ONELINE (1<<4)
+#define PRINT_IP_OPT_SRCLINE (1<<5)
+
+struct perf_tool;
-struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe);
-void perf_session__delete(struct perf_session *self);
+struct perf_session *perf_session__new(struct perf_data_file *file,
+ bool repipe, struct perf_tool *tool);
+void perf_session__delete(struct perf_session *session);
-void perf_event_header__bswap(struct perf_event_header *self);
+void perf_event_header__bswap(struct perf_event_header *hdr);
-int __perf_session__process_events(struct perf_session *self,
+int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size, u64 size,
- struct perf_event_ops *ops);
-int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *event_ops);
+ struct perf_tool *tool);
+int perf_session__process_events(struct perf_session *session,
+ struct perf_tool *tool);
-struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent);
+int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset);
-bool perf_session__has_traces(struct perf_session *self, const char *msg);
+void perf_tool__fill_defaults(struct perf_tool *tool);
-int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
- const char *symbol_name,
- u64 addr);
+int perf_session__resolve_callchain(struct perf_session *session,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
-void mem_bswap_64(void *src, int byte_size);
+bool perf_session__has_traces(struct perf_session *session, const char *msg);
-int perf_session__create_kernel_maps(struct perf_session *self);
+void perf_event__attr_swap(struct perf_event_attr *attr);
-int do_read(int fd, void *buf, size_t size);
-void perf_session__update_sample_type(struct perf_session *self);
-void perf_session__remove_thread(struct perf_session *self, struct thread *th);
+int perf_session__create_kernel_maps(struct perf_session *session);
-static inline
-struct machine *perf_session__find_host_machine(struct perf_session *self)
-{
- return &self->host_machine;
-}
+void perf_session__set_id_hdr_size(struct perf_session *session);
static inline
-struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
+struct machine *perf_session__find_machine(struct perf_session *session, pid_t pid)
{
- if (pid == HOST_KERNEL_ID)
- return &self->host_machine;
- return machines__find(&self->machines, pid);
+ return machines__find(&session->machines, pid);
}
static inline
-struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
+struct machine *perf_session__findnew_machine(struct perf_session *session, pid_t pid)
{
- if (pid == HOST_KERNEL_ID)
- return &self->host_machine;
- return machines__findnew(&self->machines, pid);
+ return machines__findnew(&session->machines, pid);
}
-static inline
-void perf_session__process_machines(struct perf_session *self,
- machine__process_t process)
-{
- process(&self->host_machine, self);
- return machines__process(&self->machines, process, self);
-}
+struct thread *perf_session__findnew(struct perf_session *session, pid_t pid);
+size_t perf_session__fprintf(struct perf_session *session, FILE *fp);
-size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
+size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp);
-size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
- FILE *fp, bool with_hits);
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
+ bool (fn)(struct dso *dso, int parm), int parm);
-static inline
-size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
-{
- return hists__fprintf_nr_events(&self->hists, fp);
-}
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
+
+struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
+ unsigned int type);
+
+void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct addr_location *al,
+ unsigned int print_opts, unsigned int stack_depth);
+
+int perf_session__cpu_bitmap(struct perf_session *session,
+ const char *cpu_list, unsigned long *cpu_bitmap);
+
+void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+struct perf_evsel_str_handler;
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs);
+
+#define perf_session__set_tracepoints_handlers(session, array) \
+ __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
+
+extern volatile int session_done;
+
+#define session_done() (*(volatile int *)(&session_done))
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
new file mode 100644
index 00000000000..d0aee4b9dfd
--- /dev/null
+++ b/tools/perf/util/setup.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python2
+
+from distutils.core import setup, Extension
+from os import getenv
+
+from distutils.command.build_ext import build_ext as _build_ext
+from distutils.command.install_lib import install_lib as _install_lib
+
+class build_ext(_build_ext):
+ def finalize_options(self):
+ _build_ext.finalize_options(self)
+ self.build_lib = build_lib
+ self.build_temp = build_tmp
+
+class install_lib(_install_lib):
+ def finalize_options(self):
+ _install_lib.finalize_options(self)
+ self.build_dir = build_lib
+
+
+cflags = getenv('CFLAGS', '').split()
+# switch off several checks (these need to be at the end of the cflags list)
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+
+build_lib = getenv('PYTHON_EXTBUILD_LIB')
+build_tmp = getenv('PYTHON_EXTBUILD_TMP')
+libtraceevent = getenv('LIBTRACEEVENT')
+libapikfs = getenv('LIBAPIKFS')
+
+ext_sources = [f.strip() for f in file('util/python-ext-sources')
+ if len(f.strip()) > 0 and f[0] != '#']
+
+perf = Extension('perf',
+ sources = ext_sources,
+ include_dirs = ['util/include'],
+ extra_compile_args = cflags,
+ extra_objects = [libtraceevent, libapikfs],
+ )
+
+setup(name='perf',
+ version='0.1',
+ description='Interface with the Linux profiling infrastructure',
+ author='Arnaldo Carvalho de Melo',
+ author_email='acme@redhat.com',
+ license='GPLv2',
+ url='http://perf.wiki.kernel.org',
+ ext_modules=[perf],
+ cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index b62a553cc67..1ec57dd8228 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1,32 +1,78 @@
+#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
+#include "comm.h"
+#include "symbol.h"
+#include "evsel.h"
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
-const char *sort_order = default_sort_order;
+const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to";
+const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
+const char default_top_sort_order[] = "dso,symbol";
+const char default_diff_sort_order[] = "dso,symbol";
+const char *sort_order;
+const char *field_order;
+regex_t ignore_callees_regex;
+int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
+int sort__has_sym = 0;
+int sort__has_dso = 0;
+enum sort_mode sort__mode = SORT_MODE__NORMAL;
-enum sort_type sort__first_dimension;
-char * field_sep;
+static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
+{
+ int n;
+ va_list ap;
-LIST_HEAD(hist_entry__sort_list);
+ va_start(ap, fmt);
+ n = vsnprintf(bf, size, fmt, ap);
+ if (symbol_conf.field_sep && n > 0) {
+ char *sep = bf;
-static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width);
-static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width);
-static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width);
-static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width);
-static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width);
-static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width);
+ while (1) {
+ sep = strchr(sep, *symbol_conf.field_sep);
+ if (sep == NULL)
+ break;
+ *sep = '.';
+ }
+ }
+ va_end(ap);
+
+ if (n >= (int)size)
+ return size - 1;
+ return n;
+}
+
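
The separator escaping in repsep_snprintf() keeps machine-readable output parseable: any occurrence of the configured symbol_conf.field_sep inside a formatted value is rewritten to '.'. A stand-alone sketch of the same idea, independent of perf internals (illustrative only):

#include <stdio.h>
#include <string.h>

/* Replace every occurrence of 'sep' in 's' with '.', as repsep_snprintf() does. */
static void escape_field_sep(char *s, char sep)
{
	while ((s = strchr(s, sep)) != NULL)
		*s++ = '.';
}

int main(void)
{
	char buf[] = "cc1,-O2";

	escape_field_sep(buf, ',');
	printf("%s\n", buf);	/* prints "cc1.-O2" */
	return 0;
}
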
+static int64_t cmp_null(const void *l, const void *r)
+{
+ if (!l && !r)
+ return 0;
+ else if (!l)
+ return -1;
+ else
+ return 1;
+}
+
+/* --sort pid */
+
+static int64_t
+sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->tid - left->thread->tid;
+}
+
+static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ const char *comm = thread__comm_str(he->thread);
+ return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
+ comm ?: "", he->thread->tid);
+}
struct sort_entry sort_thread = {
.se_header = "Command: Pid",
@@ -35,14 +81,89 @@ struct sort_entry sort_thread = {
.se_width_idx = HISTC_THREAD,
};
+/* --sort comm */
+
+static int64_t
+sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ /* Compare the addr that should be unique among comm */
+ return comm__str(right->comm) - comm__str(left->comm);
+}
+
+static int64_t
+sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ /* Compare the addr that should be unique among comm */
+ return comm__str(right->comm) - comm__str(left->comm);
+}
+
+static int64_t
+sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ return strcmp(comm__str(right->comm), comm__str(left->comm));
+}
+
+static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*s", width, comm__str(he->comm));
+}
+
struct sort_entry sort_comm = {
.se_header = "Command",
.se_cmp = sort__comm_cmp,
.se_collapse = sort__comm_collapse,
+ .se_sort = sort__comm_sort,
.se_snprintf = hist_entry__comm_snprintf,
.se_width_idx = HISTC_COMM,
};
+/* --sort dso */
+
+static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
+{
+ struct dso *dso_l = map_l ? map_l->dso : NULL;
+ struct dso *dso_r = map_r ? map_r->dso : NULL;
+ const char *dso_name_l, *dso_name_r;
+
+ if (!dso_l || !dso_r)
+ return cmp_null(dso_r, dso_l);
+
+ if (verbose) {
+ dso_name_l = dso_l->long_name;
+ dso_name_r = dso_r->long_name;
+ } else {
+ dso_name_l = dso_l->short_name;
+ dso_name_r = dso_r->short_name;
+ }
+
+ return strcmp(dso_name_l, dso_name_r);
+}
+
+static int64_t
+sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_cmp(right->ms.map, left->ms.map);
+}
+
+static int _hist_entry__dso_snprintf(struct map *map, char *bf,
+ size_t size, unsigned int width)
+{
+ if (map && map->dso) {
+ const char *dso_name = !verbose ? map->dso->short_name :
+ map->dso->long_name;
+ return repsep_snprintf(bf, size, "%-*s", width, dso_name);
+ }
+
+ return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
+}
+
+static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
+}
+
struct sort_entry sort_dso = {
.se_header = "Shared Object",
.se_cmp = sort__dso_cmp,
@@ -50,20 +171,192 @@ struct sort_entry sort_dso = {
.se_width_idx = HISTC_DSO,
};
+/* --sort symbol */
+
+static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
+{
+ return (int64_t)(right_ip - left_ip);
+}
+
+static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
+{
+ u64 ip_l, ip_r;
+
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ if (sym_l == sym_r)
+ return 0;
+
+ ip_l = sym_l->start;
+ ip_r = sym_r->start;
+
+ return (int64_t)(ip_r - ip_l);
+}
+
+static int64_t
+sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ int64_t ret;
+
+ if (!left->ms.sym && !right->ms.sym)
+ return _sort__addr_cmp(left->ip, right->ip);
+
+ /*
+ * comparing symbol address alone is not enough since it's a
+ * relative address within a dso.
+ */
+ if (!sort__has_dso) {
+ ret = sort__dso_cmp(left, right);
+ if (ret != 0)
+ return ret;
+ }
+
+ return _sort__sym_cmp(left->ms.sym, right->ms.sym);
+}
+
+static int64_t
+sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ if (!left->ms.sym || !right->ms.sym)
+ return cmp_null(left->ms.sym, right->ms.sym);
+
+ return strcmp(right->ms.sym->name, left->ms.sym->name);
+}
+
+static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
+ u64 ip, char level, char *bf, size_t size,
+ unsigned int width)
+{
+ size_t ret = 0;
+
+ if (verbose) {
+ char o = map ? dso__symtab_origin(map->dso) : '!';
+ ret += repsep_snprintf(bf, size, "%-#*llx %c ",
+ BITS_PER_LONG / 4 + 2, ip, o);
+ }
+
+ ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
+ if (sym && map) {
+ if (map->type == MAP__VARIABLE) {
+ ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
+ ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
+ ip - map->unmap_ip(map, sym->start));
+ ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
+ width - ret, "");
+ } else {
+ ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
+ width - ret,
+ sym->name);
+ }
+ } else {
+ size_t len = BITS_PER_LONG / 4;
+ ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
+ len, ip);
+ ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
+ width - ret, "");
+ }
+
+ return ret;
+}
+
+static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
+ he->level, bf, size, width);
+}
+
struct sort_entry sort_sym = {
.se_header = "Symbol",
.se_cmp = sort__sym_cmp,
+ .se_sort = sort__sym_sort,
.se_snprintf = hist_entry__sym_snprintf,
.se_width_idx = HISTC_SYMBOL,
};
+/* --sort srcline */
+
+static int64_t
+sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ if (!left->srcline) {
+ if (!left->ms.map)
+ left->srcline = SRCLINE_UNKNOWN;
+ else {
+ struct map *map = left->ms.map;
+ left->srcline = get_srcline(map->dso,
+ map__rip_2objdump(map, left->ip));
+ }
+ }
+ if (!right->srcline) {
+ if (!right->ms.map)
+ right->srcline = SRCLINE_UNKNOWN;
+ else {
+ struct map *map = right->ms.map;
+ right->srcline = get_srcline(map->dso,
+ map__rip_2objdump(map, right->ip));
+ }
+ }
+ return strcmp(right->srcline, left->srcline);
+}
+
+static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
+ size_t size,
+ unsigned int width __maybe_unused)
+{
+ return repsep_snprintf(bf, size, "%s", he->srcline);
+}
+
+struct sort_entry sort_srcline = {
+ .se_header = "Source:Line",
+ .se_cmp = sort__srcline_cmp,
+ .se_snprintf = hist_entry__srcline_snprintf,
+ .se_width_idx = HISTC_SRCLINE,
+};
+
+/* --sort parent */
+
+static int64_t
+sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct symbol *sym_l = left->parent;
+ struct symbol *sym_r = right->parent;
+
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ return strcmp(sym_r->name, sym_l->name);
+}
+
+static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*s", width,
+ he->parent ? he->parent->name : "[other]");
+}
+
struct sort_entry sort_parent = {
.se_header = "Parent symbol",
.se_cmp = sort__parent_cmp,
.se_snprintf = hist_entry__parent_snprintf,
.se_width_idx = HISTC_PARENT,
};
-
+
+/* --sort cpu */
+
+static int64_t
+sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->cpu - left->cpu;
+}
+
+static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*d", width, he->cpu);
+}
+
struct sort_entry sort_cpu = {
.se_header = "CPU",
.se_cmp = sort__cpu_cmp,
@@ -71,217 +364,996 @@ struct sort_entry sort_cpu = {
.se_width_idx = HISTC_CPU,
};
-struct sort_dimension {
- const char *name;
- struct sort_entry *entry;
- int taken;
+/* sort keys for branch stacks */
+
+static int64_t
+sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_cmp(left->branch_info->from.map,
+ right->branch_info->from.map);
+}
+
+static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_snprintf(he->branch_info->from.map,
+ bf, size, width);
+}
+
+static int64_t
+sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return _sort__dso_cmp(left->branch_info->to.map,
+ right->branch_info->to.map);
+}
+
+static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return _hist_entry__dso_snprintf(he->branch_info->to.map,
+ bf, size, width);
+}
+
+static int64_t
+sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct addr_map_symbol *from_l = &left->branch_info->from;
+ struct addr_map_symbol *from_r = &right->branch_info->from;
+
+ if (!from_l->sym && !from_r->sym)
+ return _sort__addr_cmp(from_l->addr, from_r->addr);
+
+ return _sort__sym_cmp(from_l->sym, from_r->sym);
+}
+
+static int64_t
+sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct addr_map_symbol *to_l = &left->branch_info->to;
+ struct addr_map_symbol *to_r = &right->branch_info->to;
+
+ if (!to_l->sym && !to_r->sym)
+ return _sort__addr_cmp(to_l->addr, to_r->addr);
+
+ return _sort__sym_cmp(to_l->sym, to_r->sym);
+}
+
+static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ struct addr_map_symbol *from = &he->branch_info->from;
+ return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
+ he->level, bf, size, width);
+
+}
+
+static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ struct addr_map_symbol *to = &he->branch_info->to;
+ return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
+ he->level, bf, size, width);
+
+}
+
+struct sort_entry sort_dso_from = {
+ .se_header = "Source Shared Object",
+ .se_cmp = sort__dso_from_cmp,
+ .se_snprintf = hist_entry__dso_from_snprintf,
+ .se_width_idx = HISTC_DSO_FROM,
+};
+
+struct sort_entry sort_dso_to = {
+ .se_header = "Target Shared Object",
+ .se_cmp = sort__dso_to_cmp,
+ .se_snprintf = hist_entry__dso_to_snprintf,
+ .se_width_idx = HISTC_DSO_TO,
+};
+
+struct sort_entry sort_sym_from = {
+ .se_header = "Source Symbol",
+ .se_cmp = sort__sym_from_cmp,
+ .se_snprintf = hist_entry__sym_from_snprintf,
+ .se_width_idx = HISTC_SYMBOL_FROM,
};
-static struct sort_dimension sort_dimensions[] = {
- { .name = "pid", .entry = &sort_thread, },
- { .name = "comm", .entry = &sort_comm, },
- { .name = "dso", .entry = &sort_dso, },
- { .name = "symbol", .entry = &sort_sym, },
- { .name = "parent", .entry = &sort_parent, },
- { .name = "cpu", .entry = &sort_cpu, },
+struct sort_entry sort_sym_to = {
+ .se_header = "Target Symbol",
+ .se_cmp = sort__sym_to_cmp,
+ .se_snprintf = hist_entry__sym_to_snprintf,
+ .se_width_idx = HISTC_SYMBOL_TO,
};
-int64_t cmp_null(void *l, void *r)
+static int64_t
+sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
- if (!l && !r)
- return 0;
- else if (!l)
- return -1;
+ const unsigned char mp = left->branch_info->flags.mispred !=
+ right->branch_info->flags.mispred;
+ const unsigned char p = left->branch_info->flags.predicted !=
+ right->branch_info->flags.predicted;
+
+ return mp || p;
+}
+
+static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width){
+ static const char *out = "N/A";
+
+ if (he->branch_info->flags.predicted)
+ out = "N";
+ else if (he->branch_info->flags.mispred)
+ out = "Y";
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+/* --sort daddr_sym */
+static int64_t
+sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ uint64_t l = 0, r = 0;
+
+ if (left->mem_info)
+ l = left->mem_info->daddr.addr;
+ if (right->mem_info)
+ r = right->mem_info->daddr.addr;
+
+ return (int64_t)(r - l);
+}
+
+static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ uint64_t addr = 0;
+ struct map *map = NULL;
+ struct symbol *sym = NULL;
+
+ if (he->mem_info) {
+ addr = he->mem_info->daddr.addr;
+ map = he->mem_info->daddr.map;
+ sym = he->mem_info->daddr.sym;
+ }
+ return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
+ width);
+}
+
+static int64_t
+sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct map *map_l = NULL;
+ struct map *map_r = NULL;
+
+ if (left->mem_info)
+ map_l = left->mem_info->daddr.map;
+ if (right->mem_info)
+ map_r = right->mem_info->daddr.map;
+
+ return _sort__dso_cmp(map_l, map_r);
+}
+
+static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ struct map *map = NULL;
+
+ if (he->mem_info)
+ map = he->mem_info->daddr.map;
+
+ return _hist_entry__dso_snprintf(map, bf, size, width);
+}
+
+static int64_t
+sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ union perf_mem_data_src data_src_l;
+ union perf_mem_data_src data_src_r;
+
+ if (left->mem_info)
+ data_src_l = left->mem_info->data_src;
else
- return 1;
+ data_src_l.mem_lock = PERF_MEM_LOCK_NA;
+
+ if (right->mem_info)
+ data_src_r = right->mem_info->data_src;
+ else
+ data_src_r.mem_lock = PERF_MEM_LOCK_NA;
+
+ return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}
-/* --sort pid */
+static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ const char *out;
+ u64 mask = PERF_MEM_LOCK_NA;
-int64_t
-sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+ if (he->mem_info)
+ mask = he->mem_info->data_src.mem_lock;
+
+ if (mask & PERF_MEM_LOCK_NA)
+ out = "N/A";
+ else if (mask & PERF_MEM_LOCK_LOCKED)
+ out = "Yes";
+ else
+ out = "No";
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+static int64_t
+sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->pid - left->thread->pid;
+ union perf_mem_data_src data_src_l;
+ union perf_mem_data_src data_src_r;
+
+ if (left->mem_info)
+ data_src_l = left->mem_info->data_src;
+ else
+ data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
+
+ if (right->mem_info)
+ data_src_r = right->mem_info->data_src;
+ else
+ data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
+
+ return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}
-static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
+static const char * const tlb_access[] = {
+ "N/A",
+ "HIT",
+ "MISS",
+ "L1",
+ "L2",
+ "Walker",
+ "Fault",
+};
+#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
+
+static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
{
- int n;
- va_list ap;
+ char out[64];
+ size_t sz = sizeof(out) - 1; /* -1 for null termination */
+ size_t l = 0, i;
+ u64 m = PERF_MEM_TLB_NA;
+ u64 hit, miss;
- va_start(ap, fmt);
- n = vsnprintf(bf, size, fmt, ap);
- if (field_sep && n > 0) {
- char *sep = bf;
+ out[0] = '\0';
- while (1) {
- sep = strchr(sep, *field_sep);
- if (sep == NULL)
- break;
- *sep = '.';
+ if (he->mem_info)
+ m = he->mem_info->data_src.mem_dtlb;
+
+ hit = m & PERF_MEM_TLB_HIT;
+ miss = m & PERF_MEM_TLB_MISS;
+
+ /* already taken care of */
+ m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
+
+ for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+ if (l) {
+ strcat(out, " or ");
+ l += 4;
}
+ strncat(out, tlb_access[i], sz - l);
+ l += strlen(tlb_access[i]);
}
- va_end(ap);
- return n;
+ if (*out == '\0')
+ strcpy(out, "N/A");
+ if (hit)
+ strncat(out, " hit", sz - l);
+ if (miss)
+ strncat(out, " miss", sz - l);
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
}
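
For example, a load whose data_src carries PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT renders as "L1 hit" in this column: the HIT/MISS bits are masked out before the name loop and appended afterwards as a " hit"/" miss" suffix.
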
-static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width)
+static int64_t
+sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return repsep_snprintf(bf, size, "%*s:%5d", width,
- self->thread->comm ?: "", self->thread->pid);
+ union perf_mem_data_src data_src_l;
+ union perf_mem_data_src data_src_r;
+
+ if (left->mem_info)
+ data_src_l = left->mem_info->data_src;
+ else
+ data_src_l.mem_lvl = PERF_MEM_LVL_NA;
+
+ if (right->mem_info)
+ data_src_r = right->mem_info->data_src;
+ else
+ data_src_r.mem_lvl = PERF_MEM_LVL_NA;
+
+ return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}
-static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width)
+static const char * const mem_lvl[] = {
+ "N/A",
+ "HIT",
+ "MISS",
+ "L1",
+ "LFB",
+ "L2",
+ "L3",
+ "Local RAM",
+ "Remote RAM (1 hop)",
+ "Remote RAM (2 hops)",
+ "Remote Cache (1 hop)",
+ "Remote Cache (2 hops)",
+ "I/O",
+ "Uncached",
+};
+#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
+
+static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
+ char out[64];
+ size_t sz = sizeof(out) - 1; /* -1 for null termination */
+ size_t i, l = 0;
+ u64 m = PERF_MEM_LVL_NA;
+ u64 hit, miss;
+
+ if (he->mem_info)
+ m = he->mem_info->data_src.mem_lvl;
+
+ out[0] = '\0';
+
+ hit = m & PERF_MEM_LVL_HIT;
+ miss = m & PERF_MEM_LVL_MISS;
+
+ /* already taken care of */
+ m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
+
+ for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+ if (l) {
+ strcat(out, " or ");
+ l += 4;
+ }
+ strncat(out, mem_lvl[i], sz - l);
+ l += strlen(mem_lvl[i]);
+ }
+ if (*out == '\0')
+ strcpy(out, "N/A");
+ if (hit)
+ strncat(out, " hit", sz - l);
+ if (miss)
+ strncat(out, " miss", sz - l);
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
}
-/* --sort dso */
+static int64_t
+sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ union perf_mem_data_src data_src_l;
+ union perf_mem_data_src data_src_r;
-int64_t
-sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
+ if (left->mem_info)
+ data_src_l = left->mem_info->data_src;
+ else
+ data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
+
+ if (right->mem_info)
+ data_src_r = right->mem_info->data_src;
+ else
+ data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
+
+ return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
+}
+
+static const char * const snoop_access[] = {
+ "N/A",
+ "None",
+ "Miss",
+ "Hit",
+ "HitM",
+};
+#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
+
+static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
{
- struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL;
- struct dso *dso_r = right->ms.map ? right->ms.map->dso : NULL;
- const char *dso_name_l, *dso_name_r;
+ char out[64];
+ size_t sz = sizeof(out) - 1; /* -1 for null termination */
+ size_t i, l = 0;
+ u64 m = PERF_MEM_SNOOP_NA;
- if (!dso_l || !dso_r)
- return cmp_null(dso_l, dso_r);
+ out[0] = '\0';
- if (verbose) {
- dso_name_l = dso_l->long_name;
- dso_name_r = dso_r->long_name;
- } else {
- dso_name_l = dso_l->short_name;
- dso_name_r = dso_r->short_name;
+ if (he->mem_info)
+ m = he->mem_info->data_src.mem_snoop;
+
+ for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
+ if (!(m & 0x1))
+ continue;
+ if (l) {
+ strcat(out, " or ");
+ l += 4;
+ }
+ strncat(out, snoop_access[i], sz - l);
+ l += strlen(snoop_access[i]);
}
- return strcmp(dso_name_l, dso_name_r);
+ if (*out == '\0')
+ strcpy(out, "N/A");
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
}
-static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width)
+static inline u64 cl_address(u64 address)
{
- if (self->ms.map && self->ms.map->dso) {
- const char *dso_name = !verbose ? self->ms.map->dso->short_name :
- self->ms.map->dso->long_name;
- return repsep_snprintf(bf, size, "%-*s", width, dso_name);
+ /* return the cacheline of the address */
+ return (address & ~(cacheline_size - 1));
+}
+
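
Since cacheline_size is a power of two, the mask simply clears the line-offset bits: with 64-byte cachelines, for instance, 0x1234 and 0x123f both collapse to cacheline address 0x1200, while 0x1240 already belongs to the next line.
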
+static int64_t
+sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ u64 l, r;
+ struct map *l_map, *r_map;
+
+ if (!left->mem_info) return -1;
+ if (!right->mem_info) return 1;
+
+ /* group event types together */
+ if (left->cpumode > right->cpumode) return -1;
+ if (left->cpumode < right->cpumode) return 1;
+
+ l_map = left->mem_info->daddr.map;
+ r_map = right->mem_info->daddr.map;
+
+ /* if both are NULL, jump to sort on al_addr instead */
+ if (!l_map && !r_map)
+ goto addr;
+
+ if (!l_map) return -1;
+ if (!r_map) return 1;
+
+ if (l_map->maj > r_map->maj) return -1;
+ if (l_map->maj < r_map->maj) return 1;
+
+ if (l_map->min > r_map->min) return -1;
+ if (l_map->min < r_map->min) return 1;
+
+ if (l_map->ino > r_map->ino) return -1;
+ if (l_map->ino < r_map->ino) return 1;
+
+ if (l_map->ino_generation > r_map->ino_generation) return -1;
+ if (l_map->ino_generation < r_map->ino_generation) return 1;
+
+ /*
+ * Addresses with no major/minor numbers are assumed to be
+ * anonymous in userspace. Sort those on pid then address.
+ *
+ * The kernel and non-zero major/minor mapped areas are
+ * assumed to be unity mapped. Sort those on address.
+ */
+
+ if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
+ (!(l_map->flags & MAP_SHARED)) &&
+ !l_map->maj && !l_map->min && !l_map->ino &&
+ !l_map->ino_generation) {
+ /* userspace anonymous */
+
+ if (left->thread->pid_ > right->thread->pid_) return -1;
+ if (left->thread->pid_ < right->thread->pid_) return 1;
}
- return repsep_snprintf(bf, size, "%*Lx", width, self->ip);
+addr:
+ /* al_addr does all the right addr - start + offset calculations */
+ l = cl_address(left->mem_info->daddr.al_addr);
+ r = cl_address(right->mem_info->daddr.al_addr);
+
+ if (l > r) return -1;
+ if (l < r) return 1;
+
+ return 0;
}
-/* --sort symbol */
+static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
-int64_t
-sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+ uint64_t addr = 0;
+ struct map *map = NULL;
+ struct symbol *sym = NULL;
+ char level = he->level;
+
+ if (he->mem_info) {
+ addr = cl_address(he->mem_info->daddr.al_addr);
+ map = he->mem_info->daddr.map;
+ sym = he->mem_info->daddr.sym;
+
+ /* print [s] for shared data mmaps */
+ if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
+ map && (map->type == MAP__VARIABLE) &&
+ (map->flags & MAP_SHARED) &&
+ (map->maj || map->min || map->ino ||
+ map->ino_generation))
+ level = 's';
+ else if (!map)
+ level = 'X';
+ }
+ return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
+ width);
+}
+
+struct sort_entry sort_mispredict = {
+ .se_header = "Branch Mispredicted",
+ .se_cmp = sort__mispredict_cmp,
+ .se_snprintf = hist_entry__mispredict_snprintf,
+ .se_width_idx = HISTC_MISPREDICT,
+};
+
+static u64 he_weight(struct hist_entry *he)
{
- u64 ip_l, ip_r;
+ return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
+}
- if (left->ms.sym == right->ms.sym)
- return 0;
+static int64_t
+sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return he_weight(left) - he_weight(right);
+}
+
+static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
+}
- ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
- ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
+struct sort_entry sort_local_weight = {
+ .se_header = "Local Weight",
+ .se_cmp = sort__local_weight_cmp,
+ .se_snprintf = hist_entry__local_weight_snprintf,
+ .se_width_idx = HISTC_LOCAL_WEIGHT,
+};
- return (int64_t)(ip_r - ip_l);
+static int64_t
+sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->stat.weight - right->stat.weight;
}
-static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width __used)
+static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
{
- size_t ret = 0;
+ return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
+}
- if (verbose) {
- char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!';
- ret += repsep_snprintf(bf, size, "%*Lx %c ",
- BITS_PER_LONG / 4, self->ip, o);
+struct sort_entry sort_global_weight = {
+ .se_header = "Weight",
+ .se_cmp = sort__global_weight_cmp,
+ .se_snprintf = hist_entry__global_weight_snprintf,
+ .se_width_idx = HISTC_GLOBAL_WEIGHT,
+};
+
+struct sort_entry sort_mem_daddr_sym = {
+ .se_header = "Data Symbol",
+ .se_cmp = sort__daddr_cmp,
+ .se_snprintf = hist_entry__daddr_snprintf,
+ .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
+};
+
+struct sort_entry sort_mem_daddr_dso = {
+ .se_header = "Data Object",
+ .se_cmp = sort__dso_daddr_cmp,
+ .se_snprintf = hist_entry__dso_daddr_snprintf,
+ .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
+};
+
+struct sort_entry sort_mem_locked = {
+ .se_header = "Locked",
+ .se_cmp = sort__locked_cmp,
+ .se_snprintf = hist_entry__locked_snprintf,
+ .se_width_idx = HISTC_MEM_LOCKED,
+};
+
+struct sort_entry sort_mem_tlb = {
+ .se_header = "TLB access",
+ .se_cmp = sort__tlb_cmp,
+ .se_snprintf = hist_entry__tlb_snprintf,
+ .se_width_idx = HISTC_MEM_TLB,
+};
+
+struct sort_entry sort_mem_lvl = {
+ .se_header = "Memory access",
+ .se_cmp = sort__lvl_cmp,
+ .se_snprintf = hist_entry__lvl_snprintf,
+ .se_width_idx = HISTC_MEM_LVL,
+};
+
+struct sort_entry sort_mem_snoop = {
+ .se_header = "Snoop",
+ .se_cmp = sort__snoop_cmp,
+ .se_snprintf = hist_entry__snoop_snprintf,
+ .se_width_idx = HISTC_MEM_SNOOP,
+};
+
+struct sort_entry sort_mem_dcacheline = {
+ .se_header = "Data Cacheline",
+ .se_cmp = sort__dcacheline_cmp,
+ .se_snprintf = hist_entry__dcacheline_snprintf,
+ .se_width_idx = HISTC_MEM_DCACHELINE,
+};
+
+static int64_t
+sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->branch_info->flags.abort !=
+ right->branch_info->flags.abort;
+}
+
+static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ static const char *out = ".";
+
+ if (he->branch_info->flags.abort)
+ out = "A";
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+struct sort_entry sort_abort = {
+ .se_header = "Transaction abort",
+ .se_cmp = sort__abort_cmp,
+ .se_snprintf = hist_entry__abort_snprintf,
+ .se_width_idx = HISTC_ABORT,
+};
+
+static int64_t
+sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->branch_info->flags.in_tx !=
+ right->branch_info->flags.in_tx;
+}
+
+static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ static const char *out = ".";
+
+ if (he->branch_info->flags.in_tx)
+ out = "T";
+
+ return repsep_snprintf(bf, size, "%-*s", width, out);
+}
+
+struct sort_entry sort_in_tx = {
+ .se_header = "Branch in transaction",
+ .se_cmp = sort__in_tx_cmp,
+ .se_snprintf = hist_entry__in_tx_snprintf,
+ .se_width_idx = HISTC_IN_TX,
+};
+
+static int64_t
+sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->transaction - right->transaction;
+}
+
+static inline char *add_str(char *p, const char *str)
+{
+ strcpy(p, str);
+ return p + strlen(str);
+}
+
+static struct txbit {
+ unsigned flag;
+ const char *name;
+ int skip_for_len;
+} txbits[] = {
+ { PERF_TXN_ELISION, "EL ", 0 },
+ { PERF_TXN_TRANSACTION, "TX ", 1 },
+ { PERF_TXN_SYNC, "SYNC ", 1 },
+ { PERF_TXN_ASYNC, "ASYNC ", 0 },
+ { PERF_TXN_RETRY, "RETRY ", 0 },
+ { PERF_TXN_CONFLICT, "CON ", 0 },
+ { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
+ { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
+ { 0, NULL, 0 }
+};
+
+int hist_entry__transaction_len(void)
+{
+ int i;
+ int len = 0;
+
+ for (i = 0; txbits[i].name; i++) {
+ if (!txbits[i].skip_for_len)
+ len += strlen(txbits[i].name);
}
+ len += 4; /* :XX<space> */
+ return len;
+}
- ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
- if (self->ms.sym)
- ret += repsep_snprintf(bf + ret, size - ret, "%s",
- self->ms.sym->name);
- else
- ret += repsep_snprintf(bf + ret, size - ret, "%*Lx",
- BITS_PER_LONG / 4, self->ip);
+static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ u64 t = he->transaction;
+ char buf[128];
+ char *p = buf;
+ int i;
+
+ buf[0] = 0;
+ for (i = 0; txbits[i].name; i++)
+ if (txbits[i].flag & t)
+ p = add_str(p, txbits[i].name);
+ if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
+ p = add_str(p, "NEITHER ");
+ if (t & PERF_TXN_ABORT_MASK) {
+ sprintf(p, ":%" PRIx64,
+ (t & PERF_TXN_ABORT_MASK) >>
+ PERF_TXN_ABORT_SHIFT);
+ p += strlen(p);
+ }
- return ret;
+ return repsep_snprintf(bf, size, "%-*s", width, buf);
}
-/* --sort comm */
+struct sort_entry sort_transaction = {
+ .se_header = "Transaction ",
+ .se_cmp = sort__transaction_cmp,
+ .se_snprintf = hist_entry__transaction_snprintf,
+ .se_width_idx = HISTC_TRANSACTION,
+};
-int64_t
-sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+struct sort_dimension {
+ const char *name;
+ struct sort_entry *entry;
+ int taken;
+};
+
+#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
+
+static struct sort_dimension common_sort_dimensions[] = {
+ DIM(SORT_PID, "pid", sort_thread),
+ DIM(SORT_COMM, "comm", sort_comm),
+ DIM(SORT_DSO, "dso", sort_dso),
+ DIM(SORT_SYM, "symbol", sort_sym),
+ DIM(SORT_PARENT, "parent", sort_parent),
+ DIM(SORT_CPU, "cpu", sort_cpu),
+ DIM(SORT_SRCLINE, "srcline", sort_srcline),
+ DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
+ DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
+ DIM(SORT_TRANSACTION, "transaction", sort_transaction),
+};
+
+#undef DIM
+
+#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
+
+static struct sort_dimension bstack_sort_dimensions[] = {
+ DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
+ DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
+ DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
+ DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
+ DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+ DIM(SORT_IN_TX, "in_tx", sort_in_tx),
+ DIM(SORT_ABORT, "abort", sort_abort),
+};
+
+#undef DIM
+
+#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
+
+static struct sort_dimension memory_sort_dimensions[] = {
+ DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
+ DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
+ DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
+ DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
+ DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
+ DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
+ DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
+};
+
+#undef DIM
+
+struct hpp_dimension {
+ const char *name;
+ struct perf_hpp_fmt *fmt;
+ int taken;
+};
+
+#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
+
+static struct hpp_dimension hpp_sort_dimensions[] = {
+ DIM(PERF_HPP__OVERHEAD, "overhead"),
+ DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
+ DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
+ DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
+ DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
+ DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
+ DIM(PERF_HPP__SAMPLES, "sample"),
+ DIM(PERF_HPP__PERIOD, "period"),
+};
+
+#undef DIM
+
+struct hpp_sort_entry {
+ struct perf_hpp_fmt hpp;
+ struct sort_entry *se;
+};
+
+bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
- return right->thread->pid - left->thread->pid;
+ struct hpp_sort_entry *hse_a;
+ struct hpp_sort_entry *hse_b;
+
+ if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
+ return false;
+
+ hse_a = container_of(a, struct hpp_sort_entry, hpp);
+ hse_b = container_of(b, struct hpp_sort_entry, hpp);
+
+ return hse_a->se == hse_b->se;
}
-int64_t
-sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
+void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
- char *comm_l = left->thread->comm;
- char *comm_r = right->thread->comm;
+ struct hpp_sort_entry *hse;
- if (!comm_l || !comm_r)
- return cmp_null(comm_l, comm_r);
+ if (!perf_hpp__is_sort_entry(fmt))
+ return;
- return strcmp(comm_l, comm_r);
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ hists__new_col_len(hists, hse->se->se_width_idx,
+ strlen(hse->se->se_header));
}
-/* --sort parent */
+static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel)
+{
+ struct hpp_sort_entry *hse;
+ size_t len;
-int64_t
-sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", len, hse->se->se_header);
+}
+
+static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct perf_evsel *evsel)
{
- struct symbol *sym_l = left->parent;
- struct symbol *sym_r = right->parent;
+ struct hpp_sort_entry *hse;
- if (!sym_l || !sym_r)
- return cmp_null(sym_l, sym_r);
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
- return strcmp(sym_l->name, sym_r->name);
+ return hists__col_len(&evsel->hists, hse->se->se_width_idx);
}
-static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width)
+static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
{
- return repsep_snprintf(bf, size, "%-*s", width,
- self->parent ? self->parent->name : "[other]");
+ struct hpp_sort_entry *hse;
+ size_t len;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ len = hists__col_len(he->hists, hse->se->se_width_idx);
+
+ return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
-/* --sort cpu */
+static struct hpp_sort_entry *
+__sort_dimension__alloc_hpp(struct sort_dimension *sd)
+{
+ struct hpp_sort_entry *hse;
-int64_t
-sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
+ hse = malloc(sizeof(*hse));
+ if (hse == NULL) {
+ pr_err("Memory allocation failed\n");
+ return NULL;
+ }
+
+ hse->se = sd->entry;
+ hse->hpp.header = __sort__hpp_header;
+ hse->hpp.width = __sort__hpp_width;
+ hse->hpp.entry = __sort__hpp_entry;
+ hse->hpp.color = NULL;
+
+ hse->hpp.cmp = sd->entry->se_cmp;
+ hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
+ hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
+
+ INIT_LIST_HEAD(&hse->hpp.list);
+ INIT_LIST_HEAD(&hse->hpp.sort_list);
+ hse->hpp.elide = false;
+
+ return hse;
+}
+
+bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
- return right->cpu - left->cpu;
+ return format->header == __sort__hpp_header;
}
-static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width)
+static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
{
- return repsep_snprintf(bf, size, "%-*d", width, self->cpu);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
+
+ if (hse == NULL)
+ return -1;
+
+ perf_hpp__register_sort_field(&hse->hpp);
+ return 0;
+}
+
+static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
+{
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
+
+ if (hse == NULL)
+ return -1;
+
+ perf_hpp__column_register(&hse->hpp);
+ return 0;
+}
+
+static int __sort_dimension__add(struct sort_dimension *sd)
+{
+ if (sd->taken)
+ return 0;
+
+ if (__sort_dimension__add_hpp_sort(sd) < 0)
+ return -1;
+
+ if (sd->entry->se_collapse)
+ sort__need_collapse = 1;
+
+ sd->taken = 1;
+
+ return 0;
+}
+
+static int __hpp_dimension__add(struct hpp_dimension *hd)
+{
+ if (!hd->taken) {
+ hd->taken = 1;
+
+ perf_hpp__register_sort_field(hd->fmt);
+ }
+ return 0;
+}
+
+static int __sort_dimension__add_output(struct sort_dimension *sd)
+{
+ if (sd->taken)
+ return 0;
+
+ if (__sort_dimension__add_hpp_output(sd) < 0)
+ return -1;
+
+ sd->taken = 1;
+ return 0;
+}
+
+static int __hpp_dimension__add_output(struct hpp_dimension *hd)
+{
+ if (!hd->taken) {
+ hd->taken = 1;
+
+ perf_hpp__column_register(hd->fmt);
+ }
+ return 0;
}
int sort_dimension__add(const char *tok)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
- struct sort_dimension *sd = &sort_dimensions[i];
-
- if (sd->taken)
- continue;
+ for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
+ struct sort_dimension *sd = &common_sort_dimensions[i];
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
- if (sd->entry->se_collapse)
- sort__need_collapse = 1;
-
if (sd->entry == &sort_parent) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
if (ret) {
@@ -292,54 +1364,343 @@ int sort_dimension__add(const char *tok)
return -EINVAL;
}
sort__has_parent = 1;
+ } else if (sd->entry == &sort_sym) {
+ sort__has_sym = 1;
+ } else if (sd->entry == &sort_dso) {
+ sort__has_dso = 1;
}
- if (list_empty(&hist_entry__sort_list)) {
- if (!strcmp(sd->name, "pid"))
- sort__first_dimension = SORT_PID;
- else if (!strcmp(sd->name, "comm"))
- sort__first_dimension = SORT_COMM;
- else if (!strcmp(sd->name, "dso"))
- sort__first_dimension = SORT_DSO;
- else if (!strcmp(sd->name, "symbol"))
- sort__first_dimension = SORT_SYM;
- else if (!strcmp(sd->name, "parent"))
- sort__first_dimension = SORT_PARENT;
- else if (!strcmp(sd->name, "cpu"))
- sort__first_dimension = SORT_CPU;
- }
+ return __sort_dimension__add(sd);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+
+ if (strncasecmp(tok, hd->name, strlen(tok)))
+ continue;
- list_add_tail(&sd->entry->list, &hist_entry__sort_list);
- sd->taken = 1;
+ return __hpp_dimension__add(hd);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
+ struct sort_dimension *sd = &bstack_sort_dimensions[i];
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+ if (sort__mode != SORT_MODE__BRANCH)
+ return -EINVAL;
+
+ if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
+ sort__has_sym = 1;
+
+ __sort_dimension__add(sd);
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
+ struct sort_dimension *sd = &memory_sort_dimensions[i];
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+
+ if (sort__mode != SORT_MODE__MEMORY)
+ return -EINVAL;
+
+ if (sd->entry == &sort_mem_daddr_sym)
+ sort__has_sym = 1;
+
+ __sort_dimension__add(sd);
return 0;
}
return -ESRCH;
}
-void setup_sorting(const char * const usagestr[], const struct option *opts)
+static const char *get_default_sort_order(void)
+{
+ const char *default_sort_orders[] = {
+ default_sort_order,
+ default_branch_sort_order,
+ default_mem_sort_order,
+ default_top_sort_order,
+ default_diff_sort_order,
+ };
+
+ BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
+
+ return default_sort_orders[sort__mode];
+}
+
+static int __setup_sorting(void)
{
- char *tmp, *tok, *str = strdup(sort_order);
+ char *tmp, *tok, *str;
+ const char *sort_keys = sort_order;
+ int ret = 0;
+
+ if (sort_keys == NULL) {
+ if (field_order) {
+ /*
+ * If user specified field order but no sort order,
+ * we'll honor it and not add default sort orders.
+ */
+ return 0;
+ }
+
+ sort_keys = get_default_sort_order();
+ }
+
+ str = strdup(sort_keys);
+ if (str == NULL) {
+ error("Not enough memory to setup sort keys");
+ return -ENOMEM;
+ }
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
- if (sort_dimension__add(tok) < 0) {
+ ret = sort_dimension__add(tok);
+ if (ret == -EINVAL) {
+ error("Invalid --sort key: `%s'", tok);
+ break;
+ } else if (ret == -ESRCH) {
error("Unknown --sort key: `%s'", tok);
- usage_with_options(usagestr, opts);
+ break;
}
}
free(str);
+ return ret;
+}
+
+void perf_hpp__set_elide(int idx, bool elide)
+{
+ struct perf_hpp_fmt *fmt;
+ struct hpp_sort_entry *hse;
+
+ perf_hpp__for_each_format(fmt) {
+ if (!perf_hpp__is_sort_entry(fmt))
+ continue;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ if (hse->se->se_width_idx == idx) {
+ fmt->elide = elide;
+ break;
+ }
+ }
}
-void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
- const char *list_name, FILE *fp)
+static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
if (list && strlist__nr_entries(list) == 1) {
if (fp != NULL)
fprintf(fp, "# %s: %s\n", list_name,
strlist__entry(list, 0)->s);
- self->elide = true;
+ return true;
+ }
+ return false;
+}
+
+static bool get_elide(int idx, FILE *output)
+{
+ switch (idx) {
+ case HISTC_SYMBOL:
+ return __get_elide(symbol_conf.sym_list, "symbol", output);
+ case HISTC_DSO:
+ return __get_elide(symbol_conf.dso_list, "dso", output);
+ case HISTC_COMM:
+ return __get_elide(symbol_conf.comm_list, "comm", output);
+ default:
+ break;
+ }
+
+ if (sort__mode != SORT_MODE__BRANCH)
+ return false;
+
+ switch (idx) {
+ case HISTC_SYMBOL_FROM:
+ return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
+ case HISTC_SYMBOL_TO:
+ return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
+ case HISTC_DSO_FROM:
+ return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
+ case HISTC_DSO_TO:
+ return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+void sort__setup_elide(FILE *output)
+{
+ struct perf_hpp_fmt *fmt;
+ struct hpp_sort_entry *hse;
+
+ perf_hpp__for_each_format(fmt) {
+ if (!perf_hpp__is_sort_entry(fmt))
+ continue;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ fmt->elide = get_elide(hse->se->se_width_idx, output);
}
+
+	/*
+	 * It makes no sense to elide all of the sort entries.
+	 * Just revert them so they show up again.
+	 */
+ perf_hpp__for_each_format(fmt) {
+ if (!perf_hpp__is_sort_entry(fmt))
+ continue;
+
+ if (!fmt->elide)
+ return;
+ }
+
+ perf_hpp__for_each_format(fmt) {
+ if (!perf_hpp__is_sort_entry(fmt))
+ continue;
+
+ fmt->elide = false;
+ }
+}
+
+static int output_field_add(char *tok)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
+ struct sort_dimension *sd = &common_sort_dimensions[i];
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+
+ return __sort_dimension__add_output(sd);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+
+ if (strncasecmp(tok, hd->name, strlen(tok)))
+ continue;
+
+ return __hpp_dimension__add_output(hd);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
+ struct sort_dimension *sd = &bstack_sort_dimensions[i];
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+
+ return __sort_dimension__add_output(sd);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
+ struct sort_dimension *sd = &memory_sort_dimensions[i];
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+
+ return __sort_dimension__add_output(sd);
+ }
+
+ return -ESRCH;
+}
+
+static void reset_dimensions(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
+ common_sort_dimensions[i].taken = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
+ hpp_sort_dimensions[i].taken = 0;
+
+ for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
+ bstack_sort_dimensions[i].taken = 0;
+
+ for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
+ memory_sort_dimensions[i].taken = 0;
+}
+
+static int __setup_output_field(void)
+{
+ char *tmp, *tok, *str;
+ int ret = 0;
+
+ if (field_order == NULL)
+ return 0;
+
+ reset_dimensions();
+
+ str = strdup(field_order);
+ if (str == NULL) {
+ error("Not enough memory to setup output fields");
+ return -ENOMEM;
+ }
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ ret = output_field_add(tok);
+ if (ret == -EINVAL) {
+ error("Invalid --fields key: `%s'", tok);
+ break;
+ } else if (ret == -ESRCH) {
+ error("Unknown --fields key: `%s'", tok);
+ break;
+ }
+ }
+
+ free(str);
+ return ret;
+}
+
+int setup_sorting(void)
+{
+ int err;
+
+ err = __setup_sorting();
+ if (err < 0)
+ return err;
+
+ if (parent_pattern != default_parent_pattern) {
+ err = sort_dimension__add("parent");
+ if (err < 0)
+ return err;
+ }
+
+ reset_dimensions();
+
+ /*
+ * perf diff doesn't use default hpp output fields.
+ */
+ if (sort__mode != SORT_MODE__DIFF)
+ perf_hpp__init();
+
+ err = __setup_output_field();
+ if (err < 0)
+ return err;
+
+ /* copy sort keys to output fields */
+ perf_hpp__setup_output_field();
+ /* and then copy output fields to sort keys */
+ perf_hpp__append_sort_keys();
+
+ return 0;
+}
+
+void reset_output_field(void)
+{
+ sort__need_collapse = 0;
+ sort__has_parent = 0;
+ sort__has_sym = 0;
+ sort__has_dso = 0;
+
+ field_order = NULL;
+ sort_order = NULL;
+
+ reset_dimensions();
+ perf_hpp__reset_output_field();
}
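/*
 * Illustrative sketch, not part of this patch: __setup_sorting() above splits
 * the --sort string on commas/spaces with strtok_r() and matches each token
 * case-insensitively by prefix, as sort_dimension__add() does.  Everything
 * below (the key table, demo_add_key) is a stand-in for demonstration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static const char *known_keys[] = { "pid", "comm", "dso", "symbol", "weight" };

static int demo_add_key(const char *tok)
{
	size_t i;

	for (i = 0; i < sizeof(known_keys) / sizeof(known_keys[0]); i++) {
		/* prefix match, like strncasecmp(tok, sd->name, strlen(tok)) */
		if (!strncasecmp(tok, known_keys[i], strlen(tok)))
			return 0;
	}
	return -1;	/* analogous to -ESRCH for an unknown key */
}

int main(void)
{
	char *tmp, *tok;
	char *str = strdup("comm, dso symbol");

	for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp))
		printf("%-8s -> %s\n", tok, demo_add_key(tok) ? "unknown" : "ok");

	free(str);
	return 0;
}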
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 0b91053a7d1..041f0c9cea2 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -20,24 +20,54 @@
#include "parse-options.h"
#include "parse-events.h"
-
+#include "hist.h"
#include "thread.h"
-#include "sort.h"
extern regex_t parent_regex;
extern const char *sort_order;
+extern const char *field_order;
extern const char default_parent_pattern[];
extern const char *parent_pattern;
extern const char default_sort_order[];
+extern regex_t ignore_callees_regex;
+extern int have_ignore_callees;
extern int sort__need_collapse;
extern int sort__has_parent;
-extern char *field_sep;
+extern int sort__has_sym;
+extern enum sort_mode sort__mode;
extern struct sort_entry sort_comm;
extern struct sort_entry sort_dso;
extern struct sort_entry sort_sym;
extern struct sort_entry sort_parent;
+extern struct sort_entry sort_dso_from;
+extern struct sort_entry sort_dso_to;
+extern struct sort_entry sort_sym_from;
+extern struct sort_entry sort_sym_to;
extern enum sort_type sort__first_dimension;
+struct he_stat {
+ u64 period;
+ u64 period_sys;
+ u64 period_us;
+ u64 period_guest_sys;
+ u64 period_guest_us;
+ u64 weight;
+ u32 nr_events;
+};
+
+struct hist_entry_diff {
+ bool computed;
+
+ /* PERF_HPP__DELTA */
+ double period_ratio_delta;
+
+ /* PERF_HPP__RATIO */
+ double period_ratio;
+
+ /* HISTC_WEIGHTED_DIFF */
+ s64 wdiff;
+};
+
/**
* struct hist_entry - histogram entry
*
@@ -45,17 +75,26 @@ extern enum sort_type sort__first_dimension;
* @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
*/
struct hist_entry {
+ struct rb_node rb_node_in;
struct rb_node rb_node;
- u64 period;
- u64 period_sys;
- u64 period_us;
- u64 period_guest_sys;
- u64 period_guest_us;
+ union {
+ struct list_head node;
+ struct list_head head;
+ } pairs;
+ struct he_stat stat;
+ struct he_stat *stat_acc;
struct map_symbol ms;
struct thread *thread;
+ struct comm *comm;
u64 ip;
+ u64 transaction;
s32 cpu;
- u32 nr_events;
+ u8 cpumode;
+
+ struct hist_entry_diff diff;
+
+	/* This entry is added by hists__add_dummy_entry. */
+ bool dummy;
/* XXX These two should move to some tree widget lib */
u16 row_offset;
@@ -63,23 +102,91 @@ struct hist_entry {
bool init_have_children;
char level;
+ bool used;
u8 filtered;
+ char *srcline;
struct symbol *parent;
- union {
- unsigned long position;
- struct hist_entry *pair;
- struct rb_root sorted_chain;
- };
- struct callchain_root callchain[0];
+ unsigned long position;
+ struct rb_root sorted_chain;
+ struct branch_info *branch_info;
+ struct hists *hists;
+ struct mem_info *mem_info;
+ struct callchain_root callchain[0]; /* must be last member */
+};
+
+static inline bool hist_entry__has_pairs(struct hist_entry *he)
+{
+ return !list_empty(&he->pairs.node);
+}
+
+static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
+{
+ if (hist_entry__has_pairs(he))
+ return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
+ return NULL;
+}
+
+static inline void hist_entry__add_pair(struct hist_entry *pair,
+ struct hist_entry *he)
+{
+ list_add_tail(&pair->pairs.node, &he->pairs.head);
+}
+
+static inline float hist_entry__get_percent_limit(struct hist_entry *he)
+{
+ u64 period = he->stat.period;
+ u64 total_period = hists__total_period(he->hists);
+
+ if (unlikely(total_period == 0))
+ return 0;
+
+ if (symbol_conf.cumulate_callchain)
+ period = he->stat_acc->period;
+
+ return period * 100.0 / total_period;
+}
+
+
+enum sort_mode {
+ SORT_MODE__NORMAL,
+ SORT_MODE__BRANCH,
+ SORT_MODE__MEMORY,
+ SORT_MODE__TOP,
+ SORT_MODE__DIFF,
};
enum sort_type {
+ /* common sort keys */
SORT_PID,
SORT_COMM,
SORT_DSO,
SORT_SYM,
SORT_PARENT,
SORT_CPU,
+ SORT_SRCLINE,
+ SORT_LOCAL_WEIGHT,
+ SORT_GLOBAL_WEIGHT,
+ SORT_TRANSACTION,
+
+ /* branch stack specific sort keys */
+ __SORT_BRANCH_STACK,
+ SORT_DSO_FROM = __SORT_BRANCH_STACK,
+ SORT_DSO_TO,
+ SORT_SYM_FROM,
+ SORT_SYM_TO,
+ SORT_MISPREDICT,
+ SORT_ABORT,
+ SORT_IN_TX,
+
+ /* memory mode specific sort keys */
+ __SORT_MEMORY_MODE,
+ SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE,
+ SORT_MEM_DADDR_DSO,
+ SORT_MEM_LOCKED,
+ SORT_MEM_TLB,
+ SORT_MEM_LVL,
+ SORT_MEM_SNOOP,
+ SORT_MEM_DCACHELINE,
};
/*
@@ -93,32 +200,22 @@ struct sort_entry {
int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
- int (*se_snprintf)(struct hist_entry *self, char *bf, size_t size,
+ int64_t (*se_sort)(struct hist_entry *, struct hist_entry *);
+ int (*se_snprintf)(struct hist_entry *he, char *bf, size_t size,
unsigned int width);
u8 se_width_idx;
- bool elide;
};
extern struct sort_entry sort_thread;
extern struct list_head hist_entry__sort_list;
-void setup_sorting(const char * const usagestr[], const struct option *opts);
-
-extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
-extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
-extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
-extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used);
-extern int64_t cmp_null(void *, void *);
-extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *);
-extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *);
-extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *);
-extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *);
-extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
-extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
-int64_t sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right);
-extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
+int setup_sorting(void);
+int setup_output_field(void);
+void reset_output_field(void);
extern int sort_dimension__add(const char *);
-void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
- const char *list_name, FILE *fp);
+void sort__setup_elide(FILE *fp);
+void perf_hpp__set_elide(int idx, bool elide);
+
+int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
new file mode 100644
index 00000000000..f3e4bc5fe5d
--- /dev/null
+++ b/tools/perf/util/srcline.c
@@ -0,0 +1,299 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <linux/kernel.h>
+
+#include "util/dso.h"
+#include "util/util.h"
+#include "util/debug.h"
+
+#ifdef HAVE_LIBBFD_SUPPORT
+
+/*
+ * Implement addr2line using libbfd.
+ */
+#define PACKAGE "perf"
+#include <bfd.h>
+
+struct a2l_data {
+ const char *input;
+ unsigned long addr;
+
+ bool found;
+ const char *filename;
+ const char *funcname;
+ unsigned line;
+
+ bfd *abfd;
+ asymbol **syms;
+};
+
+static int bfd_error(const char *string)
+{
+ const char *errmsg;
+
+ errmsg = bfd_errmsg(bfd_get_error());
+ fflush(stdout);
+
+ if (string)
+ pr_debug("%s: %s\n", string, errmsg);
+ else
+ pr_debug("%s\n", errmsg);
+
+ return -1;
+}
+
+static int slurp_symtab(bfd *abfd, struct a2l_data *a2l)
+{
+ long storage;
+ long symcount;
+ asymbol **syms;
+ bfd_boolean dynamic = FALSE;
+
+ if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0)
+ return bfd_error(bfd_get_filename(abfd));
+
+ storage = bfd_get_symtab_upper_bound(abfd);
+ if (storage == 0L) {
+ storage = bfd_get_dynamic_symtab_upper_bound(abfd);
+ dynamic = TRUE;
+ }
+ if (storage < 0L)
+ return bfd_error(bfd_get_filename(abfd));
+
+ syms = malloc(storage);
+ if (dynamic)
+ symcount = bfd_canonicalize_dynamic_symtab(abfd, syms);
+ else
+ symcount = bfd_canonicalize_symtab(abfd, syms);
+
+ if (symcount < 0) {
+ free(syms);
+ return bfd_error(bfd_get_filename(abfd));
+ }
+
+ a2l->syms = syms;
+ return 0;
+}
+
+static void find_address_in_section(bfd *abfd, asection *section, void *data)
+{
+ bfd_vma pc, vma;
+ bfd_size_type size;
+ struct a2l_data *a2l = data;
+
+ if (a2l->found)
+ return;
+
+ if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+ return;
+
+ pc = a2l->addr;
+ vma = bfd_get_section_vma(abfd, section);
+ size = bfd_get_section_size(section);
+
+ if (pc < vma || pc >= vma + size)
+ return;
+
+ a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma,
+ &a2l->filename, &a2l->funcname,
+ &a2l->line);
+}
+
+static struct a2l_data *addr2line_init(const char *path)
+{
+ bfd *abfd;
+ struct a2l_data *a2l = NULL;
+
+ abfd = bfd_openr(path, NULL);
+ if (abfd == NULL)
+ return NULL;
+
+ if (!bfd_check_format(abfd, bfd_object))
+ goto out;
+
+ a2l = zalloc(sizeof(*a2l));
+ if (a2l == NULL)
+ goto out;
+
+ a2l->abfd = abfd;
+ a2l->input = strdup(path);
+ if (a2l->input == NULL)
+ goto out;
+
+ if (slurp_symtab(abfd, a2l))
+ goto out;
+
+ return a2l;
+
+out:
+ if (a2l) {
+ zfree((char **)&a2l->input);
+ free(a2l);
+ }
+ bfd_close(abfd);
+ return NULL;
+}
+
+static void addr2line_cleanup(struct a2l_data *a2l)
+{
+ if (a2l->abfd)
+ bfd_close(a2l->abfd);
+ zfree((char **)&a2l->input);
+ zfree(&a2l->syms);
+ free(a2l);
+}
+
+static int addr2line(const char *dso_name, unsigned long addr,
+ char **file, unsigned int *line, struct dso *dso)
+{
+ int ret = 0;
+ struct a2l_data *a2l = dso->a2l;
+
+ if (!a2l) {
+ dso->a2l = addr2line_init(dso_name);
+ a2l = dso->a2l;
+ }
+
+ if (a2l == NULL) {
+ pr_warning("addr2line_init failed for %s\n", dso_name);
+ return 0;
+ }
+
+ a2l->addr = addr;
+ a2l->found = false;
+
+ bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l);
+
+ if (a2l->found && a2l->filename) {
+ *file = strdup(a2l->filename);
+ *line = a2l->line;
+
+ if (*file)
+ ret = 1;
+ }
+
+ return ret;
+}
+
+void dso__free_a2l(struct dso *dso)
+{
+ struct a2l_data *a2l = dso->a2l;
+
+ if (!a2l)
+ return;
+
+ addr2line_cleanup(a2l);
+
+ dso->a2l = NULL;
+}
+
+#else /* HAVE_LIBBFD_SUPPORT */
+
+static int addr2line(const char *dso_name, unsigned long addr,
+ char **file, unsigned int *line_nr,
+ struct dso *dso __maybe_unused)
+{
+ FILE *fp;
+ char cmd[PATH_MAX];
+ char *filename = NULL;
+ size_t len;
+ char *sep;
+ int ret = 0;
+
+ scnprintf(cmd, sizeof(cmd), "addr2line -e %s %016"PRIx64,
+ dso_name, addr);
+
+ fp = popen(cmd, "r");
+ if (fp == NULL) {
+ pr_warning("popen failed for %s\n", dso_name);
+ return 0;
+ }
+
+ if (getline(&filename, &len, fp) < 0 || !len) {
+ pr_warning("addr2line has no output for %s\n", dso_name);
+ goto out;
+ }
+
+ sep = strchr(filename, '\n');
+ if (sep)
+ *sep = '\0';
+
+ if (!strcmp(filename, "??:0")) {
+ pr_debug("no debugging info in %s\n", dso_name);
+ free(filename);
+ goto out;
+ }
+
+ sep = strchr(filename, ':');
+ if (sep) {
+ *sep++ = '\0';
+ *file = filename;
+ *line_nr = strtoul(sep, NULL, 0);
+ ret = 1;
+ }
+out:
+ pclose(fp);
+ return ret;
+}
+
+void dso__free_a2l(struct dso *dso __maybe_unused)
+{
+}
+
+#endif /* HAVE_LIBBFD_SUPPORT */
+
+/*
+ * Number of addr2line failures (without success) before disabling it for that
+ * dso.
+ */
+#define A2L_FAIL_LIMIT 123
+
+char *get_srcline(struct dso *dso, unsigned long addr)
+{
+ char *file = NULL;
+ unsigned line = 0;
+ char *srcline;
+ const char *dso_name;
+
+ if (!dso->has_srcline)
+ return SRCLINE_UNKNOWN;
+
+ if (dso->symsrc_filename)
+ dso_name = dso->symsrc_filename;
+ else
+ dso_name = dso->long_name;
+
+ if (dso_name[0] == '[')
+ goto out;
+
+ if (!strncmp(dso_name, "/tmp/perf-", 10))
+ goto out;
+
+ if (!addr2line(dso_name, addr, &file, &line, dso))
+ goto out;
+
+ if (asprintf(&srcline, "%s:%u", file, line) < 0) {
+ free(file);
+ goto out;
+ }
+
+ dso->a2l_fails = 0;
+
+ free(file);
+ return srcline;
+
+out:
+ if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
+ dso->has_srcline = 0;
+ dso__free_a2l(dso);
+ }
+ return SRCLINE_UNKNOWN;
+}
+
+void free_srcline(char *srcline)
+{
+ if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
+ free(srcline);
+}
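/*
 * Illustrative sketch, not part of this patch: the non-libbfd fallback above
 * shells out to addr2line(1) and parses one "file:line" reply.  This is a
 * stripped-down, standalone version of that parse; the binary path and the
 * address are made-up values for demonstration.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char cmd[4096];
	char *reply = NULL;
	size_t len = 0;
	char *sep;
	FILE *fp;

	snprintf(cmd, sizeof(cmd), "addr2line -e %s %016lx", "/bin/true", 0x1234UL);

	fp = popen(cmd, "r");
	if (!fp)
		return 1;

	if (getline(&reply, &len, fp) > 0) {
		reply[strcspn(reply, "\n")] = '\0';	/* strip trailing newline */
		sep = strchr(reply, ':');		/* split "file:line" */
		if (sep) {
			*sep++ = '\0';
			printf("file=%s line=%lu\n", reply, strtoul(sep, NULL, 0));
		}
	}

	free(reply);
	pclose(fp);
	return 0;
}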
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
new file mode 100644
index 00000000000..6506b3dfb60
--- /dev/null
+++ b/tools/perf/util/stat.c
@@ -0,0 +1,63 @@
+#include <math.h>
+
+#include "stat.h"
+
+void update_stats(struct stats *stats, u64 val)
+{
+ double delta;
+
+ stats->n++;
+ delta = val - stats->mean;
+ stats->mean += delta / stats->n;
+ stats->M2 += delta*(val - stats->mean);
+
+ if (val > stats->max)
+ stats->max = val;
+
+ if (val < stats->min)
+ stats->min = val;
+}
+
+double avg_stats(struct stats *stats)
+{
+ return stats->mean;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ *
+ * (\Sum n_i^2) - ((\Sum n_i)^2)/n
+ * s^2 = -------------------------------
+ * n - 1
+ *
+ * http://en.wikipedia.org/wiki/Stddev
+ *
+ * The std dev of the mean is related to the std dev by:
+ *
+ * s
+ * s_mean = -------
+ * sqrt(n)
+ *
+ */
+double stddev_stats(struct stats *stats)
+{
+ double variance, variance_mean;
+
+ if (stats->n < 2)
+ return 0.0;
+
+ variance = stats->M2 / (stats->n - 1);
+ variance_mean = variance / stats->n;
+
+ return sqrt(variance_mean);
+}
+
+double rel_stddev_stats(double stddev, double avg)
+{
+ double pct = 0.0;
+
+ if (avg)
+ pct = 100.0 * stddev/avg;
+
+ return pct;
+}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
new file mode 100644
index 00000000000..5667fc3e39c
--- /dev/null
+++ b/tools/perf/util/stat.h
@@ -0,0 +1,25 @@
+#ifndef __PERF_STATS_H
+#define __PERF_STATS_H
+
+#include <linux/types.h>
+
+struct stats
+{
+ double n, mean, M2;
+ u64 max, min;
+};
+
+void update_stats(struct stats *stats, u64 val);
+double avg_stats(struct stats *stats);
+double stddev_stats(struct stats *stats);
+double rel_stddev_stats(double stddev, double avg);
+
+static inline void init_stats(struct stats *stats)
+{
+ stats->n = 0.0;
+ stats->mean = 0.0;
+ stats->M2 = 0.0;
+ stats->min = (u64) -1;
+ stats->max = 0;
+}
+#endif
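/*
 * Illustrative sketch, not part of this patch: update_stats() above keeps a
 * running mean and M2 (Welford's online algorithm) and stddev_stats() derives
 * the standard deviation of the mean from them.  Self-contained demo of the
 * same arithmetic, with the struct redeclared locally and types simplified.
 */
#include <math.h>
#include <stdio.h>

struct demo_stats { double n, mean, M2; };

static void demo_update(struct demo_stats *s, double val)
{
	double delta;

	s->n++;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);
}

int main(void)
{
	struct demo_stats s = { 0, 0, 0 };
	double vals[] = { 10, 12, 9, 11 };
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		demo_update(&s, vals[i]);

	/* stddev of the mean: sqrt((M2 / (n - 1)) / n), as in stddev_stats() */
	printf("mean=%.2f stddev_mean=%.3f\n",
	       s.mean, sqrt((s.M2 / (s.n - 1)) / s.n));
	return 0;
}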
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 92e068517c1..4abe23550c7 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include <linux/kernel.h>
int prefixcmp(const char *str, const char *prefix)
{
@@ -27,7 +28,7 @@ void strbuf_init(struct strbuf *sb, ssize_t hint)
void strbuf_release(struct strbuf *sb)
{
if (sb->alloc) {
- free(sb->buf);
+ zfree(&sb->buf);
strbuf_init(sb, 0);
}
}
@@ -99,7 +100,7 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
va_end(ap);
if (len > strbuf_avail(sb)) {
- die("this should not happen, your snprintf is broken");
+ die("this should not happen, your vsnprintf is broken");
}
}
strbuf_setlen(sb, sb->len + len);
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
new file mode 100644
index 00000000000..79a757a2a15
--- /dev/null
+++ b/tools/perf/util/strfilter.c
@@ -0,0 +1,199 @@
+#include "util.h"
+#include "string.h"
+#include "strfilter.h"
+
+/* Operators */
+static const char *OP_and = "&"; /* Logical AND */
+static const char *OP_or = "|"; /* Logical OR */
+static const char *OP_not = "!"; /* Logical NOT */
+
+#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
+#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')')
+
+static void strfilter_node__delete(struct strfilter_node *node)
+{
+ if (node) {
+ if (node->p && !is_operator(*node->p))
+ zfree((char **)&node->p);
+ strfilter_node__delete(node->l);
+ strfilter_node__delete(node->r);
+ free(node);
+ }
+}
+
+void strfilter__delete(struct strfilter *filter)
+{
+ if (filter) {
+ strfilter_node__delete(filter->root);
+ free(filter);
+ }
+}
+
+static const char *get_token(const char *s, const char **e)
+{
+ const char *p;
+
+ while (isspace(*s)) /* Skip spaces */
+ s++;
+
+ if (*s == '\0') {
+ p = s;
+ goto end;
+ }
+
+ p = s + 1;
+ if (!is_separator(*s)) {
+ /* End search */
+retry:
+ while (*p && !is_separator(*p) && !isspace(*p))
+ p++;
+ /* Escape and special case: '!' is also used in glob pattern */
+ if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
+ p++;
+ goto retry;
+ }
+ }
+end:
+ *e = p;
+ return s;
+}
+
+static struct strfilter_node *strfilter_node__alloc(const char *op,
+ struct strfilter_node *l,
+ struct strfilter_node *r)
+{
+ struct strfilter_node *node = zalloc(sizeof(*node));
+
+ if (node) {
+ node->p = op;
+ node->l = l;
+ node->r = r;
+ }
+
+ return node;
+}
+
+static struct strfilter_node *strfilter_node__new(const char *s,
+ const char **ep)
+{
+ struct strfilter_node root, *cur, *last_op;
+ const char *e;
+
+ if (!s)
+ return NULL;
+
+ memset(&root, 0, sizeof(root));
+ last_op = cur = &root;
+
+ s = get_token(s, &e);
+ while (*s != '\0' && *s != ')') {
+ switch (*s) {
+ case '&': /* Exchg last OP->r with AND */
+ if (!cur->r || !last_op->r)
+ goto error;
+ cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
+ if (!cur)
+ goto nomem;
+ last_op->r = cur;
+ last_op = cur;
+ break;
+ case '|': /* Exchg the root with OR */
+ if (!cur->r || !root.r)
+ goto error;
+ cur = strfilter_node__alloc(OP_or, root.r, NULL);
+ if (!cur)
+ goto nomem;
+ root.r = cur;
+ last_op = cur;
+ break;
+ case '!': /* Add NOT as a leaf node */
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
+ if (!cur->r)
+ goto nomem;
+ cur = cur->r;
+ break;
+		case '(': /* Recursively parse inside the parentheses */
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__new(s + 1, &s);
+ if (!s)
+ goto nomem;
+ if (!cur->r || *s != ')')
+ goto error;
+ e = s + 1;
+ break;
+ default:
+ if (cur->r)
+ goto error;
+ cur->r = strfilter_node__alloc(NULL, NULL, NULL);
+ if (!cur->r)
+ goto nomem;
+ cur->r->p = strndup(s, e - s);
+ if (!cur->r->p)
+ goto nomem;
+ }
+ s = get_token(e, &e);
+ }
+ if (!cur->r)
+ goto error;
+ *ep = s;
+ return root.r;
+nomem:
+ s = NULL;
+error:
+ *ep = s;
+ strfilter_node__delete(root.r);
+ return NULL;
+}
+
+/*
+ * Parse a filter rule and return a new strfilter.
+ * Return NULL on failure; *err is NULL if a memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err)
+{
+ struct strfilter *filter = zalloc(sizeof(*filter));
+ const char *ep = NULL;
+
+ if (filter)
+ filter->root = strfilter_node__new(rules, &ep);
+
+ if (!filter || !filter->root || *ep != '\0') {
+ if (err)
+ *err = ep;
+ strfilter__delete(filter);
+ filter = NULL;
+ }
+
+ return filter;
+}
+
+static bool strfilter_node__compare(struct strfilter_node *node,
+ const char *str)
+{
+ if (!node || !node->p)
+ return false;
+
+ switch (*node->p) {
+ case '|': /* OR */
+ return strfilter_node__compare(node->l, str) ||
+ strfilter_node__compare(node->r, str);
+ case '&': /* AND */
+ return strfilter_node__compare(node->l, str) &&
+ strfilter_node__compare(node->r, str);
+ case '!': /* NOT */
+ return !strfilter_node__compare(node->r, str);
+ default:
+ return strglobmatch(str, node->p);
+ }
+}
+
+/* Return true if STR matches the filter rules */
+bool strfilter__compare(struct strfilter *filter, const char *str)
+{
+ if (!filter)
+ return false;
+ return strfilter_node__compare(filter->root, str);
+}
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h
new file mode 100644
index 00000000000..fe611f3c9e3
--- /dev/null
+++ b/tools/perf/util/strfilter.h
@@ -0,0 +1,48 @@
+#ifndef __PERF_STRFILTER_H
+#define __PERF_STRFILTER_H
+/* General purpose glob matching filter */
+
+#include <linux/list.h>
+#include <stdbool.h>
+
+/* A node of string filter */
+struct strfilter_node {
+	struct strfilter_node *l;	/* Tree left branch (for &,|) */
+	struct strfilter_node *r;	/* Tree right branch (for !,&,|) */
+ const char *p; /* Operator or rule */
+};
+
+/* String filter */
+struct strfilter {
+ struct strfilter_node *root;
+};
+
+/**
+ * strfilter__new - Create a new string filter
+ * @rules: Filter rule, which is a combination of glob expressions.
+ * @err: Pointer which points to an error detected in @rules
+ *
+ * Parse @rules and return a new strfilter. Return NULL if an error is
+ * detected. In that case, *@err indicates where the error was detected,
+ * and *@err is NULL if a memory allocation failed.
+ */
+struct strfilter *strfilter__new(const char *rules, const char **err);
+
+/**
+ * strfilter__compare - compare given string and a string filter
+ * @filter: String filter
+ * @str: target string
+ *
+ * Compare @str against @filter. Return true if @str matches the rule.
+ */
+bool strfilter__compare(struct strfilter *filter, const char *str);
+
+/**
+ * strfilter__delete - delete a string filter
+ * @filter: String filter to delete
+ *
+ * Delete @filter.
+ */
+void strfilter__delete(struct strfilter *filter);
+
+#endif
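/*
 * Illustrative sketch, not part of this patch: intended use of the strfilter
 * API declared above.  Rules combine glob patterns with &, |, ! and
 * parentheses.  Assumes compilation inside the perf tree so that
 * "strfilter.h" and strglobmatch() are available.
 */
#include <stdio.h>
#include "strfilter.h"

int demo_strfilter(void)
{
	const char *err = NULL;
	struct strfilter *f = strfilter__new("sys_* & !sys_exit*", &err);

	if (!f) {
		fprintf(stderr, "bad rule near: %s\n", err ? err : "(allocation failed)");
		return -1;
	}

	/* matches "sys_*" but not "sys_exit*" */
	printf("sys_open: %d\n", strfilter__compare(f, "sys_open"));		/* 1 */
	printf("sys_exit_group: %d\n", strfilter__compare(f, "sys_exit_group"));/* 0 */

	strfilter__delete(f);
	return 0;
}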
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 0409fc7c005..2553e5b55b8 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -1,5 +1,5 @@
#include "util.h"
-#include "string.h"
+#include "linux/string.h"
#define K 1024LL
/*
@@ -85,7 +85,7 @@ out:
/*
* Helper function for splitting a string into an argv-like array.
- * originaly copied from lib/argv_split.c
+ * originally copied from lib/argv_split.c
*/
static const char *skip_sep(const char *cp)
{
@@ -128,7 +128,7 @@ void argv_free(char **argv)
{
char **p;
for (p = argv; *p; p++)
- free(*p);
+ zfree(p);
free(argv);
}
@@ -259,7 +259,7 @@ static bool __match_glob(const char *str, const char *pat, bool ignore_space)
if (!*pat) /* Tail wild card matches all */
return true;
while (*str)
- if (strglobmatch(str++, pat))
+ if (__match_glob(str++, pat, ignore_space))
return true;
}
return !*str && !*pat;
@@ -294,3 +294,120 @@ bool strlazymatch(const char *str, const char *pat)
{
return __match_glob(str, pat, true);
}
+
+/**
+ * strtailcmp - Compare the tail of two strings
+ * @s1: 1st string to be compared
+ * @s2: 2nd string to be compared
+ *
+ * Return 0 if the whole of either string matches the tail of the other.
+ */
+int strtailcmp(const char *s1, const char *s2)
+{
+ int i1 = strlen(s1);
+ int i2 = strlen(s2);
+ while (--i1 >= 0 && --i2 >= 0) {
+ if (s1[i1] != s2[i2])
+ return s1[i1] - s2[i2];
+ }
+ return 0;
+}
+
+/**
+ * strxfrchar - Locate and replace character in @s
+ * @s: The string to be searched/changed.
+ * @from: Source character to be replaced.
+ * @to: Destination character.
+ *
+ * Return pointer to the changed string.
+ */
+char *strxfrchar(char *s, char from, char to)
+{
+ char *p = s;
+
+ while ((p = strchr(p, from)) != NULL)
+ *p++ = to;
+
+ return s;
+}
+
+/**
+ * ltrim - Removes leading whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Return pointer to the first non-whitespace character in @s.
+ */
+char *ltrim(char *s)
+{
+ int len = strlen(s);
+
+ while (len && isspace(*s)) {
+ len--;
+ s++;
+ }
+
+ return s;
+}
+
+/**
+ * rtrim - Removes trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns @s.
+ */
+char *rtrim(char *s)
+{
+ size_t size = strlen(s);
+ char *end;
+
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return s;
+}
+
+/**
+ * memdup - duplicate region of memory
+ * @src: memory region to duplicate
+ * @len: memory region length
+ */
+void *memdup(const void *src, size_t len)
+{
+ void *p;
+
+ p = malloc(len);
+ if (p)
+ memcpy(p, src, len);
+
+ return p;
+}
+
+/**
+ * str_append - reallocate string and append another
+ * @s: pointer to string pointer
+ * @len: pointer to len (initialized)
+ * @a: string to append.
+ */
+int str_append(char **s, int *len, const char *a)
+{
+ int olen = *s ? strlen(*s) : 0;
+ int nlen = olen + strlen(a) + 1;
+ if (*len < nlen) {
+ *len = *len * 2;
+ if (*len < nlen)
+ *len = nlen;
+ *s = realloc(*s, *len);
+ if (!*s)
+ return -ENOMEM;
+ if (olen == 0)
+ **s = 0;
+ }
+ strcat(*s, a);
+ return 0;
+}
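/*
 * Illustrative sketch, not part of this patch: str_append() above grows the
 * buffer geometrically (at least doubling, and at least to the needed size)
 * before appending.  Typical use starts from a NULL string and zero length.
 * The prototype is declared locally here because the perf header exporting it
 * is not shown in this diff; link against util/string.o inside the perf tree.
 */
#include <stdio.h>
#include <stdlib.h>

int str_append(char **s, int *len, const char *a);

int demo_str_append(void)
{
	char *buf = NULL;
	int len = 0;

	if (str_append(&buf, &len, "hello") || str_append(&buf, &len, ", world"))
		return -1;

	printf("%s (capacity %d)\n", buf, len);	/* "hello, world (capacity 13)" */
	free(buf);
	return 0;
}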
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 6783a204355..71f9d102b96 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -5,71 +5,67 @@
*/
#include "strlist.h"
+#include "util.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-static struct str_node *str_node__new(const char *s, bool dupstr)
+static
+struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
{
- struct str_node *self = malloc(sizeof(*self));
+ const char *s = entry;
+ struct rb_node *rc = NULL;
+ struct strlist *strlist = container_of(rblist, struct strlist, rblist);
+ struct str_node *snode = malloc(sizeof(*snode));
- if (self != NULL) {
- if (dupstr) {
+ if (snode != NULL) {
+ if (strlist->dupstr) {
s = strdup(s);
if (s == NULL)
goto out_delete;
}
- self->s = s;
+ snode->s = s;
+ rc = &snode->rb_node;
}
- return self;
+ return rc;
out_delete:
- free(self);
+ free(snode);
return NULL;
}
-static void str_node__delete(struct str_node *self, bool dupstr)
+static void str_node__delete(struct str_node *snode, bool dupstr)
{
if (dupstr)
- free((void *)self->s);
- free(self);
+ zfree((char **)&snode->s);
+ free(snode);
}
-int strlist__add(struct strlist *self, const char *new_entry)
+static
+void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
{
- struct rb_node **p = &self->entries.rb_node;
- struct rb_node *parent = NULL;
- struct str_node *sn;
-
- while (*p != NULL) {
- int rc;
-
- parent = *p;
- sn = rb_entry(parent, struct str_node, rb_node);
- rc = strcmp(sn->s, new_entry);
-
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return -EEXIST;
- }
+ struct strlist *slist = container_of(rblist, struct strlist, rblist);
+ struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+ str_node__delete(snode, slist->dupstr);
+}
- sn = str_node__new(new_entry, self->dupstr);
- if (sn == NULL)
- return -ENOMEM;
+static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+ const char *str = entry;
+ struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
- rb_link_node(&sn->rb_node, parent, p);
- rb_insert_color(&sn->rb_node, &self->entries);
- ++self->nr_entries;
+ return strcmp(snode->s, str);
+}
- return 0;
+int strlist__add(struct strlist *slist, const char *new_entry)
+{
+ return rblist__add_node(&slist->rblist, new_entry);
}
-int strlist__load(struct strlist *self, const char *filename)
+int strlist__load(struct strlist *slist, const char *filename)
{
char entry[1024];
int err;
@@ -85,7 +81,7 @@ int strlist__load(struct strlist *self, const char *filename)
continue;
entry[len - 1] = '\0';
- err = strlist__add(self, entry);
+ err = strlist__add(slist, entry);
if (err != 0)
goto out;
}
@@ -96,105 +92,82 @@ out:
return err;
}
-void strlist__remove(struct strlist *self, struct str_node *sn)
+void strlist__remove(struct strlist *slist, struct str_node *snode)
{
- rb_erase(&sn->rb_node, &self->entries);
- str_node__delete(sn, self->dupstr);
+ rblist__remove_node(&slist->rblist, &snode->rb_node);
}
-struct str_node *strlist__find(struct strlist *self, const char *entry)
+struct str_node *strlist__find(struct strlist *slist, const char *entry)
{
- struct rb_node **p = &self->entries.rb_node;
- struct rb_node *parent = NULL;
-
- while (*p != NULL) {
- struct str_node *sn;
- int rc;
-
- parent = *p;
- sn = rb_entry(parent, struct str_node, rb_node);
- rc = strcmp(sn->s, entry);
-
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return sn;
- }
+ struct str_node *snode = NULL;
+ struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
- return NULL;
+ if (rb_node)
+ snode = container_of(rb_node, struct str_node, rb_node);
+
+ return snode;
}
-static int strlist__parse_list_entry(struct strlist *self, const char *s)
+static int strlist__parse_list_entry(struct strlist *slist, const char *s)
{
if (strncmp(s, "file://", 7) == 0)
- return strlist__load(self, s + 7);
+ return strlist__load(slist, s + 7);
- return strlist__add(self, s);
+ return strlist__add(slist, s);
}
-int strlist__parse_list(struct strlist *self, const char *s)
+int strlist__parse_list(struct strlist *slist, const char *s)
{
char *sep;
int err;
while ((sep = strchr(s, ',')) != NULL) {
*sep = '\0';
- err = strlist__parse_list_entry(self, s);
+ err = strlist__parse_list_entry(slist, s);
*sep = ',';
if (err != 0)
return err;
s = sep + 1;
}
- return *s ? strlist__parse_list_entry(self, s) : 0;
+ return *s ? strlist__parse_list_entry(slist, s) : 0;
}
-struct strlist *strlist__new(bool dupstr, const char *slist)
+struct strlist *strlist__new(bool dupstr, const char *list)
{
- struct strlist *self = malloc(sizeof(*self));
+ struct strlist *slist = malloc(sizeof(*slist));
+
+ if (slist != NULL) {
+ rblist__init(&slist->rblist);
+ slist->rblist.node_cmp = strlist__node_cmp;
+ slist->rblist.node_new = strlist__node_new;
+ slist->rblist.node_delete = strlist__node_delete;
- if (self != NULL) {
- self->entries = RB_ROOT;
- self->dupstr = dupstr;
- self->nr_entries = 0;
- if (slist && strlist__parse_list(self, slist) != 0)
+ slist->dupstr = dupstr;
+ if (list && strlist__parse_list(slist, list) != 0)
goto out_error;
}
- return self;
+ return slist;
out_error:
- free(self);
+ free(slist);
return NULL;
}
-void strlist__delete(struct strlist *self)
+void strlist__delete(struct strlist *slist)
{
- if (self != NULL) {
- struct str_node *pos;
- struct rb_node *next = rb_first(&self->entries);
-
- while (next) {
- pos = rb_entry(next, struct str_node, rb_node);
- next = rb_next(&pos->rb_node);
- strlist__remove(self, pos);
- }
- self->entries = RB_ROOT;
- free(self);
- }
+ if (slist != NULL)
+ rblist__delete(&slist->rblist);
}
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
{
- struct rb_node *nd;
+ struct str_node *snode = NULL;
+ struct rb_node *rb_node;
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
- struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+ rb_node = rblist__entry(&slist->rblist, idx);
+ if (rb_node)
+ snode = container_of(rb_node, struct str_node, rb_node);
- if (!idx--)
- return pos;
- }
-
- return NULL;
+ return snode;
}
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 3ba839007d2..5c7f87069d9 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -4,46 +4,47 @@
#include <linux/rbtree.h>
#include <stdbool.h>
+#include "rblist.h"
+
struct str_node {
struct rb_node rb_node;
const char *s;
};
struct strlist {
- struct rb_root entries;
- unsigned int nr_entries;
+ struct rblist rblist;
bool dupstr;
};
struct strlist *strlist__new(bool dupstr, const char *slist);
-void strlist__delete(struct strlist *self);
+void strlist__delete(struct strlist *slist);
-void strlist__remove(struct strlist *self, struct str_node *sn);
-int strlist__load(struct strlist *self, const char *filename);
-int strlist__add(struct strlist *self, const char *str);
+void strlist__remove(struct strlist *slist, struct str_node *sn);
+int strlist__load(struct strlist *slist, const char *filename);
+int strlist__add(struct strlist *slist, const char *str);
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
-struct str_node *strlist__find(struct strlist *self, const char *entry);
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
+struct str_node *strlist__find(struct strlist *slist, const char *entry);
-static inline bool strlist__has_entry(struct strlist *self, const char *entry)
+static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
{
- return strlist__find(self, entry) != NULL;
+ return strlist__find(slist, entry) != NULL;
}
-static inline bool strlist__empty(const struct strlist *self)
+static inline bool strlist__empty(const struct strlist *slist)
{
- return self->nr_entries == 0;
+ return rblist__empty(&slist->rblist);
}
-static inline unsigned int strlist__nr_entries(const struct strlist *self)
+static inline unsigned int strlist__nr_entries(const struct strlist *slist)
{
- return self->nr_entries;
+ return rblist__nr_entries(&slist->rblist);
}
/* For strlist iteration */
-static inline struct str_node *strlist__first(struct strlist *self)
+static inline struct str_node *strlist__first(struct strlist *slist)
{
- struct rb_node *rn = rb_first(&self->entries);
+ struct rb_node *rn = rb_first(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
@@ -58,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn)
/**
* strlist_for_each - iterate over a strlist
* @pos: the &struct str_node to use as a loop cursor.
- * @self: the &struct strlist for loop.
+ * @slist: the &struct strlist for loop.
*/
-#define strlist__for_each(pos, self) \
- for (pos = strlist__first(self); pos; pos = strlist__next(pos))
+#define strlist__for_each(pos, slist) \
+ for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
/**
* strlist_for_each_safe - iterate over a strlist safe against removal of
* str_node
* @pos: the &struct str_node to use as a loop cursor.
* @n: another &struct str_node to use as temporary storage.
- * @self: the &struct strlist for loop.
+ * @slist: the &struct strlist for loop.
*/
-#define strlist__for_each_safe(pos, n, self) \
- for (pos = strlist__first(self), n = strlist__next(pos); pos;\
+#define strlist__for_each_safe(pos, n, slist) \
+ for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
pos = n, n = strlist__next(n))
-int strlist__parse_list(struct strlist *self, const char *s);
+int strlist__parse_list(struct strlist *slist, const char *s);
#endif /* __PERF_STRLIST_H */
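/*
 * Illustrative sketch, not part of this patch: the strlist API above keeps
 * the same callers' view after the rblist conversion.  Assumes compilation
 * inside the perf tree so that "strlist.h" and the rblist code are available.
 */
#include <stdio.h>
#include "strlist.h"

int demo_strlist(void)
{
	struct str_node *pos;
	struct strlist *sl = strlist__new(true, "foo,bar,baz");

	if (!sl)
		return -1;

	printf("entries: %u, has bar: %d\n",
	       strlist__nr_entries(sl), strlist__has_entry(sl, "bar"));

	strlist__for_each(pos, sl)	/* iterates in sorted order: bar, baz, foo */
		printf("  %s\n", pos->s);

	strlist__delete(sl);
	return 0;
}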
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index b3637db025a..6a0a13d07a2 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -12,12 +12,17 @@
* of the License.
*/
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
+#include <linux/bitmap.h>
+#include "perf.h"
#include "svghelper.h"
+#include "util.h"
+#include "cpumap.h"
static u64 first_time, last_time;
static u64 turbo_frequency, max_freq;
@@ -27,6 +32,8 @@ static u64 turbo_frequency, max_freq;
#define SLOT_HEIGHT 25.0
int svg_page_width = 1000;
+u64 svg_highlight;
+const char *svg_highlight_name;
#define MIN_TEXT_SIZE 0.01
@@ -38,16 +45,21 @@ static double cpu2slot(int cpu)
return 2 * cpu + 1;
}
+static int *topology_map;
+
static double cpu2y(int cpu)
{
- return cpu2slot(cpu) * SLOT_MULT;
+ if (topology_map)
+ return cpu2slot(topology_map[cpu]) * SLOT_MULT;
+ else
+ return cpu2slot(cpu) * SLOT_MULT;
}
-static double time2pixels(u64 time)
+static double time2pixels(u64 __time)
{
double X;
- X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+ X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
return X;
}
@@ -94,7 +106,8 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
- fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+ fprintf(svgfile, "<!DOCTYPE svg SYSTEM \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n");
+ fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
@@ -102,6 +115,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.sample_hi{ fill:rgb(255,128, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
@@ -127,14 +141,42 @@ void svg_box(int Yslot, u64 start, u64 end, const char *type)
time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
}
-void svg_sample(int Yslot, int cpu, u64 start, u64 end)
+static char *time_to_string(u64 duration);
+void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<g>\n");
+ fprintf(svgfile, "<title>#%d blocked %s</title>\n", cpu,
+ time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Blocked on:\n%s</desc>\n", backtrace);
+ svg_box(Yslot, start, end, "blocked");
+ fprintf(svgfile, "</g>\n");
+}
+
+void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
double text_size;
+ const char *type;
+
if (!svgfile)
return;
- fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"sample\"/>\n",
- time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT);
+ if (svg_highlight && end - start > svg_highlight)
+ type = "sample_hi";
+ else
+ type = "sample";
+ fprintf(svgfile, "<g>\n");
+
+ fprintf(svgfile, "<title>#%d running %s</title>\n",
+ cpu, time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT,
+ type);
text_size = (time2pixels(end)-time2pixels(start));
if (cpu > 9)
@@ -147,6 +189,7 @@ void svg_sample(int Yslot, int cpu, u64 start, u64 end)
fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
+ fprintf(svgfile, "</g>\n");
}
static char *time_to_string(u64 duration)
@@ -167,7 +210,7 @@ static char *time_to_string(u64 duration)
return text;
}
-void svg_waiting(int Yslot, u64 start, u64 end)
+void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
{
char *text;
const char *style;
@@ -191,6 +234,9 @@ void svg_waiting(int Yslot, u64 start, u64 end)
font_size = round_text_size(font_size);
fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
+ fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Waiting on:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
if (font_size > MIN_TEXT_SIZE)
@@ -241,28 +287,42 @@ void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
max_freq = __max_freq;
turbo_frequency = __turbo_freq;
+ fprintf(svgfile, "<g>\n");
+
fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
time2pixels(first_time),
time2pixels(last_time)-time2pixels(first_time),
cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
- sprintf(cpu_string, "CPU %i", (int)cpu+1);
+ sprintf(cpu_string, "CPU %i", (int)cpu);
fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
+
+ fprintf(svgfile, "</g>\n");
}
-void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name)
+void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace)
{
double width;
+ const char *type;
if (!svgfile)
return;
+ if (svg_highlight && end - start >= svg_highlight)
+ type = "sample_hi";
+ else if (svg_highlight_name && strstr(name, svg_highlight_name))
+ type = "sample_hi";
+ else
+ type = "sample";
fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
+ fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start));
+ if (backtrace)
+ fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
width = time2pixels(end)-time2pixels(start);
@@ -287,6 +347,8 @@ void svg_cstate(int cpu, u64 start, u64 end, int type)
return;
+ fprintf(svgfile, "<g>\n");
+
if (type > 6)
type = 6;
sprintf(style, "c%i", type);
@@ -305,6 +367,8 @@ void svg_cstate(int cpu, u64 start, u64 end, int type)
if (width > MIN_TEXT_SIZE)
fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
time2pixels(start), cpu2y(cpu)+width, width, type);
+
+ fprintf(svgfile, "</g>\n");
}
static char *HzToHuman(unsigned long hz)
@@ -338,6 +402,8 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
if (!svgfile)
return;
+ fprintf(svgfile, "<g>\n");
+
if (max_freq)
height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
@@ -346,10 +412,11 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
time2pixels(start), height+0.9, HzToHuman(freq));
+ fprintf(svgfile, "</g>\n");
}
-void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2)
+void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace)
{
double height;
@@ -357,6 +424,15 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc
return;
+ fprintf(svgfile, "<g>\n");
+
+ fprintf(svgfile, "<title>%s wakes up %s</title>\n",
+ desc1 ? desc1 : "?",
+ desc2 ? desc2 : "?");
+
+ if (backtrace)
+ fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
+
if (row1 < row2) {
if (row1) {
fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
@@ -394,9 +470,11 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc
if (row1)
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
time2pixels(start), height);
+
+ fprintf(svgfile, "</g>\n");
}
-void svg_wakeline(u64 start, int row1, int row2)
+void svg_wakeline(u64 start, int row1, int row2, const char *backtrace)
{
double height;
@@ -404,6 +482,11 @@ void svg_wakeline(u64 start, int row1, int row2)
return;
+ fprintf(svgfile, "<g>\n");
+
+ if (backtrace)
+ fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
+
if (row1 < row2)
fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT);
@@ -416,17 +499,28 @@ void svg_wakeline(u64 start, int row1, int row2)
height += SLOT_HEIGHT;
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
time2pixels(start), height);
+
+ fprintf(svgfile, "</g>\n");
}
-void svg_interrupt(u64 start, int row)
+void svg_interrupt(u64 start, int row, const char *backtrace)
{
if (!svgfile)
return;
+ fprintf(svgfile, "<g>\n");
+
+ fprintf(svgfile, "<title>Wakeup from interrupt</title>\n");
+
+ if (backtrace)
+ fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
+
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
time2pixels(start), row * SLOT_MULT);
fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
+
+ fprintf(svgfile, "</g>\n");
}
void svg_text(int Yslot, u64 start, const char *text)
@@ -454,13 +548,15 @@ void svg_legenda(void)
if (!svgfile)
return;
+ fprintf(svgfile, "<g>\n");
svg_legenda_box(0, "Running", "sample");
- svg_legenda_box(100, "Idle","rect.c1");
- svg_legenda_box(200, "Deeper Idle", "rect.c3");
- svg_legenda_box(350, "Deepest Idle", "rect.c6");
+ svg_legenda_box(100, "Idle","c1");
+ svg_legenda_box(200, "Deeper Idle", "c3");
+ svg_legenda_box(350, "Deepest Idle", "c6");
svg_legenda_box(550, "Sleeping", "process2");
svg_legenda_box(650, "Waiting for cpu", "waiting");
svg_legenda_box(800, "Blocked on IO", "blocked");
+ fprintf(svgfile, "</g>\n");
}
void svg_time_grid(void)
@@ -483,7 +579,7 @@ void svg_time_grid(void)
color = 128;
}
- fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
i += 10000000;
@@ -498,3 +594,123 @@ void svg_close(void)
svgfile = NULL;
}
}
+
+#define cpumask_bits(maskp) ((maskp)->bits)
+typedef struct { DECLARE_BITMAP(bits, MAX_NR_CPUS); } cpumask_t;
+
+struct topology {
+ cpumask_t *sib_core;
+ int sib_core_nr;
+ cpumask_t *sib_thr;
+ int sib_thr_nr;
+};
+
+static void scan_thread_topology(int *map, struct topology *t, int cpu, int *pos)
+{
+ int i;
+ int thr;
+
+ for (i = 0; i < t->sib_thr_nr; i++) {
+ if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i])))
+ continue;
+
+ for_each_set_bit(thr,
+ cpumask_bits(&t->sib_thr[i]),
+ MAX_NR_CPUS)
+ if (map[thr] == -1)
+ map[thr] = (*pos)++;
+ }
+}
+
+static void scan_core_topology(int *map, struct topology *t)
+{
+ int pos = 0;
+ int i;
+ int cpu;
+
+ for (i = 0; i < t->sib_core_nr; i++)
+ for_each_set_bit(cpu,
+ cpumask_bits(&t->sib_core[i]),
+ MAX_NR_CPUS)
+ scan_thread_topology(map, t, cpu, &pos);
+}
+
+static int str_to_bitmap(char *s, cpumask_t *b)
+{
+ int i;
+ int ret = 0;
+ struct cpu_map *m;
+ int c;
+
+ m = cpu_map__new(s);
+ if (!m)
+ return -1;
+
+ for (i = 0; i < m->nr; i++) {
+ c = m->map[i];
+ if (c >= MAX_NR_CPUS) {
+ ret = -1;
+ break;
+ }
+
+ set_bit(c, cpumask_bits(b));
+ }
+
+ cpu_map__delete(m);
+
+ return ret;
+}
+
+int svg_build_topology_map(char *sib_core, int sib_core_nr,
+ char *sib_thr, int sib_thr_nr)
+{
+ int i;
+ struct topology t;
+
+ t.sib_core_nr = sib_core_nr;
+ t.sib_thr_nr = sib_thr_nr;
+ t.sib_core = calloc(sib_core_nr, sizeof(cpumask_t));
+ t.sib_thr = calloc(sib_thr_nr, sizeof(cpumask_t));
+
+ if (!t.sib_core || !t.sib_thr) {
+ fprintf(stderr, "topology: no memory\n");
+ goto exit;
+ }
+
+ for (i = 0; i < sib_core_nr; i++) {
+ if (str_to_bitmap(sib_core, &t.sib_core[i])) {
+ fprintf(stderr, "topology: can't parse siblings map\n");
+ goto exit;
+ }
+
+ sib_core += strlen(sib_core) + 1;
+ }
+
+ for (i = 0; i < sib_thr_nr; i++) {
+ if (str_to_bitmap(sib_thr, &t.sib_thr[i])) {
+ fprintf(stderr, "topology: can't parse siblings map\n");
+ goto exit;
+ }
+
+ sib_thr += strlen(sib_thr) + 1;
+ }
+
+ topology_map = malloc(sizeof(int) * MAX_NR_CPUS);
+ if (!topology_map) {
+ fprintf(stderr, "topology: no memory\n");
+ goto exit;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++)
+ topology_map[i] = -1;
+
+ scan_core_topology(topology_map, &t);
+
+ return 0;
+
+exit:
+ zfree(&t.sib_core);
+ zfree(&t.sib_thr);
+
+ return -1;
+}
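
An illustrative, standalone sketch of the packed input layout svg_build_topology_map() consumes: consecutive NUL-terminated cpu-list strings, one per sibling group. The lists below are invented, not taken from the patch.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/*
		 * Two core-sibling groups packed as consecutive NUL-terminated
		 * strings, as walked by the loops in svg_build_topology_map().
		 */
		char sib_core[] = "0-3\0" "4-7";	/* sib_core_nr == 2 */
		char *p = sib_core;

		for (int i = 0; i < 2; i++) {
			printf("core siblings[%d] = \"%s\"\n", i, p);
			p += strlen(p) + 1;	/* same stride as the function above */
		}
		return 0;
	}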
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
index e0781989cc3..e3aff5332e3 100644
--- a/tools/perf/util/svghelper.h
+++ b/tools/perf/util/svghelper.h
@@ -1,28 +1,33 @@
#ifndef __PERF_SVGHELPER_H
#define __PERF_SVGHELPER_H
-#include "types.h"
+#include <linux/types.h>
extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
-extern void svg_sample(int Yslot, int cpu, u64 start, u64 end);
-extern void svg_waiting(int Yslot, u64 start, u64 end);
+extern void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
+extern void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
-extern void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name);
+extern void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace);
extern void svg_cstate(int cpu, u64 start, u64 end, int type);
extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
extern void svg_time_grid(void);
extern void svg_legenda(void);
-extern void svg_wakeline(u64 start, int row1, int row2);
-extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2);
-extern void svg_interrupt(u64 start, int row);
+extern void svg_wakeline(u64 start, int row1, int row2, const char *backtrace);
+extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace);
+extern void svg_interrupt(u64 start, int row, const char *backtrace);
extern void svg_text(int Yslot, u64 start, const char *text);
extern void svg_close(void);
+extern int svg_build_topology_map(char *sib_core, int sib_core_nr,
+ char *sib_thr, int sib_thr_nr);
extern int svg_page_width;
+extern u64 svg_highlight;
+extern const char *svg_highlight_name;
#endif /* __PERF_SVGHELPER_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
new file mode 100644
index 00000000000..6864661a79d
--- /dev/null
+++ b/tools/perf/util/symbol-elf.c
@@ -0,0 +1,1625 @@
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include "symbol.h"
+#include "vdso.h"
+#include <symbol/kallsyms.h>
+#include "debug.h"
+
+#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
+static int elf_getphdrnum(Elf *elf, size_t *dst)
+{
+ GElf_Ehdr gehdr;
+ GElf_Ehdr *ehdr;
+
+ ehdr = gelf_getehdr(elf, &gehdr);
+ if (!ehdr)
+ return -1;
+
+ *dst = ehdr->e_phnum;
+
+ return 0;
+}
+#endif
+
+#ifndef NT_GNU_BUILD_ID
+#define NT_GNU_BUILD_ID 3
+#endif
+
+/**
+ * elf_symtab__for_each_symbol - iterate through all the symbols

+ *
+ * @syms: struct elf_symtab instance to iterate
+ * @idx: uint32_t idx
+ * @sym: GElf_Sym iterator
+ */
+#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
+ for (idx = 0, gelf_getsym(syms, idx, &sym);\
+ idx < nr_syms; \
+ idx++, gelf_getsym(syms, idx, &sym))
+
+static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+{
+ return GELF_ST_TYPE(sym->st_info);
+}
+
+static inline int elf_sym__is_function(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_FUNC &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline bool elf_sym__is_object(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_OBJECT &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sym__is_function(sym);
+ case MAP__VARIABLE:
+ return elf_sym__is_object(sym);
+ default:
+ return false;
+ }
+}
+
+static inline const char *elf_sym__name(const GElf_Sym *sym,
+ const Elf_Data *symstrs)
+{
+ return symstrs->d_buf + sym->st_name;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
+static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
+}
+
+static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
+ enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sec__is_text(shdr, secstrs);
+ case MAP__VARIABLE:
+ return elf_sec__is_data(shdr, secstrs);
+ default:
+ return false;
+ }
+}
+
+static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
+{
+ Elf_Scn *sec = NULL;
+ GElf_Shdr shdr;
+ size_t cnt = 1;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ gelf_getshdr(sec, &shdr);
+
+ if ((addr >= shdr.sh_addr) &&
+ (addr < (shdr.sh_addr + shdr.sh_size)))
+ return cnt;
+
+ ++cnt;
+ }
+
+ return -1;
+}
+
+Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name, size_t *idx)
+{
+ Elf_Scn *sec = NULL;
+ size_t cnt = 1;
+
+ /* Elf is corrupted/truncated, avoid calling elf_strptr. */
+ if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
+ return NULL;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *str;
+
+ gelf_getshdr(sec, shp);
+ str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
+ if (str && !strcmp(name, str)) {
+ if (idx)
+ *idx = cnt;
+ return sec;
+ }
+ ++cnt;
+ }
+
+ return NULL;
+}
+
+#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+
+#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+
+/*
+ * We need to check if we have a .dynsym, so that we can handle the
+ * .plt, synthesizing its symbols, which aren't in the symtabs (be it
+ * .dynsym or .symtab).
+ * Always look at the original dso, not at debuginfo packages, which
+ * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
+ */
+int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
+ symbol_filter_t filter)
+{
+ uint32_t nr_rel_entries, idx;
+ GElf_Sym sym;
+ u64 plt_offset;
+ GElf_Shdr shdr_plt;
+ struct symbol *f;
+ GElf_Shdr shdr_rel_plt, shdr_dynsym;
+ Elf_Data *reldata, *syms, *symstrs;
+ Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
+ size_t dynsym_idx;
+ GElf_Ehdr ehdr;
+ char sympltname[1024];
+ Elf *elf;
+ int nr = 0, symidx, err = 0;
+
+ if (!ss->dynsym)
+ return 0;
+
+ elf = ss->elf;
+ ehdr = ss->ehdr;
+
+ scn_dynsym = ss->dynsym;
+ shdr_dynsym = ss->dynshdr;
+ dynsym_idx = ss->dynsym_idx;
+
+ if (scn_dynsym == NULL)
+ goto out_elf_end;
+
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rela.plt", NULL);
+ if (scn_plt_rel == NULL) {
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rel.plt", NULL);
+ if (scn_plt_rel == NULL)
+ goto out_elf_end;
+ }
+
+ err = -1;
+
+ if (shdr_rel_plt.sh_link != dynsym_idx)
+ goto out_elf_end;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+ goto out_elf_end;
+
+ /*
+ * Fetch the relocation section to find the idxes to the GOT
+ * and the symbols in the .dynsym they refer to.
+ */
+ reldata = elf_getdata(scn_plt_rel, NULL);
+ if (reldata == NULL)
+ goto out_elf_end;
+
+ syms = elf_getdata(scn_dynsym, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
+ if (scn_symstrs == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(scn_symstrs, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ if (symstrs->d_size == 0)
+ goto out_elf_end;
+
+ nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
+ plt_offset = shdr_plt.sh_offset;
+
+ if (shdr_rel_plt.sh_type == SHT_RELA) {
+ GElf_Rela pos_mem, *pos;
+
+ elf_section__for_each_rela(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&dso->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ } else if (shdr_rel_plt.sh_type == SHT_REL) {
+ GElf_Rel pos_mem, *pos;
+ elf_section__for_each_rel(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&dso->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ }
+
+ err = 0;
+out_elf_end:
+ if (err == 0)
+ return nr;
+ pr_debug("%s: problems reading %s PLT info.\n",
+ __func__, dso->long_name);
+ return 0;
+}
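
A much-simplified, standalone sketch of the synthetic naming done above: each .rel.plt/.rela.plt entry maps to a "<name>@plt" symbol at the next .plt slot. Offsets, entry size and names below are invented.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical .plt layout: section offset and per-entry size. */
		uint64_t plt_offset = 0x1020;	/* shdr_plt.sh_offset */
		uint64_t entsize    = 16;	/* shdr_plt.sh_entsize */
		const char *dynsyms[] = { "malloc", "free", "printf" };

		for (unsigned i = 0; i < 3; i++) {
			plt_offset += entsize;	/* entry 0 is the PLT header, skip it */
			printf("%#llx: %s@plt\n",
			       (unsigned long long)plt_offset, dynsyms[i]);
		}
		return 0;
	}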
+
+/*
+ * Align offset to 4 bytes as needed for note name and descriptor data.
+ */
+#define NOTE_ALIGN(n) (((n) + 3) & -4U)
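
A minimal standalone check of the rounding rule the macro above encodes (4-byte alignment of note name and descriptor sizes); the sample sizes are illustrative only.

	#include <assert.h>
	#include <stdio.h>

	/* Round a note field size up to a 4-byte boundary. */
	static unsigned int note_align(unsigned int n)
	{
		return (n + 3) & ~3U;	/* same result as ((n) + 3) & -4U */
	}

	int main(void)
	{
		/* "GNU\0" has n_namesz == 4; a 20-byte build-id has n_descsz == 20. */
		assert(note_align(4) == 4);
		assert(note_align(5) == 8);	/* odd sizes get padded */
		assert(note_align(20) == 20);
		printf("note alignment example ok\n");
		return 0;
	}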
+
+static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+{
+ int err = -1;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+ void *ptr;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out;
+ }
+
+ /*
+ * Check the following sections for notes:
+ * '.note.gnu.build-id'
+ * '.notes'
+ * '.note' (VDSO specific)
+ */
+ do {
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note.gnu.build-id", NULL);
+ if (sec)
+ break;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".notes", NULL);
+ if (sec)
+ break;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note", NULL);
+ if (sec)
+ break;
+
+ return err;
+
+ } while (0);
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out;
+
+ ptr = data->d_buf;
+ while (ptr < (data->d_buf + data->d_size)) {
+ GElf_Nhdr *nhdr = ptr;
+ size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
+ const char *name;
+
+ ptr += sizeof(*nhdr);
+ name = ptr;
+ ptr += namesz;
+ if (nhdr->n_type == NT_GNU_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU")) {
+ if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+ err = descsz;
+ break;
+ }
+ }
+ ptr += descsz;
+ }
+
+out:
+ return err;
+}
+
+int filename__read_build_id(const char *filename, void *bf, size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ err = elf_read_build_id(elf, bf, size);
+
+ elf_end(elf);
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
+int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+{
+ int fd, err = -1;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ while (1) {
+ char bf[BUFSIZ];
+ GElf_Nhdr nhdr;
+ size_t namesz, descsz;
+
+ if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
+ break;
+
+ namesz = NOTE_ALIGN(nhdr.n_namesz);
+ descsz = NOTE_ALIGN(nhdr.n_descsz);
+ if (nhdr.n_type == NT_GNU_BUILD_ID &&
+ nhdr.n_namesz == sizeof("GNU")) {
+ if (read(fd, bf, namesz) != (ssize_t)namesz)
+ break;
+ if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(descsz, size);
+ if (read(fd, build_id, sz) == (ssize_t)sz) {
+ memset(build_id + sz, 0, size - sz);
+ err = 0;
+ break;
+ }
+ } else if (read(fd, bf, descsz) != (ssize_t)descsz)
+ break;
+ } else {
+ int n = namesz + descsz;
+ if (read(fd, bf, n) != n)
+ break;
+ }
+ }
+ close(fd);
+out:
+ return err;
+}
+
+int filename__read_debuglink(const char *filename, char *debuglink,
+ size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out_elf_end;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out_elf_end;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu_debuglink", NULL);
+ if (sec == NULL)
+ goto out_elf_end;
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out_elf_end;
+
+ /* the start of this section is a zero-terminated string */
+ strncpy(debuglink, data->d_buf, size);
+
+ err = 0;
+
+out_elf_end:
+ elf_end(elf);
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
+static int dso__swap_init(struct dso *dso, unsigned char eidata)
+{
+ static unsigned int const endian = 1;
+
+ dso->needs_swap = DSO_SWAP__NO;
+
+ switch (eidata) {
+ case ELFDATA2LSB:
+ /* We are big endian, DSO is little endian. */
+ if (*(unsigned char const *)&endian != 1)
+ dso->needs_swap = DSO_SWAP__YES;
+ break;
+
+ case ELFDATA2MSB:
+ /* We are little endian, DSO is big endian. */
+ if (*(unsigned char const *)&endian != 0)
+ dso->needs_swap = DSO_SWAP__YES;
+ break;
+
+ default:
+ pr_err("unrecognized DSO data encoding %d\n", eidata);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+bool symsrc__possibly_runtime(struct symsrc *ss)
+{
+ return ss->dynsym || ss->opdsec;
+}
+
+bool symsrc__has_symtab(struct symsrc *ss)
+{
+ return ss->symtab != NULL;
+}
+
+void symsrc__destroy(struct symsrc *ss)
+{
+ zfree(&ss->name);
+ elf_end(ss->elf);
+ close(ss->fd);
+}
+
+int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
+ enum dso_binary_type type)
+{
+ int err = -1;
+ GElf_Ehdr ehdr;
+ Elf *elf;
+ int fd;
+
+ fd = open(name, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
+ goto out_close;
+ }
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_debug("%s: cannot get elf header.\n", __func__);
+ goto out_elf_end;
+ }
+
+ if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
+ goto out_elf_end;
+
+ /* Always reject images with a mismatched build-id: */
+ if (dso->has_build_id) {
+ u8 build_id[BUILD_ID_SIZE];
+
+ if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
+ goto out_elf_end;
+
+ if (!dso__build_id_equal(dso, build_id))
+ goto out_elf_end;
+ }
+
+ ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
+ NULL);
+ if (ss->symshdr.sh_type != SHT_SYMTAB)
+ ss->symtab = NULL;
+
+ ss->dynsym_idx = 0;
+ ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
+ &ss->dynsym_idx);
+ if (ss->dynshdr.sh_type != SHT_DYNSYM)
+ ss->dynsym = NULL;
+
+ ss->opdidx = 0;
+ ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
+ &ss->opdidx);
+ if (ss->opdshdr.sh_type != SHT_PROGBITS)
+ ss->opdsec = NULL;
+
+ if (dso->kernel == DSO_TYPE_USER) {
+ GElf_Shdr shdr;
+ ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL ||
+ is_vdso_map(dso->short_name) ||
+ elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL);
+ } else {
+ ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL;
+ }
+
+ ss->name = strdup(name);
+ if (!ss->name)
+ goto out_elf_end;
+
+ ss->elf = elf;
+ ss->fd = fd;
+ ss->ehdr = ehdr;
+ ss->type = type;
+
+ return 0;
+
+out_elf_end:
+ elf_end(elf);
+out_close:
+ close(fd);
+ return err;
+}
+
+/**
+ * ref_reloc_sym_not_found - has kernel relocation symbol been found.
+ * @kmap: kernel maps and relocation reference symbol
+ *
+ * This function returns %true if we are dealing with the kernel maps and the
+ * relocation reference symbol has not yet been found. Otherwise %false is
+ * returned.
+ */
+static bool ref_reloc_sym_not_found(struct kmap *kmap)
+{
+ return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ !kmap->ref_reloc_sym->unrelocated_addr;
+}
+
+/**
+ * ref_reloc - kernel relocation offset.
+ * @kmap: kernel maps and relocation reference symbol
+ *
+ * This function returns the offset of kernel addresses as determined by using
+ * the relocation reference symbol i.e. if the kernel has not been relocated
+ * then the return value is zero.
+ */
+static u64 ref_reloc(struct kmap *kmap)
+{
+ if (kmap && kmap->ref_reloc_sym &&
+ kmap->ref_reloc_sym->unrelocated_addr)
+ return kmap->ref_reloc_sym->addr -
+ kmap->ref_reloc_sym->unrelocated_addr;
+ return 0;
+}
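
A small worked example of the ref_reloc() arithmetic, using invented addresses for the reference symbol.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical values: where the symbol ended up vs. what the symtab says. */
		uint64_t relocated_addr   = 0xffffffff81200000ULL;	/* from kallsyms */
		uint64_t unrelocated_addr = 0xffffffff81000000ULL;	/* from vmlinux symtab */

		/* Same arithmetic as ref_reloc(): the slide applied to the kernel. */
		uint64_t reloc = relocated_addr - unrelocated_addr;

		printf("relocation offset: %#llx\n", (unsigned long long)reloc);	/* 0x200000 */
		return 0;
	}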
+
+int dso__load_sym(struct dso *dso, struct map *map,
+ struct symsrc *syms_ss, struct symsrc *runtime_ss,
+ symbol_filter_t filter, int kmodule)
+{
+ struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
+ struct map *curr_map = map;
+ struct dso *curr_dso = dso;
+ Elf_Data *symstrs, *secstrs;
+ uint32_t nr_syms;
+ int err = -1;
+ uint32_t idx;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *syms, *opddata = NULL;
+ GElf_Sym sym;
+ Elf_Scn *sec, *sec_strndx;
+ Elf *elf;
+ int nr = 0;
+ bool remap_kernel = false, adjust_kernel_syms = false;
+
+ dso->symtab_type = syms_ss->type;
+ dso->rel = syms_ss->ehdr.e_type == ET_REL;
+
+ /*
+ * Modules may already have symbols from kallsyms, but those symbols
+ * have the wrong values for the dso maps, so remove them.
+ */
+ if (kmodule && syms_ss->symtab)
+ symbols__delete(&dso->symbols[map->type]);
+
+ if (!syms_ss->symtab) {
+ syms_ss->symtab = syms_ss->dynsym;
+ syms_ss->symshdr = syms_ss->dynshdr;
+ }
+
+ elf = syms_ss->elf;
+ ehdr = syms_ss->ehdr;
+ sec = syms_ss->symtab;
+ shdr = syms_ss->symshdr;
+
+ if (runtime_ss->opdsec)
+ opddata = elf_rawdata(runtime_ss->opdsec, NULL);
+
+ syms = elf_getdata(sec, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ sec = elf_getscn(elf, shdr.sh_link);
+ if (sec == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(sec, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (secstrs == NULL)
+ goto out_elf_end;
+
+ nr_syms = shdr.sh_size / shdr.sh_entsize;
+
+ memset(&sym, 0, sizeof(sym));
+
+ /*
+ * The kernel relocation symbol is needed in advance in order to adjust
+ * kernel maps correctly.
+ */
+ if (ref_reloc_sym_not_found(kmap)) {
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+
+ if (strcmp(elf_name, kmap->ref_reloc_sym->name))
+ continue;
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+ map->reloc = kmap->ref_reloc_sym->addr -
+ kmap->ref_reloc_sym->unrelocated_addr;
+ break;
+ }
+ }
+
+ dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
+ /*
+ * Initial kernel and module mappings do not map to the dso. For
+ * function mappings, flag the fixups.
+ */
+ if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
+ remap_kernel = true;
+ adjust_kernel_syms = dso->adjust_symbols;
+ }
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ struct symbol *f;
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+ char *demangled = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
+ bool used_opd = false;
+
+ if (!is_label && !elf_sym__is_a(&sym, map->type))
+ continue;
+
+ /* Reject ARM ELF "mapping symbols": these aren't unique and
+ * don't identify functions, so they will confuse the profile
+ * output. */
+ if (ehdr.e_machine == EM_ARM) {
+ if (!strcmp(elf_name, "$a") ||
+ !strcmp(elf_name, "$d") ||
+ !strcmp(elf_name, "$t"))
+ continue;
+ }
+
+ if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
+ u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
+ u64 *opd = opddata->d_buf + offset;
+ sym.st_value = DSO__SWAP(dso, u64, *opd);
+ sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
+ sym.st_value);
+ used_opd = true;
+ }
+ /*
+ * When loading symbols in a data mapping, ABS symbols (which
+ * have a value of SHN_ABS in their st_shndx) fail at
+ * elf_getscn(). That marks the loading as a failure, so
+ * already loaded symbols cannot be fixed up.
+ *
+ * I'm not sure what should be done. Just ignore them for now.
+ * - Namhyung Kim
+ */
+ if (sym.st_shndx == SHN_ABS)
+ continue;
+
+ sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
+ if (!sec)
+ goto out_elf_end;
+
+ gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
+
+ /* On ARM, symbols for thumb functions have 1 added to
+ * the symbol address as a flag - remove it */
+ if ((ehdr.e_machine == EM_ARM) &&
+ (map->type == MAP__FUNCTION) &&
+ (sym.st_value & 1))
+ --sym.st_value;
+
+ if (dso->kernel || kmodule) {
+ char dso_name[PATH_MAX];
+
+ /* Adjust symbol to map to file offset */
+ if (adjust_kernel_syms)
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+
+ if (strcmp(section_name,
+ (curr_dso->short_name +
+ dso->short_name_len)) == 0)
+ goto new_symbol;
+
+ if (strcmp(section_name, ".text") == 0) {
+ /*
+ * The initial kernel mapping is based on
+ * kallsyms and identity maps. Overwrite it to
+ * map to the kernel dso.
+ */
+ if (remap_kernel && dso->kernel) {
+ remap_kernel = false;
+ map->start = shdr.sh_addr +
+ ref_reloc(kmap);
+ map->end = map->start + shdr.sh_size;
+ map->pgoff = shdr.sh_offset;
+ map->map_ip = map__map_ip;
+ map->unmap_ip = map__unmap_ip;
+ /* Ensure maps are correctly ordered */
+ map_groups__remove(kmap->kmaps, map);
+ map_groups__insert(kmap->kmaps, map);
+ }
+
+ /*
+ * The initial module mapping is based on
+ * /proc/modules mapped to offset zero.
+ * Overwrite it to map to the module dso.
+ */
+ if (remap_kernel && kmodule) {
+ remap_kernel = false;
+ map->pgoff = shdr.sh_offset;
+ }
+
+ curr_map = map;
+ curr_dso = dso;
+ goto new_symbol;
+ }
+
+ if (!kmap)
+ goto new_symbol;
+
+ snprintf(dso_name, sizeof(dso_name),
+ "%s%s", dso->short_name, section_name);
+
+ curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
+ if (curr_map == NULL) {
+ u64 start = sym.st_value;
+
+ if (kmodule)
+ start += map->start + shdr.sh_offset;
+
+ curr_dso = dso__new(dso_name);
+ if (curr_dso == NULL)
+ goto out_elf_end;
+ curr_dso->kernel = dso->kernel;
+ curr_dso->long_name = dso->long_name;
+ curr_dso->long_name_len = dso->long_name_len;
+ curr_map = map__new2(start, curr_dso,
+ map->type);
+ if (curr_map == NULL) {
+ dso__delete(curr_dso);
+ goto out_elf_end;
+ }
+ if (adjust_kernel_syms) {
+ curr_map->start = shdr.sh_addr +
+ ref_reloc(kmap);
+ curr_map->end = curr_map->start +
+ shdr.sh_size;
+ curr_map->pgoff = shdr.sh_offset;
+ } else {
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ }
+ curr_dso->symtab_type = dso->symtab_type;
+ map_groups__insert(kmap->kmaps, curr_map);
+ dsos__add(&dso->node, curr_dso);
+ dso__set_loaded(curr_dso, map->type);
+ } else
+ curr_dso = curr_map->dso;
+
+ goto new_symbol;
+ }
+
+ if ((used_opd && runtime_ss->adjust_symbols)
+ || (!used_opd && syms_ss->adjust_symbols)) {
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
+ (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
+new_symbol:
+ /*
+ * We need to figure out if the object was created from C++ sources.
+ * DWARF DW_compile_unit has this, but we don't always have access
+ * to it...
+ */
+ if (symbol_conf.demangle) {
+ demangled = bfd_demangle(NULL, elf_name,
+ DMGL_PARAMS | DMGL_ANSI);
+ if (demangled != NULL)
+ elf_name = demangled;
+ }
+ f = symbol__new(sym.st_value, sym.st_size,
+ GELF_ST_BIND(sym.st_info), elf_name);
+ free(demangled);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(curr_map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&curr_dso->symbols[curr_map->type], f);
+ nr++;
+ }
+ }
+
+ /*
+ * For misannotated, zeroed, ASM function sizes.
+ */
+ if (nr > 0) {
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
+ symbols__fixup_end(&dso->symbols[map->type]);
+ if (kmap) {
+ /*
+ * We need to fixup this here too because we create new
+ * maps here, for things like vsyscall sections.
+ */
+ __map_groups__fixup_end(kmap->kmaps, map->type);
+ }
+ }
+ err = nr;
+out_elf_end:
+ return err;
+}
+
+static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
+{
+ GElf_Phdr phdr;
+ size_t i, phdrnum;
+ int err;
+ u64 sz;
+
+ if (elf_getphdrnum(elf, &phdrnum))
+ return -1;
+
+ for (i = 0; i < phdrnum; i++) {
+ if (gelf_getphdr(elf, i, &phdr) == NULL)
+ return -1;
+ if (phdr.p_type != PT_LOAD)
+ continue;
+ if (exe) {
+ if (!(phdr.p_flags & PF_X))
+ continue;
+ } else {
+ if (!(phdr.p_flags & PF_R))
+ continue;
+ }
+ sz = min(phdr.p_memsz, phdr.p_filesz);
+ if (!sz)
+ continue;
+ err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+ bool *is_64_bit)
+{
+ int err;
+ Elf *elf;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return -1;
+
+ if (is_64_bit)
+ *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
+
+ err = elf_read_maps(elf, exe, mapfn, data);
+
+ elf_end(elf);
+ return err;
+}
+
+static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
+{
+ ssize_t r;
+ size_t n;
+ int err = -1;
+ char *buf = malloc(page_size);
+
+ if (buf == NULL)
+ return -1;
+
+ if (lseek(to, to_offs, SEEK_SET) != to_offs)
+ goto out;
+
+ if (lseek(from, from_offs, SEEK_SET) != from_offs)
+ goto out;
+
+ while (len) {
+ n = page_size;
+ if (len < n)
+ n = len;
+ /* Use read because mmap won't work on proc files */
+ r = read(from, buf, n);
+ if (r < 0)
+ goto out;
+ if (!r)
+ break;
+ n = r;
+ r = write(to, buf, n);
+ if (r < 0)
+ goto out;
+ if ((size_t)r != n)
+ goto out;
+ len -= n;
+ }
+
+ err = 0;
+out:
+ free(buf);
+ return err;
+}
+
+struct kcore {
+ int fd;
+ int elfclass;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+};
+
+static int kcore__open(struct kcore *kcore, const char *filename)
+{
+ GElf_Ehdr *ehdr;
+
+ kcore->fd = open(filename, O_RDONLY);
+ if (kcore->fd == -1)
+ return -1;
+
+ kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
+ if (!kcore->elf)
+ goto out_close;
+
+ kcore->elfclass = gelf_getclass(kcore->elf);
+ if (kcore->elfclass == ELFCLASSNONE)
+ goto out_end;
+
+ ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
+ if (!ehdr)
+ goto out_end;
+
+ return 0;
+
+out_end:
+ elf_end(kcore->elf);
+out_close:
+ close(kcore->fd);
+ return -1;
+}
+
+static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
+ bool temp)
+{
+ GElf_Ehdr *ehdr;
+
+ kcore->elfclass = elfclass;
+
+ if (temp)
+ kcore->fd = mkstemp(filename);
+ else
+ kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
+ if (kcore->fd == -1)
+ return -1;
+
+ kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
+ if (!kcore->elf)
+ goto out_close;
+
+ if (!gelf_newehdr(kcore->elf, elfclass))
+ goto out_end;
+
+ ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
+ if (!ehdr)
+ goto out_end;
+
+ return 0;
+
+out_end:
+ elf_end(kcore->elf);
+out_close:
+ close(kcore->fd);
+ unlink(filename);
+ return -1;
+}
+
+static void kcore__close(struct kcore *kcore)
+{
+ elf_end(kcore->elf);
+ close(kcore->fd);
+}
+
+static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
+{
+ GElf_Ehdr *ehdr = &to->ehdr;
+ GElf_Ehdr *kehdr = &from->ehdr;
+
+ memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
+ ehdr->e_type = kehdr->e_type;
+ ehdr->e_machine = kehdr->e_machine;
+ ehdr->e_version = kehdr->e_version;
+ ehdr->e_entry = 0;
+ ehdr->e_shoff = 0;
+ ehdr->e_flags = kehdr->e_flags;
+ ehdr->e_phnum = count;
+ ehdr->e_shentsize = 0;
+ ehdr->e_shnum = 0;
+ ehdr->e_shstrndx = 0;
+
+ if (from->elfclass == ELFCLASS32) {
+ ehdr->e_phoff = sizeof(Elf32_Ehdr);
+ ehdr->e_ehsize = sizeof(Elf32_Ehdr);
+ ehdr->e_phentsize = sizeof(Elf32_Phdr);
+ } else {
+ ehdr->e_phoff = sizeof(Elf64_Ehdr);
+ ehdr->e_ehsize = sizeof(Elf64_Ehdr);
+ ehdr->e_phentsize = sizeof(Elf64_Phdr);
+ }
+
+ if (!gelf_update_ehdr(to->elf, ehdr))
+ return -1;
+
+ if (!gelf_newphdr(to->elf, count))
+ return -1;
+
+ return 0;
+}
+
+static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
+ u64 addr, u64 len)
+{
+ GElf_Phdr gphdr;
+ GElf_Phdr *phdr;
+
+ phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
+ if (!phdr)
+ return -1;
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ phdr->p_offset = offset;
+ phdr->p_vaddr = addr;
+ phdr->p_paddr = 0;
+ phdr->p_filesz = len;
+ phdr->p_memsz = len;
+ phdr->p_align = page_size;
+
+ if (!gelf_update_phdr(kcore->elf, idx, phdr))
+ return -1;
+
+ return 0;
+}
+
+static off_t kcore__write(struct kcore *kcore)
+{
+ return elf_update(kcore->elf, ELF_C_WRITE);
+}
+
+struct phdr_data {
+ off_t offset;
+ u64 addr;
+ u64 len;
+};
+
+struct kcore_copy_info {
+ u64 stext;
+ u64 etext;
+ u64 first_symbol;
+ u64 last_symbol;
+ u64 first_module;
+ u64 last_module_symbol;
+ struct phdr_data kernel_map;
+ struct phdr_data modules_map;
+};
+
+static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
+ u64 start)
+{
+ struct kcore_copy_info *kci = arg;
+
+ if (!symbol_type__is_a(type, MAP__FUNCTION))
+ return 0;
+
+ if (strchr(name, '[')) {
+ if (start > kci->last_module_symbol)
+ kci->last_module_symbol = start;
+ return 0;
+ }
+
+ if (!kci->first_symbol || start < kci->first_symbol)
+ kci->first_symbol = start;
+
+ if (!kci->last_symbol || start > kci->last_symbol)
+ kci->last_symbol = start;
+
+ if (!strcmp(name, "_stext")) {
+ kci->stext = start;
+ return 0;
+ }
+
+ if (!strcmp(name, "_etext")) {
+ kci->etext = start;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
+ const char *dir)
+{
+ char kallsyms_filename[PATH_MAX];
+
+ scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
+
+ if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
+ return -1;
+
+ if (kallsyms__parse(kallsyms_filename, kci,
+ kcore_copy__process_kallsyms) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int kcore_copy__process_modules(void *arg,
+ const char *name __maybe_unused,
+ u64 start)
+{
+ struct kcore_copy_info *kci = arg;
+
+ if (!kci->first_module || start < kci->first_module)
+ kci->first_module = start;
+
+ return 0;
+}
+
+static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
+ const char *dir)
+{
+ char modules_filename[PATH_MAX];
+
+ scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
+
+ if (symbol__restricted_filename(modules_filename, "/proc/modules"))
+ return -1;
+
+ if (modules__parse(modules_filename, kci,
+ kcore_copy__process_modules) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
+ u64 s, u64 e)
+{
+ if (p->addr || s < start || s >= end)
+ return;
+
+ p->addr = s;
+ p->offset = (s - start) + pgoff;
+ p->len = e < end ? e - s : end - s;
+}
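
A tiny worked example of the clamping kcore_copy__map() performs, with invented PT_LOAD and kernel-text addresses.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical PT_LOAD segment and kernel text range. */
		uint64_t start = 0xffffffff81000000ULL, end = 0xffffffff82000000ULL;
		uint64_t pgoff = 0x1000;
		uint64_t s = 0xffffffff81200000ULL, e = 0xffffffff81b00000ULL;

		if (s >= start && s < end) {
			uint64_t addr   = s;
			uint64_t offset = (s - start) + pgoff;	/* file offset of the range */
			uint64_t len    = (e < end ? e - s : end - s);

			printf("addr=%#llx offset=%#llx len=%#llx\n",
			       (unsigned long long)addr, (unsigned long long)offset,
			       (unsigned long long)len);
		}
		return 0;
	}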
+
+static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
+{
+ struct kcore_copy_info *kci = data;
+ u64 end = start + len;
+
+ kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
+ kci->etext);
+
+ kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
+ kci->last_module_symbol);
+
+ return 0;
+}
+
+static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
+{
+ if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
+ Elf *elf)
+{
+ if (kcore_copy__parse_kallsyms(kci, dir))
+ return -1;
+
+ if (kcore_copy__parse_modules(kci, dir))
+ return -1;
+
+ if (kci->stext)
+ kci->stext = round_down(kci->stext, page_size);
+ else
+ kci->stext = round_down(kci->first_symbol, page_size);
+
+ if (kci->etext) {
+ kci->etext = round_up(kci->etext, page_size);
+ } else if (kci->last_symbol) {
+ kci->etext = round_up(kci->last_symbol, page_size);
+ kci->etext += page_size;
+ }
+
+ kci->first_module = round_down(kci->first_module, page_size);
+
+ if (kci->last_module_symbol) {
+ kci->last_module_symbol = round_up(kci->last_module_symbol,
+ page_size);
+ kci->last_module_symbol += page_size;
+ }
+
+ if (!kci->stext || !kci->etext)
+ return -1;
+
+ if (kci->first_module && !kci->last_module_symbol)
+ return -1;
+
+ return kcore_copy__read_maps(kci, elf);
+}
+
+static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
+ const char *name)
+{
+ char from_filename[PATH_MAX];
+ char to_filename[PATH_MAX];
+
+ scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
+ scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
+
+ return copyfile_mode(from_filename, to_filename, 0400);
+}
+
+static int kcore_copy__unlink(const char *dir, const char *name)
+{
+ char filename[PATH_MAX];
+
+ scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
+
+ return unlink(filename);
+}
+
+static int kcore_copy__compare_fds(int from, int to)
+{
+ char *buf_from;
+ char *buf_to;
+ ssize_t ret;
+ size_t len;
+ int err = -1;
+
+ buf_from = malloc(page_size);
+ buf_to = malloc(page_size);
+ if (!buf_from || !buf_to)
+ goto out;
+
+ while (1) {
+ /* Use read because mmap won't work on proc files */
+ ret = read(from, buf_from, page_size);
+ if (ret < 0)
+ goto out;
+
+ if (!ret)
+ break;
+
+ len = ret;
+
+ if (readn(to, buf_to, len) != (int)len)
+ goto out;
+
+ if (memcmp(buf_from, buf_to, len))
+ goto out;
+ }
+
+ err = 0;
+out:
+ free(buf_to);
+ free(buf_from);
+ return err;
+}
+
+static int kcore_copy__compare_files(const char *from_filename,
+ const char *to_filename)
+{
+ int from, to, err = -1;
+
+ from = open(from_filename, O_RDONLY);
+ if (from < 0)
+ return -1;
+
+ to = open(to_filename, O_RDONLY);
+ if (to < 0)
+ goto out_close_from;
+
+ err = kcore_copy__compare_fds(from, to);
+
+ close(to);
+out_close_from:
+ close(from);
+ return err;
+}
+
+static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
+ const char *name)
+{
+ char from_filename[PATH_MAX];
+ char to_filename[PATH_MAX];
+
+ scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
+ scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
+
+ return kcore_copy__compare_files(from_filename, to_filename);
+}
+
+/**
+ * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
+ * @from_dir: from directory
+ * @to_dir: to directory
+ *
+ * This function copies kallsyms, modules and kcore files from one directory to
+ * another. kallsyms and modules are copied entirely. Only code segments are
+ * copied from kcore. It is assumed that two segments suffice: one for the
+ * kernel proper and one for all the modules. The code segments are determined
+ * from kallsyms and modules files. The kernel map starts at _stext or the
+ * lowest function symbol, and ends at _etext or the highest function symbol.
+ * The module map starts at the lowest module address and ends at the highest
+ * module symbol. Start addresses are rounded down to the nearest page. End
+ * addresses are rounded up to the nearest page. An extra page is added to the
+ * highest kernel symbol and highest module symbol to, hopefully, encompass that
+ * symbol too. Because it contains only code sections, the resulting kcore is
+ * unusual. One significant peculiarity is that the mapping (start -> pgoff)
+ * is not the same for the kernel map and the modules map. That happens because
+ * the data is copied adjacently whereas the original kcore has gaps. Finally,
+ * kallsyms and modules files are compared with their copies to check that
+ * modules have not been loaded or unloaded while the copies were taking place.
+ *
+ * Return: %0 on success, %-1 on failure.
+ */
+int kcore_copy(const char *from_dir, const char *to_dir)
+{
+ struct kcore kcore;
+ struct kcore extract;
+ size_t count = 2;
+ int idx = 0, err = -1;
+ off_t offset = page_size, sz, modules_offset = 0;
+ struct kcore_copy_info kci = { .stext = 0, };
+ char kcore_filename[PATH_MAX];
+ char extract_filename[PATH_MAX];
+
+ if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
+ return -1;
+
+ if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
+ goto out_unlink_kallsyms;
+
+ scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
+ scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
+
+ if (kcore__open(&kcore, kcore_filename))
+ goto out_unlink_modules;
+
+ if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
+ goto out_kcore_close;
+
+ if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
+ goto out_kcore_close;
+
+ if (!kci.modules_map.addr)
+ count -= 1;
+
+ if (kcore__copy_hdr(&kcore, &extract, count))
+ goto out_extract_close;
+
+ if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
+ kci.kernel_map.len))
+ goto out_extract_close;
+
+ if (kci.modules_map.addr) {
+ modules_offset = offset + kci.kernel_map.len;
+ if (kcore__add_phdr(&extract, idx, modules_offset,
+ kci.modules_map.addr, kci.modules_map.len))
+ goto out_extract_close;
+ }
+
+ sz = kcore__write(&extract);
+ if (sz < 0 || sz > offset)
+ goto out_extract_close;
+
+ if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
+ kci.kernel_map.len))
+ goto out_extract_close;
+
+ if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
+ extract.fd, modules_offset,
+ kci.modules_map.len))
+ goto out_extract_close;
+
+ if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
+ goto out_extract_close;
+
+ if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
+ goto out_extract_close;
+
+ err = 0;
+
+out_extract_close:
+ kcore__close(&extract);
+ if (err)
+ unlink(extract_filename);
+out_kcore_close:
+ kcore__close(&kcore);
+out_unlink_modules:
+ if (err)
+ kcore_copy__unlink(to_dir, "modules");
+out_unlink_kallsyms:
+ if (err)
+ kcore_copy__unlink(to_dir, "kallsyms");
+
+ return err;
+}
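
A rough sketch of the output layout described in the comment above: ELF headers in the first page, then the kernel and modules segments copied back to back. The sizes are invented; only the offset arithmetic mirrors kcore_copy().

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical sizes, already page-rounded as kcore_copy__calc_maps() does. */
		uint64_t page_size   = 4096;
		uint64_t kernel_len  = 0x900000;	/* _stext.._etext */
		uint64_t modules_len = 0x300000;	/* first module..last module symbol */

		/* Headers occupy the first page, then the two segments follow. */
		uint64_t kernel_off  = page_size;
		uint64_t modules_off = kernel_off + kernel_len;

		printf("kernel  segment: off=%#llx len=%#llx\n",
		       (unsigned long long)kernel_off, (unsigned long long)kernel_len);
		printf("modules segment: off=%#llx len=%#llx\n",
		       (unsigned long long)modules_off, (unsigned long long)modules_len);
		return 0;
	}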
+
+int kcore_extract__create(struct kcore_extract *kce)
+{
+ struct kcore kcore;
+ struct kcore extract;
+ size_t count = 1;
+ int idx = 0, err = -1;
+ off_t offset = page_size, sz;
+
+ if (kcore__open(&kcore, kce->kcore_filename))
+ return -1;
+
+ strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
+ if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
+ goto out_kcore_close;
+
+ if (kcore__copy_hdr(&kcore, &extract, count))
+ goto out_extract_close;
+
+ if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
+ goto out_extract_close;
+
+ sz = kcore__write(&extract);
+ if (sz < 0 || sz > offset)
+ goto out_extract_close;
+
+ if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
+ goto out_extract_close;
+
+ err = 0;
+
+out_extract_close:
+ kcore__close(&extract);
+ if (err)
+ unlink(kce->extract_filename);
+out_kcore_close:
+ kcore__close(&kcore);
+
+ return err;
+}
+
+void kcore_extract__delete(struct kcore_extract *kce)
+{
+ unlink(kce->extract_filename);
+}
+
+void symbol__elf_init(void)
+{
+ elf_version(EV_CURRENT);
+}
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
new file mode 100644
index 00000000000..bd15f490d04
--- /dev/null
+++ b/tools/perf/util/symbol-minimal.c
@@ -0,0 +1,330 @@
+#include "symbol.h"
+#include "util.h"
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <byteswap.h>
+#include <sys/stat.h>
+
+
+static bool check_need_swap(int file_endian)
+{
+ const int data = 1;
+ u8 *check = (u8 *)&data;
+ int host_endian;
+
+ if (check[0] == 1)
+ host_endian = ELFDATA2LSB;
+ else
+ host_endian = ELFDATA2MSB;
+
+ return host_endian != file_endian;
+}
+
+#define NOTE_ALIGN(sz) (((sz) + 3) & ~3)
+
+#define NT_GNU_BUILD_ID 3
+
+static int read_build_id(void *note_data, size_t note_len, void *bf,
+ size_t size, bool need_swap)
+{
+ struct {
+ u32 n_namesz;
+ u32 n_descsz;
+ u32 n_type;
+ } *nhdr;
+ void *ptr;
+
+ ptr = note_data;
+ while (ptr < (note_data + note_len)) {
+ const char *name;
+ size_t namesz, descsz;
+
+ nhdr = ptr;
+ if (need_swap) {
+ nhdr->n_namesz = bswap_32(nhdr->n_namesz);
+ nhdr->n_descsz = bswap_32(nhdr->n_descsz);
+ nhdr->n_type = bswap_32(nhdr->n_type);
+ }
+
+ namesz = NOTE_ALIGN(nhdr->n_namesz);
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
+
+ ptr += sizeof(*nhdr);
+ name = ptr;
+ ptr += namesz;
+ if (nhdr->n_type == NT_GNU_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU")) {
+ if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+ return 0;
+ }
+ }
+ ptr += descsz;
+ }
+
+ return -1;
+}
+
+int filename__read_debuglink(const char *filename __maybe_unused,
+ char *debuglink __maybe_unused,
+ size_t size __maybe_unused)
+{
+ return -1;
+}
+
+/*
+ * Just try the PT_NOTE headers, otherwise this fails
+ */
+int filename__read_build_id(const char *filename, void *bf, size_t size)
+{
+ FILE *fp;
+ int ret = -1;
+ bool need_swap = false;
+ u8 e_ident[EI_NIDENT];
+ size_t buf_size;
+ void *buf;
+ int i;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
+ goto out;
+
+ if (memcmp(e_ident, ELFMAG, SELFMAG) ||
+ e_ident[EI_VERSION] != EV_CURRENT)
+ goto out;
+
+ need_swap = check_need_swap(e_ident[EI_DATA]);
+
+ /* for simplicity */
+ fseek(fp, 0, SEEK_SET);
+
+ if (e_ident[EI_CLASS] == ELFCLASS32) {
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr *phdr;
+
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ goto out;
+
+ if (need_swap) {
+ ehdr.e_phoff = bswap_32(ehdr.e_phoff);
+ ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
+ ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ }
+
+ buf_size = ehdr.e_phentsize * ehdr.e_phnum;
+ buf = malloc(buf_size);
+ if (buf == NULL)
+ goto out;
+
+ fseek(fp, ehdr.e_phoff, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+ void *tmp;
+
+ if (need_swap) {
+ phdr->p_type = bswap_32(phdr->p_type);
+ phdr->p_offset = bswap_32(phdr->p_offset);
+ phdr->p_filesz = bswap_32(phdr->p_filesz);
+ }
+
+ if (phdr->p_type != PT_NOTE)
+ continue;
+
+ buf_size = phdr->p_filesz;
+ tmp = realloc(buf, buf_size);
+ if (tmp == NULL)
+ goto out_free;
+
+ buf = tmp;
+ fseek(fp, phdr->p_offset, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ ret = read_build_id(buf, buf_size, bf, size, need_swap);
+ if (ret == 0)
+ ret = size;
+ break;
+ }
+ } else {
+ Elf64_Ehdr ehdr;
+ Elf64_Phdr *phdr;
+
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ goto out;
+
+ if (need_swap) {
+ ehdr.e_phoff = bswap_64(ehdr.e_phoff);
+ ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
+ ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ }
+
+ buf_size = ehdr.e_phentsize * ehdr.e_phnum;
+ buf = malloc(buf_size);
+ if (buf == NULL)
+ goto out;
+
+ fseek(fp, ehdr.e_phoff, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+ void *tmp;
+
+ if (need_swap) {
+ phdr->p_type = bswap_32(phdr->p_type);
+ phdr->p_offset = bswap_64(phdr->p_offset);
+ phdr->p_filesz = bswap_64(phdr->p_filesz);
+ }
+
+ if (phdr->p_type != PT_NOTE)
+ continue;
+
+ buf_size = phdr->p_filesz;
+ tmp = realloc(buf, buf_size);
+ if (tmp == NULL)
+ goto out_free;
+
+ buf = tmp;
+ fseek(fp, phdr->p_offset, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ ret = read_build_id(buf, buf_size, bf, size, need_swap);
+ if (ret == 0)
+ ret = size;
+ break;
+ }
+ }
+out_free:
+ free(buf);
+out:
+ fclose(fp);
+ return ret;
+}
+
+int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+{
+ int fd;
+ int ret = -1;
+ struct stat stbuf;
+ size_t buf_size;
+ void *buf;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ if (fstat(fd, &stbuf) < 0)
+ goto out;
+
+ buf_size = stbuf.st_size;
+ buf = malloc(buf_size);
+ if (buf == NULL)
+ goto out;
+
+ if (read(fd, buf, buf_size) != (ssize_t) buf_size)
+ goto out_free;
+
+ ret = read_build_id(buf, buf_size, build_id, size, false);
+out_free:
+ free(buf);
+out:
+ close(fd);
+ return ret;
+}
+
+int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused,
+ const char *name,
+ enum dso_binary_type type)
+{
+ int fd = open(name, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ ss->name = strdup(name);
+ if (!ss->name)
+ goto out_close;
+
+ ss->fd = fd;
+ ss->type = type;
+
+ return 0;
+out_close:
+ close(fd);
+ return -1;
+}
+
+bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused)
+{
+ /* Assume all sym sources could be a runtime image. */
+ return true;
+}
+
+bool symsrc__has_symtab(struct symsrc *ss __maybe_unused)
+{
+ return false;
+}
+
+void symsrc__destroy(struct symsrc *ss)
+{
+ zfree(&ss->name);
+ close(ss->fd);
+}
+
+int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused,
+ struct symsrc *ss __maybe_unused,
+ struct map *map __maybe_unused,
+ symbol_filter_t filter __maybe_unused)
+{
+ return 0;
+}
+
+int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
+ struct symsrc *ss,
+ struct symsrc *runtime_ss __maybe_unused,
+ symbol_filter_t filter __maybe_unused,
+ int kmodule __maybe_unused)
+{
+ unsigned char build_id[BUILD_ID_SIZE];
+
+ if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) {
+ dso__set_build_id(dso, build_id);
+ return 1;
+ }
+ return 0;
+}
+
+int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused,
+ mapfn_t mapfn __maybe_unused, void *data __maybe_unused,
+ bool *is_64_bit __maybe_unused)
+{
+ return -1;
+}
+
+int kcore_extract__create(struct kcore_extract *kce __maybe_unused)
+{
+ return -1;
+}
+
+void kcore_extract__delete(struct kcore_extract *kce __maybe_unused)
+{
+}
+
+int kcore_copy(const char *from_dir __maybe_unused,
+ const char *to_dir __maybe_unused)
+{
+ return -1;
+}
+
+void symbol__elf_init(void)
+{
+}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b39f499e575..7b9096f29cd 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,8 +1,5 @@
-#define _GNU_SOURCE
-#include <ctype.h>
#include <dirent.h>
#include <errno.h>
-#include <libgen.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
@@ -11,76 +8,169 @@
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
+#include <inttypes.h>
#include "build-id.h"
+#include "util.h"
#include "debug.h"
+#include "machine.h"
#include "symbol.h"
#include "strlist.h"
-#include <libelf.h>
-#include <gelf.h>
#include <elf.h>
#include <limits.h>
+#include <symbol/kallsyms.h>
#include <sys/utsname.h>
-#ifndef NT_GNU_BUILD_ID
-#define NT_GNU_BUILD_ID 3
-#endif
-
-static bool dso__build_id_equal(const struct dso *self, u8 *build_id);
-static int elf_read_build_id(Elf *elf, void *bf, size_t size);
-static void dsos__add(struct list_head *head, struct dso *dso);
-static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
-static int dso__load_kernel_sym(struct dso *self, struct map *map,
+static int dso__load_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter);
-static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
+static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter);
-static int vmlinux_path__nr_entries;
-static char **vmlinux_path;
+int vmlinux_path__nr_entries;
+char **vmlinux_path;
struct symbol_conf symbol_conf = {
- .exclude_other = true,
- .use_modules = true,
- .try_vmlinux_path = true,
+ .use_modules = true,
+ .try_vmlinux_path = true,
+ .annotate_src = true,
+ .demangle = true,
+ .cumulate_callchain = true,
+ .symfs = "",
};
-int dso__name_len(const struct dso *self)
-{
- if (verbose)
- return self->long_name_len;
+static enum dso_binary_type binary_type_symtab[] = {
+ DSO_BINARY_TYPE__KALLSYMS,
+ DSO_BINARY_TYPE__GUEST_KALLSYMS,
+ DSO_BINARY_TYPE__JAVA_JIT,
+ DSO_BINARY_TYPE__DEBUGLINK,
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__GUEST_KMODULE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
- return self->short_name_len;
-}
+#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
-bool dso__loaded(const struct dso *self, enum map_type type)
+bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
- return self->loaded & (1 << type);
+ symbol_type = toupper(symbol_type);
+
+ switch (map_type) {
+ case MAP__FUNCTION:
+ return symbol_type == 'T' || symbol_type == 'W';
+ case MAP__VARIABLE:
+ return symbol_type == 'D';
+ default:
+ return false;
+ }
}
-bool dso__sorted_by_name(const struct dso *self, enum map_type type)
+static int prefix_underscores_count(const char *str)
{
- return self->sorted_by_name & (1 << type);
+ const char *tail = str;
+
+ while (*tail == '_')
+ tail++;
+
+ return tail - str;
}
-static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
+#define SYMBOL_A 0
+#define SYMBOL_B 1
+
+static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
- self->sorted_by_name |= (1 << type);
+ s64 a;
+ s64 b;
+ size_t na, nb;
+
+ /* Prefer a symbol with non zero length */
+ a = syma->end - syma->start;
+ b = symb->end - symb->start;
+ if ((b == 0) && (a > 0))
+ return SYMBOL_A;
+ else if ((a == 0) && (b > 0))
+ return SYMBOL_B;
+
+ /* Prefer a non weak symbol over a weak one */
+ a = syma->binding == STB_WEAK;
+ b = symb->binding == STB_WEAK;
+ if (b && !a)
+ return SYMBOL_A;
+ if (a && !b)
+ return SYMBOL_B;
+
+ /* Prefer a global symbol over a non global one */
+ a = syma->binding == STB_GLOBAL;
+ b = symb->binding == STB_GLOBAL;
+ if (a && !b)
+ return SYMBOL_A;
+ if (b && !a)
+ return SYMBOL_B;
+
+ /* Prefer a symbol with less underscores */
+ a = prefix_underscores_count(syma->name);
+ b = prefix_underscores_count(symb->name);
+ if (b > a)
+ return SYMBOL_A;
+ else if (a > b)
+ return SYMBOL_B;
+
+ /* Choose the symbol with the longest name */
+ na = strlen(syma->name);
+ nb = strlen(symb->name);
+ if (na > nb)
+ return SYMBOL_A;
+ else if (na < nb)
+ return SYMBOL_B;
+
+ /* Avoid "SyS" kernel syscall aliases */
+ if (na >= 3 && !strncmp(syma->name, "SyS", 3))
+ return SYMBOL_B;
+ if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
+ return SYMBOL_B;
+
+ return SYMBOL_A;
}
-bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+void symbols__fixup_duplicate(struct rb_root *symbols)
{
- switch (map_type) {
- case MAP__FUNCTION:
- return symbol_type == 'T' || symbol_type == 'W';
- case MAP__VARIABLE:
- return symbol_type == 'D' || symbol_type == 'd';
- default:
- return false;
+ struct rb_node *nd;
+ struct symbol *curr, *next;
+
+ nd = rb_first(symbols);
+
+ while (nd) {
+ curr = rb_entry(nd, struct symbol, rb_node);
+again:
+ nd = rb_next(&curr->rb_node);
+ next = rb_entry(nd, struct symbol, rb_node);
+
+ if (!nd)
+ break;
+
+ if (curr->start != next->start)
+ continue;
+
+ if (choose_best_symbol(curr, next) == SYMBOL_A) {
+ rb_erase(&next->rb_node, symbols);
+ symbol__delete(next);
+ goto again;
+ } else {
+ nd = rb_next(&curr->rb_node);
+ rb_erase(&curr->rb_node, symbols);
+ symbol__delete(curr);
+ }
}
}
-static void symbols__fixup_end(struct rb_root *self)
+void symbols__fixup_end(struct rb_root *symbols)
{
- struct rb_node *nd, *prevnd = rb_first(self);
+ struct rb_node *nd, *prevnd = rb_first(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
@@ -92,7 +182,7 @@ static void symbols__fixup_end(struct rb_root *self)
prev = curr;
curr = rb_entry(nd, struct symbol, rb_node);
- if (prev->end == prev->start)
+ if (prev->end == prev->start && prev->end != curr->start)
prev->end = curr->start - 1;
}
@@ -101,10 +191,10 @@ static void symbols__fixup_end(struct rb_root *self)
curr->end = roundup(curr->start, 4096);
}
-static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
+void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
struct map *prev, *curr;
- struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
+ struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
if (prevnd == NULL)
return;
@@ -121,132 +211,87 @@ static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
* We still haven't the actual symbols, so guess the
* last map final address.
*/
- curr->end = ~0UL;
+ curr->end = ~0ULL;
}
-static void map_groups__fixup_end(struct map_groups *self)
-{
- int i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- __map_groups__fixup_end(self, i);
-}
-
-static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
- const char *name)
+struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
size_t namelen = strlen(name) + 1;
- struct symbol *self = calloc(1, (symbol_conf.priv_size +
- sizeof(*self) + namelen));
- if (self == NULL)
+ struct symbol *sym = calloc(1, (symbol_conf.priv_size +
+ sizeof(*sym) + namelen));
+ if (sym == NULL)
return NULL;
if (symbol_conf.priv_size)
- self = ((void *)self) + symbol_conf.priv_size;
+ sym = ((void *)sym) + symbol_conf.priv_size;
- self->start = start;
- self->end = len ? start + len - 1 : start;
- self->binding = binding;
- self->namelen = namelen - 1;
+ sym->start = start;
+ sym->end = len ? start + len - 1 : start;
+ sym->binding = binding;
+ sym->namelen = namelen - 1;
- pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+ pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
+ __func__, name, start, sym->end);
+ memcpy(sym->name, name, namelen);
- memcpy(self->name, name, namelen);
-
- return self;
+ return sym;
}
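
A note on the allocation trick above (editorial, not in the patch): symbol__new() over-allocates by symbol_conf.priv_size so that tools can keep per-symbol private data *in front of* the struct, and symbol__delete() rewinds by the same amount before freeing. A minimal stand-alone sketch of the same layout, with invented names and a fixed private size:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct thing {
	int id;
	char name[];
};

static size_t priv_size = 16;		/* imagine symbol_conf.priv_size */

static struct thing *thing__new(int id, const char *name)
{
	struct thing *t = calloc(1, priv_size + sizeof(*t) + strlen(name) + 1);

	if (t == NULL)
		return NULL;
	t = (void *)t + priv_size;	/* public pointer starts after the private area */
	t->id = id;
	strcpy(t->name, name);
	return t;
}

static void *thing__priv(struct thing *t)
{
	return (void *)t - priv_size;	/* the private area sits just before the struct */
}

static void thing__delete(struct thing *t)
{
	free((void *)t - priv_size);	/* free from the real allocation start */
}

int main(void)
{
	struct thing *t = thing__new(1, "schedule");

	if (t == NULL)
		return 1;
	strcpy(thing__priv(t), "private");	/* fits in the 16 reserved bytes */
	printf("%d %s (%s)\n", t->id, t->name, (char *)thing__priv(t));
	thing__delete(t);
	return 0;
}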
-void symbol__delete(struct symbol *self)
+void symbol__delete(struct symbol *sym)
{
- free(((void *)self) - symbol_conf.priv_size);
+ free(((void *)sym) - symbol_conf.priv_size);
}
-static size_t symbol__fprintf(struct symbol *self, FILE *fp)
+size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
- return fprintf(fp, " %llx-%llx %c %s\n",
- self->start, self->end,
- self->binding == STB_GLOBAL ? 'g' :
- self->binding == STB_LOCAL ? 'l' : 'w',
- self->name);
-}
-
-void dso__set_long_name(struct dso *self, char *name)
-{
- if (name == NULL)
- return;
- self->long_name = name;
- self->long_name_len = strlen(name);
+ return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
+ sym->start, sym->end,
+ sym->binding == STB_GLOBAL ? 'g' :
+ sym->binding == STB_LOCAL ? 'l' : 'w',
+ sym->name);
}
-static void dso__set_short_name(struct dso *self, const char *name)
+size_t symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al, FILE *fp)
{
- if (name == NULL)
- return;
- self->short_name = name;
- self->short_name_len = strlen(name);
-}
+ unsigned long offset;
+ size_t length;
-static void dso__set_basename(struct dso *self)
-{
- dso__set_short_name(self, basename(self->long_name));
+ if (sym && sym->name) {
+ length = fprintf(fp, "%s", sym->name);
+ if (al) {
+ if (al->addr < sym->end)
+ offset = al->addr - sym->start;
+ else
+ offset = al->addr - al->map->start - sym->start;
+ length += fprintf(fp, "+0x%lx", offset);
+ }
+ return length;
+ } else
+ return fprintf(fp, "[unknown]");
}
-struct dso *dso__new(const char *name)
+size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
- struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);
-
- if (self != NULL) {
- int i;
- strcpy(self->name, name);
- dso__set_long_name(self, self->name);
- dso__set_short_name(self, self->name);
- for (i = 0; i < MAP__NR_TYPES; ++i)
- self->symbols[i] = self->symbol_names[i] = RB_ROOT;
- self->slen_calculated = 0;
- self->origin = DSO__ORIG_NOT_FOUND;
- self->loaded = 0;
- self->sorted_by_name = 0;
- self->has_build_id = 0;
- self->kernel = DSO_TYPE_USER;
- INIT_LIST_HEAD(&self->node);
- }
-
- return self;
+ return symbol__fprintf_symname_offs(sym, NULL, fp);
}
-static void symbols__delete(struct rb_root *self)
+void symbols__delete(struct rb_root *symbols)
{
struct symbol *pos;
- struct rb_node *next = rb_first(self);
+ struct rb_node *next = rb_first(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, self);
+ rb_erase(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
-void dso__delete(struct dso *self)
+void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
- int i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- symbols__delete(&self->symbols[i]);
- if (self->sname_alloc)
- free((char *)self->short_name);
- if (self->lname_alloc)
- free(self->long_name);
- free(self);
-}
-
-void dso__set_build_id(struct dso *self, void *build_id)
-{
- memcpy(self->build_id, build_id, sizeof(self->build_id));
- self->has_build_id = 1;
-}
-
-static void symbols__insert(struct rb_root *self, struct symbol *sym)
-{
- struct rb_node **p = &self->rb_node;
+ struct rb_node **p = &symbols->rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
@@ -260,17 +305,17 @@ static void symbols__insert(struct rb_root *self, struct symbol *sym)
p = &(*p)->rb_right;
}
rb_link_node(&sym->rb_node, parent, p);
- rb_insert_color(&sym->rb_node, self);
+ rb_insert_color(&sym->rb_node, symbols);
}
-static struct symbol *symbols__find(struct rb_root *self, u64 ip)
+static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
{
struct rb_node *n;
- if (self == NULL)
+ if (symbols == NULL)
return NULL;
- n = self->rb_node;
+ n = symbols->rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -286,16 +331,28 @@ static struct symbol *symbols__find(struct rb_root *self, u64 ip)
return NULL;
}
+static struct symbol *symbols__first(struct rb_root *symbols)
+{
+ struct rb_node *n = rb_first(symbols);
+
+ if (n)
+ return rb_entry(n, struct symbol, rb_node);
+
+ return NULL;
+}
+
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
};
-static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
+static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
- struct rb_node **p = &self->rb_node;
+ struct rb_node **p = &symbols->rb_node;
struct rb_node *parent = NULL;
- struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+ struct symbol_name_rb_node *symn, *s;
+
+ symn = container_of(sym, struct symbol_name_rb_node, sym);
while (*p != NULL) {
parent = *p;
@@ -306,27 +363,29 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
p = &(*p)->rb_right;
}
rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color(&symn->rb_node, self);
+ rb_insert_color(&symn->rb_node, symbols);
}
-static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
+static void symbols__sort_by_name(struct rb_root *symbols,
+ struct rb_root *source)
{
struct rb_node *nd;
for (nd = rb_first(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
- symbols__insert_by_name(self, pos);
+ symbols__insert_by_name(symbols, pos);
}
}
-static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
+static struct symbol *symbols__find_by_name(struct rb_root *symbols,
+ const char *name)
{
struct rb_node *n;
- if (self == NULL)
+ if (symbols == NULL)
return NULL;
- n = self->rb_node;
+ n = symbols->rb_node;
while (n) {
struct symbol_name_rb_node *s;
@@ -346,55 +405,38 @@ static struct symbol *symbols__find_by_name(struct rb_root *self, const char *na
return NULL;
}
-struct symbol *dso__find_symbol(struct dso *self,
+struct symbol *dso__find_symbol(struct dso *dso,
enum map_type type, u64 addr)
{
- return symbols__find(&self->symbols[type], addr);
+ return symbols__find(&dso->symbols[type], addr);
}
-struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
- const char *name)
+static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
- return symbols__find_by_name(&self->symbol_names[type], name);
+ return symbols__first(&dso->symbols[type]);
}
-void dso__sort_by_name(struct dso *self, enum map_type type)
-{
- dso__set_sorted_by_name(self, type);
- return symbols__sort_by_name(&self->symbol_names[type],
- &self->symbols[type]);
-}
-
-int build_id__sprintf(const u8 *self, int len, char *bf)
+struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
+ const char *name)
{
- char *bid = bf;
- const u8 *raw = self;
- int i;
-
- for (i = 0; i < len; ++i) {
- sprintf(bid, "%02x", *raw);
- ++raw;
- bid += 2;
- }
-
- return raw - self;
+ return symbols__find_by_name(&dso->symbol_names[type], name);
}
-size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
+void dso__sort_by_name(struct dso *dso, enum map_type type)
{
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
- return fprintf(fp, "%s", sbuild_id);
+ dso__set_sorted_by_name(dso, type);
+ return symbols__sort_by_name(&dso->symbol_names[type],
+ &dso->symbols[type]);
}
-size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp)
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ enum map_type type, FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
struct symbol_name_rb_node *pos;
- for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
fprintf(fp, "%s\n", pos->sym.name);
}
@@ -402,69 +444,62 @@ size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *
return ret;
}
-size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
-{
- struct rb_node *nd;
- size_t ret = fprintf(fp, "dso: %s (", self->short_name);
-
- if (self->short_name != self->long_name)
- ret += fprintf(fp, "%s, ", self->long_name);
- ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
- self->loaded ? "" : "NOT ");
- ret += dso__fprintf_buildid(self, fp);
- ret += fprintf(fp, ")\n");
- for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
- struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
- ret += symbol__fprintf(pos, fp);
- }
-
- return ret;
-}
-
-int kallsyms__parse(const char *filename, void *arg,
- int (*process_symbol)(void *arg, const char *name,
- char type, u64 start))
+int modules__parse(const char *filename, void *arg,
+ int (*process_module)(void *arg, const char *name,
+ u64 start))
{
char *line = NULL;
size_t n;
+ FILE *file;
int err = 0;
- FILE *file = fopen(filename, "r");
+ file = fopen(filename, "r");
if (file == NULL)
- goto out_failure;
+ return -1;
- while (!feof(file)) {
+ while (1) {
+ char name[PATH_MAX];
u64 start;
- int line_len, len;
- char symbol_type;
- char *symbol_name;
+ char *sep;
+ ssize_t line_len;
line_len = getline(&line, &n, file);
- if (line_len < 0 || !line)
- break;
+ if (line_len < 0) {
+ if (feof(file))
+ break;
+ err = -1;
+ goto out;
+ }
+
+ if (!line) {
+ err = -1;
+ goto out;
+ }
line[--line_len] = '\0'; /* \n */
- len = hex2u64(line, &start);
+ sep = strrchr(line, 'x');
+ if (sep == NULL)
+ continue;
- len++;
- if (len + 2 >= line_len)
+ hex2u64(sep + 1, &start);
+
+ sep = strchr(line, ' ');
+ if (sep == NULL)
continue;
- symbol_type = toupper(line[len]);
- symbol_name = line + len + 2;
+ *sep = '\0';
+
+ scnprintf(name, sizeof(name), "[%s]", line);
- err = process_symbol(arg, symbol_name, symbol_type, start);
+ err = process_module(arg, name, start);
if (err)
break;
}
-
+out:
free(line);
fclose(file);
return err;
-
-out_failure:
- return -1;
}
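
Editorial sketch of the line format the new modules__parse() expects, not part of the patch: each /proc/modules line ends in a hex load address introduced by "0x", and the first field is the module name, which perf wraps in brackets. strtoull() stands in here for perf's hex2u64() helper, and the sample line is made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char line[] = "nfsd 310279 2 - Live 0xffffffffa03c6000";
	char name[64];
	unsigned long long start;
	char *sep;

	sep = strrchr(line, 'x');	/* the last 'x' precedes the hex load address */
	if (sep == NULL)
		return 1;
	start = strtoull(sep + 1, NULL, 16);

	sep = strchr(line, ' ');	/* first field is the module name */
	if (sep == NULL)
		return 1;
	*sep = '\0';
	snprintf(name, sizeof(name), "[%s]", line);

	printf("%s starts at %#llx\n", name, start);	/* [nfsd] starts at 0xffffffffa03c6000 */
	return 0;
}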
struct process_kallsyms_args {
@@ -472,12 +507,34 @@ struct process_kallsyms_args {
struct dso *dso;
};
-static u8 kallsyms2elf_type(char type)
-{
- if (type == 'W')
- return STB_WEAK;
+bool symbol__is_idle(struct symbol *sym)
+{
+ const char * const idle_symbols[] = {
+ "cpu_idle",
+ "intel_idle",
+ "default_idle",
+ "native_safe_halt",
+ "enter_idle",
+ "exit_idle",
+ "mwait_idle",
+ "mwait_idle_with_hints",
+ "poll_idle",
+ "ppc64_runlatch_off",
+ "pseries_dedicated_idle_sleep",
+ NULL
+ };
+
+ int i;
+
+ if (!sym)
+ return false;
+
+ for (i = 0; idle_symbols[i]; i++) {
+ if (!strcmp(idle_symbols[i], sym->name))
+ return true;
+ }
- return isupper(type) ? STB_GLOBAL : STB_LOCAL;
+ return false;
}
static int map__process_kallsym_symbol(void *arg, const char *name,
@@ -491,10 +548,11 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
return 0;
/*
- * Will fix up the end later, when we have all symbols sorted.
+ * module symbols are not sorted so we add all
+ * symbols, setting length to 0, and rely on
+ * symbols__fixup_end() to fix it up.
*/
sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
-
if (sym == NULL)
return -ENOMEM;
/*
@@ -511,27 +569,74 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
* so that we can in the next step set the symbol ->end address and then
* call kernel_maps__split_kallsyms.
*/
-static int dso__load_all_kallsyms(struct dso *self, const char *filename,
+static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
struct map *map)
{
- struct process_kallsyms_args args = { .map = map, .dso = self, };
+ struct process_kallsyms_args args = { .map = map, .dso = dso, };
return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
+static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct map *curr_map;
+ struct symbol *pos;
+ int count = 0, moved = 0;
+ struct rb_root *root = &dso->symbols[map->type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ char *module;
+
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ module = strchr(pos->name, '\t');
+ if (module)
+ *module = '\0';
+
+ curr_map = map_groups__find(kmaps, map->type, pos->start);
+
+ if (!curr_map || (filter && filter(curr_map, pos))) {
+ rb_erase(&pos->rb_node, root);
+ symbol__delete(pos);
+ } else {
+ pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end)
+ pos->end -= curr_map->start - curr_map->pgoff;
+ if (curr_map != map) {
+ rb_erase(&pos->rb_node, root);
+ symbols__insert(
+ &curr_map->dso->symbols[curr_map->type],
+ pos);
+ ++moved;
+ } else {
+ ++count;
+ }
+ }
+ }
+
+ /* Symbols have been adjusted */
+ dso->adjust_symbols = 1;
+
+ return count + moved;
+}
+
/*
* Split the symbols into maps, making sure there are no overlaps, i.e. the
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
-static int dso__split_kallsyms(struct dso *self, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
symbol_filter_t filter)
{
struct map_groups *kmaps = map__kmap(map)->kmaps;
struct machine *machine = kmaps->machine;
struct map *curr_map = map;
struct symbol *pos;
- int count = 0;
- struct rb_root *root = &self->symbols[map->type];
+ int count = 0, moved = 0;
+ struct rb_root *root = &dso->symbols[map->type];
struct rb_node *next = rb_first(root);
int kernel_range = 0;
@@ -550,7 +655,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
if (strcmp(curr_map->dso->short_name, module)) {
if (curr_map != map &&
- self->kernel == DSO_TYPE_GUEST_KERNEL &&
+ dso->kernel == DSO_TYPE_GUEST_KERNEL &&
machine__is_default_guest(machine)) {
/*
* We assume all symbols of a module are
@@ -586,9 +691,20 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
pos->end = curr_map->map_ip(curr_map, pos->end);
} else if (curr_map != map) {
char dso_name[PATH_MAX];
- struct dso *dso;
+ struct dso *ndso;
- if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ if (delta) {
+ /* Kernel was relocated at boot time */
+ pos->start -= delta;
+ pos->end -= delta;
+ }
+
+ if (count == 0) {
+ curr_map = map;
+ goto filter_symbol;
+ }
+
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
snprintf(dso_name, sizeof(dso_name),
"[guest.kernel].%d",
kernel_range++);
@@ -597,23 +713,27 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
"[kernel].%d",
kernel_range++);
- dso = dso__new(dso_name);
- if (dso == NULL)
+ ndso = dso__new(dso_name);
+ if (ndso == NULL)
return -1;
- dso->kernel = self->kernel;
+ ndso->kernel = dso->kernel;
- curr_map = map__new2(pos->start, dso, map->type);
+ curr_map = map__new2(pos->start, ndso, map->type);
if (curr_map == NULL) {
- dso__delete(dso);
+ dso__delete(ndso);
return -1;
}
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
map_groups__insert(kmaps, curr_map);
++kernel_range;
+ } else if (delta) {
+ /* Kernel was relocated at boot time */
+ pos->start -= delta;
+ pos->end -= delta;
}
-
+filter_symbol:
if (filter && filter(curr_map, pos)) {
discard_symbol: rb_erase(&pos->rb_node, root);
symbol__delete(pos);
@@ -621,913 +741,695 @@ discard_symbol: rb_erase(&pos->rb_node, root);
if (curr_map != map) {
rb_erase(&pos->rb_node, root);
symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
- }
- count++;
+ ++moved;
+ } else
+ ++count;
}
}
if (curr_map != map &&
- self->kernel == DSO_TYPE_GUEST_KERNEL &&
+ dso->kernel == DSO_TYPE_GUEST_KERNEL &&
machine__is_default_guest(kmaps->machine)) {
dso__set_loaded(curr_map->dso, curr_map->type);
}
- return count;
+ return count + moved;
}
-int dso__load_kallsyms(struct dso *self, const char *filename,
- struct map *map, symbol_filter_t filter)
+bool symbol__restricted_filename(const char *filename,
+ const char *restricted_filename)
{
- if (dso__load_all_kallsyms(self, filename, map) < 0)
- return -1;
+ bool restricted = false;
- symbols__fixup_end(&self->symbols[map->type]);
- if (self->kernel == DSO_TYPE_GUEST_KERNEL)
- self->origin = DSO__ORIG_GUEST_KERNEL;
- else
- self->origin = DSO__ORIG_KERNEL;
+ if (symbol_conf.kptr_restrict) {
+ char *r = realpath(filename, NULL);
+
+ if (r != NULL) {
+ restricted = strcmp(r, restricted_filename) == 0;
+ free(r);
+ return restricted;
+ }
+ }
- return dso__split_kallsyms(self, map, filter);
+ return restricted;
}
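
Editorial sketch, not part of the patch: with kptr_restrict in effect, symbol__restricted_filename() above refuses /proc/kallsyms even when it is reached through a symlink, because the comparison happens on the realpath()-resolved name. A stand-alone illustration of that check:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *file = argc > 1 ? argv[1] : "/proc/kallsyms";
	char *r = realpath(file, NULL);		/* resolves symlinks and ../ */
	int restricted = r && strcmp(r, "/proc/kallsyms") == 0;

	printf("%s %s restricted\n", file, restricted ? "is" : "is not");
	free(r);
	return 0;
}

Run against /proc/kallsyms or any symlink pointing at it, both report "is restricted"; an unrelated file does not.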
-static int dso__load_perf_map(struct dso *self, struct map *map,
- symbol_filter_t filter)
-{
- char *line = NULL;
- size_t n;
- FILE *file;
- int nr_syms = 0;
+struct module_info {
+ struct rb_node rb_node;
+ char *name;
+ u64 start;
+};
- file = fopen(self->long_name, "r");
- if (file == NULL)
- goto out_failure;
+static void add_module(struct module_info *mi, struct rb_root *modules)
+{
+ struct rb_node **p = &modules->rb_node;
+ struct rb_node *parent = NULL;
+ struct module_info *m;
- while (!feof(file)) {
- u64 start, size;
- struct symbol *sym;
- int line_len, len;
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct module_info, rb_node);
+ if (strcmp(mi->name, m->name) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&mi->rb_node, parent, p);
+ rb_insert_color(&mi->rb_node, modules);
+}
- line_len = getline(&line, &n, file);
- if (line_len < 0)
- break;
+static void delete_modules(struct rb_root *modules)
+{
+ struct module_info *mi;
+ struct rb_node *next = rb_first(modules);
- if (!line)
- goto out_failure;
+ while (next) {
+ mi = rb_entry(next, struct module_info, rb_node);
+ next = rb_next(&mi->rb_node);
+ rb_erase(&mi->rb_node, modules);
+ zfree(&mi->name);
+ free(mi);
+ }
+}
- line[--line_len] = '\0'; /* \n */
+static struct module_info *find_module(const char *name,
+ struct rb_root *modules)
+{
+ struct rb_node *n = modules->rb_node;
- len = hex2u64(line, &start);
+ while (n) {
+ struct module_info *m;
+ int cmp;
- len++;
- if (len + 2 >= line_len)
- continue;
+ m = rb_entry(n, struct module_info, rb_node);
+ cmp = strcmp(name, m->name);
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return m;
+ }
- len += hex2u64(line + len, &size);
+ return NULL;
+}
- len++;
- if (len + 2 >= line_len)
- continue;
+static int __read_proc_modules(void *arg, const char *name, u64 start)
+{
+ struct rb_root *modules = arg;
+ struct module_info *mi;
- sym = symbol__new(start, size, STB_GLOBAL, line + len);
+ mi = zalloc(sizeof(struct module_info));
+ if (!mi)
+ return -ENOMEM;
- if (sym == NULL)
- goto out_delete_line;
+ mi->name = strdup(name);
+ mi->start = start;
- if (filter && filter(map, sym))
- symbol__delete(sym);
- else {
- symbols__insert(&self->symbols[map->type], sym);
- nr_syms++;
- }
+ if (!mi->name) {
+ free(mi);
+ return -ENOMEM;
}
- free(line);
- fclose(file);
-
- return nr_syms;
+ add_module(mi, modules);
-out_delete_line:
- free(line);
-out_failure:
- return -1;
-}
-
-/**
- * elf_symtab__for_each_symbol - iterate thru all the symbols
- *
- * @self: struct elf_symtab instance to iterate
- * @idx: uint32_t idx
- * @sym: GElf_Sym iterator
- */
-#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
- for (idx = 0, gelf_getsym(syms, idx, &sym);\
- idx < nr_syms; \
- idx++, gelf_getsym(syms, idx, &sym))
-
-static inline uint8_t elf_sym__type(const GElf_Sym *sym)
-{
- return GELF_ST_TYPE(sym->st_info);
-}
-
-static inline int elf_sym__is_function(const GElf_Sym *sym)
-{
- return elf_sym__type(sym) == STT_FUNC &&
- sym->st_name != 0 &&
- sym->st_shndx != SHN_UNDEF;
+ return 0;
}
-static inline bool elf_sym__is_object(const GElf_Sym *sym)
+static int read_proc_modules(const char *filename, struct rb_root *modules)
{
- return elf_sym__type(sym) == STT_OBJECT &&
- sym->st_name != 0 &&
- sym->st_shndx != SHN_UNDEF;
-}
+ if (symbol__restricted_filename(filename, "/proc/modules"))
+ return -1;
-static inline int elf_sym__is_label(const GElf_Sym *sym)
-{
- return elf_sym__type(sym) == STT_NOTYPE &&
- sym->st_name != 0 &&
- sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
-}
+ if (modules__parse(filename, modules, __read_proc_modules)) {
+ delete_modules(modules);
+ return -1;
+ }
-static inline const char *elf_sec__name(const GElf_Shdr *shdr,
- const Elf_Data *secstrs)
-{
- return secstrs->d_buf + shdr->sh_name;
+ return 0;
}
-static inline int elf_sec__is_text(const GElf_Shdr *shdr,
- const Elf_Data *secstrs)
+int compare_proc_modules(const char *from, const char *to)
{
- return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
-}
+ struct rb_root from_modules = RB_ROOT;
+ struct rb_root to_modules = RB_ROOT;
+ struct rb_node *from_node, *to_node;
+ struct module_info *from_m, *to_m;
+ int ret = -1;
-static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
- const Elf_Data *secstrs)
-{
- return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
-}
+ if (read_proc_modules(from, &from_modules))
+ return -1;
-static inline const char *elf_sym__name(const GElf_Sym *sym,
- const Elf_Data *symstrs)
-{
- return symstrs->d_buf + sym->st_name;
-}
+ if (read_proc_modules(to, &to_modules))
+ goto out_delete_from;
-static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
- GElf_Shdr *shp, const char *name,
- size_t *idx)
-{
- Elf_Scn *sec = NULL;
- size_t cnt = 1;
+ from_node = rb_first(&from_modules);
+ to_node = rb_first(&to_modules);
+ while (from_node) {
+ if (!to_node)
+ break;
- while ((sec = elf_nextscn(elf, sec)) != NULL) {
- char *str;
+ from_m = rb_entry(from_node, struct module_info, rb_node);
+ to_m = rb_entry(to_node, struct module_info, rb_node);
- gelf_getshdr(sec, shp);
- str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
- if (!strcmp(name, str)) {
- if (idx)
- *idx = cnt;
+ if (from_m->start != to_m->start ||
+ strcmp(from_m->name, to_m->name))
break;
- }
- ++cnt;
+
+ from_node = rb_next(from_node);
+ to_node = rb_next(to_node);
}
- return sec;
-}
+ if (!from_node && !to_node)
+ ret = 0;
-#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
- for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
- idx < nr_entries; \
- ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+ delete_modules(&to_modules);
+out_delete_from:
+ delete_modules(&from_modules);
-#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
- for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
- idx < nr_entries; \
- ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+ return ret;
+}
-/*
- * We need to check if we have a .dynsym, so that we can handle the
- * .plt, synthesizing its symbols, that aren't on the symtabs (be it
- * .dynsym or .symtab).
- * And always look at the original dso, not at debuginfo packages, that
- * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
- */
-static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
- symbol_filter_t filter)
+static int do_validate_kcore_modules(const char *filename, struct map *map,
+ struct map_groups *kmaps)
{
- uint32_t nr_rel_entries, idx;
- GElf_Sym sym;
- u64 plt_offset;
- GElf_Shdr shdr_plt;
- struct symbol *f;
- GElf_Shdr shdr_rel_plt, shdr_dynsym;
- Elf_Data *reldata, *syms, *symstrs;
- Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
- size_t dynsym_idx;
- GElf_Ehdr ehdr;
- char sympltname[1024];
- Elf *elf;
- int nr = 0, symidx, fd, err = 0;
-
- fd = open(self->long_name, O_RDONLY);
- if (fd < 0)
- goto out;
-
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL)
- goto out_close;
-
- if (gelf_getehdr(elf, &ehdr) == NULL)
- goto out_elf_end;
-
- scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
- ".dynsym", &dynsym_idx);
- if (scn_dynsym == NULL)
- goto out_elf_end;
-
- scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
- ".rela.plt", NULL);
- if (scn_plt_rel == NULL) {
- scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
- ".rel.plt", NULL);
- if (scn_plt_rel == NULL)
- goto out_elf_end;
- }
-
- err = -1;
+ struct rb_root modules = RB_ROOT;
+ struct map *old_map;
+ int err;
- if (shdr_rel_plt.sh_link != dynsym_idx)
- goto out_elf_end;
+ err = read_proc_modules(filename, &modules);
+ if (err)
+ return err;
- if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
- goto out_elf_end;
+ old_map = map_groups__first(kmaps, map->type);
+ while (old_map) {
+ struct map *next = map_groups__next(old_map);
+ struct module_info *mi;
- /*
- * Fetch the relocation section to find the idxes to the GOT
- * and the symbols in the .dynsym they refer to.
- */
- reldata = elf_getdata(scn_plt_rel, NULL);
- if (reldata == NULL)
- goto out_elf_end;
-
- syms = elf_getdata(scn_dynsym, NULL);
- if (syms == NULL)
- goto out_elf_end;
-
- scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
- if (scn_symstrs == NULL)
- goto out_elf_end;
-
- symstrs = elf_getdata(scn_symstrs, NULL);
- if (symstrs == NULL)
- goto out_elf_end;
-
- nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
- plt_offset = shdr_plt.sh_offset;
-
- if (shdr_rel_plt.sh_type == SHT_RELA) {
- GElf_Rela pos_mem, *pos;
-
- elf_section__for_each_rela(reldata, pos, pos_mem, idx,
- nr_rel_entries) {
- symidx = GELF_R_SYM(pos->r_info);
- plt_offset += shdr_plt.sh_entsize;
- gelf_getsym(syms, symidx, &sym);
- snprintf(sympltname, sizeof(sympltname),
- "%s@plt", elf_sym__name(&sym, symstrs));
-
- f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- STB_GLOBAL, sympltname);
- if (!f)
- goto out_elf_end;
-
- if (filter && filter(map, f))
- symbol__delete(f);
- else {
- symbols__insert(&self->symbols[map->type], f);
- ++nr;
- }
- }
- } else if (shdr_rel_plt.sh_type == SHT_REL) {
- GElf_Rel pos_mem, *pos;
- elf_section__for_each_rel(reldata, pos, pos_mem, idx,
- nr_rel_entries) {
- symidx = GELF_R_SYM(pos->r_info);
- plt_offset += shdr_plt.sh_entsize;
- gelf_getsym(syms, symidx, &sym);
- snprintf(sympltname, sizeof(sympltname),
- "%s@plt", elf_sym__name(&sym, symstrs));
-
- f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- STB_GLOBAL, sympltname);
- if (!f)
- goto out_elf_end;
-
- if (filter && filter(map, f))
- symbol__delete(f);
- else {
- symbols__insert(&self->symbols[map->type], f);
- ++nr;
- }
+ if (old_map == map || old_map->start == map->start) {
+ /* The kernel map */
+ old_map = next;
+ continue;
}
- }
- err = 0;
-out_elf_end:
- elf_end(elf);
-out_close:
- close(fd);
+ /* Module must be in memory at the same address */
+ mi = find_module(old_map->dso->short_name, &modules);
+ if (!mi || mi->start != old_map->start) {
+ err = -EINVAL;
+ goto out;
+ }
- if (err == 0)
- return nr;
+ old_map = next;
+ }
out:
- pr_debug("%s: problems reading %s PLT info.\n",
- __func__, self->long_name);
- return 0;
+ delete_modules(&modules);
+ return err;
}
-static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
+/*
+ * If kallsyms is referenced by name then we look for filename in the same
+ * directory.
+ */
+static bool filename_from_kallsyms_filename(char *filename,
+ const char *base_name,
+ const char *kallsyms_filename)
{
- switch (type) {
- case MAP__FUNCTION:
- return elf_sym__is_function(self);
- case MAP__VARIABLE:
- return elf_sym__is_object(self);
- default:
- return false;
- }
-}
+ char *name;
-static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
-{
- switch (type) {
- case MAP__FUNCTION:
- return elf_sec__is_text(self, secstrs);
- case MAP__VARIABLE:
- return elf_sec__is_data(self, secstrs);
- default:
+ strcpy(filename, kallsyms_filename);
+ name = strrchr(filename, '/');
+ if (!name)
return false;
- }
-}
-
-static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
-{
- Elf_Scn *sec = NULL;
- GElf_Shdr shdr;
- size_t cnt = 1;
-
- while ((sec = elf_nextscn(elf, sec)) != NULL) {
- gelf_getshdr(sec, &shdr);
- if ((addr >= shdr.sh_addr) &&
- (addr < (shdr.sh_addr + shdr.sh_size)))
- return cnt;
+ name += 1;
- ++cnt;
+ if (!strcmp(name, "kallsyms")) {
+ strcpy(name, base_name);
+ return true;
}
- return -1;
+ return false;
}
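
Editorial sketch, not part of the patch: filename_from_kallsyms_filename() above only swaps the trailing "kallsyms" path component for a sibling name, which is why a captured directory holding kallsyms, modules and kcore side by side keeps working. The helper and paths below are illustrative:

#include <stdio.h>
#include <string.h>

static int sibling(char *out, size_t sz, const char *base, const char *kallsyms)
{
	char *name;

	if (strlen(kallsyms) >= sz)
		return 0;
	strcpy(out, kallsyms);
	name = strrchr(out, '/');
	if (!name || strcmp(name + 1, "kallsyms"))
		return 0;
	/* keep the directory part, replace the file name */
	snprintf(name + 1, sz - (size_t)(name + 1 - out), "%s", base);
	return 1;
}

int main(void)
{
	char buf[256];

	if (sibling(buf, sizeof(buf), "kcore", "/proc/kallsyms"))
		printf("%s\n", buf);		/* /proc/kcore */
	if (sibling(buf, sizeof(buf), "modules", "/tmp/kdump/kallsyms"))
		printf("%s\n", buf);		/* /tmp/kdump/modules */
	return 0;
}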
-static int dso__load_sym(struct dso *self, struct map *map, const char *name,
- int fd, symbol_filter_t filter, int kmodule,
- int want_symtab)
+static int validate_kcore_modules(const char *kallsyms_filename,
+ struct map *map)
{
- struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
- struct map *curr_map = map;
- struct dso *curr_dso = self;
- Elf_Data *symstrs, *secstrs;
- uint32_t nr_syms;
- int err = -1;
- uint32_t idx;
- GElf_Ehdr ehdr;
- GElf_Shdr shdr, opdshdr;
- Elf_Data *syms, *opddata = NULL;
- GElf_Sym sym;
- Elf_Scn *sec, *sec_strndx, *opdsec;
- Elf *elf;
- int nr = 0;
- size_t opdidx = 0;
-
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL) {
- pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
- goto out_close;
- }
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ char modules_filename[PATH_MAX];
- if (gelf_getehdr(elf, &ehdr) == NULL) {
- pr_debug("%s: cannot get elf header.\n", __func__);
- goto out_elf_end;
- }
+ if (!filename_from_kallsyms_filename(modules_filename, "modules",
+ kallsyms_filename))
+ return -EINVAL;
- /* Always reject images with a mismatched build-id: */
- if (self->has_build_id) {
- u8 build_id[BUILD_ID_SIZE];
+ if (do_validate_kcore_modules(modules_filename, map, kmaps))
+ return -EINVAL;
- if (elf_read_build_id(elf, build_id,
- BUILD_ID_SIZE) != BUILD_ID_SIZE)
- goto out_elf_end;
+ return 0;
+}
- if (!dso__build_id_equal(self, build_id))
- goto out_elf_end;
- }
+static int validate_kcore_addresses(const char *kallsyms_filename,
+ struct map *map)
+{
+ struct kmap *kmap = map__kmap(map);
- sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
- if (sec == NULL) {
- if (want_symtab)
- goto out_elf_end;
+ if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+ u64 start;
- sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
- if (sec == NULL)
- goto out_elf_end;
+ start = kallsyms__get_function_start(kallsyms_filename,
+ kmap->ref_reloc_sym->name);
+ if (start != kmap->ref_reloc_sym->addr)
+ return -EINVAL;
}
- opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
- if (opdsec)
- opddata = elf_rawdata(opdsec, NULL);
-
- syms = elf_getdata(sec, NULL);
- if (syms == NULL)
- goto out_elf_end;
-
- sec = elf_getscn(elf, shdr.sh_link);
- if (sec == NULL)
- goto out_elf_end;
-
- symstrs = elf_getdata(sec, NULL);
- if (symstrs == NULL)
- goto out_elf_end;
-
- sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
- if (sec_strndx == NULL)
- goto out_elf_end;
-
- secstrs = elf_getdata(sec_strndx, NULL);
- if (secstrs == NULL)
- goto out_elf_end;
-
- nr_syms = shdr.sh_size / shdr.sh_entsize;
-
- memset(&sym, 0, sizeof(sym));
- if (self->kernel == DSO_TYPE_USER) {
- self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
- elf_section_by_name(elf, &ehdr, &shdr,
- ".gnu.prelink_undo",
- NULL) != NULL);
- } else self->adjust_symbols = 0;
+ return validate_kcore_modules(kallsyms_filename, map);
+}
- elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
- struct symbol *f;
- const char *elf_name = elf_sym__name(&sym, symstrs);
- char *demangled = NULL;
- int is_label = elf_sym__is_label(&sym);
- const char *section_name;
+struct kcore_mapfn_data {
+ struct dso *dso;
+ enum map_type type;
+ struct list_head maps;
+};
- if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
- strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
- kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
+{
+ struct kcore_mapfn_data *md = data;
+ struct map *map;
- if (!is_label && !elf_sym__is_a(&sym, map->type))
- continue;
+ map = map__new2(start, md->dso, md->type);
+ if (map == NULL)
+ return -ENOMEM;
- /* Reject ARM ELF "mapping symbols": these aren't unique and
- * don't identify functions, so will confuse the profile
- * output: */
- if (ehdr.e_machine == EM_ARM) {
- if (!strcmp(elf_name, "$a") ||
- !strcmp(elf_name, "$d") ||
- !strcmp(elf_name, "$t"))
- continue;
- }
+ map->end = map->start + len;
+ map->pgoff = pgoff;
- if (opdsec && sym.st_shndx == opdidx) {
- u32 offset = sym.st_value - opdshdr.sh_addr;
- u64 *opd = opddata->d_buf + offset;
- sym.st_value = *opd;
- sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
- }
+ list_add(&map->node, &md->maps);
- sec = elf_getscn(elf, sym.st_shndx);
- if (!sec)
- goto out_elf_end;
+ return 0;
+}
- gelf_getshdr(sec, &shdr);
+static int dso__load_kcore(struct dso *dso, struct map *map,
+ const char *kallsyms_filename)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct machine *machine = kmaps->machine;
+ struct kcore_mapfn_data md;
+ struct map *old_map, *new_map, *replacement_map = NULL;
+ bool is_64_bit;
+ int err, fd;
+ char kcore_filename[PATH_MAX];
+ struct symbol *sym;
- if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
- continue;
+ /* This function requires that the map is the kernel map */
+ if (map != machine->vmlinux_maps[map->type])
+ return -EINVAL;
- section_name = elf_sec__name(&shdr, secstrs);
+ if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
+ kallsyms_filename))
+ return -EINVAL;
- if (self->kernel != DSO_TYPE_USER || kmodule) {
- char dso_name[PATH_MAX];
+ /* Modules and kernel must be present at their original addresses */
+ if (validate_kcore_addresses(kallsyms_filename, map))
+ return -EINVAL;
- if (strcmp(section_name,
- (curr_dso->short_name +
- self->short_name_len)) == 0)
- goto new_symbol;
+ md.dso = dso;
+ md.type = map->type;
+ INIT_LIST_HEAD(&md.maps);
- if (strcmp(section_name, ".text") == 0) {
- curr_map = map;
- curr_dso = self;
- goto new_symbol;
- }
+ fd = open(kcore_filename, O_RDONLY);
+ if (fd < 0)
+ return -EINVAL;
- snprintf(dso_name, sizeof(dso_name),
- "%s%s", self->short_name, section_name);
+ /* Read new maps into temporary lists */
+ err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
+ &is_64_bit);
+ if (err)
+ goto out_err;
- curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
- if (curr_map == NULL) {
- u64 start = sym.st_value;
+ if (list_empty(&md.maps)) {
+ err = -EINVAL;
+ goto out_err;
+ }
- if (kmodule)
- start += map->start + shdr.sh_offset;
+ /* Remove old maps */
+ old_map = map_groups__first(kmaps, map->type);
+ while (old_map) {
+ struct map *next = map_groups__next(old_map);
- curr_dso = dso__new(dso_name);
- if (curr_dso == NULL)
- goto out_elf_end;
- curr_dso->kernel = self->kernel;
- curr_map = map__new2(start, curr_dso,
- map->type);
- if (curr_map == NULL) {
- dso__delete(curr_dso);
- goto out_elf_end;
- }
- curr_map->map_ip = identity__map_ip;
- curr_map->unmap_ip = identity__map_ip;
- curr_dso->origin = self->origin;
- map_groups__insert(kmap->kmaps, curr_map);
- dsos__add(&self->node, curr_dso);
- dso__set_loaded(curr_dso, map->type);
- } else
- curr_dso = curr_map->dso;
+ if (old_map != map)
+ map_groups__remove(kmaps, old_map);
+ old_map = next;
+ }
- goto new_symbol;
+ /* Find the kernel map using the first symbol */
+ sym = dso__first_symbol(dso, map->type);
+ list_for_each_entry(new_map, &md.maps, node) {
+ if (sym && sym->start >= new_map->start &&
+ sym->start < new_map->end) {
+ replacement_map = new_map;
+ break;
}
+ }
- if (curr_dso->adjust_symbols) {
- pr_debug4("%s: adjusting symbol: st_value: %#Lx "
- "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
- (u64)sym.st_value, (u64)shdr.sh_addr,
- (u64)shdr.sh_offset);
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
- }
- /*
- * We need to figure out if the object was created from C++ sources
- * DWARF DW_compile_unit has this, but we don't always have access
- * to it...
- */
- demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
- if (demangled != NULL)
- elf_name = demangled;
-new_symbol:
- f = symbol__new(sym.st_value, sym.st_size,
- GELF_ST_BIND(sym.st_info), elf_name);
- free(demangled);
- if (!f)
- goto out_elf_end;
-
- if (filter && filter(curr_map, f))
- symbol__delete(f);
- else {
- symbols__insert(&curr_dso->symbols[curr_map->type], f);
- nr++;
+ if (!replacement_map)
+ replacement_map = list_entry(md.maps.next, struct map, node);
+
+ /* Add new maps */
+ while (!list_empty(&md.maps)) {
+ new_map = list_entry(md.maps.next, struct map, node);
+ list_del(&new_map->node);
+ if (new_map == replacement_map) {
+ map->start = new_map->start;
+ map->end = new_map->end;
+ map->pgoff = new_map->pgoff;
+ map->map_ip = new_map->map_ip;
+ map->unmap_ip = new_map->unmap_ip;
+ map__delete(new_map);
+ /* Ensure maps are correctly ordered */
+ map_groups__remove(kmaps, map);
+ map_groups__insert(kmaps, map);
+ } else {
+ map_groups__insert(kmaps, new_map);
}
}
/*
- * For misannotated, zeroed, ASM function sizes.
+ * Set the data type and long name so that kcore can be read via
+ * dso__data_read_addr().
*/
- if (nr > 0) {
- symbols__fixup_end(&self->symbols[map->type]);
- if (kmap) {
- /*
- * We need to fixup this here too because we create new
- * maps here, for things like vsyscall sections.
- */
- __map_groups__fixup_end(kmap->kmaps, map->type);
- }
- }
- err = nr;
-out_elf_end:
- elf_end(elf);
-out_close:
- return err;
-}
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
+ else
+ dso->binary_type = DSO_BINARY_TYPE__KCORE;
+ dso__set_long_name(dso, strdup(kcore_filename), true);
-static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
-{
- return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
-}
+ close(fd);
-bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
-{
- bool have_build_id = false;
- struct dso *pos;
+ if (map->type == MAP__FUNCTION)
+ pr_debug("Using %s for kernel object code\n", kcore_filename);
+ else
+ pr_debug("Using %s for kernel data\n", kcore_filename);
- list_for_each_entry(pos, head, node) {
- if (with_hits && !pos->hit)
- continue;
- if (pos->has_build_id) {
- have_build_id = true;
- continue;
- }
- if (filename__read_build_id(pos->long_name, pos->build_id,
- sizeof(pos->build_id)) > 0) {
- have_build_id = true;
- pos->has_build_id = true;
- }
- }
+ return 0;
- return have_build_id;
+out_err:
+ while (!list_empty(&md.maps)) {
+ map = list_entry(md.maps.next, struct map, node);
+ list_del(&map->node);
+ map__delete(map);
+ }
+ close(fd);
+ return -EINVAL;
}
/*
- * Align offset to 4 bytes as needed for note name and descriptor data.
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
*/
-#define NOTE_ALIGN(n) (((n) + 3) & -4U)
-
-static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
{
- int err = -1;
- GElf_Ehdr ehdr;
- GElf_Shdr shdr;
- Elf_Data *data;
- Elf_Scn *sec;
- Elf_Kind ek;
- void *ptr;
-
- if (size < BUILD_ID_SIZE)
- goto out;
-
- ek = elf_kind(elf);
- if (ek != ELF_K_ELF)
- goto out;
-
- if (gelf_getehdr(elf, &ehdr) == NULL) {
- pr_err("%s: cannot get elf header.\n", __func__);
- goto out;
- }
+ struct kmap *kmap = map__kmap(map);
+ u64 addr;
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".note.gnu.build-id", NULL);
- if (sec == NULL) {
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".notes", NULL);
- if (sec == NULL)
- goto out;
- }
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+ return 0;
- data = elf_getdata(sec, NULL);
- if (data == NULL)
- goto out;
-
- ptr = data->d_buf;
- while (ptr < (data->d_buf + data->d_size)) {
- GElf_Nhdr *nhdr = ptr;
- int namesz = NOTE_ALIGN(nhdr->n_namesz),
- descsz = NOTE_ALIGN(nhdr->n_descsz);
- const char *name;
-
- ptr += sizeof(*nhdr);
- name = ptr;
- ptr += namesz;
- if (nhdr->n_type == NT_GNU_BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU")) {
- if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
- memcpy(bf, ptr, BUILD_ID_SIZE);
- err = BUILD_ID_SIZE;
- break;
- }
- }
- ptr += descsz;
- }
+ addr = kallsyms__get_function_start(filename,
+ kmap->ref_reloc_sym->name);
+ if (!addr)
+ return -1;
-out:
- return err;
+ *delta = addr - kmap->ref_reloc_sym->addr;
+ return 0;
}
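
Editorial sketch with invented addresses, not part of the patch: the delta above is just the difference between where the reference symbol (typically _stext or _text) sits in the kallsyms being parsed and where it sat when the session was recorded; dso__split_kallsyms() later subtracts it from every symbol it keeps.

#include <stdio.h>

int main(void)
{
	/* Invented: _stext as reported by the kallsyms file being loaded,
	 * and the address recorded at perf-record time (ref_reloc_sym->addr). */
	unsigned long long kallsyms_stext = 0xffffffff81001000ULL;
	unsigned long long recorded_stext = 0xffffffff81000000ULL;
	unsigned long long delta = kallsyms_stext - recorded_stext;

	/* A symbol the file reports at 0xffffffff8100a000 really belongs at: */
	printf("delta %#llx, fixed start %#llx\n",
	       delta, 0xffffffff8100a000ULL - delta);
	return 0;
}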
-int filename__read_build_id(const char *filename, void *bf, size_t size)
+int dso__load_kallsyms(struct dso *dso, const char *filename,
+ struct map *map, symbol_filter_t filter)
{
- int fd, err = -1;
- Elf *elf;
+ u64 delta = 0;
- if (size < BUILD_ID_SIZE)
- goto out;
+ if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+ return -1;
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out;
+ if (dso__load_all_kallsyms(dso, filename, map) < 0)
+ return -1;
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL) {
- pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
- goto out_close;
- }
+ if (kallsyms__delta(map, filename, &delta))
+ return -1;
- err = elf_read_build_id(elf, bf, size);
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
+ symbols__fixup_end(&dso->symbols[map->type]);
- elf_end(elf);
-out_close:
- close(fd);
-out:
- return err;
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
+ else
+ dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
+
+ if (!dso__load_kcore(dso, map, filename))
+ return dso__split_kallsyms_for_kcore(dso, map, filter);
+ else
+ return dso__split_kallsyms(dso, map, delta, filter);
}
-int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+static int dso__load_perf_map(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
{
- int fd, err = -1;
-
- if (size < BUILD_ID_SIZE)
- goto out;
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ int nr_syms = 0;
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out;
+ file = fopen(dso->long_name, "r");
+ if (file == NULL)
+ goto out_failure;
- while (1) {
- char bf[BUFSIZ];
- GElf_Nhdr nhdr;
- int namesz, descsz;
+ while (!feof(file)) {
+ u64 start, size;
+ struct symbol *sym;
+ int line_len, len;
- if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
break;
- namesz = NOTE_ALIGN(nhdr.n_namesz);
- descsz = NOTE_ALIGN(nhdr.n_descsz);
- if (nhdr.n_type == NT_GNU_BUILD_ID &&
- nhdr.n_namesz == sizeof("GNU")) {
- if (read(fd, bf, namesz) != namesz)
- break;
- if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
- if (read(fd, build_id,
- BUILD_ID_SIZE) == BUILD_ID_SIZE) {
- err = 0;
- break;
- }
- } else if (read(fd, bf, descsz) != descsz)
- break;
- } else {
- int n = namesz + descsz;
- if (read(fd, bf, n) != n)
- break;
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ len = hex2u64(line, &start);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ len += hex2u64(line + len, &size);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ sym = symbol__new(start, size, STB_GLOBAL, line + len);
+
+ if (sym == NULL)
+ goto out_delete_line;
+
+ if (filter && filter(map, sym))
+ symbol__delete(sym);
+ else {
+ symbols__insert(&dso->symbols[map->type], sym);
+ nr_syms++;
}
}
- close(fd);
-out:
- return err;
+
+ free(line);
+ fclose(file);
+
+ return nr_syms;
+
+out_delete_line:
+ free(line);
+out_failure:
+ return -1;
}
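
Editorial sketch, not part of the patch: the /tmp/perf-<pid>.map files parsed above are plain text, one "start size name" triple per line with both numbers in hex; that is all a JIT has to emit for perf to resolve its generated code. The path is the real convention, but the address, size and symbol name below are invented:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/tmp/perf-%d.map", (int)getpid());
	f = fopen(path, "w");
	if (f == NULL)
		return 1;

	/* start address      size   symbol name */
	fprintf(f, "%llx %x %s\n", 0x7f2d94c00000ULL, 0x400, "jitted_frobnicate");
	fclose(f);
	return 0;
}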
-char dso__symtab_origin(const struct dso *self)
+static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
+ enum dso_binary_type type)
{
- static const char origin[] = {
- [DSO__ORIG_KERNEL] = 'k',
- [DSO__ORIG_JAVA_JIT] = 'j',
- [DSO__ORIG_BUILD_ID_CACHE] = 'B',
- [DSO__ORIG_FEDORA] = 'f',
- [DSO__ORIG_UBUNTU] = 'u',
- [DSO__ORIG_BUILDID] = 'b',
- [DSO__ORIG_DSO] = 'd',
- [DSO__ORIG_KMODULE] = 'K',
- [DSO__ORIG_GUEST_KERNEL] = 'g',
- [DSO__ORIG_GUEST_KMODULE] = 'G',
- };
+ switch (type) {
+ case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__DEBUGLINK:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+ case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+ case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+ case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+ return !kmod && dso->kernel == DSO_TYPE_USER;
+
+ case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__KCORE:
+ return dso->kernel == DSO_TYPE_KERNEL;
+
+ case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ return dso->kernel == DSO_TYPE_GUEST_KERNEL;
+
+ case DSO_BINARY_TYPE__GUEST_KMODULE:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+ /*
+ * kernel modules know their symtab type - it's set when
+ * creating a module dso in machine__new_module().
+ */
+ return kmod && dso->symtab_type == type;
- if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
- return '!';
- return origin[self->origin];
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+ return true;
+
+ case DSO_BINARY_TYPE__NOT_FOUND:
+ default:
+ return false;
+ }
}
-int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
+int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
- int size = PATH_MAX;
char *name;
int ret = -1;
- int fd;
+ u_int i;
struct machine *machine;
- const char *root_dir;
- int want_symtab;
+ char *root_dir = (char *) "";
+ int ss_pos = 0;
+ struct symsrc ss_[2];
+ struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
+ bool kmod;
- dso__set_loaded(self, map->type);
+ dso__set_loaded(dso, map->type);
- if (self->kernel == DSO_TYPE_KERNEL)
- return dso__load_kernel_sym(self, map, filter);
- else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
- return dso__load_guest_kernel_sym(self, map, filter);
+ if (dso->kernel == DSO_TYPE_KERNEL)
+ return dso__load_kernel_sym(dso, map, filter);
+ else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ return dso__load_guest_kernel_sym(dso, map, filter);
if (map->groups && map->groups->machine)
machine = map->groups->machine;
else
machine = NULL;
- name = malloc(size);
- if (!name)
- return -1;
+ dso->adjust_symbols = 0;
+
+ if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
+ struct stat st;
- self->adjust_symbols = 0;
+ if (lstat(dso->name, &st) < 0)
+ return -1;
- if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
- ret = dso__load_perf_map(self, map, filter);
- self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
- DSO__ORIG_NOT_FOUND;
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ pr_warning("File %s not owned by current user or root, "
+ "ignoring it.\n", dso->name);
+ return -1;
+ }
+
+ ret = dso__load_perf_map(dso, map, filter);
+ dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
+ DSO_BINARY_TYPE__NOT_FOUND;
return ret;
}
- /* Iterate over candidate debug images.
- * On the first pass, only load images if they have a full symtab.
- * Failing that, do a second pass where we accept .dynsym also
+ if (machine)
+ root_dir = machine->root_dir;
+
+ name = malloc(PATH_MAX);
+ if (!name)
+ return -1;
+
+ kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+
+ /*
+ * Iterate over candidate debug images.
+ * Keep track of "interesting" ones (those which have a symtab, dynsym,
+ * and/or opd section) for processing.
*/
- for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1;
- self->origin != DSO__ORIG_NOT_FOUND;
- self->origin++) {
- switch (self->origin) {
- case DSO__ORIG_BUILD_ID_CACHE:
- if (dso__build_id_filename(self, name, size) == NULL)
- continue;
- break;
- case DSO__ORIG_FEDORA:
- snprintf(name, size, "/usr/lib/debug%s.debug",
- self->long_name);
- break;
- case DSO__ORIG_UBUNTU:
- snprintf(name, size, "/usr/lib/debug%s",
- self->long_name);
- break;
- case DSO__ORIG_BUILDID: {
- char build_id_hex[BUILD_ID_SIZE * 2 + 1];
-
- if (!self->has_build_id)
- continue;
-
- build_id__sprintf(self->build_id,
- sizeof(self->build_id),
- build_id_hex);
- snprintf(name, size,
- "/usr/lib/debug/.build-id/%.2s/%s.debug",
- build_id_hex, build_id_hex + 2);
- }
- break;
- case DSO__ORIG_DSO:
- snprintf(name, size, "%s", self->long_name);
- break;
- case DSO__ORIG_GUEST_KMODULE:
- if (map->groups && map->groups->machine)
- root_dir = map->groups->machine->root_dir;
- else
- root_dir = "";
- snprintf(name, size, "%s%s", root_dir, self->long_name);
- break;
+ for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
+ struct symsrc *ss = &ss_[ss_pos];
+ bool next_slot = false;
- default:
- /*
- * If we wanted a full symtab but no image had one,
- * relax our requirements and repeat the search.
- */
- if (want_symtab) {
- want_symtab = 0;
- self->origin = DSO__ORIG_BUILD_ID_CACHE;
- } else
- continue;
- }
+ enum dso_binary_type symtab_type = binary_type_symtab[i];
- /* Name is now the name of the next image to try */
- fd = open(name, O_RDONLY);
- if (fd < 0)
+ if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
continue;
- ret = dso__load_sym(self, map, name, fd, filter, 0,
- want_symtab);
- close(fd);
+ if (dso__read_binary_type_filename(dso, symtab_type,
+ root_dir, name, PATH_MAX))
+ continue;
- /*
- * Some people seem to have debuginfo files _WITHOUT_ debug
- * info!?!?
- */
- if (!ret)
+ /* Name is now the name of the next image to try */
+ if (symsrc__init(ss, dso, name, symtab_type) < 0)
continue;
- if (ret > 0) {
- int nr_plt = dso__synthesize_plt_symbols(self, map, filter);
- if (nr_plt > 0)
- ret += nr_plt;
- break;
+ if (!syms_ss && symsrc__has_symtab(ss)) {
+ syms_ss = ss;
+ next_slot = true;
+ if (!dso->symsrc_filename)
+ dso->symsrc_filename = strdup(name);
+ }
+
+ if (!runtime_ss && symsrc__possibly_runtime(ss)) {
+ runtime_ss = ss;
+ next_slot = true;
+ }
+
+ if (next_slot) {
+ ss_pos++;
+
+ if (syms_ss && runtime_ss)
+ break;
+ } else {
+ symsrc__destroy(ss);
}
+
+ }
+
+ if (!runtime_ss && !syms_ss)
+ goto out_free;
+
+ if (runtime_ss && !syms_ss) {
+ syms_ss = runtime_ss;
}
+ /* We'll have to hope for the best */
+ if (!runtime_ss && syms_ss)
+ runtime_ss = syms_ss;
+
+ if (syms_ss)
+ ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
+ else
+ ret = -1;
+
+ if (ret > 0) {
+ int nr_plt;
+
+ nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
+ if (nr_plt > 0)
+ ret += nr_plt;
+ }
+
+ for (; ss_pos > 0; ss_pos--)
+ symsrc__destroy(&ss_[ss_pos - 1]);
+out_free:
free(name);
- if (ret < 0 && strstr(self->name, " (deleted)") != NULL)
+ if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
return 0;
return ret;
}
-struct map *map_groups__find_by_name(struct map_groups *self,
+struct map *map_groups__find_by_name(struct map_groups *mg,
enum map_type type, const char *name)
{
struct rb_node *nd;
- for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
if (map->dso && strcmp(map->dso->short_name, name) == 0)
@@ -1537,299 +1439,184 @@ struct map *map_groups__find_by_name(struct map_groups *self,
return NULL;
}
-static int dso__kernel_module_get_build_id(struct dso *self,
- const char *root_dir)
+int dso__load_vmlinux(struct dso *dso, struct map *map,
+ const char *vmlinux, bool vmlinux_allocated,
+ symbol_filter_t filter)
{
- char filename[PATH_MAX];
- /*
- * kernel module short names are of the form "[module]" and
- * we need just "module" here.
- */
- const char *name = self->short_name + 1;
-
- snprintf(filename, sizeof(filename),
- "%s/sys/module/%.*s/notes/.note.gnu.build-id",
- root_dir, (int)strlen(name) - 1, name);
-
- if (sysfs__read_build_id(filename, self->build_id,
- sizeof(self->build_id)) == 0)
- self->has_build_id = true;
+ int err = -1;
+ struct symsrc ss;
+ char symfs_vmlinux[PATH_MAX];
+ enum dso_binary_type symtab_type;
- return 0;
-}
+ if (vmlinux[0] == '/')
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
+ else
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
+ symbol_conf.symfs, vmlinux);
-static int map_groups__set_modules_path_dir(struct map_groups *self,
- const char *dir_name)
-{
- struct dirent *dent;
- DIR *dir = opendir(dir_name);
- int ret = 0;
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ else
+ symtab_type = DSO_BINARY_TYPE__VMLINUX;
- if (!dir) {
- pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
+ if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
return -1;
- }
-
- while ((dent = readdir(dir)) != NULL) {
- char path[PATH_MAX];
- struct stat st;
- /*sshfs might return bad dent->d_type, so we have to stat*/
- sprintf(path, "%s/%s", dir_name, dent->d_name);
- if (stat(path, &st))
- continue;
+ err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
+ symsrc__destroy(&ss);
- if (S_ISDIR(st.st_mode)) {
- if (!strcmp(dent->d_name, ".") ||
- !strcmp(dent->d_name, ".."))
- continue;
-
- snprintf(path, sizeof(path), "%s/%s",
- dir_name, dent->d_name);
- ret = map_groups__set_modules_path_dir(self, path);
- if (ret < 0)
- goto out;
- } else {
- char *dot = strrchr(dent->d_name, '.'),
- dso_name[PATH_MAX];
- struct map *map;
- char *long_name;
-
- if (dot == NULL || strcmp(dot, ".ko"))
- continue;
- snprintf(dso_name, sizeof(dso_name), "[%.*s]",
- (int)(dot - dent->d_name), dent->d_name);
-
- strxfrchar(dso_name, '-', '_');
- map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);
- if (map == NULL)
- continue;
-
- snprintf(path, sizeof(path), "%s/%s",
- dir_name, dent->d_name);
-
- long_name = strdup(path);
- if (long_name == NULL) {
- ret = -1;
- goto out;
- }
- dso__set_long_name(map->dso, long_name);
- map->dso->lname_alloc = 1;
- dso__kernel_module_get_build_id(map->dso, "");
- }
+ if (err > 0) {
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ else
+ dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
+ dso__set_long_name(dso, vmlinux, vmlinux_allocated);
+ dso__set_loaded(dso, map->type);
+ pr_debug("Using %s for symbols\n", symfs_vmlinux);
}
-out:
- closedir(dir);
- return ret;
+ return err;
}
-static char *get_kernel_version(const char *root_dir)
+int dso__load_vmlinux_path(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
{
- char version[PATH_MAX];
- FILE *file;
- char *name, *tmp;
- const char *prefix = "Linux version ";
-
- sprintf(version, "%s/proc/version", root_dir);
- file = fopen(version, "r");
- if (!file)
- return NULL;
+ int i, err = 0;
+ char *filename;
- version[0] = '\0';
- tmp = fgets(version, sizeof(version), file);
- fclose(file);
+ pr_debug("Looking at the vmlinux_path (%d entries long)\n",
+ vmlinux_path__nr_entries + 1);
- name = strstr(version, prefix);
- if (!name)
- return NULL;
- name += strlen(prefix);
- tmp = strchr(name, ' ');
- if (tmp)
- *tmp = '\0';
+ filename = dso__build_id_filename(dso, NULL, 0);
+ if (filename != NULL) {
+ err = dso__load_vmlinux(dso, map, filename, true, filter);
+ if (err > 0)
+ goto out;
+ free(filename);
+ }
- return strdup(name);
+ for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+ err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
+ if (err > 0)
+ break;
+ }
+out:
+ return err;
}
-static int machine__set_modules_path(struct machine *self)
+static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
- char *version;
- char modules_path[PATH_MAX];
+ char kallsyms_filename[PATH_MAX];
+ struct dirent *dent;
+ int ret = -1;
+ DIR *d;
- version = get_kernel_version(self->root_dir);
- if (!version)
+ d = opendir(dir);
+ if (!d)
return -1;
- snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
- self->root_dir, version);
- free(version);
-
- return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
-}
-
-/*
- * Constructor variant for modules (where we know from /proc/modules where
- * they are loaded) and for vmlinux, where only after we load all the
- * symbols we'll know where it starts and ends.
- */
-static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
-{
- struct map *self = calloc(1, (sizeof(*self) +
- (dso->kernel ? sizeof(struct kmap) : 0)));
- if (self != NULL) {
- /*
- * ->end will be filled after we load all the symbols
- */
- map__init(self, type, start, 0, 0, dso);
+ while (1) {
+ dent = readdir(d);
+ if (!dent)
+ break;
+ if (dent->d_type != DT_DIR)
+ continue;
+ scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
+ "%s/%s/kallsyms", dir, dent->d_name);
+ if (!validate_kcore_addresses(kallsyms_filename, map)) {
+ strlcpy(dir, kallsyms_filename, dir_sz);
+ ret = 0;
+ break;
+ }
}
- return self;
-}
+ closedir(d);
-struct map *machine__new_module(struct machine *self, u64 start,
- const char *filename)
-{
- struct map *map;
- struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);
-
- if (dso == NULL)
- return NULL;
-
- map = map__new2(start, dso, MAP__FUNCTION);
- if (map == NULL)
- return NULL;
-
- if (machine__is_host(self))
- dso->origin = DSO__ORIG_KMODULE;
- else
- dso->origin = DSO__ORIG_GUEST_KMODULE;
- map_groups__insert(&self->kmaps, map);
- return map;
+ return ret;
}
-static int machine__create_modules(struct machine *self)
+static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
- char *line = NULL;
- size_t n;
- FILE *file;
- struct map *map;
- const char *modules;
+ u8 host_build_id[BUILD_ID_SIZE];
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ bool is_host = false;
char path[PATH_MAX];
- if (machine__is_default_guest(self))
- modules = symbol_conf.default_guest_modules;
- else {
- sprintf(path, "%s/proc/modules", self->root_dir);
- modules = path;
+ if (!dso->has_build_id) {
+ /*
+ * Last resort, if we don't have a build-id and couldn't find
+ * any vmlinux file, try the running kernel kallsyms table.
+ */
+ goto proc_kallsyms;
}
- file = fopen(modules, "r");
- if (file == NULL)
- return -1;
+ if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
+ sizeof(host_build_id)) == 0)
+ is_host = dso__build_id_equal(dso, host_build_id);
- while (!feof(file)) {
- char name[PATH_MAX];
- u64 start;
- char *sep;
- int line_len;
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
- line_len = getline(&line, &n, file);
- if (line_len < 0)
- break;
+ scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
+ sbuild_id);
- if (!line)
- goto out_failure;
+ /* Use /proc/kallsyms if possible */
+ if (is_host) {
+ DIR *d;
+ int fd;
- line[--line_len] = '\0'; /* \n */
+ /* If no cached kcore go with /proc/kallsyms */
+ d = opendir(path);
+ if (!d)
+ goto proc_kallsyms;
+ closedir(d);
- sep = strrchr(line, 'x');
- if (sep == NULL)
- continue;
-
- hex2u64(sep + 1, &start);
+ /*
+	 * Do not check the build-id cache until we know we cannot use
+ * /proc/kcore.
+ */
+ fd = open("/proc/kcore", O_RDONLY);
+ if (fd != -1) {
+ close(fd);
+ /* If module maps match go with /proc/kallsyms */
+ if (!validate_kcore_addresses("/proc/kallsyms", map))
+ goto proc_kallsyms;
+ }
- sep = strchr(line, ' ');
- if (sep == NULL)
- continue;
+ /* Find kallsyms in build-id cache with kcore */
+ if (!find_matching_kcore(map, path, sizeof(path)))
+ return strdup(path);
- *sep = '\0';
-
- snprintf(name, sizeof(name), "[%s]", line);
- map = machine__new_module(self, start, name);
- if (map == NULL)
- goto out_delete_line;
- dso__kernel_module_get_build_id(map->dso, self->root_dir);
+ goto proc_kallsyms;
}
- free(line);
- fclose(file);
-
- return machine__set_modules_path(self);
+ /* Find kallsyms in build-id cache with kcore */
+ if (!find_matching_kcore(map, path, sizeof(path)))
+ return strdup(path);
-out_delete_line:
- free(line);
-out_failure:
- return -1;
-}
-
-static int dso__load_vmlinux(struct dso *self, struct map *map,
- const char *vmlinux, symbol_filter_t filter)
-{
- int err = -1, fd;
-
- fd = open(vmlinux, O_RDONLY);
- if (fd < 0)
- return -1;
+ scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
+ buildid_dir, sbuild_id);
- dso__set_loaded(self, map->type);
- err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0);
- close(fd);
-
- if (err > 0)
- pr_debug("Using %s for symbols\n", vmlinux);
-
- return err;
-}
-
-int dso__load_vmlinux_path(struct dso *self, struct map *map,
- symbol_filter_t filter)
-{
- int i, err = 0;
- char *filename;
-
- pr_debug("Looking at the vmlinux_path (%d entries long)\n",
- vmlinux_path__nr_entries + 1);
-
- filename = dso__build_id_filename(self, NULL, 0);
- if (filename != NULL) {
- err = dso__load_vmlinux(self, map, filename, filter);
- if (err > 0) {
- dso__set_long_name(self, filename);
- goto out;
- }
- free(filename);
+ if (access(path, F_OK)) {
+ pr_err("No kallsyms or vmlinux with build-id %s was found\n",
+ sbuild_id);
+ return NULL;
}
- for (i = 0; i < vmlinux_path__nr_entries; ++i) {
- err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
- if (err > 0) {
- dso__set_long_name(self, strdup(vmlinux_path[i]));
- break;
- }
- }
-out:
- return err;
+ return strdup(path);
+
+proc_kallsyms:
+ return strdup("/proc/kallsyms");
}
-static int dso__load_kernel_sym(struct dso *self, struct map *map,
+static int dso__load_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter)
{
int err;
const char *kallsyms_filename = NULL;
char *kallsyms_allocated_filename = NULL;
/*
- * Step 1: if the user specified a vmlinux filename, use it and only
- * it, reporting errors to the user if it cannot be used.
+ * Step 1: if the user specified a kallsyms or vmlinux filename, use
+ * it and only it, reporting errors to the user if it cannot be used.
*
* For instance, try to analyse an ARM perf.data file _without_ a
* build-id, or if the user specifies the wrong path to the right
@@ -1842,79 +1629,40 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
* validation in dso__load_vmlinux and will bail out if they don't
* match.
*/
- if (symbol_conf.vmlinux_name != NULL) {
- err = dso__load_vmlinux(self, map,
- symbol_conf.vmlinux_name, filter);
- if (err > 0) {
- dso__set_long_name(self,
- strdup(symbol_conf.vmlinux_name));
- goto out_fixup;
- }
- return err;
+ if (symbol_conf.kallsyms_name != NULL) {
+ kallsyms_filename = symbol_conf.kallsyms_name;
+ goto do_kallsyms;
}
- if (vmlinux_path != NULL) {
- err = dso__load_vmlinux_path(self, map, filter);
- if (err > 0)
- goto out_fixup;
+ if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
+ return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
+ false, filter);
}
- /*
- * Say the kernel DSO was created when processing the build-id header table,
- * we have a build-id, so check if it is the same as the running kernel,
- * using it if it is.
- */
- if (self->has_build_id) {
- u8 kallsyms_build_id[BUILD_ID_SIZE];
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
- sizeof(kallsyms_build_id)) == 0) {
- if (dso__build_id_equal(self, kallsyms_build_id)) {
- kallsyms_filename = "/proc/kallsyms";
- goto do_kallsyms;
- }
- }
- /*
- * Now look if we have it on the build-id cache in
- * $HOME/.debug/[kernel.kallsyms].
- */
- build_id__sprintf(self->build_id, sizeof(self->build_id),
- sbuild_id);
+ if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
+ err = dso__load_vmlinux_path(dso, map, filter);
+ if (err > 0)
+ return err;
+ }
- if (asprintf(&kallsyms_allocated_filename,
- "%s/.debug/[kernel.kallsyms]/%s",
- getenv("HOME"), sbuild_id) == -1) {
- pr_err("Not enough memory for kallsyms file lookup\n");
- return -1;
- }
+ /* do not try local files if a symfs was given */
+ if (symbol_conf.symfs[0] != 0)
+ return -1;
- kallsyms_filename = kallsyms_allocated_filename;
+ kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
+ if (!kallsyms_allocated_filename)
+ return -1;
- if (access(kallsyms_filename, F_OK)) {
- pr_err("No kallsyms or vmlinux with build-id %s "
- "was found\n", sbuild_id);
- free(kallsyms_allocated_filename);
- return -1;
- }
- } else {
- /*
- * Last resort, if we don't have a build-id and couldn't find
- * any vmlinux file, try the running kernel kallsyms table.
- */
- kallsyms_filename = "/proc/kallsyms";
- }
+ kallsyms_filename = kallsyms_allocated_filename;
do_kallsyms:
- err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
+ err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
free(kallsyms_allocated_filename);
- if (err > 0) {
-out_fixup:
- if (kallsyms_filename != NULL)
- dso__set_long_name(self, strdup("[kernel.kallsyms]"));
+ if (err > 0 && !dso__is_kcore(dso)) {
+ dso__set_long_name(dso, "[kernel.kallsyms]", false);
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1922,8 +1670,8 @@ out_fixup:
return err;
}
-static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
- symbol_filter_t filter)
+static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
{
int err;
const char *kallsyms_filename = NULL;
@@ -1943,9 +1691,10 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
* Or use file guest_kallsyms inputted by user on commandline
*/
if (symbol_conf.default_guest_vmlinux_name != NULL) {
- err = dso__load_vmlinux(self, map,
- symbol_conf.default_guest_vmlinux_name, filter);
- goto out_try_fixup;
+ err = dso__load_vmlinux(dso, map,
+ symbol_conf.default_guest_vmlinux_name,
+ false, filter);
+ return err;
}
kallsyms_filename = symbol_conf.default_guest_kallsyms;
@@ -1956,16 +1705,12 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
kallsyms_filename = path;
}
- err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
+ err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
-
-out_try_fixup:
- if (err > 0) {
- if (kallsyms_filename != NULL) {
- machine__mmap_name(machine, path, sizeof(path));
- dso__set_long_name(self, strdup(path));
- }
+ if (err > 0 && !dso__is_kcore(dso)) {
+ machine__mmap_name(machine, path, sizeof(path));
+ dso__set_long_name(dso, strdup(path), true);
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1973,234 +1718,12 @@ out_try_fixup:
return err;
}
-static void dsos__add(struct list_head *head, struct dso *dso)
-{
- list_add_tail(&dso->node, head);
-}
-
-static struct dso *dsos__find(struct list_head *head, const char *name)
-{
- struct dso *pos;
-
- list_for_each_entry(pos, head, node)
- if (strcmp(pos->long_name, name) == 0)
- return pos;
- return NULL;
-}
-
-struct dso *__dsos__findnew(struct list_head *head, const char *name)
-{
- struct dso *dso = dsos__find(head, name);
-
- if (!dso) {
- dso = dso__new(name);
- if (dso != NULL) {
- dsos__add(head, dso);
- dso__set_basename(dso);
- }
- }
-
- return dso;
-}
-
-size_t __dsos__fprintf(struct list_head *head, FILE *fp)
-{
- struct dso *pos;
- size_t ret = 0;
-
- list_for_each_entry(pos, head, node) {
- int i;
- for (i = 0; i < MAP__NR_TYPES; ++i)
- ret += dso__fprintf(pos, i, fp);
- }
-
- return ret;
-}
-
-size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
-{
- struct rb_node *nd;
- size_t ret = 0;
-
- for (nd = rb_first(self); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret += __dsos__fprintf(&pos->kernel_dsos, fp);
- ret += __dsos__fprintf(&pos->user_dsos, fp);
- }
-
- return ret;
-}
-
-static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
- bool with_hits)
-{
- struct dso *pos;
- size_t ret = 0;
-
- list_for_each_entry(pos, head, node) {
- if (with_hits && !pos->hit)
- continue;
- ret += dso__fprintf_buildid(pos, fp);
- ret += fprintf(fp, " %s\n", pos->long_name);
- }
- return ret;
-}
-
-size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits)
-{
- return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) +
- __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits);
-}
-
-size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
-{
- struct rb_node *nd;
- size_t ret = 0;
-
- for (nd = rb_first(self); nd; nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
- }
- return ret;
-}
-
-struct dso *dso__new_kernel(const char *name)
-{
- struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
-
- if (self != NULL) {
- dso__set_short_name(self, "[kernel]");
- self->kernel = DSO_TYPE_KERNEL;
- }
-
- return self;
-}
-
-static struct dso *dso__new_guest_kernel(struct machine *machine,
- const char *name)
-{
- char bf[PATH_MAX];
- struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));
-
- if (self != NULL) {
- dso__set_short_name(self, "[guest.kernel]");
- self->kernel = DSO_TYPE_GUEST_KERNEL;
- }
-
- return self;
-}
-
-void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
-{
- char path[PATH_MAX];
-
- if (machine__is_default_guest(machine))
- return;
- sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
- if (sysfs__read_build_id(path, self->build_id,
- sizeof(self->build_id)) == 0)
- self->has_build_id = true;
-}
-
-static struct dso *machine__create_kernel(struct machine *self)
-{
- const char *vmlinux_name = NULL;
- struct dso *kernel;
-
- if (machine__is_host(self)) {
- vmlinux_name = symbol_conf.vmlinux_name;
- kernel = dso__new_kernel(vmlinux_name);
- } else {
- if (machine__is_default_guest(self))
- vmlinux_name = symbol_conf.default_guest_vmlinux_name;
- kernel = dso__new_guest_kernel(self, vmlinux_name);
- }
-
- if (kernel != NULL) {
- dso__read_running_kernel_build_id(kernel, self);
- dsos__add(&self->kernel_dsos, kernel);
- }
- return kernel;
-}
-
-int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
-{
- enum map_type type;
-
- for (type = 0; type < MAP__NR_TYPES; ++type) {
- struct kmap *kmap;
-
- self->vmlinux_maps[type] = map__new2(0, kernel, type);
- if (self->vmlinux_maps[type] == NULL)
- return -1;
-
- self->vmlinux_maps[type]->map_ip =
- self->vmlinux_maps[type]->unmap_ip = identity__map_ip;
-
- kmap = map__kmap(self->vmlinux_maps[type]);
- kmap->kmaps = &self->kmaps;
- map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
- }
-
- return 0;
-}
-
-void machine__destroy_kernel_maps(struct machine *self)
-{
- enum map_type type;
-
- for (type = 0; type < MAP__NR_TYPES; ++type) {
- struct kmap *kmap;
-
- if (self->vmlinux_maps[type] == NULL)
- continue;
-
- kmap = map__kmap(self->vmlinux_maps[type]);
- map_groups__remove(&self->kmaps, self->vmlinux_maps[type]);
- if (kmap->ref_reloc_sym) {
- /*
- * ref_reloc_sym is shared among all maps, so free just
- * on one of them.
- */
- if (type == MAP__FUNCTION) {
- free((char *)kmap->ref_reloc_sym->name);
- kmap->ref_reloc_sym->name = NULL;
- free(kmap->ref_reloc_sym);
- }
- kmap->ref_reloc_sym = NULL;
- }
-
- map__delete(self->vmlinux_maps[type]);
- self->vmlinux_maps[type] = NULL;
- }
-}
-
-int machine__create_kernel_maps(struct machine *self)
-{
- struct dso *kernel = machine__create_kernel(self);
-
- if (kernel == NULL ||
- __machine__create_kernel_maps(self, kernel) < 0)
- return -1;
-
- if (symbol_conf.use_modules && machine__create_modules(self) < 0)
- pr_debug("Problems creating module maps, continuing anyway...\n");
- /*
- * Now that we have all the maps created, just set the ->end of them:
- */
- map_groups__fixup_end(&self->kmaps);
- return 0;
-}
-
static void vmlinux_path__exit(void)
{
- while (--vmlinux_path__nr_entries >= 0) {
- free(vmlinux_path[vmlinux_path__nr_entries]);
- vmlinux_path[vmlinux_path__nr_entries] = NULL;
- }
+ while (--vmlinux_path__nr_entries >= 0)
+ zfree(&vmlinux_path[vmlinux_path__nr_entries]);
- free(vmlinux_path);
- vmlinux_path = NULL;
+ zfree(&vmlinux_path);
}
static int vmlinux_path__init(void)
@@ -2208,9 +1731,6 @@ static int vmlinux_path__init(void)
struct utsname uts;
char bf[PATH_MAX];
- if (uname(&uts) < 0)
- return -1;
-
vmlinux_path = malloc(sizeof(char *) * 5);
if (vmlinux_path == NULL)
return -1;
@@ -2223,6 +1743,14 @@ static int vmlinux_path__init(void)
if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
goto out_fail;
++vmlinux_path__nr_entries;
+
+ /* only try running kernel version if no symfs was given */
+ if (symbol_conf.symfs[0] != 0)
+ return 0;
+
+ if (uname(&uts) < 0)
+ return -1;
+
snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
@@ -2247,26 +1775,7 @@ out_fail:
return -1;
}
-size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp)
-{
- int i;
- size_t printed = 0;
- struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso;
-
- if (kdso->has_build_id) {
- char filename[PATH_MAX];
- if (dso__build_id_filename(kdso, filename, sizeof(filename)))
- printed += fprintf(fp, "[0] %s\n", filename);
- }
-
- for (i = 0; i < vmlinux_path__nr_entries; ++i)
- printed += fprintf(fp, "[%d] %s\n",
- i + kdso->has_build_id, vmlinux_path[i]);
-
- return printed;
-}
-
-static int setup_list(struct strlist **list, const char *list_str,
+int setup_list(struct strlist **list, const char *list_str,
const char *list_name)
{
if (list_str == NULL)
@@ -2280,12 +1789,36 @@ static int setup_list(struct strlist **list, const char *list_str,
return 0;
}
+static bool symbol__read_kptr_restrict(void)
+{
+ bool value = false;
+
+ if (geteuid() != 0) {
+ FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
+ if (fp != NULL) {
+ char line[8];
+
+ if (fgets(line, sizeof(line), fp) != NULL)
+ value = atoi(line) != 0;
+
+ fclose(fp);
+ }
+ }
+
+ return value;
+}
+
int symbol__init(void)
{
+ const char *symfs;
+
if (symbol_conf.initialized)
return 0;
- elf_version(EV_CURRENT);
+ symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
+
+ symbol__elf_init();
+
if (symbol_conf.sort_by_name)
symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
sizeof(struct symbol));
@@ -2310,13 +1843,27 @@ int symbol__init(void)
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_comm_list;
+ /*
+	 * A path to symbols of "/" is identical to "";
+	 * reset it here for simplicity.
+ */
+ symfs = realpath(symbol_conf.symfs, NULL);
+ if (symfs == NULL)
+ symfs = symbol_conf.symfs;
+ if (strcmp(symfs, "/") == 0)
+ symbol_conf.symfs = "";
+ if (symfs != symbol_conf.symfs)
+ free((void *)symfs);
+
+ symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
+
symbol_conf.initialized = true;
return 0;
-out_free_dso_list:
- strlist__delete(symbol_conf.dso_list);
out_free_comm_list:
strlist__delete(symbol_conf.comm_list);
+out_free_dso_list:
+ strlist__delete(symbol_conf.dso_list);
return -1;
}
@@ -2331,143 +1878,3 @@ void symbol__exit(void)
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.initialized = false;
}
-
-int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
-{
- struct machine *machine = machines__findnew(self, pid);
-
- if (machine == NULL)
- return -1;
-
- return machine__create_kernel_maps(machine);
-}
-
-static int hex(char ch)
-{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- return -1;
-}
-
-/*
- * While we find nice hex chars, build a long_val.
- * Return number of chars processed.
- */
-int hex2u64(const char *ptr, u64 *long_val)
-{
- const char *p = ptr;
- *long_val = 0;
-
- while (*p) {
- const int hex_val = hex(*p);
-
- if (hex_val < 0)
- break;
-
- *long_val = (*long_val << 4) | hex_val;
- p++;
- }
-
- return p - ptr;
-}
-
-char *strxfrchar(char *s, char from, char to)
-{
- char *p = s;
-
- while ((p = strchr(p, from)) != NULL)
- *p++ = to;
-
- return s;
-}
-
-int machines__create_guest_kernel_maps(struct rb_root *self)
-{
- int ret = 0;
- struct dirent **namelist = NULL;
- int i, items = 0;
- char path[PATH_MAX];
- pid_t pid;
-
- if (symbol_conf.default_guest_vmlinux_name ||
- symbol_conf.default_guest_modules ||
- symbol_conf.default_guest_kallsyms) {
- machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
- }
-
- if (symbol_conf.guestmount) {
- items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
- if (items <= 0)
- return -ENOENT;
- for (i = 0; i < items; i++) {
- if (!isdigit(namelist[i]->d_name[0])) {
- /* Filter out . and .. */
- continue;
- }
- pid = atoi(namelist[i]->d_name);
- sprintf(path, "%s/%s/proc/kallsyms",
- symbol_conf.guestmount,
- namelist[i]->d_name);
- ret = access(path, R_OK);
- if (ret) {
- pr_debug("Can't access file %s\n", path);
- goto failure;
- }
- machines__create_kernel_maps(self, pid);
- }
-failure:
- free(namelist);
- }
-
- return ret;
-}
-
-void machines__destroy_guest_kernel_maps(struct rb_root *self)
-{
- struct rb_node *next = rb_first(self);
-
- while (next) {
- struct machine *pos = rb_entry(next, struct machine, rb_node);
-
- next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, self);
- machine__delete(pos);
- }
-}
-
-int machine__load_kallsyms(struct machine *self, const char *filename,
- enum map_type type, symbol_filter_t filter)
-{
- struct map *map = self->vmlinux_maps[type];
- int ret = dso__load_kallsyms(map->dso, filename, map, filter);
-
- if (ret > 0) {
- dso__set_loaded(map->dso, type);
- /*
- * Since /proc/kallsyms will have multiple sessions for the
- * kernel, with modules between them, fixup the end of all
- * sections.
- */
- __map_groups__fixup_end(&self->kmaps, type);
- }
-
- return ret;
-}
-
-int machine__load_vmlinux_path(struct machine *self, enum map_type type,
- symbol_filter_t filter)
-{
- struct map *map = self->vmlinux_maps[type];
- int ret = dso__load_vmlinux_path(map->dso, map, filter);
-
- if (ret > 0) {
- dso__set_loaded(map->dso, type);
- map__reloc_vmlinux(map);
- }
-
- return ret;
-}
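The rewritten dso__load() above keeps at most two ELF sources per DSO: syms_ss, the image that carries a symbol table, and runtime_ss, the image whose sections (e.g. the PLT) describe the running binary. The fallback rules between the two, distilled from the hunk above into a hypothetical helper (the function name is illustrative and assumes "symbol.h" is included):

	static int pick_sources(struct symsrc **syms_ss, struct symsrc **runtime_ss)
	{
		/* Nothing usable was found at all. */
		if (!*runtime_ss && !*syms_ss)
			return -1;

		/* The runtime image happens to carry the symtab as well. */
		if (*runtime_ss && !*syms_ss)
			*syms_ss = *runtime_ss;

		/* Only a symtab source: hope it also has the runtime sections. */
		if (!*runtime_ss && *syms_ss)
			*runtime_ss = *syms_ss;

		return 0;
	}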
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 038f2201ee0..615c752dd76 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -5,40 +5,57 @@
#include <stdbool.h>
#include <stdint.h>
#include "map.h"
+#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
+#include <byteswap.h>
+#include <libgen.h>
+#include "build-id.h"
+#include "event.h"
+
+#ifdef HAVE_LIBELF_SUPPORT
+#include <libelf.h>
+#include <gelf.h>
+#endif
+#include <elf.h>
+
+#include "dso.h"
-#ifdef HAVE_CPLUS_DEMANGLE
+#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);
-static inline char *bfd_demangle(void __used *v, const char *c, int i)
+static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
-static inline char *bfd_demangle(void __used *v, const char __used *c,
- int __used i)
+static inline char *bfd_demangle(void __maybe_unused *v,
+ const char __maybe_unused *c,
+ int __maybe_unused i)
{
return NULL;
}
#else
+#define PACKAGE "perf"
#include <bfd.h>
#endif
#endif
-int hex2u64(const char *ptr, u64 *val);
-char *strxfrchar(char *s, char from, char to);
-
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
*/
-#ifdef LIBELF_NO_MMAP
-# define PERF_ELF_C_READ_MMAP ELF_C_READ
-#else
+#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP
+#else
+# define PERF_ELF_C_READ_MMAP ELF_C_READ
+#endif
+
+#ifdef HAVE_LIBELF_SUPPORT
+extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name, size_t *idx);
#endif
#ifndef DMGL_PARAMS
@@ -46,32 +63,64 @@ char *strxfrchar(char *s, char from, char to);
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
#endif
-#define BUILD_ID_SIZE 20
-
+/** struct symbol - symtab entry
+ *
+ * @ignore - resolvable but tools ignore it (e.g. idle routines)
+ */
struct symbol {
struct rb_node rb_node;
u64 start;
u64 end;
u16 namelen;
u8 binding;
+ bool ignore;
char name[0];
};
-void symbol__delete(struct symbol *self);
+void symbol__delete(struct symbol *sym);
+void symbols__delete(struct rb_root *symbols);
+
+/* symbols__for_each_entry - iterate over symbols (rb_root)
+ *
+ * @symbols: the rb_root of symbols
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @nd: the 'struct rb_node *' to use as a temporary storage
+ */
+#define symbols__for_each_entry(symbols, pos, nd) \
+ for (nd = rb_first(symbols); \
+ nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
+ nd = rb_next(nd))
+
+static inline size_t symbol__size(const struct symbol *sym)
+{
+ return sym->end - sym->start + 1;
+}
struct strlist;
struct symbol_conf {
unsigned short priv_size;
+ unsigned short nr_events;
bool try_vmlinux_path,
+ ignore_vmlinux,
+ show_kernel_path,
use_modules,
sort_by_name,
show_nr_samples,
+ show_total_period,
use_callchain,
+ cumulate_callchain,
exclude_other,
show_cpu_utilization,
- initialized;
+ initialized,
+ kptr_restrict,
+ annotate_asm_raw,
+ annotate_src,
+ event_group,
+ demangle,
+ filter_relative;
const char *vmlinux_name,
+ *kallsyms_name,
*source_prefix,
*field_sep;
const char *default_guest_vmlinux_name,
@@ -84,14 +133,21 @@ struct symbol_conf {
*col_width_list_str;
struct strlist *dso_list,
*comm_list,
- *sym_list;
+ *sym_list,
+ *dso_from_list,
+ *dso_to_list,
+ *sym_from_list,
+ *sym_to_list;
+ const char *symfs;
};
extern struct symbol_conf symbol_conf;
+extern int vmlinux_path__nr_entries;
+extern char **vmlinux_path;
-static inline void *symbol__priv(struct symbol *self)
+static inline void *symbol__priv(struct symbol *sym)
{
- return ((void *)self) - symbol_conf.priv_size;
+ return ((void *)sym) - symbol_conf.priv_size;
}
struct ref_reloc_sym {
@@ -107,126 +163,135 @@ struct map_symbol {
bool has_children;
};
+struct addr_map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ u64 al_addr;
+};
+
+struct branch_info {
+ struct addr_map_symbol from;
+ struct addr_map_symbol to;
+ struct branch_flags flags;
+};
+
+struct mem_info {
+ struct addr_map_symbol iaddr;
+ struct addr_map_symbol daddr;
+ union perf_mem_data_src data_src;
+};
+
struct addr_location {
+ struct machine *machine;
struct thread *thread;
struct map *map;
struct symbol *sym;
u64 addr;
char level;
- bool filtered;
+ u8 filtered;
u8 cpumode;
s32 cpu;
};
-enum dso_kernel_type {
- DSO_TYPE_USER = 0,
- DSO_TYPE_KERNEL,
- DSO_TYPE_GUEST_KERNEL
-};
-
-struct dso {
- struct list_head node;
- struct rb_root symbols[MAP__NR_TYPES];
- struct rb_root symbol_names[MAP__NR_TYPES];
- enum dso_kernel_type kernel;
- u8 adjust_symbols:1;
- u8 slen_calculated:1;
- u8 has_build_id:1;
- u8 hit:1;
- u8 annotate_warned:1;
- u8 sname_alloc:1;
- u8 lname_alloc:1;
- unsigned char origin;
- u8 sorted_by_name;
- u8 loaded;
- u8 build_id[BUILD_ID_SIZE];
- const char *short_name;
- char *long_name;
- u16 long_name_len;
- u16 short_name_len;
- char name[0];
-};
+struct symsrc {
+ char *name;
+ int fd;
+ enum dso_binary_type type;
-struct dso *dso__new(const char *name);
-struct dso *dso__new_kernel(const char *name);
-void dso__delete(struct dso *self);
+#ifdef HAVE_LIBELF_SUPPORT
+ Elf *elf;
+ GElf_Ehdr ehdr;
-int dso__name_len(const struct dso *self);
+ Elf_Scn *opdsec;
+ size_t opdidx;
+ GElf_Shdr opdshdr;
-bool dso__loaded(const struct dso *self, enum map_type type);
-bool dso__sorted_by_name(const struct dso *self, enum map_type type);
+ Elf_Scn *symtab;
+ GElf_Shdr symshdr;
-static inline void dso__set_loaded(struct dso *self, enum map_type type)
-{
- self->loaded |= (1 << type);
-}
+ Elf_Scn *dynsym;
+ size_t dynsym_idx;
+ GElf_Shdr dynshdr;
-void dso__sort_by_name(struct dso *self, enum map_type type);
-
-struct dso *__dsos__findnew(struct list_head *head, const char *name);
+ bool adjust_symbols;
+#endif
+};
-int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
-int dso__load_vmlinux_path(struct dso *self, struct map *map,
+void symsrc__destroy(struct symsrc *ss);
+int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
+ enum dso_binary_type type);
+bool symsrc__has_symtab(struct symsrc *ss);
+bool symsrc__possibly_runtime(struct symsrc *ss);
+
+int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter);
+int dso__load_vmlinux(struct dso *dso, struct map *map,
+ const char *vmlinux, bool vmlinux_allocated,
+ symbol_filter_t filter);
+int dso__load_vmlinux_path(struct dso *dso, struct map *map,
symbol_filter_t filter);
-int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,
+int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
symbol_filter_t filter);
-int machine__load_kallsyms(struct machine *self, const char *filename,
- enum map_type type, symbol_filter_t filter);
-int machine__load_vmlinux_path(struct machine *self, enum map_type type,
- symbol_filter_t filter);
-
-size_t __dsos__fprintf(struct list_head *head, FILE *fp);
-
-size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits);
-size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
-size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
-
-size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
-size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp);
-size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
-
-enum dso_origin {
- DSO__ORIG_KERNEL = 0,
- DSO__ORIG_GUEST_KERNEL,
- DSO__ORIG_JAVA_JIT,
- DSO__ORIG_BUILD_ID_CACHE,
- DSO__ORIG_FEDORA,
- DSO__ORIG_UBUNTU,
- DSO__ORIG_BUILDID,
- DSO__ORIG_DSO,
- DSO__ORIG_GUEST_KMODULE,
- DSO__ORIG_KMODULE,
- DSO__ORIG_NOT_FOUND,
-};
-char dso__symtab_origin(const struct dso *self);
-void dso__set_long_name(struct dso *self, char *name);
-void dso__set_build_id(struct dso *self, void *build_id);
-void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine);
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
-struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
+ u64 addr);
+struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
-bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
-int build_id__sprintf(const u8 *self, int len, char *bf);
-int kallsyms__parse(const char *filename, void *arg,
- int (*process_symbol)(void *arg, const char *name,
- char type, u64 start));
-
-void machine__destroy_kernel_maps(struct machine *self);
-int __machine__create_kernel_maps(struct machine *self, struct dso *kernel);
-int machine__create_kernel_maps(struct machine *self);
-
-int machines__create_kernel_maps(struct rb_root *self, pid_t pid);
-int machines__create_guest_kernel_maps(struct rb_root *self);
-void machines__destroy_guest_kernel_maps(struct rb_root *self);
+int modules__parse(const char *filename, void *arg,
+ int (*process_module)(void *arg, const char *name,
+ u64 start));
+int filename__read_debuglink(const char *filename, char *debuglink,
+ size_t size);
int symbol__init(void);
void symbol__exit(void);
+void symbol__elf_init(void);
+struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
+size_t symbol__fprintf_symname_offs(const struct symbol *sym,
+ const struct addr_location *al, FILE *fp);
+size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
+size_t symbol__fprintf(struct symbol *sym, FILE *fp);
bool symbol_type__is_a(char symbol_type, enum map_type map_type);
+bool symbol__restricted_filename(const char *filename,
+ const char *restricted_filename);
+bool symbol__is_idle(struct symbol *sym);
+
+int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ struct symsrc *runtime_ss, symbol_filter_t filter,
+ int kmodule);
+int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss,
+ struct map *map, symbol_filter_t filter);
+
+void symbols__insert(struct rb_root *symbols, struct symbol *sym);
+void symbols__fixup_duplicate(struct rb_root *symbols);
+void symbols__fixup_end(struct rb_root *symbols);
+void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
+
+typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
+int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+ bool *is_64_bit);
+
+#define PERF_KCORE_EXTRACT "/tmp/perf-kcore-XXXXXX"
+
+struct kcore_extract {
+ char *kcore_filename;
+ u64 addr;
+ u64 offs;
+ u64 len;
+ char extract_filename[sizeof(PERF_KCORE_EXTRACT)];
+ int fd;
+};
+
+int kcore_extract__create(struct kcore_extract *kce);
+void kcore_extract__delete(struct kcore_extract *kce);
+
+int kcore_copy(const char *from_dir, const char *to_dir);
+int compare_proc_modules(const char *from, const char *to);
-size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp);
+int setup_list(struct strlist **list, const char *list_str,
+ const char *list_name);
#endif /* __PERF_SYMBOL */
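The header above adds the symbols__for_each_entry() iterator and the symbol__size() helper. A small usage sketch, assuming "symbol.h" is included and an rb_root of symbols populated elsewhere (the helper name is hypothetical):

	static size_t symbols__total_size(struct rb_root *symbols)
	{
		struct rb_node *nd;
		struct symbol *pos;
		size_t total = 0;

		symbols__for_each_entry(symbols, pos, nd)
			total += symbol__size(pos);	/* end - start + 1 */

		return total;
	}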
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
new file mode 100644
index 00000000000..e74c5963dc7
--- /dev/null
+++ b/tools/perf/util/target.c
@@ -0,0 +1,158 @@
+/*
+ * Helper functions for handling target threads/cpus
+ *
+ * Copyright (C) 2012, LG Electronics, Namhyung Kim <namhyung.kim@lge.com>
+ *
+ * Released under the GPL v2.
+ */
+
+#include "target.h"
+#include "debug.h"
+
+#include <pwd.h>
+#include <string.h>
+
+
+enum target_errno target__validate(struct target *target)
+{
+ enum target_errno ret = TARGET_ERRNO__SUCCESS;
+
+ if (target->pid)
+ target->tid = target->pid;
+
+ /* CPU and PID are mutually exclusive */
+ if (target->tid && target->cpu_list) {
+ target->cpu_list = NULL;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
+ }
+
+ /* UID and PID are mutually exclusive */
+ if (target->tid && target->uid_str) {
+ target->uid_str = NULL;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__PID_OVERRIDE_UID;
+ }
+
+ /* UID and CPU are mutually exclusive */
+ if (target->uid_str && target->cpu_list) {
+ target->cpu_list = NULL;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
+ }
+
+ /* PID and SYSTEM are mutually exclusive */
+ if (target->tid && target->system_wide) {
+ target->system_wide = false;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
+ }
+
+ /* UID and SYSTEM are mutually exclusive */
+ if (target->uid_str && target->system_wide) {
+ target->system_wide = false;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
+ }
+
+ /* THREAD and SYSTEM/CPU are mutually exclusive */
+ if (target->per_thread && (target->system_wide || target->cpu_list)) {
+ target->per_thread = false;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD;
+ }
+
+ return ret;
+}
+
+enum target_errno target__parse_uid(struct target *target)
+{
+ struct passwd pwd, *result;
+ char buf[1024];
+ const char *str = target->uid_str;
+
+ target->uid = UINT_MAX;
+ if (str == NULL)
+ return TARGET_ERRNO__SUCCESS;
+
+ /* Try user name first */
+ getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
+
+ if (result == NULL) {
+ /*
+		 * The user name was not found. Maybe it's a UID number.
+ */
+ char *endptr;
+ int uid = strtol(str, &endptr, 10);
+
+ if (*endptr != '\0')
+ return TARGET_ERRNO__INVALID_UID;
+
+ getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
+
+ if (result == NULL)
+ return TARGET_ERRNO__USER_NOT_FOUND;
+ }
+
+ target->uid = result->pw_uid;
+ return TARGET_ERRNO__SUCCESS;
+}
+
+/*
+ * This must have the same ordering as the enum target_errno.
+ */
+static const char *target__error_str[] = {
+ "PID/TID switch overriding CPU",
+ "PID/TID switch overriding UID",
+ "UID switch overriding CPU",
+ "PID/TID switch overriding SYSTEM",
+ "UID switch overriding SYSTEM",
+ "SYSTEM/CPU switch overriding PER-THREAD",
+ "Invalid User: %s",
+ "Problems obtaining information for user %s",
+};
+
+int target__strerror(struct target *target, int errnum,
+ char *buf, size_t buflen)
+{
+ int idx;
+ const char *msg;
+
+ BUG_ON(buflen == 0);
+
+ if (errnum >= 0) {
+ const char *err = strerror_r(errnum, buf, buflen);
+
+ if (err != buf) {
+ size_t len = strlen(err);
+ memcpy(buf, err, min(buflen - 1, len));
+ *(buf + min(buflen - 1, len)) = '\0';
+ }
+
+ return 0;
+ }
+
+ if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
+ return -1;
+
+ idx = errnum - __TARGET_ERRNO__START;
+ msg = target__error_str[idx];
+
+ switch (errnum) {
+ case TARGET_ERRNO__PID_OVERRIDE_CPU ...
+ TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD:
+ snprintf(buf, buflen, "%s", msg);
+ break;
+
+ case TARGET_ERRNO__INVALID_UID:
+ case TARGET_ERRNO__USER_NOT_FOUND:
+ snprintf(buf, buflen, msg, target->uid_str);
+ break;
+
+ default:
+ /* cannot reach here */
+ break;
+ }
+
+ return 0;
+}
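target__validate() silently drops the weaker of two conflicting options and reports the override via its return value; target__strerror() turns that value into a message. A hypothetical caller sketch (not part of this file, assuming <stdio.h> and "target.h" are included) in which a PID spec overrides a CPU list:

	static void report_target_conflicts(void)
	{
		struct target target = {
			.pid	  = "1234",
			.cpu_list = "0-3",	/* conflicts with the PID and will be dropped */
		};
		enum target_errno err = target__validate(&target);

		if (err != TARGET_ERRNO__SUCCESS) {
			char errbuf[512];

			target__strerror(&target, err, errbuf, sizeof(errbuf));
			fprintf(stderr, "warning: %s\n", errbuf);
		}
	}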
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
new file mode 100644
index 00000000000..7381b1ca404
--- /dev/null
+++ b/tools/perf/util/target.h
@@ -0,0 +1,79 @@
+#ifndef _PERF_TARGET_H
+#define _PERF_TARGET_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+struct target {
+ const char *pid;
+ const char *tid;
+ const char *cpu_list;
+ const char *uid_str;
+ uid_t uid;
+ bool system_wide;
+ bool uses_mmap;
+ bool default_per_cpu;
+ bool per_thread;
+};
+
+enum target_errno {
+ TARGET_ERRNO__SUCCESS = 0,
+
+ /*
+	 * Choose an arbitrarily large negative number so as not to clash with
+	 * standard errno, since SUS requires errno values to be distinct and positive.
+ * See 'Issue 6' in the link below.
+ *
+ * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
+ */
+ __TARGET_ERRNO__START = -10000,
+
+ /* for target__validate() */
+ TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START,
+ TARGET_ERRNO__PID_OVERRIDE_UID,
+ TARGET_ERRNO__UID_OVERRIDE_CPU,
+ TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
+ TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
+ TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD,
+
+ /* for target__parse_uid() */
+ TARGET_ERRNO__INVALID_UID,
+ TARGET_ERRNO__USER_NOT_FOUND,
+
+ __TARGET_ERRNO__END,
+};
+
+enum target_errno target__validate(struct target *target);
+enum target_errno target__parse_uid(struct target *target);
+
+int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
+
+static inline bool target__has_task(struct target *target)
+{
+ return target->tid || target->pid || target->uid_str;
+}
+
+static inline bool target__has_cpu(struct target *target)
+{
+ return target->system_wide || target->cpu_list;
+}
+
+static inline bool target__none(struct target *target)
+{
+ return !target__has_task(target) && !target__has_cpu(target);
+}
+
+static inline bool target__uses_dummy_map(struct target *target)
+{
+ bool use_dummy = false;
+
+ if (target->default_per_cpu)
+ use_dummy = target->per_thread ? true : false;
+ else if (target__has_task(target) ||
+ (!target__has_cpu(target) && !target->uses_mmap))
+ use_dummy = true;
+
+ return use_dummy;
+}
+
+#endif /* _PERF_TARGET_H */
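The inline predicates above classify a target once it has been validated: target__has_task() covers -p/-t/-u, target__has_cpu() covers -a/-C, and target__none() is the "profile the forked workload" default. A short illustration, assuming "target.h" is included (the function name is made up):

	static const char *target__mode(struct target *target)
	{
		if (target__has_task(target))
			return "task";		/* a pid, tid or uid was given */
		if (target__has_cpu(target))
			return "cpu";		/* system-wide or an explicit cpu list */
		return "none";			/* fall back to the forked workload */
	}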
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 8c72d888e44..2fde0d5e40b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -6,175 +6,188 @@
#include "thread.h"
#include "util.h"
#include "debug.h"
+#include "comm.h"
-/* Skip "." and ".." directories */
-static int filter(const struct dirent *dir)
+int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
- if (dir->d_name[0] == '.')
- return 0;
- else
- return 1;
+ struct thread *leader;
+ pid_t pid = thread->pid_;
+
+ if (pid == thread->tid) {
+ thread->mg = map_groups__new();
+ } else {
+ leader = machine__findnew_thread(machine, pid, pid);
+ if (leader)
+ thread->mg = map_groups__get(leader->mg);
+ }
+
+ return thread->mg ? 0 : -1;
}
-int find_all_tid(int pid, pid_t ** all_tid)
+struct thread *thread__new(pid_t pid, pid_t tid)
{
- char name[256];
- int items;
- struct dirent **namelist = NULL;
- int ret = 0;
- int i;
-
- sprintf(name, "/proc/%d/task", pid);
- items = scandir(name, &namelist, filter, NULL);
- if (items <= 0)
- return -ENOENT;
- *all_tid = malloc(sizeof(pid_t) * items);
- if (!*all_tid) {
- ret = -ENOMEM;
- goto failure;
+ char *comm_str;
+ struct comm *comm;
+ struct thread *thread = zalloc(sizeof(*thread));
+
+ if (thread != NULL) {
+ thread->pid_ = pid;
+ thread->tid = tid;
+ thread->ppid = -1;
+ INIT_LIST_HEAD(&thread->comm_list);
+
+ comm_str = malloc(32);
+ if (!comm_str)
+ goto err_thread;
+
+ snprintf(comm_str, 32, ":%d", tid);
+ comm = comm__new(comm_str, 0);
+ free(comm_str);
+ if (!comm)
+ goto err_thread;
+
+ list_add(&comm->list, &thread->comm_list);
}
- for (i = 0; i < items; i++)
- (*all_tid)[i] = atoi(namelist[i]->d_name);
+ return thread;
- ret = items;
-
-failure:
- for (i=0; i<items; i++)
- free(namelist[i]);
- free(namelist);
-
- return ret;
+err_thread:
+ free(thread);
+ return NULL;
}
-static struct thread *thread__new(pid_t pid)
+void thread__delete(struct thread *thread)
{
- struct thread *self = zalloc(sizeof(*self));
-
- if (self != NULL) {
- map_groups__init(&self->mg);
- self->pid = pid;
- self->comm = malloc(32);
- if (self->comm)
- snprintf(self->comm, 32, ":%d", self->pid);
+ struct comm *comm, *tmp;
+
+ map_groups__put(thread->mg);
+ thread->mg = NULL;
+ list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) {
+ list_del(&comm->list);
+ comm__free(comm);
}
- return self;
+ free(thread);
}
-void thread__delete(struct thread *self)
+struct comm *thread__comm(const struct thread *thread)
{
- map_groups__exit(&self->mg);
- free(self->comm);
- free(self);
+ if (list_empty(&thread->comm_list))
+ return NULL;
+
+ return list_first_entry(&thread->comm_list, struct comm, list);
}
-int thread__set_comm(struct thread *self, const char *comm)
+/* CHECKME: time should always be 0 if events aren't ordered */
+int thread__set_comm(struct thread *thread, const char *str, u64 timestamp)
{
+ struct comm *new, *curr = thread__comm(thread);
int err;
- if (self->comm)
- free(self->comm);
- self->comm = strdup(comm);
- err = self->comm == NULL ? -ENOMEM : 0;
- if (!err) {
- self->comm_set = true;
- map_groups__flush(&self->mg);
+ /* Override latest entry if it had no specific time coverage */
+ if (!curr->start) {
+ err = comm__override(curr, str, timestamp);
+ if (err)
+ return err;
+ } else {
+ new = comm__new(str, timestamp);
+ if (!new)
+ return -ENOMEM;
+ list_add(&new->list, &thread->comm_list);
}
- return err;
-}
-int thread__comm_len(struct thread *self)
-{
- if (!self->comm_len) {
- if (!self->comm)
- return 0;
- self->comm_len = strlen(self->comm);
- }
+ thread->comm_set = true;
- return self->comm_len;
+ return 0;
}
-static size_t thread__fprintf(struct thread *self, FILE *fp)
+const char *thread__comm_str(const struct thread *thread)
{
- return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
- map_groups__fprintf(&self->mg, verbose, fp);
+ const struct comm *comm = thread__comm(thread);
+
+ if (!comm)
+ return NULL;
+
+ return comm__str(comm);
}
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
+/* CHECKME: it should probably return the max comm len from its comm list */
+int thread__comm_len(struct thread *thread)
{
- struct rb_node **p = &self->threads.rb_node;
- struct rb_node *parent = NULL;
- struct thread *th;
-
- /*
- * Font-end cache - PID lookups come in blocks,
- * so most of the time we dont have to look up
- * the full rbtree:
- */
- if (self->last_match && self->last_match->pid == pid)
- return self->last_match;
-
- while (*p != NULL) {
- parent = *p;
- th = rb_entry(parent, struct thread, rb_node);
-
- if (th->pid == pid) {
- self->last_match = th;
- return th;
- }
-
- if (pid < th->pid)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
+ if (!thread->comm_len) {
+ const char *comm = thread__comm_str(thread);
+ if (!comm)
+ return 0;
+ thread->comm_len = strlen(comm);
}
- th = thread__new(pid);
- if (th != NULL) {
- rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &self->threads);
- self->last_match = th;
- }
+ return thread->comm_len;
+}
- return th;
+size_t thread__fprintf(struct thread *thread, FILE *fp)
+{
+ return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
+ map_groups__fprintf(thread->mg, verbose, fp);
}
-void thread__insert_map(struct thread *self, struct map *map)
+void thread__insert_map(struct thread *thread, struct map *map)
{
- map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
- map_groups__insert(&self->mg, map);
+ map_groups__fixup_overlappings(thread->mg, map, verbose, stderr);
+ map_groups__insert(thread->mg, map);
}
-int thread__fork(struct thread *self, struct thread *parent)
+static int thread__clone_map_groups(struct thread *thread,
+ struct thread *parent)
{
int i;
- if (parent->comm_set) {
- if (self->comm)
- free(self->comm);
- self->comm = strdup(parent->comm);
- if (!self->comm)
- return -ENOMEM;
- self->comm_set = true;
- }
+	/* This is a new thread, so it shares the process's map groups. */
+ if (thread->pid_ == parent->pid_)
+ return 0;
+	/* But this one is a new process, so copy the maps. */
for (i = 0; i < MAP__NR_TYPES; ++i)
- if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
+ if (map_groups__clone(thread->mg, parent->mg, i) < 0)
return -ENOMEM;
+
return 0;
}
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
+int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
- size_t ret = 0;
- struct rb_node *nd;
-
- for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
- struct thread *pos = rb_entry(nd, struct thread, rb_node);
+ int err;
- ret += thread__fprintf(pos, fp);
+ if (parent->comm_set) {
+ const char *comm = thread__comm_str(parent);
+ if (!comm)
+ return -ENOMEM;
+ err = thread__set_comm(thread, comm, timestamp);
+ if (err)
+ return err;
+ thread->comm_set = true;
}
- return ret;
+ thread->ppid = parent->tid;
+ return thread__clone_map_groups(thread, parent);
+}
+
+void thread__find_cpumode_addr_location(struct thread *thread,
+ struct machine *machine,
+ enum map_type type, u64 addr,
+ struct addr_location *al)
+{
+ size_t i;
+	const u8 cpumodes[] = {
+ PERF_RECORD_MISC_USER,
+ PERF_RECORD_MISC_KERNEL,
+ PERF_RECORD_MISC_GUEST_USER,
+ PERF_RECORD_MISC_GUEST_KERNEL
+ };
+
+ for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
+ thread__find_addr_location(thread, machine, cpumodes[i], type,
+ addr, al);
+ if (al->map)
+ break;
+ }
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 688500ff826..3c0c2724f82 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -2,48 +2,80 @@
#define __PERF_THREAD_H
#include <linux/rbtree.h>
+#include <linux/list.h>
#include <unistd.h>
+#include <sys/types.h>
#include "symbol.h"
+#include <strlist.h>
struct thread {
union {
struct rb_node rb_node;
struct list_head node;
};
- struct map_groups mg;
- pid_t pid;
+ struct map_groups *mg;
+ pid_t pid_; /* Not all tools update this */
+ pid_t tid;
+ pid_t ppid;
char shortname[3];
bool comm_set;
- char *comm;
+ bool dead; /* if set thread has exited */
+ struct list_head comm_list;
int comm_len;
+
+ void *priv;
};
-struct perf_session;
+struct machine;
+struct comm;
+
+struct thread *thread__new(pid_t pid, pid_t tid);
+int thread__init_map_groups(struct thread *thread, struct machine *machine);
+void thread__delete(struct thread *thread);
+static inline void thread__exited(struct thread *thread)
+{
+ thread->dead = true;
+}
+
+int thread__set_comm(struct thread *thread, const char *comm, u64 timestamp);
+int thread__comm_len(struct thread *thread);
+struct comm *thread__comm(const struct thread *thread);
+const char *thread__comm_str(const struct thread *thread);
+void thread__insert_map(struct thread *thread, struct map *map);
+int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
+size_t thread__fprintf(struct thread *thread, FILE *fp);
+
+void thread__find_addr_map(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al);
+
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al);
-void thread__delete(struct thread *self);
+void thread__find_cpumode_addr_location(struct thread *thread,
+ struct machine *machine,
+ enum map_type type, u64 addr,
+ struct addr_location *al);
-int find_all_tid(int pid, pid_t ** all_tid);
-int thread__set_comm(struct thread *self, const char *comm);
-int thread__comm_len(struct thread *self);
-struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
-void thread__insert_map(struct thread *self, struct map *map);
-int thread__fork(struct thread *self, struct thread *parent);
-size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
+static inline void *thread__priv(struct thread *thread)
+{
+ return thread->priv;
+}
-static inline struct map *thread__find_map(struct thread *self,
- enum map_type type, u64 addr)
+static inline void thread__set_priv(struct thread *thread, void *p)
{
- return self ? map_groups__find(&self->mg, type, addr) : NULL;
+ thread->priv = p;
}
-void thread__find_addr_map(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
- struct addr_location *al);
+static inline bool thread__is_filtered(struct thread *thread)
+{
+ if (symbol_conf.comm_list &&
+ !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) {
+ return true;
+ }
+
+ return false;
+}
-void thread__find_addr_location(struct thread *self,
- struct perf_session *session, u8 cpumode,
- enum map_type type, pid_t pid, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter);
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
new file mode 100644
index 00000000000..5d321591210
--- /dev/null
+++ b/tools/perf/util/thread_map.c
@@ -0,0 +1,294 @@
+#include <dirent.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include "strlist.h"
+#include <string.h>
+#include "thread_map.h"
+#include "util.h"
+
+/* Skip "." and ".." directories */
+static int filter(const struct dirent *dir)
+{
+ if (dir->d_name[0] == '.')
+ return 0;
+ else
+ return 1;
+}
+
+struct thread_map *thread_map__new_by_pid(pid_t pid)
+{
+ struct thread_map *threads;
+ char name[256];
+ int items;
+ struct dirent **namelist = NULL;
+ int i;
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, filter, NULL);
+ if (items <= 0)
+ return NULL;
+
+ threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+ if (threads != NULL) {
+ for (i = 0; i < items; i++)
+ threads->map[i] = atoi(namelist[i]->d_name);
+ threads->nr = items;
+ }
+
+	for (i = 0; i < items; i++)
+ zfree(&namelist[i]);
+ free(namelist);
+
+ return threads;
+}
+
+struct thread_map *thread_map__new_by_tid(pid_t tid)
+{
+ struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+
+ if (threads != NULL) {
+ threads->map[0] = tid;
+ threads->nr = 1;
+ }
+
+ return threads;
+}
+
+struct thread_map *thread_map__new_by_uid(uid_t uid)
+{
+ DIR *proc;
+ int max_threads = 32, items, i;
+ char path[256];
+ struct dirent dirent, *next, **namelist = NULL;
+ struct thread_map *threads = malloc(sizeof(*threads) +
+ max_threads * sizeof(pid_t));
+ if (threads == NULL)
+ goto out;
+
+ proc = opendir("/proc");
+ if (proc == NULL)
+ goto out_free_threads;
+
+ threads->nr = 0;
+
+ while (!readdir_r(proc, &dirent, &next) && next) {
+ char *end;
+ bool grow = false;
+ struct stat st;
+ pid_t pid = strtol(dirent.d_name, &end, 10);
+
+ if (*end) /* only interested in proper numerical dirents */
+ continue;
+
+ snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+
+ if (stat(path, &st) != 0)
+ continue;
+
+ if (st.st_uid != uid)
+ continue;
+
+ snprintf(path, sizeof(path), "/proc/%d/task", pid);
+ items = scandir(path, &namelist, filter, NULL);
+ if (items <= 0)
+ goto out_free_closedir;
+
+ while (threads->nr + items >= max_threads) {
+ max_threads *= 2;
+ grow = true;
+ }
+
+ if (grow) {
+ struct thread_map *tmp;
+
+ tmp = realloc(threads, (sizeof(*threads) +
+ max_threads * sizeof(pid_t)));
+ if (tmp == NULL)
+ goto out_free_namelist;
+
+ threads = tmp;
+ }
+
+ for (i = 0; i < items; i++)
+ threads->map[threads->nr + i] = atoi(namelist[i]->d_name);
+
+ for (i = 0; i < items; i++)
+ zfree(&namelist[i]);
+ free(namelist);
+
+ threads->nr += items;
+ }
+
+out_closedir:
+ closedir(proc);
+out:
+ return threads;
+
+out_free_threads:
+ free(threads);
+ return NULL;
+
+out_free_namelist:
+ for (i = 0; i < items; i++)
+ zfree(&namelist[i]);
+ free(namelist);
+
+out_free_closedir:
+ zfree(&threads);
+ goto out_closedir;
+}
+
+struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
+{
+ if (pid != -1)
+ return thread_map__new_by_pid(pid);
+
+ if (tid == -1 && uid != UINT_MAX)
+ return thread_map__new_by_uid(uid);
+
+ return thread_map__new_by_tid(tid);
+}
+
+static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
+{
+ struct thread_map *threads = NULL, *nt;
+ char name[256];
+ int items, total_tasks = 0;
+ struct dirent **namelist = NULL;
+ int i, j = 0;
+ pid_t pid, prev_pid = INT_MAX;
+ char *end_ptr;
+ struct str_node *pos;
+ struct strlist *slist = strlist__new(false, pid_str);
+
+ if (!slist)
+ return NULL;
+
+ strlist__for_each(pos, slist) {
+ pid = strtol(pos->s, &end_ptr, 10);
+
+ if (pid == INT_MIN || pid == INT_MAX ||
+ (*end_ptr != '\0' && *end_ptr != ','))
+ goto out_free_threads;
+
+ if (pid == prev_pid)
+ continue;
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, filter, NULL);
+ if (items <= 0)
+ goto out_free_threads;
+
+ total_tasks += items;
+ nt = realloc(threads, (sizeof(*threads) +
+ sizeof(pid_t) * total_tasks));
+ if (nt == NULL)
+ goto out_free_namelist;
+
+ threads = nt;
+
+ for (i = 0; i < items; i++) {
+ threads->map[j++] = atoi(namelist[i]->d_name);
+ zfree(&namelist[i]);
+ }
+ threads->nr = total_tasks;
+ free(namelist);
+ }
+
+out:
+ strlist__delete(slist);
+ return threads;
+
+out_free_namelist:
+ for (i = 0; i < items; i++)
+ zfree(&namelist[i]);
+ free(namelist);
+
+out_free_threads:
+ zfree(&threads);
+ goto out;
+}
+
+static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
+{
+ struct thread_map *threads = NULL, *nt;
+ int ntasks = 0;
+ pid_t tid, prev_tid = INT_MAX;
+ char *end_ptr;
+ struct str_node *pos;
+ struct strlist *slist;
+
+ /* perf-stat expects threads to be generated even if tid not given */
+ if (!tid_str) {
+ threads = malloc(sizeof(*threads) + sizeof(pid_t));
+ if (threads != NULL) {
+ threads->map[0] = -1;
+ threads->nr = 1;
+ }
+ return threads;
+ }
+
+ slist = strlist__new(false, tid_str);
+ if (!slist)
+ return NULL;
+
+ strlist__for_each(pos, slist) {
+ tid = strtol(pos->s, &end_ptr, 10);
+
+ if (tid == INT_MIN || tid == INT_MAX ||
+ (*end_ptr != '\0' && *end_ptr != ','))
+ goto out_free_threads;
+
+ if (tid == prev_tid)
+ continue;
+
+ ntasks++;
+ nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks);
+
+ if (nt == NULL)
+ goto out_free_threads;
+
+ threads = nt;
+ threads->map[ntasks - 1] = tid;
+ threads->nr = ntasks;
+ }
+out:
+ return threads;
+
+out_free_threads:
+ zfree(&threads);
+ goto out;
+}
+
+struct thread_map *thread_map__new_str(const char *pid, const char *tid,
+ uid_t uid)
+{
+ if (pid)
+ return thread_map__new_by_pid_str(pid);
+
+ if (!tid && uid != UINT_MAX)
+ return thread_map__new_by_uid(uid);
+
+ return thread_map__new_by_tid_str(tid);
+}
+
+void thread_map__delete(struct thread_map *threads)
+{
+ free(threads);
+}
+
+size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
+{
+ int i;
+ size_t printed = fprintf(fp, "%d thread%s: ",
+ threads->nr, threads->nr > 1 ? "s" : "");
+ for (i = 0; i < threads->nr; ++i)
+ printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]);
+
+ return printed + fprintf(fp, "\n");
+}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
new file mode 100644
index 00000000000..0cd8b310808
--- /dev/null
+++ b/tools/perf/util/thread_map.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_THREAD_MAP_H
+#define __PERF_THREAD_MAP_H
+
+#include <sys/types.h>
+#include <stdio.h>
+
+struct thread_map {
+ int nr;
+ pid_t map[];
+};
+
+struct thread_map *thread_map__new_by_pid(pid_t pid);
+struct thread_map *thread_map__new_by_tid(pid_t tid);
+struct thread_map *thread_map__new_by_uid(uid_t uid);
+struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
+
+struct thread_map *thread_map__new_str(const char *pid,
+ const char *tid, uid_t uid);
+
+void thread_map__delete(struct thread_map *threads);
+
+size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
+
+static inline int thread_map__nr(struct thread_map *threads)
+{
+ return threads ? threads->nr : 1;
+}
+
+#endif /* __PERF_THREAD_MAP_H */
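
Taken together, the thread_map API added above resolves a pid/tid/uid specification into a flat array of thread ids. A minimal usage sketch, illustrative only and assuming nothing beyond the declarations in this header:

	#include <stdio.h>
	#include <unistd.h>
	#include "thread_map.h"

	int main(void)
	{
		/* Build a map of every task of the current process. */
		struct thread_map *threads = thread_map__new_by_pid(getpid());

		if (threads == NULL)
			return 1;

		/* Prints e.g. "4 threads: 1234, 1235, 1236, 1237". */
		thread_map__fprintf(threads, stdout);
		thread_map__delete(threads);
		return 0;
	}
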
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
new file mode 100644
index 00000000000..4385816d3d4
--- /dev/null
+++ b/tools/perf/util/tool.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_TOOL_H
+#define __PERF_TOOL_H
+
+#include <stdbool.h>
+
+struct perf_session;
+union perf_event;
+struct perf_evlist;
+struct perf_evsel;
+struct perf_sample;
+struct perf_tool;
+struct machine;
+
+typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel, struct machine *machine);
+
+typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine);
+
+typedef int (*event_attr_op)(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_evlist **pevlist);
+
+typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
+ struct perf_session *session);
+
+struct perf_tool {
+ event_sample sample,
+ read;
+ event_op mmap,
+ mmap2,
+ comm,
+ fork,
+ exit,
+ lost,
+ throttle,
+ unthrottle;
+ event_attr_op attr;
+ event_op2 tracing_data;
+ event_op2 finished_round,
+ build_id;
+ bool ordered_samples;
+ bool ordering_requires_timestamps;
+};
+
+#endif /* __PERF_TOOL_H */
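
struct perf_tool is a table of per-event-type callbacks that the session layer invokes while processing a perf.data file or a live stream; a builtin fills in only the handlers it cares about. A rough sketch, with a hypothetical handler name that is not part of this patch:

	static int my_sample(struct perf_tool *tool, union perf_event *event,
			     struct perf_sample *sample, struct perf_evsel *evsel,
			     struct machine *machine)
	{
		/* consume one PERF_RECORD_SAMPLE */
		return 0;
	}

	static struct perf_tool my_tool = {
		.sample		 = my_sample,
		.ordered_samples = true,	/* ask the session to time-sort events */
	};
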
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
new file mode 100644
index 00000000000..8e517def925
--- /dev/null
+++ b/tools/perf/util/top.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Refactored from builtin-top.c, see that file for further copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "cpumap.h"
+#include "event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "symbol.h"
+#include "top.h"
+#include <inttypes.h>
+
+#define SNPRINTF(buf, size, fmt, args...) \
+({ \
+ size_t r = snprintf(buf, size, fmt, ## args); \
+ r > size ? size : r; \
+})
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
+{
+ float samples_per_sec;
+ float ksamples_per_sec;
+ float esamples_percent;
+ struct record_opts *opts = &top->record_opts;
+ struct target *target = &opts->target;
+ size_t ret = 0;
+
+ if (top->samples) {
+ samples_per_sec = top->samples / top->delay_secs;
+ ksamples_per_sec = top->kernel_samples / top->delay_secs;
+ esamples_percent = (100.0 * top->exact_samples) / top->samples;
+ } else {
+ samples_per_sec = ksamples_per_sec = esamples_percent = 0.0;
+ }
+
+ if (!perf_guest) {
+ float ksamples_percent = 0.0;
+
+ if (samples_per_sec)
+ ksamples_percent = (100.0 * ksamples_per_sec) /
+ samples_per_sec;
+ ret = SNPRINTF(bf, size,
+ " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
+ " exact: %4.1f%% [", samples_per_sec,
+ ksamples_percent, esamples_percent);
+ } else {
+ float us_samples_per_sec = top->us_samples / top->delay_secs;
+ float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
+ float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
+
+ ret = SNPRINTF(bf, size,
+ " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
+ " guest kernel:%4.1f%% guest us:%4.1f%%"
+ " exact: %4.1f%% [", samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_kernel_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_us_samples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ }
+
+ if (top->evlist->nr_entries == 1) {
+ struct perf_evsel *first = perf_evlist__first(top->evlist);
+ ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
+ (uint64_t)first->attr.sample_period,
+ opts->freq ? "Hz" : "");
+ }
+
+ ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
+
+ ret += SNPRINTF(bf + ret, size - ret, "], ");
+
+ if (target->pid)
+ ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
+ target->pid);
+ else if (target->tid)
+ ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
+ target->tid);
+ else if (target->uid_str != NULL)
+ ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
+ target->uid_str);
+ else
+ ret += SNPRINTF(bf + ret, size - ret, " (all");
+
+ if (target->cpu_list)
+ ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
+ top->evlist->cpus->nr > 1 ? "s" : "",
+ target->cpu_list);
+ else {
+ if (target->tid)
+ ret += SNPRINTF(bf + ret, size - ret, ")");
+ else
+ ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
+ top->evlist->cpus->nr,
+ top->evlist->cpus->nr > 1 ? "s" : "");
+ }
+
+ return ret;
+}
+
+void perf_top__reset_sample_counters(struct perf_top *top)
+{
+ top->samples = top->us_samples = top->kernel_samples =
+ top->exact_samples = top->guest_kernel_samples =
+ top->guest_us_samples = 0;
+}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
new file mode 100644
index 00000000000..f92c37abb0a
--- /dev/null
+++ b/tools/perf/util/top.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_TOP_H
+#define __PERF_TOP_H 1
+
+#include "tool.h"
+#include <linux/types.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <termios.h>
+
+struct perf_evlist;
+struct perf_evsel;
+struct perf_session;
+
+struct perf_top {
+ struct perf_tool tool;
+ struct perf_evlist *evlist;
+ struct record_opts record_opts;
+ /*
+ * Symbols will be added here in perf_event__process_sample and will
+ * be removed after they decay.
+ */
+ u64 samples;
+ u64 kernel_samples, us_samples;
+ u64 exact_samples;
+ u64 guest_us_samples, guest_kernel_samples;
+ int print_entries, count_filter, delay_secs;
+ int max_stack;
+ bool hide_kernel_symbols, hide_user_symbols, zero;
+ bool use_tui, use_stdio;
+ bool kptr_restrict_warned;
+ bool vmlinux_warned;
+ bool dump_symtab;
+ struct hist_entry *sym_filter_entry;
+ struct perf_evsel *sym_evsel;
+ struct perf_session *session;
+ struct winsize winsize;
+ int realtime_prio;
+ int sym_pcnt_filter;
+ const char *sym_filter;
+ float min_percent;
+};
+
+#define CONSOLE_CLEAR ""
+
+size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
+void perf_top__reset_sample_counters(struct perf_top *top);
+#endif /* __PERF_TOP_H */
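
Assuming a fully set-up struct perf_top, as builtin-top prepares it before entering its display loop, the two helpers declared above would typically be used per refresh like this (sketch, not from the patch):

	char bf[512];

	perf_top__header_snprintf(top, bf, sizeof(bf));	/* " PerfTop: ... irqs/sec ..." */
	puts(bf);
	perf_top__reset_sample_counters(top);	/* start the next interval from zero */
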
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index b1572601286..7e6fcfe8b43 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -18,7 +18,7 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define _GNU_SOURCE
+#include "util.h"
#include <dirent.h>
#include <mntent.h>
#include <stdio.h>
@@ -31,146 +31,20 @@
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
-#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
+#include <linux/list.h>
#include <linux/kernel.h>
#include "../perf.h"
#include "trace-event.h"
-#include "debugfs.h"
+#include <api/fs/debugfs.h>
+#include "evsel.h"
#define VERSION "0.5"
-#define _STR(x) #x
-#define STR(x) _STR(x)
-#define MAX_PATH 256
-
-#define TRACE_CTRL "tracing_on"
-#define TRACE "trace"
-#define AVAILABLE "available_tracers"
-#define CURRENT "current_tracer"
-#define ITER_CTRL "trace_options"
-#define MAX_LATENCY "tracing_max_latency"
-
-unsigned int page_size;
-
-static const char *output_file = "trace.info";
static int output_fd;
-struct event_list {
- struct event_list *next;
- const char *event;
-};
-
-struct events {
- struct events *sibling;
- struct events *children;
- struct events *next;
- char *name;
-};
-
-
-
-static void die(const char *fmt, ...)
-{
- va_list ap;
- int ret = errno;
-
- if (errno)
- perror("trace-cmd");
- else
- ret = -1;
-
- va_start(ap, fmt);
- fprintf(stderr, " ");
- vfprintf(stderr, fmt, ap);
- va_end(ap);
-
- fprintf(stderr, "\n");
- exit(ret);
-}
-
-void *malloc_or_die(unsigned int size)
-{
- void *data;
-
- data = malloc(size);
- if (!data)
- die("malloc");
- return data;
-}
-
-static const char *find_debugfs(void)
-{
- const char *path = debugfs_mount(NULL);
-
- if (!path)
- die("Your kernel not support debugfs filesystem");
-
- return path;
-}
-
-/*
- * Finds the path to the debugfs/tracing
- * Allocates the string and stores it.
- */
-static const char *find_tracing_dir(void)
-{
- static char *tracing;
- static int tracing_found;
- const char *debugfs;
-
- if (tracing_found)
- return tracing;
-
- debugfs = find_debugfs();
-
- tracing = malloc_or_die(strlen(debugfs) + 9);
-
- sprintf(tracing, "%s/tracing", debugfs);
-
- tracing_found = 1;
- return tracing;
-}
-
-static char *get_tracing_file(const char *name)
-{
- const char *tracing;
- char *file;
-
- tracing = find_tracing_dir();
- if (!tracing)
- return NULL;
-
- file = malloc_or_die(strlen(tracing) + strlen(name) + 2);
-
- sprintf(file, "%s/%s", tracing, name);
- return file;
-}
-
-static void put_tracing_file(char *file)
-{
- free(file);
-}
-
-static ssize_t calc_data_size;
-
-static ssize_t write_or_die(const void *buf, size_t len)
-{
- int ret;
-
- if (calc_data_size) {
- calc_data_size += len;
- return len;
- }
-
- ret = write(output_fd, buf, len);
- if (ret < 0)
- die("writing to '%s'", output_file);
-
- return ret;
-}
int bigendian(void)
{
@@ -181,106 +55,107 @@ int bigendian(void)
return *ptr == 0x01020304;
}
-static unsigned long long copy_file_fd(int fd)
+/* unfortunately, you can not stat debugfs or proc files for size */
+static int record_file(const char *file, ssize_t hdr_sz)
{
unsigned long long size = 0;
- char buf[BUFSIZ];
- int r;
-
- do {
- r = read(fd, buf, BUFSIZ);
- if (r > 0) {
- size += r;
- write_or_die(buf, r);
- }
- } while (r > 0);
-
- return size;
-}
-
-static unsigned long long copy_file(const char *file)
-{
- unsigned long long size = 0;
- int fd;
+ char buf[BUFSIZ], *sizep;
+ off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR);
+ int r, fd;
+ int err = -EIO;
fd = open(file, O_RDONLY);
- if (fd < 0)
- die("Can't read '%s'", file);
- size = copy_file_fd(fd);
- close(fd);
-
- return size;
-}
+ if (fd < 0) {
+ pr_debug("Can't read '%s'", file);
+ return -errno;
+ }
-static unsigned long get_size_fd(int fd)
-{
- unsigned long long size = 0;
- char buf[BUFSIZ];
- int r;
+ /* put in zeros for file size, then fill true size later */
+ if (hdr_sz) {
+ if (write(output_fd, &size, hdr_sz) != hdr_sz)
+ goto out;
+ }
do {
r = read(fd, buf, BUFSIZ);
- if (r > 0)
+ if (r > 0) {
size += r;
+ if (write(output_fd, buf, r) != r)
+ goto out;
+ }
} while (r > 0);
- lseek(fd, 0, SEEK_SET);
-
- return size;
-}
+ /* ugh, handle big-endian hdr_size == 4 */
+ sizep = (char*)&size;
+ if (bigendian())
+ sizep += sizeof(u64) - hdr_sz;
-static unsigned long get_size(const char *file)
-{
- unsigned long long size = 0;
- int fd;
+ if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) {
+ pr_debug("writing file size failed\n");
+ goto out;
+ }
- fd = open(file, O_RDONLY);
- if (fd < 0)
- die("Can't read '%s'", file);
- size = get_size_fd(fd);
+ err = 0;
+out:
close(fd);
-
- return size;
+ return err;
}
-static void read_header_files(void)
+static int record_header_files(void)
{
- unsigned long long size, check_size;
char *path;
- int fd;
+ struct stat st;
+ int err = -EIO;
path = get_tracing_file("events/header_page");
- fd = open(path, O_RDONLY);
- if (fd < 0)
- die("can't read '%s'", path);
+ if (!path) {
+ pr_debug("can't get tracing/events/header_page");
+ return -ENOMEM;
+ }
+
+ if (stat(path, &st) < 0) {
+ pr_debug("can't read '%s'", path);
+ goto out;
+ }
- /* unfortunately, you can not stat debugfs files for size */
- size = get_size_fd(fd);
+ if (write(output_fd, "header_page", 12) != 12) {
+ pr_debug("can't write header_page\n");
+ goto out;
+ }
- write_or_die("header_page", 12);
- write_or_die(&size, 8);
- check_size = copy_file_fd(fd);
- close(fd);
+ if (record_file(path, 8) < 0) {
+ pr_debug("can't record header_page file\n");
+ goto out;
+ }
- if (size != check_size)
- die("wrong size for '%s' size=%lld read=%lld",
- path, size, check_size);
put_tracing_file(path);
path = get_tracing_file("events/header_event");
- fd = open(path, O_RDONLY);
- if (fd < 0)
- die("can't read '%s'", path);
+ if (!path) {
+ pr_debug("can't get tracing/events/header_event");
+ err = -ENOMEM;
+ goto out;
+ }
- size = get_size_fd(fd);
+ if (stat(path, &st) < 0) {
+ pr_debug("can't read '%s'", path);
+ goto out;
+ }
- write_or_die("header_event", 13);
- write_or_die(&size, 8);
- check_size = copy_file_fd(fd);
- if (size != check_size)
- die("wrong size for '%s'", path);
+ if (write(output_fd, "header_event", 13) != 13) {
+ pr_debug("can't write header_event\n");
+ goto out;
+ }
+
+ if (record_file(path, 8) < 0) {
+ pr_debug("can't record header_event file\n");
+ goto out;
+ }
+
+ err = 0;
+out:
put_tracing_file(path);
- close(fd);
+ return err;
}
static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -294,19 +169,21 @@ static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
return false;
}
-static void copy_event_system(const char *sys, struct tracepoint_path *tps)
+static int copy_event_system(const char *sys, struct tracepoint_path *tps)
{
- unsigned long long size, check_size;
struct dirent *dent;
struct stat st;
char *format;
DIR *dir;
int count = 0;
int ret;
+ int err;
dir = opendir(sys);
- if (!dir)
- die("can't read directory '%s'", sys);
+ if (!dir) {
+ pr_debug("can't read directory '%s'", sys);
+ return -errno;
+ }
while ((dent = readdir(dir))) {
if (dent->d_type != DT_DIR ||
@@ -314,7 +191,11 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
strcmp(dent->d_name, "..") == 0 ||
!name_in_tp_list(dent->d_name, tps))
continue;
- format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
+ if (!format) {
+ err = -ENOMEM;
+ goto out;
+ }
sprintf(format, "%s/%s/format", sys, dent->d_name);
ret = stat(format, &st);
free(format);
@@ -323,7 +204,11 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
count++;
}
- write_or_die(&count, 4);
+ if (write(output_fd, &count, 4) != 4) {
+ err = -EIO;
+ pr_debug("can't write count\n");
+ goto out;
+ }
rewinddir(dir);
while ((dent = readdir(dir))) {
@@ -332,33 +217,45 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
strcmp(dent->d_name, "..") == 0 ||
!name_in_tp_list(dent->d_name, tps))
continue;
- format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
+ if (!format) {
+ err = -ENOMEM;
+ goto out;
+ }
sprintf(format, "%s/%s/format", sys, dent->d_name);
ret = stat(format, &st);
if (ret >= 0) {
- /* unfortunately, you can not stat debugfs files for size */
- size = get_size(format);
- write_or_die(&size, 8);
- check_size = copy_file(format);
- if (size != check_size)
- die("error in size of file '%s'", format);
+ err = record_file(format, 8);
+ if (err) {
+ free(format);
+ goto out;
+ }
}
-
free(format);
}
+ err = 0;
+out:
closedir(dir);
+ return err;
}
-static void read_ftrace_files(struct tracepoint_path *tps)
+static int record_ftrace_files(struct tracepoint_path *tps)
{
char *path;
+ int ret;
path = get_tracing_file("events/ftrace");
+ if (!path) {
+ pr_debug("can't get tracing/events/ftrace");
+ return -ENOMEM;
+ }
- copy_event_system(path, tps);
+ ret = copy_event_system(path, tps);
put_tracing_file(path);
+
+ return ret;
}
static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -372,7 +269,7 @@ static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
return false;
}
-static void read_event_files(struct tracepoint_path *tps)
+static int record_event_files(struct tracepoint_path *tps)
{
struct dirent *dent;
struct stat st;
@@ -381,12 +278,20 @@ static void read_event_files(struct tracepoint_path *tps)
DIR *dir;
int count = 0;
int ret;
+ int err;
path = get_tracing_file("events");
+ if (!path) {
+ pr_debug("can't get tracing/events");
+ return -ENOMEM;
+ }
dir = opendir(path);
- if (!dir)
- die("can't read directory '%s'", path);
+ if (!dir) {
+ err = -errno;
+ pr_debug("can't read directory '%s'", path);
+ goto out;
+ }
while ((dent = readdir(dir))) {
if (dent->d_type != DT_DIR ||
@@ -398,7 +303,11 @@ static void read_event_files(struct tracepoint_path *tps)
count++;
}
- write_or_die(&count, 4);
+ if (write(output_fd, &count, 4) != 4) {
+ err = -EIO;
+ pr_debug("can't write count\n");
+ goto out;
+ }
rewinddir(dir);
while ((dent = readdir(dir))) {
@@ -408,117 +317,158 @@ static void read_event_files(struct tracepoint_path *tps)
strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
- sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
+ sys = malloc(strlen(path) + strlen(dent->d_name) + 2);
+ if (!sys) {
+ err = -ENOMEM;
+ goto out;
+ }
sprintf(sys, "%s/%s", path, dent->d_name);
ret = stat(sys, &st);
if (ret >= 0) {
- write_or_die(dent->d_name, strlen(dent->d_name) + 1);
- copy_event_system(sys, tps);
+ ssize_t size = strlen(dent->d_name) + 1;
+
+ if (write(output_fd, dent->d_name, size) != size ||
+ copy_event_system(sys, tps) < 0) {
+ err = -EIO;
+ free(sys);
+ goto out;
+ }
}
free(sys);
}
-
+ err = 0;
+out:
closedir(dir);
put_tracing_file(path);
+
+ return err;
}
-static void read_proc_kallsyms(void)
+static int record_proc_kallsyms(void)
{
- unsigned int size, check_size;
+ unsigned int size;
const char *path = "/proc/kallsyms";
struct stat st;
- int ret;
+ int ret, err = 0;
ret = stat(path, &st);
if (ret < 0) {
/* not found */
size = 0;
- write_or_die(&size, 4);
- return;
+ if (write(output_fd, &size, 4) != 4)
+ err = -EIO;
+ return err;
}
- size = get_size(path);
- write_or_die(&size, 4);
- check_size = copy_file(path);
- if (size != check_size)
- die("error in size of file '%s'", path);
-
+ return record_file(path, 4);
}
-static void read_ftrace_printk(void)
+static int record_ftrace_printk(void)
{
- unsigned int size, check_size;
+ unsigned int size;
char *path;
struct stat st;
- int ret;
+ int ret, err = 0;
path = get_tracing_file("printk_formats");
+ if (!path) {
+ pr_debug("can't get tracing/printk_formats");
+ return -ENOMEM;
+ }
+
ret = stat(path, &st);
if (ret < 0) {
/* not found */
size = 0;
- write_or_die(&size, 4);
+ if (write(output_fd, &size, 4) != 4)
+ err = -EIO;
goto out;
}
- size = get_size(path);
- write_or_die(&size, 4);
- check_size = copy_file(path);
- if (size != check_size)
- die("error in size of file '%s'", path);
+ err = record_file(path, 4);
+
out:
put_tracing_file(path);
+ return err;
+}
+
+static void
+put_tracepoints_path(struct tracepoint_path *tps)
+{
+ while (tps) {
+ struct tracepoint_path *t = tps;
+
+ tps = tps->next;
+ zfree(&t->name);
+ zfree(&t->system);
+ free(t);
+ }
}
static struct tracepoint_path *
-get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
+get_tracepoints_path(struct list_head *pattrs)
{
struct tracepoint_path path, *ppath = &path;
- int i, nr_tracepoints = 0;
+ struct perf_evsel *pos;
+ int nr_tracepoints = 0;
- for (i = 0; i < nb_events; i++) {
- if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
+ list_for_each_entry(pos, pattrs, node) {
+ if (pos->attr.type != PERF_TYPE_TRACEPOINT)
continue;
++nr_tracepoints;
- ppath->next = tracepoint_id_to_path(pattrs[i].config);
- if (!ppath->next)
- die("%s\n", "No memory to alloc tracepoints list");
+
+ if (pos->name) {
+ ppath->next = tracepoint_name_to_path(pos->name);
+ if (ppath->next)
+ goto next;
+
+ if (strchr(pos->name, ':') == NULL)
+ goto try_id;
+
+ goto error;
+ }
+
+try_id:
+ ppath->next = tracepoint_id_to_path(pos->attr.config);
+ if (!ppath->next) {
+error:
+ pr_debug("No memory to alloc tracepoints list\n");
+ put_tracepoints_path(&path);
+ return NULL;
+ }
+next:
ppath = ppath->next;
}
return nr_tracepoints > 0 ? path.next : NULL;
}
-bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events)
+bool have_tracepoints(struct list_head *pattrs)
{
- int i;
+ struct perf_evsel *pos;
- for (i = 0; i < nb_events; i++)
- if (pattrs[i].type == PERF_TYPE_TRACEPOINT)
+ list_for_each_entry(pos, pattrs, node)
+ if (pos->attr.type == PERF_TYPE_TRACEPOINT)
return true;
return false;
}
-int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
+static int tracing_data_header(void)
{
- char buf[BUFSIZ];
- struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events);
-
- /*
- * What? No tracepoints? No sense writing anything here, bail out.
- */
- if (tps == NULL)
- return -1;
-
- output_fd = fd;
+ char buf[20];
+ ssize_t size;
+ /* just guessing this is someone's birthday.. ;) */
buf[0] = 23;
buf[1] = 8;
buf[2] = 68;
memcpy(buf + 3, "tracing", 7);
- write_or_die(buf, 10);
+ if (write(output_fd, buf, 10) != 10)
+ return -1;
- write_or_die(VERSION, strlen(VERSION) + 1);
+ size = strlen(VERSION) + 1;
+ if (write(output_fd, VERSION, size) != size)
+ return -1;
/* save endian */
if (bigendian())
@@ -526,38 +476,125 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
else
buf[0] = 0;
- write_or_die(buf, 1);
+ if (write(output_fd, buf, 1) != 1)
+ return -1;
/* save size of long */
buf[0] = sizeof(long);
- write_or_die(buf, 1);
+ if (write(output_fd, buf, 1) != 1)
+ return -1;
/* save page_size */
- page_size = sysconf(_SC_PAGESIZE);
- write_or_die(&page_size, 4);
-
- read_header_files();
- read_ftrace_files(tps);
- read_event_files(tps);
- read_proc_kallsyms();
- read_ftrace_printk();
+ if (write(output_fd, &page_size, 4) != 4)
+ return -1;
return 0;
}
-ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
- int nb_events)
+struct tracing_data *tracing_data_get(struct list_head *pattrs,
+ int fd, bool temp)
+{
+ struct tracepoint_path *tps;
+ struct tracing_data *tdata;
+ int err;
+
+ output_fd = fd;
+
+ tps = get_tracepoints_path(pattrs);
+ if (!tps)
+ return NULL;
+
+ tdata = malloc(sizeof(*tdata));
+ if (!tdata)
+ return NULL;
+
+ tdata->temp = temp;
+ tdata->size = 0;
+
+ if (temp) {
+ int temp_fd;
+
+ snprintf(tdata->temp_file, sizeof(tdata->temp_file),
+ "/tmp/perf-XXXXXX");
+ if (mkstemp(tdata->temp_file) < 0) {
+ pr_debug("Can't make temp file");
+ return NULL;
+ }
+
+ temp_fd = open(tdata->temp_file, O_RDWR);
+ if (temp_fd < 0) {
+ pr_debug("Can't read '%s'", tdata->temp_file);
+ return NULL;
+ }
+
+ /*
+ * Set the temp file as the default output, so all the
+ * tracing data are stored into it.
+ */
+ output_fd = temp_fd;
+ }
+
+ err = tracing_data_header();
+ if (err)
+ goto out;
+ err = record_header_files();
+ if (err)
+ goto out;
+ err = record_ftrace_files(tps);
+ if (err)
+ goto out;
+ err = record_event_files(tps);
+ if (err)
+ goto out;
+ err = record_proc_kallsyms();
+ if (err)
+ goto out;
+ err = record_ftrace_printk();
+
+out:
+ /*
+ * All tracing data are stored by now; we can restore
+ * the default output file in case we used a temp file.
+ */
+ if (temp) {
+ tdata->size = lseek(output_fd, 0, SEEK_CUR);
+ close(output_fd);
+ output_fd = fd;
+ }
+
+ if (err)
+ zfree(&tdata);
+
+ put_tracepoints_path(tps);
+ return tdata;
+}
+
+int tracing_data_put(struct tracing_data *tdata)
{
- ssize_t size;
int err = 0;
- calc_data_size = 1;
- err = read_tracing_data(fd, pattrs, nb_events);
- size = calc_data_size - 1;
- calc_data_size = 0;
+ if (tdata->temp) {
+ err = record_file(tdata->temp_file, 0);
+ unlink(tdata->temp_file);
+ }
- if (err < 0)
- return err;
+ free(tdata);
+ return err;
+}
+
+int read_tracing_data(int fd, struct list_head *pattrs)
+{
+ int err;
+ struct tracing_data *tdata;
+
+ /*
+ * We work over the real file, so we can write data
+ * directly, no temp file is needed.
+ */
+ tdata = tracing_data_get(pattrs, fd, false);
+ if (!tdata)
+ return -ENOMEM;
- return size;
+ err = tracing_data_put(tdata);
+ return err;
}
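
The rework above replaces the old write_or_die() based read_tracing_data() with a get/put pair, so callers can either stream the tracepoint format data straight into the output fd or stage it in a temp file first when the total size must be known up front. A hedged sketch of a caller; the evlist and fd names are placeholders:

	struct tracing_data *tdata;

	/* temp == true: stage in /tmp/perf-XXXXXX, size available in tdata->size */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (tdata == NULL)
		return -ENOMEM;

	/* ... emit whatever header fields need tdata->size ... */

	tracing_data_put(tdata);	/* appends the staged data to fd and frees tdata */
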
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 73a02223c62..c36636fd825 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -17,2161 +17,211 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The parts for function graph printing was taken and modified from the
- * Linux Kernel that were written by Frederic Weisbecker.
*/
-#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
-#undef _GNU_SOURCE
#include "../perf.h"
#include "util.h"
#include "trace-event.h"
-int header_page_ts_offset;
-int header_page_ts_size;
-int header_page_size_offset;
-int header_page_size_size;
-int header_page_overwrite_offset;
-int header_page_overwrite_size;
-int header_page_data_offset;
-int header_page_data_size;
-
-bool latency_format;
-
-static char *input_buf;
-static unsigned long long input_buf_ptr;
-static unsigned long long input_buf_siz;
-
-static int cpus;
-static int long_size;
-static int is_flag_field;
-static int is_symbolic_field;
-
-static struct format_field *
-find_any_field(struct event *event, const char *name);
-
-static void init_input_buf(char *buf, unsigned long long size)
-{
- input_buf = buf;
- input_buf_siz = size;
- input_buf_ptr = 0;
-}
-
-struct cmdline {
- char *comm;
- int pid;
-};
-
-static struct cmdline *cmdlines;
-static int cmdline_count;
-
-static int cmdline_cmp(const void *a, const void *b)
-{
- const struct cmdline *ca = a;
- const struct cmdline *cb = b;
-
- if (ca->pid < cb->pid)
- return -1;
- if (ca->pid > cb->pid)
- return 1;
-
- return 0;
-}
-
-void parse_cmdlines(char *file, int size __unused)
+static int get_common_field(struct scripting_context *context,
+ int *offset, int *size, const char *type)
{
- struct cmdline_list {
- struct cmdline_list *next;
- char *comm;
- int pid;
- } *list = NULL, *item;
- char *line;
- char *next = NULL;
- int i;
-
- line = strtok_r(file, "\n", &next);
- while (line) {
- item = malloc_or_die(sizeof(*item));
- sscanf(line, "%d %as", &item->pid,
- (float *)(void *)&item->comm); /* workaround gcc warning */
- item->next = list;
- list = item;
- line = strtok_r(NULL, "\n", &next);
- cmdline_count++;
- }
+ struct pevent *pevent = context->pevent;
+ struct event_format *event;
+ struct format_field *field;
- cmdlines = malloc_or_die(sizeof(*cmdlines) * cmdline_count);
+ if (!*size) {
+ if (!pevent->events)
+ return 0;
- i = 0;
- while (list) {
- cmdlines[i].pid = list->pid;
- cmdlines[i].comm = list->comm;
- i++;
- item = list;
- list = list->next;
- free(item);
+ event = pevent->events[0];
+ field = pevent_find_common_field(event, type);
+ if (!field)
+ return 0;
+ *offset = field->offset;
+ *size = field->size;
}
- qsort(cmdlines, cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+ return pevent_read_number(pevent, context->event_data + *offset, *size);
}
-static struct func_map {
- unsigned long long addr;
- char *func;
- char *mod;
-} *func_list;
-static unsigned int func_count;
-
-static int func_cmp(const void *a, const void *b)
+int common_lock_depth(struct scripting_context *context)
{
- const struct func_map *fa = a;
- const struct func_map *fb = b;
+ static int offset;
+ static int size;
+ int ret;
- if (fa->addr < fb->addr)
+ ret = get_common_field(context, &size, &offset,
+ "common_lock_depth");
+ if (ret < 0)
return -1;
- if (fa->addr > fb->addr)
- return 1;
- return 0;
+ return ret;
}
-void parse_proc_kallsyms(char *file, unsigned int size __unused)
+int common_flags(struct scripting_context *context)
{
- struct func_list {
- struct func_list *next;
- unsigned long long addr;
- char *func;
- char *mod;
- } *list = NULL, *item;
- char *line;
- char *next = NULL;
- char *addr_str;
- char ch;
+ static int offset;
+ static int size;
int ret;
- int i;
-
- line = strtok_r(file, "\n", &next);
- while (line) {
- item = malloc_or_die(sizeof(*item));
- item->mod = NULL;
- ret = sscanf(line, "%as %c %as\t[%as",
- (float *)(void *)&addr_str, /* workaround gcc warning */
- &ch,
- (float *)(void *)&item->func,
- (float *)(void *)&item->mod);
- item->addr = strtoull(addr_str, NULL, 16);
- free(addr_str);
-
- /* truncate the extra ']' */
- if (item->mod)
- item->mod[strlen(item->mod) - 1] = 0;
+ ret = get_common_field(context, &size, &offset,
+ "common_flags");
+ if (ret < 0)
+ return -1;
- item->next = list;
- list = item;
- line = strtok_r(NULL, "\n", &next);
- func_count++;
- }
-
- func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
-
- i = 0;
- while (list) {
- func_list[i].func = list->func;
- func_list[i].addr = list->addr;
- func_list[i].mod = list->mod;
- i++;
- item = list;
- list = list->next;
- free(item);
- }
-
- qsort(func_list, func_count, sizeof(*func_list), func_cmp);
-
- /*
- * Add a special record at the end.
- */
- func_list[func_count].func = NULL;
- func_list[func_count].addr = 0;
- func_list[func_count].mod = NULL;
+ return ret;
}
-/*
- * We are searching for a record in between, not an exact
- * match.
- */
-static int func_bcmp(const void *a, const void *b)
+int common_pc(struct scripting_context *context)
{
- const struct func_map *fa = a;
- const struct func_map *fb = b;
-
- if ((fa->addr == fb->addr) ||
-
- (fa->addr > fb->addr &&
- fa->addr < (fb+1)->addr))
- return 0;
+ static int offset;
+ static int size;
+ int ret;
- if (fa->addr < fb->addr)
+ ret = get_common_field(context, &size, &offset,
+ "common_preempt_count");
+ if (ret < 0)
return -1;
- return 1;
+ return ret;
}
-static struct func_map *find_func(unsigned long long addr)
+unsigned long long
+raw_field_value(struct event_format *event, const char *name, void *data)
{
- struct func_map *func;
- struct func_map key;
+ struct format_field *field;
+ unsigned long long val;
- key.addr = addr;
+ field = pevent_find_any_field(event, name);
+ if (!field)
+ return 0ULL;
- func = bsearch(&key, func_list, func_count, sizeof(*func_list),
- func_bcmp);
+ pevent_read_number_field(field, data, &val);
- return func;
+ return val;
}
-void print_funcs(void)
+unsigned long long read_size(struct event_format *event, void *ptr, int size)
{
- int i;
-
- for (i = 0; i < (int)func_count; i++) {
- printf("%016llx %s",
- func_list[i].addr,
- func_list[i].func);
- if (func_list[i].mod)
- printf(" [%s]\n", func_list[i].mod);
- else
- printf("\n");
- }
+ return pevent_read_number(event->pevent, ptr, size);
}
-static struct printk_map {
- unsigned long long addr;
- char *printk;
-} *printk_list;
-static unsigned int printk_count;
-
-static int printk_cmp(const void *a, const void *b)
+void event_format__print(struct event_format *event,
+ int cpu, void *data, int size)
{
- const struct func_map *fa = a;
- const struct func_map *fb = b;
+ struct pevent_record record;
+ struct trace_seq s;
- if (fa->addr < fb->addr)
- return -1;
- if (fa->addr > fb->addr)
- return 1;
+ memset(&record, 0, sizeof(record));
+ record.cpu = cpu;
+ record.size = size;
+ record.data = data;
- return 0;
+ trace_seq_init(&s);
+ pevent_event_info(&s, event, &record);
+ trace_seq_do_printf(&s);
+ trace_seq_destroy(&s);
}
-static struct printk_map *find_printk(unsigned long long addr)
+void parse_proc_kallsyms(struct pevent *pevent,
+ char *file, unsigned int size __maybe_unused)
{
- struct printk_map *printk;
- struct printk_map key;
-
- key.addr = addr;
+ unsigned long long addr;
+ char *func;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ char *mod;
+ char *fmt = NULL;
- printk = bsearch(&key, printk_list, printk_count, sizeof(*printk_list),
- printk_cmp);
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ mod = NULL;
+ addr_str = strtok_r(line, " ", &fmt);
+ addr = strtoull(addr_str, NULL, 16);
+ /* skip character */
+ strtok_r(NULL, " ", &fmt);
+ func = strtok_r(NULL, "\t", &fmt);
+ mod = strtok_r(NULL, "]", &fmt);
+ /* truncate the extra '[' */
+ if (mod)
+ mod = mod + 1;
+
+ pevent_register_function(pevent, func, addr, mod);
- return printk;
+ line = strtok_r(NULL, "\n", &next);
+ }
}
-void parse_ftrace_printk(char *file, unsigned int size __unused)
+void parse_ftrace_printk(struct pevent *pevent,
+ char *file, unsigned int size __maybe_unused)
{
- struct printk_list {
- struct printk_list *next;
- unsigned long long addr;
- char *printk;
- } *list = NULL, *item;
+ unsigned long long addr;
+ char *printk;
char *line;
char *next = NULL;
char *addr_str;
- int i;
+ char *fmt;
line = strtok_r(file, "\n", &next);
while (line) {
- addr_str = strsep(&line, ":");
- if (!line) {
- warning("error parsing print strings");
+ addr_str = strtok_r(line, ":", &fmt);
+ if (!addr_str) {
+ warning("printk format with empty entry");
break;
}
- item = malloc_or_die(sizeof(*item));
- item->addr = strtoull(addr_str, NULL, 16);
+ addr = strtoull(addr_str, NULL, 16);
/* fmt still has a space, skip it */
- item->printk = strdup(line+1);
- item->next = list;
- list = item;
+ printk = strdup(fmt+1);
line = strtok_r(NULL, "\n", &next);
- printk_count++;
- }
-
- printk_list = malloc_or_die(sizeof(*printk_list) * printk_count + 1);
-
- i = 0;
- while (list) {
- printk_list[i].printk = list->printk;
- printk_list[i].addr = list->addr;
- i++;
- item = list;
- list = list->next;
- free(item);
- }
-
- qsort(printk_list, printk_count, sizeof(*printk_list), printk_cmp);
-}
-
-void print_printk(void)
-{
- int i;
-
- for (i = 0; i < (int)printk_count; i++) {
- printf("%016llx %s\n",
- printk_list[i].addr,
- printk_list[i].printk);
- }
-}
-
-static struct event *alloc_event(void)
-{
- struct event *event;
-
- event = malloc_or_die(sizeof(*event));
- memset(event, 0, sizeof(*event));
-
- return event;
-}
-
-enum event_type {
- EVENT_ERROR,
- EVENT_NONE,
- EVENT_SPACE,
- EVENT_NEWLINE,
- EVENT_OP,
- EVENT_DELIM,
- EVENT_ITEM,
- EVENT_DQUOTE,
- EVENT_SQUOTE,
-};
-
-static struct event *event_list;
-
-static void add_event(struct event *event)
-{
- event->next = event_list;
- event_list = event;
-}
-
-static int event_item_type(enum event_type type)
-{
- switch (type) {
- case EVENT_ITEM ... EVENT_SQUOTE:
- return 1;
- case EVENT_ERROR ... EVENT_DELIM:
- default:
- return 0;
- }
-}
-
-static void free_arg(struct print_arg *arg)
-{
- if (!arg)
- return;
-
- switch (arg->type) {
- case PRINT_ATOM:
- if (arg->atom.atom)
- free(arg->atom.atom);
- break;
- case PRINT_NULL:
- case PRINT_FIELD ... PRINT_OP:
- default:
- /* todo */
- break;
- }
-
- free(arg);
-}
-
-static enum event_type get_type(int ch)
-{
- if (ch == '\n')
- return EVENT_NEWLINE;
- if (isspace(ch))
- return EVENT_SPACE;
- if (isalnum(ch) || ch == '_')
- return EVENT_ITEM;
- if (ch == '\'')
- return EVENT_SQUOTE;
- if (ch == '"')
- return EVENT_DQUOTE;
- if (!isprint(ch))
- return EVENT_NONE;
- if (ch == '(' || ch == ')' || ch == ',')
- return EVENT_DELIM;
-
- return EVENT_OP;
-}
-
-static int __read_char(void)
-{
- if (input_buf_ptr >= input_buf_siz)
- return -1;
-
- return input_buf[input_buf_ptr++];
-}
-
-static int __peek_char(void)
-{
- if (input_buf_ptr >= input_buf_siz)
- return -1;
-
- return input_buf[input_buf_ptr];
-}
-
-static enum event_type __read_token(char **tok)
-{
- char buf[BUFSIZ];
- int ch, last_ch, quote_ch, next_ch;
- int i = 0;
- int tok_size = 0;
- enum event_type type;
-
- *tok = NULL;
-
-
- ch = __read_char();
- if (ch < 0)
- return EVENT_NONE;
-
- type = get_type(ch);
- if (type == EVENT_NONE)
- return type;
-
- buf[i++] = ch;
-
- switch (type) {
- case EVENT_NEWLINE:
- case EVENT_DELIM:
- *tok = malloc_or_die(2);
- (*tok)[0] = ch;
- (*tok)[1] = 0;
- return type;
-
- case EVENT_OP:
- switch (ch) {
- case '-':
- next_ch = __peek_char();
- if (next_ch == '>') {
- buf[i++] = __read_char();
- break;
- }
- /* fall through */
- case '+':
- case '|':
- case '&':
- case '>':
- case '<':
- last_ch = ch;
- ch = __peek_char();
- if (ch != last_ch)
- goto test_equal;
- buf[i++] = __read_char();
- switch (last_ch) {
- case '>':
- case '<':
- goto test_equal;
- default:
- break;
- }
- break;
- case '!':
- case '=':
- goto test_equal;
- default: /* what should we do instead? */
- break;
- }
- buf[i] = 0;
- *tok = strdup(buf);
- return type;
-
- test_equal:
- ch = __peek_char();
- if (ch == '=')
- buf[i++] = __read_char();
- break;
-
- case EVENT_DQUOTE:
- case EVENT_SQUOTE:
- /* don't keep quotes */
- i--;
- quote_ch = ch;
- last_ch = 0;
- do {
- if (i == (BUFSIZ - 1)) {
- buf[i] = 0;
- if (*tok) {
- *tok = realloc(*tok, tok_size + BUFSIZ);
- if (!*tok)
- return EVENT_NONE;
- strcat(*tok, buf);
- } else
- *tok = strdup(buf);
-
- if (!*tok)
- return EVENT_NONE;
- tok_size += BUFSIZ;
- i = 0;
- }
- last_ch = ch;
- ch = __read_char();
- buf[i++] = ch;
- /* the '\' '\' will cancel itself */
- if (ch == '\\' && last_ch == '\\')
- last_ch = 0;
- } while (ch != quote_ch || last_ch == '\\');
- /* remove the last quote */
- i--;
- goto out;
-
- case EVENT_ERROR ... EVENT_SPACE:
- case EVENT_ITEM:
- default:
- break;
- }
-
- while (get_type(__peek_char()) == type) {
- if (i == (BUFSIZ - 1)) {
- buf[i] = 0;
- if (*tok) {
- *tok = realloc(*tok, tok_size + BUFSIZ);
- if (!*tok)
- return EVENT_NONE;
- strcat(*tok, buf);
- } else
- *tok = strdup(buf);
-
- if (!*tok)
- return EVENT_NONE;
- tok_size += BUFSIZ;
- i = 0;
- }
- ch = __read_char();
- buf[i++] = ch;
- }
-
- out:
- buf[i] = 0;
- if (*tok) {
- *tok = realloc(*tok, tok_size + i);
- if (!*tok)
- return EVENT_NONE;
- strcat(*tok, buf);
- } else
- *tok = strdup(buf);
- if (!*tok)
- return EVENT_NONE;
-
- return type;
-}
-
-static void free_token(char *tok)
-{
- if (tok)
- free(tok);
-}
-
-static enum event_type read_token(char **tok)
-{
- enum event_type type;
-
- for (;;) {
- type = __read_token(tok);
- if (type != EVENT_SPACE)
- return type;
-
- free_token(*tok);
+ pevent_register_print_string(pevent, printk, addr);
}
-
- /* not reached */
- return EVENT_NONE;
}
-/* no newline */
-static enum event_type read_token_item(char **tok)
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size)
{
- enum event_type type;
-
- for (;;) {
- type = __read_token(tok);
- if (type != EVENT_SPACE && type != EVENT_NEWLINE)
- return type;
-
- free_token(*tok);
- }
-
- /* not reached */
- return EVENT_NONE;
+ return pevent_parse_event(pevent, buf, size, "ftrace");
}
-static int test_type(enum event_type type, enum event_type expect)
+int parse_event_file(struct pevent *pevent,
+ char *buf, unsigned long size, char *sys)
{
- if (type != expect) {
- warning("Error: expected type %d but read %d",
- expect, type);
- return -1;
- }
- return 0;
+ return pevent_parse_event(pevent, buf, size, sys);
}
-static int __test_type_token(enum event_type type, char *token,
- enum event_type expect, const char *expect_tok,
- bool warn)
+struct event_format *trace_find_next_event(struct pevent *pevent,
+ struct event_format *event)
{
- if (type != expect) {
- if (warn)
- warning("Error: expected type %d but read %d",
- expect, type);
- return -1;
- }
+ static int idx;
- if (strcmp(token, expect_tok) != 0) {
- if (warn)
- warning("Error: expected '%s' but read '%s'",
- expect_tok, token);
- return -1;
- }
- return 0;
-}
-
-static int test_type_token(enum event_type type, char *token,
- enum event_type expect, const char *expect_tok)
-{
- return __test_type_token(type, token, expect, expect_tok, true);
-}
-
-static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
-{
- enum event_type type;
-
- if (newline_ok)
- type = read_token(tok);
- else
- type = read_token_item(tok);
- return test_type(type, expect);
-}
-
-static int read_expect_type(enum event_type expect, char **tok)
-{
- return __read_expect_type(expect, tok, 1);
-}
-
-static int __read_expected(enum event_type expect, const char *str,
- int newline_ok, bool warn)
-{
- enum event_type type;
- char *token;
- int ret;
-
- if (newline_ok)
- type = read_token(&token);
- else
- type = read_token_item(&token);
-
- ret = __test_type_token(type, token, expect, str, warn);
-
- free_token(token);
-
- return ret;
-}
-
-static int read_expected(enum event_type expect, const char *str)
-{
- return __read_expected(expect, str, 1, true);
-}
-
-static int read_expected_item(enum event_type expect, const char *str)
-{
- return __read_expected(expect, str, 0, true);
-}
-
-static char *event_read_name(void)
-{
- char *token;
-
- if (read_expected(EVENT_ITEM, "name") < 0)
+ if (!pevent || !pevent->events)
return NULL;
- if (read_expected(EVENT_OP, ":") < 0)
- return NULL;
-
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
-
- return token;
-
- fail:
- free_token(token);
- return NULL;
-}
-
-static int event_read_id(void)
-{
- char *token;
- int id;
-
- if (read_expected_item(EVENT_ITEM, "ID") < 0)
- return -1;
-
- if (read_expected(EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
-
- id = strtoul(token, NULL, 0);
- free_token(token);
- return id;
-
- fail:
- free_token(token);
- return -1;
-}
-
-static int field_is_string(struct format_field *field)
-{
- if ((field->flags & FIELD_IS_ARRAY) &&
- (!strstr(field->type, "char") || !strstr(field->type, "u8") ||
- !strstr(field->type, "s8")))
- return 1;
-
- return 0;
-}
-
-static int field_is_dynamic(struct format_field *field)
-{
- if (!strncmp(field->type, "__data_loc", 10))
- return 1;
-
- return 0;
-}
-
-static int event_read_fields(struct event *event, struct format_field **fields)
-{
- struct format_field *field = NULL;
- enum event_type type;
- char *token;
- char *last_token;
- int count = 0;
-
- do {
- type = read_token(&token);
- if (type == EVENT_NEWLINE) {
- free_token(token);
- return count;
- }
-
- count++;
-
- if (test_type_token(type, token, EVENT_ITEM, "field"))
- goto fail;
- free_token(token);
-
- type = read_token(&token);
- /*
- * The ftrace fields may still use the "special" name.
- * Just ignore it.
- */
- if (event->flags & EVENT_FL_ISFTRACE &&
- type == EVENT_ITEM && strcmp(token, "special") == 0) {
- free_token(token);
- type = read_token(&token);
- }
-
- if (test_type_token(type, token, EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
-
- last_token = token;
-
- field = malloc_or_die(sizeof(*field));
- memset(field, 0, sizeof(*field));
-
- /* read the rest of the type */
- for (;;) {
- type = read_token(&token);
- if (type == EVENT_ITEM ||
- (type == EVENT_OP && strcmp(token, "*") == 0) ||
- /*
- * Some of the ftrace fields are broken and have
- * an illegal "." in them.
- */
- (event->flags & EVENT_FL_ISFTRACE &&
- type == EVENT_OP && strcmp(token, ".") == 0)) {
-
- if (strcmp(token, "*") == 0)
- field->flags |= FIELD_IS_POINTER;
-
- if (field->type) {
- field->type = realloc(field->type,
- strlen(field->type) +
- strlen(last_token) + 2);
- strcat(field->type, " ");
- strcat(field->type, last_token);
- } else
- field->type = last_token;
- last_token = token;
- continue;
- }
-
- break;
- }
-
- if (!field->type) {
- die("no type found");
- goto fail;
- }
- field->name = last_token;
-
- if (test_type(type, EVENT_OP))
- goto fail;
-
- if (strcmp(token, "[") == 0) {
- enum event_type last_type = type;
- char *brackets = token;
- int len;
-
- field->flags |= FIELD_IS_ARRAY;
-
- type = read_token(&token);
- while (strcmp(token, "]") != 0) {
- if (last_type == EVENT_ITEM &&
- type == EVENT_ITEM)
- len = 2;
- else
- len = 1;
- last_type = type;
-
- brackets = realloc(brackets,
- strlen(brackets) +
- strlen(token) + len);
- if (len == 2)
- strcat(brackets, " ");
- strcat(brackets, token);
- free_token(token);
- type = read_token(&token);
- if (type == EVENT_NONE) {
- die("failed to find token");
- goto fail;
- }
- }
-
- free_token(token);
-
- brackets = realloc(brackets, strlen(brackets) + 2);
- strcat(brackets, "]");
-
- /* add brackets to type */
-
- type = read_token(&token);
- /*
- * If the next token is not an OP, then it is of
- * the format: type [] item;
- */
- if (type == EVENT_ITEM) {
- field->type = realloc(field->type,
- strlen(field->type) +
- strlen(field->name) +
- strlen(brackets) + 2);
- strcat(field->type, " ");
- strcat(field->type, field->name);
- free_token(field->name);
- strcat(field->type, brackets);
- field->name = token;
- type = read_token(&token);
- } else {
- field->type = realloc(field->type,
- strlen(field->type) +
- strlen(brackets) + 1);
- strcat(field->type, brackets);
- }
- free(brackets);
- }
-
- if (field_is_string(field)) {
- field->flags |= FIELD_IS_STRING;
- if (field_is_dynamic(field))
- field->flags |= FIELD_IS_DYNAMIC;
- }
-
- if (test_type_token(type, token, EVENT_OP, ";"))
- goto fail;
- free_token(token);
-
- if (read_expected(EVENT_ITEM, "offset") < 0)
- goto fail_expect;
-
- if (read_expected(EVENT_OP, ":") < 0)
- goto fail_expect;
-
- if (read_expect_type(EVENT_ITEM, &token))
- goto fail;
- field->offset = strtoul(token, NULL, 0);
- free_token(token);
-
- if (read_expected(EVENT_OP, ";") < 0)
- goto fail_expect;
-
- if (read_expected(EVENT_ITEM, "size") < 0)
- goto fail_expect;
-
- if (read_expected(EVENT_OP, ":") < 0)
- goto fail_expect;
-
- if (read_expect_type(EVENT_ITEM, &token))
- goto fail;
- field->size = strtoul(token, NULL, 0);
- free_token(token);
-
- if (read_expected(EVENT_OP, ";") < 0)
- goto fail_expect;
-
- type = read_token(&token);
- if (type != EVENT_NEWLINE) {
- /* newer versions of the kernel have a "signed" type */
- if (test_type_token(type, token, EVENT_ITEM, "signed"))
- goto fail;
-
- free_token(token);
-
- if (read_expected(EVENT_OP, ":") < 0)
- goto fail_expect;
-
- if (read_expect_type(EVENT_ITEM, &token))
- goto fail;
-
- if (strtoul(token, NULL, 0))
- field->flags |= FIELD_IS_SIGNED;
-
- free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
- goto fail_expect;
-
- if (read_expect_type(EVENT_NEWLINE, &token))
- goto fail;
- }
-
- free_token(token);
-
- *fields = field;
- fields = &field->next;
-
- } while (1);
-
- return 0;
-
-fail:
- free_token(token);
-fail_expect:
- if (field)
- free(field);
- return -1;
-}
-
-static int event_read_format(struct event *event)
-{
- char *token;
- int ret;
-
- if (read_expected_item(EVENT_ITEM, "format") < 0)
- return -1;
-
- if (read_expected(EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(EVENT_NEWLINE, &token))
- goto fail;
- free_token(token);
-
- ret = event_read_fields(event, &event->format.common_fields);
- if (ret < 0)
- return ret;
- event->format.nr_common = ret;
-
- ret = event_read_fields(event, &event->format.fields);
- if (ret < 0)
- return ret;
- event->format.nr_fields = ret;
-
- return 0;
-
- fail:
- free_token(token);
- return -1;
-}
-
-enum event_type
-process_arg_token(struct event *event, struct print_arg *arg,
- char **tok, enum event_type type);
-
-static enum event_type
-process_arg(struct event *event, struct print_arg *arg, char **tok)
-{
- enum event_type type;
- char *token;
-
- type = read_token(&token);
- *tok = token;
-
- return process_arg_token(event, arg, tok, type);
-}
-
-static enum event_type
-process_cond(struct event *event, struct print_arg *top, char **tok)
-{
- struct print_arg *arg, *left, *right;
- enum event_type type;
- char *token = NULL;
-
- arg = malloc_or_die(sizeof(*arg));
- memset(arg, 0, sizeof(*arg));
-
- left = malloc_or_die(sizeof(*left));
-
- right = malloc_or_die(sizeof(*right));
-
- arg->type = PRINT_OP;
- arg->op.left = left;
- arg->op.right = right;
-
- *tok = NULL;
- type = process_arg(event, left, &token);
- if (test_type_token(type, token, EVENT_OP, ":"))
- goto out_free;
-
- arg->op.op = token;
-
- type = process_arg(event, right, &token);
-
- top->op.right = arg;
-
- *tok = token;
- return type;
-
-out_free:
- free_token(*tok);
- free(right);
- free(left);
- free_arg(arg);
- return EVENT_ERROR;
-}
-
-static enum event_type
-process_array(struct event *event, struct print_arg *top, char **tok)
-{
- struct print_arg *arg;
- enum event_type type;
- char *token = NULL;
-
- arg = malloc_or_die(sizeof(*arg));
- memset(arg, 0, sizeof(*arg));
-
- *tok = NULL;
- type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_OP, "]"))
- goto out_free;
-
- top->op.right = arg;
-
- free_token(token);
- type = read_token_item(&token);
- *tok = token;
-
- return type;
-
-out_free:
- free_token(*tok);
- free_arg(arg);
- return EVENT_ERROR;
-}
-
-static int get_op_prio(char *op)
-{
- if (!op[1]) {
- switch (op[0]) {
- case '*':
- case '/':
- case '%':
- return 6;
- case '+':
- case '-':
- return 7;
- /* '>>' and '<<' are 8 */
- case '<':
- case '>':
- return 9;
- /* '==' and '!=' are 10 */
- case '&':
- return 11;
- case '^':
- return 12;
- case '|':
- return 13;
- case '?':
- return 16;
- default:
- die("unknown op '%c'", op[0]);
- return -1;
- }
- } else {
- if (strcmp(op, "++") == 0 ||
- strcmp(op, "--") == 0) {
- return 3;
- } else if (strcmp(op, ">>") == 0 ||
- strcmp(op, "<<") == 0) {
- return 8;
- } else if (strcmp(op, ">=") == 0 ||
- strcmp(op, "<=") == 0) {
- return 9;
- } else if (strcmp(op, "==") == 0 ||
- strcmp(op, "!=") == 0) {
- return 10;
- } else if (strcmp(op, "&&") == 0) {
- return 14;
- } else if (strcmp(op, "||") == 0) {
- return 15;
- } else {
- die("unknown op '%s'", op);
- return -1;
- }
- }
-}
-
-static void set_op_prio(struct print_arg *arg)
-{
-
- /* single ops are the greatest */
- if (!arg->op.left || arg->op.left->type == PRINT_NULL) {
- arg->op.prio = 0;
- return;
- }
-
- arg->op.prio = get_op_prio(arg->op.op);
-}
-
-static enum event_type
-process_op(struct event *event, struct print_arg *arg, char **tok)
-{
- struct print_arg *left, *right = NULL;
- enum event_type type;
- char *token;
-
- /* the op is passed in via tok */
- token = *tok;
-
- if (arg->type == PRINT_OP && !arg->op.left) {
- /* handle single op */
- if (token[1]) {
- die("bad op token %s", token);
- return EVENT_ERROR;
- }
- switch (token[0]) {
- case '!':
- case '+':
- case '-':
- break;
- default:
- die("bad op token %s", token);
- return EVENT_ERROR;
- }
-
- /* make an empty left */
- left = malloc_or_die(sizeof(*left));
- left->type = PRINT_NULL;
- arg->op.left = left;
-
- right = malloc_or_die(sizeof(*right));
- arg->op.right = right;
-
- type = process_arg(event, right, tok);
-
- } else if (strcmp(token, "?") == 0) {
-
- left = malloc_or_die(sizeof(*left));
- /* copy the top arg to the left */
- *left = *arg;
-
- arg->type = PRINT_OP;
- arg->op.op = token;
- arg->op.left = left;
- arg->op.prio = 0;
-
- type = process_cond(event, arg, tok);
-
- } else if (strcmp(token, ">>") == 0 ||
- strcmp(token, "<<") == 0 ||
- strcmp(token, "&") == 0 ||
- strcmp(token, "|") == 0 ||
- strcmp(token, "&&") == 0 ||
- strcmp(token, "||") == 0 ||
- strcmp(token, "-") == 0 ||
- strcmp(token, "+") == 0 ||
- strcmp(token, "*") == 0 ||
- strcmp(token, "^") == 0 ||
- strcmp(token, "/") == 0 ||
- strcmp(token, "<") == 0 ||
- strcmp(token, ">") == 0 ||
- strcmp(token, "==") == 0 ||
- strcmp(token, "!=") == 0) {
-
- left = malloc_or_die(sizeof(*left));
-
- /* copy the top arg to the left */
- *left = *arg;
-
- arg->type = PRINT_OP;
- arg->op.op = token;
- arg->op.left = left;
-
- set_op_prio(arg);
-
- right = malloc_or_die(sizeof(*right));
-
- type = read_token_item(&token);
- *tok = token;
-
- /* could just be a type pointer */
- if ((strcmp(arg->op.op, "*") == 0) &&
- type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
- if (left->type != PRINT_ATOM)
- die("bad pointer type");
- left->atom.atom = realloc(left->atom.atom,
- sizeof(left->atom.atom) + 3);
- strcat(left->atom.atom, " *");
- *arg = *left;
- free(arg);
-
- return type;
- }
-
- type = process_arg_token(event, right, tok, type);
-
- arg->op.right = right;
-
- } else if (strcmp(token, "[") == 0) {
-
- left = malloc_or_die(sizeof(*left));
- *left = *arg;
-
- arg->type = PRINT_OP;
- arg->op.op = token;
- arg->op.left = left;
-
- arg->op.prio = 0;
- type = process_array(event, arg, tok);
-
- } else {
- warning("unknown op '%s'", token);
- event->flags |= EVENT_FL_FAILED;
- /* the arg is now the left side */
- return EVENT_NONE;
- }
-
- if (type == EVENT_OP) {
- int prio;
-
- /* higher prios need to be closer to the root */
- prio = get_op_prio(*tok);
-
- if (prio > arg->op.prio)
- return process_op(event, arg, tok);
-
- return process_op(event, right, tok);
- }
-
- return type;
-}
-
-static enum event_type
-process_entry(struct event *event __unused, struct print_arg *arg,
- char **tok)
-{
- enum event_type type;
- char *field;
- char *token;
-
- if (read_expected(EVENT_OP, "->") < 0)
- return EVENT_ERROR;
-
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
- field = token;
-
- arg->type = PRINT_FIELD;
- arg->field.name = field;
-
- if (is_flag_field) {
- arg->field.field = find_any_field(event, arg->field.name);
- arg->field.field->flags |= FIELD_IS_FLAG;
- is_flag_field = 0;
- } else if (is_symbolic_field) {
- arg->field.field = find_any_field(event, arg->field.name);
- arg->field.field->flags |= FIELD_IS_SYMBOLIC;
- is_symbolic_field = 0;
+ if (!event) {
+ idx = 0;
+ return pevent->events[0];
}
- type = read_token(&token);
- *tok = token;
-
- return type;
-
-fail:
- free_token(token);
- return EVENT_ERROR;
-}
-
-static char *arg_eval (struct print_arg *arg);
-
-static long long arg_num_eval(struct print_arg *arg)
-{
- long long left, right;
- long long val = 0;
-
- switch (arg->type) {
- case PRINT_ATOM:
- val = strtoll(arg->atom.atom, NULL, 0);
- break;
- case PRINT_TYPE:
- val = arg_num_eval(arg->typecast.item);
- break;
- case PRINT_OP:
- switch (arg->op.op[0]) {
- case '|':
- left = arg_num_eval(arg->op.left);
- right = arg_num_eval(arg->op.right);
- if (arg->op.op[1])
- val = left || right;
- else
- val = left | right;
- break;
- case '&':
- left = arg_num_eval(arg->op.left);
- right = arg_num_eval(arg->op.right);
- if (arg->op.op[1])
- val = left && right;
- else
- val = left & right;
- break;
- case '<':
- left = arg_num_eval(arg->op.left);
- right = arg_num_eval(arg->op.right);
- switch (arg->op.op[1]) {
- case 0:
- val = left < right;
- break;
- case '<':
- val = left << right;
- break;
- case '=':
- val = left <= right;
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
- case '>':
- left = arg_num_eval(arg->op.left);
- right = arg_num_eval(arg->op.right);
- switch (arg->op.op[1]) {
- case 0:
- val = left > right;
- break;
- case '>':
- val = left >> right;
- break;
- case '=':
- val = left >= right;
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
- case '=':
- left = arg_num_eval(arg->op.left);
- right = arg_num_eval(arg->op.right);
-
- if (arg->op.op[1] != '=')
- die("unknown op '%s'", arg->op.op);
-
- val = left == right;
- break;
- case '!':
- left = arg_num_eval(arg->op.left);
- right = arg_num_eval(arg->op.right);
-
- switch (arg->op.op[1]) {
- case '=':
- val = left != right;
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
-
- case PRINT_NULL:
- case PRINT_FIELD ... PRINT_SYMBOL:
- case PRINT_STRING:
- default:
- die("invalid eval type %d", arg->type);
-
+ if (idx < pevent->nr_events && event == pevent->events[idx]) {
+ idx++;
+ if (idx == pevent->nr_events)
+ return NULL;
+ return pevent->events[idx];
}
- return val;
-}
-
-static char *arg_eval (struct print_arg *arg)
-{
- long long val;
- static char buf[20];
-
- switch (arg->type) {
- case PRINT_ATOM:
- return arg->atom.atom;
- case PRINT_TYPE:
- return arg_eval(arg->typecast.item);
- case PRINT_OP:
- val = arg_num_eval(arg);
- sprintf(buf, "%lld", val);
- return buf;
- case PRINT_NULL:
- case PRINT_FIELD ... PRINT_SYMBOL:
- case PRINT_STRING:
- default:
- die("invalid eval type %d", arg->type);
- break;
+ for (idx = 1; idx < pevent->nr_events; idx++) {
+ if (event == pevent->events[idx - 1])
+ return pevent->events[idx];
}
-
return NULL;
}
-static enum event_type
-process_fields(struct event *event, struct print_flag_sym **list, char **tok)
-{
- enum event_type type;
- struct print_arg *arg = NULL;
- struct print_flag_sym *field;
- char *token = NULL;
- char *value;
-
- do {
- free_token(token);
- type = read_token_item(&token);
- if (test_type_token(type, token, EVENT_OP, "{"))
- break;
-
- arg = malloc_or_die(sizeof(*arg));
-
- free_token(token);
- type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
-
- field = malloc_or_die(sizeof(*field));
- memset(field, 0, sizeof(*field));
-
- value = arg_eval(arg);
- field->value = strdup(value);
-
- free_token(token);
- type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_OP, "}"))
- goto out_free;
-
- value = arg_eval(arg);
- field->str = strdup(value);
- free_arg(arg);
- arg = NULL;
-
- *list = field;
- list = &field->next;
-
- free_token(token);
- type = read_token_item(&token);
- } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
-
- *tok = token;
- return type;
-
-out_free:
- free_arg(arg);
- free_token(token);
-
- return EVENT_ERROR;
-}
-
-static enum event_type
-process_flags(struct event *event, struct print_arg *arg, char **tok)
-{
- struct print_arg *field;
- enum event_type type;
- char *token;
-
- memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_FLAGS;
-
- if (read_expected_item(EVENT_DELIM, "(") < 0)
- return EVENT_ERROR;
-
- field = malloc_or_die(sizeof(*field));
-
- type = process_arg(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
-
- arg->flags.field = field;
-
- type = read_token_item(&token);
- if (event_item_type(type)) {
- arg->flags.delim = token;
- type = read_token_item(&token);
- }
-
- if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
-
- type = process_fields(event, &arg->flags.flags, &token);
- if (test_type_token(type, token, EVENT_DELIM, ")"))
- goto out_free;
-
- free_token(token);
- type = read_token_item(tok);
- return type;
-
-out_free:
- free_token(token);
- return EVENT_ERROR;
-}
-
-static enum event_type
-process_symbols(struct event *event, struct print_arg *arg, char **tok)
-{
- struct print_arg *field;
- enum event_type type;
- char *token;
-
- memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_SYMBOL;
-
- if (read_expected_item(EVENT_DELIM, "(") < 0)
- return EVENT_ERROR;
-
- field = malloc_or_die(sizeof(*field));
-
- type = process_arg(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, ","))
- goto out_free;
-
- arg->symbol.field = field;
-
- type = process_fields(event, &arg->symbol.symbols, &token);
- if (test_type_token(type, token, EVENT_DELIM, ")"))
- goto out_free;
-
- free_token(token);
- type = read_token_item(tok);
- return type;
-
-out_free:
- free_token(token);
- return EVENT_ERROR;
-}
-
-static enum event_type
-process_paren(struct event *event, struct print_arg *arg, char **tok)
-{
- struct print_arg *item_arg;
- enum event_type type;
- char *token;
-
- type = process_arg(event, arg, &token);
-
- if (type == EVENT_ERROR)
- return EVENT_ERROR;
-
- if (type == EVENT_OP)
- type = process_op(event, arg, &token);
-
- if (type == EVENT_ERROR)
- return EVENT_ERROR;
-
- if (test_type_token(type, token, EVENT_DELIM, ")")) {
- free_token(token);
- return EVENT_ERROR;
- }
-
- free_token(token);
- type = read_token_item(&token);
-
- /*
- * If the next token is an item or another open paren, then
- * this was a typecast.
- */
- if (event_item_type(type) ||
- (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
-
- /* make this a typecast and continue */
-
- /* previous must be an atom */
- if (arg->type != PRINT_ATOM)
- die("previous needed to be PRINT_ATOM");
-
- item_arg = malloc_or_die(sizeof(*item_arg));
-
- arg->type = PRINT_TYPE;
- arg->typecast.type = arg->atom.atom;
- arg->typecast.item = item_arg;
- type = process_arg_token(event, item_arg, &token, type);
-
- }
-
- *tok = token;
- return type;
-}
-
-
-static enum event_type
-process_str(struct event *event __unused, struct print_arg *arg, char **tok)
-{
- enum event_type type;
- char *token;
-
- if (read_expected(EVENT_DELIM, "(") < 0)
- return EVENT_ERROR;
-
- if (read_expect_type(EVENT_ITEM, &token) < 0)
- goto fail;
-
- arg->type = PRINT_STRING;
- arg->string.string = token;
- arg->string.offset = -1;
-
- if (read_expected(EVENT_DELIM, ")") < 0)
- return EVENT_ERROR;
-
- type = read_token(&token);
- *tok = token;
-
- return type;
-fail:
- free_token(token);
- return EVENT_ERROR;
-}
-
-enum event_type
-process_arg_token(struct event *event, struct print_arg *arg,
- char **tok, enum event_type type)
-{
- char *token;
- char *atom;
-
- token = *tok;
-
- switch (type) {
- case EVENT_ITEM:
- if (strcmp(token, "REC") == 0) {
- free_token(token);
- type = process_entry(event, arg, &token);
- } else if (strcmp(token, "__print_flags") == 0) {
- free_token(token);
- is_flag_field = 1;
- type = process_flags(event, arg, &token);
- } else if (strcmp(token, "__print_symbolic") == 0) {
- free_token(token);
- is_symbolic_field = 1;
- type = process_symbols(event, arg, &token);
- } else if (strcmp(token, "__get_str") == 0) {
- free_token(token);
- type = process_str(event, arg, &token);
- } else {
- atom = token;
- /* test the next token */
- type = read_token_item(&token);
-
- /* atoms can be more than one token long */
- while (type == EVENT_ITEM) {
- atom = realloc(atom, strlen(atom) + strlen(token) + 2);
- strcat(atom, " ");
- strcat(atom, token);
- free_token(token);
- type = read_token_item(&token);
- }
-
- /* todo, test for function */
-
- arg->type = PRINT_ATOM;
- arg->atom.atom = atom;
- }
- break;
- case EVENT_DQUOTE:
- case EVENT_SQUOTE:
- arg->type = PRINT_ATOM;
- arg->atom.atom = token;
- type = read_token_item(&token);
- break;
- case EVENT_DELIM:
- if (strcmp(token, "(") == 0) {
- free_token(token);
- type = process_paren(event, arg, &token);
- break;
- }
- case EVENT_OP:
- /* handle single ops */
- arg->type = PRINT_OP;
- arg->op.op = token;
- arg->op.left = NULL;
- type = process_op(event, arg, &token);
-
- break;
-
- case EVENT_ERROR ... EVENT_NEWLINE:
- default:
- die("unexpected type %d", type);
- }
- *tok = token;
-
- return type;
-}
-
-static int event_read_print_args(struct event *event, struct print_arg **list)
-{
- enum event_type type = EVENT_ERROR;
- struct print_arg *arg;
- char *token;
- int args = 0;
-
- do {
- if (type == EVENT_NEWLINE) {
- free_token(token);
- type = read_token_item(&token);
- continue;
- }
-
- arg = malloc_or_die(sizeof(*arg));
- memset(arg, 0, sizeof(*arg));
-
- type = process_arg(event, arg, &token);
-
- if (type == EVENT_ERROR) {
- free_arg(arg);
- return -1;
- }
-
- *list = arg;
- args++;
-
- if (type == EVENT_OP) {
- type = process_op(event, arg, &token);
- list = &arg->next;
- continue;
- }
-
- if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
- free_token(token);
- *list = arg;
- list = &arg->next;
- continue;
- }
- break;
- } while (type != EVENT_NONE);
-
- if (type != EVENT_NONE)
- free_token(token);
-
- return args;
-}
-
-static int event_read_print(struct event *event)
-{
- enum event_type type;
- char *token;
- int ret;
-
- if (read_expected_item(EVENT_ITEM, "print") < 0)
- return -1;
-
- if (read_expected(EVENT_ITEM, "fmt") < 0)
- return -1;
-
- if (read_expected(EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(EVENT_DQUOTE, &token) < 0)
- goto fail;
-
- concat:
- event->print_fmt.format = token;
- event->print_fmt.args = NULL;
-
- /* ok to have no arg */
- type = read_token_item(&token);
-
- if (type == EVENT_NONE)
- return 0;
-
- /* Handle concatenation of print lines */
- if (type == EVENT_DQUOTE) {
- char *cat;
-
- cat = malloc_or_die(strlen(event->print_fmt.format) +
- strlen(token) + 1);
- strcpy(cat, event->print_fmt.format);
- strcat(cat, token);
- free_token(token);
- free_token(event->print_fmt.format);
- event->print_fmt.format = NULL;
- token = cat;
- goto concat;
- }
-
- if (test_type_token(type, token, EVENT_DELIM, ","))
- goto fail;
-
- free_token(token);
-
- ret = event_read_print_args(event, &event->print_fmt.args);
- if (ret < 0)
- return -1;
-
- return ret;
-
- fail:
- free_token(token);
- return -1;
-}
-
-static struct format_field *
-find_common_field(struct event *event, const char *name)
-{
- struct format_field *format;
-
- for (format = event->format.common_fields;
- format; format = format->next) {
- if (strcmp(format->name, name) == 0)
- break;
- }
-
- return format;
-}
-
-static struct format_field *
-find_field(struct event *event, const char *name)
-{
- struct format_field *format;
-
- for (format = event->format.fields;
- format; format = format->next) {
- if (strcmp(format->name, name) == 0)
- break;
- }
-
- return format;
-}
-
-static struct format_field *
-find_any_field(struct event *event, const char *name)
-{
- struct format_field *format;
-
- format = find_common_field(event, name);
- if (format)
- return format;
- return find_field(event, name);
-}
-
-unsigned long long read_size(void *ptr, int size)
-{
- switch (size) {
- case 1:
- return *(unsigned char *)ptr;
- case 2:
- return data2host2(ptr);
- case 4:
- return data2host4(ptr);
- case 8:
- return data2host8(ptr);
- default:
- /* BUG! */
- return 0;
- }
-}
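
read_size() relies on the data2host*() byte-order macros defined elsewhere in perf. The same idea as one self-contained helper, assuming a little-endian host (swap_needed would come from comparing the trace file's byte order with the host's):

#include <string.h>

static unsigned long long read_n(const void *ptr, int size, int swap_needed)
{
	unsigned long long val = 0;

	memcpy(&val, ptr, size);	/* raw field lands in the low bytes */
	if (swap_needed)		/* file and host byte order differ */
		val = __builtin_bswap64(val) >> (64 - 8 * size);
	return val;
}
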
-
-unsigned long long
-raw_field_value(struct event *event, const char *name, void *data)
-{
- struct format_field *field;
-
- field = find_any_field(event, name);
- if (!field)
- return 0ULL;
-
- return read_size(data + field->offset, field->size);
-}
-
-void *raw_field_ptr(struct event *event, const char *name, void *data)
-{
- struct format_field *field;
-
- field = find_any_field(event, name);
- if (!field)
- return NULL;
-
- if (field->flags & FIELD_IS_DYNAMIC) {
- int offset;
-
- offset = *(int *)(data + field->offset);
- offset &= 0xffff;
-
- return data + offset;
- }
-
- return data + field->offset;
-}
-
-static int get_common_info(const char *type, int *offset, int *size)
-{
- struct event *event;
- struct format_field *field;
-
- /*
- * All events should have the same common elements.
- * Pick any event to find where the type is;
- */
- if (!event_list)
- die("no event_list!");
-
- event = event_list;
- field = find_common_field(event, type);
- if (!field)
- die("field '%s' not found", type);
-
- *offset = field->offset;
- *size = field->size;
-
- return 0;
-}
-
-static int __parse_common(void *data, int *size, int *offset,
- const char *name)
-{
- int ret;
-
- if (!*size) {
- ret = get_common_info(name, offset, size);
- if (ret < 0)
- return ret;
- }
- return read_size(data + *offset, *size);
-}
-
-int trace_parse_common_type(void *data)
-{
- static int type_offset;
- static int type_size;
-
- return __parse_common(data, &type_size, &type_offset,
- "common_type");
-}
-
-int trace_parse_common_pid(void *data)
-{
- static int pid_offset;
- static int pid_size;
-
- return __parse_common(data, &pid_size, &pid_offset,
- "common_pid");
-}
-
-int parse_common_pc(void *data)
-{
- static int pc_offset;
- static int pc_size;
-
- return __parse_common(data, &pc_size, &pc_offset,
- "common_preempt_count");
-}
-
-int parse_common_flags(void *data)
-{
- static int flags_offset;
- static int flags_size;
-
- return __parse_common(data, &flags_size, &flags_offset,
- "common_flags");
-}
-
-int parse_common_lock_depth(void *data)
-{
- static int ld_offset;
- static int ld_size;
- int ret;
-
- ret = __parse_common(data, &ld_size, &ld_offset,
- "common_lock_depth");
- if (ret < 0)
- return -1;
-
- return ret;
-}
-
-struct event *trace_find_event(int id)
-{
- struct event *event;
-
- for (event = event_list; event; event = event->next) {
- if (event->id == id)
- break;
- }
- return event;
-}
-
-struct event *trace_find_next_event(struct event *event)
-{
- if (!event)
- return event_list;
-
- return event->next;
-}
-
-static unsigned long long eval_num_arg(void *data, int size,
- struct event *event, struct print_arg *arg)
-{
- unsigned long long val = 0;
- unsigned long long left, right;
- struct print_arg *larg;
-
- switch (arg->type) {
- case PRINT_NULL:
- /* ?? */
- return 0;
- case PRINT_ATOM:
- return strtoull(arg->atom.atom, NULL, 0);
- case PRINT_FIELD:
- if (!arg->field.field) {
- arg->field.field = find_any_field(event, arg->field.name);
- if (!arg->field.field)
- die("field %s not found", arg->field.name);
- }
- /* must be a number */
- val = read_size(data + arg->field.field->offset,
- arg->field.field->size);
- break;
- case PRINT_FLAGS:
- case PRINT_SYMBOL:
- break;
- case PRINT_TYPE:
- return eval_num_arg(data, size, event, arg->typecast.item);
- case PRINT_STRING:
- return 0;
- break;
- case PRINT_OP:
- if (strcmp(arg->op.op, "[") == 0) {
- /*
- * Arrays are special, since we don't want
- * to read the arg as is.
- */
- if (arg->op.left->type != PRINT_FIELD)
- goto default_op; /* oops, all bets off */
- larg = arg->op.left;
- if (!larg->field.field) {
- larg->field.field =
- find_any_field(event, larg->field.name);
- if (!larg->field.field)
- die("field %s not found", larg->field.name);
- }
- right = eval_num_arg(data, size, event, arg->op.right);
- val = read_size(data + larg->field.field->offset +
- right * long_size, long_size);
- break;
- }
- default_op:
- left = eval_num_arg(data, size, event, arg->op.left);
- right = eval_num_arg(data, size, event, arg->op.right);
- switch (arg->op.op[0]) {
- case '|':
- if (arg->op.op[1])
- val = left || right;
- else
- val = left | right;
- break;
- case '&':
- if (arg->op.op[1])
- val = left && right;
- else
- val = left & right;
- break;
- case '<':
- switch (arg->op.op[1]) {
- case 0:
- val = left < right;
- break;
- case '<':
- val = left << right;
- break;
- case '=':
- val = left <= right;
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
- case '>':
- switch (arg->op.op[1]) {
- case 0:
- val = left > right;
- break;
- case '>':
- val = left >> right;
- break;
- case '=':
- val = left >= right;
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
- case '=':
- if (arg->op.op[1] != '=')
- die("unknown op '%s'", arg->op.op);
- val = left == right;
- break;
- case '-':
- val = left - right;
- break;
- case '+':
- val = left + right;
- break;
- default:
- die("unknown op '%s'", arg->op.op);
- }
- break;
- default: /* not sure what to do there */
- return 0;
- }
- return val;
-}
-
struct flag {
const char *name;
unsigned long long value;
@@ -2211,1023 +261,3 @@ unsigned long long eval_flag(const char *flag)
return 0;
}
-
-static void print_str_arg(void *data, int size,
- struct event *event, struct print_arg *arg)
-{
- struct print_flag_sym *flag;
- unsigned long long val, fval;
- char *str;
- int print;
-
- switch (arg->type) {
- case PRINT_NULL:
- /* ?? */
- return;
- case PRINT_ATOM:
- printf("%s", arg->atom.atom);
- return;
- case PRINT_FIELD:
- if (!arg->field.field) {
- arg->field.field = find_any_field(event, arg->field.name);
- if (!arg->field.field)
- die("field %s not found", arg->field.name);
- }
- str = malloc_or_die(arg->field.field->size + 1);
- memcpy(str, data + arg->field.field->offset,
- arg->field.field->size);
- str[arg->field.field->size] = 0;
- printf("%s", str);
- free(str);
- break;
- case PRINT_FLAGS:
- val = eval_num_arg(data, size, event, arg->flags.field);
- print = 0;
- for (flag = arg->flags.flags; flag; flag = flag->next) {
- fval = eval_flag(flag->value);
- if (!val && !fval) {
- printf("%s", flag->str);
- break;
- }
- if (fval && (val & fval) == fval) {
- if (print && arg->flags.delim)
- printf("%s", arg->flags.delim);
- printf("%s", flag->str);
- print = 1;
- val &= ~fval;
- }
- }
- break;
- case PRINT_SYMBOL:
- val = eval_num_arg(data, size, event, arg->symbol.field);
- for (flag = arg->symbol.symbols; flag; flag = flag->next) {
- fval = eval_flag(flag->value);
- if (val == fval) {
- printf("%s", flag->str);
- break;
- }
- }
- break;
-
- case PRINT_TYPE:
- break;
- case PRINT_STRING: {
- int str_offset;
-
- if (arg->string.offset == -1) {
- struct format_field *f;
-
- f = find_any_field(event, arg->string.string);
- arg->string.offset = f->offset;
- }
- str_offset = *(int *)(data + arg->string.offset);
- str_offset &= 0xffff;
- printf("%s", ((char *)data) + str_offset);
- break;
- }
- case PRINT_OP:
- /*
- * The only op for string should be ? :
- */
- if (arg->op.op[0] != '?')
- return;
- val = eval_num_arg(data, size, event, arg->op.left);
- if (val)
- print_str_arg(data, size, event, arg->op.right->op.left);
- else
- print_str_arg(data, size, event, arg->op.right->op.right);
- break;
- default:
- /* well... */
- break;
- }
-}
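
The PRINT_FLAGS branch above is what renders a __print_flags() bitmask as delimiter-joined flag names. The same loop in miniature, over a hypothetical symbol table:

#include <stdio.h>

struct flag_sym { unsigned long long val; const char *name; };

static void print_flag_names(unsigned long long v, const struct flag_sym *syms,
			     int nr_syms, const char *delim)
{
	int printed = 0;
	int i;

	for (i = 0; i < nr_syms; i++) {
		if (syms[i].val && (v & syms[i].val) == syms[i].val) {
			printf("%s%s", printed ? delim : "", syms[i].name);
			printed = 1;
			v &= ~syms[i].val;	/* consume the matched bits */
		}
	}
}
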
-
-static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event *event)
-{
- static struct format_field *field, *ip_field;
- struct print_arg *args, *arg, **next;
- unsigned long long ip, val;
- char *ptr;
- void *bptr;
-
- if (!field) {
- field = find_field(event, "buf");
- if (!field)
- die("can't find buffer field for binary printk");
- ip_field = find_field(event, "ip");
- if (!ip_field)
- die("can't find ip field for binary printk");
- }
-
- ip = read_size(data + ip_field->offset, ip_field->size);
-
- /*
- * The first arg is the IP pointer.
- */
- args = malloc_or_die(sizeof(*args));
- arg = args;
- arg->next = NULL;
- next = &arg->next;
-
- arg->type = PRINT_ATOM;
- arg->atom.atom = malloc_or_die(32);
- sprintf(arg->atom.atom, "%lld", ip);
-
- /* skip the first "%pf : " */
- for (ptr = fmt + 6, bptr = data + field->offset;
- bptr < data + size && *ptr; ptr++) {
- int ls = 0;
-
- if (*ptr == '%') {
- process_again:
- ptr++;
- switch (*ptr) {
- case '%':
- break;
- case 'l':
- ls++;
- goto process_again;
- case 'L':
- ls = 2;
- goto process_again;
- case '0' ... '9':
- goto process_again;
- case 'p':
- ls = 1;
- /* fall through */
- case 'd':
- case 'u':
- case 'x':
- case 'i':
- /* the pointers are always 4 bytes aligned */
- bptr = (void *)(((unsigned long)bptr + 3) &
- ~3);
- switch (ls) {
- case 0:
- case 1:
- ls = long_size;
- break;
- case 2:
- ls = 8;
- default:
- break;
- }
- val = read_size(bptr, ls);
- bptr += ls;
- arg = malloc_or_die(sizeof(*arg));
- arg->next = NULL;
- arg->type = PRINT_ATOM;
- arg->atom.atom = malloc_or_die(32);
- sprintf(arg->atom.atom, "%lld", val);
- *next = arg;
- next = &arg->next;
- break;
- case 's':
- arg = malloc_or_die(sizeof(*arg));
- arg->next = NULL;
- arg->type = PRINT_STRING;
- arg->string.string = strdup(bptr);
- bptr += strlen(bptr) + 1;
- *next = arg;
- next = &arg->next;
- default:
- break;
- }
- }
- }
-
- return args;
-}
-
-static void free_args(struct print_arg *args)
-{
- struct print_arg *next;
-
- while (args) {
- next = args->next;
-
- if (args->type == PRINT_ATOM)
- free(args->atom.atom);
- else
- free(args->string.string);
- free(args);
- args = next;
- }
-}
-
-static char *get_bprint_format(void *data, int size __unused, struct event *event)
-{
- unsigned long long addr;
- static struct format_field *field;
- struct printk_map *printk;
- char *format;
- char *p;
-
- if (!field) {
- field = find_field(event, "fmt");
- if (!field)
- die("can't find format field for binary printk");
- printf("field->offset = %d size=%d\n", field->offset, field->size);
- }
-
- addr = read_size(data + field->offset, field->size);
-
- printk = find_printk(addr);
- if (!printk) {
- format = malloc_or_die(45);
- sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
- addr);
- return format;
- }
-
- p = printk->printk;
- /* Remove any quotes. */
- if (*p == '"')
- p++;
- format = malloc_or_die(strlen(p) + 10);
- sprintf(format, "%s : %s", "%pf", p);
- /* remove ending quotes and new line since we will add one too */
- p = format + strlen(format) - 1;
- if (*p == '"')
- *p = 0;
-
- p -= 2;
- if (strcmp(p, "\\n") == 0)
- *p = 0;
-
- return format;
-}
-
-static void pretty_print(void *data, int size, struct event *event)
-{
- struct print_fmt *print_fmt = &event->print_fmt;
- struct print_arg *arg = print_fmt->args;
- struct print_arg *args = NULL;
- const char *ptr = print_fmt->format;
- unsigned long long val;
- struct func_map *func;
- const char *saveptr;
- char *bprint_fmt = NULL;
- char format[32];
- int show_func;
- int len;
- int ls;
-
- if (event->flags & EVENT_FL_ISFUNC)
- ptr = " %pF <-- %pF";
-
- if (event->flags & EVENT_FL_ISBPRINT) {
- bprint_fmt = get_bprint_format(data, size, event);
- args = make_bprint_args(bprint_fmt, data, size, event);
- arg = args;
- ptr = bprint_fmt;
- }
-
- for (; *ptr; ptr++) {
- ls = 0;
- if (*ptr == '\\') {
- ptr++;
- switch (*ptr) {
- case 'n':
- printf("\n");
- break;
- case 't':
- printf("\t");
- break;
- case 'r':
- printf("\r");
- break;
- case '\\':
- printf("\\");
- break;
- default:
- printf("%c", *ptr);
- break;
- }
-
- } else if (*ptr == '%') {
- saveptr = ptr;
- show_func = 0;
- cont_process:
- ptr++;
- switch (*ptr) {
- case '%':
- printf("%%");
- break;
- case 'l':
- ls++;
- goto cont_process;
- case 'L':
- ls = 2;
- goto cont_process;
- case 'z':
- case 'Z':
- case '0' ... '9':
- goto cont_process;
- case 'p':
- if (long_size == 4)
- ls = 1;
- else
- ls = 2;
-
- if (*(ptr+1) == 'F' ||
- *(ptr+1) == 'f') {
- ptr++;
- show_func = *ptr;
- }
-
- /* fall through */
- case 'd':
- case 'i':
- case 'x':
- case 'X':
- case 'u':
- if (!arg)
- die("no argument match");
-
- len = ((unsigned long)ptr + 1) -
- (unsigned long)saveptr;
-
- /* should never happen */
- if (len > 32)
- die("bad format!");
-
- memcpy(format, saveptr, len);
- format[len] = 0;
-
- val = eval_num_arg(data, size, event, arg);
- arg = arg->next;
-
- if (show_func) {
- func = find_func(val);
- if (func) {
- printf("%s", func->func);
- if (show_func == 'F')
- printf("+0x%llx",
- val - func->addr);
- break;
- }
- }
- switch (ls) {
- case 0:
- printf(format, (int)val);
- break;
- case 1:
- printf(format, (long)val);
- break;
- case 2:
- printf(format, (long long)val);
- break;
- default:
- die("bad count (%d)", ls);
- }
- break;
- case 's':
- if (!arg)
- die("no matching argument");
-
- print_str_arg(data, size, event, arg);
- arg = arg->next;
- break;
- default:
- printf(">%c<", *ptr);
-
- }
- } else
- printf("%c", *ptr);
- }
-
- if (args) {
- free_args(args);
- free(bprint_fmt);
- }
-}
-
-static inline int log10_cpu(int nb)
-{
- if (nb / 100)
- return 3;
- if (nb / 10)
- return 2;
- return 1;
-}
-
-static void print_lat_fmt(void *data, int size __unused)
-{
- unsigned int lat_flags;
- unsigned int pc;
- int lock_depth;
- int hardirq;
- int softirq;
-
- lat_flags = parse_common_flags(data);
- pc = parse_common_pc(data);
- lock_depth = parse_common_lock_depth(data);
-
- hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
- softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
-
- printf("%c%c%c",
- (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
- (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
- 'X' : '.',
- (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
- 'N' : '.',
- (hardirq && softirq) ? 'H' :
- hardirq ? 'h' : softirq ? 's' : '.');
-
- if (pc)
- printf("%x", pc);
- else
- printf(".");
-
- if (lock_depth < 0)
- printf(".");
- else
- printf("%d", lock_depth);
-}
-
-/* taken from Linux, written by Frederic Weisbecker */
-static void print_graph_cpu(int cpu)
-{
- int i;
- int log10_this = log10_cpu(cpu);
- int log10_all = log10_cpu(cpus);
-
-
- /*
- * Start with a space character - to make it stand out
- * to the right a bit when trace output is pasted into
- * email:
- */
- printf(" ");
-
- /*
- * Tricky - we space the CPU field according to the max
- * number of online CPUs. On a 2-cpu system it would take
- * a maximum of 1 digit - on a 128 cpu system it would
- * take up to 3 digits:
- */
- for (i = 0; i < log10_all - log10_this; i++)
- printf(" ");
-
- printf("%d) ", cpu);
-}
-
-#define TRACE_GRAPH_PROCINFO_LENGTH 14
-#define TRACE_GRAPH_INDENT 2
-
-static void print_graph_proc(int pid, const char *comm)
-{
- /* sign + log10(MAX_INT) + '\0' */
- char pid_str[11];
- int spaces = 0;
- int len;
- int i;
-
- sprintf(pid_str, "%d", pid);
-
- /* 1 stands for the "-" character */
- len = strlen(comm) + strlen(pid_str) + 1;
-
- if (len < TRACE_GRAPH_PROCINFO_LENGTH)
- spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
-
- /* First spaces to align center */
- for (i = 0; i < spaces / 2; i++)
- printf(" ");
-
- printf("%s-%s", comm, pid_str);
-
- /* Last spaces to align center */
- for (i = 0; i < spaces - (spaces / 2); i++)
- printf(" ");
-}
-
-static struct record *
-get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
- struct record *next)
-{
- struct format_field *field;
- struct event *event;
- unsigned long val;
- int type;
- int pid;
-
- type = trace_parse_common_type(next->data);
- event = trace_find_event(type);
- if (!event)
- return NULL;
-
- if (!(event->flags & EVENT_FL_ISFUNCRET))
- return NULL;
-
- pid = trace_parse_common_pid(next->data);
- field = find_field(event, "func");
- if (!field)
- die("function return does not have field func");
-
- val = read_size(next->data + field->offset, field->size);
-
- if (cur_pid != pid || cur_func != val)
- return NULL;
-
- /* this is a leaf, now advance the iterator */
- return trace_read_data(cpu);
-}
-
-/* Signal an overhead of execution time to the output */
-static void print_graph_overhead(unsigned long long duration)
-{
- /* Non nested entry or return */
- if (duration == ~0ULL)
- return (void)printf(" ");
-
- /* Duration exceeded 100 msecs */
- if (duration > 100000ULL)
- return (void)printf("! ");
-
- /* Duration exceeded 10 msecs */
- if (duration > 10000ULL)
- return (void)printf("+ ");
-
- printf(" ");
-}
-
-static void print_graph_duration(unsigned long long duration)
-{
- unsigned long usecs = duration / 1000;
- unsigned long nsecs_rem = duration % 1000;
- /* log10(ULONG_MAX) + '\0' */
- char msecs_str[21];
- char nsecs_str[5];
- int len;
- int i;
-
- sprintf(msecs_str, "%lu", usecs);
-
- /* Print msecs */
- len = printf("%lu", usecs);
-
- /* Print nsecs (we don't want to exceed 7 digits) */
- if (len < 7) {
- snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
- len += printf(".%s", nsecs_str);
- }
-
- printf(" us ");
-
- /* Print remaining spaces to fit the row's width */
- for (i = len; i < 7; i++)
- printf(" ");
-
- printf("| ");
-}
-
-static void
-print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
-{
- unsigned long long rettime, calltime;
- unsigned long long duration, depth;
- unsigned long long val;
- struct format_field *field;
- struct func_map *func;
- struct event *ret_event;
- int type;
- int i;
-
- type = trace_parse_common_type(ret_rec->data);
- ret_event = trace_find_event(type);
-
- field = find_field(ret_event, "rettime");
- if (!field)
- die("can't find rettime in return graph");
- rettime = read_size(ret_rec->data + field->offset, field->size);
-
- field = find_field(ret_event, "calltime");
- if (!field)
- die("can't find rettime in return graph");
- calltime = read_size(ret_rec->data + field->offset, field->size);
-
- duration = rettime - calltime;
-
- /* Overhead */
- print_graph_overhead(duration);
-
- /* Duration */
- print_graph_duration(duration);
-
- field = find_field(event, "depth");
- if (!field)
- die("can't find depth in entry graph");
- depth = read_size(data + field->offset, field->size);
-
- /* Function */
- for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
- printf(" ");
-
- field = find_field(event, "func");
- if (!field)
- die("can't find func in entry graph");
- val = read_size(data + field->offset, field->size);
- func = find_func(val);
-
- if (func)
- printf("%s();", func->func);
- else
- printf("%llx();", val);
-}
-
-static void print_graph_nested(struct event *event, void *data)
-{
- struct format_field *field;
- unsigned long long depth;
- unsigned long long val;
- struct func_map *func;
- int i;
-
- /* No overhead */
- print_graph_overhead(-1);
-
- /* No time */
- printf(" | ");
-
- field = find_field(event, "depth");
- if (!field)
- die("can't find depth in entry graph");
- depth = read_size(data + field->offset, field->size);
-
- /* Function */
- for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
- printf(" ");
-
- field = find_field(event, "func");
- if (!field)
- die("can't find func in entry graph");
- val = read_size(data + field->offset, field->size);
- func = find_func(val);
-
- if (func)
- printf("%s() {", func->func);
- else
- printf("%llx() {", val);
-}
-
-static void
-pretty_print_func_ent(void *data, int size, struct event *event,
- int cpu, int pid, const char *comm,
- unsigned long secs, unsigned long usecs)
-{
- struct format_field *field;
- struct record *rec;
- void *copy_data;
- unsigned long val;
-
- printf("%5lu.%06lu | ", secs, usecs);
-
- print_graph_cpu(cpu);
- print_graph_proc(pid, comm);
-
- printf(" | ");
-
- if (latency_format) {
- print_lat_fmt(data, size);
- printf(" | ");
- }
-
- field = find_field(event, "func");
- if (!field)
- die("function entry does not have func field");
-
- val = read_size(data + field->offset, field->size);
-
- /*
- * peek_data may unmap the data pointer. Copy it first.
- */
- copy_data = malloc_or_die(size);
- memcpy(copy_data, data, size);
- data = copy_data;
-
- rec = trace_peek_data(cpu);
- if (rec) {
- rec = get_return_for_leaf(cpu, pid, val, rec);
- if (rec) {
- print_graph_entry_leaf(event, data, rec);
- goto out_free;
- }
- }
- print_graph_nested(event, data);
-out_free:
- free(data);
-}
-
-static void
-pretty_print_func_ret(void *data, int size __unused, struct event *event,
- int cpu, int pid, const char *comm,
- unsigned long secs, unsigned long usecs)
-{
- unsigned long long rettime, calltime;
- unsigned long long duration, depth;
- struct format_field *field;
- int i;
-
- printf("%5lu.%06lu | ", secs, usecs);
-
- print_graph_cpu(cpu);
- print_graph_proc(pid, comm);
-
- printf(" | ");
-
- if (latency_format) {
- print_lat_fmt(data, size);
- printf(" | ");
- }
-
- field = find_field(event, "rettime");
- if (!field)
- die("can't find rettime in return graph");
- rettime = read_size(data + field->offset, field->size);
-
- field = find_field(event, "calltime");
- if (!field)
- die("can't find calltime in return graph");
- calltime = read_size(data + field->offset, field->size);
-
- duration = rettime - calltime;
-
- /* Overhead */
- print_graph_overhead(duration);
-
- /* Duration */
- print_graph_duration(duration);
-
- field = find_field(event, "depth");
- if (!field)
- die("can't find depth in entry graph");
- depth = read_size(data + field->offset, field->size);
-
- /* Function */
- for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
- printf(" ");
-
- printf("}");
-}
-
-static void
-pretty_print_func_graph(void *data, int size, struct event *event,
- int cpu, int pid, const char *comm,
- unsigned long secs, unsigned long usecs)
-{
- if (event->flags & EVENT_FL_ISFUNCENT)
- pretty_print_func_ent(data, size, event,
- cpu, pid, comm, secs, usecs);
- else if (event->flags & EVENT_FL_ISFUNCRET)
- pretty_print_func_ret(data, size, event,
- cpu, pid, comm, secs, usecs);
- printf("\n");
-}
-
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
- char *comm)
-{
- struct event *event;
- unsigned long secs;
- unsigned long usecs;
- int type;
- int pid;
-
- secs = nsecs / NSECS_PER_SEC;
- nsecs -= secs * NSECS_PER_SEC;
- usecs = nsecs / NSECS_PER_USEC;
-
- type = trace_parse_common_type(data);
-
- event = trace_find_event(type);
- if (!event) {
- warning("ug! no event found for type %d", type);
- return;
- }
-
- pid = trace_parse_common_pid(data);
-
- if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET))
- return pretty_print_func_graph(data, size, event, cpu,
- pid, comm, secs, usecs);
-
- if (latency_format) {
- printf("%8.8s-%-5d %3d",
- comm, pid, cpu);
- print_lat_fmt(data, size);
- } else
- printf("%16s-%-5d [%03d]", comm, pid, cpu);
-
- printf(" %5lu.%06lu: %s: ", secs, usecs, event->name);
-
- if (event->flags & EVENT_FL_FAILED) {
- printf("EVENT '%s' FAILED TO PARSE\n",
- event->name);
- return;
- }
-
- pretty_print(data, size, event);
- printf("\n");
-}
-
-static void print_fields(struct print_flag_sym *field)
-{
- printf("{ %s, %s }", field->value, field->str);
- if (field->next) {
- printf(", ");
- print_fields(field->next);
- }
-}
-
-static void print_args(struct print_arg *args)
-{
- int print_paren = 1;
-
- switch (args->type) {
- case PRINT_NULL:
- printf("null");
- break;
- case PRINT_ATOM:
- printf("%s", args->atom.atom);
- break;
- case PRINT_FIELD:
- printf("REC->%s", args->field.name);
- break;
- case PRINT_FLAGS:
- printf("__print_flags(");
- print_args(args->flags.field);
- printf(", %s, ", args->flags.delim);
- print_fields(args->flags.flags);
- printf(")");
- break;
- case PRINT_SYMBOL:
- printf("__print_symbolic(");
- print_args(args->symbol.field);
- printf(", ");
- print_fields(args->symbol.symbols);
- printf(")");
- break;
- case PRINT_STRING:
- printf("__get_str(%s)", args->string.string);
- break;
- case PRINT_TYPE:
- printf("(%s)", args->typecast.type);
- print_args(args->typecast.item);
- break;
- case PRINT_OP:
- if (strcmp(args->op.op, ":") == 0)
- print_paren = 0;
- if (print_paren)
- printf("(");
- print_args(args->op.left);
- printf(" %s ", args->op.op);
- print_args(args->op.right);
- if (print_paren)
- printf(")");
- break;
- default:
- /* we should warn... */
- return;
- }
- if (args->next) {
- printf("\n");
- print_args(args->next);
- }
-}
-
-int parse_ftrace_file(char *buf, unsigned long size)
-{
- struct format_field *field;
- struct print_arg *arg, **list;
- struct event *event;
- int ret;
-
- init_input_buf(buf, size);
-
- event = alloc_event();
- if (!event)
- return -ENOMEM;
-
- event->flags |= EVENT_FL_ISFTRACE;
-
- event->name = event_read_name();
- if (!event->name)
- die("failed to read ftrace event name");
-
- if (strcmp(event->name, "function") == 0)
- event->flags |= EVENT_FL_ISFUNC;
-
- else if (strcmp(event->name, "funcgraph_entry") == 0)
- event->flags |= EVENT_FL_ISFUNCENT;
-
- else if (strcmp(event->name, "funcgraph_exit") == 0)
- event->flags |= EVENT_FL_ISFUNCRET;
-
- else if (strcmp(event->name, "bprint") == 0)
- event->flags |= EVENT_FL_ISBPRINT;
-
- event->id = event_read_id();
- if (event->id < 0)
- die("failed to read ftrace event id");
-
- add_event(event);
-
- ret = event_read_format(event);
- if (ret < 0)
- die("failed to read ftrace event format");
-
- ret = event_read_print(event);
- if (ret < 0)
- die("failed to read ftrace event print fmt");
-
- /* New ftrace handles args */
- if (ret > 0)
- return 0;
- /*
- * The arguments for ftrace files are parsed by the fields.
- * Set up the fields as their arguments.
- */
- list = &event->print_fmt.args;
- for (field = event->format.fields; field; field = field->next) {
- arg = malloc_or_die(sizeof(*arg));
- memset(arg, 0, sizeof(*arg));
- *list = arg;
- list = &arg->next;
- arg->type = PRINT_FIELD;
- arg->field.name = field->name;
- arg->field.field = field;
- }
- return 0;
-}
-
-int parse_event_file(char *buf, unsigned long size, char *sys)
-{
- struct event *event;
- int ret;
-
- init_input_buf(buf, size);
-
- event = alloc_event();
- if (!event)
- return -ENOMEM;
-
- event->name = event_read_name();
- if (!event->name)
- die("failed to read event name");
-
- event->id = event_read_id();
- if (event->id < 0)
- die("failed to read event id");
-
- ret = event_read_format(event);
- if (ret < 0) {
- warning("failed to read event format for %s", event->name);
- goto event_failed;
- }
-
- ret = event_read_print(event);
- if (ret < 0) {
- warning("failed to read event print fmt for %s", event->name);
- goto event_failed;
- }
-
- event->system = strdup(sys);
-
-#define PRINT_ARGS 0
- if (PRINT_ARGS && event->print_fmt.args)
- print_args(event->print_fmt.args);
-
- add_event(event);
- return 0;
-
- event_failed:
- event->flags |= EVENT_FL_FAILED;
- /* still add it even if it failed */
- add_event(event);
- return -1;
-}
-
-void parse_set_info(int nr_cpus, int long_sz)
-{
- cpus = nr_cpus;
- long_size = long_sz;
-}
-
-int common_pc(struct scripting_context *context)
-{
- return parse_common_pc(context->event_data);
-}
-
-int common_flags(struct scripting_context *context)
-{
- return parse_common_flags(context->event_data);
-}
-
-int common_lock_depth(struct scripting_context *context)
-{
- return parse_common_lock_depth(context->event_data);
-}
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index f55cc3a765a..e113e180c48 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -18,8 +18,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#define _FILE_OFFSET_BITS 64
-
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
@@ -33,7 +31,6 @@
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
-#include <ctype.h>
#include <errno.h>
#include "../perf.h"
@@ -42,18 +39,10 @@
static int input_fd;
-static int read_page;
-
-int file_bigendian;
-int host_bigendian;
-static int long_size;
-
-static unsigned long page_size;
-
-static ssize_t calc_data_size;
+static ssize_t trace_data_size;
static bool repipe;
-static int do_read(int fd, void *buf, int size)
+static int __do_read(int fd, void *buf, int size)
{
int rsize = size;
@@ -66,8 +55,10 @@ static int do_read(int fd, void *buf, int size)
if (repipe) {
int retw = write(STDOUT_FILENO, buf, ret);
- if (retw <= 0 || retw != ret)
- die("repiping input file");
+ if (retw <= 0 || retw != ret) {
+ pr_debug("repiping input file");
+ return -1;
+ }
}
size -= ret;
@@ -77,17 +68,18 @@ static int do_read(int fd, void *buf, int size)
return rsize;
}
-static int read_or_die(void *data, int size)
+static int do_read(void *data, int size)
{
int r;
- r = do_read(input_fd, data, size);
- if (r <= 0)
- die("reading input file (size expected=%d received=%d)",
- size, r);
+ r = __do_read(input_fd, data, size);
+ if (r <= 0) {
+ pr_debug("reading input file (size expected=%d received=%d)",
+ size, r);
+ return -1;
+ }
- if (calc_data_size)
- calc_data_size += r;
+ trace_data_size += r;
return r;
}
@@ -100,25 +92,27 @@ static void skip(int size)
while (size) {
r = size > BUFSIZ ? BUFSIZ : size;
- read_or_die(buf, r);
+ do_read(buf, r);
size -= r;
};
}
-static unsigned int read4(void)
+static unsigned int read4(struct pevent *pevent)
{
unsigned int data;
- read_or_die(&data, 4);
- return __data2host4(data);
+ if (do_read(&data, 4) < 0)
+ return 0;
+ return __data2host4(pevent, data);
}
-static unsigned long long read8(void)
+static unsigned long long read8(struct pevent *pevent)
{
unsigned long long data;
- read_or_die(&data, 8);
- return __data2host8(data);
+ if (do_read(&data, 8) < 0)
+ return 0;
+ return __data2host8(pevent, data);
}
static char *read_string(void)
@@ -131,17 +125,23 @@ static char *read_string(void)
for (;;) {
r = read(input_fd, &c, 1);
- if (r < 0)
- die("reading input file");
+ if (r < 0) {
+ pr_debug("reading input file");
+ goto out;
+ }
- if (!r)
- die("no data");
+ if (!r) {
+ pr_debug("no data");
+ goto out;
+ }
if (repipe) {
int retw = write(STDOUT_FILENO, &c, 1);
- if (retw <= 0 || retw != r)
- die("repiping input file string");
+ if (retw <= 0 || retw != r) {
+ pr_debug("repiping input file string");
+ goto out;
+ }
}
buf[size++] = c;
@@ -150,335 +150,200 @@ static char *read_string(void)
break;
}
- if (calc_data_size)
- calc_data_size += size;
-
- str = malloc_or_die(size);
- memcpy(str, buf, size);
+ trace_data_size += size;
+ str = malloc(size);
+ if (str)
+ memcpy(str, buf, size);
+out:
return str;
}
-static void read_proc_kallsyms(void)
+static int read_proc_kallsyms(struct pevent *pevent)
{
unsigned int size;
char *buf;
- size = read4();
+ size = read4(pevent);
if (!size)
- return;
+ return 0;
+
+ buf = malloc(size + 1);
+ if (buf == NULL)
+ return -1;
- buf = malloc_or_die(size + 1);
- read_or_die(buf, size);
+ if (do_read(buf, size) < 0) {
+ free(buf);
+ return -1;
+ }
buf[size] = '\0';
- parse_proc_kallsyms(buf, size);
+ parse_proc_kallsyms(pevent, buf, size);
free(buf);
+ return 0;
}
-static void read_ftrace_printk(void)
+static int read_ftrace_printk(struct pevent *pevent)
{
unsigned int size;
char *buf;
- size = read4();
+ /* it can have 0 size */
+ size = read4(pevent);
if (!size)
- return;
+ return 0;
+
+ buf = malloc(size);
+ if (buf == NULL)
+ return -1;
- buf = malloc_or_die(size);
- read_or_die(buf, size);
+ if (do_read(buf, size) < 0) {
+ free(buf);
+ return -1;
+ }
- parse_ftrace_printk(buf, size);
+ parse_ftrace_printk(pevent, buf, size);
free(buf);
+ return 0;
}
-static void read_header_files(void)
+static int read_header_files(struct pevent *pevent)
{
unsigned long long size;
- char *header_event;
+ char *header_page;
char buf[BUFSIZ];
+ int ret = 0;
- read_or_die(buf, 12);
+ if (do_read(buf, 12) < 0)
+ return -1;
- if (memcmp(buf, "header_page", 12) != 0)
- die("did not read header page");
+ if (memcmp(buf, "header_page", 12) != 0) {
+ pr_debug("did not read header page");
+ return -1;
+ }
- size = read8();
- skip(size);
+ size = read8(pevent);
+
+ header_page = malloc(size);
+ if (header_page == NULL)
+ return -1;
- /*
- * The size field in the page is of type long,
- * use that instead, since it represents the kernel.
- */
- long_size = header_page_size_size;
+ if (do_read(header_page, size) < 0) {
+ pr_debug("did not read header page");
+ free(header_page);
+ return -1;
+ }
+
+ if (!pevent_parse_header_page(pevent, header_page, size,
+ pevent_get_long_size(pevent))) {
+ /*
+ * The commit field in the page is of type long,
+ * use that instead, since it represents the kernel.
+ */
+ pevent_set_long_size(pevent, pevent->header_page_size_size);
+ }
+ free(header_page);
- read_or_die(buf, 13);
- if (memcmp(buf, "header_event", 13) != 0)
- die("did not read header event");
+ if (do_read(buf, 13) < 0)
+ return -1;
- size = read8();
- header_event = malloc_or_die(size);
- read_or_die(header_event, size);
- free(header_event);
+ if (memcmp(buf, "header_event", 13) != 0) {
+ pr_debug("did not read header event");
+ return -1;
+ }
+
+ size = read8(pevent);
+ skip(size);
+
+ return ret;
}
-static void read_ftrace_file(unsigned long long size)
+static int read_ftrace_file(struct pevent *pevent, unsigned long long size)
{
char *buf;
- buf = malloc_or_die(size);
- read_or_die(buf, size);
- parse_ftrace_file(buf, size);
+ buf = malloc(size);
+ if (buf == NULL)
+ return -1;
+
+ if (do_read(buf, size) < 0) {
+ free(buf);
+ return -1;
+ }
+
+ parse_ftrace_file(pevent, buf, size);
free(buf);
+ return 0;
}
-static void read_event_file(char *sys, unsigned long long size)
+static int read_event_file(struct pevent *pevent, char *sys,
+ unsigned long long size)
{
char *buf;
- buf = malloc_or_die(size);
- read_or_die(buf, size);
- parse_event_file(buf, size, sys);
+ buf = malloc(size);
+ if (buf == NULL)
+ return -1;
+
+ if (do_read(buf, size) < 0) {
+ free(buf);
+ return -1;
+ }
+
+ parse_event_file(pevent, buf, size, sys);
free(buf);
+ return 0;
}
-static void read_ftrace_files(void)
+static int read_ftrace_files(struct pevent *pevent)
{
unsigned long long size;
int count;
int i;
+ int ret;
- count = read4();
+ count = read4(pevent);
for (i = 0; i < count; i++) {
- size = read8();
- read_ftrace_file(size);
+ size = read8(pevent);
+ ret = read_ftrace_file(pevent, size);
+ if (ret)
+ return ret;
}
+ return 0;
}
-static void read_event_files(void)
+static int read_event_files(struct pevent *pevent)
{
unsigned long long size;
char *sys;
int systems;
int count;
int i,x;
+ int ret;
- systems = read4();
+ systems = read4(pevent);
for (i = 0; i < systems; i++) {
sys = read_string();
+ if (sys == NULL)
+ return -1;
- count = read4();
- for (x=0; x < count; x++) {
- size = read8();
- read_event_file(sys, size);
- }
- }
-}
-
-struct cpu_data {
- unsigned long long offset;
- unsigned long long size;
- unsigned long long timestamp;
- struct record *next;
- char *page;
- int cpu;
- int index;
- int page_size;
-};
-
-static struct cpu_data *cpu_data;
-
-static void update_cpu_data_index(int cpu)
-{
- cpu_data[cpu].offset += page_size;
- cpu_data[cpu].size -= page_size;
- cpu_data[cpu].index = 0;
-}
-
-static void get_next_page(int cpu)
-{
- off_t save_seek;
- off_t ret;
-
- if (!cpu_data[cpu].page)
- return;
-
- if (read_page) {
- if (cpu_data[cpu].size <= page_size) {
- free(cpu_data[cpu].page);
- cpu_data[cpu].page = NULL;
- return;
- }
-
- update_cpu_data_index(cpu);
-
- /* other parts of the code may expect the pointer to not move */
- save_seek = lseek(input_fd, 0, SEEK_CUR);
-
- ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
- if (ret == (off_t)-1)
- die("failed to lseek");
- ret = read(input_fd, cpu_data[cpu].page, page_size);
- if (ret < 0)
- die("failed to read page");
-
- /* reset the file pointer back */
- lseek(input_fd, save_seek, SEEK_SET);
-
- return;
- }
-
- munmap(cpu_data[cpu].page, page_size);
- cpu_data[cpu].page = NULL;
-
- if (cpu_data[cpu].size <= page_size)
- return;
-
- update_cpu_data_index(cpu);
-
- cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
- input_fd, cpu_data[cpu].offset);
- if (cpu_data[cpu].page == MAP_FAILED)
- die("failed to mmap cpu %d at offset 0x%llx",
- cpu, cpu_data[cpu].offset);
-}
-
-static unsigned int type_len4host(unsigned int type_len_ts)
-{
- if (file_bigendian)
- return (type_len_ts >> 27) & ((1 << 5) - 1);
- else
- return type_len_ts & ((1 << 5) - 1);
-}
-
-static unsigned int ts4host(unsigned int type_len_ts)
-{
- if (file_bigendian)
- return type_len_ts & ((1 << 27) - 1);
- else
- return type_len_ts >> 5;
-}
-
-static int calc_index(void *ptr, int cpu)
-{
- return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
-}
+ count = read4(pevent);
-struct record *trace_peek_data(int cpu)
-{
- struct record *data;
- void *page = cpu_data[cpu].page;
- int idx = cpu_data[cpu].index;
- void *ptr = page + idx;
- unsigned long long extend;
- unsigned int type_len_ts;
- unsigned int type_len;
- unsigned int delta;
- unsigned int length = 0;
-
- if (cpu_data[cpu].next)
- return cpu_data[cpu].next;
-
- if (!page)
- return NULL;
-
- if (!idx) {
- /* FIXME: handle header page */
- if (header_page_ts_size != 8)
- die("expected a long long type for timestamp");
- cpu_data[cpu].timestamp = data2host8(ptr);
- ptr += 8;
- switch (header_page_size_size) {
- case 4:
- cpu_data[cpu].page_size = data2host4(ptr);
- ptr += 4;
- break;
- case 8:
- cpu_data[cpu].page_size = data2host8(ptr);
- ptr += 8;
- break;
- default:
- die("bad long size");
+ for (x=0; x < count; x++) {
+ size = read8(pevent);
+ ret = read_event_file(pevent, sys, size);
+ if (ret)
+ return ret;
}
- ptr = cpu_data[cpu].page + header_page_data_offset;
}
-
-read_again:
- idx = calc_index(ptr, cpu);
-
- if (idx >= cpu_data[cpu].page_size) {
- get_next_page(cpu);
- return trace_peek_data(cpu);
- }
-
- type_len_ts = data2host4(ptr);
- ptr += 4;
-
- type_len = type_len4host(type_len_ts);
- delta = ts4host(type_len_ts);
-
- switch (type_len) {
- case RINGBUF_TYPE_PADDING:
- if (!delta)
- die("error, hit unexpected end of page");
- length = data2host4(ptr);
- ptr += 4;
- length *= 4;
- ptr += length;
- goto read_again;
-
- case RINGBUF_TYPE_TIME_EXTEND:
- extend = data2host4(ptr);
- ptr += 4;
- extend <<= TS_SHIFT;
- extend += delta;
- cpu_data[cpu].timestamp += extend;
- goto read_again;
-
- case RINGBUF_TYPE_TIME_STAMP:
- ptr += 12;
- break;
- case 0:
- length = data2host4(ptr);
- ptr += 4;
- die("here! length=%d", length);
- break;
- default:
- length = type_len * 4;
- break;
- }
-
- cpu_data[cpu].timestamp += delta;
-
- data = malloc_or_die(sizeof(*data));
- memset(data, 0, sizeof(*data));
-
- data->ts = cpu_data[cpu].timestamp;
- data->size = length;
- data->data = ptr;
- ptr += length;
-
- cpu_data[cpu].index = calc_index(ptr, cpu);
- cpu_data[cpu].next = data;
-
- return data;
-}
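
Every record the removed reader walks starts with a 32-bit word packing a 5-bit type/length field and a 27-bit timestamp delta; which bits hold which depends on the file's byte order (see type_len4host() and ts4host() above). For a little-endian file the decode is simply:

static void decode_type_len_ts(unsigned int word,
			       unsigned int *type_len, unsigned int *delta)
{
	*type_len = word & ((1 << 5) - 1);	/* low 5 bits: type or length */
	*delta = word >> 5;			/* upper 27 bits: timestamp delta */
}
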
-
-struct record *trace_read_data(int cpu)
-{
- struct record *data;
-
- data = trace_peek_data(cpu);
- cpu_data[cpu].next = NULL;
-
- return data;
+ return 0;
}
-ssize_t trace_report(int fd, bool __repipe)
+ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
{
char buf[BUFSIZ];
char test[] = { 23, 8, 68 };
@@ -486,54 +351,94 @@ ssize_t trace_report(int fd, bool __repipe)
int show_version = 0;
int show_funcs = 0;
int show_printk = 0;
- ssize_t size;
+ ssize_t size = -1;
+ int file_bigendian;
+ int host_bigendian;
+ int file_long_size;
+ int file_page_size;
+ struct pevent *pevent = NULL;
+ int err;
- calc_data_size = 1;
repipe = __repipe;
-
input_fd = fd;
- read_or_die(buf, 3);
- if (memcmp(buf, test, 3) != 0)
- die("no trace data in the file");
+ if (do_read(buf, 3) < 0)
+ return -1;
+ if (memcmp(buf, test, 3) != 0) {
+ pr_debug("no trace data in the file");
+ return -1;
+ }
- read_or_die(buf, 7);
- if (memcmp(buf, "tracing", 7) != 0)
- die("not a trace file (missing 'tracing' tag)");
+ if (do_read(buf, 7) < 0)
+ return -1;
+ if (memcmp(buf, "tracing", 7) != 0) {
+ pr_debug("not a trace file (missing 'tracing' tag)");
+ return -1;
+ }
version = read_string();
+ if (version == NULL)
+ return -1;
if (show_version)
printf("version = %s\n", version);
free(version);
- read_or_die(buf, 1);
+ if (do_read(buf, 1) < 0)
+ return -1;
file_bigendian = buf[0];
host_bigendian = bigendian();
- read_or_die(buf, 1);
- long_size = buf[0];
-
- page_size = read4();
-
- read_header_files();
-
- read_ftrace_files();
- read_event_files();
- read_proc_kallsyms();
- read_ftrace_printk();
+ if (trace_event__init(tevent)) {
+ pr_debug("trace_event__init failed");
+ goto out;
+ }
- size = calc_data_size - 1;
- calc_data_size = 0;
+ pevent = tevent->pevent;
+
+ pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+ pevent_set_file_bigendian(pevent, file_bigendian);
+ pevent_set_host_bigendian(pevent, host_bigendian);
+
+ if (do_read(buf, 1) < 0)
+ goto out;
+ file_long_size = buf[0];
+
+ file_page_size = read4(pevent);
+ if (!file_page_size)
+ goto out;
+
+ pevent_set_long_size(pevent, file_long_size);
+ pevent_set_page_size(pevent, file_page_size);
+
+ err = read_header_files(pevent);
+ if (err)
+ goto out;
+ err = read_ftrace_files(pevent);
+ if (err)
+ goto out;
+ err = read_event_files(pevent);
+ if (err)
+ goto out;
+ err = read_proc_kallsyms(pevent);
+ if (err)
+ goto out;
+ err = read_ftrace_printk(pevent);
+ if (err)
+ goto out;
+
+ size = trace_data_size;
repipe = false;
if (show_funcs) {
- print_funcs();
- return size;
- }
- if (show_printk) {
- print_printk();
- return size;
+ pevent_print_funcs(pevent);
+ } else if (show_printk) {
+ pevent_print_printk(pevent);
}
+ pevent = NULL;
+
+out:
+ if (pevent)
+ trace_event__cleanup(tevent);
return size;
}
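
With this rework the reader no longer populates file-scope globals; the caller hands in a struct trace_event and owns it after a successful trace_report(). The perf callers sit outside this hunk, so the following is only a sketch of the intended use:

struct trace_event tevent;
ssize_t size;

size = trace_report(fd, &tevent, false);	/* false: do not repipe input to stdout */
if (size < 0)
	return -1;
/* ... look up parsed events through tevent.pevent ... */
trace_event__cleanup(&tevent);
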
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index f7af2fca965..57aaccc1692 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -22,7 +22,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <ctype.h>
#include <errno.h>
#include "../perf.h"
@@ -36,11 +35,11 @@ static int stop_script_unsupported(void)
return 0;
}
-static void process_event_unsupported(int cpu __unused,
- void *data __unused,
- int size __unused,
- unsigned long long nsecs __unused,
- char *comm __unused)
+static void process_event_unsupported(union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
}
@@ -53,16 +52,19 @@ static void print_python_unsupported_msg(void)
"\n etc.\n");
}
-static int python_start_script_unsupported(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
+static int python_start_script_unsupported(const char *script __maybe_unused,
+ int argc __maybe_unused,
+ const char **argv __maybe_unused)
{
print_python_unsupported_msg();
return -1;
}
-static int python_generate_script_unsupported(const char *outfile __unused)
+static int python_generate_script_unsupported(struct pevent *pevent
+ __maybe_unused,
+ const char *outfile
+ __maybe_unused)
{
print_python_unsupported_msg();
@@ -114,16 +116,18 @@ static void print_perl_unsupported_msg(void)
"\n etc.\n");
}
-static int perl_start_script_unsupported(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
+static int perl_start_script_unsupported(const char *script __maybe_unused,
+ int argc __maybe_unused,
+ const char **argv __maybe_unused)
{
print_perl_unsupported_msg();
return -1;
}
-static int perl_generate_script_unsupported(const char *outfile __unused)
+static int perl_generate_script_unsupported(struct pevent *pevent
+ __maybe_unused,
+ const char *outfile __maybe_unused)
{
print_perl_unsupported_msg();
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
new file mode 100644
index 00000000000..6322d37164c
--- /dev/null
+++ b/tools/perf/util/trace-event.c
@@ -0,0 +1,82 @@
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <linux/kernel.h>
+#include <traceevent/event-parse.h>
+#include "trace-event.h"
+#include "util.h"
+
+/*
+ * global trace_event object used by trace_event__tp_format
+ *
+ * TODO There's no cleanup call for this. Add some sort of
+ * __exit function support and call trace_event__cleanup
+ * there.
+ */
+static struct trace_event tevent;
+
+int trace_event__init(struct trace_event *t)
+{
+ struct pevent *pevent = pevent_alloc();
+
+ if (pevent) {
+ t->plugin_list = traceevent_load_plugins(pevent);
+ t->pevent = pevent;
+ }
+
+ return pevent ? 0 : -1;
+}
+
+void trace_event__cleanup(struct trace_event *t)
+{
+ traceevent_unload_plugins(t->plugin_list, t->pevent);
+ pevent_free(t->pevent);
+}
+
+static struct event_format*
+tp_format(const char *sys, const char *name)
+{
+ struct pevent *pevent = tevent.pevent;
+ struct event_format *event = NULL;
+ char path[PATH_MAX];
+ size_t size;
+ char *data;
+
+ scnprintf(path, PATH_MAX, "%s/%s/%s/format",
+ tracing_events_path, sys, name);
+
+ if (filename__read_str(path, &data, &size))
+ return NULL;
+
+ pevent_parse_format(pevent, &event, data, size, sys);
+
+ free(data);
+ return event;
+}
+
+struct event_format*
+trace_event__tp_format(const char *sys, const char *name)
+{
+ static bool initialized;
+
+ if (!initialized) {
+ int be = traceevent_host_bigendian();
+ struct pevent *pevent;
+
+ if (trace_event__init(&tevent))
+ return NULL;
+
+ pevent = tevent.pevent;
+ pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+ pevent_set_file_bigendian(pevent, be);
+ pevent_set_host_bigendian(pevent, be);
+ initialized = true;
+ }
+
+ return tp_format(sys, name);
+}
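
Editorial note (not part of the patch): the new helper lazily initializes the file-scope tevent object on first use, so a caller only needs the sys/name pair and never touches struct trace_event directly. A minimal, hypothetical caller might look like the sketch below; the tracepoint name and the printed fields are illustrative only.

	/* Illustrative sketch, not part of this commit. */
	#include <stdio.h>
	#include "trace-event.h"

	static int print_switch_format(void)
	{
		/* lazily sets up the static tevent object on first call */
		struct event_format *fmt =
			trace_event__tp_format("sched", "sched_switch");

		if (fmt == NULL)
			return -1;

		/* id and name come from libtraceevent's struct event_format */
		printf("id=%d name=%s\n", fmt->id, fmt->name);
		return 0;
	}
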
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index b3e86b1e444..7b6d6868832 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,287 +1,76 @@
-#ifndef __PERF_TRACE_EVENTS_H
-#define __PERF_TRACE_EVENTS_H
+#ifndef _PERF_UTIL_TRACE_EVENT_H
+#define _PERF_UTIL_TRACE_EVENT_H
-#include <stdbool.h>
+#include <traceevent/event-parse.h>
#include "parse-events.h"
-#define __unused __attribute__((unused))
+struct machine;
+struct perf_sample;
+union perf_event;
+struct perf_tool;
+struct thread;
+struct plugin_list;
-
-#ifndef PAGE_MASK
-#define PAGE_MASK (page_size - 1)
-#endif
-
-enum {
- RINGBUF_TYPE_PADDING = 29,
- RINGBUF_TYPE_TIME_EXTEND = 30,
- RINGBUF_TYPE_TIME_STAMP = 31,
-};
-
-#ifndef TS_SHIFT
-#define TS_SHIFT 27
-#endif
-
-#define NSECS_PER_SEC 1000000000ULL
-#define NSECS_PER_USEC 1000ULL
-
-enum format_flags {
- FIELD_IS_ARRAY = 1,
- FIELD_IS_POINTER = 2,
- FIELD_IS_SIGNED = 4,
- FIELD_IS_STRING = 8,
- FIELD_IS_DYNAMIC = 16,
- FIELD_IS_FLAG = 32,
- FIELD_IS_SYMBOLIC = 64,
-};
-
-struct format_field {
- struct format_field *next;
- char *type;
- char *name;
- int offset;
- int size;
- unsigned long flags;
+struct trace_event {
+ struct pevent *pevent;
+ struct plugin_list *plugin_list;
};
-struct format {
- int nr_common;
- int nr_fields;
- struct format_field *common_fields;
- struct format_field *fields;
-};
-
-struct print_arg_atom {
- char *atom;
-};
-
-struct print_arg_string {
- char *string;
- int offset;
-};
-
-struct print_arg_field {
- char *name;
- struct format_field *field;
-};
-
-struct print_flag_sym {
- struct print_flag_sym *next;
- char *value;
- char *str;
-};
-
-struct print_arg_typecast {
- char *type;
- struct print_arg *item;
-};
-
-struct print_arg_flags {
- struct print_arg *field;
- char *delim;
- struct print_flag_sym *flags;
-};
-
-struct print_arg_symbol {
- struct print_arg *field;
- struct print_flag_sym *symbols;
-};
-
-struct print_arg;
-
-struct print_arg_op {
- char *op;
- int prio;
- struct print_arg *left;
- struct print_arg *right;
-};
-
-struct print_arg_func {
- char *name;
- struct print_arg *args;
-};
-
-enum print_arg_type {
- PRINT_NULL,
- PRINT_ATOM,
- PRINT_FIELD,
- PRINT_FLAGS,
- PRINT_SYMBOL,
- PRINT_TYPE,
- PRINT_STRING,
- PRINT_OP,
-};
-
-struct print_arg {
- struct print_arg *next;
- enum print_arg_type type;
- union {
- struct print_arg_atom atom;
- struct print_arg_field field;
- struct print_arg_typecast typecast;
- struct print_arg_flags flags;
- struct print_arg_symbol symbol;
- struct print_arg_func func;
- struct print_arg_string string;
- struct print_arg_op op;
- };
-};
-
-struct print_fmt {
- char *format;
- struct print_arg *args;
-};
-
-struct event {
- struct event *next;
- char *name;
- int id;
- int flags;
- struct format format;
- struct print_fmt print_fmt;
- char *system;
-};
-
-enum {
- EVENT_FL_ISFTRACE = 0x01,
- EVENT_FL_ISPRINT = 0x02,
- EVENT_FL_ISBPRINT = 0x04,
- EVENT_FL_ISFUNC = 0x08,
- EVENT_FL_ISFUNCENT = 0x10,
- EVENT_FL_ISFUNCRET = 0x20,
-
- EVENT_FL_FAILED = 0x80000000
-};
-
-struct record {
- unsigned long long ts;
- int size;
- void *data;
-};
-
-struct record *trace_peek_data(int cpu);
-struct record *trace_read_data(int cpu);
-
-void parse_set_info(int nr_cpus, int long_sz);
-
-ssize_t trace_report(int fd, bool repipe);
-
-void *malloc_or_die(unsigned int size);
-
-void parse_cmdlines(char *file, int size);
-void parse_proc_kallsyms(char *file, unsigned int size);
-void parse_ftrace_printk(char *file, unsigned int size);
-
-void print_funcs(void);
-void print_printk(void);
-
-int parse_ftrace_file(char *buf, unsigned long size);
-int parse_event_file(char *buf, unsigned long size, char *sys);
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
- char *comm);
-
-extern int file_bigendian;
-extern int host_bigendian;
+int trace_event__init(struct trace_event *t);
+void trace_event__cleanup(struct trace_event *t);
+struct event_format*
+trace_event__tp_format(const char *sys, const char *name);
int bigendian(void);
-static inline unsigned short __data2host2(unsigned short data)
-{
- unsigned short swap;
-
- if (host_bigendian == file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 8) |
- ((data & (0xffULL << 8)) >> 8);
-
- return swap;
-}
-
-static inline unsigned int __data2host4(unsigned int data)
-{
- unsigned int swap;
-
- if (host_bigendian == file_bigendian)
- return data;
+void event_format__print(struct event_format *event,
+ int cpu, void *data, int size);
- swap = ((data & 0xffULL) << 24) |
- ((data & (0xffULL << 8)) << 8) |
- ((data & (0xffULL << 16)) >> 8) |
- ((data & (0xffULL << 24)) >> 24);
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
+int parse_event_file(struct pevent *pevent,
+ char *buf, unsigned long size, char *sys);
- return swap;
-}
+unsigned long long
+raw_field_value(struct event_format *event, const char *name, void *data);
-static inline unsigned long long __data2host8(unsigned long long data)
-{
- unsigned long long swap;
+void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
+void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
- if (host_bigendian == file_bigendian)
- return data;
+ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
- swap = ((data & 0xffULL) << 56) |
- ((data & (0xffULL << 8)) << 40) |
- ((data & (0xffULL << 16)) << 24) |
- ((data & (0xffULL << 24)) << 8) |
- ((data & (0xffULL << 32)) >> 8) |
- ((data & (0xffULL << 40)) >> 24) |
- ((data & (0xffULL << 48)) >> 40) |
- ((data & (0xffULL << 56)) >> 56);
+struct event_format *trace_find_next_event(struct pevent *pevent,
+ struct event_format *event);
+unsigned long long read_size(struct event_format *event, void *ptr, int size);
+unsigned long long eval_flag(const char *flag);
- return swap;
-}
+int read_tracing_data(int fd, struct list_head *pattrs);
-#define data2host2(ptr) __data2host2(*(unsigned short *)ptr)
-#define data2host4(ptr) __data2host4(*(unsigned int *)ptr)
-#define data2host8(ptr) ({ \
- unsigned long long __val; \
- \
- memcpy(&__val, (ptr), sizeof(unsigned long long)); \
- __data2host8(__val); \
-})
+struct tracing_data {
+ /* size is only valid if temp is 'true' */
+ ssize_t size;
+ bool temp;
+ char temp_file[50];
+};
-extern int header_page_ts_offset;
-extern int header_page_ts_size;
-extern int header_page_size_offset;
-extern int header_page_size_size;
-extern int header_page_data_offset;
-extern int header_page_data_size;
+struct tracing_data *tracing_data_get(struct list_head *pattrs,
+ int fd, bool temp);
+int tracing_data_put(struct tracing_data *tdata);
-extern bool latency_format;
-int trace_parse_common_type(void *data);
-int trace_parse_common_pid(void *data);
-int parse_common_pc(void *data);
-int parse_common_flags(void *data);
-int parse_common_lock_depth(void *data);
-struct event *trace_find_event(int id);
-struct event *trace_find_next_event(struct event *event);
-unsigned long long read_size(void *ptr, int size);
-unsigned long long
-raw_field_value(struct event *event, const char *name, void *data);
-void *raw_field_ptr(struct event *event, const char *name, void *data);
-unsigned long long eval_flag(const char *flag);
-
-int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);
-ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
- int nb_events);
+struct addr_location;
-/* taken from kernel/trace/trace.h */
-enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
- TRACE_FLAG_NEED_RESCHED = 0x04,
- TRACE_FLAG_HARDIRQ = 0x08,
- TRACE_FLAG_SOFTIRQ = 0x10,
-};
+struct perf_session;
struct scripting_ops {
const char *name;
int (*start_script) (const char *script, int argc, const char **argv);
int (*stop_script) (void);
- void (*process_event) (int cpu, void *data, int size,
- unsigned long long nsecs, char *comm);
- int (*generate_script) (const char *outfile);
+ void (*process_event) (union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al);
+ int (*generate_script) (struct pevent *pevent, const char *outfile);
};
int script_spec_register(const char *spec, struct scripting_ops *ops);
@@ -290,6 +79,7 @@ void setup_perl_scripting(void);
void setup_python_scripting(void);
struct scripting_context {
+ struct pevent *pevent;
void *event_data;
};
@@ -297,4 +87,4 @@ int common_pc(struct scripting_context *context);
int common_flags(struct scripting_context *context);
int common_lock_depth(struct scripting_context *context);
-#endif /* __PERF_TRACE_EVENTS_H */
+#endif /* _PERF_UTIL_TRACE_EVENT_H */
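
Editorial note (not part of the patch): the scripting_ops callbacks now take the resolved perf objects (event, sample, evsel, thread, addr_location) instead of the raw cpu/data/size/nsecs/comm tuple, and generate_script() gains the pevent handle. A hypothetical backend wired to the reworked signatures could look like this sketch; the names and empty bodies are placeholders, not code from the tree.

	/* Illustrative sketch, not part of this commit. */
	static int example_start_script(const char *script __maybe_unused,
					int argc __maybe_unused,
					const char **argv __maybe_unused)
	{
		return 0;
	}

	static int example_stop_script(void)
	{
		return 0;
	}

	static void example_process_event(union perf_event *event __maybe_unused,
					  struct perf_sample *sample __maybe_unused,
					  struct perf_evsel *evsel __maybe_unused,
					  struct thread *thread __maybe_unused,
					  struct addr_location *al __maybe_unused)
	{
		/* per-sample work would go here */
	}

	static int example_generate_script(struct pevent *pevent __maybe_unused,
					   const char *outfile __maybe_unused)
	{
		return 0;
	}

	static struct scripting_ops example_scripting_ops = {
		.name		 = "example",
		.start_script	 = example_start_script,
		.stop_script	 = example_stop_script,
		.process_event	 = example_process_event,
		.generate_script = example_generate_script,
	};
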
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
deleted file mode 100644
index 7d6b8331f89..00000000000
--- a/tools/perf/util/types.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __PERF_TYPES_H
-#define __PERF_TYPES_H
-
-/*
- * We define u64 as unsigned long long for every architecture
- * so that we can print it with %Lx without getting warnings.
- */
-typedef unsigned long long u64;
-typedef signed long long s64;
-typedef unsigned int u32;
-typedef signed int s32;
-typedef unsigned short u16;
-typedef signed short s16;
-typedef unsigned char u8;
-typedef signed char s8;
-
-#endif /* __PERF_TYPES_H */
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
deleted file mode 100644
index 8bc010edca2..00000000000
--- a/tools/perf/util/ui/browser.c
+++ /dev/null
@@ -1,337 +0,0 @@
-#include "libslang.h"
-#include <linux/compiler.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
-#include <stdlib.h>
-#include <sys/ttydefaults.h>
-#include "browser.h"
-#include "helpline.h"
-#include "../color.h"
-#include "../util.h"
-#include <stdio.h>
-
-static int ui_browser__percent_color(double percent, bool current)
-{
- if (current)
- return HE_COLORSET_SELECTED;
- if (percent >= MIN_RED)
- return HE_COLORSET_TOP;
- if (percent >= MIN_GREEN)
- return HE_COLORSET_MEDIUM;
- return HE_COLORSET_NORMAL;
-}
-
-void ui_browser__set_color(struct ui_browser *self __used, int color)
-{
- SLsmg_set_color(color);
-}
-
-void ui_browser__set_percent_color(struct ui_browser *self,
- double percent, bool current)
-{
- int color = ui_browser__percent_color(percent, current);
- ui_browser__set_color(self, color);
-}
-
-void ui_browser__gotorc(struct ui_browser *self, int y, int x)
-{
- SLsmg_gotorc(self->y + y, self->x + x);
-}
-
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
-{
- struct list_head *head = self->entries;
- struct list_head *pos;
-
- switch (whence) {
- case SEEK_SET:
- pos = head->next;
- break;
- case SEEK_CUR:
- pos = self->top;
- break;
- case SEEK_END:
- pos = head->prev;
- break;
- default:
- return;
- }
-
- if (offset > 0) {
- while (offset-- != 0)
- pos = pos->next;
- } else {
- while (offset++ != 0)
- pos = pos->prev;
- }
-
- self->top = pos;
-}
-
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
-{
- struct rb_root *root = self->entries;
- struct rb_node *nd;
-
- switch (whence) {
- case SEEK_SET:
- nd = rb_first(root);
- break;
- case SEEK_CUR:
- nd = self->top;
- break;
- case SEEK_END:
- nd = rb_last(root);
- break;
- default:
- return;
- }
-
- if (offset > 0) {
- while (offset-- != 0)
- nd = rb_next(nd);
- } else {
- while (offset++ != 0)
- nd = rb_prev(nd);
- }
-
- self->top = nd;
-}
-
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
-{
- struct rb_node *nd;
- int row = 0;
-
- if (self->top == NULL)
- self->top = rb_first(self->entries);
-
- nd = self->top;
-
- while (nd != NULL) {
- ui_browser__gotorc(self, row, 0);
- self->write(self, nd, row);
- if (++row == self->height)
- break;
- nd = rb_next(nd);
- }
-
- return row;
-}
-
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
-{
- return self->top_idx + row == self->index;
-}
-
-void ui_browser__refresh_dimensions(struct ui_browser *self)
-{
- int cols, rows;
- newtGetScreenSize(&cols, &rows);
-
- self->width = cols - 1;
- self->height = rows - 2;
- self->y = 1;
- self->x = 0;
-}
-
-void ui_browser__reset_index(struct ui_browser *self)
-{
- self->index = self->top_idx = 0;
- self->seek(self, 0, SEEK_SET);
-}
-
-void ui_browser__add_exit_key(struct ui_browser *self, int key)
-{
- newtFormAddHotKey(self->form, key);
-}
-
-void ui_browser__add_exit_keys(struct ui_browser *self, int keys[])
-{
- int i = 0;
-
- while (keys[i] && i < 64) {
- ui_browser__add_exit_key(self, keys[i]);
- ++i;
- }
-}
-
-int ui_browser__show(struct ui_browser *self, const char *title,
- const char *helpline, ...)
-{
- va_list ap;
- int keys[] = { NEWT_KEY_UP, NEWT_KEY_DOWN, NEWT_KEY_PGUP,
- NEWT_KEY_PGDN, NEWT_KEY_HOME, NEWT_KEY_END, ' ',
- NEWT_KEY_LEFT, NEWT_KEY_ESCAPE, 'q', CTRL('c'), 0 };
-
- if (self->form != NULL)
- newtFormDestroy(self->form);
-
- ui_browser__refresh_dimensions(self);
- self->form = newtForm(NULL, NULL, 0);
- if (self->form == NULL)
- return -1;
-
- self->sb = newtVerticalScrollbar(self->width, 1, self->height,
- HE_COLORSET_NORMAL,
- HE_COLORSET_SELECTED);
- if (self->sb == NULL)
- return -1;
-
- SLsmg_gotorc(0, 0);
- ui_browser__set_color(self, NEWT_COLORSET_ROOT);
- slsmg_write_nstring(title, self->width);
-
- ui_browser__add_exit_keys(self, keys);
- newtFormAddComponent(self->form, self->sb);
-
- va_start(ap, helpline);
- ui_helpline__vpush(helpline, ap);
- va_end(ap);
- return 0;
-}
-
-void ui_browser__hide(struct ui_browser *self)
-{
- newtFormDestroy(self->form);
- self->form = NULL;
- ui_helpline__pop();
-}
-
-int ui_browser__refresh(struct ui_browser *self)
-{
- int row;
-
- newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
- row = self->refresh(self);
- ui_browser__set_color(self, HE_COLORSET_NORMAL);
- SLsmg_fill_region(self->y + row, self->x,
- self->height - row, self->width, ' ');
-
- return 0;
-}
-
-int ui_browser__run(struct ui_browser *self)
-{
- struct newtExitStruct es;
-
- if (ui_browser__refresh(self) < 0)
- return -1;
-
- while (1) {
- off_t offset;
-
- newtFormRun(self->form, &es);
-
- if (es.reason != NEWT_EXIT_HOTKEY)
- break;
- switch (es.u.key) {
- case NEWT_KEY_DOWN:
- if (self->index == self->nr_entries - 1)
- break;
- ++self->index;
- if (self->index == self->top_idx + self->height) {
- ++self->top_idx;
- self->seek(self, +1, SEEK_CUR);
- }
- break;
- case NEWT_KEY_UP:
- if (self->index == 0)
- break;
- --self->index;
- if (self->index < self->top_idx) {
- --self->top_idx;
- self->seek(self, -1, SEEK_CUR);
- }
- break;
- case NEWT_KEY_PGDN:
- case ' ':
- if (self->top_idx + self->height > self->nr_entries - 1)
- break;
-
- offset = self->height;
- if (self->index + offset > self->nr_entries - 1)
- offset = self->nr_entries - 1 - self->index;
- self->index += offset;
- self->top_idx += offset;
- self->seek(self, +offset, SEEK_CUR);
- break;
- case NEWT_KEY_PGUP:
- if (self->top_idx == 0)
- break;
-
- if (self->top_idx < self->height)
- offset = self->top_idx;
- else
- offset = self->height;
-
- self->index -= offset;
- self->top_idx -= offset;
- self->seek(self, -offset, SEEK_CUR);
- break;
- case NEWT_KEY_HOME:
- ui_browser__reset_index(self);
- break;
- case NEWT_KEY_END:
- offset = self->height - 1;
- if (offset >= self->nr_entries)
- offset = self->nr_entries - 1;
-
- self->index = self->nr_entries - 1;
- self->top_idx = self->index - offset;
- self->seek(self, -offset, SEEK_END);
- break;
- default:
- return es.u.key;
- }
- if (ui_browser__refresh(self) < 0)
- return -1;
- }
- return -1;
-}
-
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
-{
- struct list_head *pos;
- struct list_head *head = self->entries;
- int row = 0;
-
- if (self->top == NULL || self->top == self->entries)
- self->top = head->next;
-
- pos = self->top;
-
- list_for_each_from(pos, head) {
- ui_browser__gotorc(self, row, 0);
- self->write(self, pos, row);
- if (++row == self->height)
- break;
- }
-
- return row;
-}
-
-static struct newtPercentTreeColors {
- const char *topColorFg, *topColorBg;
- const char *mediumColorFg, *mediumColorBg;
- const char *normalColorFg, *normalColorBg;
- const char *selColorFg, *selColorBg;
- const char *codeColorFg, *codeColorBg;
-} defaultPercentTreeColors = {
- "red", "lightgray",
- "green", "lightgray",
- "black", "lightgray",
- "lightgray", "magenta",
- "blue", "lightgray",
-};
-
-void ui_browser__init(void)
-{
- struct newtPercentTreeColors *c = &defaultPercentTreeColors;
-
- sltt_set_color(HE_COLORSET_TOP, NULL, c->topColorFg, c->topColorBg);
- sltt_set_color(HE_COLORSET_MEDIUM, NULL, c->mediumColorFg, c->mediumColorBg);
- sltt_set_color(HE_COLORSET_NORMAL, NULL, c->normalColorFg, c->normalColorBg);
- sltt_set_color(HE_COLORSET_SELECTED, NULL, c->selColorFg, c->selColorBg);
- sltt_set_color(HE_COLORSET_CODE, NULL, c->codeColorFg, c->codeColorBg);
-}
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
deleted file mode 100644
index 0dc7e4da36f..00000000000
--- a/tools/perf/util/ui/browser.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _PERF_UI_BROWSER_H_
-#define _PERF_UI_BROWSER_H_ 1
-
-#include <stdbool.h>
-#include <newt.h>
-#include <sys/types.h>
-#include "../types.h"
-
-#define HE_COLORSET_TOP 50
-#define HE_COLORSET_MEDIUM 51
-#define HE_COLORSET_NORMAL 52
-#define HE_COLORSET_SELECTED 53
-#define HE_COLORSET_CODE 54
-
-struct ui_browser {
- newtComponent form, sb;
- u64 index, top_idx;
- void *top, *entries;
- u16 y, x, width, height;
- void *priv;
- unsigned int (*refresh)(struct ui_browser *self);
- void (*write)(struct ui_browser *self, void *entry, int row);
- void (*seek)(struct ui_browser *self, off_t offset, int whence);
- u32 nr_entries;
-};
-
-
-void ui_browser__set_color(struct ui_browser *self, int color);
-void ui_browser__set_percent_color(struct ui_browser *self,
- double percent, bool current);
-bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
-void ui_browser__refresh_dimensions(struct ui_browser *self);
-void ui_browser__reset_index(struct ui_browser *self);
-
-void ui_browser__gotorc(struct ui_browser *self, int y, int x);
-void ui_browser__add_exit_key(struct ui_browser *self, int key);
-void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]);
-int ui_browser__show(struct ui_browser *self, const char *title,
- const char *helpline, ...);
-void ui_browser__hide(struct ui_browser *self);
-int ui_browser__refresh(struct ui_browser *self);
-int ui_browser__run(struct ui_browser *self);
-
-void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
-unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
-
-void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence);
-unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
-
-void ui_browser__init(void);
-#endif /* _PERF_UI_BROWSER_H_ */
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
deleted file mode 100644
index 82b78f99251..00000000000
--- a/tools/perf/util/ui/browsers/annotate.c
+++ /dev/null
@@ -1,237 +0,0 @@
-#include "../browser.h"
-#include "../helpline.h"
-#include "../libslang.h"
-#include "../../hist.h"
-#include "../../sort.h"
-#include "../../symbol.h"
-
-static void ui__error_window(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
- va_end(ap);
-}
-
-struct annotate_browser {
- struct ui_browser b;
- struct rb_root entries;
- struct rb_node *curr_hot;
-};
-
-struct objdump_line_rb_node {
- struct rb_node rb_node;
- double percent;
- u32 idx;
-};
-
-static inline
-struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self)
-{
- return (struct objdump_line_rb_node *)(self + 1);
-}
-
-static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
-{
- struct objdump_line *ol = rb_entry(entry, struct objdump_line, node);
- bool current_entry = ui_browser__is_current_entry(self, row);
- int width = self->width;
-
- if (ol->offset != -1) {
- struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
- ui_browser__set_percent_color(self, olrb->percent, current_entry);
- slsmg_printf(" %7.2f ", olrb->percent);
- if (!current_entry)
- ui_browser__set_color(self, HE_COLORSET_CODE);
- } else {
- ui_browser__set_percent_color(self, 0, current_entry);
- slsmg_write_nstring(" ", 9);
- }
-
- SLsmg_write_char(':');
- slsmg_write_nstring(" ", 8);
- if (!*ol->line)
- slsmg_write_nstring(" ", width - 18);
- else
- slsmg_write_nstring(ol->line, width - 18);
-}
-
-static double objdump_line__calc_percent(struct objdump_line *self,
- struct list_head *head,
- struct symbol *sym)
-{
- double percent = 0.0;
-
- if (self->offset != -1) {
- int len = sym->end - sym->start;
- unsigned int hits = 0;
- struct sym_priv *priv = symbol__priv(sym);
- struct sym_ext *sym_ext = priv->ext;
- struct sym_hist *h = priv->hist;
- s64 offset = self->offset;
- struct objdump_line *next = objdump__get_next_ip_line(head, self);
-
-
- while (offset < (s64)len &&
- (next == NULL || offset < next->offset)) {
- if (sym_ext) {
- percent += sym_ext[offset].percent;
- } else
- hits += h->ip[offset];
-
- ++offset;
- }
-
- if (sym_ext == NULL && h->sum)
- percent = 100.0 * hits / h->sum;
- }
-
- return percent;
-}
-
-static void objdump__insert_line(struct rb_root *self,
- struct objdump_line_rb_node *line)
-{
- struct rb_node **p = &self->rb_node;
- struct rb_node *parent = NULL;
- struct objdump_line_rb_node *l;
-
- while (*p != NULL) {
- parent = *p;
- l = rb_entry(parent, struct objdump_line_rb_node, rb_node);
- if (line->percent < l->percent)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&line->rb_node, parent, p);
- rb_insert_color(&line->rb_node, self);
-}
-
-static void annotate_browser__set_top(struct annotate_browser *self,
- struct rb_node *nd)
-{
- struct objdump_line_rb_node *rbpos;
- struct objdump_line *pos;
- unsigned back;
-
- ui_browser__refresh_dimensions(&self->b);
- back = self->b.height / 2;
- rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node);
- pos = ((struct objdump_line *)rbpos) - 1;
- self->b.top_idx = self->b.index = rbpos->idx;
-
- while (self->b.top_idx != 0 && back != 0) {
- pos = list_entry(pos->node.prev, struct objdump_line, node);
-
- --self->b.top_idx;
- --back;
- }
-
- self->b.top = pos;
- self->curr_hot = nd;
-}
-
-static int annotate_browser__run(struct annotate_browser *self)
-{
- struct rb_node *nd;
- struct hist_entry *he = self->b.priv;
- int key;
-
- if (ui_browser__show(&self->b, he->ms.sym->name,
- "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
- return -1;
- /*
- * To allow builtin-annotate to cycle thru multiple symbols by
- * examining the exit key for this function.
- */
- ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
-
- nd = self->curr_hot;
- if (nd) {
- int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
- ui_browser__add_exit_keys(&self->b, tabs);
- }
-
- while (1) {
- key = ui_browser__run(&self->b);
-
- switch (key) {
- case NEWT_KEY_TAB:
- nd = rb_prev(nd);
- if (nd == NULL)
- nd = rb_last(&self->entries);
- annotate_browser__set_top(self, nd);
- break;
- case NEWT_KEY_UNTAB:
- nd = rb_next(nd);
- if (nd == NULL)
- nd = rb_first(&self->entries);
- annotate_browser__set_top(self, nd);
- break;
- default:
- goto out;
- }
- }
-out:
- ui_browser__hide(&self->b);
- return key;
-}
-
-int hist_entry__tui_annotate(struct hist_entry *self)
-{
- struct objdump_line *pos, *n;
- struct objdump_line_rb_node *rbpos;
- LIST_HEAD(head);
- struct annotate_browser browser = {
- .b = {
- .entries = &head,
- .refresh = ui_browser__list_head_refresh,
- .seek = ui_browser__list_head_seek,
- .write = annotate_browser__write,
- .priv = self,
- },
- };
- int ret;
-
- if (self->ms.sym == NULL)
- return -1;
-
- if (self->ms.map->dso->annotate_warned)
- return -1;
-
- if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) {
- ui__error_window(ui_helpline__last_msg);
- return -1;
- }
-
- ui_helpline__push("Press <- or ESC to exit");
-
- list_for_each_entry(pos, &head, node) {
- size_t line_len = strlen(pos->line);
- if (browser.b.width < line_len)
- browser.b.width = line_len;
- rbpos = objdump_line__rb(pos);
- rbpos->idx = browser.b.nr_entries++;
- rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym);
- if (rbpos->percent < 0.01)
- continue;
- objdump__insert_line(&browser.entries, rbpos);
- }
-
- /*
- * Position the browser at the hottest line.
- */
- browser.curr_hot = rb_last(&browser.entries);
- if (browser.curr_hot)
- annotate_browser__set_top(&browser, browser.curr_hot);
-
- browser.b.width += 18; /* Percentage */
- ret = annotate_browser__run(&browser);
- list_for_each_entry_safe(pos, n, &head, node) {
- list_del(&pos->node);
- objdump_line__free(pos);
- }
- return ret;
-}
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
deleted file mode 100644
index ebda8c3fde9..00000000000
--- a/tools/perf/util/ui/browsers/hists.c
+++ /dev/null
@@ -1,1013 +0,0 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#undef _GNU_SOURCE
-#include "../libslang.h"
-#include <stdlib.h>
-#include <string.h>
-#include <newt.h>
-#include <linux/rbtree.h>
-
-#include "../../hist.h"
-#include "../../pstack.h"
-#include "../../sort.h"
-#include "../../util.h"
-
-#include "../browser.h"
-#include "../helpline.h"
-#include "../util.h"
-#include "map.h"
-
-struct hist_browser {
- struct ui_browser b;
- struct hists *hists;
- struct hist_entry *he_selection;
- struct map_symbol *selection;
-};
-
-static void hist_browser__refresh_dimensions(struct hist_browser *self)
-{
- /* 3 == +/- toggle symbol before actual hist_entry rendering */
- self->b.width = 3 + (hists__sort_list_width(self->hists) +
- sizeof("[k]"));
-}
-
-static void hist_browser__reset(struct hist_browser *self)
-{
- self->b.nr_entries = self->hists->nr_entries;
- hist_browser__refresh_dimensions(self);
- ui_browser__reset_index(&self->b);
-}
-
-static char tree__folded_sign(bool unfolded)
-{
- return unfolded ? '-' : '+';
-}
-
-static char map_symbol__folded(const struct map_symbol *self)
-{
- return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
-}
-
-static char hist_entry__folded(const struct hist_entry *self)
-{
- return map_symbol__folded(&self->ms);
-}
-
-static char callchain_list__folded(const struct callchain_list *self)
-{
- return map_symbol__folded(&self->ms);
-}
-
-static void map_symbol__set_folding(struct map_symbol *self, bool unfold)
-{
- self->unfolded = unfold ? self->has_children : false;
-}
-
-static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
-{
- int n = 0;
- struct rb_node *nd;
-
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
- struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
- struct callchain_list *chain;
- char folded_sign = ' '; /* No children */
-
- list_for_each_entry(chain, &child->val, list) {
- ++n;
- /* We need this because we may not have children */
- folded_sign = callchain_list__folded(chain);
- if (folded_sign == '+')
- break;
- }
-
- if (folded_sign == '-') /* Have children and they're unfolded */
- n += callchain_node__count_rows_rb_tree(child);
- }
-
- return n;
-}
-
-static int callchain_node__count_rows(struct callchain_node *node)
-{
- struct callchain_list *chain;
- bool unfolded = false;
- int n = 0;
-
- list_for_each_entry(chain, &node->val, list) {
- ++n;
- unfolded = chain->ms.unfolded;
- }
-
- if (unfolded)
- n += callchain_node__count_rows_rb_tree(node);
-
- return n;
-}
-
-static int callchain__count_rows(struct rb_root *chain)
-{
- struct rb_node *nd;
- int n = 0;
-
- for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
- struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
- n += callchain_node__count_rows(node);
- }
-
- return n;
-}
-
-static bool map_symbol__toggle_fold(struct map_symbol *self)
-{
- if (!self->has_children)
- return false;
-
- self->unfolded = !self->unfolded;
- return true;
-}
-
-static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
-{
- struct rb_node *nd = rb_first(&self->rb_root);
-
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
- struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
- struct callchain_list *chain;
- bool first = true;
-
- list_for_each_entry(chain, &child->val, list) {
- if (first) {
- first = false;
- chain->ms.has_children = chain->list.next != &child->val ||
- !RB_EMPTY_ROOT(&child->rb_root);
- } else
- chain->ms.has_children = chain->list.next == &child->val &&
- !RB_EMPTY_ROOT(&child->rb_root);
- }
-
- callchain_node__init_have_children_rb_tree(child);
- }
-}
-
-static void callchain_node__init_have_children(struct callchain_node *self)
-{
- struct callchain_list *chain;
-
- list_for_each_entry(chain, &self->val, list)
- chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root);
-
- callchain_node__init_have_children_rb_tree(self);
-}
-
-static void callchain__init_have_children(struct rb_root *self)
-{
- struct rb_node *nd;
-
- for (nd = rb_first(self); nd; nd = rb_next(nd)) {
- struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
- callchain_node__init_have_children(node);
- }
-}
-
-static void hist_entry__init_have_children(struct hist_entry *self)
-{
- if (!self->init_have_children) {
- self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain);
- callchain__init_have_children(&self->sorted_chain);
- self->init_have_children = true;
- }
-}
-
-static bool hist_browser__toggle_fold(struct hist_browser *self)
-{
- if (map_symbol__toggle_fold(self->selection)) {
- struct hist_entry *he = self->he_selection;
-
- hist_entry__init_have_children(he);
- self->hists->nr_entries -= he->nr_rows;
-
- if (he->ms.unfolded)
- he->nr_rows = callchain__count_rows(&he->sorted_chain);
- else
- he->nr_rows = 0;
- self->hists->nr_entries += he->nr_rows;
- self->b.nr_entries = self->hists->nr_entries;
-
- return true;
- }
-
- /* If it doesn't have children, no toggling performed */
- return false;
-}
-
-static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold)
-{
- int n = 0;
- struct rb_node *nd;
-
- for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
- struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
- struct callchain_list *chain;
- bool has_children = false;
-
- list_for_each_entry(chain, &child->val, list) {
- ++n;
- map_symbol__set_folding(&chain->ms, unfold);
- has_children = chain->ms.has_children;
- }
-
- if (has_children)
- n += callchain_node__set_folding_rb_tree(child, unfold);
- }
-
- return n;
-}
-
-static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
-{
- struct callchain_list *chain;
- bool has_children = false;
- int n = 0;
-
- list_for_each_entry(chain, &node->val, list) {
- ++n;
- map_symbol__set_folding(&chain->ms, unfold);
- has_children = chain->ms.has_children;
- }
-
- if (has_children)
- n += callchain_node__set_folding_rb_tree(node, unfold);
-
- return n;
-}
-
-static int callchain__set_folding(struct rb_root *chain, bool unfold)
-{
- struct rb_node *nd;
- int n = 0;
-
- for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
- struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
- n += callchain_node__set_folding(node, unfold);
- }
-
- return n;
-}
-
-static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
-{
- hist_entry__init_have_children(self);
- map_symbol__set_folding(&self->ms, unfold);
-
- if (self->ms.has_children) {
- int n = callchain__set_folding(&self->sorted_chain, unfold);
- self->nr_rows = unfold ? n : 0;
- } else
- self->nr_rows = 0;
-}
-
-static void hists__set_folding(struct hists *self, bool unfold)
-{
- struct rb_node *nd;
-
- self->nr_entries = 0;
-
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
- hist_entry__set_folding(he, unfold);
- self->nr_entries += 1 + he->nr_rows;
- }
-}
-
-static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
-{
- hists__set_folding(self->hists, unfold);
- self->b.nr_entries = self->hists->nr_entries;
- /* Go to the start, we may be way after valid entries after a collapse */
- ui_browser__reset_index(&self->b);
-}
-
-static int hist_browser__run(struct hist_browser *self, const char *title)
-{
- int key;
- int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't',
- NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, };
-
- self->b.entries = &self->hists->entries;
- self->b.nr_entries = self->hists->nr_entries;
-
- hist_browser__refresh_dimensions(self);
-
- if (ui_browser__show(&self->b, title,
- "Press '?' for help on key bindings") < 0)
- return -1;
-
- ui_browser__add_exit_keys(&self->b, exit_keys);
-
- while (1) {
- key = ui_browser__run(&self->b);
-
- switch (key) {
- case 'D': { /* Debug */
- static int seq;
- struct hist_entry *h = rb_entry(self->b.top,
- struct hist_entry, rb_node);
- ui_helpline__pop();
- ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
- seq++, self->b.nr_entries,
- self->hists->nr_entries,
- self->b.height,
- self->b.index,
- self->b.top_idx,
- h->row_offset, h->nr_rows);
- }
- break;
- case 'C':
- /* Collapse the whole world. */
- hist_browser__set_folding(self, false);
- break;
- case 'E':
- /* Expand the whole world. */
- hist_browser__set_folding(self, true);
- break;
- case NEWT_KEY_ENTER:
- if (hist_browser__toggle_fold(self))
- break;
- /* fall thru */
- default:
- goto out;
- }
- }
-out:
- ui_browser__hide(&self->b);
- return key;
-}
-
-static char *callchain_list__sym_name(struct callchain_list *self,
- char *bf, size_t bfsize)
-{
- if (self->ms.sym)
- return self->ms.sym->name;
-
- snprintf(bf, bfsize, "%#Lx", self->ip);
- return bf;
-}
-
-#define LEVEL_OFFSET_STEP 3
-
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
- struct callchain_node *chain_node,
- u64 total, int level,
- unsigned short row,
- off_t *row_offset,
- bool *is_current_entry)
-{
- struct rb_node *node;
- int first_row = row, width, offset = level * LEVEL_OFFSET_STEP;
- u64 new_total, remaining;
-
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = chain_node->children_hit;
- else
- new_total = total;
-
- remaining = new_total;
- node = rb_first(&chain_node->rb_root);
- while (node) {
- struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
- struct rb_node *next = rb_next(node);
- u64 cumul = cumul_hits(child);
- struct callchain_list *chain;
- char folded_sign = ' ';
- int first = true;
- int extra_offset = 0;
-
- remaining -= cumul;
-
- list_for_each_entry(chain, &child->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
- const char *str;
- int color;
- bool was_first = first;
-
- if (first)
- first = false;
- else
- extra_offset = LEVEL_OFFSET_STEP;
-
- folded_sign = callchain_list__folded(chain);
- if (*row_offset != 0) {
- --*row_offset;
- goto do_next;
- }
-
- alloc_str = NULL;
- str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
- if (was_first) {
- double percent = cumul * 100.0 / new_total;
-
- if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
- str = "Not enough memory!";
- else
- str = alloc_str;
- }
-
- color = HE_COLORSET_NORMAL;
- width = self->b.width - (offset + extra_offset + 2);
- if (ui_browser__is_current_entry(&self->b, row)) {
- self->selection = &chain->ms;
- color = HE_COLORSET_SELECTED;
- *is_current_entry = true;
- }
-
- ui_browser__set_color(&self->b, color);
- ui_browser__gotorc(&self->b, row, 0);
- slsmg_write_nstring(" ", offset + extra_offset);
- slsmg_printf("%c ", folded_sign);
- slsmg_write_nstring(str, width);
- free(alloc_str);
-
- if (++row == self->b.height)
- goto out;
-do_next:
- if (folded_sign == '+')
- break;
- }
-
- if (folded_sign == '-') {
- const int new_level = level + (extra_offset ? 2 : 1);
- row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
- new_level, row, row_offset,
- is_current_entry);
- }
- if (row == self->b.height)
- goto out;
- node = next;
- }
-out:
- return row - first_row;
-}
-
-static int hist_browser__show_callchain_node(struct hist_browser *self,
- struct callchain_node *node,
- int level, unsigned short row,
- off_t *row_offset,
- bool *is_current_entry)
-{
- struct callchain_list *chain;
- int first_row = row,
- offset = level * LEVEL_OFFSET_STEP,
- width = self->b.width - offset;
- char folded_sign = ' ';
-
- list_for_each_entry(chain, &node->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *s;
- int color;
-
- folded_sign = callchain_list__folded(chain);
-
- if (*row_offset != 0) {
- --*row_offset;
- continue;
- }
-
- color = HE_COLORSET_NORMAL;
- if (ui_browser__is_current_entry(&self->b, row)) {
- self->selection = &chain->ms;
- color = HE_COLORSET_SELECTED;
- *is_current_entry = true;
- }
-
- s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
- ui_browser__gotorc(&self->b, row, 0);
- ui_browser__set_color(&self->b, color);
- slsmg_write_nstring(" ", offset);
- slsmg_printf("%c ", folded_sign);
- slsmg_write_nstring(s, width - 2);
-
- if (++row == self->b.height)
- goto out;
- }
-
- if (folded_sign == '-')
- row += hist_browser__show_callchain_node_rb_tree(self, node,
- self->hists->stats.total_period,
- level + 1, row,
- row_offset,
- is_current_entry);
-out:
- return row - first_row;
-}
-
-static int hist_browser__show_callchain(struct hist_browser *self,
- struct rb_root *chain,
- int level, unsigned short row,
- off_t *row_offset,
- bool *is_current_entry)
-{
- struct rb_node *nd;
- int first_row = row;
-
- for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
- struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
-
- row += hist_browser__show_callchain_node(self, node, level,
- row, row_offset,
- is_current_entry);
- if (row == self->b.height)
- break;
- }
-
- return row - first_row;
-}
-
-static int hist_browser__show_entry(struct hist_browser *self,
- struct hist_entry *entry,
- unsigned short row)
-{
- char s[256];
- double percent;
- int printed = 0;
- int color, width = self->b.width;
- char folded_sign = ' ';
- bool current_entry = ui_browser__is_current_entry(&self->b, row);
- off_t row_offset = entry->row_offset;
-
- if (current_entry) {
- self->he_selection = entry;
- self->selection = &entry->ms;
- }
-
- if (symbol_conf.use_callchain) {
- hist_entry__init_have_children(entry);
- folded_sign = hist_entry__folded(entry);
- }
-
- if (row_offset == 0) {
- hist_entry__snprintf(entry, s, sizeof(s), self->hists, NULL, false,
- 0, false, self->hists->stats.total_period);
- percent = (entry->period * 100.0) / self->hists->stats.total_period;
-
- color = HE_COLORSET_SELECTED;
- if (!current_entry) {
- if (percent >= MIN_RED)
- color = HE_COLORSET_TOP;
- else if (percent >= MIN_GREEN)
- color = HE_COLORSET_MEDIUM;
- else
- color = HE_COLORSET_NORMAL;
- }
-
- ui_browser__set_color(&self->b, color);
- ui_browser__gotorc(&self->b, row, 0);
- if (symbol_conf.use_callchain) {
- slsmg_printf("%c ", folded_sign);
- width -= 2;
- }
- slsmg_write_nstring(s, width);
- ++row;
- ++printed;
- } else
- --row_offset;
-
- if (folded_sign == '-' && row != self->b.height) {
- printed += hist_browser__show_callchain(self, &entry->sorted_chain,
- 1, row, &row_offset,
- &current_entry);
- if (current_entry)
- self->he_selection = entry;
- }
-
- return printed;
-}
-
-static unsigned int hist_browser__refresh(struct ui_browser *self)
-{
- unsigned row = 0;
- struct rb_node *nd;
- struct hist_browser *hb = container_of(self, struct hist_browser, b);
-
- if (self->top == NULL)
- self->top = rb_first(&hb->hists->entries);
-
- for (nd = self->top; nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
- if (h->filtered)
- continue;
-
- row += hist_browser__show_entry(hb, h, row);
- if (row == self->height)
- break;
- }
-
- return row;
-}
-
-static struct rb_node *hists__filter_entries(struct rb_node *nd)
-{
- while (nd != NULL) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
- if (!h->filtered)
- return nd;
-
- nd = rb_next(nd);
- }
-
- return NULL;
-}
-
-static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
-{
- while (nd != NULL) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
- if (!h->filtered)
- return nd;
-
- nd = rb_prev(nd);
- }
-
- return NULL;
-}
-
-static void ui_browser__hists_seek(struct ui_browser *self,
- off_t offset, int whence)
-{
- struct hist_entry *h;
- struct rb_node *nd;
- bool first = true;
-
- switch (whence) {
- case SEEK_SET:
- nd = hists__filter_entries(rb_first(self->entries));
- break;
- case SEEK_CUR:
- nd = self->top;
- goto do_offset;
- case SEEK_END:
- nd = hists__filter_prev_entries(rb_last(self->entries));
- first = false;
- break;
- default:
- return;
- }
-
- /*
- * Moves not relative to the first visible entry invalidates its
- * row_offset:
- */
- h = rb_entry(self->top, struct hist_entry, rb_node);
- h->row_offset = 0;
-
- /*
- * Here we have to check if nd is expanded (+), if it is we can't go
- * the next top level hist_entry, instead we must compute an offset of
- * what _not_ to show and not change the first visible entry.
- *
- * This offset increments when we are going from top to bottom and
- * decreases when we're going from bottom to top.
- *
- * As we don't have backpointers to the top level in the callchains
- * structure, we need to always print the whole hist_entry callchain,
- * skipping the first ones that are before the first visible entry
- * and stop when we printed enough lines to fill the screen.
- */
-do_offset:
- if (offset > 0) {
- do {
- h = rb_entry(nd, struct hist_entry, rb_node);
- if (h->ms.unfolded) {
- u16 remaining = h->nr_rows - h->row_offset;
- if (offset > remaining) {
- offset -= remaining;
- h->row_offset = 0;
- } else {
- h->row_offset += offset;
- offset = 0;
- self->top = nd;
- break;
- }
- }
- nd = hists__filter_entries(rb_next(nd));
- if (nd == NULL)
- break;
- --offset;
- self->top = nd;
- } while (offset != 0);
- } else if (offset < 0) {
- while (1) {
- h = rb_entry(nd, struct hist_entry, rb_node);
- if (h->ms.unfolded) {
- if (first) {
- if (-offset > h->row_offset) {
- offset += h->row_offset;
- h->row_offset = 0;
- } else {
- h->row_offset += offset;
- offset = 0;
- self->top = nd;
- break;
- }
- } else {
- if (-offset > h->nr_rows) {
- offset += h->nr_rows;
- h->row_offset = 0;
- } else {
- h->row_offset = h->nr_rows + offset;
- offset = 0;
- self->top = nd;
- break;
- }
- }
- }
-
- nd = hists__filter_prev_entries(rb_prev(nd));
- if (nd == NULL)
- break;
- ++offset;
- self->top = nd;
- if (offset == 0) {
- /*
- * Last unfiltered hist_entry, check if it is
- * unfolded, if it is then we should have
- * row_offset at its last entry.
- */
- h = rb_entry(nd, struct hist_entry, rb_node);
- if (h->ms.unfolded)
- h->row_offset = h->nr_rows;
- break;
- }
- first = false;
- }
- } else {
- self->top = nd;
- h = rb_entry(nd, struct hist_entry, rb_node);
- h->row_offset = 0;
- }
-}
-
-static struct hist_browser *hist_browser__new(struct hists *hists)
-{
- struct hist_browser *self = zalloc(sizeof(*self));
-
- if (self) {
- self->hists = hists;
- self->b.refresh = hist_browser__refresh;
- self->b.seek = ui_browser__hists_seek;
- }
-
- return self;
-}
-
-static void hist_browser__delete(struct hist_browser *self)
-{
- free(self);
-}
-
-static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
-{
- return self->he_selection;
-}
-
-static struct thread *hist_browser__selected_thread(struct hist_browser *self)
-{
- return self->he_selection->thread;
-}
-
-static int hists__browser_title(struct hists *self, char *bf, size_t size,
- const char *ev_name, const struct dso *dso,
- const struct thread *thread)
-{
- char unit;
- int printed;
- unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
-
- nr_events = convert_unit(nr_events, &unit);
- printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
-
- if (thread)
- printed += snprintf(bf + printed, size - printed,
- ", Thread: %s(%d)",
- (thread->comm_set ? thread->comm : ""),
- thread->pid);
- if (dso)
- printed += snprintf(bf + printed, size - printed,
- ", DSO: %s", dso->short_name);
- return printed;
-}
-
-int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
-{
- struct hist_browser *browser = hist_browser__new(self);
- struct pstack *fstack;
- const struct thread *thread_filter = NULL;
- const struct dso *dso_filter = NULL;
- char msg[160];
- int key = -1;
-
- if (browser == NULL)
- return -1;
-
- fstack = pstack__new(2);
- if (fstack == NULL)
- goto out;
-
- ui_helpline__push(helpline);
-
- hists__browser_title(self, msg, sizeof(msg), ev_name,
- dso_filter, thread_filter);
- while (1) {
- const struct thread *thread;
- const struct dso *dso;
- char *options[16];
- int nr_options = 0, choice = 0, i,
- annotate = -2, zoom_dso = -2, zoom_thread = -2,
- browse_map = -2;
-
- key = hist_browser__run(browser, msg);
-
- thread = hist_browser__selected_thread(browser);
- dso = browser->selection->map ? browser->selection->map->dso : NULL;
-
- switch (key) {
- case NEWT_KEY_TAB:
- case NEWT_KEY_UNTAB:
- /*
- * Exit the browser, let hists__browser_tree
- * go to the next or previous
- */
- goto out_free_stack;
- case 'a':
- if (browser->selection->map == NULL &&
- browser->selection->map->dso->annotate_warned)
- continue;
- goto do_annotate;
- case 'd':
- goto zoom_dso;
- case 't':
- goto zoom_thread;
- case NEWT_KEY_F1:
- case 'h':
- case '?':
- ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
- "<- Zoom out\n"
- "a Annotate current symbol\n"
- "h/?/F1 Show this window\n"
- "C Collapse all callchains\n"
- "E Expand all callchains\n"
- "d Zoom into current DSO\n"
- "t Zoom into current Thread\n"
- "q/CTRL+C Exit browser");
- continue;
- case NEWT_KEY_ENTER:
- case NEWT_KEY_RIGHT:
- /* menu */
- break;
- case NEWT_KEY_LEFT: {
- const void *top;
-
- if (pstack__empty(fstack))
- continue;
- top = pstack__pop(fstack);
- if (top == &dso_filter)
- goto zoom_out_dso;
- if (top == &thread_filter)
- goto zoom_out_thread;
- continue;
- }
- case NEWT_KEY_ESCAPE:
- if (!ui__dialog_yesno("Do you really want to exit?"))
- continue;
- /* Fall thru */
- default:
- goto out_free_stack;
- }
-
- if (browser->selection->sym != NULL &&
- !browser->selection->map->dso->annotate_warned &&
- asprintf(&options[nr_options], "Annotate %s",
- browser->selection->sym->name) > 0)
- annotate = nr_options++;
-
- if (thread != NULL &&
- asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
- (thread_filter ? "out of" : "into"),
- (thread->comm_set ? thread->comm : ""),
- thread->pid) > 0)
- zoom_thread = nr_options++;
-
- if (dso != NULL &&
- asprintf(&options[nr_options], "Zoom %s %s DSO",
- (dso_filter ? "out of" : "into"),
- (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
- zoom_dso = nr_options++;
-
- if (browser->selection->map != NULL &&
- asprintf(&options[nr_options], "Browse map details") > 0)
- browse_map = nr_options++;
-
- options[nr_options++] = (char *)"Exit";
-
- choice = ui__popup_menu(nr_options, options);
-
- for (i = 0; i < nr_options - 1; ++i)
- free(options[i]);
-
- if (choice == nr_options - 1)
- break;
-
- if (choice == -1)
- continue;
-
- if (choice == annotate) {
- struct hist_entry *he;
-do_annotate:
- if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
- browser->selection->map->dso->annotate_warned = 1;
- ui_helpline__puts("No vmlinux file found, can't "
- "annotate with just a "
- "kallsyms file");
- continue;
- }
-
- he = hist_browser__selected_entry(browser);
- if (he == NULL)
- continue;
-
- hist_entry__tui_annotate(he);
- } else if (choice == browse_map)
- map__browse(browser->selection->map);
- else if (choice == zoom_dso) {
-zoom_dso:
- if (dso_filter) {
- pstack__remove(fstack, &dso_filter);
-zoom_out_dso:
- ui_helpline__pop();
- dso_filter = NULL;
- } else {
- if (dso == NULL)
- continue;
- ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
- dso->kernel ? "the Kernel" : dso->short_name);
- dso_filter = dso;
- pstack__push(fstack, &dso_filter);
- }
- hists__filter_by_dso(self, dso_filter);
- hists__browser_title(self, msg, sizeof(msg), ev_name,
- dso_filter, thread_filter);
- hist_browser__reset(browser);
- } else if (choice == zoom_thread) {
-zoom_thread:
- if (thread_filter) {
- pstack__remove(fstack, &thread_filter);
-zoom_out_thread:
- ui_helpline__pop();
- thread_filter = NULL;
- } else {
- ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
- thread->comm_set ? thread->comm : "",
- thread->pid);
- thread_filter = thread;
- pstack__push(fstack, &thread_filter);
- }
- hists__filter_by_thread(self, thread_filter);
- hists__browser_title(self, msg, sizeof(msg), ev_name,
- dso_filter, thread_filter);
- hist_browser__reset(browser);
- }
- }
-out_free_stack:
- pstack__delete(fstack);
-out:
- hist_browser__delete(browser);
- return key;
-}
-
-int hists__tui_browse_tree(struct rb_root *self, const char *help)
-{
- struct rb_node *first = rb_first(self), *nd = first, *next;
- int key = 0;
-
- while (nd) {
- struct hists *hists = rb_entry(nd, struct hists, rb_node);
- const char *ev_name = __event_name(hists->type, hists->config);
-
- key = hists__browse(hists, help, ev_name);
- switch (key) {
- case NEWT_KEY_TAB:
- next = rb_next(nd);
- if (next)
- nd = next;
- break;
- case NEWT_KEY_UNTAB:
- if (nd == first)
- continue;
- nd = rb_prev(nd);
- default:
- return key;
- }
- }
-
- return key;
-}
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
deleted file mode 100644
index e35437dfa5b..00000000000
--- a/tools/perf/util/ui/browsers/map.c
+++ /dev/null
@@ -1,155 +0,0 @@
-#include "../libslang.h"
-#include <elf.h>
-#include <sys/ttydefaults.h>
-#include <ctype.h>
-#include <string.h>
-#include <linux/bitops.h>
-#include "../../debug.h"
-#include "../../symbol.h"
-#include "../browser.h"
-#include "../helpline.h"
-#include "map.h"
-
-static int ui_entry__read(const char *title, char *bf, size_t size, int width)
-{
- struct newtExitStruct es;
- newtComponent form, entry;
- const char *result;
- int err = -1;
-
- newtCenteredWindow(width, 1, title);
- form = newtForm(NULL, NULL, 0);
- if (form == NULL)
- return -1;
-
- entry = newtEntry(0, 0, "0x", width, &result, NEWT_FLAG_SCROLL);
- if (entry == NULL)
- goto out_free_form;
-
- newtFormAddComponent(form, entry);
- newtFormAddHotKey(form, NEWT_KEY_ENTER);
- newtFormAddHotKey(form, NEWT_KEY_ESCAPE);
- newtFormAddHotKey(form, NEWT_KEY_LEFT);
- newtFormAddHotKey(form, CTRL('c'));
- newtFormRun(form, &es);
-
- if (result != NULL) {
- strncpy(bf, result, size);
- err = 0;
- }
-out_free_form:
- newtPopWindow();
- newtFormDestroy(form);
- return 0;
-}
-
-struct map_browser {
- struct ui_browser b;
- struct map *map;
- u8 addrlen;
-};
-
-static void map_browser__write(struct ui_browser *self, void *nd, int row)
-{
- struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
- struct map_browser *mb = container_of(self, struct map_browser, b);
- bool current_entry = ui_browser__is_current_entry(self, row);
- int width;
-
- ui_browser__set_percent_color(self, 0, current_entry);
- slsmg_printf("%*llx %*llx %c ",
- mb->addrlen, sym->start, mb->addrlen, sym->end,
- sym->binding == STB_GLOBAL ? 'g' :
- sym->binding == STB_LOCAL ? 'l' : 'w');
- width = self->width - ((mb->addrlen * 2) + 4);
- if (width > 0)
- slsmg_write_nstring(sym->name, width);
-}
-
-/* FIXME uber-kludgy, see comment on cmd_report... */
-static u32 *symbol__browser_index(struct symbol *self)
-{
- return ((void *)self) - sizeof(struct rb_node) - sizeof(u32);
-}
-
-static int map_browser__search(struct map_browser *self)
-{
- char target[512];
- struct symbol *sym;
- int err = ui_entry__read("Search by name/addr", target, sizeof(target), 40);
-
- if (err)
- return err;
-
- if (target[0] == '0' && tolower(target[1]) == 'x') {
- u64 addr = strtoull(target, NULL, 16);
- sym = map__find_symbol(self->map, addr, NULL);
- } else
- sym = map__find_symbol_by_name(self->map, target, NULL);
-
- if (sym != NULL) {
- u32 *idx = symbol__browser_index(sym);
-
- self->b.top = &sym->rb_node;
- self->b.index = self->b.top_idx = *idx;
- } else
- ui_helpline__fpush("%s not found!", target);
-
- return 0;
-}
-
-static int map_browser__run(struct map_browser *self)
-{
- int key;
-
- if (ui_browser__show(&self->b, self->map->dso->long_name,
- "Press <- or ESC to exit, %s / to search",
- verbose ? "" : "restart with -v to use") < 0)
- return -1;
-
- if (verbose)
- ui_browser__add_exit_key(&self->b, '/');
-
- while (1) {
- key = ui_browser__run(&self->b);
-
- if (verbose && key == '/')
- map_browser__search(self);
- else
- break;
- }
-
- ui_browser__hide(&self->b);
- return key;
-}
-
-int map__browse(struct map *self)
-{
- struct map_browser mb = {
- .b = {
- .entries = &self->dso->symbols[self->type],
- .refresh = ui_browser__rb_tree_refresh,
- .seek = ui_browser__rb_tree_seek,
- .write = map_browser__write,
- },
- .map = self,
- };
- struct rb_node *nd;
- char tmp[BITS_PER_LONG / 4];
- u64 maxaddr = 0;
-
- for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
- struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
-
- if (maxaddr < pos->end)
- maxaddr = pos->end;
- if (verbose) {
- u32 *idx = symbol__browser_index(pos);
- *idx = mb.b.nr_entries;
- }
- ++mb.b.nr_entries;
- }
-
- mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
- return map_browser__run(&mb);
-}
diff --git a/tools/perf/util/ui/browsers/map.h b/tools/perf/util/ui/browsers/map.h
deleted file mode 100644
index df8581a43e1..00000000000
--- a/tools/perf/util/ui/browsers/map.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _PERF_UI_MAP_BROWSER_H_
-#define _PERF_UI_MAP_BROWSER_H_ 1
-struct map;
-
-int map__browse(struct map *self);
-#endif /* _PERF_UI_MAP_BROWSER_H_ */
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
deleted file mode 100644
index 8d79daa4458..00000000000
--- a/tools/perf/util/ui/helpline.c
+++ /dev/null
@@ -1,69 +0,0 @@
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <newt.h>
-
-#include "../debug.h"
-#include "helpline.h"
-
-void ui_helpline__pop(void)
-{
- newtPopHelpLine();
-}
-
-void ui_helpline__push(const char *msg)
-{
- newtPushHelpLine(msg);
-}
-
-void ui_helpline__vpush(const char *fmt, va_list ap)
-{
- char *s;
-
- if (vasprintf(&s, fmt, ap) < 0)
- vfprintf(stderr, fmt, ap);
- else {
- ui_helpline__push(s);
- free(s);
- }
-}
-
-void ui_helpline__fpush(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- ui_helpline__vpush(fmt, ap);
- va_end(ap);
-}
-
-void ui_helpline__puts(const char *msg)
-{
- ui_helpline__pop();
- ui_helpline__push(msg);
-}
-
-void ui_helpline__init(void)
-{
- ui_helpline__puts(" ");
-}
-
-char ui_helpline__last_msg[1024];
-
-int ui_helpline__show_help(const char *format, va_list ap)
-{
- int ret;
- static int backlog;
-
- ret = vsnprintf(ui_helpline__last_msg + backlog,
- sizeof(ui_helpline__last_msg) - backlog, format, ap);
- backlog += ret;
-
- if (ui_helpline__last_msg[backlog - 1] == '\n') {
- ui_helpline__puts(ui_helpline__last_msg);
- newtRefresh();
- backlog = 0;
- }
-
- return ret;
-}
diff --git a/tools/perf/util/ui/helpline.h b/tools/perf/util/ui/helpline.h
deleted file mode 100644
index ab6028d0c40..00000000000
--- a/tools/perf/util/ui/helpline.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _PERF_UI_HELPLINE_H_
-#define _PERF_UI_HELPLINE_H_ 1
-
-void ui_helpline__init(void);
-void ui_helpline__pop(void);
-void ui_helpline__push(const char *msg);
-void ui_helpline__vpush(const char *fmt, va_list ap);
-void ui_helpline__fpush(const char *fmt, ...);
-void ui_helpline__puts(const char *msg);
-
-#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/util/ui/libslang.h
deleted file mode 100644
index 5623da8e808..00000000000
--- a/tools/perf/util/ui/libslang.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _PERF_UI_SLANG_H_
-#define _PERF_UI_SLANG_H_ 1
-/*
- * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
- * the build if it isn't defined. Use the equivalent one that glibc
- * has on features.h.
- */
-#include <features.h>
-#ifndef HAVE_LONG_LONG
-#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
-#endif
-#include <slang.h>
-
-#if SLANG_VERSION < 20104
-#define slsmg_printf(msg, args...) \
- SLsmg_printf((char *)msg, ##args)
-#define slsmg_write_nstring(msg, len) \
- SLsmg_write_nstring((char *)msg, len)
-#define sltt_set_color(obj, name, fg, bg) \
- SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
-#else
-#define slsmg_printf SLsmg_printf
-#define slsmg_write_nstring SLsmg_write_nstring
-#define sltt_set_color SLtt_set_color
-#endif
-
-#endif /* _PERF_UI_SLANG_H_ */
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
deleted file mode 100644
index d7fc399d36b..00000000000
--- a/tools/perf/util/ui/progress.c
+++ /dev/null
@@ -1,60 +0,0 @@
-#include <stdlib.h>
-#include <newt.h>
-#include "../cache.h"
-#include "progress.h"
-
-struct ui_progress {
- newtComponent form, scale;
-};
-
-struct ui_progress *ui_progress__new(const char *title, u64 total)
-{
- struct ui_progress *self = malloc(sizeof(*self));
-
- if (self != NULL) {
- int cols;
-
- if (use_browser <= 0)
- return self;
- newtGetScreenSize(&cols, NULL);
- cols -= 4;
- newtCenteredWindow(cols, 1, title);
- self->form = newtForm(NULL, NULL, 0);
- if (self->form == NULL)
- goto out_free_self;
- self->scale = newtScale(0, 0, cols, total);
- if (self->scale == NULL)
- goto out_free_form;
- newtFormAddComponent(self->form, self->scale);
- newtRefresh();
- }
-
- return self;
-
-out_free_form:
- newtFormDestroy(self->form);
-out_free_self:
- free(self);
- return NULL;
-}
-
-void ui_progress__update(struct ui_progress *self, u64 curr)
-{
- /*
- * FIXME: We should have a per UI backend way of showing progress,
- * stdio will just show a percentage as NN%, etc.
- */
- if (use_browser <= 0)
- return;
- newtScaleSet(self->scale, curr);
- newtRefresh();
-}
-
-void ui_progress__delete(struct ui_progress *self)
-{
- if (use_browser > 0) {
- newtFormDestroy(self->form);
- newtPopWindow();
- }
- free(self);
-}
diff --git a/tools/perf/util/ui/progress.h b/tools/perf/util/ui/progress.h
deleted file mode 100644
index a3820a0beb5..00000000000
--- a/tools/perf/util/ui/progress.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _PERF_UI_PROGRESS_H_
-#define _PERF_UI_PROGRESS_H_ 1
-
-struct ui_progress;
-
-struct ui_progress *ui_progress__new(const char *title, u64 total);
-void ui_progress__delete(struct ui_progress *self);
-
-void ui_progress__update(struct ui_progress *self, u64 curr);
-
-#endif
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c
deleted file mode 100644
index 662085032eb..00000000000
--- a/tools/perf/util/ui/setup.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <newt.h>
-#include <signal.h>
-#include <stdbool.h>
-
-#include "../cache.h"
-#include "../debug.h"
-#include "browser.h"
-#include "helpline.h"
-
-static void newt_suspend(void *d __used)
-{
- newtSuspend();
- raise(SIGTSTP);
- newtResume();
-}
-
-void setup_browser(void)
-{
- if (!isatty(1) || !use_browser || dump_trace) {
- use_browser = 0;
- setup_pager();
- return;
- }
-
- use_browser = 1;
- newtInit();
- newtCls();
- newtSetSuspendCallback(newt_suspend, NULL);
- ui_helpline__init();
- ui_browser__init();
-}
-
-void exit_browser(bool wait_for_ok)
-{
- if (use_browser > 0) {
- if (wait_for_ok) {
- char title[] = "Fatal Error", ok[] = "Ok";
- newtWinMessage(title, ok, ui_helpline__last_msg);
- }
- newtFinished();
- }
-}
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
deleted file mode 100644
index 056c69521a3..00000000000
--- a/tools/perf/util/ui/util.c
+++ /dev/null
@@ -1,113 +0,0 @@
-#include <newt.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <string.h>
-#include <sys/ttydefaults.h>
-
-#include "../cache.h"
-#include "../debug.h"
-#include "browser.h"
-#include "helpline.h"
-#include "util.h"
-
-static void newt_form__set_exit_keys(newtComponent self)
-{
- newtFormAddHotKey(self, NEWT_KEY_LEFT);
- newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
- newtFormAddHotKey(self, 'Q');
- newtFormAddHotKey(self, 'q');
- newtFormAddHotKey(self, CTRL('c'));
-}
-
-static newtComponent newt_form__new(void)
-{
- newtComponent self = newtForm(NULL, NULL, 0);
- if (self)
- newt_form__set_exit_keys(self);
- return self;
-}
-
-int ui__popup_menu(int argc, char * const argv[])
-{
- struct newtExitStruct es;
- int i, rc = -1, max_len = 5;
- newtComponent listbox, form = newt_form__new();
-
- if (form == NULL)
- return -1;
-
- listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT);
- if (listbox == NULL)
- goto out_destroy_form;
-
- newtFormAddComponent(form, listbox);
-
- for (i = 0; i < argc; ++i) {
- int len = strlen(argv[i]);
- if (len > max_len)
- max_len = len;
- if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
- goto out_destroy_form;
- }
-
- newtCenteredWindow(max_len, argc, NULL);
- newtFormRun(form, &es);
- rc = newtListboxGetCurrent(listbox) - NULL;
- if (es.reason == NEWT_EXIT_HOTKEY)
- rc = -1;
- newtPopWindow();
-out_destroy_form:
- newtFormDestroy(form);
- return rc;
-}
-
-int ui__help_window(const char *text)
-{
- struct newtExitStruct es;
- newtComponent tb, form = newt_form__new();
- int rc = -1;
- int max_len = 0, nr_lines = 0;
- const char *t;
-
- if (form == NULL)
- return -1;
-
- t = text;
- while (1) {
- const char *sep = strchr(t, '\n');
- int len;
-
- if (sep == NULL)
- sep = strchr(t, '\0');
- len = sep - t;
- if (max_len < len)
- max_len = len;
- ++nr_lines;
- if (*sep == '\0')
- break;
- t = sep + 1;
- }
-
- tb = newtTextbox(0, 0, max_len, nr_lines, 0);
- if (tb == NULL)
- goto out_destroy_form;
-
- newtTextboxSetText(tb, text);
- newtFormAddComponent(form, tb);
- newtCenteredWindow(max_len, nr_lines, NULL);
- newtFormRun(form, &es);
- newtPopWindow();
- rc = 0;
-out_destroy_form:
- newtFormDestroy(form);
- return rc;
-}
-
-static const char yes[] = "Yes", no[] = "No";
-
-bool ui__dialog_yesno(const char *msg)
-{
- /* newtWinChoice should really be accepting const char pointers... */
- return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
-}
diff --git a/tools/perf/util/ui/util.h b/tools/perf/util/ui/util.h
deleted file mode 100644
index afcbc1d9953..00000000000
--- a/tools/perf/util/ui/util.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _PERF_UI_UTIL_H_
-#define _PERF_UI_UTIL_H_ 1
-
-#include <stdbool.h>
-
-int ui__popup_menu(int argc, char * const argv[]);
-int ui__help_window(const char *text);
-bool ui__dialog_yesno(const char *msg);
-
-#endif /* _PERF_UI_UTIL_H_ */
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
new file mode 100644
index 00000000000..5ec80a575b5
--- /dev/null
+++ b/tools/perf/util/unwind-libdw.c
@@ -0,0 +1,210 @@
+#include <linux/compiler.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include "unwind.h"
+#include "unwind-libdw.h"
+#include "machine.h"
+#include "thread.h"
+#include <linux/types.h>
+#include "event.h"
+#include "perf_regs.h"
+
+static char *debuginfo_path;
+
+static const Dwfl_Callbacks offline_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+ .section_address = dwfl_offline_section_address,
+};
+
+static int __report_module(struct addr_location *al, u64 ip,
+ struct unwind_info *ui)
+{
+ Dwfl_Module *mod;
+ struct dso *dso = NULL;
+
+ thread__find_addr_location(ui->thread, ui->machine,
+ PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, ip, al);
+
+ if (al->map)
+ dso = al->map->dso;
+
+ if (!dso)
+ return 0;
+
+ mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (!mod)
+ mod = dwfl_report_elf(ui->dwfl, dso->short_name,
+ dso->long_name, -1, al->map->start,
+ false);
+
+ return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
+}
+
+static int report_module(u64 ip, struct unwind_info *ui)
+{
+ struct addr_location al;
+
+ return __report_module(&al, ip, ui);
+}
+
+static int entry(u64 ip, struct unwind_info *ui)
+
+{
+ struct unwind_entry e;
+ struct addr_location al;
+
+ if (__report_module(&al, ip, ui))
+ return -1;
+
+ e.ip = ip;
+ e.map = al.map;
+ e.sym = al.sym;
+
+ pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+ al.sym ? al.sym->name : "''",
+ ip,
+ al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
+
+ return ui->cb(&e, ui->arg);
+}
+
+static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
+{
+	/* We want only a single thread to be processed. */
+ if (*thread_argp != NULL)
+ return 0;
+
+ *thread_argp = arg;
+ return dwfl_pid(dwfl);
+}
+
+static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
+ Dwarf_Word *data)
+{
+ struct addr_location al;
+ ssize_t size;
+
+ thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, addr, &al);
+ if (!al.map) {
+ pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
+ return -1;
+ }
+
+ if (!al.map->dso)
+ return -1;
+
+ size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
+ addr, (u8 *) data, sizeof(*data));
+
+ return !(size == sizeof(*data));
+}
+
+static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *result,
+ void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct stack_dump *stack = &ui->sample->user_stack;
+ u64 start, end;
+ int offset;
+ int ret;
+
+ ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
+ if (ret)
+ return false;
+
+ end = start + stack->size;
+
+ /* Check overflow. */
+ if (addr + sizeof(Dwarf_Word) < addr)
+ return false;
+
+ if (addr < start || addr + sizeof(Dwarf_Word) > end) {
+ ret = access_dso_mem(ui, addr, result);
+ if (ret) {
+ pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range"
+ " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+ addr, start, end);
+ return false;
+ }
+ return true;
+ }
+
+ offset = addr - start;
+ *result = *(Dwarf_Word *)&stack->data[offset];
+ pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n",
+ addr, (unsigned long)*result, offset);
+ return true;
+}
+
+static const Dwfl_Thread_Callbacks callbacks = {
+ .next_thread = next_thread,
+ .memory_read = memory_read,
+ .set_initial_registers = libdw__arch_set_initial_registers,
+};
+
+static int
+frame_callback(Dwfl_Frame *state, void *arg)
+{
+ struct unwind_info *ui = arg;
+ Dwarf_Addr pc;
+
+ if (!dwfl_frame_pc(state, &pc, NULL)) {
+ pr_err("%s", dwfl_errmsg(-1));
+ return DWARF_CB_ABORT;
+ }
+
+ return entry(pc, ui) || !(--ui->max_stack) ?
+ DWARF_CB_ABORT : DWARF_CB_OK;
+}
+
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct machine *machine, struct thread *thread,
+ struct perf_sample *data,
+ int max_stack)
+{
+ struct unwind_info ui = {
+ .sample = data,
+ .thread = thread,
+ .machine = machine,
+ .cb = cb,
+ .arg = arg,
+ .max_stack = max_stack,
+ };
+ Dwarf_Word ip;
+ int err = -EINVAL;
+
+ if (!data->user_regs.regs)
+ return -EINVAL;
+
+ ui.dwfl = dwfl_begin(&offline_callbacks);
+ if (!ui.dwfl)
+ goto out;
+
+ err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
+ if (err)
+ goto out;
+
+ err = report_module(ip, &ui);
+ if (err)
+ goto out;
+
+ if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui))
+ goto out;
+
+ err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui);
+
+ if (err && !ui.max_stack)
+ err = 0;
+
+ out:
+ if (err)
+ pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));
+
+ dwfl_end(ui.dwfl);
+ return 0;
+}
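A minimal sketch of how the libdw unwinder added above is consumed, assuming only the unwind__get_entries()/unwind_entry_cb_t interface from this patch; the callback name print_entry and the surrounding machine/thread/sample variables are illustrative:

    /* Called once per resolved frame; a non-zero return aborts the walk. */
    static int print_entry(struct unwind_entry *entry, void *arg __maybe_unused)
    {
            printf("ip = %#" PRIx64 " (%s)\n", entry->ip,
                   entry->sym ? entry->sym->name : "[unknown]");
            return 0;
    }

    /* Walk at most 127 user-space frames using the sampled regs/stack dump. */
    unwind__get_entries(print_entry, NULL, machine, thread, sample, 127);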
diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
new file mode 100644
index 00000000000..417a1426f3a
--- /dev/null
+++ b/tools/perf/util/unwind-libdw.h
@@ -0,0 +1,21 @@
+#ifndef __PERF_UNWIND_LIBDW_H
+#define __PERF_UNWIND_LIBDW_H
+
+#include <elfutils/libdwfl.h>
+#include "event.h"
+#include "thread.h"
+#include "unwind.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg);
+
+struct unwind_info {
+ Dwfl *dwfl;
+ struct perf_sample *sample;
+ struct machine *machine;
+ struct thread *thread;
+ unwind_entry_cb_t cb;
+ void *arg;
+ int max_stack;
+};
+
+#endif /* __PERF_UNWIND_LIBDW_H */
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
new file mode 100644
index 00000000000..25578b98f5c
--- /dev/null
+++ b/tools/perf/util/unwind-libunwind.c
@@ -0,0 +1,579 @@
+/*
+ * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps.
+ *
+ * Lots of this code has been borrowed from, or heavily inspired by, parts of
+ * the libunwind 0.99 code, which is (amongst other contributors I may have
+ * forgotten):
+ *
+ * Copyright (C) 2002-2007 Hewlett-Packard Co
+ * Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * And the bugs have been added by:
+ *
+ * Copyright (C) 2010, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2012, Jiri Olsa <jolsa@redhat.com>
+ *
+ */
+
+#include <elf.h>
+#include <gelf.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <linux/list.h>
+#include <libunwind.h>
+#include <libunwind-ptrace.h>
+#include "thread.h"
+#include "session.h"
+#include "perf_regs.h"
+#include "unwind.h"
+#include "symbol.h"
+#include "util.h"
+
+extern int
+UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+ unw_word_t ip,
+ unw_word_t segbase,
+ const char *obj_name, unw_word_t start,
+ unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+#define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */
+#define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */
+
+/* Pointer-encoding formats: */
+#define DW_EH_PE_omit 0xff
+#define DW_EH_PE_ptr 0x00 /* pointer-sized unsigned value */
+#define DW_EH_PE_udata4 0x03 /* unsigned 32-bit value */
+#define DW_EH_PE_udata8 0x04 /* unsigned 64-bit value */
+#define DW_EH_PE_sdata4 0x0b /* signed 32-bit value */
+#define DW_EH_PE_sdata8 0x0c /* signed 64-bit value */
+
+/* Pointer-encoding application: */
+#define DW_EH_PE_absptr 0x00 /* absolute value */
+#define DW_EH_PE_pcrel 0x10 /* rel. to addr. of encoded value */
+
+/*
+ * The following are not documented by LSB v1.3, yet they are used by
+ * GCC; presumably they aren't documented by LSB because they aren't
+ * used on Linux:
+ */
+#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */
+#define DW_EH_PE_aligned 0x50 /* aligned pointer */
+
+/* Flags intentionally not handled, since they're not needed:
+ * #define DW_EH_PE_indirect 0x80
+ * #define DW_EH_PE_uleb128 0x01
+ * #define DW_EH_PE_udata2 0x02
+ * #define DW_EH_PE_sleb128 0x09
+ * #define DW_EH_PE_sdata2 0x0a
+ * #define DW_EH_PE_textrel 0x20
+ * #define DW_EH_PE_datarel 0x30
+ */
+
+struct unwind_info {
+ struct perf_sample *sample;
+ struct machine *machine;
+ struct thread *thread;
+};
+
+#define dw_read(ptr, type, end) ({ \
+ type *__p = (type *) ptr; \
+ type __v; \
+ if ((__p + 1) > (type *) end) \
+ return -EINVAL; \
+ __v = *__p++; \
+ ptr = (typeof(ptr)) __p; \
+ __v; \
+ })
+
+static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
+ u8 encoding)
+{
+ u8 *cur = *p;
+ *val = 0;
+
+ switch (encoding) {
+ case DW_EH_PE_omit:
+ *val = 0;
+ goto out;
+ case DW_EH_PE_ptr:
+ *val = dw_read(cur, unsigned long, end);
+ goto out;
+ default:
+ break;
+ }
+
+ switch (encoding & DW_EH_PE_APPL_MASK) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ *val = (unsigned long) cur;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & DW_EH_PE_FORMAT_MASK) {
+ case DW_EH_PE_sdata4:
+ *val += dw_read(cur, s32, end);
+ break;
+ case DW_EH_PE_udata4:
+ *val += dw_read(cur, u32, end);
+ break;
+ case DW_EH_PE_sdata8:
+ *val += dw_read(cur, s64, end);
+ break;
+ case DW_EH_PE_udata8:
+ *val += dw_read(cur, u64, end);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ out:
+ *p = cur;
+ return 0;
+}
+
+#define dw_read_encoded_value(ptr, end, enc) ({ \
+ u64 __v; \
+ if (__dw_read_encoded_value(&ptr, end, &__v, enc)) { \
+ return -EINVAL; \
+ } \
+ __v; \
+ })
+
+static u64 elf_section_offset(int fd, const char *name)
+{
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ u64 offset = 0;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return 0;
+
+ do {
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ break;
+
+ if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
+ break;
+
+ offset = shdr.sh_offset;
+ } while (0);
+
+ elf_end(elf);
+ return offset;
+}
+
+struct table_entry {
+ u32 start_ip_offset;
+ u32 fde_offset;
+};
+
+struct eh_frame_hdr {
+ unsigned char version;
+ unsigned char eh_frame_ptr_enc;
+ unsigned char fde_count_enc;
+ unsigned char table_enc;
+
+ /*
+ * The rest of the header is variable-length and consists of the
+ * following members:
+ *
+ * encoded_t eh_frame_ptr;
+ * encoded_t fde_count;
+ */
+
+ /* A single encoded pointer should not be more than 8 bytes. */
+ u64 enc[2];
+
+ /*
+ * struct {
+ * encoded_t start_ip;
+ * encoded_t fde_addr;
+ * } binary_search_table[fde_count];
+ */
+ char data[0];
+} __packed;
+
+static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
+ u64 offset, u64 *table_data, u64 *segbase,
+ u64 *fde_count)
+{
+ struct eh_frame_hdr hdr;
+ u8 *enc = (u8 *) &hdr.enc;
+ u8 *end = (u8 *) &hdr.data;
+ ssize_t r;
+
+ r = dso__data_read_offset(dso, machine, offset,
+ (u8 *) &hdr, sizeof(hdr));
+ if (r != sizeof(hdr))
+ return -EINVAL;
+
+	/* We don't need eh_frame_ptr, just skip it. */
+ dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
+
+ *fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
+ *segbase = offset;
+ *table_data = (enc - (u8 *) &hdr) + offset;
+ return 0;
+}
+
+static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+ u64 *table_data, u64 *segbase,
+ u64 *fde_count)
+{
+ int ret = -EINVAL, fd;
+ u64 offset;
+
+ fd = dso__data_fd(dso, machine);
+ if (fd < 0)
+ return -EINVAL;
+
+ /* Check the .eh_frame section for unwinding info */
+ offset = elf_section_offset(fd, ".eh_frame_hdr");
+
+ if (offset)
+ ret = unwind_spec_ehframe(dso, machine, offset,
+ table_data, segbase,
+ fde_count);
+
+ return ret;
+}
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int read_unwind_spec_debug_frame(struct dso *dso,
+ struct machine *machine, u64 *offset)
+{
+ int fd = dso__data_fd(dso, machine);
+
+ if (fd < 0)
+ return -EINVAL;
+
+ /* Check the .debug_frame section for unwinding info */
+ *offset = elf_section_offset(fd, ".debug_frame");
+
+ if (*offset)
+ return 0;
+
+ return -EINVAL;
+}
+#endif
+
+static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
+{
+ struct addr_location al;
+
+ thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, ip, &al);
+ return al.map;
+}
+
+static int
+find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
+ int need_unwind_info, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct map *map;
+ unw_dyn_info_t di;
+ u64 table_data, segbase, fde_count;
+
+ map = find_map(ip, ui);
+ if (!map || !map->dso)
+ return -EINVAL;
+
+ pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
+
+ /* Check the .eh_frame section for unwinding info */
+ if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+ &table_data, &segbase, &fde_count)) {
+ memset(&di, 0, sizeof(di));
+ di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
+ di.start_ip = map->start;
+ di.end_ip = map->end;
+ di.u.rti.segbase = map->start + segbase;
+ di.u.rti.table_data = map->start + table_data;
+ di.u.rti.table_len = fde_count * sizeof(struct table_entry)
+ / sizeof(unw_word_t);
+ return dwarf_search_unwind_table(as, ip, &di, pi,
+ need_unwind_info, arg);
+ }
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+ /* Check the .debug_frame section for unwinding info */
+ if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+ memset(&di, 0, sizeof(di));
+ if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+ map->start, map->end))
+ return dwarf_search_unwind_table(as, ip, &di, pi,
+ need_unwind_info, arg);
+ }
+#endif
+
+ return -EINVAL;
+}
+
+static int access_fpreg(unw_addr_space_t __maybe_unused as,
+ unw_regnum_t __maybe_unused num,
+ unw_fpreg_t __maybe_unused *val,
+ int __maybe_unused __write,
+ void __maybe_unused *arg)
+{
+ pr_err("unwind: access_fpreg unsupported\n");
+ return -UNW_EINVAL;
+}
+
+static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as,
+ unw_word_t __maybe_unused *dil_addr,
+ void __maybe_unused *arg)
+{
+ return -UNW_ENOINFO;
+}
+
+static int resume(unw_addr_space_t __maybe_unused as,
+ unw_cursor_t __maybe_unused *cu,
+ void __maybe_unused *arg)
+{
+ pr_err("unwind: resume unsupported\n");
+ return -UNW_EINVAL;
+}
+
+static int
+get_proc_name(unw_addr_space_t __maybe_unused as,
+ unw_word_t __maybe_unused addr,
+ char __maybe_unused *bufp, size_t __maybe_unused buf_len,
+ unw_word_t __maybe_unused *offp, void __maybe_unused *arg)
+{
+ pr_err("unwind: get_proc_name unsupported\n");
+ return -UNW_EINVAL;
+}
+
+static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
+ unw_word_t *data)
+{
+ struct addr_location al;
+ ssize_t size;
+
+ thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, addr, &al);
+ if (!al.map) {
+ pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
+ return -1;
+ }
+
+ if (!al.map->dso)
+ return -1;
+
+ size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
+ addr, (u8 *) data, sizeof(*data));
+
+ return !(size == sizeof(*data));
+}
+
+static int access_mem(unw_addr_space_t __maybe_unused as,
+ unw_word_t addr, unw_word_t *valp,
+ int __write, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct stack_dump *stack = &ui->sample->user_stack;
+ u64 start, end;
+ int offset;
+ int ret;
+
+ /* Don't support write, probably not needed. */
+ if (__write || !stack || !ui->sample->user_regs.regs) {
+ *valp = 0;
+ return 0;
+ }
+
+ ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
+ if (ret)
+ return ret;
+
+ end = start + stack->size;
+
+ /* Check overflow. */
+ if (addr + sizeof(unw_word_t) < addr)
+ return -EINVAL;
+
+ if (addr < start || addr + sizeof(unw_word_t) >= end) {
+ ret = access_dso_mem(ui, addr, valp);
+ if (ret) {
+ pr_debug("unwind: access_mem %p not inside range"
+ " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+ (void *) addr, start, end);
+ *valp = 0;
+ return ret;
+ }
+ return 0;
+ }
+
+ offset = addr - start;
+ *valp = *(unw_word_t *)&stack->data[offset];
+ pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
+ (void *) addr, (unsigned long)*valp, offset);
+ return 0;
+}
+
+static int access_reg(unw_addr_space_t __maybe_unused as,
+ unw_regnum_t regnum, unw_word_t *valp,
+ int __write, void *arg)
+{
+ struct unwind_info *ui = arg;
+ int id, ret;
+ u64 val;
+
+ /* Don't support write, I suspect we don't need it. */
+ if (__write) {
+ pr_err("unwind: access_reg w %d\n", regnum);
+ return 0;
+ }
+
+ if (!ui->sample->user_regs.regs) {
+ *valp = 0;
+ return 0;
+ }
+
+ id = libunwind__arch_reg_id(regnum);
+ if (id < 0)
+ return -EINVAL;
+
+ ret = perf_reg_value(&val, &ui->sample->user_regs, id);
+ if (ret) {
+ pr_err("unwind: can't read reg %d\n", regnum);
+ return ret;
+ }
+
+ *valp = (unw_word_t) val;
+ pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
+ return 0;
+}
+
+static void put_unwind_info(unw_addr_space_t __maybe_unused as,
+ unw_proc_info_t *pi __maybe_unused,
+ void *arg __maybe_unused)
+{
+ pr_debug("unwind: put_unwind_info called\n");
+}
+
+static int entry(u64 ip, struct thread *thread, struct machine *machine,
+ unwind_entry_cb_t cb, void *arg)
+{
+ struct unwind_entry e;
+ struct addr_location al;
+
+ thread__find_addr_location(thread, machine,
+ PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, ip, &al);
+
+ e.ip = ip;
+ e.map = al.map;
+ e.sym = al.sym;
+
+ pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+ al.sym ? al.sym->name : "''",
+ ip,
+ al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
+
+ return cb(&e, arg);
+}
+
+static void display_error(int err)
+{
+ switch (err) {
+ case UNW_EINVAL:
+ pr_err("unwind: Only supports local.\n");
+ break;
+ case UNW_EUNSPEC:
+ pr_err("unwind: Unspecified error.\n");
+ break;
+ case UNW_EBADREG:
+ pr_err("unwind: Register unavailable.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static unw_accessors_t accessors = {
+ .find_proc_info = find_proc_info,
+ .put_unwind_info = put_unwind_info,
+ .get_dyn_info_list_addr = get_dyn_info_list_addr,
+ .access_mem = access_mem,
+ .access_reg = access_reg,
+ .access_fpreg = access_fpreg,
+ .resume = resume,
+ .get_proc_name = get_proc_name,
+};
+
+static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+ void *arg, int max_stack)
+{
+ unw_addr_space_t addr_space;
+ unw_cursor_t c;
+ int ret;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (!addr_space) {
+ pr_err("unwind: Can't create unwind address space.\n");
+ return -ENOMEM;
+ }
+
+ ret = unw_init_remote(&c, addr_space, ui);
+ if (ret)
+ display_error(ret);
+
+ while (!ret && (unw_step(&c) > 0) && max_stack--) {
+ unw_word_t ip;
+
+ unw_get_reg(&c, UNW_REG_IP, &ip);
+ ret = ip ? entry(ip, ui->thread, ui->machine, cb, arg) : 0;
+ }
+
+ unw_destroy_addr_space(addr_space);
+ return ret;
+}
+
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct machine *machine, struct thread *thread,
+ struct perf_sample *data, int max_stack)
+{
+ u64 ip;
+ struct unwind_info ui = {
+ .sample = data,
+ .thread = thread,
+ .machine = machine,
+ };
+ int ret;
+
+ if (!data->user_regs.regs)
+ return -EINVAL;
+
+ ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
+ if (ret)
+ return ret;
+
+ ret = entry(ip, thread, machine, cb, arg);
+ if (ret)
+ return -ENOMEM;
+
+ return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0;
+}
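For reference, a short worked sketch of the pointer encodings handled by __dw_read_encoded_value() above; the addresses and deltas are made up for illustration:

    /* DW_EH_PE_pcrel | DW_EH_PE_sdata4 (0x1b): the field holds a signed
     * 32-bit delta relative to the field's own address.  If the field sits
     * at 0x1000 and stores -16, the decoded value is 0x1000 - 16 = 0xff0.
     *
     * DW_EH_PE_absptr | DW_EH_PE_udata4 (0x03): the unsigned 32-bit value
     * is taken as-is; .eh_frame_hdr typically uses this encoding for its
     * fde_count field, which unwind_spec_ehframe() reads above. */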
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
new file mode 100644
index 00000000000..f03061260b4
--- /dev/null
+++ b/tools/perf/util/unwind.h
@@ -0,0 +1,37 @@
+#ifndef __UNWIND_H
+#define __UNWIND_H
+
+#include <linux/types.h>
+#include "event.h"
+#include "symbol.h"
+
+struct unwind_entry {
+ struct map *map;
+ struct symbol *sym;
+ u64 ip;
+};
+
+typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct machine *machine,
+ struct thread *thread,
+ struct perf_sample *data, int max_stack);
+/* libunwind specific */
+#ifdef HAVE_LIBUNWIND_SUPPORT
+int libunwind__arch_reg_id(int regnum);
+#endif
+#else
+static inline int
+unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
+ void *arg __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct perf_sample *data __maybe_unused,
+ int max_stack __maybe_unused)
+{
+ return 0;
+}
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
+#endif /* __UNWIND_H */
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index e16bf9a707e..4007aca8e0c 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -1,9 +1,13 @@
/*
- * GIT - The information manager from hell
+ * usage.c
+ *
+ * Various reporting routines.
+ * Originally copied from GIT source.
*
* Copyright (C) Linus Torvalds, 2005
*/
#include "util.h"
+#include "debug.h"
static void report(const char *prefix, const char *err, va_list params)
{
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 214265674dd..95aefa78bb0 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -1,5 +1,40 @@
+#include "../perf.h"
#include "util.h"
+#include <api/fs/fs.h>
#include <sys/mman.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <byteswap.h>
+#include <linux/kernel.h>
+
+/*
+ * XXX We need to find a better place for these things...
+ */
+unsigned int page_size;
+int cacheline_size;
+
+bool test_attr__enabled;
+
+bool perf_host = true;
+bool perf_guest = false;
+
+char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
+
+void event_attr_init(struct perf_event_attr *attr)
+{
+ if (!perf_host)
+ attr->exclude_host = 1;
+ if (!perf_guest)
+ attr->exclude_guest = 1;
+ /* to capture ABI version */
+ attr->size = sizeof(*attr);
+}
int mkdir_p(char *path, mode_t mode)
{
@@ -27,17 +62,20 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
-static int slow_copyfile(const char *from, const char *to)
+static int slow_copyfile(const char *from, const char *to, mode_t mode)
{
- int err = 0;
+ int err = -1;
char *line = NULL;
size_t n;
FILE *from_fp = fopen(from, "r"), *to_fp;
+ mode_t old_umask;
if (from_fp == NULL)
goto out;
+ old_umask = umask(mode ^ 0777);
to_fp = fopen(to, "w");
+ umask(old_umask);
if (to_fp == NULL)
goto out_fclose_from;
@@ -54,7 +92,7 @@ out:
return err;
}
-int copyfile(const char *from, const char *to)
+int copyfile_mode(const char *from, const char *to, mode_t mode)
{
int fromfd, tofd;
struct stat st;
@@ -65,13 +103,13 @@ int copyfile(const char *from, const char *to)
goto out;
if (st.st_size == 0) /* /proc? do it slowly... */
- return slow_copyfile(from, to);
+ return slow_copyfile(from, to, mode);
fromfd = open(from, O_RDONLY);
if (fromfd < 0)
goto out;
- tofd = creat(to, 0755);
+ tofd = creat(to, mode);
if (tofd < 0)
goto out_close_from;
@@ -93,6 +131,11 @@ out:
return err;
}
+int copyfile(const char *from, const char *to)
+{
+ return copyfile_mode(from, to, 0755);
+}
+
unsigned long convert_unit(unsigned long value, char *unit)
{
*unit = ' ';
@@ -114,3 +157,386 @@ unsigned long convert_unit(unsigned long value, char *unit)
return value;
}
+
+static ssize_t ion(bool is_read, int fd, void *buf, size_t n)
+{
+ void *buf_start = buf;
+ size_t left = n;
+
+ while (left) {
+ ssize_t ret = is_read ? read(fd, buf, left) :
+ write(fd, buf, left);
+
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ return ret;
+
+ left -= ret;
+ buf += ret;
+ }
+
+ BUG_ON((size_t)(buf - buf_start) != n);
+ return n;
+}
+
+/*
+ * Read exactly 'n' bytes or return an error.
+ */
+ssize_t readn(int fd, void *buf, size_t n)
+{
+ return ion(true, fd, buf, n);
+}
+
+/*
+ * Write exactly 'n' bytes or return an error.
+ */
+ssize_t writen(int fd, void *buf, size_t n)
+{
+ return ion(false, fd, buf, n);
+}
+
+size_t hex_width(u64 v)
+{
+ size_t n = 1;
+
+ while ((v >>= 4))
+ ++n;
+
+ return n;
+}
+
+static int hex(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ return -1;
+}
+
+/*
+ * Parse the leading hex characters of 'ptr' into *long_val.
+ * Return the number of characters consumed.
+ */
+int hex2u64(const char *ptr, u64 *long_val)
+{
+ const char *p = ptr;
+ *long_val = 0;
+
+ while (*p) {
+ const int hex_val = hex(*p);
+
+ if (hex_val < 0)
+ break;
+
+ *long_val = (*long_val << 4) | hex_val;
+ p++;
+ }
+
+ return p - ptr;
+}
+
+/* Obtain a backtrace and print it to stdout. */
+#ifdef HAVE_BACKTRACE_SUPPORT
+void dump_stack(void)
+{
+ void *array[16];
+ size_t size = backtrace(array, ARRAY_SIZE(array));
+ char **strings = backtrace_symbols(array, size);
+ size_t i;
+
+ printf("Obtained %zd stack frames.\n", size);
+
+ for (i = 0; i < size; i++)
+ printf("%s\n", strings[i]);
+
+ free(strings);
+}
+#else
+void dump_stack(void) {}
+#endif
+
+void get_term_dimensions(struct winsize *ws)
+{
+ char *s = getenv("LINES");
+
+ if (s != NULL) {
+ ws->ws_row = atoi(s);
+ s = getenv("COLUMNS");
+ if (s != NULL) {
+ ws->ws_col = atoi(s);
+ if (ws->ws_row && ws->ws_col)
+ return;
+ }
+ }
+#ifdef TIOCGWINSZ
+ if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
+ ws->ws_row && ws->ws_col)
+ return;
+#endif
+ ws->ws_row = 25;
+ ws->ws_col = 80;
+}
+
+static void set_tracing_events_path(const char *mountpoint)
+{
+ snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
+ mountpoint, "tracing/events");
+}
+
+const char *perf_debugfs_mount(const char *mountpoint)
+{
+ const char *mnt;
+
+ mnt = debugfs_mount(mountpoint);
+ if (!mnt)
+ return NULL;
+
+ set_tracing_events_path(mnt);
+
+ return mnt;
+}
+
+void perf_debugfs_set_path(const char *mntpt)
+{
+ snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
+ set_tracing_events_path(mntpt);
+}
+
+static const char *find_debugfs(void)
+{
+ const char *path = perf_debugfs_mount(NULL);
+
+ if (!path)
+ fprintf(stderr, "Your kernel does not support the debugfs filesystem");
+
+ return path;
+}
+
+/*
+ * Finds the path to the tracing directory under debugfs.
+ * Allocates the string once and caches it.
+ */
+const char *find_tracing_dir(void)
+{
+ static char *tracing;
+ static int tracing_found;
+ const char *debugfs;
+
+ if (tracing_found)
+ return tracing;
+
+ debugfs = find_debugfs();
+ if (!debugfs)
+ return NULL;
+
+ tracing = malloc(strlen(debugfs) + 9);
+ if (!tracing)
+ return NULL;
+
+ sprintf(tracing, "%s/tracing", debugfs);
+
+ tracing_found = 1;
+ return tracing;
+}
+
+char *get_tracing_file(const char *name)
+{
+ const char *tracing;
+ char *file;
+
+ tracing = find_tracing_dir();
+ if (!tracing)
+ return NULL;
+
+ file = malloc(strlen(tracing) + strlen(name) + 2);
+ if (!file)
+ return NULL;
+
+ sprintf(file, "%s/%s", tracing, name);
+ return file;
+}
+
+void put_tracing_file(char *file)
+{
+ free(file);
+}
+
+int parse_nsec_time(const char *str, u64 *ptime)
+{
+ u64 time_sec, time_nsec;
+ char *end;
+
+ time_sec = strtoul(str, &end, 10);
+ if (*end != '.' && *end != '\0')
+ return -1;
+
+ if (*end == '.') {
+ int i;
+ char nsec_buf[10];
+
+ if (strlen(++end) > 9)
+ return -1;
+
+ strncpy(nsec_buf, end, 9);
+ nsec_buf[9] = '\0';
+
+ /* make it nsec precision */
+ for (i = strlen(nsec_buf); i < 9; i++)
+ nsec_buf[i] = '0';
+
+ time_nsec = strtoul(nsec_buf, &end, 10);
+ if (*end != '\0')
+ return -1;
+ } else
+ time_nsec = 0;
+
+ *ptime = time_sec * NSEC_PER_SEC + time_nsec;
+ return 0;
+}
+
+unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
+{
+ struct parse_tag *i = tags;
+
+ while (i->tag) {
+ char *s;
+
+ s = strchr(str, i->tag);
+ if (s) {
+ unsigned long int value;
+ char *endptr;
+
+ value = strtoul(str, &endptr, 10);
+ if (s != endptr)
+ break;
+
+ if (value > ULONG_MAX / i->mult)
+ break;
+ value *= i->mult;
+ return value;
+ }
+ i++;
+ }
+
+ return (unsigned long) -1;
+}
+
+int filename__read_int(const char *filename, int *value)
+{
+ char line[64];
+ int fd = open(filename, O_RDONLY), err = -1;
+
+ if (fd < 0)
+ return -1;
+
+ if (read(fd, line, sizeof(line)) > 0) {
+ *value = atoi(line);
+ err = 0;
+ }
+
+ close(fd);
+ return err;
+}
+
+int filename__read_str(const char *filename, char **buf, size_t *sizep)
+{
+ size_t size = 0, alloc_size = 0;
+ void *bf = NULL, *nbf;
+ int fd, n, err = 0;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ do {
+ if (size == alloc_size) {
+ alloc_size += BUFSIZ;
+ nbf = realloc(bf, alloc_size);
+ if (!nbf) {
+ err = -ENOMEM;
+ break;
+ }
+
+ bf = nbf;
+ }
+
+ n = read(fd, bf + size, alloc_size - size);
+ if (n < 0) {
+ if (size) {
+ pr_warning("read failed %d: %s\n",
+ errno, strerror(errno));
+ err = 0;
+ } else
+ err = -errno;
+
+ break;
+ }
+
+ size += n;
+ } while (n > 0);
+
+ if (!err) {
+ *sizep = size;
+ *buf = bf;
+ } else
+ free(bf);
+
+ close(fd);
+ return err;
+}
+
+const char *get_filename_for_perf_kvm(void)
+{
+ const char *filename;
+
+ if (perf_host && !perf_guest)
+ filename = strdup("perf.data.host");
+ else if (!perf_host && perf_guest)
+ filename = strdup("perf.data.guest");
+ else
+ filename = strdup("perf.data.kvm");
+
+ return filename;
+}
+
+int perf_event_paranoid(void)
+{
+ char path[PATH_MAX];
+ const char *procfs = procfs__mountpoint();
+ int value;
+
+ if (!procfs)
+ return INT_MAX;
+
+ scnprintf(path, PATH_MAX, "%s/sys/kernel/perf_event_paranoid", procfs);
+
+ if (filename__read_int(path, &value))
+ return INT_MAX;
+
+ return value;
+}
+
+void mem_bswap_32(void *src, int byte_size)
+{
+ u32 *m = src;
+ while (byte_size > 0) {
+ *m = bswap_32(*m);
+ byte_size -= sizeof(u32);
+ ++m;
+ }
+}
+
+void mem_bswap_64(void *src, int byte_size)
+{
+ u64 *m = src;
+
+ while (byte_size > 0) {
+ *m = bswap_64(*m);
+ byte_size -= sizeof(u64);
+ ++m;
+ }
+}
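A usage sketch for the parse_tag_value() helper added above; the size-suffix table is illustrative only (the real call sites define their own tag tables):

    static struct parse_tag size_tags[] = {
            { .tag = 'B', .mult = 1 },
            { .tag = 'K', .mult = 1 << 10 },
            { .tag = 'M', .mult = 1 << 20 },
            { .tag = 0 },
    };

    unsigned long bytes = parse_tag_value("129M", size_tags);
    /* -> 129 * (1 << 20) = 135266304; returns (unsigned long) -1 on failure */

Similarly, parse_nsec_time("10.5", &t) yields t = 10500000000, i.e. 10.5 seconds expressed in nanoseconds.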
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 7562707ddd1..66864364ccb 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -1,8 +1,6 @@
#ifndef GIT_COMPAT_UTIL_H
#define GIT_COMPAT_UTIL_H
-#define _FILE_OFFSET_BITS 64
-
#ifndef FLEX_ARRAY
/*
* See if our compiler is known to support flexible array members.
@@ -40,7 +38,6 @@
#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
#define _ALL_SOURCE 1
-#define _GNU_SOURCE 1
#define _BSD_SOURCE 1
#define HAS_BOOL
@@ -70,26 +67,23 @@
#include <sys/poll.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
-#ifndef NO_SYS_SELECT_H
-#include <sys/select.h>
-#endif
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <pwd.h>
#include <inttypes.h>
-#include "../../../include/linux/magic.h"
-#include "types.h"
+#include <linux/magic.h>
+#include <linux/types.h>
#include <sys/ttydefaults.h>
-
-#ifndef NO_ICONV
-#include <iconv.h>
-#endif
+#include <api/fs/debugfs.h>
+#include <termios.h>
+#include <linux/bitops.h>
extern const char *graph_line;
extern const char *graph_dotted_line;
extern char buildid_dir[];
+extern char tracing_events_path[];
+extern void perf_debugfs_set_path(const char *mountpoint);
+const char *perf_debugfs_mount(const char *mountpoint);
+const char *find_tracing_dir(void);
+char *get_tracing_file(const char *name);
+void put_tracing_file(char *file);
/* On most systems <limits.h> would have given us this, but
* not on some systems (e.g. GNU/Hurd).
@@ -135,6 +129,8 @@ extern char buildid_dir[];
#endif
#endif
+#define PERF_GTK_DSO "libperf-gtk.so"
+
/* General helper functions */
extern void usage(const char *err) NORETURN;
extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
@@ -190,6 +186,8 @@ static inline void *zalloc(size_t size)
return calloc(1, size);
}
+#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
+
static inline int has_extension(const char *filename, const char *ext)
{
size_t len = strlen(filename);
@@ -206,9 +204,17 @@ static inline int has_extension(const char *filename, const char *ext)
#undef isalpha
#undef isprint
#undef isalnum
+#undef islower
+#undef isupper
#undef tolower
#undef toupper
+#ifndef NSEC_PER_MSEC
+#define NSEC_PER_MSEC 1000000L
+#endif
+
+int parse_nsec_time(const char *str, u64 *ptime);
+
extern unsigned char sane_ctype[256];
#define GIT_SPACE 0x01
#define GIT_DIGIT 0x02
@@ -226,6 +232,8 @@ extern unsigned char sane_ctype[256];
#define isalpha(x) sane_istest(x,GIT_ALPHA)
#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
#define isprint(x) sane_istest(x,GIT_PRINT)
+#define islower(x) (sane_istest(x,GIT_ALPHA) && (x & 0x20))
+#define isupper(x) (sane_istest(x,GIT_ALPHA) && !(x & 0x20))
#define tolower(x) sane_case((unsigned char)(x), 0x20)
#define toupper(x) sane_case((unsigned char)(x), 0)
@@ -236,37 +244,90 @@ static inline int sane_case(int x, int high)
return x;
}
-#ifndef DIR_HAS_BSD_GROUP_SEMANTICS
-# define FORCE_DIR_SET_GID S_ISGID
-#else
-# define FORCE_DIR_SET_GID 0
-#endif
-
-#ifdef NO_NSEC
-#undef USE_NSEC
-#define ST_CTIME_NSEC(st) 0
-#define ST_MTIME_NSEC(st) 0
-#else
-#ifdef USE_ST_TIMESPEC
-#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec))
-#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec))
-#else
-#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec))
-#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec))
-#endif
-#endif
-
int mkdir_p(char *path, mode_t mode);
int copyfile(const char *from, const char *to);
+int copyfile_mode(const char *from, const char *to, mode_t mode);
s64 perf_atoll(const char *str);
char **argv_split(const char *str, int *argcp);
void argv_free(char **argv);
bool strglobmatch(const char *str, const char *pat);
bool strlazymatch(const char *str, const char *pat);
+int strtailcmp(const char *s1, const char *s2);
+char *strxfrchar(char *s, char from, char to);
unsigned long convert_unit(unsigned long value, char *unit);
+ssize_t readn(int fd, void *buf, size_t n);
+ssize_t writen(int fd, void *buf, size_t n);
+
+struct perf_event_attr;
+
+void event_attr_init(struct perf_event_attr *attr);
#define _STR(x) #x
#define STR(x) _STR(x)
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+static inline unsigned next_pow2(unsigned x)
+{
+ if (!x)
+ return 1;
+ return 1ULL << (32 - __builtin_clz(x - 1));
+}
+
+static inline unsigned long next_pow2_l(unsigned long x)
+{
+#if BITS_PER_LONG == 64
+ if (x <= (1UL << 31))
+ return next_pow2(x);
+ return (unsigned long)next_pow2(x >> 32) << 32;
+#else
+ return next_pow2(x);
#endif
+}
+
+size_t hex_width(u64 v);
+int hex2u64(const char *ptr, u64 *val);
+
+char *ltrim(char *s);
+char *rtrim(char *s);
+
+void dump_stack(void);
+
+extern unsigned int page_size;
+extern int cacheline_size;
+
+void get_term_dimensions(struct winsize *ws);
+
+struct parse_tag {
+ char tag;
+ int mult;
+};
+
+unsigned long parse_tag_value(const char *str, struct parse_tag *tags);
+
+#define SRCLINE_UNKNOWN ((char *) "??:0")
+
+struct dso;
+
+char *get_srcline(struct dso *dso, unsigned long addr);
+void free_srcline(char *srcline);
+
+int filename__read_int(const char *filename, int *value);
+int filename__read_str(const char *filename, char **buf, size_t *sizep);
+int perf_event_paranoid(void);
+
+void mem_bswap_64(void *src, int byte_size);
+void mem_bswap_32(void *src, int byte_size);
+
+const char *get_filename_for_perf_kvm(void);
+#endif /* GIT_COMPAT_UTIL_H */
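A few worked values for the power-of-two helpers above, to make the __builtin_clz() rounding concrete (the inputs are arbitrary examples):

    is_power_of_2(64);   /* true; is_power_of_2(0) is false by design      */
    next_pow2(1500);     /* 2048 == 1 << (32 - __builtin_clz(1499))        */
    next_pow2(2048);     /* 2048: values already a power of two are kept   */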
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index cfa55d686e3..0fb3c1fcd3e 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -31,13 +31,14 @@ void perf_read_values_destroy(struct perf_read_values *values)
return;
for (i = 0; i < values->threads; i++)
- free(values->value[i]);
- free(values->pid);
- free(values->tid);
- free(values->counterrawid);
+ zfree(&values->value[i]);
+ zfree(&values->value);
+ zfree(&values->pid);
+ zfree(&values->tid);
+ zfree(&values->counterrawid);
for (i = 0; i < values->counters; i++)
- free(values->countername[i]);
- free(values->countername);
+ zfree(&values->countername[i]);
+ zfree(&values->countername);
}
static void perf_read_values__enlarge_threads(struct perf_read_values *values)
@@ -150,7 +151,7 @@ static void perf_read_values__display_pretty(FILE *fp,
if (width > tidwidth)
tidwidth = width;
for (j = 0; j < values->counters; j++) {
- width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > counterwidth[j])
counterwidth[j] = width;
}
@@ -165,7 +166,7 @@ static void perf_read_values__display_pretty(FILE *fp,
fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
tidwidth, values->tid[i]);
for (j = 0; j < values->counters; j++)
- fprintf(fp, " %*Lu",
+ fprintf(fp, " %*" PRIu64,
counterwidth[j], values->value[i][j]);
fprintf(fp, "\n");
}
@@ -196,13 +197,13 @@ static void perf_read_values__display_raw(FILE *fp,
width = strlen(values->countername[j]);
if (width > namewidth)
namewidth = width;
- width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+ width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
if (width > rawwidth)
rawwidth = width;
}
for (i = 0; i < values->threads; i++) {
for (j = 0; j < values->counters; j++) {
- width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
if (width > countwidth)
countwidth = width;
}
@@ -214,7 +215,7 @@ static void perf_read_values__display_raw(FILE *fp,
countwidth, "Count");
for (i = 0; i < values->threads; i++)
for (j = 0; j < values->counters; j++)
- fprintf(fp, " %*d %*d %*s %*llx %*Lu\n",
+ fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64,
pidwidth, values->pid[i],
tidwidth, values->tid[i],
namewidth, values->countername[j],
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
index 2fa967e1a88..b21a80c6cf8 100644
--- a/tools/perf/util/values.h
+++ b/tools/perf/util/values.h
@@ -1,7 +1,7 @@
#ifndef __PERF_VALUES_H
#define __PERF_VALUES_H
-#include "types.h"
+#include <linux/types.h>
struct perf_read_values {
int threads;
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
new file mode 100644
index 00000000000..0ddb3b8a89e
--- /dev/null
+++ b/tools/perf/util/vdso.c
@@ -0,0 +1,111 @@
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <linux/kernel.h>
+
+#include "vdso.h"
+#include "util.h"
+#include "symbol.h"
+#include "linux/string.h"
+
+static bool vdso_found;
+static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX";
+
+static int find_vdso_map(void **start, void **end)
+{
+ FILE *maps;
+ char line[128];
+ int found = 0;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) {
+ pr_err("vdso: cannot open maps\n");
+ return -1;
+ }
+
+ while (!found && fgets(line, sizeof(line), maps)) {
+ int m = -1;
+
+ /* We care only about private r-x mappings. */
+ if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
+ start, end, &m))
+ continue;
+ if (m < 0)
+ continue;
+
+ if (!strncmp(&line[m], VDSO__MAP_NAME,
+ sizeof(VDSO__MAP_NAME) - 1))
+ found = 1;
+ }
+
+ fclose(maps);
+ return !found;
+}
+
+static char *get_file(void)
+{
+ char *vdso = NULL;
+ char *buf = NULL;
+ void *start, *end;
+ size_t size;
+ int fd;
+
+ if (vdso_found)
+ return vdso_file;
+
+ if (find_vdso_map(&start, &end))
+ return NULL;
+
+ size = end - start;
+
+ buf = memdup(start, size);
+ if (!buf)
+ return NULL;
+
+ fd = mkstemp(vdso_file);
+ if (fd < 0)
+ goto out;
+
+ if (size == (size_t) write(fd, buf, size))
+ vdso = vdso_file;
+
+ close(fd);
+
+ out:
+ free(buf);
+
+ vdso_found = (vdso != NULL);
+ return vdso;
+}
+
+void vdso__exit(void)
+{
+ if (vdso_found)
+ unlink(vdso_file);
+}
+
+struct dso *vdso__dso_findnew(struct list_head *head)
+{
+ struct dso *dso = dsos__find(head, VDSO__MAP_NAME, true);
+
+ if (!dso) {
+ char *file;
+
+ file = get_file();
+ if (!file)
+ return NULL;
+
+ dso = dso__new(VDSO__MAP_NAME);
+ if (dso != NULL) {
+ dsos__add(head, dso);
+ dso__set_long_name(dso, file, false);
+ }
+ }
+
+ return dso;
+}
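For context, the kind of /proc/self/maps line that find_vdso_map() above matches; the address range shown is illustrative:

    7fff5a5fe000-7fff5a600000 r-xp 00000000 00:00 0                  [vdso]

    /* sscanf() fills start/end from the address range, the "%n" conversion
     * records where the pathname column begins, and that column is then
     * compared against VDSO__MAP_NAME ("[vdso]"). */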
diff --git a/tools/perf/util/vdso.h b/tools/perf/util/vdso.h
new file mode 100644
index 00000000000..0f76e7caf6f
--- /dev/null
+++ b/tools/perf/util/vdso.h
@@ -0,0 +1,18 @@
+#ifndef __PERF_VDSO__
+#define __PERF_VDSO__
+
+#include <linux/types.h>
+#include <string.h>
+#include <stdbool.h>
+
+#define VDSO__MAP_NAME "[vdso]"
+
+static inline bool is_vdso_map(const char *filename)
+{
+ return !strcmp(filename, VDSO__MAP_NAME);
+}
+
+struct dso *vdso__dso_findnew(struct list_head *head);
+void vdso__exit(void);
+
+#endif /* __PERF_VDSO__ */
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
index 73e900edb5a..19f15b65070 100644
--- a/tools/perf/util/wrapper.c
+++ b/tools/perf/util/wrapper.c
@@ -7,7 +7,8 @@
* There's no pack memory to release - but stay close to the Git
* version so wrap this away:
*/
-static inline void release_pack_memory(size_t size __used, int flag __used)
+static inline void release_pack_memory(size_t size __maybe_unused,
+ int flag __maybe_unused)
{
}
diff --git a/tools/perf/util/xyarray.c b/tools/perf/util/xyarray.c
new file mode 100644
index 00000000000..22afbf6c536
--- /dev/null
+++ b/tools/perf/util/xyarray.c
@@ -0,0 +1,20 @@
+#include "xyarray.h"
+#include "util.h"
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
+{
+ size_t row_size = ylen * entry_size;
+ struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
+
+ if (xy != NULL) {
+ xy->entry_size = entry_size;
+ xy->row_size = row_size;
+ }
+
+ return xy;
+}
+
+void xyarray__delete(struct xyarray *xy)
+{
+ free(xy);
+}
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
new file mode 100644
index 00000000000..c488a07275d
--- /dev/null
+++ b/tools/perf/util/xyarray.h
@@ -0,0 +1,20 @@
+#ifndef _PERF_XYARRAY_H_
+#define _PERF_XYARRAY_H_ 1
+
+#include <sys/types.h>
+
+struct xyarray {
+ size_t row_size;
+ size_t entry_size;
+ char contents[];
+};
+
+struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
+void xyarray__delete(struct xyarray *xy);
+
+static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
+{
+ return &xy->contents[x * xy->row_size + y * xy->entry_size];
+}
+
+#endif /* _PERF_XYARRAY_H_ */
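Finally, a minimal usage sketch for the new two-dimensional array helper; the fds name and the 4x2 geometry are illustrative (inside perf this structure typically backs per-cpu/per-thread tables):

    struct xyarray *fds = xyarray__new(4, 2, sizeof(int)); /* 4 rows of 2 ints */

    if (fds != NULL) {
            int *fd = xyarray__entry(fds, 3, 1); /* row 3, column 1 */
            *fd = -1;                            /* entries start zeroed (zalloc) */
            xyarray__delete(fds);
    }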