Diffstat (limited to 'arch/x86/kernel/topology.c')
-rw-r--r-- arch/x86/kernel/topology.c | 105
1 file changed, 98 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 7e4515957a1..649b010da00 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -26,26 +26,117 @@
* Send feedback to <colpatch@us.ibm.com>
*/
#include <linux/nodemask.h>
+#include <linux/export.h>
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/cpu.h>
static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
#ifdef CONFIG_HOTPLUG_CPU
+
+#ifdef CONFIG_BOOTPARAM_HOTPLUG_CPU0
+static int cpu0_hotpluggable = 1;
+#else
+static int cpu0_hotpluggable;
+static int __init enable_cpu0_hotplug(char *str)
+{
+ cpu0_hotpluggable = 1;
+ return 1;
+}
+
+__setup("cpu0_hotplug", enable_cpu0_hotplug);
+#endif
+
+#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
+/*
+ * This function offlines a CPU as early as possible and allows userspace to
+ * boot up without the CPU. The user can bring the CPU back online after boot.
+ *
+ * This is only called for debugging the CPU offline/online feature.
+ */
+int __ref _debug_hotplug_cpu(int cpu, int action)
+{
+ struct device *dev = get_cpu_device(cpu);
+ int ret;
+
+ if (!cpu_is_hotpluggable(cpu))
+ return -EINVAL;
+
+ lock_device_hotplug();
+
+ switch (action) {
+ case 0:
+ ret = cpu_down(cpu);
+ if (!ret) {
+ pr_info("CPU %u is now offline\n", cpu);
+ dev->offline = true;
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ } else
+ pr_debug("Can't offline CPU%d.\n", cpu);
+ break;
+ case 1:
+ ret = cpu_up(cpu);
+ if (!ret) {
+ dev->offline = false;
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ } else {
+ pr_debug("Can't online CPU%d.\n", cpu);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ unlock_device_hotplug();
+
+ return ret;
+}
+
+static int __init debug_hotplug_cpu(void)
+{
+ _debug_hotplug_cpu(0, 0);
+ return 0;
+}
+
+late_initcall_sync(debug_hotplug_cpu);
+#endif /* CONFIG_DEBUG_HOTPLUG_CPU0 */
+
int __ref arch_register_cpu(int num)
{
+ struct cpuinfo_x86 *c = &cpu_data(num);
+
+ /*
+ * Currently CPU0 is only hotpluggable on Intel platforms. Other
+ * vendors can add hotplug support later.
+ */
+ if (c->x86_vendor != X86_VENDOR_INTEL)
+ cpu0_hotpluggable = 0;
+
/*
- * CPU0 cannot be offlined due to several
- * restrictions and assumptions in kernel. This basically
- * doesnt add a control file, one cannot attempt to offline
- * BSP.
+ * Two known BSP/CPU0 dependencies: Resume from suspend/hibernate
+ * depends on BSP. PIC interrupts depend on BSP.
*
- * Also certain PCI quirks require not to enable hotplug control
- * for all CPU's.
+ * If the BSP dependencies are under control, one can tell the
+ * kernel to enable BSP hotplug. This basically adds a control
+ * file and one can attempt to offline the BSP.
*/
- if (num)
+ if (num == 0 && cpu0_hotpluggable) {
+ unsigned int irq;
+ /*
+ * We won't take down the boot processor on i386 if some
+ * interrupts can only be serviced by the BSP in PIC mode.
+ */
+ for_each_active_irq(irq) {
+ if (!IO_APIC_IRQ(irq) && irq_has_action(irq)) {
+ cpu0_hotpluggable = 0;
+ break;
+ }
+ }
+ }
+ if (num || cpu0_hotpluggable)
per_cpu(cpu_devices, num).cpu.hotpluggable = 1;
return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
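Once CPU0 is registered as hotpluggable, the control file this patch enables is the same per-CPU sysfs attribute used for the other CPUs. The following user-space sketch is illustrative only: it assumes the conventional /sys/devices/system/cpu/cpu0/online path and a kernel booted with cpu0_hotplug (or built with CONFIG_BOOTPARAM_HOTPLUG_CPU0), neither of which is shown in this hunk.

/* Hypothetical user-space helper: toggle CPU0 via its sysfs "online" file. */
#include <stdio.h>

static int cpu0_set_online(int online)
{
	/* The attribute only exists when the kernel marked CPU0 hotpluggable. */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/online", "w");

	if (!f)
		return -1;
	fputc(online ? '1' : '0', f);
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	if (cpu0_set_online(0))		/* offline the BSP... */
		perror("offline cpu0");
	else if (cpu0_set_online(1))	/* ...and bring it back */
		perror("online cpu0");
	return 0;
}

This mirrors what _debug_hotplug_cpu() does in-kernel with cpu_down()/cpu_up(), just driven through the device attribute instead.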
-rw-r--r-- tools/perf/util/cpumap.c | 479
-rw-r--r-- tools/perf/util/cpumap.h | 84
-rw-r--r-- tools/perf/util/ctype.c | 39
-rw-r--r-- tools/perf/util/data.c | 133
-rw-r--r-- tools/perf/util/data.h | 50
-rw-r--r-- tools/perf/util/debug.c | 107
-rw-r--r-- tools/perf/util/debug.h | 22
-rw-r--r-- tools/perf/util/dso.c | 900
-rw-r--r-- tools/perf/util/dso.h | 232
-rw-r--r-- tools/perf/util/dwarf-aux.c | 884
-rw-r--r-- tools/perf/util/dwarf-aux.h | 118
-rw-r--r-- tools/perf/util/environment.c | 9
-rw-r--r-- tools/perf/util/event.c | 868
-rw-r--r-- tools/perf/util/event.h | 316
-rw-r--r-- tools/perf/util/evlist.c | 1245
-rw-r--r-- tools/perf/util/evlist.h | 265
-rw-r--r-- tools/perf/util/evsel.c | 2036
-rw-r--r-- tools/perf/util/evsel.h | 365
-rw-r--r-- tools/perf/util/exec_cmd.c | 148
-rw-r--r-- tools/perf/util/exec_cmd.h | 12
-rwxr-xr-x tools/perf/util/generate-cmdlist.sh | 39
-rw-r--r-- tools/perf/util/header.c | 3071
-rw-r--r-- tools/perf/util/header.h | 159
-rw-r--r-- tools/perf/util/help.c | 339
-rw-r--r-- tools/perf/util/help.h | 29
-rw-r--r-- tools/perf/util/hist.c | 1414
-rw-r--r-- tools/perf/util/hist.h | 347
-rw-r--r-- tools/perf/util/hweight.c | 31
-rw-r--r-- tools/perf/util/include/asm/alternative-asm.h | 8
-rw-r--r-- tools/perf/util/include/asm/asm-offsets.h | 1
-rw-r--r-- tools/perf/util/include/asm/byteorder.h | 2
-rw-r--r-- tools/perf/util/include/asm/cpufeature.h | 9
-rw-r--r-- tools/perf/util/include/asm/dwarf2.h | 13
-rw-r--r-- tools/perf/util/include/asm/hash.h | 6
-rw-r--r-- tools/perf/util/include/asm/hweight.h | 8
-rw-r--r-- tools/perf/util/include/asm/swab.h | 1
-rw-r--r-- tools/perf/util/include/asm/system.h | 1
-rw-r--r-- tools/perf/util/include/asm/uaccess.h | 14
-rw-r--r-- tools/perf/util/include/asm/unistd_32.h | 1
-rw-r--r-- tools/perf/util/include/asm/unistd_64.h | 1
-rw-r--r-- tools/perf/util/include/dwarf-regs.h | 8
-rw-r--r-- tools/perf/util/include/linux/bitmap.h | 49
-rw-r--r-- tools/perf/util/include/linux/bitops.h | 160
-rw-r--r-- tools/perf/util/include/linux/const.h | 1
-rw-r--r-- tools/perf/util/include/linux/ctype.h | 1
-rw-r--r-- tools/perf/util/include/linux/kernel.h | 128
-rw-r--r-- tools/perf/util/include/linux/linkage.h | 13
-rw-r--r-- tools/perf/util/include/linux/list.h | 29
-rw-r--r-- tools/perf/util/include/linux/poison.h | 1
-rw-r--r-- tools/perf/util/include/linux/rbtree.h | 2
-rw-r--r-- tools/perf/util/include/linux/rbtree_augmented.h | 2
-rw-r--r-- tools/perf/util/include/linux/string.h | 4
-rw-r--r-- tools/perf/util/intlist.c | 146
-rw-r--r-- tools/perf/util/intlist.h | 77
-rw-r--r-- tools/perf/util/levenshtein.c | 84
-rw-r--r-- tools/perf/util/levenshtein.h | 8
-rw-r--r-- tools/perf/util/machine.c | 1422
-rw-r--r-- tools/perf/util/machine.h | 194
-rw-r--r-- tools/perf/util/map.c | 764
-rw-r--r-- tools/perf/util/map.h | 220
-rw-r--r-- tools/perf/util/pager.c | 100
-rw-r--r-- tools/perf/util/parse-events.c | 1354
-rw-r--r-- tools/perf/util/parse-events.h | 112
-rw-r--r-- tools/perf/util/parse-events.l | 218
-rw-r--r-- tools/perf/util/parse-events.y | 454
-rw-r--r-- tools/perf/util/parse-options.c | 681
-rw-r--r-- tools/perf/util/parse-options.h | 209
-rw-r--r-- tools/perf/util/path.c | 161
-rw-r--r-- tools/perf/util/perf_regs.c | 27
-rw-r--r-- tools/perf/util/perf_regs.h | 29
-rw-r--r-- tools/perf/util/pmu.c | 796
-rw-r--r-- tools/perf/util/pmu.h | 49
-rw-r--r-- tools/perf/util/pmu.l | 43
-rw-r--r-- tools/perf/util/pmu.y | 92
-rw-r--r-- tools/perf/util/probe-event.c | 2573
-rw-r--r-- tools/perf/util/probe-event.h | 141
-rw-r--r-- tools/perf/util/probe-finder.c | 1603
-rw-r--r-- tools/perf/util/probe-finder.h | 112
-rw-r--r-- tools/perf/util/pstack.c | 75
-rw-r--r-- tools/perf/util/pstack.h | 14
-rw-r--r-- tools/perf/util/python-ext-sources | 22
-rw-r--r-- tools/perf/util/python.c | 1074
-rw-r--r-- tools/perf/util/quote.c | 54
-rw-r--r-- tools/perf/util/quote.h | 29
-rw-r--r-- tools/perf/util/rblist.c | 128
-rw-r--r-- tools/perf/util/rblist.h | 48
-rw-r--r-- tools/perf/util/record.c | 215
-rw-r--r-- tools/perf/util/run-command.c | 214
-rw-r--r-- tools/perf/util/run-command.h | 58
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-perl.c | 638
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 698
-rw-r--r-- tools/perf/util/session.c | 1673
-rw-r--r-- tools/perf/util/session.h | 129
-rw-r--r-- tools/perf/util/setup.py | 48
-rw-r--r-- tools/perf/util/sigchain.c | 52
-rw-r--r-- tools/perf/util/sigchain.h | 10
-rw-r--r-- tools/perf/util/sort.c | 1706
-rw-r--r-- tools/perf/util/sort.h | 221
-rw-r--r-- tools/perf/util/srcline.c | 299
-rw-r--r-- tools/perf/util/stat.c | 63
-rw-r--r-- tools/perf/util/stat.h | 25
-rw-r--r-- tools/perf/util/strbuf.c | 134
-rw-r--r-- tools/perf/util/strbuf.h | 92
-rw-r--r-- tools/perf/util/strfilter.c | 199
-rw-r--r-- tools/perf/util/strfilter.h | 48
-rw-r--r-- tools/perf/util/string.c | 413
-rw-r--r-- tools/perf/util/strlist.c | 173
-rw-r--r-- tools/perf/util/strlist.h | 79
-rw-r--r-- tools/perf/util/svghelper.c | 716
-rw-r--r-- tools/perf/util/svghelper.h | 33
-rw-r--r-- tools/perf/util/symbol-elf.c | 1625
-rw-r--r-- tools/perf/util/symbol-minimal.c | 330
-rw-r--r-- tools/perf/util/symbol.c | 1880
-rw-r--r-- tools/perf/util/symbol.h | 297
-rw-r--r-- tools/perf/util/target.c | 158
-rw-r--r-- tools/perf/util/target.h | 79
-rw-r--r-- tools/perf/util/thread.c | 193
-rw-r--r-- tools/perf/util/thread.h | 81
-rw-r--r-- tools/perf/util/thread_map.c | 294
-rw-r--r-- tools/perf/util/thread_map.h | 29
-rw-r--r-- tools/perf/util/tool.h | 47
-rw-r--r-- tools/perf/util/top.c | 117
-rw-r--r-- tools/perf/util/top.h | 47
-rw-r--r-- tools/perf/util/trace-event-info.c | 600
-rw-r--r-- tools/perf/util/trace-event-parse.c | 263
-rw-r--r-- tools/perf/util/trace-event-read.c | 444
-rw-r--r-- tools/perf/util/trace-event-scripting.c | 171
-rw-r--r-- tools/perf/util/trace-event.c | 82
-rw-r--r-- tools/perf/util/trace-event.h | 90
-rw-r--r-- tools/perf/util/unwind-libdw.c | 210
-rw-r--r-- tools/perf/util/unwind-libdw.h | 21
-rw-r--r-- tools/perf/util/unwind-libunwind.c | 579
-rw-r--r-- tools/perf/util/unwind.h | 37
-rw-r--r-- tools/perf/util/usage.c | 84
-rw-r--r-- tools/perf/util/util.c | 542
-rw-r--r-- tools/perf/util/util.h | 333
-rw-r--r-- tools/perf/util/values.c | 232
-rw-r--r-- tools/perf/util/values.h | 27
-rw-r--r-- tools/perf/util/vdso.c | 111
-rw-r--r-- tools/perf/util/vdso.h | 18
-rw-r--r-- tools/perf/util/wrapper.c | 41
-rw-r--r-- tools/perf/util/xyarray.c | 20
-rw-r--r-- tools/perf/util/xyarray.h | 20
161 files changed, 48427 insertions(+), 0 deletions(-)
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
new file mode 100755
index 00000000000..39f17507578
--- /dev/null
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+if [ $# -eq 1 ] ; then
+ OUTPUT=$1
+fi
+
+GVF=${OUTPUT}PERF-VERSION-FILE
+
+LF='
+'
+
+#
+# First check if there is a .git to get the version from git describe
+# otherwise try to get the version from the kernel Makefile
+#
+CID=
+TAG=
+if test -d ../../.git -o -f ../../.git
+then
+ TAG=$(git describe --abbrev=0 --match "v[0-9].[0-9]*" 2>/dev/null )
+ CID=$(git log -1 --abbrev=4 --pretty=format:"%h" 2>/dev/null) && CID="-g$CID"
+elif test -f ../../PERF-VERSION-FILE
+then
+ TAG=$(cut -d' ' -f3 ../../PERF-VERSION-FILE | sed -e 's/\"//g')
+fi
+if test -z "$TAG"
+then
+ TAG=$(MAKEFLAGS= make -sC ../.. kernelversion)
+fi
+VN="$TAG$CID"
+if test -n "$CID"
+then
+ # format version string, strip trailing zero of sublevel:
+ VN=$(echo "$VN" | sed -e 's/-/./g;s/\([0-9]*[.][0-9]*\)[.]0/\1/')
+fi
+
+VN=$(expr "$VN" : v*'\(.*\)')
+
+if test -r $GVF
+then
+ VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF)
+else
+ VC=unset
+fi
+test "$VN" = "$VC" || {
+ echo >&2 " PERF_VERSION = $VN"
+ echo "#define PERF_VERSION \"$VN\"" >$GVF
+}
+
+
diff --git a/tools/perf/util/abspath.c b/tools/perf/util/abspath.c
new file mode 100644
index 00000000000..0e76affe9c3
--- /dev/null
+++ b/tools/perf/util/abspath.c
@@ -0,0 +1,37 @@
+#include "cache.h"
+
+static const char *get_pwd_cwd(void)
+{
+ static char cwd[PATH_MAX + 1];
+ char *pwd;
+ struct stat cwd_stat, pwd_stat;
+ if (getcwd(cwd, PATH_MAX) == NULL)
+ return NULL;
+ pwd = getenv("PWD");
+ if (pwd && strcmp(pwd, cwd)) {
+ stat(cwd, &cwd_stat);
+ if (!stat(pwd, &pwd_stat) &&
+ pwd_stat.st_dev == cwd_stat.st_dev &&
+ pwd_stat.st_ino == cwd_stat.st_ino) {
+ strlcpy(cwd, pwd, PATH_MAX);
+ }
+ }
+ return cwd;
+}
+
+const char *make_nonrelative_path(const char *path)
+{
+ static char buf[PATH_MAX + 1];
+
+ if (is_absolute_path(path)) {
+ if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ } else {
+ const char *cwd = get_pwd_cwd();
+ if (!cwd)
+ die("Cannot determine the current working directory");
+ if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ }
+ return buf;
+}
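For illustration, here is a minimal driver for make_nonrelative_path() above (the main() wrapper is hypothetical; it links against perf's util code). The result points into a static buffer, so it must be consumed or copied before the next call:

/* Hypothetical driver for make_nonrelative_path(). */
#include <stdio.h>
#include "cache.h"

int main(int argc, char **argv)
{
	/* Relative inputs are joined with the current directory. Note that
	 * get_pwd_cwd() prefers $PWD when it names the same inode as
	 * getcwd(), preserving any symlinked path the user typed. */
	if (argc > 1)
		printf("%s\n", make_nonrelative_path(argv[1]));
	return 0;
}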
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
new file mode 100644
index 00000000000..c0b43ee40d9
--- /dev/null
+++ b/tools/perf/util/alias.c
@@ -0,0 +1,76 @@
+#include "cache.h"
+
+static const char *alias_key;
+static char *alias_val;
+
+static int alias_lookup_cb(const char *k, const char *v,
+ void *cb __maybe_unused)
+{
+ if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
+ if (!v)
+ return config_error_nonbool(k);
+ alias_val = strdup(v);
+ return 0;
+ }
+ return 0;
+}
+
+char *alias_lookup(const char *alias)
+{
+ alias_key = alias;
+ alias_val = NULL;
+ perf_config(alias_lookup_cb, NULL);
+ return alias_val;
+}
+
+int split_cmdline(char *cmdline, const char ***argv)
+{
+ int src, dst, count = 0, size = 16;
+ char quoted = 0;
+
+ *argv = malloc(sizeof(char*) * size);
+
+ /* split alias_string */
+ (*argv)[count++] = cmdline;
+ for (src = dst = 0; cmdline[src];) {
+ char c = cmdline[src];
+ if (!quoted && isspace(c)) {
+ cmdline[dst++] = 0;
+ while (cmdline[++src]
+ && isspace(cmdline[src]))
+ ; /* skip */
+ if (count >= size) {
+ size += 16;
+ *argv = realloc(*argv, sizeof(char*) * size);
+ }
+ (*argv)[count++] = cmdline + dst;
+ } else if (!quoted && (c == '\'' || c == '"')) {
+ quoted = c;
+ src++;
+ } else if (c == quoted) {
+ quoted = 0;
+ src++;
+ } else {
+ if (c == '\\' && quoted != '\'') {
+ src++;
+ c = cmdline[src];
+ if (!c) {
+ zfree(argv);
+ return error("cmdline ends with \\");
+ }
+ }
+ cmdline[dst++] = c;
+ src++;
+ }
+ }
+
+ cmdline[dst] = 0;
+
+ if (quoted) {
+ zfree(argv);
+ return error("unclosed quote");
+ }
+
+ return count;
+}
+
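A minimal, hypothetical driver for split_cmdline() above (in perf it is fed the expansion of an alias found by alias_lookup(); the command line here is made up). The function tokenizes in place, so the entries of argv point into the buffer passed in:

/* Hypothetical driver for split_cmdline(); links against perf's util code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cache.h"

int main(void)
{
	char *cmd = strdup("record -e 'cycles:u' --call-graph dwarf");
	const char **argv;
	int i, argc = split_cmdline(cmd, &argv);

	if (argc < 0)
		return 1;	/* unclosed quote or trailing backslash */
	for (i = 0; i < argc; i++)
		printf("argv[%d] = %s\n", i, argv[i]);

	free(argv);	/* the strings themselves live inside cmd */
	free(cmd);
	return 0;
}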
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
new file mode 100644
index 00000000000..809b4c50bea
--- /dev/null
+++ b/tools/perf/util/annotate.c
@@ -0,0 +1,1412 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-annotate.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "util.h"
+#include "ui/ui.h"
+#include "sort.h"
+#include "build-id.h"
+#include "color.h"
+#include "cache.h"
+#include "symbol.h"
+#include "debug.h"
+#include "annotate.h"
+#include "evsel.h"
+#include <pthread.h>
+#include <linux/bitops.h>
+
+const char *disassembler_style;
+const char *objdump_path;
+
+static struct ins *ins__find(const char *name);
+static int disasm_line__parse(char *line, char **namep, char **rawp);
+
+static void ins__delete(struct ins_operands *ops)
+{
+ zfree(&ops->source.raw);
+ zfree(&ops->source.name);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
+}
+
+static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
+}
+
+int ins__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ if (ins->ops->scnprintf)
+ return ins->ops->scnprintf(ins, bf, size, ops);
+
+ return ins__raw_scnprintf(ins, bf, size, ops);
+}
+
+static int call__parse(struct ins_operands *ops)
+{
+ char *endptr, *tok, *name;
+
+ ops->target.addr = strtoull(ops->raw, &endptr, 16);
+
+ name = strchr(endptr, '<');
+ if (name == NULL)
+ goto indirect_call;
+
+ name++;
+
+ tok = strchr(name, '>');
+ if (tok == NULL)
+ return -1;
+
+ *tok = '\0';
+ ops->target.name = strdup(name);
+ *tok = '>';
+
+ return ops->target.name == NULL ? -1 : 0;
+
+indirect_call:
+ tok = strchr(endptr, '(');
+ if (tok != NULL) {
+ ops->target.addr = 0;
+ return 0;
+ }
+
+ tok = strchr(endptr, '*');
+ if (tok == NULL)
+ return -1;
+
+ ops->target.addr = strtoull(tok + 1, NULL, 16);
+ return 0;
+}
+
+static int call__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ if (ops->target.name)
+ return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
+
+ if (ops->target.addr == 0)
+ return ins__raw_scnprintf(ins, bf, size, ops);
+
+ return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
+}
+
+static struct ins_ops call_ops = {
+ .parse = call__parse,
+ .scnprintf = call__scnprintf,
+};
+
+bool ins__is_call(const struct ins *ins)
+{
+ return ins->ops == &call_ops;
+}
+
+static int jump__parse(struct ins_operands *ops)
+{
+ const char *s = strchr(ops->raw, '+');
+
+ ops->target.addr = strtoull(ops->raw, NULL, 16);
+
+ if (s++ != NULL)
+ ops->target.offset = strtoull(s, NULL, 16);
+ else
+ ops->target.offset = UINT64_MAX;
+
+ return 0;
+}
+
+static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+}
+
+static struct ins_ops jump_ops = {
+ .parse = jump__parse,
+ .scnprintf = jump__scnprintf,
+};
+
+bool ins__is_jump(const struct ins *ins)
+{
+ return ins->ops == &jump_ops;
+}
+
+static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
+{
+ char *endptr, *name, *t;
+
+ if (strstr(raw, "(%rip)") == NULL)
+ return 0;
+
+ *addrp = strtoull(comment, &endptr, 16);
+ name = strchr(endptr, '<');
+ if (name == NULL)
+ return -1;
+
+ name++;
+
+ t = strchr(name, '>');
+ if (t == NULL)
+ return 0;
+
+ *t = '\0';
+ *namep = strdup(name);
+ *t = '>';
+
+ return 0;
+}
+
+static int lock__parse(struct ins_operands *ops)
+{
+ char *name;
+
+ ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
+ if (ops->locked.ops == NULL)
+ return 0;
+
+ if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0)
+ goto out_free_ops;
+
+ ops->locked.ins = ins__find(name);
+ if (ops->locked.ins == NULL)
+ goto out_free_ops;
+
+ if (!ops->locked.ins->ops)
+ return 0;
+
+ if (ops->locked.ins->ops->parse)
+ ops->locked.ins->ops->parse(ops->locked.ops);
+
+ return 0;
+
+out_free_ops:
+ zfree(&ops->locked.ops);
+ return 0;
+}
+
+static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ int printed;
+
+ if (ops->locked.ins == NULL)
+ return ins__raw_scnprintf(ins, bf, size, ops);
+
+ printed = scnprintf(bf, size, "%-6.6s ", ins->name);
+ return printed + ins__scnprintf(ops->locked.ins, bf + printed,
+ size - printed, ops->locked.ops);
+}
+
+static void lock__delete(struct ins_operands *ops)
+{
+ zfree(&ops->locked.ops);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
+}
+
+static struct ins_ops lock_ops = {
+ .free = lock__delete,
+ .parse = lock__parse,
+ .scnprintf = lock__scnprintf,
+};
+
+static int mov__parse(struct ins_operands *ops)
+{
+ char *s = strchr(ops->raw, ','), *target, *comment, prev;
+
+ if (s == NULL)
+ return -1;
+
+ *s = '\0';
+ ops->source.raw = strdup(ops->raw);
+ *s = ',';
+
+ if (ops->source.raw == NULL)
+ return -1;
+
+ target = ++s;
+
+ while (s[0] != '\0' && !isspace(s[0]))
+ ++s;
+ prev = *s;
+ *s = '\0';
+
+ ops->target.raw = strdup(target);
+ *s = prev;
+
+ if (ops->target.raw == NULL)
+ goto out_free_source;
+
+ comment = strchr(s, '#');
+ if (comment == NULL)
+ return 0;
+
+ while (comment[0] != '\0' && isspace(comment[0]))
+ ++comment;
+
+ comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+
+ return 0;
+
+out_free_source:
+ zfree(&ops->source.raw);
+ return -1;
+}
+
+static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
+ ops->source.name ?: ops->source.raw,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops mov_ops = {
+ .parse = mov__parse,
+ .scnprintf = mov__scnprintf,
+};
+
+static int dec__parse(struct ins_operands *ops)
+{
+ char *target, *comment, *s, prev;
+
+ target = s = ops->raw;
+
+ while (s[0] != '\0' && !isspace(s[0]))
+ ++s;
+ prev = *s;
+ *s = '\0';
+
+ ops->target.raw = strdup(target);
+ *s = prev;
+
+ if (ops->target.raw == NULL)
+ return -1;
+
+ comment = strchr(s, '#');
+ if (comment == NULL)
+ return 0;
+
+ while (comment[0] != '\0' && isspace(comment[0]))
+ ++comment;
+
+ comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+
+ return 0;
+}
+
+static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops)
+{
+ return scnprintf(bf, size, "%-6.6s %s", ins->name,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops dec_ops = {
+ .parse = dec__parse,
+ .scnprintf = dec__scnprintf,
+};
+
+static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
+ struct ins_operands *ops __maybe_unused)
+{
+ return scnprintf(bf, size, "%-6.6s", "nop");
+}
+
+static struct ins_ops nop_ops = {
+ .scnprintf = nop__scnprintf,
+};
+
+/*
+ * Must be sorted by name!
+ */
+static struct ins instructions[] = {
+ { .name = "add", .ops = &mov_ops, },
+ { .name = "addl", .ops = &mov_ops, },
+ { .name = "addq", .ops = &mov_ops, },
+ { .name = "addw", .ops = &mov_ops, },
+ { .name = "and", .ops = &mov_ops, },
+ { .name = "bts", .ops = &mov_ops, },
+ { .name = "call", .ops = &call_ops, },
+ { .name = "callq", .ops = &call_ops, },
+ { .name = "cmp", .ops = &mov_ops, },
+ { .name = "cmpb", .ops = &mov_ops, },
+ { .name = "cmpl", .ops = &mov_ops, },
+ { .name = "cmpq", .ops = &mov_ops, },
+ { .name = "cmpw", .ops = &mov_ops, },
+ { .name = "cmpxch", .ops = &mov_ops, },
+ { .name = "dec", .ops = &dec_ops, },
+ { .name = "decl", .ops = &dec_ops, },
+ { .name = "imul", .ops = &mov_ops, },
+ { .name = "inc", .ops = &dec_ops, },
+ { .name = "incl", .ops = &dec_ops, },
+ { .name = "ja", .ops = &jump_ops, },
+ { .name = "jae", .ops = &jump_ops, },
+ { .name = "jb", .ops = &jump_ops, },
+ { .name = "jbe", .ops = &jump_ops, },
+ { .name = "jc", .ops = &jump_ops, },
+ { .name = "jcxz", .ops = &jump_ops, },
+ { .name = "je", .ops = &jump_ops, },
+ { .name = "jecxz", .ops = &jump_ops, },
+ { .name = "jg", .ops = &jump_ops, },
+ { .name = "jge", .ops = &jump_ops, },
+ { .name = "jl", .ops = &jump_ops, },
+ { .name = "jle", .ops = &jump_ops, },
+ { .name = "jmp", .ops = &jump_ops, },
+ { .name = "jmpq", .ops = &jump_ops, },
+ { .name = "jna", .ops = &jump_ops, },
+ { .name = "jnae", .ops = &jump_ops, },
+ { .name = "jnb", .ops = &jump_ops, },
+ { .name = "jnbe", .ops = &jump_ops, },
+ { .name = "jnc", .ops = &jump_ops, },
+ { .name = "jne", .ops = &jump_ops, },
+ { .name = "jng", .ops = &jump_ops, },
+ { .name = "jnge", .ops = &jump_ops, },
+ { .name = "jnl", .ops = &jump_ops, },
+ { .name = "jnle", .ops = &jump_ops, },
+ { .name = "jno", .ops = &jump_ops, },
+ { .name = "jnp", .ops = &jump_ops, },
+ { .name = "jns", .ops = &jump_ops, },
+ { .name = "jnz", .ops = &jump_ops, },
+ { .name = "jo", .ops = &jump_ops, },
+ { .name = "jp", .ops = &jump_ops, },
+ { .name = "jpe", .ops = &jump_ops, },
+ { .name = "jpo", .ops = &jump_ops, },
+ { .name = "jrcxz", .ops = &jump_ops, },
+ { .name = "js", .ops = &jump_ops, },
+ { .name = "jz", .ops = &jump_ops, },
+ { .name = "lea", .ops = &mov_ops, },
+ { .name = "lock", .ops = &lock_ops, },
+ { .name = "mov", .ops = &mov_ops, },
+ { .name = "movb", .ops = &mov_ops, },
+ { .name = "movdqa",.ops = &mov_ops, },
+ { .name = "movl", .ops = &mov_ops, },
+ { .name = "movq", .ops = &mov_ops, },
+ { .name = "movslq", .ops = &mov_ops, },
+ { .name = "movzbl", .ops = &mov_ops, },
+ { .name = "movzwl", .ops = &mov_ops, },
+ { .name = "nop", .ops = &nop_ops, },
+ { .name = "nopl", .ops = &nop_ops, },
+ { .name = "nopw", .ops = &nop_ops, },
+ { .name = "or", .ops = &mov_ops, },
+ { .name = "orl", .ops = &mov_ops, },
+ { .name = "test", .ops = &mov_ops, },
+ { .name = "testb", .ops = &mov_ops, },
+ { .name = "testl", .ops = &mov_ops, },
+ { .name = "xadd", .ops = &mov_ops, },
+ { .name = "xbeginl", .ops = &jump_ops, },
+ { .name = "xbeginq", .ops = &jump_ops, },
+};
+
+static int ins__cmp(const void *name, const void *insp)
+{
+ const struct ins *ins = insp;
+
+ return strcmp(name, ins->name);
+}
+
+static struct ins *ins__find(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(instructions);
+
+ return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ pthread_mutex_init(&notes->lock, NULL);
+ return 0;
+}
+
+int symbol__alloc_hist(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ const size_t size = symbol__size(sym);
+ size_t sizeof_sym_hist;
+
+ /* Check for overflow when calculating sizeof_sym_hist */
+ if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
+ return -1;
+
+ sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
+
+ /* Check for overflow in zalloc argument */
+ if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
+ / symbol_conf.nr_events)
+ return -1;
+
+ notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
+ if (notes->src == NULL)
+ return -1;
+ notes->src->sizeof_sym_hist = sizeof_sym_hist;
+ notes->src->nr_histograms = symbol_conf.nr_events;
+ INIT_LIST_HEAD(&notes->src->source);
+ return 0;
+}
+
+void symbol__annotate_zero_histograms(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ pthread_mutex_lock(&notes->lock);
+ if (notes->src != NULL)
+ memset(notes->src->histograms, 0,
+ notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+ pthread_mutex_unlock(&notes->lock);
+}
+
+static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ struct annotation *notes, int evidx, u64 addr)
+{
+ unsigned offset;
+ struct sym_hist *h;
+
+ pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
+
+ if (addr < sym->start || addr > sym->end)
+ return -ERANGE;
+
+ offset = addr - sym->start;
+ h = annotation__histogram(notes, evidx);
+ h->sum++;
+ h->addr[offset]++;
+
+ pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
+ ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
+ addr, addr - sym->start, evidx, h->addr[offset]);
+ return 0;
+}
+
+static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ int evidx, u64 addr)
+{
+ struct annotation *notes;
+
+ if (sym == NULL)
+ return 0;
+
+ notes = symbol__annotation(sym);
+ if (notes->src == NULL) {
+ if (symbol__alloc_hist(sym) < 0)
+ return -ENOMEM;
+ }
+
+ return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
+}
+
+int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
+{
+ return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
+}
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
+{
+ return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
+}
+
+static void disasm_line__init_ins(struct disasm_line *dl)
+{
+ dl->ins = ins__find(dl->name);
+
+ if (dl->ins == NULL)
+ return;
+
+ if (!dl->ins->ops)
+ return;
+
+ if (dl->ins->ops->parse)
+ dl->ins->ops->parse(&dl->ops);
+}
+
+static int disasm_line__parse(char *line, char **namep, char **rawp)
+{
+ char *name = line, tmp;
+
+ while (isspace(name[0]))
+ ++name;
+
+ if (name[0] == '\0')
+ return -1;
+
+ *rawp = name + 1;
+
+ while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
+ ++*rawp;
+
+ tmp = (*rawp)[0];
+ (*rawp)[0] = '\0';
+ *namep = strdup(name);
+
+ if (*namep == NULL)
+ goto out_free_name;
+
+ (*rawp)[0] = tmp;
+
+ if ((*rawp)[0] != '\0') {
+ (*rawp)++;
+ while (isspace((*rawp)[0]))
+ ++(*rawp);
+ }
+
+ return 0;
+
+out_free_name:
+ zfree(namep);
+ return -1;
+}
+
+static struct disasm_line *disasm_line__new(s64 offset, char *line, size_t privsize)
+{
+ struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
+
+ if (dl != NULL) {
+ dl->offset = offset;
+ dl->line = strdup(line);
+ if (dl->line == NULL)
+ goto out_delete;
+
+ if (offset != -1) {
+ if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0)
+ goto out_free_line;
+
+ disasm_line__init_ins(dl);
+ }
+ }
+
+ return dl;
+
+out_free_line:
+ zfree(&dl->line);
+out_delete:
+ free(dl);
+ return NULL;
+}
+
+void disasm_line__free(struct disasm_line *dl)
+{
+ zfree(&dl->line);
+ zfree(&dl->name);
+ if (dl->ins && dl->ins->ops->free)
+ dl->ins->ops->free(&dl->ops);
+ else
+ ins__delete(&dl->ops);
+ free(dl);
+}
+
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
+{
+ if (raw || !dl->ins)
+ return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw);
+
+ return ins__scnprintf(dl->ins, bf, size, &dl->ops);
+}
+
+static void disasm__add(struct list_head *head, struct disasm_line *line)
+{
+ list_add_tail(&line->node, head);
+}
+
+struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
+{
+ list_for_each_entry_continue(pos, head, node)
+ if (pos->offset >= 0)
+ return pos;
+
+ return NULL;
+}
+
+double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
+ s64 end, const char **path)
+{
+ struct source_line *src_line = notes->src->lines;
+ double percent = 0.0;
+
+ if (src_line) {
+ size_t sizeof_src_line = sizeof(*src_line) +
+ sizeof(src_line->p) * (src_line->nr_pcnt - 1);
+
+ while (offset < end) {
+ src_line = (void *)notes->src->lines +
+ (sizeof_src_line * offset);
+
+ if (*path == NULL)
+ *path = src_line->path;
+
+ percent += src_line->p[evidx].percent;
+ offset++;
+ }
+ } else {
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ unsigned int hits = 0;
+
+ while (offset < end)
+ hits += h->addr[offset++];
+
+ if (h->sum)
+ percent = 100.0 * hits / h->sum;
+ }
+
+ return percent;
+}
+
+static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
+ struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
+ int max_lines, struct disasm_line *queue)
+{
+ static const char *prev_line;
+ static const char *prev_color;
+
+ if (dl->offset != -1) {
+ const char *path = NULL;
+ double percent, max_percent = 0.0;
+ double *ppercents = &percent;
+ int i, nr_percent = 1;
+ const char *color;
+ struct annotation *notes = symbol__annotation(sym);
+ s64 offset = dl->offset;
+ const u64 addr = start + offset;
+ struct disasm_line *next;
+
+ next = disasm__get_next_ip_line(&notes->src->source, dl);
+
+ if (perf_evsel__is_group_event(evsel)) {
+ nr_percent = evsel->nr_members;
+ ppercents = calloc(nr_percent, sizeof(double));
+ if (ppercents == NULL)
+ return -1;
+ }
+
+ for (i = 0; i < nr_percent; i++) {
+ percent = disasm__calc_percent(notes,
+ notes->src->lines ? i : evsel->idx + i,
+ offset,
+ next ? next->offset : (s64) len,
+ &path);
+
+ ppercents[i] = percent;
+ if (percent > max_percent)
+ max_percent = percent;
+ }
+
+ if (max_percent < min_pcnt)
+ return -1;
+
+ if (max_lines && printed >= max_lines)
+ return 1;
+
+ if (queue != NULL) {
+ list_for_each_entry_from(queue, &notes->src->source, node) {
+ if (queue == dl)
+ break;
+ disasm_line__print(queue, sym, start, evsel, len,
+ 0, 0, 1, NULL);
+ }
+ }
+
+ color = get_percent_color(max_percent);
+
+ /*
+ * Also color the filename and line if needed, with
+		 * the same color as the percentage. Don't print it
+ * twice for close colored addr with the same filename:line
+ */
+ if (path) {
+ if (!prev_line || strcmp(prev_line, path)
+ || color != prev_color) {
+ color_fprintf(stdout, color, " %s", path);
+ prev_line = path;
+ prev_color = color;
+ }
+ }
+
+ for (i = 0; i < nr_percent; i++) {
+ percent = ppercents[i];
+ color = get_percent_color(percent);
+ color_fprintf(stdout, color, " %7.2f", percent);
+ }
+
+ printf(" : ");
+ color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
+ color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
+
+ if (ppercents != &percent)
+ free(ppercents);
+
+ } else if (max_lines && printed >= max_lines)
+ return 1;
+ else {
+ int width = 8;
+
+ if (queue)
+ return -1;
+
+ if (perf_evsel__is_group_event(evsel))
+ width *= evsel->nr_members;
+
+ if (!*dl->line)
+ printf(" %*s:\n", width, " ");
+ else
+ printf(" %*s: %s\n", width, " ", dl->line);
+ }
+
+ return 0;
+}
+
+/*
+ * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
+ * which looks like the following:
+ *
+ * 0000000000415500 <_init>:
+ * 415500: sub $0x8,%rsp
+ * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
+ * 41550b: test %rax,%rax
+ * 41550e: je 415515 <_init+0x15>
+ * 415510: callq 416e70 <__gmon_start__@plt>
+ * 415515: add $0x8,%rsp
+ * 415519: retq
+ *
+ * it will be parsed and saved into struct disasm_line as
+ * <offset> <name> <ops.raw>
+ *
+ * The offset will be a relative offset from the start of the symbol and -1
+ * means that it's not a disassembly line, so it should be treated differently.
+ * The ops.raw part will be parsed further according to the type of the instruction.
+ */
+static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
+ FILE *file, size_t privsize)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *dl;
+ char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
+ size_t line_len;
+ s64 line_ip, offset = -1;
+
+ if (getline(&line, &line_len, file) < 0)
+ return -1;
+
+ if (!line)
+ return -1;
+
+ while (line_len != 0 && isspace(line[line_len - 1]))
+ line[--line_len] = '\0';
+
+ c = strchr(line, '\n');
+ if (c)
+ *c = 0;
+
+ line_ip = -1;
+ parsed_line = line;
+
+ /*
+ * Strip leading spaces:
+ */
+ tmp = line;
+ while (*tmp) {
+ if (*tmp != ' ')
+ break;
+ tmp++;
+ }
+
+ if (*tmp) {
+ /*
+		 * Parse hex addresses followed by ':'
+ */
+ line_ip = strtoull(tmp, &tmp2, 16);
+ if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
+ line_ip = -1;
+ }
+
+ if (line_ip != -1) {
+ u64 start = map__rip_2objdump(map, sym->start),
+ end = map__rip_2objdump(map, sym->end);
+
+ offset = line_ip - start;
+ if ((u64)line_ip < start || (u64)line_ip > end)
+ offset = -1;
+ else
+ parsed_line = tmp2 + 1;
+ }
+
+ dl = disasm_line__new(offset, parsed_line, privsize);
+ free(line);
+
+ if (dl == NULL)
+ return -1;
+
+ if (dl->ops.target.offset == UINT64_MAX)
+ dl->ops.target.offset = dl->ops.target.addr -
+ map__rip_2objdump(map, sym->start);
+
+ /* kcore has no symbols, so add the call target name */
+ if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+ struct addr_map_symbol target = {
+ .map = map,
+ .addr = dl->ops.target.addr,
+ };
+
+ if (!map_groups__find_ams(&target, NULL) &&
+ target.sym->start == target.al_addr)
+ dl->ops.target.name = strdup(target.sym->name);
+ }
+
+ disasm__add(&notes->src->source, dl);
+
+ return 0;
+}
+
+static void delete_last_nop(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct list_head *list = &notes->src->source;
+ struct disasm_line *dl;
+
+ while (!list_empty(list)) {
+ dl = list_entry(list->prev, struct disasm_line, node);
+
+ if (dl->ins && dl->ins->ops) {
+ if (dl->ins->ops != &nop_ops)
+ return;
+ } else {
+ if (!strstr(dl->line, " nop ") &&
+ !strstr(dl->line, " nopl ") &&
+ !strstr(dl->line, " nopw "))
+ return;
+ }
+
+ list_del(&dl->node);
+ disasm_line__free(dl);
+ }
+}
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
+{
+ struct dso *dso = map->dso;
+ char *filename = dso__build_id_filename(dso, NULL, 0);
+ bool free_filename = true;
+ char command[PATH_MAX * 2];
+ FILE *file;
+ int err = 0;
+ char symfs_filename[PATH_MAX];
+ struct kcore_extract kce;
+ bool delete_extract = false;
+
+ if (filename) {
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ }
+
+ if (filename == NULL) {
+ if (dso->has_build_id) {
+ pr_err("Can't annotate %s: not enough memory\n",
+ sym->name);
+ return -ENOMEM;
+ }
+ goto fallback;
+ } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
+ strstr(command, "[kernel.kallsyms]") ||
+ access(symfs_filename, R_OK)) {
+ free(filename);
+fallback:
+ /*
+ * If we don't have build-ids or the build-id file isn't in the
+		 * cache, or is just a kallsyms file, well, let's hope that this
+ * DSO is the same as when 'perf record' ran.
+ */
+ filename = (char *)dso->long_name;
+ snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
+ symbol_conf.symfs, filename);
+ free_filename = false;
+ }
+
+ if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(dso)) {
+ char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+ char *build_id_msg = NULL;
+
+ if (dso->annotate_warned)
+ goto out_free_filename;
+
+ if (dso->has_build_id) {
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id), bf + 15);
+ build_id_msg = bf;
+ }
+ err = -ENOENT;
+ dso->annotate_warned = 1;
+ pr_err("Can't annotate %s:\n\n"
+ "No vmlinux file%s\nwas found in the path.\n\n"
+ "Please use:\n\n"
+ " perf buildid-cache -vu vmlinux\n\n"
+ "or:\n\n"
+ " --vmlinux vmlinux\n",
+ sym->name, build_id_msg ?: "");
+ goto out_free_filename;
+ }
+
+ pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
+ filename, sym->name, map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
+
+ pr_debug("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso->long_name, sym, sym->name);
+
+ if (dso__is_kcore(dso)) {
+ kce.kcore_filename = symfs_filename;
+ kce.addr = map__rip_2objdump(map, sym->start);
+ kce.offs = sym->start;
+ kce.len = sym->end + 1 - sym->start;
+ if (!kcore_extract__create(&kce)) {
+ delete_extract = true;
+ strlcpy(symfs_filename, kce.extract_filename,
+ sizeof(symfs_filename));
+ if (free_filename) {
+ free(filename);
+ free_filename = false;
+ }
+ filename = symfs_filename;
+ }
+ }
+
+ snprintf(command, sizeof(command),
+ "%s %s%s --start-address=0x%016" PRIx64
+ " --stop-address=0x%016" PRIx64
+ " -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
+ objdump_path ? objdump_path : "objdump",
+ disassembler_style ? "-M " : "",
+ disassembler_style ? disassembler_style : "",
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end+1),
+ symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
+ symbol_conf.annotate_src ? "-S" : "",
+ symfs_filename, filename);
+
+ pr_debug("Executing: %s\n", command);
+
+ file = popen(command, "r");
+ if (!file)
+ goto out_free_filename;
+
+ while (!feof(file))
+ if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
+ break;
+
+ /*
+	 * kallsyms does not have symbol sizes, so there may be a nop at the end.
+ * Remove it.
+ */
+ if (dso__is_kcore(dso))
+ delete_last_nop(sym);
+
+ pclose(file);
+out_free_filename:
+ if (delete_extract)
+ kcore_extract__delete(&kce);
+ if (free_filename)
+ free(filename);
+ return err;
+}
+
+static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+{
+ struct source_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ int i, ret;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct source_line, node);
+
+ ret = strcmp(iter->path, src_line->path);
+ if (ret == 0) {
+ for (i = 0; i < src_line->nr_pcnt; i++)
+ iter->p[i].percent_sum += src_line->p[i].percent;
+ return;
+ }
+
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ for (i = 0; i < src_line->nr_pcnt; i++)
+ src_line->p[i].percent_sum = src_line->p[i].percent;
+
+ rb_link_node(&src_line->node, parent, p);
+ rb_insert_color(&src_line->node, root);
+}
+
+static int cmp_source_line(struct source_line *a, struct source_line *b)
+{
+ int i;
+
+ for (i = 0; i < a->nr_pcnt; i++) {
+ if (a->p[i].percent_sum == b->p[i].percent_sum)
+ continue;
+ return a->p[i].percent_sum > b->p[i].percent_sum;
+ }
+
+ return 0;
+}
+
+static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
+{
+ struct source_line *iter;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct source_line, node);
+
+ if (cmp_source_line(src_line, iter))
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&src_line->node, parent, p);
+ rb_insert_color(&src_line->node, root);
+}
+
+static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
+{
+ struct source_line *src_line;
+ struct rb_node *node;
+
+ node = rb_first(src_root);
+ while (node) {
+ struct rb_node *next;
+
+ src_line = rb_entry(node, struct source_line, node);
+ next = rb_next(node);
+ rb_erase(node, src_root);
+
+ __resort_source_line(dest_root, src_line);
+ node = next;
+ }
+}
+
+static void symbol__free_source_line(struct symbol *sym, int len)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct source_line *src_line = notes->src->lines;
+ size_t sizeof_src_line;
+ int i;
+
+ sizeof_src_line = sizeof(*src_line) +
+ (sizeof(src_line->p) * (src_line->nr_pcnt - 1));
+
+ for (i = 0; i < len; i++) {
+ free_srcline(src_line->path);
+ src_line = (void *)src_line + sizeof_src_line;
+ }
+
+ zfree(&notes->src->lines);
+}
+
+/* Get the filename:line for the colored entries */
+static int symbol__get_source_line(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel,
+ struct rb_root *root, int len)
+{
+ u64 start;
+ int i, k;
+ int evidx = evsel->idx;
+ struct source_line *src_line;
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ struct rb_root tmp_root = RB_ROOT;
+ int nr_pcnt = 1;
+ u64 h_sum = h->sum;
+ size_t sizeof_src_line = sizeof(struct source_line);
+
+ if (perf_evsel__is_group_event(evsel)) {
+ for (i = 1; i < evsel->nr_members; i++) {
+ h = annotation__histogram(notes, evidx + i);
+ h_sum += h->sum;
+ }
+ nr_pcnt = evsel->nr_members;
+ sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p);
+ }
+
+ if (!h_sum)
+ return 0;
+
+ src_line = notes->src->lines = calloc(len, sizeof_src_line);
+ if (!notes->src->lines)
+ return -1;
+
+ start = map__rip_2objdump(map, sym->start);
+
+ for (i = 0; i < len; i++) {
+ u64 offset;
+ double percent_max = 0.0;
+
+ src_line->nr_pcnt = nr_pcnt;
+
+ for (k = 0; k < nr_pcnt; k++) {
+ h = annotation__histogram(notes, evidx + k);
+ src_line->p[k].percent = 100.0 * h->addr[i] / h->sum;
+
+ if (src_line->p[k].percent > percent_max)
+ percent_max = src_line->p[k].percent;
+ }
+
+ if (percent_max <= 0.5)
+ goto next;
+
+ offset = start + i;
+ src_line->path = get_srcline(map->dso, offset);
+ insert_source_line(&tmp_root, src_line);
+
+ next:
+ src_line = (void *)src_line + sizeof_src_line;
+ }
+
+ resort_source_line(root, &tmp_root);
+ return 0;
+}
+
+static void print_summary(struct rb_root *root, const char *filename)
+{
+ struct source_line *src_line;
+ struct rb_node *node;
+
+ printf("\nSorted summary for file %s\n", filename);
+ printf("----------------------------------------------\n\n");
+
+ if (RB_EMPTY_ROOT(root)) {
+ printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+ return;
+ }
+
+ node = rb_first(root);
+ while (node) {
+ double percent, percent_max = 0.0;
+ const char *color;
+ char *path;
+ int i;
+
+ src_line = rb_entry(node, struct source_line, node);
+ for (i = 0; i < src_line->nr_pcnt; i++) {
+ percent = src_line->p[i].percent_sum;
+ color = get_percent_color(percent);
+ color_fprintf(stdout, color, " %7.2f", percent);
+
+ if (percent > percent_max)
+ percent_max = percent;
+ }
+
+ path = src_line->path;
+ color = get_percent_color(percent_max);
+ color_fprintf(stdout, color, " %s\n", path);
+
+ node = rb_next(node);
+ }
+}
+
+static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evsel->idx);
+ u64 len = symbol__size(sym), offset;
+
+ for (offset = 0; offset < len; ++offset)
+ if (h->addr[offset] != 0)
+ printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
+ sym->start + offset, h->addr[offset]);
+ printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
+int symbol__annotate_printf(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool full_paths,
+ int min_pcnt, int max_lines, int context)
+{
+ struct dso *dso = map->dso;
+ char *filename;
+ const char *d_filename;
+ const char *evsel_name = perf_evsel__name(evsel);
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *pos, *queue = NULL;
+ u64 start = map__rip_2objdump(map, sym->start);
+ int printed = 2, queue_len = 0;
+ int more = 0;
+ u64 len;
+ int width = 8;
+ int namelen, evsel_name_len, graph_dotted_len;
+
+ filename = strdup(dso->long_name);
+ if (!filename)
+ return -ENOMEM;
+
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
+
+ len = symbol__size(sym);
+ namelen = strlen(d_filename);
+ evsel_name_len = strlen(evsel_name);
+
+ if (perf_evsel__is_group_event(evsel))
+ width *= evsel->nr_members;
+
+ printf(" %-*.*s| Source code & Disassembly of %s for %s\n",
+ width, width, "Percent", d_filename, evsel_name);
+
+ graph_dotted_len = width + namelen + evsel_name_len;
+ printf("-%-*.*s-----------------------------------------\n",
+ graph_dotted_len, graph_dotted_len, graph_dotted_line);
+
+ if (verbose)
+ symbol__annotate_hits(sym, evsel);
+
+ list_for_each_entry(pos, &notes->src->source, node) {
+ if (context && queue == NULL) {
+ queue = pos;
+ queue_len = 0;
+ }
+
+ switch (disasm_line__print(pos, sym, start, evsel, len,
+ min_pcnt, printed, max_lines,
+ queue)) {
+ case 0:
+ ++printed;
+ if (context) {
+ printed += queue_len;
+ queue = NULL;
+ queue_len = 0;
+ }
+ break;
+ case 1:
+ /* filtered by max_lines */
+ ++more;
+ break;
+ case -1:
+ default:
+ /*
+ * Filtered by min_pcnt or non IP lines when
+ * context != 0
+ */
+ if (!context)
+ break;
+ if (queue_len == context)
+ queue = list_entry(queue->node.next, typeof(*queue), node);
+ else
+ ++queue_len;
+ break;
+ }
+ }
+
+ free(filename);
+
+ return more;
+}
+
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+
+ memset(h, 0, notes->src->sizeof_sym_hist);
+}
+
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct sym_hist *h = annotation__histogram(notes, evidx);
+ int len = symbol__size(sym), offset;
+
+ h->sum = 0;
+ for (offset = 0; offset < len; ++offset) {
+ h->addr[offset] = h->addr[offset] * 7 / 8;
+ h->sum += h->addr[offset];
+ }
+}
+
+void disasm__purge(struct list_head *head)
+{
+ struct disasm_line *pos, *n;
+
+ list_for_each_entry_safe(pos, n, head, node) {
+ list_del(&pos->node);
+ disasm_line__free(pos);
+ }
+}
+
+static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
+{
+ size_t printed;
+
+ if (dl->offset == -1)
+ return fprintf(fp, "%s\n", dl->line);
+
+ printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name);
+
+ if (dl->ops.raw[0] != '\0') {
+ printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
+ dl->ops.raw);
+ }
+
+ return printed + fprintf(fp, "\n");
+}
+
+size_t disasm__fprintf(struct list_head *head, FILE *fp)
+{
+ struct disasm_line *pos;
+ size_t printed = 0;
+
+ list_for_each_entry(pos, head, node)
+ printed += disasm_line__fprintf(pos, fp);
+
+ return printed;
+}
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool print_lines,
+ bool full_paths, int min_pcnt, int max_lines)
+{
+ struct dso *dso = map->dso;
+ struct rb_root source_line = RB_ROOT;
+ u64 len;
+
+ if (symbol__annotate(sym, map, 0) < 0)
+ return -1;
+
+ len = symbol__size(sym);
+
+ if (print_lines) {
+ symbol__get_source_line(sym, map, evsel, &source_line, len);
+ print_summary(&source_line, dso->long_name);
+ }
+
+ symbol__annotate_printf(sym, map, evsel, full_paths,
+ min_pcnt, max_lines, 0);
+ if (print_lines)
+ symbol__free_source_line(sym, len);
+
+ disasm__purge(&symbol__annotation(sym)->src->source);
+
+ return 0;
+}
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize)
+{
+ return symbol__annotate(he->ms.sym, he->ms.map, privsize);
+}
+
+bool ui__has_annotation(void)
+{
+ return use_browser == 1 && sort__has_sym;
+}
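The objdump-line convention documented above symbol__parse_objdump_line() can be seen in isolation with a small standalone sketch. It restates the split performed by disasm_line__parse() (mnemonic first, raw operands after the first whitespace) on one sample line; this is a demonstration only, not perf code:

/* Standalone sketch: split one "objdump -d" line into the
 * <offset> <name> <ops.raw> triple described above. */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

int main(void)
{
	char line[] = "  415504:\tmov    0x2f5ad5(%rip),%rax  # 70afe0 <_DYNAMIC+0x2f8>";
	char *end, *name, *raw, *p = line;
	unsigned long long ip;

	while (isspace((unsigned char)*p))	/* strip leading blanks */
		p++;
	ip = strtoull(p, &end, 16);		/* hex address, up to ':' */
	if (end == p || *end != ':')
		return 1;			/* not a disassembly line */
	for (name = end + 1; isspace((unsigned char)*name); name++)
		;				/* skip to the mnemonic */
	for (raw = name; *raw && !isspace((unsigned char)*raw); raw++)
		;				/* mnemonic ends at whitespace */
	if (*raw)
		*raw++ = '\0';
	while (isspace((unsigned char)*raw))
		raw++;				/* operands + trailing comment */
	printf("offset=%#llx name=%s raw=%s\n", ip, name, raw);
	return 0;
}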
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
new file mode 100644
index 00000000000..112d6e26815
--- /dev/null
+++ b/tools/perf/util/annotate.h
@@ -0,0 +1,177 @@
+#ifndef __PERF_ANNOTATE_H
+#define __PERF_ANNOTATE_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <linux/types.h>
+#include "symbol.h"
+#include "hist.h"
+#include "sort.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+
+struct ins;
+
+struct ins_operands {
+ char *raw;
+ struct {
+ char *raw;
+ char *name;
+ u64 addr;
+ u64 offset;
+ } target;
+ union {
+ struct {
+ char *raw;
+ char *name;
+ u64 addr;
+ } source;
+ struct {
+ struct ins *ins;
+ struct ins_operands *ops;
+ } locked;
+ };
+};
+
+struct ins_ops {
+ void (*free)(struct ins_operands *ops);
+ int (*parse)(struct ins_operands *ops);
+ int (*scnprintf)(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops);
+};
+
+struct ins {
+ const char *name;
+ struct ins_ops *ops;
+};
+
+bool ins__is_jump(const struct ins *ins);
+bool ins__is_call(const struct ins *ins);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
+
+struct annotation;
+
+struct disasm_line {
+ struct list_head node;
+ s64 offset;
+ char *line;
+ char *name;
+ struct ins *ins;
+ struct ins_operands ops;
+};
+
+static inline bool disasm_line__has_offset(const struct disasm_line *dl)
+{
+ return dl->ops.target.offset != UINT64_MAX;
+}
+
+void disasm_line__free(struct disasm_line *dl);
+struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
+size_t disasm__fprintf(struct list_head *head, FILE *fp);
+double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
+ s64 end, const char **path);
+
+struct sym_hist {
+ u64 sum;
+ u64 addr[0];
+};
+
+struct source_line_percent {
+ double percent;
+ double percent_sum;
+};
+
+struct source_line {
+ struct rb_node node;
+ char *path;
+ int nr_pcnt;
+ struct source_line_percent p[1];
+};
+
+/** struct annotated_source - symbols with hits have this attached (see struct sannotation)
+ *
+ * @histogram: Array of addr hit histograms per event being monitored
+ * @lines: If 'print_lines' is specified, per source code line percentages
+ * @source: source parsed from a disassembler like objdump -dS
+ *
+ * lines is allocated, percentages calculated and all sorted by percentage
+ * when the annotation is about to be presented, so the percentages are for
+ * one of the entries in the histogram array, i.e. for the event/counter being
+ * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
+ * returns.
+ */
+struct annotated_source {
+ struct list_head source;
+ struct source_line *lines;
+ int nr_histograms;
+ int sizeof_sym_hist;
+ struct sym_hist histograms[0];
+};
+
+struct annotation {
+ pthread_mutex_t lock;
+ struct annotated_source *src;
+};
+
+struct sannotation {
+ struct annotation annotation;
+ struct symbol symbol;
+};
+
+static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
+{
+ return (((void *)&notes->src->histograms) +
+ (notes->src->sizeof_sym_hist * idx));
+}
+
+static inline struct annotation *symbol__annotation(struct symbol *sym)
+{
+ struct sannotation *a = container_of(sym, struct sannotation, symbol);
+ return &a->annotation;
+}
+
+int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
+
+int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
+
+int symbol__alloc_hist(struct symbol *sym);
+void symbol__annotate_zero_histograms(struct symbol *sym);
+
+int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+
+int hist_entry__annotate(struct hist_entry *he, size_t privsize);
+
+int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym);
+int symbol__annotate_printf(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool full_paths,
+ int min_pcnt, int max_lines, int context);
+void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
+void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
+void disasm__purge(struct list_head *head);
+
+bool ui__has_annotation(void);
+
+int symbol__tty_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, bool print_lines,
+ bool full_paths, int min_pcnt, int max_lines);
+
+#ifdef HAVE_SLANG_SUPPORT
+int symbol__tui_annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+#else
+static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
+ struct map *map __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt
+ __maybe_unused)
+{
+ return 0;
+}
+#endif
+
+extern const char *disassembler_style;
+
+#endif /* __PERF_ANNOTATE_H */
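As a concrete reading of the annotated_source layout above: the histograms flexible array holds nr_histograms consecutive sym_hist records of sizeof_sym_hist bytes each, which is exactly the arithmetic annotation__histogram() performs and symbol__alloc_hist() allocates. A small sketch of that arithmetic with made-up sizes (not perf code):

/* Illustration of the flexible-array layout, with made-up numbers:
 * a 64-byte symbol monitored for 2 events. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t sym_size = 64;	/* stands in for symbol__size(sym) */
	int nr_events = 2;	/* stands in for symbol_conf.nr_events */
	int idx;

	/* one u64 sum plus one u64 counter per byte of the symbol */
	size_t sizeof_sym_hist = sizeof(uint64_t) + sym_size * sizeof(uint64_t);

	printf("sizeof_sym_hist = %zu bytes\n", sizeof_sym_hist);
	for (idx = 0; idx < nr_events; idx++)	/* annotation__histogram(notes, idx) */
		printf("histogram %d starts at byte offset %zu\n",
		       idx, sizeof_sym_hist * (size_t)idx);
	return 0;
}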
diff --git a/tools/perf/util/bitmap.c b/tools/perf/util/bitmap.c
new file mode 100644
index 00000000000..0a1adc1111f
--- /dev/null
+++ b/tools/perf/util/bitmap.c
@@ -0,0 +1,31 @@
+/*
+ * From lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/bitmap.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+ int k, w = 0, lim = bits/BITS_PER_LONG;
+
+ for (k = 0; k < lim; k++)
+ w += hweight_long(bitmap[k]);
+
+ if (bits % BITS_PER_LONG)
+ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+ return w;
+}
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] | bitmap2[k];
+}
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
new file mode 100644
index 00000000000..a904a4cfe7d
--- /dev/null
+++ b/tools/perf/util/build-id.c
@@ -0,0 +1,108 @@
+/*
+ * build-id.c
+ *
+ * build-id support
+ *
+ * Copyright (C) 2009, 2010 Red Hat Inc.
+ * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "util.h"
+#include <stdio.h>
+#include "build-id.h"
+#include "event.h"
+#include "symbol.h"
+#include <linux/kernel.h>
+#include "debug.h"
+#include "session.h"
+#include "tool.h"
+
+int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel __maybe_unused,
+ struct machine *machine)
+{
+ struct addr_location al;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
+
+ if (thread == NULL) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ sample->ip, &al);
+
+ if (al.map != NULL)
+ al.map->dso->hit = 1;
+
+ return 0;
+}
+
+static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample
+ __maybe_unused,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
+
+ dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+
+ if (thread) {
+ rb_erase(&thread->rb_node, &machine->threads);
+ machine->last_match = NULL;
+ thread__delete(thread);
+ }
+
+ return 0;
+}
+
+struct perf_tool build_id__mark_dso_hit_ops = {
+ .sample = build_id__mark_dso_hit,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .fork = perf_event__process_fork,
+ .exit = perf_event__exit_del_thread,
+ .attr = perf_event__process_attr,
+ .build_id = perf_event__process_build_id,
+};
+
+int build_id__sprintf(const u8 *build_id, int len, char *bf)
+{
+ char *bid = bf;
+ const u8 *raw = build_id;
+ int i;
+
+ for (i = 0; i < len; ++i) {
+ sprintf(bid, "%02x", *raw);
+ ++raw;
+ bid += 2;
+ }
+
+ return raw - build_id;
+}
+
+char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
+{
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+
+ if (!dso->has_build_id)
+ return NULL;
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex);
+ if (bf == NULL) {
+ if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
+ build_id_hex, build_id_hex + 2) < 0)
+ return NULL;
+ } else
+ snprintf(bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
+ build_id_hex, build_id_hex + 2);
+ return bf;
+}
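A brief, hypothetical usage sketch for build_id__sprintf() above: a BUILD_ID_SIZE (20-byte) identifier renders as two hex digits per byte, so the destination needs 2 * BUILD_ID_SIZE + 1 bytes. The sample bytes here are made up:

/* Hypothetical driver for build_id__sprintf(); links against perf's util code. */
#include <stdio.h>
#include "build-id.h"

int main(void)
{
	const unsigned char id[BUILD_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	char hex[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(id, sizeof(id), hex);
	printf("%s\n", hex);	/* "deadbeef" followed by the zero-padded bytes */
	return 0;
}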
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
new file mode 100644
index 00000000000..ae392561470
--- /dev/null
+++ b/tools/perf/util/build-id.h
@@ -0,0 +1,18 @@
+#ifndef PERF_BUILD_ID_H_
+#define PERF_BUILD_ID_H_ 1
+
+#define BUILD_ID_SIZE 20
+
+#include "tool.h"
+#include <linux/types.h>
+
+extern struct perf_tool build_id__mark_dso_hit_ops;
+struct dso;
+
+int build_id__sprintf(const u8 *build_id, int len, char *bf);
+char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
+
+int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct perf_evsel *evsel,
+ struct machine *machine);
+#endif
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
new file mode 100644
index 00000000000..7b176dd02e1
--- /dev/null
+++ b/tools/perf/util/cache.h
@@ -0,0 +1,76 @@
+#ifndef __PERF_CACHE_H
+#define __PERF_CACHE_H
+
+#include <stdbool.h>
+#include "util.h"
+#include "strbuf.h"
+#include "../perf.h"
+#include "../ui/ui.h"
+
+#define CMD_EXEC_PATH "--exec-path"
+#define CMD_PERF_DIR "--perf-dir="
+#define CMD_WORK_TREE "--work-tree="
+#define CMD_DEBUGFS_DIR "--debugfs-dir="
+
+#define PERF_DIR_ENVIRONMENT "PERF_DIR"
+#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
+#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH"
+#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
+#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
+
+typedef int (*config_fn_t)(const char *, const char *, void *);
+extern int perf_default_config(const char *, const char *, void *);
+extern int perf_config(config_fn_t fn, void *);
+extern int perf_config_int(const char *, const char *);
+extern int perf_config_bool(const char *, const char *);
+extern int config_error_nonbool(const char *);
+extern const char *perf_config_dirname(const char *, const char *);
+
+/* pager.c */
+extern void setup_pager(void);
+extern const char *pager_program;
+extern int pager_in_use(void);
+extern int pager_use_color;
+
+char *alias_lookup(const char *alias);
+int split_cmdline(char *cmdline, const char ***argv);
+
+#define alloc_nr(x) (((x)+16)*3/2)
+
+/*
+ * Realloc the buffer pointed at by variable 'x' so that it can hold
+ * at least 'nr' entries; the number of entries currently allocated
+ * is 'alloc', using the standard growing factor alloc_nr() macro.
+ *
+ * DO NOT USE any expression with side-effects for 'x' or 'alloc'.
+ */
+#define ALLOC_GROW(x, nr, alloc) \
+ do { \
+ if ((nr) > alloc) { \
+ if (alloc_nr(alloc) < (nr)) \
+ alloc = (nr); \
+ else \
+ alloc = alloc_nr(alloc); \
+ x = xrealloc((x), alloc * sizeof(*(x))); \
+ } \
+	} while (0)
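+
+/*
+ * Usage sketch (illustrative): append to a growable array; 'nr' is the
+ * required entry count and 'alloc' the current capacity in entries.
+ *
+ *	int *vals = NULL, nr = 0, alloc = 0;
+ *
+ *	ALLOC_GROW(vals, nr + 1, alloc);
+ *	vals[nr++] = 42;
+ */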
+
+static inline int is_absolute_path(const char *path)
+{
+ return path[0] == '/';
+}
+
+const char *make_nonrelative_path(const char *path);
+char *strip_path_suffix(const char *path, const char *suffix);
+
+extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+
+extern char *perf_pathdup(const char *fmt, ...)
+ __attribute__((format (printf, 1, 2)));
+
+/* Matches the libc/libbsd function attribute so we declare this unconditionally: */
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+
+#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
new file mode 100644
index 00000000000..48b6d3f5001
--- /dev/null
+++ b/tools/perf/util/callchain.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Handle the callchains from the stream in an ad-hoc radix tree and then
+ * sort them in an rbtree.
+ *
+ * Using a radix tree for the code paths provides fast retrieval and
+ * factorizes memory use. It also lets us use the paths in a
+ * hierarchical graph view.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <math.h>
+
+#include "asm/bug.h"
+
+#include "hist.h"
+#include "util.h"
+#include "sort.h"
+#include "machine.h"
+#include "callchain.h"
+
+__thread struct callchain_cursor callchain_cursor;
+
+int
+parse_callchain_report_opt(const char *arg)
+{
+ char *tok, *tok2;
+ char *endptr;
+
+ symbol_conf.use_callchain = true;
+
+ if (!arg)
+ return 0;
+
+ tok = strtok((char *)arg, ",");
+ if (!tok)
+ return -1;
+
+ /* get the output mode */
+	if (!strncmp(tok, "graph", strlen(arg))) {
+		callchain_param.mode = CHAIN_GRAPH_ABS;
+	} else if (!strncmp(tok, "flat", strlen(arg))) {
+ callchain_param.mode = CHAIN_FLAT;
+ } else if (!strncmp(tok, "fractal", strlen(arg))) {
+ callchain_param.mode = CHAIN_GRAPH_REL;
+ } else if (!strncmp(tok, "none", strlen(arg))) {
+ callchain_param.mode = CHAIN_NONE;
+ symbol_conf.use_callchain = false;
+ return 0;
+ } else {
+ return -1;
+ }
+
+ /* get the min percentage */
+ tok = strtok(NULL, ",");
+ if (!tok)
+ goto setup;
+
+ callchain_param.min_percent = strtod(tok, &endptr);
+ if (tok == endptr)
+ return -1;
+
+ /* get the print limit */
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+
+ if (tok2[0] != 'c') {
+ callchain_param.print_limit = strtoul(tok2, &endptr, 0);
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+ }
+
+ /* get the call chain order */
+ if (!strncmp(tok2, "caller", strlen("caller")))
+ callchain_param.order = ORDER_CALLER;
+ else if (!strncmp(tok2, "callee", strlen("callee")))
+ callchain_param.order = ORDER_CALLEE;
+ else
+ return -1;
+
+ /* Get the sort key */
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+ if (!strncmp(tok2, "function", strlen("function")))
+ callchain_param.key = CCKEY_FUNCTION;
+ else if (!strncmp(tok2, "address", strlen("address")))
+ callchain_param.key = CCKEY_ADDRESS;
+ else
+ return -1;
+setup:
+ if (callchain_register_param(&callchain_param) < 0) {
+ pr_err("Can't register callchain params\n");
+ return -1;
+ }
+ return 0;
+}
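+
+/*
+ * Example (hypothetical option string): "graph,0.5,100,caller,function"
+ * parses as mode=CHAIN_GRAPH_ABS, min_percent=0.5, print_limit=100,
+ * order=ORDER_CALLER and key=CCKEY_FUNCTION; trailing fields may be
+ * omitted, in which case parsing jumps straight to the setup label.
+ */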
+
+static void
+rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
+ enum chain_mode mode)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct callchain_node *rnode;
+ u64 chain_cumul = callchain_cumul_hits(chain);
+
+ while (*p) {
+ u64 rnode_cumul;
+
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node);
+ rnode_cumul = callchain_cumul_hits(rnode);
+
+ switch (mode) {
+ case CHAIN_FLAT:
+ if (rnode->hit < chain->hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+		case CHAIN_GRAPH_ABS: /* fall through */
+ case CHAIN_GRAPH_REL:
+ if (rnode_cumul < chain_cumul)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ case CHAIN_NONE:
+ default:
+ break;
+ }
+ }
+
+ rb_link_node(&chain->rb_node, parent, p);
+ rb_insert_color(&chain->rb_node, root);
+}
+
+static void
+__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+ u64 min_hit)
+{
+ struct rb_node *n;
+ struct callchain_node *child;
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
+ __sort_chain_flat(rb_root, child, min_hit);
+ }
+
+ if (node->hit && node->hit >= min_hit)
+ rb_insert_callchain(rb_root, node, CHAIN_FLAT);
+}
+
+/*
+ * Once we have every callchain from the stream, we can
+ * sort them by hit
+ */
+static void
+sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
+ u64 min_hit, struct callchain_param *param __maybe_unused)
+{
+ __sort_chain_flat(rb_root, &root->node, min_hit);
+}
+
+static void __sort_chain_graph_abs(struct callchain_node *node,
+ u64 min_hit)
+{
+ struct rb_node *n;
+ struct callchain_node *child;
+
+ node->rb_root = RB_ROOT;
+ n = rb_first(&node->rb_root_in);
+
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
+ __sort_chain_graph_abs(child, min_hit);
+ if (callchain_cumul_hits(child) >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_ABS);
+ }
+}
+
+static void
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
+ u64 min_hit, struct callchain_param *param __maybe_unused)
+{
+ __sort_chain_graph_abs(&chain_root->node, min_hit);
+ rb_root->rb_node = chain_root->node.rb_root.rb_node;
+}
+
+static void __sort_chain_graph_rel(struct callchain_node *node,
+ double min_percent)
+{
+ struct rb_node *n;
+ struct callchain_node *child;
+ u64 min_hit;
+
+ node->rb_root = RB_ROOT;
+ min_hit = ceil(node->children_hit * min_percent);
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
+ __sort_chain_graph_rel(child, min_percent);
+ if (callchain_cumul_hits(child) >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_REL);
+ }
+}
+
+static void
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
+ u64 min_hit __maybe_unused, struct callchain_param *param)
+{
+ __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
+ rb_root->rb_node = chain_root->node.rb_root.rb_node;
+}
+
+int callchain_register_param(struct callchain_param *param)
+{
+ switch (param->mode) {
+ case CHAIN_GRAPH_ABS:
+ param->sort = sort_chain_graph_abs;
+ break;
+ case CHAIN_GRAPH_REL:
+ param->sort = sort_chain_graph_rel;
+ break;
+ case CHAIN_FLAT:
+ param->sort = sort_chain_flat;
+ break;
+ case CHAIN_NONE:
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Create a child for a parent. If inherit_children, then the new child
+ * will become the new parent of its parent's children
+ */
+static struct callchain_node *
+create_child(struct callchain_node *parent, bool inherit_children)
+{
+ struct callchain_node *new;
+
+ new = zalloc(sizeof(*new));
+ if (!new) {
+ perror("not enough memory to create child for code path tree");
+ return NULL;
+ }
+ new->parent = parent;
+ INIT_LIST_HEAD(&new->val);
+
+ if (inherit_children) {
+ struct rb_node *n;
+ struct callchain_node *child;
+
+ new->rb_root_in = parent->rb_root_in;
+ parent->rb_root_in = RB_ROOT;
+
+ n = rb_first(&new->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ child->parent = new;
+ n = rb_next(n);
+ }
+
+ /* make it the first child */
+ rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
+ rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
+ }
+
+ return new;
+}
+
+/*
+ * Fill the node with callchain values
+ */
+static void
+fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
+{
+ struct callchain_cursor_node *cursor_node;
+
+ node->val_nr = cursor->nr - cursor->pos;
+ if (!node->val_nr)
+ pr_warning("Warning: empty node in callchain tree\n");
+
+ cursor_node = callchain_cursor_current(cursor);
+
+ while (cursor_node) {
+ struct callchain_list *call;
+
+ call = zalloc(sizeof(*call));
+ if (!call) {
+ perror("not enough memory for the code path tree");
+ return;
+ }
+ call->ip = cursor_node->ip;
+ call->ms.sym = cursor_node->sym;
+ call->ms.map = cursor_node->map;
+ list_add_tail(&call->list, &node->val);
+
+ callchain_cursor_advance(cursor);
+ cursor_node = callchain_cursor_current(cursor);
+ }
+}
+
+static struct callchain_node *
+add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ struct callchain_node *new;
+
+ new = create_child(parent, false);
+ fill_node(new, cursor);
+
+ new->children_hit = 0;
+ new->hit = period;
+ return new;
+}
+
+static s64 match_chain(struct callchain_cursor_node *node,
+ struct callchain_list *cnode)
+{
+ struct symbol *sym = node->sym;
+
+ if (cnode->ms.sym && sym &&
+ callchain_param.key == CCKEY_FUNCTION)
+ return cnode->ms.sym->start - sym->start;
+ else
+ return cnode->ip - node->ip;
+}
+
+/*
+ * Split the parent in two parts (a new child is created) and
+ * give a part of its callchain to the created child.
+ * Then create another child to host the given callchain of the new branch
+ */
+static void
+split_add_child(struct callchain_node *parent,
+ struct callchain_cursor *cursor,
+ struct callchain_list *to_split,
+ u64 idx_parents, u64 idx_local, u64 period)
+{
+ struct callchain_node *new;
+ struct list_head *old_tail;
+ unsigned int idx_total = idx_parents + idx_local;
+
+ /* split */
+ new = create_child(parent, true);
+
+ /* split the callchain and move a part to the new child */
+ old_tail = parent->val.prev;
+ list_del_range(&to_split->list, old_tail);
+ new->val.next = &to_split->list;
+ new->val.prev = old_tail;
+ to_split->list.prev = &new->val;
+ old_tail->next = &new->val;
+
+ /* split the hits */
+ new->hit = parent->hit;
+ new->children_hit = parent->children_hit;
+ parent->children_hit = callchain_cumul_hits(new);
+ new->val_nr = parent->val_nr - idx_local;
+ parent->val_nr = idx_local;
+
+ /* create a new child for the new branch if any */
+ if (idx_total < cursor->nr) {
+ struct callchain_node *first;
+ struct callchain_list *cnode;
+ struct callchain_cursor_node *node;
+ struct rb_node *p, **pp;
+
+ parent->hit = 0;
+ parent->children_hit += period;
+
+ node = callchain_cursor_current(cursor);
+ new = add_child(parent, cursor, period);
+
+		/*
+		 * This is the second child, since we moved the parent's
+		 * children to the new (first) child above.
+		 */
+ p = parent->rb_root_in.rb_node;
+ first = rb_entry(p, struct callchain_node, rb_node_in);
+ cnode = list_first_entry(&first->val, struct callchain_list,
+ list);
+
+ if (match_chain(node, cnode) < 0)
+ pp = &p->rb_left;
+ else
+ pp = &p->rb_right;
+
+ rb_link_node(&new->rb_node_in, p, pp);
+ rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
+ } else {
+ parent->hit = period;
+ }
+}
+
+static int
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period);
+
+static void
+append_chain_children(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ struct callchain_node *rnode;
+ struct callchain_cursor_node *node;
+ struct rb_node **p = &root->rb_root_in.rb_node;
+ struct rb_node *parent = NULL;
+
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ return;
+
+	/* look up in the children */
+ while (*p) {
+ s64 ret;
+
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node_in);
+
+		/* If at least the first entry matches, rely on the children */
+ ret = append_chain(rnode, cursor, period);
+ if (ret == 0)
+ goto inc_children_hit;
+
+ if (ret < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ /* nothing in children, add to the current node */
+ rnode = add_child(root, cursor, period);
+ rb_link_node(&rnode->rb_node_in, parent, p);
+ rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
+
+inc_children_hit:
+ root->children_hit += period;
+}
+
+static int
+append_chain(struct callchain_node *root,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ struct callchain_list *cnode;
+ u64 start = cursor->pos;
+ bool found = false;
+ u64 matches;
+ int cmp = 0;
+
+	/*
+	 * Look up in the current node.
+	 * If we have a symbol, compare on the symbol start so we match
+	 * anywhere inside a function, unless function
+	 * mode is disabled.
+	 */
+ list_for_each_entry(cnode, &root->val, list) {
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(cursor);
+ if (!node)
+ break;
+
+ cmp = match_chain(node, cnode);
+ if (cmp)
+ break;
+
+ found = true;
+
+ callchain_cursor_advance(cursor);
+ }
+
+	/* no match: relay to the parent */
+ if (!found) {
+ WARN_ONCE(!cmp, "Chain comparison error\n");
+ return cmp;
+ }
+
+ matches = cursor->pos - start;
+
+ /* we match only a part of the node. Split it and add the new chain */
+ if (matches < root->val_nr) {
+ split_add_child(root, cursor, cnode, start, matches, period);
+ return 0;
+ }
+
+ /* we match 100% of the path, increment the hit */
+ if (matches == root->val_nr && cursor->pos == cursor->nr) {
+ root->hit += period;
+ return 0;
+ }
+
+ /* We match the node and still have a part remaining */
+ append_chain_children(root, cursor, period);
+
+ return 0;
+}
+
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period)
+{
+ if (!cursor->nr)
+ return 0;
+
+ callchain_cursor_commit(cursor);
+
+ append_chain_children(&root->node, cursor, period);
+
+ if (cursor->nr > root->max_depth)
+ root->max_depth = cursor->nr;
+
+ return 0;
+}
+
+static int
+merge_chain_branch(struct callchain_cursor *cursor,
+ struct callchain_node *dst, struct callchain_node *src)
+{
+ struct callchain_cursor_node **old_last = cursor->last;
+ struct callchain_node *child;
+ struct callchain_list *list, *next_list;
+ struct rb_node *n;
+ int old_pos = cursor->nr;
+ int err = 0;
+
+ list_for_each_entry_safe(list, next_list, &src->val, list) {
+ callchain_cursor_append(cursor, list->ip,
+ list->ms.map, list->ms.sym);
+ list_del(&list->list);
+ free(list);
+ }
+
+ if (src->hit) {
+ callchain_cursor_commit(cursor);
+ append_chain_children(dst, cursor, src->hit);
+ }
+
+ n = rb_first(&src->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+ rb_erase(&child->rb_node_in, &src->rb_root_in);
+
+ err = merge_chain_branch(cursor, dst, child);
+ if (err)
+ break;
+
+ free(child);
+ }
+
+ cursor->nr = old_pos;
+ cursor->last = old_last;
+
+ return err;
+}
+
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src)
+{
+ return merge_chain_branch(cursor, &dst->node, &src->node);
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor,
+ u64 ip, struct map *map, struct symbol *sym)
+{
+ struct callchain_cursor_node *node = *cursor->last;
+
+ if (!node) {
+ node = calloc(1, sizeof(*node));
+ if (!node)
+ return -ENOMEM;
+
+ *cursor->last = node;
+ }
+
+ node->ip = ip;
+ node->map = map;
+ node->sym = sym;
+
+ cursor->nr++;
+
+ cursor->last = &node->next;
+
+ return 0;
+}
+
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+ struct perf_evsel *evsel, struct addr_location *al,
+ int max_stack)
+{
+ if (sample->callchain == NULL)
+ return 0;
+
+ if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
+ sort__has_parent) {
+ return machine__resolve_callchain(al->machine, evsel, al->thread,
+ sample, parent, al, max_stack);
+ }
+ return 0;
+}
+
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
+{
+ if (!symbol_conf.use_callchain)
+ return 0;
+ return callchain_append(he->callchain, &callchain_cursor, sample->period);
+}
+
+int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+ bool hide_unresolved)
+{
+ al->map = node->map;
+ al->sym = node->sym;
+ if (node->map)
+ al->addr = node->map->map_ip(node->map, node->ip);
+ else
+ al->addr = node->ip;
+
+ if (al->sym == NULL) {
+ if (hide_unresolved)
+ return 0;
+ if (al->map == NULL)
+ goto out;
+ }
+
+ if (al->map->groups == &al->machine->kmaps) {
+ if (machine__is_host(al->machine)) {
+ al->cpumode = PERF_RECORD_MISC_KERNEL;
+ al->level = 'k';
+ } else {
+ al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
+ al->level = 'g';
+ }
+ } else {
+ if (machine__is_host(al->machine)) {
+ al->cpumode = PERF_RECORD_MISC_USER;
+ al->level = '.';
+ } else if (perf_guest) {
+ al->cpumode = PERF_RECORD_MISC_GUEST_USER;
+ al->level = 'u';
+ } else {
+ al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ al->level = 'H';
+ }
+ }
+
+out:
+ return 1;
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
new file mode 100644
index 00000000000..8f84423a75d
--- /dev/null
+++ b/tools/perf/util/callchain.h
@@ -0,0 +1,179 @@
+#ifndef __PERF_CALLCHAIN_H
+#define __PERF_CALLCHAIN_H
+
+#include "../perf.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include "event.h"
+#include "symbol.h"
+
+enum perf_call_graph_mode {
+ CALLCHAIN_NONE,
+ CALLCHAIN_FP,
+ CALLCHAIN_DWARF,
+ CALLCHAIN_MAX
+};
+
+enum chain_mode {
+ CHAIN_NONE,
+ CHAIN_FLAT,
+ CHAIN_GRAPH_ABS,
+ CHAIN_GRAPH_REL
+};
+
+enum chain_order {
+ ORDER_CALLER,
+ ORDER_CALLEE
+};
+
+struct callchain_node {
+ struct callchain_node *parent;
+ struct list_head val;
+ struct rb_node rb_node_in; /* to insert nodes in an rbtree */
+ struct rb_node rb_node; /* to sort nodes in an output tree */
+ struct rb_root rb_root_in; /* input tree of children */
+ struct rb_root rb_root; /* sorted output tree of children */
+ unsigned int val_nr;
+ u64 hit;
+ u64 children_hit;
+};
+
+struct callchain_root {
+ u64 max_depth;
+ struct callchain_node node;
+};
+
+struct callchain_param;
+
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
+ u64, struct callchain_param *);
+
+enum chain_key {
+ CCKEY_FUNCTION,
+ CCKEY_ADDRESS
+};
+
+struct callchain_param {
+ enum chain_mode mode;
+ u32 print_limit;
+ double min_percent;
+ sort_chain_func_t sort;
+ enum chain_order order;
+ enum chain_key key;
+};
+
+struct callchain_list {
+ u64 ip;
+ struct map_symbol ms;
+ struct list_head list;
+};
+
+/*
+ * A callchain cursor is a singly linked list that
+ * lets one feed a callchain progressively.
+ * It keeps persistently allocated entries to minimize
+ * allocations.
+ */
+struct callchain_cursor_node {
+ u64 ip;
+ struct map *map;
+ struct symbol *sym;
+ struct callchain_cursor_node *next;
+};
+
+struct callchain_cursor {
+ u64 nr;
+ struct callchain_cursor_node *first;
+ struct callchain_cursor_node **last;
+ u64 pos;
+ struct callchain_cursor_node *curr;
+};
+
+extern __thread struct callchain_cursor callchain_cursor;
+
+static inline void callchain_init(struct callchain_root *root)
+{
+ INIT_LIST_HEAD(&root->node.val);
+
+ root->node.parent = NULL;
+ root->node.hit = 0;
+ root->node.children_hit = 0;
+ root->node.rb_root_in = RB_ROOT;
+ root->max_depth = 0;
+}
+
+static inline u64 callchain_cumul_hits(struct callchain_node *node)
+{
+ return node->hit + node->children_hit;
+}
+
+int callchain_register_param(struct callchain_param *param);
+int callchain_append(struct callchain_root *root,
+ struct callchain_cursor *cursor,
+ u64 period);
+
+int callchain_merge(struct callchain_cursor *cursor,
+ struct callchain_root *dst, struct callchain_root *src);
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+}
+
+int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
+ struct map *map, struct symbol *sym);
+
+/* Close a cursor writing session. Initialize for the reader */
+static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->first;
+ cursor->pos = 0;
+}
+
+/* Cursor reading iteration helpers */
+static inline struct callchain_cursor_node *
+callchain_cursor_current(struct callchain_cursor *cursor)
+{
+ if (cursor->pos == cursor->nr)
+ return NULL;
+
+ return cursor->curr;
+}
+
+static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
+{
+ cursor->curr = cursor->curr->next;
+ cursor->pos++;
+}
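+
+/*
+ * Typical cursor lifecycle (sketch; 'ip', 'map', 'sym' and 'node' are
+ * placeholders): reset, append entries, commit, then iterate until
+ * callchain_cursor_current() returns NULL.
+ *
+ *	callchain_cursor_reset(&callchain_cursor);
+ *	callchain_cursor_append(&callchain_cursor, ip, map, sym);
+ *	callchain_cursor_commit(&callchain_cursor);
+ *	while ((node = callchain_cursor_current(&callchain_cursor)) != NULL)
+ *		callchain_cursor_advance(&callchain_cursor);
+ */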
+
+struct option;
+struct hist_entry;
+
+int record_parse_callchain(const char *arg, struct record_opts *opts);
+int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+
+int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+ struct perf_evsel *evsel, struct addr_location *al,
+ int max_stack);
+int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
+int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
+ bool hide_unresolved);
+
+extern const char record_callchain_help[];
+int parse_callchain_report_opt(const char *arg);
+
+static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
+ struct callchain_cursor *src)
+{
+ *dest = *src;
+
+ dest->first = src->curr;
+ dest->nr -= src->pos;
+}
+#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
new file mode 100644
index 00000000000..88f7be39943
--- /dev/null
+++ b/tools/perf/util/cgroup.c
@@ -0,0 +1,177 @@
+#include "util.h"
+#include "../perf.h"
+#include "parse-options.h"
+#include "evsel.h"
+#include "cgroup.h"
+#include "evlist.h"
+
+int nr_cgroups;
+
+static int
+cgroupfs_find_mountpoint(char *buf, size_t maxlen)
+{
+ FILE *fp;
+ char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
+ char *token, *saved_ptr = NULL;
+ int found = 0;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return -1;
+
+	/*
+	 * in order to handle split hierarchy, we need to scan /proc/mounts
+	 * and inspect every cgroupfs mount point to find one that has the
+	 * perf_event subsystem
+	 */
+ while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
+ STR(PATH_MAX)"s %*d %*d\n",
+ mountpoint, type, tokens) == 3) {
+
+ if (!strcmp(type, "cgroup")) {
+
+ token = strtok_r(tokens, ",", &saved_ptr);
+
+ while (token != NULL) {
+ if (!strcmp(token, "perf_event")) {
+ found = 1;
+ break;
+ }
+ token = strtok_r(NULL, ",", &saved_ptr);
+ }
+ }
+ if (found)
+ break;
+ }
+ fclose(fp);
+ if (!found)
+ return -1;
+
+ if (strlen(mountpoint) < maxlen) {
+ strcpy(buf, mountpoint);
+ return 0;
+ }
+ return -1;
+}
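+
+/*
+ * Example (illustrative /proc/mounts line):
+ *
+ *	cgroup /sys/fs/cgroup/perf_event cgroup rw,perf_event 0 0
+ *
+ * The type field is "cgroup" and the option list carries the
+ * "perf_event" token, so "/sys/fs/cgroup/perf_event" is copied to buf.
+ */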
+
+static int open_cgroup(char *name)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+ int fd;
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
+ return -1;
+
+ snprintf(path, PATH_MAX, "%s/%s", mnt, name);
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1)
+ fprintf(stderr, "no access to cgroup %s\n", path);
+
+ return fd;
+}
+
+static int add_cgroup(struct perf_evlist *evlist, char *str)
+{
+ struct perf_evsel *counter;
+ struct cgroup_sel *cgrp = NULL;
+ int n;
+ /*
+ * check if cgrp is already defined, if so we reuse it
+ */
+ evlist__for_each(evlist, counter) {
+ cgrp = counter->cgrp;
+ if (!cgrp)
+ continue;
+ if (!strcmp(cgrp->name, str))
+ break;
+
+ cgrp = NULL;
+ }
+
+ if (!cgrp) {
+ cgrp = zalloc(sizeof(*cgrp));
+ if (!cgrp)
+ return -1;
+
+ cgrp->name = str;
+
+ cgrp->fd = open_cgroup(str);
+ if (cgrp->fd == -1) {
+ free(cgrp);
+ return -1;
+ }
+ }
+
+	/*
+	 * find the corresponding event: when adding cgroup N,
+	 * we need to find event N
+	 */
+ n = 0;
+ evlist__for_each(evlist, counter) {
+ if (n == nr_cgroups)
+ goto found;
+ n++;
+ }
+ if (cgrp->refcnt == 0)
+ free(cgrp);
+
+ return -1;
+found:
+ cgrp->refcnt++;
+ counter->cgrp = cgrp;
+ return 0;
+}
+
+void close_cgroup(struct cgroup_sel *cgrp)
+{
+ if (!cgrp)
+ return;
+
+ /* XXX: not reentrant */
+ if (--cgrp->refcnt == 0) {
+ close(cgrp->fd);
+ zfree(&cgrp->name);
+ free(cgrp);
+ }
+}
+
+int parse_cgroups(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+ const char *p, *e, *eos = str + strlen(str);
+ char *s;
+ int ret;
+
+ if (list_empty(&evlist->entries)) {
+ fprintf(stderr, "must define events before cgroups\n");
+ return -1;
+ }
+
+ for (;;) {
+ p = strchr(str, ',');
+ e = p ? p : eos;
+
+ /* allow empty cgroups, i.e., skip */
+ if (e - str) {
+			/* strndup() NUL-terminates the copy */
+ s = strndup(str, e - str);
+ if (!s)
+ return -1;
+ ret = add_cgroup(evlist, s);
+ if (ret) {
+ free(s);
+ return -1;
+ }
+ }
+		/* nr_cgroups is increased even for empty cgroups */
+ nr_cgroups++;
+ if (!p)
+ break;
+ str = p+1;
+ }
+ return 0;
+}
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
new file mode 100644
index 00000000000..89acd6debdc
--- /dev/null
+++ b/tools/perf/util/cgroup.h
@@ -0,0 +1,17 @@
+#ifndef __CGROUP_H__
+#define __CGROUP_H__
+
+struct option;
+
+struct cgroup_sel {
+ char *name;
+ int fd;
+ int refcnt;
+};
+
+extern int nr_cgroups; /* number of explicit cgroups defined */
+extern void close_cgroup(struct cgroup_sel *cgrp);
+extern int parse_cgroups(const struct option *opt, const char *str, int unset);
+
+#endif /* __CGROUP_H__ */
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
new file mode 100644
index 00000000000..87b8672eb41
--- /dev/null
+++ b/tools/perf/util/color.c
@@ -0,0 +1,337 @@
+#include <linux/kernel.h>
+#include "cache.h"
+#include "color.h"
+#include <math.h>
+
+int perf_use_color_default = -1;
+
+static int parse_color(const char *name, int len)
+{
+ static const char * const color_names[] = {
+ "normal", "black", "red", "green", "yellow",
+ "blue", "magenta", "cyan", "white"
+ };
+ char *end;
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
+ const char *str = color_names[i];
+ if (!strncasecmp(name, str, len) && !str[len])
+ return i - 1;
+ }
+ i = strtol(name, &end, 10);
+ if (end - name == len && i >= -1 && i <= 255)
+ return i;
+ return -2;
+}
+
+static int parse_attr(const char *name, int len)
+{
+ static const int attr_values[] = { 1, 2, 4, 5, 7 };
+ static const char * const attr_names[] = {
+ "bold", "dim", "ul", "blink", "reverse"
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
+ const char *str = attr_names[i];
+ if (!strncasecmp(name, str, len) && !str[len])
+ return attr_values[i];
+ }
+ return -1;
+}
+
+void color_parse(const char *value, const char *var, char *dst)
+{
+ color_parse_mem(value, strlen(value), var, dst);
+}
+
+void color_parse_mem(const char *value, int value_len, const char *var,
+ char *dst)
+{
+ const char *ptr = value;
+ int len = value_len;
+ int attr = -1;
+ int fg = -2;
+ int bg = -2;
+
+ if (!strncasecmp(value, "reset", len)) {
+ strcpy(dst, PERF_COLOR_RESET);
+ return;
+ }
+
+ /* [fg [bg]] [attr] */
+ while (len > 0) {
+ const char *word = ptr;
+ int val, wordlen = 0;
+
+ while (len > 0 && !isspace(word[wordlen])) {
+ wordlen++;
+ len--;
+ }
+
+ ptr = word + wordlen;
+ while (len > 0 && isspace(*ptr)) {
+ ptr++;
+ len--;
+ }
+
+ val = parse_color(word, wordlen);
+ if (val >= -1) {
+ if (fg == -2) {
+ fg = val;
+ continue;
+ }
+ if (bg == -2) {
+ bg = val;
+ continue;
+ }
+ goto bad;
+ }
+ val = parse_attr(word, wordlen);
+ if (val < 0 || attr != -1)
+ goto bad;
+ attr = val;
+ }
+
+ if (attr >= 0 || fg >= 0 || bg >= 0) {
+ int sep = 0;
+
+ *dst++ = '\033';
+ *dst++ = '[';
+ if (attr >= 0) {
+ *dst++ = '0' + attr;
+ sep++;
+ }
+ if (fg >= 0) {
+ if (sep++)
+ *dst++ = ';';
+ if (fg < 8) {
+ *dst++ = '3';
+ *dst++ = '0' + fg;
+ } else {
+ dst += sprintf(dst, "38;5;%d", fg);
+ }
+ }
+ if (bg >= 0) {
+ if (sep++)
+ *dst++ = ';';
+ if (bg < 8) {
+ *dst++ = '4';
+ *dst++ = '0' + bg;
+ } else {
+ dst += sprintf(dst, "48;5;%d", bg);
+ }
+ }
+ *dst++ = 'm';
+ }
+ *dst = 0;
+ return;
+bad:
+ die("bad color value '%.*s' for variable '%s'", value_len, value, var);
+}
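+
+/*
+ * Example (illustrative): color_parse("red blue bold", var, dst) fills
+ * dst with "\033[1;31;44m" (attr bold, fg red, bg blue), while "reset"
+ * simply copies PERF_COLOR_RESET.
+ */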
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || (pager_in_use() && pager_use_color)) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return perf_default_config(var, value, cb);
+}
+
+static int __color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(1) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += scnprintf(bf, size, "%s", color);
+ r += vscnprintf(bf + r, size - r, fmt, args);
+ if (perf_use_color_default && *color)
+ r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += scnprintf(bf + r, size - r, "%s", trail);
+ return r;
+}
+
+static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
+ va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(fileno(fp)) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += fprintf(fp, "%s", color);
+ r += vfprintf(fp, fmt, args);
+ if (perf_use_color_default && *color)
+ r += fprintf(fp, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += fprintf(fp, "%s", trail);
+ return r;
+}
+
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args)
+{
+ return __color_vsnprintf(bf, size, color, fmt, args, NULL);
+}
+
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
+{
+ return __color_vfprintf(fp, color, fmt, args, NULL);
+}
+
+int color_snprintf(char *bf, size_t size, const char *color,
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vsnprintf(bf, size, color, fmt, args);
+ va_end(args);
+ return r;
+}
+
+int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vfprintf(fp, color, fmt, args);
+ va_end(args);
+ return r;
+}
+
+int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+ va_start(args, fmt);
+ r = __color_vfprintf(fp, color, fmt, args, "\n");
+ va_end(args);
+ return r;
+}
+
+/*
+ * This function splits the buffer by newlines and colors the lines individually.
+ *
+ * Returns 0 on success.
+ */
+int color_fwrite_lines(FILE *fp, const char *color,
+ size_t count, const char *buf)
+{
+ if (!*color)
+ return fwrite(buf, count, 1, fp) != 1;
+
+ while (count) {
+ char *p = memchr(buf, '\n', count);
+
+ if (p != buf && (fputs(color, fp) < 0 ||
+ fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
+ fputs(PERF_COLOR_RESET, fp) < 0))
+ return -1;
+ if (!p)
+ return 0;
+ if (fputc('\n', fp) < 0)
+ return -1;
+ count -= p + 1 - buf;
+ buf = p + 1;
+ }
+ return 0;
+}
+
+const char *get_percent_color(double percent)
+{
+ const char *color = PERF_COLOR_NORMAL;
+
+ /*
+ * We color high-overhead entries in red, mid-overhead
+ * entries in green - and keep the low overhead places
+ * normal:
+ */
+ if (fabs(percent) >= MIN_RED)
+ color = PERF_COLOR_RED;
+ else {
+ if (fabs(percent) > MIN_GREEN)
+ color = PERF_COLOR_GREEN;
+ }
+ return color;
+}
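+
+/*
+ * Example (illustrative): with MIN_RED at 5.0 and MIN_GREEN at 0.5, a
+ * 7.3% entry prints red, a 1.2% entry green, and a 0.3% entry stays in
+ * the normal color.
+ */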
+
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
+{
+ int r;
+ const char *color;
+
+ color = get_percent_color(percent);
+ r = color_fprintf(fp, color, fmt, percent);
+
+ return r;
+}
+
+int value_color_snprintf(char *bf, size_t size, const char *fmt, double value)
+{
+ const char *color = get_percent_color(value);
+ return color_snprintf(bf, size, color, fmt, value);
+}
+
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ double percent;
+
+ va_start(args, fmt);
+ percent = va_arg(args, double);
+ va_end(args);
+ return value_color_snprintf(bf, size, fmt, percent);
+}
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
new file mode 100644
index 00000000000..7ff30a62a13
--- /dev/null
+++ b/tools/perf/util/color.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_COLOR_H
+#define __PERF_COLOR_H
+
+/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
+#define COLOR_MAXLEN 24
+
+#define PERF_COLOR_NORMAL ""
+#define PERF_COLOR_RESET "\033[m"
+#define PERF_COLOR_BOLD "\033[1m"
+#define PERF_COLOR_RED "\033[31m"
+#define PERF_COLOR_GREEN "\033[32m"
+#define PERF_COLOR_YELLOW "\033[33m"
+#define PERF_COLOR_BLUE "\033[34m"
+#define PERF_COLOR_MAGENTA "\033[35m"
+#define PERF_COLOR_CYAN "\033[36m"
+#define PERF_COLOR_BG_RED "\033[41m"
+
+#define MIN_GREEN 0.5
+#define MIN_RED 5.0
+
+/*
+ * This variable stores the value of color.ui
+ */
+extern int perf_use_color_default;
+
+/*
+ * Use this instead of perf_default_config if you need the value of color.ui.
+ */
+int perf_color_default_config(const char *var, const char *value, void *cb);
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
+void color_parse(const char *value, const char *var, char *dst);
+void color_parse_mem(const char *value, int len, const char *var, char *dst);
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args);
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
+int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
+int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
+int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
+int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int value_color_snprintf(char *bf, size_t size, const char *fmt, double value);
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
+const char *get_percent_color(double percent);
+
+#endif /* __PERF_COLOR_H */
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
new file mode 100644
index 00000000000..f9e777629e2
--- /dev/null
+++ b/tools/perf/util/comm.c
@@ -0,0 +1,122 @@
+#include "comm.h"
+#include "util.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+struct comm_str {
+ char *str;
+ struct rb_node rb_node;
+ int ref;
+};
+
+/* Should perhaps be moved to struct machine */
+static struct rb_root comm_str_root;
+
+static void comm_str__get(struct comm_str *cs)
+{
+ cs->ref++;
+}
+
+static void comm_str__put(struct comm_str *cs)
+{
+ if (!--cs->ref) {
+ rb_erase(&cs->rb_node, &comm_str_root);
+ zfree(&cs->str);
+ free(cs);
+ }
+}
+
+static struct comm_str *comm_str__alloc(const char *str)
+{
+ struct comm_str *cs;
+
+ cs = zalloc(sizeof(*cs));
+ if (!cs)
+ return NULL;
+
+ cs->str = strdup(str);
+ if (!cs->str) {
+ free(cs);
+ return NULL;
+ }
+
+ return cs;
+}
+
+static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct comm_str *iter, *new;
+ int cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct comm_str, rb_node);
+
+ cmp = strcmp(str, iter->str);
+ if (!cmp)
+ return iter;
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ new = comm_str__alloc(str);
+ if (!new)
+ return NULL;
+
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, root);
+
+ return new;
+}
+
+struct comm *comm__new(const char *str, u64 timestamp)
+{
+ struct comm *comm = zalloc(sizeof(*comm));
+
+ if (!comm)
+ return NULL;
+
+ comm->start = timestamp;
+
+ comm->comm_str = comm_str__findnew(str, &comm_str_root);
+ if (!comm->comm_str) {
+ free(comm);
+ return NULL;
+ }
+
+ comm_str__get(comm->comm_str);
+
+ return comm;
+}
+
+int comm__override(struct comm *comm, const char *str, u64 timestamp)
+{
+ struct comm_str *new, *old = comm->comm_str;
+
+ new = comm_str__findnew(str, &comm_str_root);
+ if (!new)
+ return -ENOMEM;
+
+ comm_str__get(new);
+ comm_str__put(old);
+ comm->comm_str = new;
+ comm->start = timestamp;
+
+ return 0;
+}
+
+void comm__free(struct comm *comm)
+{
+ comm_str__put(comm->comm_str);
+ free(comm);
+}
+
+const char *comm__str(const struct comm *comm)
+{
+ return comm->comm_str->str;
+}
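+
+/*
+ * Lifecycle sketch (hypothetical values): comm strings are interned in
+ * comm_str_root and refcounted, so two comms with the same name share
+ * a single comm_str.
+ *
+ *	struct comm *a = comm__new("bash", 100);
+ *	struct comm *b = comm__new("bash", 200);	(ref is now 2)
+ *	comm__free(a);					(ref drops to 1)
+ *	comm__free(b);					(node erased and freed)
+ */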
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
new file mode 100644
index 00000000000..fac5bd51bef
--- /dev/null
+++ b/tools/perf/util/comm.h
@@ -0,0 +1,21 @@
+#ifndef __PERF_COMM_H
+#define __PERF_COMM_H
+
+#include "../perf.h"
+#include <linux/rbtree.h>
+#include <linux/list.h>
+
+struct comm_str;
+
+struct comm {
+ struct comm_str *comm_str;
+ u64 start;
+ struct list_head list;
+};
+
+void comm__free(struct comm *comm);
+struct comm *comm__new(const char *str, u64 timestamp);
+const char *comm__str(const struct comm *comm);
+int comm__override(struct comm *comm, const char *str, u64 timestamp);
+
+#endif /* __PERF_COMM_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
new file mode 100644
index 00000000000..24519e14ac5
--- /dev/null
+++ b/tools/perf/util/config.c
@@ -0,0 +1,513 @@
+/*
+ * config.c
+ *
+ * Helper functions for parsing config items.
+ * Originally copied from GIT source.
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ * Copyright (C) Johannes Schindelin, 2005
+ *
+ */
+#include "util.h"
+#include "cache.h"
+#include "exec_cmd.h"
+#include "util/hist.h" /* perf_hist_config */
+
+#define MAXNAME (256)
+
+#define DEBUG_CACHE_DIR ".debug"
+
+char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */
+
+static FILE *config_file;
+static const char *config_file_name;
+static int config_linenr;
+static int config_file_eof;
+
+static const char *config_exclusive_filename;
+
+static int get_next_char(void)
+{
+ int c;
+ FILE *f;
+
+ c = '\n';
+ if ((f = config_file) != NULL) {
+ c = fgetc(f);
+ if (c == '\r') {
+ /* DOS like systems */
+ c = fgetc(f);
+ if (c != '\n') {
+ ungetc(c, f);
+ c = '\r';
+ }
+ }
+ if (c == '\n')
+ config_linenr++;
+ if (c == EOF) {
+ config_file_eof = 1;
+ c = '\n';
+ }
+ }
+ return c;
+}
+
+static char *parse_value(void)
+{
+ static char value[1024];
+ int quote = 0, comment = 0, space = 0;
+ size_t len = 0;
+
+ for (;;) {
+ int c = get_next_char();
+
+ if (len >= sizeof(value) - 1)
+ return NULL;
+ if (c == '\n') {
+ if (quote)
+ return NULL;
+ value[len] = 0;
+ return value;
+ }
+ if (comment)
+ continue;
+ if (isspace(c) && !quote) {
+ space = 1;
+ continue;
+ }
+ if (!quote) {
+ if (c == ';' || c == '#') {
+ comment = 1;
+ continue;
+ }
+ }
+ if (space) {
+ if (len)
+ value[len++] = ' ';
+ space = 0;
+ }
+ if (c == '\\') {
+ c = get_next_char();
+ switch (c) {
+ case '\n':
+ continue;
+ case 't':
+ c = '\t';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ /* Some characters escape as themselves */
+ case '\\': case '"':
+ break;
+ /* Reject unknown escape sequences */
+ default:
+ return NULL;
+ }
+ value[len++] = c;
+ continue;
+ }
+ if (c == '"') {
+ quote = 1-quote;
+ continue;
+ }
+ value[len++] = c;
+ }
+}
+
+static inline int iskeychar(int c)
+{
+ return isalnum(c) || c == '-' || c == '_';
+}
+
+static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
+{
+ int c;
+ char *value;
+
+ /* Get the full name */
+ for (;;) {
+ c = get_next_char();
+ if (config_file_eof)
+ break;
+ if (!iskeychar(c))
+ break;
+ name[len++] = c;
+ if (len >= MAXNAME)
+ return -1;
+ }
+ name[len] = 0;
+ while (c == ' ' || c == '\t')
+ c = get_next_char();
+
+ value = NULL;
+ if (c != '\n') {
+ if (c != '=')
+ return -1;
+ value = parse_value();
+ if (!value)
+ return -1;
+ }
+ return fn(name, value, data);
+}
+
+static int get_extended_base_var(char *name, int baselen, int c)
+{
+ do {
+ if (c == '\n')
+ return -1;
+ c = get_next_char();
+ } while (isspace(c));
+
+ /* We require the format to be '[base "extension"]' */
+ if (c != '"')
+ return -1;
+ name[baselen++] = '.';
+
+ for (;;) {
+ int ch = get_next_char();
+
+ if (ch == '\n')
+ return -1;
+ if (ch == '"')
+ break;
+ if (ch == '\\') {
+ ch = get_next_char();
+ if (ch == '\n')
+ return -1;
+ }
+ name[baselen++] = ch;
+ if (baselen > MAXNAME / 2)
+ return -1;
+ }
+
+ /* Final ']' */
+ if (get_next_char() != ']')
+ return -1;
+ return baselen;
+}
+
+static int get_base_var(char *name)
+{
+ int baselen = 0;
+
+ for (;;) {
+ int c = get_next_char();
+ if (config_file_eof)
+ return -1;
+ if (c == ']')
+ return baselen;
+ if (isspace(c))
+ return get_extended_base_var(name, baselen, c);
+ if (!iskeychar(c) && c != '.')
+ return -1;
+ if (baselen > MAXNAME / 2)
+ return -1;
+ name[baselen++] = tolower(c);
+ }
+}
+
+static int perf_parse_file(config_fn_t fn, void *data)
+{
+ int comment = 0;
+ int baselen = 0;
+ static char var[MAXNAME];
+
+ /* U+FEFF Byte Order Mark in UTF8 */
+ static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
+ const unsigned char *bomptr = utf8_bom;
+
+ for (;;) {
+ int c = get_next_char();
+ if (bomptr && *bomptr) {
+ /* We are at the file beginning; skip UTF8-encoded BOM
+ * if present. Sane editors won't put this in on their
+ * own, but e.g. Windows Notepad will do it happily. */
+ if ((unsigned char) c == *bomptr) {
+ bomptr++;
+ continue;
+ } else {
+ /* Do not tolerate partial BOM. */
+ if (bomptr != utf8_bom)
+ break;
+ /* No BOM at file beginning. Cool. */
+ bomptr = NULL;
+ }
+ }
+ if (c == '\n') {
+ if (config_file_eof)
+ return 0;
+ comment = 0;
+ continue;
+ }
+ if (comment || isspace(c))
+ continue;
+ if (c == '#' || c == ';') {
+ comment = 1;
+ continue;
+ }
+ if (c == '[') {
+ baselen = get_base_var(var);
+ if (baselen <= 0)
+ break;
+ var[baselen++] = '.';
+ var[baselen] = 0;
+ continue;
+ }
+ if (!isalpha(c))
+ break;
+ var[baselen] = tolower(c);
+ if (get_value(fn, data, var, baselen+1) < 0)
+ break;
+ }
+ die("bad config file line %d in %s", config_linenr, config_file_name);
+}
+
+static int parse_unit_factor(const char *end, unsigned long *val)
+{
+ if (!*end)
+ return 1;
+ else if (!strcasecmp(end, "k")) {
+ *val *= 1024;
+ return 1;
+ }
+ else if (!strcasecmp(end, "m")) {
+ *val *= 1024 * 1024;
+ return 1;
+ }
+ else if (!strcasecmp(end, "g")) {
+ *val *= 1024 * 1024 * 1024;
+ return 1;
+ }
+ return 0;
+}
+
+static int perf_parse_long(const char *value, long *ret)
+{
+ if (value && *value) {
+ char *end;
+ long val = strtol(value, &end, 0);
+ unsigned long factor = 1;
+ if (!parse_unit_factor(end, &factor))
+ return 0;
+ *ret = val * factor;
+ return 1;
+ }
+ return 0;
+}
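+
+/*
+ * Example (illustrative): perf_parse_long("16k", &v) stores 16384 in v,
+ * "2M" stores 2097152, and since strtol() runs with base 0, "0x40"
+ * stores 64.
+ */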
+
+static void die_bad_config(const char *name)
+{
+ if (config_file_name)
+ die("bad config value for '%s' in %s", name, config_file_name);
+ die("bad config value for '%s'", name);
+}
+
+int perf_config_int(const char *name, const char *value)
+{
+ long ret = 0;
+ if (!perf_parse_long(value, &ret))
+ die_bad_config(name);
+ return ret;
+}
+
+static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
+{
+ *is_bool = 1;
+ if (!value)
+ return 1;
+ if (!*value)
+ return 0;
+ if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on"))
+ return 1;
+ if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
+ return 0;
+ *is_bool = 0;
+ return perf_config_int(name, value);
+}
+
+int perf_config_bool(const char *name, const char *value)
+{
+ int discard;
+ return !!perf_config_bool_or_int(name, value, &discard);
+}
+
+const char *perf_config_dirname(const char *name, const char *value)
+{
+ if (!name)
+ return NULL;
+ return value;
+}
+
+static int perf_default_core_config(const char *var __maybe_unused,
+ const char *value __maybe_unused)
+{
+ /* Add other config variables here. */
+ return 0;
+}
+
+int perf_default_config(const char *var, const char *value,
+ void *dummy __maybe_unused)
+{
+ if (!prefixcmp(var, "core."))
+ return perf_default_core_config(var, value);
+
+ if (!prefixcmp(var, "hist."))
+ return perf_hist_config(var, value);
+
+ /* Add other config variables here. */
+ return 0;
+}
+
+static int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
+{
+ int ret;
+ FILE *f = fopen(filename, "r");
+
+ ret = -1;
+ if (f) {
+ config_file = f;
+ config_file_name = filename;
+ config_linenr = 1;
+ config_file_eof = 0;
+ ret = perf_parse_file(fn, data);
+ fclose(f);
+ config_file_name = NULL;
+ }
+ return ret;
+}
+
+static const char *perf_etc_perfconfig(void)
+{
+ static const char *system_wide;
+ if (!system_wide)
+ system_wide = system_path(ETC_PERFCONFIG);
+ return system_wide;
+}
+
+static int perf_env_bool(const char *k, int def)
+{
+ const char *v = getenv(k);
+ return v ? perf_config_bool(k, v) : def;
+}
+
+static int perf_config_system(void)
+{
+ return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0);
+}
+
+static int perf_config_global(void)
+{
+ return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
+}
+
+int perf_config(config_fn_t fn, void *data)
+{
+ int ret = 0, found = 0;
+ const char *home = NULL;
+
+ /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+ if (config_exclusive_filename)
+ return perf_config_from_file(fn, config_exclusive_filename, data);
+ if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
+ ret += perf_config_from_file(fn, perf_etc_perfconfig(),
+ data);
+ found += 1;
+ }
+
+ home = getenv("HOME");
+ if (perf_config_global() && home) {
+ char *user_config = strdup(mkpath("%s/.perfconfig", home));
+ struct stat st;
+
+ if (user_config == NULL) {
+ warning("Not enough memory to process %s/.perfconfig, "
+ "ignoring it.", home);
+ goto out;
+ }
+
+ if (stat(user_config, &st) < 0)
+ goto out_free;
+
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ warning("File %s not owned by current user or root, "
+ "ignoring it.", user_config);
+ goto out_free;
+ }
+
+ if (!st.st_size)
+ goto out_free;
+
+ ret += perf_config_from_file(fn, user_config, data);
+ found += 1;
+out_free:
+ free(user_config);
+ }
+out:
+ if (found == 0)
+ return -1;
+ return ret;
+}
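+
+/*
+ * Precedence sketch (illustrative): an exclusive file short-circuits
+ * everything else; otherwise the system file (ETC_PERFCONFIG) is read
+ * first and $HOME/.perfconfig second, so user values are delivered to
+ * the callback last and can override system-wide ones.
+ */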
+
+/*
+ * Call this to report error for your variable that should not
+ * get a boolean value (i.e. "[my] var" means "true").
+ */
+int config_error_nonbool(const char *var)
+{
+ return error("Missing value for '%s'", var);
+}
+
+struct buildid_dir_config {
+ char *dir;
+};
+
+static int buildid_dir_command_config(const char *var, const char *value,
+ void *data)
+{
+ struct buildid_dir_config *c = data;
+ const char *v;
+
+ /* same dir for all commands */
+ if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) {
+ v = perf_config_dirname(var, value);
+ if (!v)
+ return -1;
+ strncpy(c->dir, v, MAXPATHLEN-1);
+ c->dir[MAXPATHLEN-1] = '\0';
+ }
+ return 0;
+}
+
+static void check_buildid_dir_config(void)
+{
+ struct buildid_dir_config c;
+ c.dir = buildid_dir;
+ perf_config(buildid_dir_command_config, &c);
+}
+
+void set_buildid_dir(void)
+{
+ buildid_dir[0] = '\0';
+
+ /* try config file */
+ check_buildid_dir_config();
+
+ /* default to $HOME/.debug */
+ if (buildid_dir[0] == '\0') {
+ char *v = getenv("HOME");
+ if (v) {
+ snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
+ v, DEBUG_CACHE_DIR);
+ } else {
+ strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
+ }
+ buildid_dir[MAXPATHLEN-1] = '\0';
+ }
+ /* for communicating with external commands */
+ setenv("PERF_BUILDID_DIR", buildid_dir, 1);
+}
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
new file mode 100644
index 00000000000..c4e55b71010
--- /dev/null
+++ b/tools/perf/util/cpumap.c
@@ -0,0 +1,479 @@
+#include "util.h"
+#include <api/fs/fs.h>
+#include "../perf.h"
+#include "cpumap.h"
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static struct cpu_map *cpu_map__default_new(void)
+{
+ struct cpu_map *cpus;
+ int nr_cpus;
+
+ nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr_cpus < 0)
+ return NULL;
+
+ cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
+ if (cpus != NULL) {
+ int i;
+ for (i = 0; i < nr_cpus; ++i)
+ cpus->map[i] = i;
+
+ cpus->nr = nr_cpus;
+ }
+
+ return cpus;
+}
+
+static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
+{
+ size_t payload_size = nr_cpus * sizeof(int);
+ struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+
+ if (cpus != NULL) {
+ cpus->nr = nr_cpus;
+ memcpy(cpus->map, tmp_cpus, payload_size);
+ }
+
+ return cpus;
+}
+
+struct cpu_map *cpu_map__read(FILE *file)
+{
+ struct cpu_map *cpus = NULL;
+ int nr_cpus = 0;
+ int *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
+ int n, cpu, prev;
+ char sep;
+
+ sep = 0;
+ prev = -1;
+ for (;;) {
+ n = fscanf(file, "%u%c", &cpu, &sep);
+ if (n <= 0)
+ break;
+ if (prev >= 0) {
+ int new_max = nr_cpus + cpu - prev - 1;
+
+ if (new_max >= max_entries) {
+ max_entries = new_max + MAX_NR_CPUS / 2;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ while (++prev < cpu)
+ tmp_cpus[nr_cpus++] = prev;
+ }
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto out_free_tmp;
+ tmp_cpus = tmp;
+ }
+
+ tmp_cpus[nr_cpus++] = cpu;
+ if (n == 2 && sep == '-')
+ prev = cpu;
+ else
+ prev = -1;
+ if (n == 1 || sep == '\n')
+ break;
+ }
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
+out_free_tmp:
+ free(tmp_cpus);
+ return cpus;
+}
+
+static struct cpu_map *cpu_map__read_all_cpu_map(void)
+{
+ struct cpu_map *cpus = NULL;
+ FILE *onlnf;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (!onlnf)
+ return cpu_map__default_new();
+
+ cpus = cpu_map__read(onlnf);
+ fclose(onlnf);
+ return cpus;
+}
+
+struct cpu_map *cpu_map__new(const char *cpu_list)
+{
+ struct cpu_map *cpus = NULL;
+ unsigned long start_cpu, end_cpu = 0;
+ char *p = NULL;
+ int i, nr_cpus = 0;
+ int *tmp_cpus = NULL, *tmp;
+ int max_entries = 0;
+
+ if (!cpu_list)
+ return cpu_map__read_all_cpu_map();
+
+ if (!isdigit(*cpu_list))
+ goto out;
+
+ while (isdigit(*cpu_list)) {
+ p = NULL;
+ start_cpu = strtoul(cpu_list, &p, 0);
+ if (start_cpu >= INT_MAX
+ || (*p != '\0' && *p != ',' && *p != '-'))
+ goto invalid;
+
+ if (*p == '-') {
+ cpu_list = ++p;
+ p = NULL;
+ end_cpu = strtoul(cpu_list, &p, 0);
+
+ if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+ goto invalid;
+
+ if (end_cpu < start_cpu)
+ goto invalid;
+ } else {
+ end_cpu = start_cpu;
+ }
+
+ for (; start_cpu <= end_cpu; start_cpu++) {
+ /* check for duplicates */
+ for (i = 0; i < nr_cpus; i++)
+ if (tmp_cpus[i] == (int)start_cpu)
+ goto invalid;
+
+ if (nr_cpus == max_entries) {
+ max_entries += MAX_NR_CPUS;
+ tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ if (tmp == NULL)
+ goto invalid;
+ tmp_cpus = tmp;
+ }
+ tmp_cpus[nr_cpus++] = (int)start_cpu;
+ }
+ if (*p)
+ ++p;
+
+ cpu_list = p;
+ }
+
+ if (nr_cpus > 0)
+ cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
+ else
+ cpus = cpu_map__default_new();
+invalid:
+ free(tmp_cpus);
+out:
+ return cpus;
+}
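+
+/*
+ * Example (illustrative): cpu_map__new("0-2,4") returns a map with
+ * nr == 4 and map[] == { 0, 1, 2, 4 }, while cpu_map__new(NULL) falls
+ * back to the online mask read from sysfs.
+ */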
+
+size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
+{
+ int i;
+ size_t printed = fprintf(fp, "%d cpu%s: ",
+ map->nr, map->nr > 1 ? "s" : "");
+ for (i = 0; i < map->nr; ++i)
+ printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);
+
+ return printed + fprintf(fp, "\n");
+}
+
+struct cpu_map *cpu_map__dummy_new(void)
+{
+ struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+
+ if (cpus != NULL) {
+ cpus->nr = 1;
+ cpus->map[0] = -1;
+ }
+
+ return cpus;
+}
+
+void cpu_map__delete(struct cpu_map *map)
+{
+ free(map);
+}
+
+int cpu_map__get_socket(struct cpu_map *map, int idx)
+{
+ FILE *fp;
+ const char *mnt;
+ char path[PATH_MAX];
+ int cpu, ret;
+
+	if (idx >= map->nr)
+ return -1;
+
+ cpu = map->map[idx];
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/devices/system/cpu/cpu%d/topology/physical_package_id",
+ mnt, cpu);
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return -1;
+ ret = fscanf(fp, "%d", &cpu);
+ fclose(fp);
+ return ret == 1 ? cpu : -1;
+}
+
+static int cmp_ids(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
+ int (*f)(struct cpu_map *map, int cpu))
+{
+ struct cpu_map *c;
+ int nr = cpus->nr;
+ int cpu, s1, s2;
+
+	/* allocate for the worst case: one id per cpu */
+ c = calloc(1, sizeof(*c) + nr * sizeof(int));
+ if (!c)
+ return -1;
+
+ for (cpu = 0; cpu < nr; cpu++) {
+ s1 = f(cpus, cpu);
+ for (s2 = 0; s2 < c->nr; s2++) {
+ if (s1 == c->map[s2])
+ break;
+ }
+ if (s2 == c->nr) {
+ c->map[c->nr] = s1;
+ c->nr++;
+ }
+ }
+	/* ensure we process ids in increasing order */
+ qsort(c->map, c->nr, sizeof(int), cmp_ids);
+
+ *res = c;
+ return 0;
+}
+
+int cpu_map__get_core(struct cpu_map *map, int idx)
+{
+ FILE *fp;
+ const char *mnt;
+ char path[PATH_MAX];
+ int cpu, ret, s;
+
+	if (idx >= map->nr)
+ return -1;
+
+ cpu = map->map[idx];
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/devices/system/cpu/cpu%d/topology/core_id",
+ mnt, cpu);
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return -1;
+ ret = fscanf(fp, "%d", &cpu);
+ fclose(fp);
+ if (ret != 1)
+ return -1;
+
+ s = cpu_map__get_socket(map, idx);
+ if (s == -1)
+ return -1;
+
+	/*
+	 * Encode the socket in the upper 16 bits.
+	 * core_id is relative to the socket, and
+	 * we need a global id, so we combine
+	 * socket and core id.
+	 */
+ return (s << 16) | (cpu & 0xffff);
+}
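+
+/*
+ * Decoding sketch (hypothetical helpers, mirroring the encoding above;
+ * not part of the original file):
+ */
+static inline int example_core_to_socket(int id)
+{
+	return id >> 16;
+}
+
+static inline int example_core_to_id(int id)
+{
+	return id & 0xffff;
+}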
+
+int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
+{
+ return cpu_map__build_map(cpus, sockp, cpu_map__get_socket);
+}
+
+int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
+{
+ return cpu_map__build_map(cpus, corep, cpu_map__get_core);
+}
+
+/* Set up simple routines to access node numbers given a cpu number */
+static int get_max_num(char *path, int *max)
+{
+ size_t num;
+ char *buf;
+ int err = 0;
+
+ if (filename__read_str(path, &buf, &num))
+ return -1;
+
+ buf[num] = '\0';
+
+ /* start on the right, to find highest node num */
+ while (--num) {
+ if ((buf[num] == ',') || (buf[num] == '-')) {
+ num++;
+ break;
+ }
+ }
+ if (sscanf(&buf[num], "%d", max) < 1) {
+ err = -1;
+ goto out;
+ }
+
+ /* convert from 0-based to 1-based */
+ (*max)++;
+
+out:
+ free(buf);
+ return err;
+}
+
+/* Determine highest possible cpu in the system for sparse allocation */
+static void set_max_cpu_num(void)
+{
+ const char *mnt;
+ char path[PATH_MAX];
+ int ret = -1;
+
+ /* set up default */
+ max_cpu_num = 4096;
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ goto out;
+
+ /* get the highest possible cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
+ if (ret == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+
+ ret = get_max_num(path, &max_cpu_num);
+
+out:
+ if (ret)
+ pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
+}
+
+/* Determine highest possible node in the system for sparse allocation */
+static void set_max_node_num(void)
+{
+ const char *mnt;
+ char path[PATH_MAX];
+ int ret = -1;
+
+ /* set up default */
+ max_node_num = 8;
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ goto out;
+
+	/* get the highest possible node number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
+ if (ret == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+
+ ret = get_max_num(path, &max_node_num);
+
+out:
+ if (ret)
+ pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
+}
+
+static int init_cpunode_map(void)
+{
+ int i;
+
+ set_max_cpu_num();
+ set_max_node_num();
+
+ cpunode_map = calloc(max_cpu_num, sizeof(int));
+ if (!cpunode_map) {
+ pr_err("%s: calloc failed\n", __func__);
+ return -1;
+ }
+
+ for (i = 0; i < max_cpu_num; i++)
+ cpunode_map[i] = -1;
+
+ return 0;
+}
+
+int cpu__setup_cpunode_map(void)
+{
+ struct dirent *dent1, *dent2;
+ DIR *dir1, *dir2;
+ unsigned int cpu, mem;
+ char buf[PATH_MAX];
+ char path[PATH_MAX];
+ const char *mnt;
+ int n;
+
+ /* initialize globals */
+ if (init_cpunode_map())
+ return -1;
+
+ mnt = sysfs__mountpoint();
+ if (!mnt)
+ return 0;
+
+ n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
+ if (n == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ return -1;
+ }
+
+ dir1 = opendir(path);
+ if (!dir1)
+ return 0;
+
+ /* walk tree and setup map */
+ while ((dent1 = readdir(dir1)) != NULL) {
+ if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
+ continue;
+
+ n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
+ if (n == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ continue;
+ }
+
+ dir2 = opendir(buf);
+ if (!dir2)
+ continue;
+ while ((dent2 = readdir(dir2)) != NULL) {
+ if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
+ continue;
+ cpunode_map[cpu] = mem;
+ }
+ closedir(dir2);
+ }
+ closedir(dir1);
+ return 0;
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
new file mode 100644
index 00000000000..61a65484900
--- /dev/null
+++ b/tools/perf/util/cpumap.h
@@ -0,0 +1,84 @@
+#ifndef __PERF_CPUMAP_H
+#define __PERF_CPUMAP_H
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include "perf.h"
+#include "util/debug.h"
+
+struct cpu_map {
+ int nr;
+ int map[];
+};
+
+struct cpu_map *cpu_map__new(const char *cpu_list);
+struct cpu_map *cpu_map__dummy_new(void);
+void cpu_map__delete(struct cpu_map *map);
+struct cpu_map *cpu_map__read(FILE *file);
+size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
+int cpu_map__get_socket(struct cpu_map *map, int idx);
+int cpu_map__get_core(struct cpu_map *map, int idx);
+int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
+int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
+
+static inline int cpu_map__socket(struct cpu_map *sock, int s)
+{
+	if (!sock || s >= sock->nr || s < 0)
+ return 0;
+ return sock->map[s];
+}
+
+static inline int cpu_map__id_to_socket(int id)
+{
+ return id >> 16;
+}
+
+static inline int cpu_map__id_to_cpu(int id)
+{
+ return id & 0xffff;
+}
+
+static inline int cpu_map__nr(const struct cpu_map *map)
+{
+ return map ? map->nr : 1;
+}
+
+static inline bool cpu_map__empty(const struct cpu_map *map)
+{
+ return map ? map->map[0] == -1 : true;
+}
+
+int max_cpu_num;
+int max_node_num;
+int *cpunode_map;
+
+int cpu__setup_cpunode_map(void);
+
+static inline int cpu__max_node(void)
+{
+ if (unlikely(!max_node_num))
+ pr_debug("cpu_map not initialized\n");
+
+ return max_node_num;
+}
+
+static inline int cpu__max_cpu(void)
+{
+ if (unlikely(!max_cpu_num))
+ pr_debug("cpu_map not initialized\n");
+
+ return max_cpu_num;
+}
+
+static inline int cpu__get_node(int cpu)
+{
+ if (unlikely(cpunode_map == NULL)) {
+ pr_debug("cpu_map not initialized\n");
+ return -1;
+ }
+
+ return cpunode_map[cpu];
+}
+
+#endif /* __PERF_CPUMAP_H */
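
A quick editorial sketch of how this map and the socket/core encoding from cpumap.c fit together; it is a minimal sketch, assuming it is compiled inside the perf tree so "cpumap.h" resolves and the sysfs topology files exist:

#include <stdio.h>
#include "cpumap.h"

/* Build a core map for cpus 0-3 and decode the (socket << 16 | core) ids. */
int main(void)
{
	struct cpu_map *cpus = cpu_map__new("0-3");	/* NULL would mean all online cpus */
	struct cpu_map *cores;
	int i;

	if (!cpus || cpu_map__build_core_map(cpus, &cores))
		return 1;

	cpu_map__fprintf(cpus, stdout);
	for (i = 0; i < cpu_map__nr(cores); i++)
		printf("core %d on socket %d\n",
		       cpu_map__id_to_cpu(cores->map[i]),
		       cpu_map__id_to_socket(cores->map[i]));

	cpu_map__delete(cores);
	cpu_map__delete(cpus);
	return 0;
}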
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c
new file mode 100644
index 00000000000..aada3ac5e89
--- /dev/null
+++ b/tools/perf/util/ctype.c
@@ -0,0 +1,39 @@
+/*
+ * Sane locale-independent, ASCII ctype.
+ *
+ * No surprises, and works with signed and unsigned chars.
+ */
+#include "util.h"
+
+enum {
+ S = GIT_SPACE,
+ A = GIT_ALPHA,
+ D = GIT_DIGIT,
+ G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */
+	R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | */
+ P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */
+
+ PS = GIT_SPACE | GIT_PRINT_EXTRA,
+};
+
+unsigned char sane_ctype[256] = {
+/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */
+ PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */
+ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */
+ A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */
+ A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */
+ /* Nothing in the 128.. range */
+};
+
+const char *graph_line =
+ "_____________________________________________________________________"
+ "_____________________________________________________________________";
+const char *graph_dotted_line =
+ "---------------------------------------------------------------------"
+ "---------------------------------------------------------------------"
+ "---------------------------------------------------------------------";
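
The table is meant to be indexed through helper macros that live in util.h, which is not part of this hunk; the following is a hedged reconstruction of that pattern, with the GIT_ALPHA bit value assumed purely for illustration:

#include <stdio.h>

#define GIT_ALPHA 0x04	/* assumed bit value, for illustration only */
extern unsigned char sane_ctype[256];

/* Cast through unsigned char so negative 'signed char' values stay in range. */
#define sane_istest(x, mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
#define sane_isalpha(x) sane_istest(x, GIT_ALPHA)

int main(void)
{
	printf("%d %d\n", sane_isalpha('a'), sane_isalpha('1'));	/* prints: 1 0 */
	return 0;
}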
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
new file mode 100644
index 00000000000..55de44ecebe
--- /dev/null
+++ b/tools/perf/util/data.c
@@ -0,0 +1,133 @@
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "data.h"
+#include "util.h"
+
+static bool check_pipe(struct perf_data_file *file)
+{
+ struct stat st;
+ bool is_pipe = false;
+ int fd = perf_data_file__is_read(file) ?
+ STDIN_FILENO : STDOUT_FILENO;
+
+ if (!file->path) {
+ if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
+ is_pipe = true;
+ } else {
+ if (!strcmp(file->path, "-"))
+ is_pipe = true;
+ }
+
+ if (is_pipe)
+ file->fd = fd;
+
+ return file->is_pipe = is_pipe;
+}
+
+static int check_backup(struct perf_data_file *file)
+{
+ struct stat st;
+
+ if (!stat(file->path, &st) && st.st_size) {
+ /* TODO check errors properly */
+ char oldname[PATH_MAX];
+ snprintf(oldname, sizeof(oldname), "%s.old",
+ file->path);
+ unlink(oldname);
+ rename(file->path, oldname);
+ }
+
+ return 0;
+}
+
+static int open_file_read(struct perf_data_file *file)
+{
+ struct stat st;
+ int fd;
+
+ fd = open(file->path, O_RDONLY);
+ if (fd < 0) {
+ int err = errno;
+
+ pr_err("failed to open %s: %s", file->path, strerror(err));
+ if (err == ENOENT && !strcmp(file->path, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -err;
+ }
+
+ if (fstat(fd, &st) < 0)
+ goto out_close;
+
+ if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ file->path);
+ goto out_close;
+ }
+
+ if (!st.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ file->path);
+ goto out_close;
+ }
+
+ file->size = st.st_size;
+ return fd;
+
+ out_close:
+ close(fd);
+ return -1;
+}
+
+static int open_file_write(struct perf_data_file *file)
+{
+ int fd;
+
+ if (check_backup(file))
+ return -1;
+
+ fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+
+ if (fd < 0)
+ pr_err("failed to open %s : %s\n", file->path, strerror(errno));
+
+ return fd;
+}
+
+static int open_file(struct perf_data_file *file)
+{
+ int fd;
+
+ fd = perf_data_file__is_read(file) ?
+ open_file_read(file) : open_file_write(file);
+
+ file->fd = fd;
+ return fd < 0 ? -1 : 0;
+}
+
+int perf_data_file__open(struct perf_data_file *file)
+{
+ if (check_pipe(file))
+ return 0;
+
+ if (!file->path)
+ file->path = "perf.data";
+
+ return open_file(file);
+}
+
+void perf_data_file__close(struct perf_data_file *file)
+{
+ close(file->fd);
+}
+
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size)
+{
+ return writen(file->fd, buf, size);
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
new file mode 100644
index 00000000000..2b15d0c95c7
--- /dev/null
+++ b/tools/perf/util/data.h
@@ -0,0 +1,50 @@
+#ifndef __PERF_DATA_H
+#define __PERF_DATA_H
+
+#include <stdbool.h>
+
+enum perf_data_mode {
+ PERF_DATA_MODE_WRITE,
+ PERF_DATA_MODE_READ,
+};
+
+struct perf_data_file {
+ const char *path;
+ int fd;
+ bool is_pipe;
+ bool force;
+ unsigned long size;
+ enum perf_data_mode mode;
+};
+
+static inline bool perf_data_file__is_read(struct perf_data_file *file)
+{
+ return file->mode == PERF_DATA_MODE_READ;
+}
+
+static inline bool perf_data_file__is_write(struct perf_data_file *file)
+{
+ return file->mode == PERF_DATA_MODE_WRITE;
+}
+
+static inline int perf_data_file__is_pipe(struct perf_data_file *file)
+{
+ return file->is_pipe;
+}
+
+static inline int perf_data_file__fd(struct perf_data_file *file)
+{
+ return file->fd;
+}
+
+static inline unsigned long perf_data_file__size(struct perf_data_file *file)
+{
+ return file->size;
+}
+
+int perf_data_file__open(struct perf_data_file *file);
+void perf_data_file__close(struct perf_data_file *file);
+ssize_t perf_data_file__write(struct perf_data_file *file,
+ void *buf, size_t size);
+
+#endif /* __PERF_DATA_H */
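
A minimal sketch of the write-side usage of this API, assuming a regular output file (a path of "-", or a missing path on a pipe, would select pipe mode instead); dump_blob is a hypothetical helper, not part of this patch:

#include <sys/types.h>
#include <stddef.h>
#include "data.h"

/* Write one buffer to perf.data; an existing file is renamed to perf.data.old. */
static int dump_blob(void *buf, size_t len)
{
	struct perf_data_file file = {
		.path = "perf.data",
		.mode = PERF_DATA_MODE_WRITE,
	};
	ssize_t written;

	if (perf_data_file__open(&file))
		return -1;

	written = perf_data_file__write(&file, buf, len);
	perf_data_file__close(&file);

	return written == (ssize_t)len ? 0 : -1;
}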
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
new file mode 100644
index 00000000000..299b5558650
--- /dev/null
+++ b/tools/perf/util/debug.c
@@ -0,0 +1,107 @@
+/* For general debugging purposes */
+
+#include "../perf.h"
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "cache.h"
+#include "color.h"
+#include "event.h"
+#include "debug.h"
+#include "util.h"
+#include "target.h"
+
+int verbose;
+bool dump_trace = false, quiet = false;
+
+static int _eprintf(int level, const char *fmt, va_list args)
+{
+ int ret = 0;
+
+ if (verbose >= level) {
+ if (use_browser >= 1)
+ ui_helpline__vshow(fmt, args);
+ else
+ ret = vfprintf(stderr, fmt, args);
+ }
+
+ return ret;
+}
+
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = _eprintf(level, fmt, args);
+ va_end(args);
+
+ return ret;
+}
+
+/*
+ * Overload libtraceevent's standard info print
+ * function; its output is displayed with -v in perf.
+ */
+void pr_stat(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ _eprintf(1, fmt, args);
+ va_end(args);
+ eprintf(1, "\n");
+}
+
+int dump_printf(const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (dump_trace) {
+ va_start(args, fmt);
+ ret = vprintf(fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+void trace_event(union perf_event *event)
+{
+ unsigned char *raw_event = (void *)event;
+ const char *color = PERF_COLOR_BLUE;
+ int i, j;
+
+ if (!dump_trace)
+ return;
+
+ printf(".");
+ color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
+ event->header.size);
+
+ for (i = 0; i < event->header.size; i++) {
+ if ((i & 15) == 0) {
+ printf(".");
+ color_fprintf(stdout, color, " %04x: ", i);
+ }
+
+ color_fprintf(stdout, color, " %02x", raw_event[i]);
+
+ if (((i & 15) == 15) || i == event->header.size-1) {
+ color_fprintf(stdout, color, " ");
+ for (j = 0; j < 15-(i & 15); j++)
+ color_fprintf(stdout, color, " ");
+ for (j = i & ~15; j <= i; j++) {
+ color_fprintf(stdout, color, "%c",
+ isprint(raw_event[j]) ?
+ raw_event[j] : '.');
+ }
+ color_fprintf(stdout, color, "\n");
+ }
+ }
+ printf(".\n");
+}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
new file mode 100644
index 00000000000..443694c36b0
--- /dev/null
+++ b/tools/perf/util/debug.h
@@ -0,0 +1,22 @@
+/* For general debugging purposes */
+#ifndef __PERF_DEBUG_H
+#define __PERF_DEBUG_H
+
+#include <stdbool.h>
+#include "event.h"
+#include "../ui/helpline.h"
+#include "../ui/progress.h"
+#include "../ui/util.h"
+
+extern int verbose;
+extern bool quiet, dump_trace;
+
+int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+void trace_event(union perf_event *event);
+
+int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+
+void pr_stat(const char *fmt, ...);
+
+#endif /* __PERF_DEBUG_H */
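
For reference, a small sketch of how these knobs are driven; both variables are defined in debug.c above, and dump_printf() emits output only once dump_trace is set (typically via perf's -D/--dump-raw-trace option). debug_demo is a hypothetical helper:

#include "debug.h"

static void debug_demo(void)
{
	verbose = 2;		/* eprintf(level, ...) in debug.c prints when verbose >= level */
	dump_trace = true;	/* enables dump_printf() and trace_event() output */

	dump_printf("raw dump enabled, verbose=%d\n", verbose);
}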
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
new file mode 100644
index 00000000000..819f10414f0
--- /dev/null
+++ b/tools/perf/util/dso.c
@@ -0,0 +1,900 @@
+#include <asm/bug.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include "symbol.h"
+#include "dso.h"
+#include "machine.h"
+#include "util.h"
+#include "debug.h"
+
+char dso__symtab_origin(const struct dso *dso)
+{
+ static const char origin[] = {
+ [DSO_BINARY_TYPE__KALLSYMS] = 'k',
+ [DSO_BINARY_TYPE__VMLINUX] = 'v',
+ [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
+ [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
+ [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
+ [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
+ [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
+ [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
+ [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
+ [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
+ [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
+ [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
+ };
+
+ if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
+ return '!';
+ return origin[dso->symtab_type];
+}
+
+int dso__read_binary_type_filename(const struct dso *dso,
+ enum dso_binary_type type,
+ char *root_dir, char *filename, size_t size)
+{
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+ int ret = 0;
+
+ switch (type) {
+ case DSO_BINARY_TYPE__DEBUGLINK: {
+ char *debuglink;
+
+ strncpy(filename, dso->long_name, size);
+ debuglink = filename + dso->long_name_len;
+ while (debuglink != filename && *debuglink != '/')
+ debuglink--;
+ if (*debuglink == '/')
+ debuglink++;
+ ret = filename__read_debuglink(dso->long_name, debuglink,
+ size - (debuglink - filename));
+ }
+ break;
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+ /* skip the locally configured cache if a symfs is given */
+ if (symbol_conf.symfs[0] ||
+ (dso__build_id_filename(dso, filename, size) == NULL))
+ ret = -1;
+ break;
+
+ case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+ snprintf(filename, size, "%s/usr/lib/debug%s.debug",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ snprintf(filename, size, "%s/usr/lib/debug%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+ {
+ const char *last_slash;
+ size_t len;
+ size_t dir_size;
+
+ last_slash = dso->long_name + dso->long_name_len;
+ while (last_slash != dso->long_name && *last_slash != '/')
+ last_slash--;
+
+ len = scnprintf(filename, size, "%s", symbol_conf.symfs);
+ dir_size = last_slash - dso->long_name + 2;
+ if (dir_size > (size - len)) {
+ ret = -1;
+ break;
+ }
+ len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
+		len += scnprintf(filename + len, size - len, ".debug%s",
+ last_slash);
+ break;
+ }
+
+ case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+ if (!dso->has_build_id) {
+ ret = -1;
+ break;
+ }
+
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id),
+ build_id_hex);
+ snprintf(filename, size,
+ "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
+ symbol_conf.symfs, build_id_hex, build_id_hex + 2);
+ break;
+
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+ snprintf(filename, size, "%s%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__GUEST_KMODULE:
+ snprintf(filename, size, "%s%s%s", symbol_conf.symfs,
+ root_dir, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+ snprintf(filename, size, "%s%s", symbol_conf.symfs,
+ dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__KCORE:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ snprintf(filename, size, "%s", dso->long_name);
+ break;
+
+ default:
+ case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__NOT_FOUND:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Global list of open DSOs and the counter.
+ */
+static LIST_HEAD(dso__data_open);
+static long dso__data_open_cnt;
+
+static void dso__list_add(struct dso *dso)
+{
+ list_add_tail(&dso->data.open_entry, &dso__data_open);
+ dso__data_open_cnt++;
+}
+
+static void dso__list_del(struct dso *dso)
+{
+ list_del(&dso->data.open_entry);
+ WARN_ONCE(dso__data_open_cnt <= 0,
+ "DSO data fd counter out of bounds.");
+ dso__data_open_cnt--;
+}
+
+static void close_first_dso(void);
+
+static int do_open(char *name)
+{
+ int fd;
+
+ do {
+ fd = open(name, O_RDONLY);
+ if (fd >= 0)
+ return fd;
+
+		pr_debug("dso open failed: %s\n", strerror(errno));
+ if (!dso__data_open_cnt || errno != EMFILE)
+ break;
+
+ close_first_dso();
+ } while (1);
+
+ return -1;
+}
+
+static int __open_dso(struct dso *dso, struct machine *machine)
+{
+ int fd;
+ char *root_dir = (char *)"";
+ char *name = malloc(PATH_MAX);
+
+ if (!name)
+ return -ENOMEM;
+
+ if (machine)
+ root_dir = machine->root_dir;
+
+ if (dso__read_binary_type_filename(dso, dso->binary_type,
+ root_dir, name, PATH_MAX)) {
+ free(name);
+ return -EINVAL;
+ }
+
+ fd = do_open(name);
+ free(name);
+ return fd;
+}
+
+static void check_data_close(void);
+
+/**
+ * open_dso - Open DSO data file
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * Opens @dso's data file descriptor and updates the
+ * list/count of open DSO objects.
+ */
+static int open_dso(struct dso *dso, struct machine *machine)
+{
+ int fd = __open_dso(dso, machine);
+
+	if (fd >= 0) {
+ dso__list_add(dso);
+ /*
+ * Check if we crossed the allowed number
+ * of opened DSOs and close one if needed.
+ */
+ check_data_close();
+ }
+
+ return fd;
+}
+
+static void close_data_fd(struct dso *dso)
+{
+ if (dso->data.fd >= 0) {
+ close(dso->data.fd);
+ dso->data.fd = -1;
+ dso->data.file_size = 0;
+ dso__list_del(dso);
+ }
+}
+
+/**
+ * close_dso - Close DSO data file
+ * @dso: dso object
+ *
+ * Closes @dso's data file descriptor and updates the
+ * list/count of open DSO objects.
+ */
+static void close_dso(struct dso *dso)
+{
+ close_data_fd(dso);
+}
+
+static void close_first_dso(void)
+{
+ struct dso *dso;
+
+ dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
+ close_dso(dso);
+}
+
+static rlim_t get_fd_limit(void)
+{
+ struct rlimit l;
+ rlim_t limit = 0;
+
+ /* Allow half of the current open fd limit. */
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (l.rlim_cur == RLIM_INFINITY)
+ limit = l.rlim_cur;
+ else
+ limit = l.rlim_cur / 2;
+ } else {
+ pr_err("failed to get fd limit\n");
+ limit = 1;
+ }
+
+ return limit;
+}
+
+static bool may_cache_fd(void)
+{
+ static rlim_t limit;
+
+ if (!limit)
+ limit = get_fd_limit();
+
+ if (limit == RLIM_INFINITY)
+ return true;
+
+ return limit > (rlim_t) dso__data_open_cnt;
+}
+
+/*
+ * Check and close the LRU dso if we crossed the allowed limit
+ * of opened dso file descriptors. The limit is half of the
+ * RLIMIT_NOFILE soft limit.
+ */
+static void check_data_close(void)
+{
+ bool cache_fd = may_cache_fd();
+
+ if (!cache_fd)
+ close_first_dso();
+}
+
+/**
+ * dso__data_close - Close DSO data file
+ * @dso: dso object
+ *
+ * External interface to close @dso's data file descriptor.
+ */
+void dso__data_close(struct dso *dso)
+{
+ close_dso(dso);
+}
+
+/**
+ * dso__data_fd - Get dso's data file descriptor
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * External interface to find the dso's file, open it and
+ * return the file descriptor.
+ */
+int dso__data_fd(struct dso *dso, struct machine *machine)
+{
+ enum dso_binary_type binary_type_data[] = {
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+ };
+ int i = 0;
+
+ if (dso->data.fd >= 0)
+ return dso->data.fd;
+
+ if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
+ dso->data.fd = open_dso(dso, machine);
+ return dso->data.fd;
+ }
+
+ do {
+ int fd;
+
+ dso->binary_type = binary_type_data[i++];
+
+ fd = open_dso(dso, machine);
+ if (fd >= 0)
+ return dso->data.fd = fd;
+
+ } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
+
+ return -EINVAL;
+}
+
+static void
+dso_cache__free(struct rb_root *root)
+{
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct dso_cache *cache;
+
+ cache = rb_entry(next, struct dso_cache, rb_node);
+ next = rb_next(&cache->rb_node);
+ rb_erase(&cache->rb_node, root);
+ free(cache);
+ }
+}
+
+static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
+{
+ struct rb_node * const *p = &root->rb_node;
+ const struct rb_node *parent = NULL;
+ struct dso_cache *cache;
+
+ while (*p != NULL) {
+ u64 end;
+
+ parent = *p;
+ cache = rb_entry(parent, struct dso_cache, rb_node);
+ end = cache->offset + DSO__DATA_CACHE_SIZE;
+
+ if (offset < cache->offset)
+ p = &(*p)->rb_left;
+ else if (offset >= end)
+ p = &(*p)->rb_right;
+ else
+ return cache;
+ }
+ return NULL;
+}
+
+static void
+dso_cache__insert(struct rb_root *root, struct dso_cache *new)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct dso_cache *cache;
+ u64 offset = new->offset;
+
+ while (*p != NULL) {
+ u64 end;
+
+ parent = *p;
+ cache = rb_entry(parent, struct dso_cache, rb_node);
+ end = cache->offset + DSO__DATA_CACHE_SIZE;
+
+ if (offset < cache->offset)
+ p = &(*p)->rb_left;
+ else if (offset >= end)
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, root);
+}
+
+static ssize_t
+dso_cache__memcpy(struct dso_cache *cache, u64 offset,
+ u8 *data, u64 size)
+{
+ u64 cache_offset = offset - cache->offset;
+ u64 cache_size = min(cache->size - cache_offset, size);
+
+ memcpy(data, cache->data + cache_offset, cache_size);
+ return cache_size;
+}
+
+static ssize_t
+dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
+{
+ struct dso_cache *cache;
+ ssize_t ret;
+
+ do {
+ u64 cache_offset;
+
+ ret = -ENOMEM;
+
+ cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
+ if (!cache)
+ break;
+
+ cache_offset = offset & DSO__DATA_CACHE_MASK;
+ ret = -EINVAL;
+
+ if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
+ break;
+
+ ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
+ if (ret <= 0)
+ break;
+
+ cache->offset = cache_offset;
+ cache->size = ret;
+ dso_cache__insert(&dso->data.cache, cache);
+
+ ret = dso_cache__memcpy(cache, offset, data, size);
+
+ } while (0);
+
+ if (ret <= 0)
+ free(cache);
+
+ return ret;
+}
+
+static ssize_t dso_cache_read(struct dso *dso, u64 offset,
+ u8 *data, ssize_t size)
+{
+ struct dso_cache *cache;
+
+ cache = dso_cache__find(&dso->data.cache, offset);
+ if (cache)
+ return dso_cache__memcpy(cache, offset, data, size);
+ else
+ return dso_cache__read(dso, offset, data, size);
+}
+
+/*
+ * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
+ * kept in the rb_tree. Any read of already cached data is served
+ * from the cache.
+ */
+static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
+{
+ ssize_t r = 0;
+ u8 *p = data;
+
+ do {
+ ssize_t ret;
+
+ ret = dso_cache_read(dso, offset, p, size);
+ if (ret < 0)
+ return ret;
+
+ /* Reached EOF, return what we have. */
+ if (!ret)
+ break;
+
+ BUG_ON(ret > size);
+
+ r += ret;
+ p += ret;
+ offset += ret;
+ size -= ret;
+
+ } while (size);
+
+ return r;
+}
+
+static int data_file_size(struct dso *dso)
+{
+ struct stat st;
+
+ if (!dso->data.file_size) {
+ if (fstat(dso->data.fd, &st)) {
+			pr_err("dso fstat failed: %s\n", strerror(errno));
+ return -1;
+ }
+ dso->data.file_size = st.st_size;
+ }
+
+ return 0;
+}
+
+static ssize_t data_read_offset(struct dso *dso, u64 offset,
+ u8 *data, ssize_t size)
+{
+ if (data_file_size(dso))
+ return -1;
+
+ /* Check the offset sanity. */
+ if (offset > dso->data.file_size)
+ return -1;
+
+ if (offset + size < offset)
+ return -1;
+
+ return cached_read(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_offset - Read data from dso file offset
+ * @dso: dso object
+ * @machine: machine object
+ * @offset: file offset
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from dso file offset. Open
+ * dso data file and use cached_read to get the data.
+ */
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size)
+{
+ if (dso__data_fd(dso, machine) < 0)
+ return -1;
+
+ return data_read_offset(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_addr - Read data from dso address
+ * @dso: dso object
+ * @map: the map holding @addr
+ * @machine: machine object
+ * @addr: virtual memory address
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from dso address.
+ */
+ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ u8 *data, ssize_t size)
+{
+ u64 offset = map->map_ip(map, addr);
+ return dso__data_read_offset(dso, machine, offset, data, size);
+}
+
+struct map *dso__new_map(const char *name)
+{
+ struct map *map = NULL;
+ struct dso *dso = dso__new(name);
+
+ if (dso)
+ map = map__new2(0, dso, MAP__FUNCTION);
+
+ return map;
+}
+
+struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
+ const char *short_name, int dso_type)
+{
+ /*
+ * The kernel dso could be created by build_id processing.
+ */
+ struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name);
+
+ /*
+ * We need to run this in all cases, since during the build_id
+ * processing we had no idea this was the kernel dso.
+ */
+ if (dso != NULL) {
+ dso__set_short_name(dso, short_name, false);
+ dso->kernel = dso_type;
+ }
+
+ return dso;
+}
+
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
+{
+ if (name == NULL)
+ return;
+
+ if (dso->long_name_allocated)
+ free((char *)dso->long_name);
+
+ dso->long_name = name;
+ dso->long_name_len = strlen(name);
+ dso->long_name_allocated = name_allocated;
+}
+
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
+{
+ if (name == NULL)
+ return;
+
+ if (dso->short_name_allocated)
+ free((char *)dso->short_name);
+
+ dso->short_name = name;
+ dso->short_name_len = strlen(name);
+ dso->short_name_allocated = name_allocated;
+}
+
+static void dso__set_basename(struct dso *dso)
+{
+ /*
+ * basename() may modify path buffer, so we must pass
+ * a copy.
+ */
+ char *base, *lname = strdup(dso->long_name);
+
+ if (!lname)
+ return;
+
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
+
+ free(lname);
+
+ if (!base)
+ return;
+
+ dso__set_short_name(dso, base, true);
+}
+
+int dso__name_len(const struct dso *dso)
+{
+ if (!dso)
+ return strlen("[unknown]");
+ if (verbose)
+ return dso->long_name_len;
+
+ return dso->short_name_len;
+}
+
+bool dso__loaded(const struct dso *dso, enum map_type type)
+{
+ return dso->loaded & (1 << type);
+}
+
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
+{
+ return dso->sorted_by_name & (1 << type);
+}
+
+void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
+{
+ dso->sorted_by_name |= (1 << type);
+}
+
+struct dso *dso__new(const char *name)
+{
+ struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
+
+ if (dso != NULL) {
+ int i;
+ strcpy(dso->name, name);
+ dso__set_long_name(dso, dso->name, false);
+ dso__set_short_name(dso, dso->name, false);
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
+ dso->data.cache = RB_ROOT;
+ dso->data.fd = -1;
+ dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
+ dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
+ dso->loaded = 0;
+ dso->rel = 0;
+ dso->sorted_by_name = 0;
+ dso->has_build_id = 0;
+ dso->has_srcline = 1;
+ dso->a2l_fails = 1;
+ dso->kernel = DSO_TYPE_USER;
+ dso->needs_swap = DSO_SWAP__UNSET;
+ INIT_LIST_HEAD(&dso->node);
+ INIT_LIST_HEAD(&dso->data.open_entry);
+ }
+
+ return dso;
+}
+
+void dso__delete(struct dso *dso)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ symbols__delete(&dso->symbols[i]);
+
+ if (dso->short_name_allocated) {
+ zfree((char **)&dso->short_name);
+ dso->short_name_allocated = false;
+ }
+
+ if (dso->long_name_allocated) {
+ zfree((char **)&dso->long_name);
+ dso->long_name_allocated = false;
+ }
+
+ dso__data_close(dso);
+ dso_cache__free(&dso->data.cache);
+ dso__free_a2l(dso);
+ zfree(&dso->symsrc_filename);
+ free(dso);
+}
+
+void dso__set_build_id(struct dso *dso, void *build_id)
+{
+ memcpy(dso->build_id, build_id, sizeof(dso->build_id));
+ dso->has_build_id = 1;
+}
+
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
+{
+ return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
+}
+
+void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
+{
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ return;
+ sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
+ if (sysfs__read_build_id(path, dso->build_id,
+ sizeof(dso->build_id)) == 0)
+ dso->has_build_id = true;
+}
+
+int dso__kernel_module_get_build_id(struct dso *dso,
+ const char *root_dir)
+{
+ char filename[PATH_MAX];
+ /*
+ * kernel module short names are of the form "[module]" and
+ * we need just "module" here.
+ */
+ const char *name = dso->short_name + 1;
+
+ snprintf(filename, sizeof(filename),
+ "%s/sys/module/%.*s/notes/.note.gnu.build-id",
+ root_dir, (int)strlen(name) - 1, name);
+
+ if (sysfs__read_build_id(filename, dso->build_id,
+ sizeof(dso->build_id)) == 0)
+ dso->has_build_id = true;
+
+ return 0;
+}
+
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
+{
+ bool have_build_id = false;
+ struct dso *pos;
+
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
+ if (pos->has_build_id) {
+ have_build_id = true;
+ continue;
+ }
+ if (filename__read_build_id(pos->long_name, pos->build_id,
+ sizeof(pos->build_id)) > 0) {
+ have_build_id = true;
+ pos->has_build_id = true;
+ }
+ }
+
+ return have_build_id;
+}
+
+void dsos__add(struct list_head *head, struct dso *dso)
+{
+ list_add_tail(&dso->node, head);
+}
+
+struct dso *dsos__find(const struct list_head *head, const char *name, bool cmp_short)
+{
+ struct dso *pos;
+
+ if (cmp_short) {
+ list_for_each_entry(pos, head, node)
+ if (strcmp(pos->short_name, name) == 0)
+ return pos;
+ return NULL;
+ }
+ list_for_each_entry(pos, head, node)
+ if (strcmp(pos->long_name, name) == 0)
+ return pos;
+ return NULL;
+}
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name)
+{
+ struct dso *dso = dsos__find(head, name, false);
+
+ if (!dso) {
+ dso = dso__new(name);
+ if (dso != NULL) {
+ dsos__add(head, dso);
+ dso__set_basename(dso);
+ }
+ }
+
+ return dso;
+}
+
+size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ if (skip && skip(pos, parm))
+ continue;
+ ret += dso__fprintf_buildid(pos, fp);
+ ret += fprintf(fp, " %s\n", pos->long_name);
+ }
+ return ret;
+}
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ ret += dso__fprintf(pos, i, fp);
+ }
+
+ return ret;
+}
+
+size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+ return fprintf(fp, "%s", sbuild_id);
+}
+
+size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
+
+ if (dso->short_name != dso->long_name)
+ ret += fprintf(fp, "%s, ", dso->long_name);
+ ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
+ dso__loaded(dso, type) ? "" : "NOT ");
+ ret += dso__fprintf_buildid(dso, fp);
+ ret += fprintf(fp, ")\n");
+ for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ ret += symbol__fprintf(pos, fp);
+ }
+
+ return ret;
+}
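
An editorial sketch of the find-or-create pattern the dsos__* list helpers above implement; the local list head stands in for a machine's dso list (e.g. the machine->kernel_dsos referenced in dso__kernel_findnew), and the library path is just an example string:

#include <stdio.h>
#include <linux/list.h>
#include "dso.h"

static void dso_list_demo(void)
{
	LIST_HEAD(dsos);
	struct dso *dso;

	/* First lookup misses, so the dso is created and its basename
	 * becomes the short name (see dso__set_basename above). */
	dso = __dsos__findnew(&dsos, "/usr/lib64/libc-2.17.so");
	if (!dso)
		return;

	/* A second lookup returns the same object instead of a new one. */
	if (dso == dsos__find(&dsos, "/usr/lib64/libc-2.17.so", false))
		dso__fprintf(dso, MAP__FUNCTION, stdout);
}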
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
new file mode 100644
index 00000000000..ad553ba257b
--- /dev/null
+++ b/tools/perf/util/dso.h
@@ -0,0 +1,232 @@
+#ifndef __PERF_DSO
+#define __PERF_DSO
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "map.h"
+#include "build-id.h"
+
+enum dso_binary_type {
+ DSO_BINARY_TYPE__KALLSYMS = 0,
+ DSO_BINARY_TYPE__GUEST_KALLSYMS,
+ DSO_BINARY_TYPE__VMLINUX,
+ DSO_BINARY_TYPE__GUEST_VMLINUX,
+ DSO_BINARY_TYPE__JAVA_JIT,
+ DSO_BINARY_TYPE__DEBUGLINK,
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__GUEST_KMODULE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__KCORE,
+ DSO_BINARY_TYPE__GUEST_KCORE,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+enum dso_kernel_type {
+ DSO_TYPE_USER = 0,
+ DSO_TYPE_KERNEL,
+ DSO_TYPE_GUEST_KERNEL
+};
+
+enum dso_swap_type {
+ DSO_SWAP__UNSET,
+ DSO_SWAP__NO,
+ DSO_SWAP__YES,
+};
+
+#define DSO__SWAP(dso, type, val) \
+({ \
+ type ____r = val; \
+ BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \
+ if (dso->needs_swap == DSO_SWAP__YES) { \
+ switch (sizeof(____r)) { \
+ case 2: \
+ ____r = bswap_16(val); \
+ break; \
+ case 4: \
+ ____r = bswap_32(val); \
+ break; \
+ case 8: \
+ ____r = bswap_64(val); \
+ break; \
+ default: \
+ BUG_ON(1); \
+ } \
+ } \
+ ____r; \
+})
+
+#define DSO__DATA_CACHE_SIZE 4096
+#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1)
+
+struct dso_cache {
+ struct rb_node rb_node;
+ u64 offset;
+ u64 size;
+ char data[0];
+};
+
+struct dso {
+ struct list_head node;
+ struct rb_root symbols[MAP__NR_TYPES];
+ struct rb_root symbol_names[MAP__NR_TYPES];
+ void *a2l;
+ char *symsrc_filename;
+ unsigned int a2l_fails;
+ enum dso_kernel_type kernel;
+ enum dso_swap_type needs_swap;
+ enum dso_binary_type symtab_type;
+ enum dso_binary_type binary_type;
+ u8 adjust_symbols:1;
+ u8 has_build_id:1;
+ u8 has_srcline:1;
+ u8 hit:1;
+ u8 annotate_warned:1;
+ u8 short_name_allocated:1;
+ u8 long_name_allocated:1;
+ u8 sorted_by_name;
+ u8 loaded;
+ u8 rel;
+ u8 build_id[BUILD_ID_SIZE];
+ const char *short_name;
+ const char *long_name;
+ u16 long_name_len;
+ u16 short_name_len;
+
+ /* dso data file */
+ struct {
+ struct rb_root cache;
+ int fd;
+ size_t file_size;
+ struct list_head open_entry;
+ } data;
+
+ char name[0];
+};
+
+/* dso__for_each_symbol - iterate over the symbols of given type
+ *
+ * @dso: the 'struct dso *' whose symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as a temporary storage
+ * @type: the 'enum map_type' type of symbols
+ */
+#define dso__for_each_symbol(dso, pos, n, type) \
+ symbols__for_each_entry(&(dso)->symbols[(type)], pos, n)
+
+static inline void dso__set_loaded(struct dso *dso, enum map_type type)
+{
+ dso->loaded |= (1 << type);
+}
+
+struct dso *dso__new(const char *name);
+void dso__delete(struct dso *dso);
+
+void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
+void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
+
+int dso__name_len(const struct dso *dso);
+
+bool dso__loaded(const struct dso *dso, enum map_type type);
+
+bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
+void dso__set_sorted_by_name(struct dso *dso, enum map_type type);
+void dso__sort_by_name(struct dso *dso, enum map_type type);
+
+void dso__set_build_id(struct dso *dso, void *build_id);
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
+void dso__read_running_kernel_build_id(struct dso *dso,
+ struct machine *machine);
+int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
+
+char dso__symtab_origin(const struct dso *dso);
+int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
+ char *root_dir, char *filename, size_t size);
+
+/*
+ * The dso__data_* external interface provides following functions:
+ * dso__data_fd
+ * dso__data_close
+ * dso__data_read_offset
+ * dso__data_read_addr
+ *
+ * Please refer to dso.c for the documentation of each function
+ * and its arguments. The following text explains the dso file
+ * descriptor caching.
+ *
+ * The dso__data* interface allows caching of opened file descriptors
+ * to speed up the dso data accesses. The idea is to leave the file
+ * descriptor opened ideally for the whole life of the dso object.
+ *
+ * The current usage of the dso__data_* interface is as follows:
+ *
+ * Get DSO's fd:
+ * int fd = dso__data_fd(dso, machine);
+ * USE 'fd' SOMEHOW
+ *
+ * Read DSO's data:
+ * n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE);
+ * n = dso__data_read_addr(dso_0, &machine, 0, buf, BUFSIZE);
+ *
+ * Eventually close DSO's fd:
+ * dso__data_close(dso);
+ *
+ * It is not necessary to close the DSO object data file. Each time a
+ * new DSO data file is opened, the limit (RLIMIT_NOFILE/2) is checked.
+ * Once it is crossed, the oldest opened DSO object's file is closed.
+ *
+ * The dso__delete function calls the close_dso function to ensure the
+ * data file descriptor gets closed/unmapped before the dso object
+ * is freed.
+ *
+ * TODO
+ */
+int dso__data_fd(struct dso *dso, struct machine *machine);
+void dso__data_close(struct dso *dso);
+
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size);
+ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ u8 *data, ssize_t size);
+
+struct map *dso__new_map(const char *name);
+struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
+ const char *short_name, int dso_type);
+
+void dsos__add(struct list_head *head, struct dso *dso);
+struct dso *dsos__find(const struct list_head *head, const char *name,
+ bool cmp_short);
+struct dso *__dsos__findnew(struct list_head *head, const char *name);
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
+
+size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool (skip)(struct dso *dso, int parm), int parm);
+size_t __dsos__fprintf(struct list_head *head, FILE *fp);
+
+size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ enum map_type type, FILE *fp);
+size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+static inline bool dso__is_vmlinux(struct dso *dso)
+{
+ return dso->binary_type == DSO_BINARY_TYPE__VMLINUX ||
+ dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+}
+
+static inline bool dso__is_kcore(struct dso *dso)
+{
+ return dso->binary_type == DSO_BINARY_TYPE__KCORE ||
+ dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
+}
+
+void dso__free_a2l(struct dso *dso);
+
+#endif /* __PERF_DSO */
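
A short sketch of the DSO__SWAP() macro defined above; needs_swap is normally decided by the ELF readers (not part of this hunk) before any field is converted, and dso_read_u32 is a hypothetical helper:

#include <byteswap.h>
#include "dso.h"

/* Convert a 4-byte value read from the dso's file into host byte order.
 * BUG_ON fires if needs_swap was never initialized (DSO_SWAP__UNSET). */
static u32 dso_read_u32(struct dso *dso, u32 raw)
{
	return DSO__SWAP(dso, u32, raw);	/* bswap_32() only when DSO_SWAP__YES */
}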
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
new file mode 100644
index 00000000000..cc66c4049e0
--- /dev/null
+++ b/tools/perf/util/dwarf-aux.c
@@ -0,0 +1,884 @@
+/*
+ * dwarf-aux.c : libdw auxiliary interfaces
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <stdbool.h>
+#include "util.h"
+#include "debug.h"
+#include "dwarf-aux.h"
+
+/**
+ * cu_find_realpath - Find the realpath of the target file
+ * @cu_die: A DIE (debugging information entry) of a CU (compilation unit)
+ * @fname: The tail filename of the target file
+ *
+ * Find the real (long) path of @fname in @cu_die.
+ */
+const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
+{
+ Dwarf_Files *files;
+ size_t nfiles, i;
+ const char *src = NULL;
+ int ret;
+
+ if (!fname)
+ return NULL;
+
+ ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
+ if (ret != 0)
+ return NULL;
+
+ for (i = 0; i < nfiles; i++) {
+ src = dwarf_filesrc(files, i, NULL, NULL);
+ if (strtailcmp(src, fname) == 0)
+ break;
+ }
+ if (i == nfiles)
+ return NULL;
+ return src;
+}
+
+/**
+ * cu_get_comp_dir - Get the path of compilation directory
+ * @cu_die: a CU DIE
+ *
+ * Get the path of compilation directory of given @cu_die.
+ * Since this depends on DW_AT_comp_dir, which older gcc versions
+ * may not emit, this returns NULL in that case.
+ */
+const char *cu_get_comp_dir(Dwarf_Die *cu_die)
+{
+ Dwarf_Attribute attr;
+ if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
+ return NULL;
+ return dwarf_formstring(&attr);
+}
+
+/**
+ * cu_find_lineinfo - Get a line number and file name for given address
+ * @cu_die: a CU DIE
+ * @addr: An address
+ * @fname: a pointer which returns the file name string
+ * @lineno: a pointer which returns the line number
+ *
+ * Find a line number and file name for @addr in @cu_die.
+ */
+int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
+ const char **fname, int *lineno)
+{
+ Dwarf_Line *line;
+ Dwarf_Addr laddr;
+
+ line = dwarf_getsrc_die(cu_die, (Dwarf_Addr)addr);
+ if (line && dwarf_lineaddr(line, &laddr) == 0 &&
+ addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) {
+ *fname = dwarf_linesrc(line, NULL, NULL);
+ if (!*fname)
+ /* line number is useless without filename */
+ *lineno = 0;
+ }
+
+ return *lineno ?: -ENOENT;
+}
+
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data);
+
+/**
+ * cu_walk_functions_at - Walk on function DIEs at given address
+ * @cu_die: A CU DIE
+ * @addr: An address
+ * @callback: A callback which called with found DIEs
+ * @data: A user data
+ *
+ * Walk on function DIEs at the given @addr in @cu_die. The DIEs
+ * passed to @callback are subprograms or inlined subroutines.
+ */
+int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ int (*callback)(Dwarf_Die *, void *), void *data)
+{
+ Dwarf_Die die_mem;
+ Dwarf_Die *sc_die;
+ int ret = -ENOENT;
+
+ /* Inlined function could be recursive. Trace it until fail */
+ for (sc_die = die_find_realfunc(cu_die, addr, &die_mem);
+ sc_die != NULL;
+ sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr,
+ &die_mem)) {
+ ret = callback(sc_die, data);
+ if (ret)
+ break;
+ }
+
+	return ret;
+}
+
+/**
+ * die_compare_name - Compare diename and tname
+ * @dw_die: a DIE
+ * @tname: a string of target name
+ *
+ * Compare the name of @dw_die and @tname. Return false if @dw_die has no name.
+ */
+bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
+{
+ const char *name;
+ name = dwarf_diename(dw_die);
+ return name ? (strcmp(tname, name) == 0) : false;
+}
+
+/**
+ * die_get_call_lineno - Get callsite line number of inline-function instance
+ * @in_die: a DIE of an inlined function instance
+ *
+ * Get call-site line number of @in_die. This means from where the inline
+ * function is called.
+ */
+int die_get_call_lineno(Dwarf_Die *in_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
+ return -ENOENT;
+
+ dwarf_formudata(&attr, &ret);
+ return (int)ret;
+}
+
+/**
+ * die_get_type - Get type DIE
+ * @vr_die: a DIE of a variable
+ * @die_mem: where to store a type DIE
+ *
+ * Get a DIE of the type of the given variable (@vr_die), and store
+ * it in @die_mem. Returns NULL if it fails to get a type DIE.
+ */
+Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) &&
+ dwarf_formref_die(&attr, die_mem))
+ return die_mem;
+ else
+ return NULL;
+}
+
+/* Get a type die, but skip qualifiers */
+static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ int tag;
+
+ do {
+ vr_die = die_get_type(vr_die, die_mem);
+ if (!vr_die)
+ break;
+ tag = dwarf_tag(vr_die);
+ } while (tag == DW_TAG_const_type ||
+ tag == DW_TAG_restrict_type ||
+ tag == DW_TAG_volatile_type ||
+ tag == DW_TAG_shared_type);
+
+ return vr_die;
+}
+
+/**
+ * die_get_real_type - Get a type die, but skip qualifiers and typedef
+ * @vr_die: a DIE of a variable
+ * @die_mem: where to store a type DIE
+ *
+ * Get a DIE of the type of the given variable (@vr_die), and store
+ * it in @die_mem. Returns NULL if it fails to get a type DIE.
+ * If the type is a qualifier (e.g. const) or a typedef, this skips it
+ * and tries to find the real type (a structure or a basic type, e.g. int).
+ */
+Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ do {
+ vr_die = __die_get_real_type(vr_die, die_mem);
+ } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef);
+
+ return vr_die;
+}
+
+/* Get attribute and translate it as a udata */
+static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Word *result)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formudata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+/* Get attribute and translate it as a sdata */
+static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
+ Dwarf_Sword *result)
+{
+ Dwarf_Attribute attr;
+
+ if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formsdata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * die_is_signed_type - Check whether a type DIE is signed or not
+ * @tp_die: a DIE of a type
+ *
+ * Get the encoding of @tp_die and return true if the encoding
+ * is signed.
+ */
+bool die_is_signed_type(Dwarf_Die *tp_die)
+{
+ Dwarf_Word ret;
+
+ if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
+ return false;
+
+ return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
+ ret == DW_ATE_signed_fixed);
+}
+
+/**
+ * die_is_func_def - Ensure that this DIE is a subprogram and definition
+ * @dw_die: a DIE
+ *
+ * Ensure that this DIE is a subprogram and NOT a declaration. This
+ * returns true if @dw_die is a function definition.
+ **/
+bool die_is_func_def(Dwarf_Die *dw_die)
+{
+ Dwarf_Attribute attr;
+
+ return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
+ dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+}
+
+/**
+ * die_get_data_member_location - Get the data-member offset
+ * @mb_die: a DIE of a member of a data structure
+ * @offs: The offset of the member in the data structure
+ *
+ * Get the offset of @mb_die within the data structure containing it, and
+ * store the resulting offset in @offs. On error this returns a negative errno.
+ */
+int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *expr;
+ size_t nexpr;
+ int ret;
+
+ if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
+ return -ENOENT;
+
+ if (dwarf_formudata(&attr, offs) != 0) {
+ /* DW_AT_data_member_location should be DW_OP_plus_uconst */
+ ret = dwarf_getlocation(&attr, &expr, &nexpr);
+ if (ret < 0 || nexpr == 0)
+ return -ENOENT;
+
+ if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
+ pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
+ expr[0].atom, nexpr);
+ return -ENOTSUP;
+ }
+ *offs = (Dwarf_Word)expr[0].number;
+ }
+ return 0;
+}
+
+/* Get the call file index number in CU DIE */
+static int die_get_call_fileno(Dwarf_Die *in_die)
+{
+ Dwarf_Sword idx;
+
+ if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+}
+
+/* Get the declared file index number in CU DIE */
+static int die_get_decl_fileno(Dwarf_Die *pdie)
+{
+ Dwarf_Sword idx;
+
+ if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+}
+
+/**
+ * die_get_call_file - Get callsite file name of inlined function instance
+ * @in_die: a DIE of an inlined function instance
+ *
+ * Get call-site file name of @in_die. This means from which file the inline
+ * function is called.
+ */
+const char *die_get_call_file(Dwarf_Die *in_die)
+{
+ Dwarf_Die cu_die;
+ Dwarf_Files *files;
+ int idx;
+
+ idx = die_get_call_fileno(in_die);
+ if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) ||
+ dwarf_getsrcfiles(&cu_die, &files, NULL) != 0)
+ return NULL;
+
+ return dwarf_filesrc(files, idx, NULL, NULL);
+}
+
+/**
+ * die_find_child - Generic DIE search function in DIE tree
+ * @rt_die: a root DIE
+ * @callback: a callback function
+ * @data: a user data passed to the callback function
+ * @die_mem: a buffer for result DIE
+ *
+ * Trace DIE tree from @rt_die and call @callback for each child DIE.
+ * If @callback returns DIE_FIND_CB_END, this stores the DIE into
+ * @die_mem and returns it. If @callback returns DIE_FIND_CB_CONTINUE,
+ * this continues to trace the tree. Optionally, @callback can return
+ * DIE_FIND_CB_CHILD or DIE_FIND_CB_SIBLING, which mean trace only
+ * the children or only the siblings, respectively.
+ * Returns NULL if @callback can't find any appropriate DIE.
+ */
+Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem)
+{
+ Dwarf_Die child_die;
+ int ret;
+
+ ret = dwarf_child(rt_die, die_mem);
+ if (ret != 0)
+ return NULL;
+
+ do {
+ ret = callback(die_mem, data);
+ if (ret == DIE_FIND_CB_END)
+ return die_mem;
+
+ if ((ret & DIE_FIND_CB_CHILD) &&
+ die_find_child(die_mem, callback, data, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while ((ret & DIE_FIND_CB_SIBLING) &&
+ dwarf_siblingof(die_mem, die_mem) == 0);
+
+ return NULL;
+}
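/*
 * Editor's sketch, not part of the original patch: a minimal
 * die_find_child() callback using the DIE_FIND_CB_* protocol
 * documented above. It stops at the first child DIE that is a
 * variable; die_find_child() then copies that DIE into its
 * die_mem argument and returns it.
 */
static int __find_first_variable_cb(Dwarf_Die *die_mem, void *data __maybe_unused)
{
	if (dwarf_tag(die_mem) == DW_TAG_variable)
		return DIE_FIND_CB_END;		/* found: stop the walk here */

	return DIE_FIND_CB_CONTINUE;		/* keep tracing children and siblings */
}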
+
+struct __addr_die_search_param {
+ Dwarf_Addr addr;
+ Dwarf_Die *die_mem;
+};
+
+/* die_find callback for non-inlined function search */
+static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct __addr_die_search_param *ad = data;
+
+ /*
+	 * Since a declaration entry doesn't have a pc, this always returns
+	 * a function definition entry.
+ */
+ if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
+ dwarf_haspc(fn_die, ad->addr)) {
+ memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
+ return DWARF_CB_ABORT;
+ }
+ return DWARF_CB_OK;
+}
+
+/**
+ * die_find_realfunc - Search a non-inlined function at given address
+ * @cu_die: a CU DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search a non-inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ */
+Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ struct __addr_die_search_param ad;
+ ad.addr = addr;
+ ad.die_mem = die_mem;
+ /* dwarf_getscopes can't find subprogram. */
+ if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
+ return NULL;
+ else
+ return die_mem;
+}
+
+/* die_find callback for inline function search */
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
+{
+ Dwarf_Addr *addr = data;
+
+ if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
+ dwarf_haspc(die_mem, *addr))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/**
+ * die_find_top_inlinefunc - Search the top inlined function at given address
+ * @sp_die: a subprogram DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ * Even if several inlined functions are expanded recursively, this
+ * doesn't trace them down, and returns the topmost one.
+ */
+Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
+}
+
+/**
+ * die_find_inlinefunc - Search an inlined function at given address
+ * @sp_die: a subprogram DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ * If several inlined functions are expanded recursively, this traces
+ * them down and returns the deepest one.
+ */
+Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ Dwarf_Die tmp_die;
+
+ sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die);
+ if (!sp_die)
+ return NULL;
+
+ /* Inlined function could be recursive. Trace it until fail */
+ while (sp_die) {
+ memcpy(die_mem, sp_die, sizeof(Dwarf_Die));
+ sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr,
+ &tmp_die);
+ }
+
+ return die_mem;
+}
+
+struct __instance_walk_param {
+ void *addr;
+ int (*callback)(Dwarf_Die *, void *);
+ void *data;
+ int retval;
+};
+
+static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
+{
+ struct __instance_walk_param *iwp = data;
+ Dwarf_Attribute attr_mem;
+ Dwarf_Die origin_mem;
+ Dwarf_Attribute *attr;
+ Dwarf_Die *origin;
+ int tmp;
+
+ attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
+ if (attr == NULL)
+ return DIE_FIND_CB_CONTINUE;
+
+ origin = dwarf_formref_die(attr, &origin_mem);
+ if (origin == NULL || origin->addr != iwp->addr)
+ return DIE_FIND_CB_CONTINUE;
+
+ /* Ignore redundant instances */
+ if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) {
+ dwarf_decl_line(origin, &tmp);
+ if (die_get_call_lineno(inst) == tmp) {
+ tmp = die_get_decl_fileno(origin);
+ if (die_get_call_fileno(inst) == tmp)
+ return DIE_FIND_CB_CONTINUE;
+ }
+ }
+
+ iwp->retval = iwp->callback(inst, iwp->data);
+
+ return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE;
+}
+
+/**
+ * die_walk_instances - Walk on instances of given DIE
+ * @or_die: an abstract original DIE
+ * @callback: a callback function which is called with instance DIE
+ * @data: user data
+ *
+ * Walk on the instances of the given @or_die. @or_die must be an inlined
+ * function declaration. This returns the return value of @callback if it
+ * returns a non-zero value, or -ENOENT if there is no instance.
+ */
+int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
+ void *data)
+{
+ Dwarf_Die cu_die;
+ Dwarf_Die die_mem;
+ struct __instance_walk_param iwp = {
+ .addr = or_die->addr,
+ .callback = callback,
+ .data = data,
+ .retval = -ENOENT,
+ };
+
+ if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL)
+ return -ENOENT;
+
+ die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem);
+
+ return iwp.retval;
+}
+
+/* Line walker internal parameters */
+struct __line_walk_param {
+ bool recursive;
+ line_walk_callback_t callback;
+ void *data;
+ int retval;
+};
+
+static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+ Dwarf_Addr addr = 0;
+ const char *fname;
+ int lineno;
+
+ if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
+ fname = die_get_call_file(in_die);
+ lineno = die_get_call_lineno(in_die);
+ if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
+ }
+ if (!lw->recursive)
+ /* Don't need to search recursively */
+ return DIE_FIND_CB_SIBLING;
+
+ if (addr) {
+ fname = dwarf_decl_file(in_die);
+ if (fname && dwarf_decl_line(in_die, &lineno) == 0) {
+ lw->retval = lw->callback(fname, lineno, addr, lw->data);
+ if (lw->retval != 0)
+ return DIE_FIND_CB_END;
+ }
+ }
+
+ /* Continue to search nested inlined function call-sites */
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/* Walk on lines of blocks included in given DIE */
+static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
+ line_walk_callback_t callback, void *data)
+{
+ struct __line_walk_param lw = {
+ .recursive = recursive,
+ .callback = callback,
+ .data = data,
+ .retval = 0,
+ };
+ Dwarf_Die die_mem;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno;
+
+ /* Handle function declaration line */
+ fname = dwarf_decl_file(sp_die);
+ if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
+ dwarf_entrypc(sp_die, &addr) == 0) {
+ lw.retval = callback(fname, lineno, addr, data);
+ if (lw.retval != 0)
+ goto done;
+ }
+ die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
+done:
+ return lw.retval;
+}
+
+static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct __line_walk_param *lw = data;
+
+ lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
+ if (lw->retval != 0)
+ return DWARF_CB_ABORT;
+
+ return DWARF_CB_OK;
+}
+
+/**
+ * die_walk_lines - Walk on lines inside given DIE
+ * @rt_die: a root DIE (CU, subprogram or inlined_subroutine)
+ * @callback: callback routine
+ * @data: user data
+ *
+ * Walk on all lines inside given @rt_die and call @callback on each line.
+ * If the @rt_die is a function, walk only on the lines inside the function,
+ * otherwise @rt_die must be a CU DIE.
+ * Note that this walks not only the DWARF line list, but also function
+ * entries and inline call sites.
+ */
+int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ Dwarf_Addr addr;
+ const char *fname;
+ int lineno, ret = 0;
+ Dwarf_Die die_mem, *cu_die;
+ size_t nlines, i;
+
+ /* Get the CU die */
+ if (dwarf_tag(rt_die) != DW_TAG_compile_unit)
+ cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL);
+ else
+ cu_die = rt_die;
+ if (!cu_die) {
+ pr_debug2("Failed to get CU from given DIE.\n");
+ return -EINVAL;
+ }
+
+ /* Get lines list in the CU */
+ if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
+ pr_debug2("Failed to get source lines on this CU.\n");
+ return -ENOENT;
+ }
+ pr_debug2("Get %zd lines from this CU\n", nlines);
+
+ /* Walk on the lines on lines list */
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ if (line == NULL ||
+ dwarf_lineno(line, &lineno) != 0 ||
+ dwarf_lineaddr(line, &addr) != 0) {
+ pr_debug2("Failed to get line info. "
+ "Possible error in debuginfo.\n");
+ continue;
+ }
+ /* Filter lines based on address */
+ if (rt_die != cu_die)
+ /*
+ * Address filtering
+ * The line is included in given function, and
+ * no inline block includes it.
+ */
+ if (!dwarf_haspc(rt_die, addr) ||
+ die_find_inlinefunc(rt_die, addr, &die_mem))
+ continue;
+ /* Get source line */
+ fname = dwarf_linesrc(line, NULL, NULL);
+
+ ret = callback(fname, lineno, addr, data);
+ if (ret != 0)
+ return ret;
+ }
+
+ /*
+	 * DWARF lines don't include function declarations and inlined
+	 * subroutines. We have to check the functions list or the given function.
+ */
+ if (rt_die != cu_die)
+ /*
+		 * No need to walk functions recursively, because nested
+ * inlined functions don't have lines of the specified DIE.
+ */
+ ret = __die_walk_funclines(rt_die, false, callback, data);
+ else {
+ struct __line_walk_param param = {
+ .callback = callback,
+ .data = data,
+ .retval = 0,
+ };
+ dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0);
+ ret = param.retval;
+ }
+
+ return ret;
+}
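
A minimal callback sketch for the walker (hypothetical; cu_die is assumed to
be in scope, PRIx64 comes from <inttypes.h>):

	/* print every (file, line, address) tuple the walker visits */
	static int show_line_cb(const char *fname, int lineno, Dwarf_Addr addr,
				void *data __maybe_unused)
	{
		pr_debug("%s:%d\t%#" PRIx64 "\n", fname, lineno, (uint64_t)addr);
		return 0;	/* a non-zero return stops the walk */
	}

	die_walk_lines(&cu_die, show_line_cb, NULL);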
+
+struct __find_variable_param {
+ const char *name;
+ Dwarf_Addr addr;
+};
+
+static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
+{
+ struct __find_variable_param *fvp = data;
+ Dwarf_Attribute attr;
+ int tag;
+
+ tag = dwarf_tag(die_mem);
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ die_compare_name(die_mem, fvp->name) &&
+ /* Does the DIE have location information or external instance? */
+ (dwarf_attr(die_mem, DW_AT_external, &attr) ||
+ dwarf_attr(die_mem, DW_AT_location, &attr)))
+ return DIE_FIND_CB_END;
+ if (dwarf_haspc(die_mem, fvp->addr))
+ return DIE_FIND_CB_CONTINUE;
+ else
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_at - Find a variable with a given name at a given address
+ * @sp_die: a function DIE
+ * @name: variable name
+ * @addr: address
+ * @die_mem: a buffer for result DIE
+ *
+ * Find a variable DIE called @name at @addr in @sp_die.
+ */
+Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem)
+{
+ struct __find_variable_param fvp = { .name = name, .addr = addr};
+
+ return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp,
+ die_mem);
+}
+
+static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *name = data;
+
+ if ((dwarf_tag(die_mem) == DW_TAG_member) &&
+ die_compare_name(die_mem, name))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_member - Find a member with a given name in a data structure
+ * @st_die: a data structure type DIE
+ * @name: member name
+ * @die_mem: a buffer for result DIE
+ *
+ * Find a member DIE called @name in @st_die.
+ */
+Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(st_die, __die_find_member_cb, (void *)name,
+ die_mem);
+}
+
+/**
+ * die_get_typename - Get the type name of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a buffer for result type name
+ * @len: a max-length of @buf
+ *
+ * Get the type name of @vr_die and store it in @buf. Returns the actual
+ * length of the type name on success, -E2BIG if @len is not long enough,
+ * and -ENOENT if the type name could not be found.
+ * Note that the result stores the typedef name if possible, and stores
+ * "(function_type)" if the type is a function pointer.
+ */
+int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+{
+ Dwarf_Die type;
+ int tag, ret, ret2;
+ const char *tmp = "";
+
+ if (__die_get_real_type(vr_die, &type) == NULL)
+ return -ENOENT;
+
+ tag = dwarf_tag(&type);
+ if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+ tmp = "*";
+ else if (tag == DW_TAG_subroutine_type) {
+ /* Function pointer */
+ ret = snprintf(buf, len, "(function_type)");
+ return (ret >= len) ? -E2BIG : ret;
+ } else {
+ if (!dwarf_diename(&type))
+ return -ENOENT;
+ if (tag == DW_TAG_union_type)
+ tmp = "union ";
+ else if (tag == DW_TAG_structure_type)
+ tmp = "struct ";
+ else if (tag == DW_TAG_enumeration_type)
+ tmp = "enum ";
+ /* Write a base name */
+ ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
+ return (ret >= len) ? -E2BIG : ret;
+ }
+ ret = die_get_typename(&type, buf, len);
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
+
+/**
+ * die_get_varname - Get the name and type of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a buffer for type and variable name
+ * @len: the max-length of @buf
+ *
+ * Get the type and name of @vr_die and store them in @buf as "type\tname".
+ */
+int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+{
+ int ret, ret2;
+
+ ret = die_get_typename(vr_die, buf, len);
+ if (ret < 0) {
+ pr_debug("Failed to get type, make it unknown.\n");
+ ret = snprintf(buf, len, "(unknown_type)");
+ }
+ if (ret > 0) {
+ ret2 = snprintf(buf + ret, len - ret, "\t%s",
+ dwarf_diename(vr_die));
+ ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+ }
+ return ret;
+}
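
A minimal sketch of the intended use (hypothetical var_die; the output shown
in the comment is illustrative):

	char buf[128];

	/* yields e.g. "struct task_struct*\ttsk" for a pointer variable */
	if (die_get_varname(&var_die, buf, sizeof(buf)) > 0)
		pr_debug("variable: %s\n", buf);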
+
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
new file mode 100644
index 00000000000..b4fe90c6cb2
--- /dev/null
+++ b/tools/perf/util/dwarf-aux.h
@@ -0,0 +1,118 @@
+#ifndef _DWARF_AUX_H
+#define _DWARF_AUX_H
+/*
+ * dwarf-aux.h : libdw auxiliary interfaces
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <elfutils/version.h>
+
+/* Find the realpath of the target file */
+extern const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname);
+
+/* Get DW_AT_comp_dir (may be NULL with older gcc) */
+extern const char *cu_get_comp_dir(Dwarf_Die *cu_die);
+
+/* Get a line number and file name for given address */
+extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
+ const char **fname, int *lineno);
+
+/* Walk on functions at given address */
+extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ int (*callback)(Dwarf_Die *, void *), void *data);
+
+/* Check whether this DIE is a subprogram and a definition (not a declaration) */
+extern bool die_is_func_def(Dwarf_Die *dw_die);
+
+/* Compare diename and tname */
+extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
+
+/* Get the call-site line number of an inlined function instance */
+extern int die_get_call_lineno(Dwarf_Die *in_die);
+
+/* Get the call-site file name of an inlined function instance */
+extern const char *die_get_call_file(Dwarf_Die *in_die);
+
+/* Get type die */
+extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+
+/* Get a type die, but skip qualifiers and typedef */
+extern Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem);
+
+/* Check whether the DIE is signed or not */
+extern bool die_is_signed_type(Dwarf_Die *tp_die);
+
+/* Get data_member_location offset */
+extern int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs);
+
+/* Return values for die_find_child() callbacks */
+enum {
+ DIE_FIND_CB_END = 0, /* End of Search */
+ DIE_FIND_CB_CHILD = 1, /* Search only children */
+ DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
+ DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
+};
+
+/* Search child DIEs */
+extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem);
+
+/* Search a non-inlined function including given address */
+extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Search the top inlined function including given address */
+extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Search the deepest inlined function including given address */
+extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem);
+
+/* Walk on the instances of given DIE */
+extern int die_walk_instances(Dwarf_Die *in_die,
+ int (*callback)(Dwarf_Die *, void *), void *data);
+
+/* Walker on lines (Note: line numbers will not be sorted) */
+typedef int (* line_walk_callback_t) (const char *fname, int lineno,
+ Dwarf_Addr addr, void *data);
+
+/*
+ * Walk on lines inside given DIE. If the DIE is a subprogram, walk only on
+ * the lines inside the subprogram, otherwise the DIE must be a CU DIE.
+ */
+extern int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback,
+ void *data);
+
+/* Find a variable called 'name' at given address */
+extern Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Addr addr, Dwarf_Die *die_mem);
+
+/* Find a member called 'name' */
+extern Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem);
+
+/* Get the type name of given variable DIE */
+extern int die_get_typename(Dwarf_Die *vr_die, char *buf, int len);
+
+/* Get the name and type of given variable DIE, stored as "type\tname" */
+extern int die_get_varname(Dwarf_Die *vr_die, char *buf, int len);
+#endif
diff --git a/tools/perf/util/environment.c b/tools/perf/util/environment.c
new file mode 100644
index 00000000000..275b0ee345f
--- /dev/null
+++ b/tools/perf/util/environment.c
@@ -0,0 +1,9 @@
+/*
+ * We put all the perf config variables in this same object
+ * file, so that programs can link against the config parser
+ * without having to link against all the rest of perf.
+ */
+#include "cache.h"
+
+const char *pager_program;
+int pager_use_color = 1;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
new file mode 100644
index 00000000000..d0281bdfa58
--- /dev/null
+++ b/tools/perf/util/event.c
@@ -0,0 +1,868 @@
+#include <linux/types.h>
+#include <sys/mman.h>
+#include "event.h"
+#include "debug.h"
+#include "hist.h"
+#include "machine.h"
+#include "sort.h"
+#include "string.h"
+#include "strlist.h"
+#include "thread.h"
+#include "thread_map.h"
+#include "symbol/kallsyms.h"
+
+static const char *perf_event__names[] = {
+ [0] = "TOTAL",
+ [PERF_RECORD_MMAP] = "MMAP",
+ [PERF_RECORD_MMAP2] = "MMAP2",
+ [PERF_RECORD_LOST] = "LOST",
+ [PERF_RECORD_COMM] = "COMM",
+ [PERF_RECORD_EXIT] = "EXIT",
+ [PERF_RECORD_THROTTLE] = "THROTTLE",
+ [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
+ [PERF_RECORD_FORK] = "FORK",
+ [PERF_RECORD_READ] = "READ",
+ [PERF_RECORD_SAMPLE] = "SAMPLE",
+ [PERF_RECORD_HEADER_ATTR] = "ATTR",
+ [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
+ [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
+ [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
+ [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
+};
+
+const char *perf_event__name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(perf_event__names))
+ return "INVALID";
+ if (!perf_event__names[id])
+ return "UNKNOWN";
+ return perf_event__names[id];
+}
+
+static struct perf_sample synth_sample = {
+ .pid = -1,
+ .tid = -1,
+ .time = -1,
+ .stream_id = -1,
+ .cpu = -1,
+ .period = 1,
+};
+
+static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
+{
+ char filename[PATH_MAX];
+ char bf[BUFSIZ];
+ FILE *fp;
+ size_t size = 0;
+ pid_t tgid = -1;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+ while (!comm[0] || (tgid < 0)) {
+ if (fgets(bf, sizeof(bf), fp) == NULL) {
+ pr_warning("couldn't get COMM and pgid, malformed %s\n",
+ filename);
+ break;
+ }
+
+ if (memcmp(bf, "Name:", 5) == 0) {
+ char *name = bf + 5;
+ while (*name && isspace(*name))
+ ++name;
+ size = strlen(name) - 1;
+ if (size >= len)
+ size = len - 1;
+ memcpy(comm, name, size);
+ comm[size] = '\0';
+
+ } else if (memcmp(bf, "Tgid:", 5) == 0) {
+ char *tgids = bf + 5;
+ while (*tgids && isspace(*tgids))
+ ++tgids;
+ tgid = atoi(tgids);
+ }
+ }
+
+ fclose(fp);
+
+ return tgid;
+}
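
Since this helper is file-local, a sketch of how it is driven (hypothetical;
note that the comm buffer must start zeroed because the parse loop tests
comm[0]):

	char comm[16] = "";
	pid_t tgid;

	/* parses the Name: and Tgid: lines of /proc/<pid>/status */
	tgid = perf_event__get_comm_tgid(getpid(), comm, sizeof(comm));
	if (tgid < 0)
		pr_debug("couldn't resolve tgid of %d\n", getpid());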
+
+static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t size;
+ pid_t tgid;
+
+ memset(&event->comm, 0, sizeof(event->comm));
+
+ if (machine__is_host(machine))
+ tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
+ sizeof(event->comm.comm));
+ else
+ tgid = machine->pid;
+
+ if (tgid < 0)
+ goto out;
+
+ event->comm.pid = tgid;
+ event->comm.header.type = PERF_RECORD_COMM;
+
+ size = strlen(event->comm.comm) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ memset(event->comm.comm + size, 0, machine->id_hdr_size);
+ event->comm.header.size = (sizeof(event->comm) -
+ (sizeof(event->comm.comm) - size) +
+ machine->id_hdr_size);
+ event->comm.tid = pid;
+
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
+
+out:
+ return tgid;
+}
+
+static int perf_event__synthesize_fork(struct perf_tool *tool,
+ union perf_event *event, pid_t pid,
+ pid_t tgid, perf_event__handler_t process,
+ struct machine *machine)
+{
+ memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
+
+ /* this is really a clone event but we use fork to synthesize it */
+ event->fork.ppid = tgid;
+ event->fork.ptid = tgid;
+ event->fork.pid = tgid;
+ event->fork.tid = pid;
+ event->fork.header.type = PERF_RECORD_FORK;
+
+ event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
+
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
+
+ return 0;
+}
+
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
+{
+ char filename[PATH_MAX];
+ FILE *fp;
+ int rc = 0;
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
+ machine->root_dir, pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ /*
+ * We raced with a task exiting - just return:
+ */
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP2;
+
+ while (1) {
+ char bf[BUFSIZ];
+ char prot[5];
+ char execname[PATH_MAX];
+ char anonstr[] = "//anon";
+ unsigned int ino;
+ size_t size;
+ ssize_t n;
+
+ if (fgets(bf, sizeof(bf), fp) == NULL)
+ break;
+
+ /* ensure null termination since stack will be reused. */
+ strcpy(execname, "");
+
+ /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
+ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+ &event->mmap2.start, &event->mmap2.len, prot,
+ &event->mmap2.pgoff, &event->mmap2.maj,
+ &event->mmap2.min,
+ &ino, execname);
+
+ /*
+ * Anon maps don't have the execname.
+ */
+ if (n < 7)
+ continue;
+
+ event->mmap2.ino = (u64)ino;
+
+ /*
+ * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_USER;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_USER;
+
+ /* map protection and flags bits */
+ event->mmap2.prot = 0;
+ event->mmap2.flags = 0;
+ if (prot[0] == 'r')
+ event->mmap2.prot |= PROT_READ;
+ if (prot[1] == 'w')
+ event->mmap2.prot |= PROT_WRITE;
+ if (prot[2] == 'x')
+ event->mmap2.prot |= PROT_EXEC;
+
+ if (prot[3] == 's')
+ event->mmap2.flags |= MAP_SHARED;
+ else
+ event->mmap2.flags |= MAP_PRIVATE;
+
+ if (prot[2] != 'x') {
+ if (!mmap_data || prot[0] != 'r')
+ continue;
+
+ event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
+ }
+
+ if (!strcmp(execname, ""))
+ strcpy(execname, anonstr);
+
+ size = strlen(execname) + 1;
+ memcpy(event->mmap2.filename, execname, size);
+ size = PERF_ALIGN(size, sizeof(u64));
+		event->mmap2.len -= event->mmap2.start;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+ event->mmap2.header.size += machine->id_hdr_size;
+ event->mmap2.pid = tgid;
+ event->mmap2.tid = pid;
+
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ rc = -1;
+ break;
+ }
+ }
+
+ fclose(fp);
+ return rc;
+}
+
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ int rc = 0;
+ struct rb_node *nd;
+ struct map_groups *kmaps = &machine->kmaps;
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ event->header.type = PERF_RECORD_MMAP;
+
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(machine))
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+
+ for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
+ nd; nd = rb_next(nd)) {
+ size_t size;
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ if (pos->dso->kernel)
+ continue;
+
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.start = pos->start;
+ event->mmap.len = pos->end - pos->start;
+ event->mmap.pid = machine->pid;
+
+ memcpy(event->mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ rc = -1;
+ break;
+ }
+ }
+
+ free(event);
+ return rc;
+}
+
+static int __event__synthesize_thread(union perf_event *comm_event,
+ union perf_event *mmap_event,
+ union perf_event *fork_event,
+ pid_t pid, int full,
+ perf_event__handler_t process,
+ struct perf_tool *tool,
+ struct machine *machine, bool mmap_data)
+{
+ char filename[PATH_MAX];
+ DIR *tasks;
+ struct dirent dirent, *next;
+	pid_t tgid;
+	int rc = 0;
+
+ /* special case: only send one comm event using passed in pid */
+ if (!full) {
+ tgid = perf_event__synthesize_comm(tool, comm_event, pid,
+ process, machine);
+
+ if (tgid == -1)
+ return -1;
+
+ return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+ process, machine, mmap_data);
+ }
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+ machine->root_dir, pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL) {
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
+	while (!readdir_r(tasks, &dirent, &next) && next) {
+		char *end;
+		pid_t _pid;
+
+		_pid = strtol(dirent.d_name, &end, 10);
+		if (*end)
+			continue;
+
+		tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
+						   process, machine);
+		if (tgid == -1) {
+			rc = -1;
+			break;
+		}
+
+		if (_pid == pid) {
+			/* process the parent's maps too */
+			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+								process, machine, mmap_data);
+		} else {
+			/* only fork the tid's map, to save time */
+			rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
+							 process, machine);
+		}
+
+		if (rc)
+			break;
+	}
+
+	/* don't leak the task directory handle on error paths */
+	closedir(tasks);
+	return rc;
+}
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data)
+{
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ int err = -1, thread, j;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ err = 0;
+ for (thread = 0; thread < threads->nr; ++thread) {
+ if (__event__synthesize_thread(comm_event, mmap_event,
+ fork_event,
+ threads->map[thread], 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+
+ /*
+ * comm.pid is set to thread group id by
+ * perf_event__synthesize_comm
+ */
+ if ((int) comm_event->comm.pid != threads->map[thread]) {
+ bool need_leader = true;
+
+ /* is thread group leader in thread_map? */
+ for (j = 0; j < threads->nr; ++j) {
+ if ((int) comm_event->comm.pid == threads->map[j]) {
+ need_leader = false;
+ break;
+ }
+ }
+
+ /* if not, generate events for it */
+ if (need_leader &&
+ __event__synthesize_thread(comm_event, mmap_event,
+ fork_event,
+ comm_event->comm.pid, 0,
+ process, tool, machine,
+ mmap_data)) {
+ err = -1;
+ break;
+ }
+ }
+ }
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine, bool mmap_data)
+{
+ DIR *proc;
+ char proc_path[PATH_MAX];
+ struct dirent dirent, *next;
+ union perf_event *comm_event, *mmap_event, *fork_event;
+ int err = -1;
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
+ if (comm_event == NULL)
+ goto out;
+
+ mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
+ if (mmap_event == NULL)
+ goto out_free_comm;
+
+ fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+ if (fork_event == NULL)
+ goto out_free_mmap;
+
+ snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
+ proc = opendir(proc_path);
+
+ if (proc == NULL)
+ goto out_free_fork;
+
+ while (!readdir_r(proc, &dirent, &next) && next) {
+ char *end;
+ pid_t pid = strtol(dirent.d_name, &end, 10);
+
+ if (*end) /* only interested in proper numerical dirents */
+ continue;
+ /*
+ * We may race with exiting thread, so don't stop just because
+ * one thread couldn't be synthesized.
+ */
+ __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
+ 1, process, tool, machine, mmap_data);
+ }
+
+ err = 0;
+ closedir(proc);
+out_free_fork:
+ free(fork_event);
+out_free_mmap:
+ free(mmap_event);
+out_free_comm:
+ free(comm_event);
+out:
+ return err;
+}
+
+struct process_symbol_args {
+ const char *name;
+ u64 start;
+};
+
+static int find_symbol_cb(void *arg, const char *name, char type,
+ u64 start)
+{
+ struct process_symbol_args *args = arg;
+
+ /*
+	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
+	 * an 'A' symbol at the same address as "_stext".
+ */
+ if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
+ type == 'A') || strcmp(name, args->name))
+ return 0;
+
+ args->start = start;
+ return 1;
+}
+
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name)
+{
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+ return 0;
+
+ return args.start;
+}
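
A minimal sketch (path and symbol name are illustrative):

	/* look up where the kernel relocation reference symbol starts */
	u64 start = kallsyms__get_function_start("/proc/kallsyms", "_text");

	if (!start)
		pr_debug("_text not found or kallsyms unreadable\n");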
+
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ size_t size;
+ const char *mmap_name;
+ char name_buff[PATH_MAX];
+ struct map *map;
+ struct kmap *kmap;
+ int err;
+ /*
+	 * We should get this from /sys/kernel/sections/.text, but until that
+	 * is available use this, and once it is, keep this as a fallback for
+	 * older kernels.
+ */
+ union perf_event *event = zalloc((sizeof(event->mmap) +
+ machine->id_hdr_size));
+ if (event == NULL) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for kernel modules\n");
+ return -1;
+ }
+
+ mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
+ if (machine__is_host(machine)) {
+ /*
+ * kernel uses PERF_RECORD_MISC_USER for user space maps,
+ * see kernel/perf_event.c __perf_event_mmap
+ */
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ } else {
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ }
+
+ map = machine->vmlinux_maps[MAP__FUNCTION];
+ kmap = map__kmap(map);
+ size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
+ "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
+ event->mmap.start = map->start;
+ event->mmap.len = map->end - event->mmap.start;
+ event->mmap.pid = machine->pid;
+
+ err = process(tool, event, &synth_sample, machine);
+ free(event);
+
+ return err;
+}
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+}
+
+int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_comm_event(machine, event, sample);
+}
+
+int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_lost_event(machine, event, sample);
+}
+
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
+ event->mmap.pid, event->mmap.tid, event->mmap.start,
+ event->mmap.len, event->mmap.pgoff,
+ (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
+ event->mmap.filename);
+}
+
+size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
+ " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
+ event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
+ event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
+ event->mmap2.min, event->mmap2.ino,
+ event->mmap2.ino_generation,
+ (event->mmap2.prot & PROT_READ) ? 'r' : '-',
+ (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
+ (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
+ (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
+ event->mmap2.filename);
+}
+
+int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_mmap_event(machine, event, sample);
+}
+
+int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_mmap2_event(machine, event, sample);
+}
+
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, "(%d:%d):(%d:%d)\n",
+ event->fork.pid, event->fork.tid,
+ event->fork.ppid, event->fork.ptid);
+}
+
+int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_fork_event(machine, event, sample);
+}
+
+int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_exit_event(machine, event, sample);
+}
+
+size_t perf_event__fprintf(union perf_event *event, FILE *fp)
+{
+ size_t ret = fprintf(fp, "PERF_RECORD_%s",
+ perf_event__name(event->header.type));
+
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ ret += perf_event__fprintf_comm(event, fp);
+ break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ ret += perf_event__fprintf_task(event, fp);
+ break;
+ case PERF_RECORD_MMAP:
+ ret += perf_event__fprintf_mmap(event, fp);
+ break;
+ case PERF_RECORD_MMAP2:
+ ret += perf_event__fprintf_mmap2(event, fp);
+ break;
+ default:
+ ret += fprintf(fp, "\n");
+ }
+
+ return ret;
+}
+
+int perf_event__process(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_event(machine, event, sample);
+}
+
+void thread__find_addr_map(struct thread *thread,
+ struct machine *machine, u8 cpumode,
+ enum map_type type, u64 addr,
+ struct addr_location *al)
+{
+ struct map_groups *mg = thread->mg;
+ bool load_map = false;
+
+ al->machine = machine;
+ al->thread = thread;
+ al->addr = addr;
+ al->cpumode = cpumode;
+ al->filtered = 0;
+
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+
+ if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
+ al->level = 'k';
+ mg = &machine->kmaps;
+ load_map = true;
+ } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
+ al->level = '.';
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ al->level = 'g';
+ mg = &machine->kmaps;
+ load_map = true;
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
+ al->level = 'u';
+ } else {
+ al->level = 'H';
+ al->map = NULL;
+
+ if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
+ cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
+ !perf_guest)
+ al->filtered |= (1 << HIST_FILTER__GUEST);
+ if ((cpumode == PERF_RECORD_MISC_USER ||
+ cpumode == PERF_RECORD_MISC_KERNEL) &&
+ !perf_host)
+ al->filtered |= (1 << HIST_FILTER__HOST);
+
+ return;
+ }
+try_again:
+ al->map = map_groups__find(mg, type, al->addr);
+ if (al->map == NULL) {
+ /*
+ * If this is outside of all known maps, and is a negative
+ * address, try to look it up in the kernel dso, as it might be
+ * a vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in the
+ * "[vdso]" dso, but for now lets use the old trick of looking
+ * in the whole kernel symbol list.
+ */
+ if ((long long)al->addr < 0 &&
+ cpumode == PERF_RECORD_MISC_USER &&
+ machine && mg != &machine->kmaps) {
+ mg = &machine->kmaps;
+ goto try_again;
+ }
+ } else {
+ /*
+ * Kernel maps might be changed when loading symbols so loading
+ * must be done prior to using kernel maps.
+ */
+ if (load_map)
+ map__load(al->map, machine->symbol_filter);
+ al->addr = al->map->map_ip(al->map, al->addr);
+ }
+}
+
+void thread__find_addr_location(struct thread *thread, struct machine *machine,
+ u8 cpumode, enum map_type type, u64 addr,
+ struct addr_location *al)
+{
+ thread__find_addr_map(thread, machine, cpumode, type, addr, al);
+ if (al->map != NULL)
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
+ else
+ al->sym = NULL;
+}
+
+int perf_event__preprocess_sample(const union perf_event *event,
+ struct machine *machine,
+ struct addr_location *al,
+ struct perf_sample *sample)
+{
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
+
+ if (thread == NULL)
+ return -1;
+
+ dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
+ /*
+ * Have we already created the kernel maps for this machine?
+ *
+ * This should have happened earlier, when we processed the kernel MMAP
+ * events, but for older perf.data files there was no such thing, so do
+ * it now.
+ */
+ if (cpumode == PERF_RECORD_MISC_KERNEL &&
+ machine->vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(machine);
+
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+ sample->ip, al);
+ dump_printf(" ...... dso: %s\n",
+ al->map ? al->map->dso->long_name :
+ al->level == 'H' ? "[hypervisor]" : "<not found>");
+
+ if (thread__is_filtered(thread))
+ al->filtered |= (1 << HIST_FILTER__THREAD);
+
+ al->sym = NULL;
+ al->cpu = sample->cpu;
+
+ if (al->map) {
+ struct dso *dso = al->map->dso;
+
+ if (symbol_conf.dso_list &&
+ (!dso || !(strlist__has_entry(symbol_conf.dso_list,
+ dso->short_name) ||
+ (dso->short_name != dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ dso->long_name))))) {
+ al->filtered |= (1 << HIST_FILTER__DSO);
+ }
+
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
+ }
+
+ if (symbol_conf.sym_list &&
+ (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
+ al->sym->name))) {
+ al->filtered |= (1 << HIST_FILTER__SYMBOL);
+ }
+
+ return 0;
+}
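
Putting it together, a consumer might resolve each sample like this (minimal
sketch; event, sample and machine are assumed to have been parsed already):

	struct addr_location al;

	if (perf_event__preprocess_sample(event, machine, &al, &sample) < 0)
		return -1;

	if (!al.filtered)	/* bitmask of HIST_FILTER__* reasons */
		pr_debug("sample in %s\n",
			 al.sym ? al.sym->name : "<unresolved>");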
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
new file mode 100644
index 00000000000..e5dd40addb3
--- /dev/null
+++ b/tools/perf/util/event.h
@@ -0,0 +1,316 @@
+#ifndef __PERF_RECORD_H
+#define __PERF_RECORD_H
+
+#include <limits.h>
+#include <stdio.h>
+
+#include "../perf.h"
+#include "map.h"
+#include "build-id.h"
+#include "perf_regs.h"
+
+struct mmap_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ char filename[PATH_MAX];
+};
+
+struct mmap2_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ u32 maj;
+ u32 min;
+ u64 ino;
+ u64 ino_generation;
+ u32 prot;
+ u32 flags;
+ char filename[PATH_MAX];
+};
+
+struct comm_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ char comm[16];
+};
+
+struct fork_event {
+ struct perf_event_header header;
+ u32 pid, ppid;
+ u32 tid, ptid;
+ u64 time;
+};
+
+struct lost_event {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
+};
+
+/*
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ */
+struct read_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 value;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+};
+
+struct throttle_event {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+};
+
+#define PERF_SAMPLE_MASK \
+ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
+ PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
+ PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
+ PERF_SAMPLE_IDENTIFIER)
+
+/* perf sample has 16 bits size limit */
+#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+
+struct sample_event {
+ struct perf_event_header header;
+ u64 array[];
+};
+
+struct regs_dump {
+ u64 abi;
+ u64 mask;
+ u64 *regs;
+
+ /* Cached values/mask filled by first register access. */
+ u64 cache_regs[PERF_REGS_MAX];
+ u64 cache_mask;
+};
+
+struct stack_dump {
+ u16 offset;
+ u64 size;
+ char *data;
+};
+
+struct sample_read_value {
+ u64 value;
+ u64 id;
+};
+
+struct sample_read {
+ u64 time_enabled;
+ u64 time_running;
+ union {
+ struct {
+ u64 nr;
+ struct sample_read_value *values;
+ } group;
+ struct sample_read_value one;
+ };
+};
+
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
+};
+
+struct branch_flags {
+ u64 mispred:1;
+ u64 predicted:1;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 reserved:60;
+};
+
+struct branch_entry {
+ u64 from;
+ u64 to;
+ struct branch_flags flags;
+};
+
+struct branch_stack {
+ u64 nr;
+ struct branch_entry entries[0];
+};
+
+struct perf_sample {
+ u64 ip;
+ u32 pid, tid;
+ u64 time;
+ u64 addr;
+ u64 id;
+ u64 stream_id;
+ u64 period;
+ u64 weight;
+ u64 transaction;
+ u32 cpu;
+ u32 raw_size;
+ u64 data_src;
+ void *raw_data;
+ struct ip_callchain *callchain;
+ struct branch_stack *branch_stack;
+ struct regs_dump user_regs;
+ struct stack_dump user_stack;
+ struct sample_read read;
+};
+
+#define PERF_MEM_DATA_SRC_NONE \
+ (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA))
+
+struct build_id_event {
+ struct perf_event_header header;
+ pid_t pid;
+ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ char filename[];
+};
+
+enum perf_user_event_type { /* above any possible kernel type */
+ PERF_RECORD_USER_TYPE_START = 64,
+ PERF_RECORD_HEADER_ATTR = 64,
+	PERF_RECORD_HEADER_EVENT_TYPE		= 65, /* deprecated */
+ PERF_RECORD_HEADER_TRACING_DATA = 66,
+ PERF_RECORD_HEADER_BUILD_ID = 67,
+ PERF_RECORD_FINISHED_ROUND = 68,
+ PERF_RECORD_HEADER_MAX
+};
+
+struct attr_event {
+ struct perf_event_header header;
+ struct perf_event_attr attr;
+ u64 id[];
+};
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+ u64 event_id;
+ char name[MAX_EVENT_NAME];
+};
+
+struct event_type_event {
+ struct perf_event_header header;
+ struct perf_trace_event_type event_type;
+};
+
+struct tracing_data_event {
+ struct perf_event_header header;
+ u32 size;
+};
+
+union perf_event {
+ struct perf_event_header header;
+ struct mmap_event mmap;
+ struct mmap2_event mmap2;
+ struct comm_event comm;
+ struct fork_event fork;
+ struct lost_event lost;
+ struct read_event read;
+ struct throttle_event throttle;
+ struct sample_event sample;
+ struct attr_event attr;
+ struct event_type_event event_type;
+ struct tracing_data_event tracing_data;
+ struct build_id_event build_id;
+};
+
+void perf_event__print_totals(void);
+
+struct perf_tool;
+struct thread_map;
+
+typedef int (*perf_event__handler_t)(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+int perf_event__synthesize_thread_map(struct perf_tool *tool,
+ struct thread_map *threads,
+ perf_event__handler_t process,
+ struct machine *machine, bool mmap_data);
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine, bool mmap_data);
+int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+
+int perf_event__synthesize_modules(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine);
+
+int perf_event__process_comm(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_lost(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_mmap2(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_fork(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_exit(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+struct addr_location;
+
+int perf_event__preprocess_sample(const union perf_event *event,
+ struct machine *machine,
+ struct addr_location *al,
+ struct perf_sample *sample);
+
+const char *perf_event__name(unsigned int id);
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 read_format);
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 read_format,
+ const struct perf_sample *sample,
+ bool swapped);
+
+int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ union perf_event *event,
+ pid_t pid, pid_t tgid,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data);
+
+size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name);
+
+#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
new file mode 100644
index 00000000000..59ef2802fcf
--- /dev/null
+++ b/tools/perf/util/evlist.c
@@ -0,0 +1,1245 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "util.h"
+#include <api/fs/debugfs.h>
+#include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
+#include "target.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "debug.h"
+#include <unistd.h>
+
+#include "parse-events.h"
+#include "parse-options.h"
+
+#include <sys/mman.h>
+
+#include <linux/bitops.h>
+#include <linux/hash.h>
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
+
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+ INIT_LIST_HEAD(&evlist->entries);
+ perf_evlist__set_maps(evlist, cpus, threads);
+ evlist->workload.pid = -1;
+}
+
+struct perf_evlist *perf_evlist__new(void)
+{
+ struct perf_evlist *evlist = zalloc(sizeof(*evlist));
+
+ if (evlist != NULL)
+ perf_evlist__init(evlist, NULL, NULL);
+
+ return evlist;
+}
+
+struct perf_evlist *perf_evlist__new_default(void)
+{
+ struct perf_evlist *evlist = perf_evlist__new();
+
+ if (evlist && perf_evlist__add_default(evlist)) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
+ */
+void perf_evlist__set_id_pos(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ evlist->id_pos = first->id_pos;
+ evlist->is_pos = first->is_pos;
+}
+
+static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel)
+ perf_evsel__calc_id_pos(evsel);
+
+ perf_evlist__set_id_pos(evlist);
+}
+
+static void perf_evlist__purge(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos, *n;
+
+ evlist__for_each_safe(evlist, n, pos) {
+ list_del_init(&pos->node);
+ perf_evsel__delete(pos);
+ }
+
+ evlist->nr_entries = 0;
+}
+
+void perf_evlist__exit(struct perf_evlist *evlist)
+{
+ zfree(&evlist->mmap);
+ zfree(&evlist->pollfd);
+}
+
+void perf_evlist__delete(struct perf_evlist *evlist)
+{
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ cpu_map__delete(evlist->cpus);
+ thread_map__delete(evlist->threads);
+ evlist->cpus = NULL;
+ evlist->threads = NULL;
+ perf_evlist__purge(evlist);
+ perf_evlist__exit(evlist);
+ free(evlist);
+}
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
+{
+ list_add_tail(&entry->node, &evlist->entries);
+ entry->idx = evlist->nr_entries;
+
+ if (!evlist->nr_entries++)
+ perf_evlist__set_id_pos(evlist);
+}
+
+void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries)
+{
+ bool set_id_pos = !evlist->nr_entries;
+
+ list_splice_tail(list, &evlist->entries);
+ evlist->nr_entries += nr_entries;
+ if (set_id_pos)
+ perf_evlist__set_id_pos(evlist);
+}
+
+void __perf_evlist__set_leader(struct list_head *list)
+{
+ struct perf_evsel *evsel, *leader;
+
+ leader = list_entry(list->next, struct perf_evsel, node);
+ evsel = list_entry(list->prev, struct perf_evsel, node);
+
+ leader->nr_members = evsel->idx - leader->idx + 1;
+
+ __evlist__for_each(list, evsel) {
+ evsel->leader = leader;
+ }
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries) {
+ evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
+ __perf_evlist__set_leader(&evlist->entries);
+ }
+}
+
+int perf_evlist__add_default(struct perf_evlist *evlist)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_evsel *evsel;
+
+ event_attr_init(&attr);
+
+ evsel = perf_evsel__new(&attr);
+ if (evsel == NULL)
+ goto error;
+
+ /* use strdup() because free(evsel) assumes name is allocated */
+ evsel->name = strdup("cycles");
+ if (!evsel->name)
+ goto error_free;
+
+ perf_evlist__add(evlist, evsel);
+ return 0;
+error_free:
+ perf_evsel__delete(evsel);
+error:
+ return -ENOMEM;
+}
+
+static int perf_evlist__add_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(head);
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++) {
+ evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->node, &head);
+ }
+
+ perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+
+ return 0;
+
+out_delete_partial_list:
+ __evlist__for_each_safe(&head, n, evsel)
+ perf_evsel__delete(evsel);
+ return -1;
+}
+
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs)
+{
+ size_t i;
+
+ for (i = 0; i < nr_attrs; i++)
+ event_attr_init(attrs + i);
+
+ return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
+}
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+ (int)evsel->attr.config == id)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+ (strcmp(evsel->name, name) == 0))
+ return evsel;
+ }
+
+ return NULL;
+}
+
+int perf_evlist__add_newtp(struct perf_evlist *evlist,
+ const char *sys, const char *name, void *handler)
+{
+ struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
+
+ if (evsel == NULL)
+ return -1;
+
+ evsel->handler = handler;
+ perf_evlist__add(evlist, evsel);
+ return 0;
+}
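
A minimal sketch (the tracepoint name is illustrative; the handler pointer is
opaque here and interpreted by the calling tool):

	/* record context switches via the sched:sched_switch tracepoint */
	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch", NULL))
		pr_debug("failed to add sched:sched_switch\n");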
+
+void perf_evlist__disable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ evlist__for_each(evlist, pos) {
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+ continue;
+ for (thread = 0; thread < nr_threads; thread++)
+ ioctl(FD(pos, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ }
+ }
+}
+
+void perf_evlist__enable(struct perf_evlist *evlist)
+{
+ int cpu, thread;
+ struct perf_evsel *pos;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ evlist__for_each(evlist, pos) {
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+ continue;
+ for (thread = 0; thread < nr_threads; thread++)
+ ioctl(FD(pos, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ }
+ }
+}
+
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return 0;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+{
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+ int nfds = nr_cpus * nr_threads * evlist->nr_entries;
+ evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
+ return evlist->pollfd != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ evlist->pollfd[evlist->nr_fds].fd = fd;
+ evlist->pollfd[evlist->nr_fds].events = POLLIN;
+ evlist->nr_fds++;
+}
+
+static void perf_evlist__id_hash(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ int hash;
+ struct perf_sample_id *sid = SID(evsel, cpu, thread);
+
+ sid->id = id;
+ sid->evsel = evsel;
+ hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&sid->node, &evlist->heads[hash]);
+}
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, u64 id)
+{
+ perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ evsel->id[evsel->ids++] = id;
+}
+
+static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int cpu, int thread, int fd)
+{
+ u64 read_data[4] = { 0, };
+ int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+	/* Legacy way to get the event id... All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
+
+ if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+ read(fd, &read_data, sizeof(read_data)) == -1)
+ return -1;
+
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ ++id_idx;
+ if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ ++id_idx;
+
+ id = read_data[id_idx];
+
+ add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ return 0;
+}
+
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
+{
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node)
+ if (sid->id == id)
+ return sid;
+
+ return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct perf_sample_id *sid;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ sid = perf_evlist__id2sid(evlist, id);
+ if (sid)
+ return sid->evsel;
+
+ if (!perf_evlist__sample_id_all(evlist))
+ return perf_evlist__first(evlist);
+
+ return NULL;
+}
+
+static int perf_evlist__event2id(struct perf_evlist *evlist,
+ union perf_event *event, u64 *id)
+{
+ const u64 *array = event->sample.array;
+ ssize_t n;
+
+ n = (event->header.size - sizeof(event->header)) >> 3;
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ if (evlist->id_pos >= n)
+ return -1;
+ *id = array[evlist->id_pos];
+ } else {
+ if (evlist->is_pos > n)
+ return -1;
+ n -= evlist->is_pos;
+ *id = array[n];
+ }
+ return 0;
+}
+
+static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+ union perf_event *event)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+ u64 id;
+
+ if (evlist->nr_entries == 1)
+ return first;
+
+ if (!first->attr.sample_id_all &&
+ event->header.type != PERF_RECORD_SAMPLE)
+ return first;
+
+ if (perf_evlist__event2id(evlist, event, &id))
+ return NULL;
+
+ /* Synthesized events have an id of zero */
+ if (!id)
+ return first;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node) {
+ if (sid->id == id)
+ return sid->evsel;
+ }
+ return NULL;
+}
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int head = perf_mmap__read_head(md);
+ unsigned int old = md->prev;
+ unsigned char *data = md->base + page_size;
+ union perf_event *event = NULL;
+
+ if (evlist->overwrite) {
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the head, we got messed up.
+ *
+ * In either case, truncate and restart at head.
+ */
+ int diff = head - old;
+ if (diff > md->mask / 2 || diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+ /*
+ * head points to a known good entry, start there.
+ */
+ old = head;
+ }
+ }
+
+ if (old != head) {
+ size_t size;
+
+ event = (union perf_event *)&data[old & md->mask];
+ size = event->header.size;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((old & md->mask) + size != ((old + size) & md->mask)) {
+ unsigned int offset = old;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = md->event_copy;
+
+ do {
+ cpy = min(md->mask + 1 - (offset & md->mask), len);
+ memcpy(dst, &data[offset & md->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = (union perf_event *) md->event_copy;
+ }
+
+ old += size;
+ }
+
+ md->prev = old;
+
+ return event;
+}
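
Reads are paired with perf_evlist__mmap_consume() just below; a sketch of the
canonical drain loop over one ring (idx), assuming each event is handled in
place before the tail is advanced:

	union perf_event *event;

	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
		perf_event__fprintf(event, stdout);	/* handle the event */
		perf_evlist__mmap_consume(evlist, idx);
	}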
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+ if (!evlist->overwrite) {
+ struct perf_mmap *md = &evlist->mmap[idx];
+ unsigned int old = md->prev;
+
+ perf_mmap__write_tail(md, old);
+ }
+}
+
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+ if (evlist->mmap[idx].base != NULL) {
+ munmap(evlist->mmap[idx].base, evlist->mmap_len);
+ evlist->mmap[idx].base = NULL;
+ }
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+ int i;
+
+ if (evlist->mmap == NULL)
+ return;
+
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ __perf_evlist__munmap(evlist, i);
+
+ zfree(&evlist->mmap);
+}
+
+static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+{
+ evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
+ if (cpu_map__empty(evlist->cpus))
+ evlist->nr_mmaps = thread_map__nr(evlist->threads);
+ evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist,
+ int idx, int prot, int mask, int fd)
+{
+ evlist->mmap[idx].prev = 0;
+ evlist->mmap[idx].mask = mask;
+ evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+ MAP_SHARED, fd, 0);
+ if (evlist->mmap[idx].base == MAP_FAILED) {
+ pr_debug2("failed to mmap perf event ring buffer, error %d\n",
+ errno);
+ evlist->mmap[idx].base = NULL;
+ return -1;
+ }
+
+ perf_evlist__add_pollfd(evlist, fd);
+ return 0;
+}
+
+static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
+ int prot, int mask, int cpu, int thread,
+ int *output)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (*output == -1) {
+ *output = fd;
+ if (__perf_evlist__mmap(evlist, idx, prot, mask,
+ *output) < 0)
+ return -1;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+ return -1;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
+ int mask)
+{
+ int cpu, thread;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ pr_debug2("perf event ring buffer mmapped per cpu\n");
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ int output = -1;
+
+ for (thread = 0; thread < nr_threads; thread++) {
+ if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
+ cpu, thread, &output))
+ goto out_unmap;
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ __perf_evlist__munmap(evlist, cpu);
+ return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
+ int mask)
+{
+ int thread;
+ int nr_threads = thread_map__nr(evlist->threads);
+
+ pr_debug2("perf event ring buffer mmapped per thread\n");
+ for (thread = 0; thread < nr_threads; thread++) {
+ int output = -1;
+
+ if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
+ thread, &output))
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ for (thread = 0; thread < nr_threads; thread++)
+ __perf_evlist__munmap(evlist, thread);
+ return -1;
+}
+
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+	/* 512 KiB: default amount of unprivileged mlocked memory */
+ if (pages == UINT_MAX)
+ pages = (512 * 1024) / page_size;
+ else if (!is_power_of_2(pages))
+ return 0;
+
+ return (pages + 1) * page_size;
+}
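
As a worked example with the common 4 KiB page size: the UINT_MAX default becomes (512 * 1024) / 4096 = 128 data pages, and the "+ 1" accounts for the control page that holds struct perf_event_mmap_page, so the mapping ends up 129 pages (516 KiB) long.
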
+
+static long parse_pages_arg(const char *str, unsigned long min,
+ unsigned long max)
+{
+ unsigned long pages, val;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ if (str == NULL)
+ return -EINVAL;
+
+ val = parse_tag_value(str, tags);
+ if (val != (unsigned long) -1) {
+ /* we got file size value */
+ pages = PERF_ALIGN(val, page_size) / page_size;
+ } else {
+ /* we got pages count value */
+ char *eptr;
+ pages = strtoul(str, &eptr, 10);
+ if (*eptr != '\0')
+ return -EINVAL;
+ }
+
+ if (pages == 0 && min == 0) {
+ /* leave number of pages at 0 */
+ } else if (!is_power_of_2(pages)) {
+ /* round pages up to next power of 2 */
+ pages = next_pow2_l(pages);
+ if (!pages)
+ return -EINVAL;
+ pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
+ pages * page_size, pages);
+ }
+
+ if (pages > max)
+ return -EINVAL;
+
+ return pages;
+}
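
Two worked examples, again assuming 4 KiB pages: "32M" parses as a size of 33554432 bytes, which aligns to 8192 pages, already a power of two, so it is accepted as-is; a bare "100" is taken as a page count and rounded up to 128, with the informational message above printed.
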
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned int *mmap_pages = opt->value;
+ unsigned long max = UINT_MAX;
+ long pages;
+
+ if (max > SIZE_MAX / page_size)
+ max = SIZE_MAX / page_size;
+
+ pages = parse_pages_arg(str, 1, max);
+ if (pages < 0) {
+ pr_err("Invalid argument for --mmap_pages/-m\n");
+ return -1;
+ }
+
+ *mmap_pages = pages;
+ return 0;
+}
+
+/**
+ * perf_evlist__mmap - Create mmaps to receive events.
+ * @evlist: list of events
+ * @pages: map length in pages
+ * @overwrite: overwrite older events?
+ *
+ * If @overwrite is %false the user needs to signal event consumption using
+ * perf_evlist__mmap_consume(), which calls perf_mmap__write_tail() with
+ * the position of the last event read.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite)
+{
+ struct perf_evsel *evsel;
+ const struct cpu_map *cpus = evlist->cpus;
+ const struct thread_map *threads = evlist->threads;
+ int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+
+ if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+ return -ENOMEM;
+
+ if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ return -ENOMEM;
+
+ evlist->overwrite = overwrite;
+ evlist->mmap_len = perf_evlist__mmap_size(pages);
+ pr_debug("mmap size %zuB\n", evlist->mmap_len);
+ mask = evlist->mmap_len - page_size - 1;
+
+ evlist__for_each(evlist, evsel) {
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ evsel->sample_id == NULL &&
+ perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
+ return -ENOMEM;
+ }
+
+ if (cpu_map__empty(cpus))
+ return perf_evlist__mmap_per_thread(evlist, prot, mask);
+
+ return perf_evlist__mmap_per_cpu(evlist, prot, mask);
+}
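
A sketch of the typical call order around this function, assuming 'target' has been filled in by the caller and with error reporting trimmed (UINT_MAX pages triggers the 512 KiB default described above):

	struct perf_evlist *evlist = perf_evlist__new_default();

	if (evlist == NULL ||
	    perf_evlist__create_maps(evlist, &target) < 0 ||
	    perf_evlist__open(evlist) < 0 ||
	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
		return -1;
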
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
+{
+ evlist->threads = thread_map__new_str(target->pid, target->tid,
+ target->uid);
+
+ if (evlist->threads == NULL)
+ return -1;
+
+ if (target__uses_dummy_map(target))
+ evlist->cpus = cpu_map__dummy_new();
+ else
+ evlist->cpus = cpu_map__new(target->cpu_list);
+
+ if (evlist->cpus == NULL)
+ goto out_delete_threads;
+
+ return 0;
+
+out_delete_threads:
+ thread_map__delete(evlist->threads);
+ return -1;
+}
+
+int perf_evlist__apply_filters(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int err = 0;
+ const int ncpus = cpu_map__nr(evlist->cpus),
+ nthreads = thread_map__nr(evlist->threads);
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->filter == NULL)
+ continue;
+
+ err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
+{
+ struct perf_evsel *evsel;
+ int err = 0;
+ const int ncpus = cpu_map__nr(evlist->cpus),
+ nthreads = thread_map__nr(evlist->threads);
+
+ evlist__for_each(evlist, evsel) {
+ err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *pos;
+
+ if (evlist->nr_entries == 1)
+ return true;
+
+ if (evlist->id_pos < 0 || evlist->is_pos < 0)
+ return false;
+
+ evlist__for_each(evlist, pos) {
+ if (pos->id_pos != evlist->id_pos ||
+ pos->is_pos != evlist->is_pos)
+ return false;
+ }
+
+ return true;
+}
+
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->combined_sample_type)
+ return evlist->combined_sample_type;
+
+ evlist__for_each(evlist, evsel)
+ evlist->combined_sample_type |= evsel->attr.sample_type;
+
+ return evlist->combined_sample_type;
+}
+
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ evlist->combined_sample_type = 0;
+ return __perf_evlist__combined_sample_type(evlist);
+}
+
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+ u64 read_format = first->attr.read_format;
+ u64 sample_type = first->attr.sample_type;
+
+ evlist__for_each(evlist, pos) {
+ if (read_format != pos->attr.read_format)
+ return false;
+ }
+
+	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
+ if ((sample_type & PERF_SAMPLE_READ) &&
+ !(read_format & PERF_FORMAT_ID)) {
+ return false;
+ }
+
+ return true;
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ return first->attr.read_format;
+}
+
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ struct perf_sample *data;
+ u64 sample_type;
+ u16 size = 0;
+
+ if (!first->attr.sample_id_all)
+ goto out;
+
+ sample_type = first->attr.sample_type;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(data->tid) * 2;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(data->time);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(data->id);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(data->stream_id);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(data->cpu) * 2;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
+out:
+ return size;
+}
+
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+
+ evlist__for_each_continue(evlist, pos) {
+ if (first->attr.sample_id_all != pos->attr.sample_id_all)
+ return false;
+ }
+
+ return true;
+}
+
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ return first->attr.sample_id_all;
+}
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ evlist->selected = evsel;
+}
+
+void perf_evlist__close(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int ncpus = cpu_map__nr(evlist->cpus);
+ int nthreads = thread_map__nr(evlist->threads);
+ int n;
+
+ evlist__for_each_reverse(evlist, evsel) {
+ n = evsel->cpus ? evsel->cpus->nr : ncpus;
+ perf_evsel__close(evsel, n, nthreads);
+ }
+}
+
+int perf_evlist__open(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int err;
+
+ perf_evlist__update_id_pos(evlist);
+
+ evlist__for_each(evlist, evsel) {
+ err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
+ if (err < 0)
+ goto out_err;
+ }
+
+ return 0;
+out_err:
+ perf_evlist__close(evlist);
+ errno = -err;
+ return err;
+}
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
+ const char *argv[], bool pipe_output,
+ void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
+{
+ int child_ready_pipe[2], go_pipe[2];
+ char bf;
+
+ if (pipe(child_ready_pipe) < 0) {
+ perror("failed to create 'ready' pipe");
+ return -1;
+ }
+
+ if (pipe(go_pipe) < 0) {
+ perror("failed to create 'go' pipe");
+ goto out_close_ready_pipe;
+ }
+
+ evlist->workload.pid = fork();
+ if (evlist->workload.pid < 0) {
+ perror("failed to fork");
+ goto out_close_pipes;
+ }
+
+ if (!evlist->workload.pid) {
+ if (pipe_output)
+ dup2(2, 1);
+
+ signal(SIGTERM, SIG_DFL);
+
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &bf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ if (exec_error) {
+ union sigval val;
+
+ val.sival_int = errno;
+ if (sigqueue(getppid(), SIGUSR1, val))
+ perror(argv[0]);
+ } else
+ perror(argv[0]);
+ exit(-1);
+ }
+
+ if (exec_error) {
+ struct sigaction act = {
+ .sa_flags = SA_SIGINFO,
+ .sa_sigaction = exec_error,
+ };
+ sigaction(SIGUSR1, &act, NULL);
+ }
+
+ if (target__none(target))
+ evlist->threads->map[0] = evlist->workload.pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &bf, 1) == -1) {
+ perror("unable to read pipe");
+ goto out_close_pipes;
+ }
+
+ fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
+ evlist->workload.cork_fd = go_pipe[1];
+ close(child_ready_pipe[0]);
+ return 0;
+
+out_close_pipes:
+ close(go_pipe[0]);
+ close(go_pipe[1]);
+out_close_ready_pipe:
+ close(child_ready_pipe[0]);
+ close(child_ready_pipe[1]);
+ return -1;
+}
+
+int perf_evlist__start_workload(struct perf_evlist *evlist)
+{
+ if (evlist->workload.cork_fd > 0) {
+ char bf = 0;
+ int ret;
+ /*
+ * Remove the cork, let it rip!
+ */
+ ret = write(evlist->workload.cork_fd, &bf, 1);
+ if (ret < 0)
+ perror("enable to write to pipe");
+
+ close(evlist->workload.cork_fd);
+ return ret;
+ }
+
+ return 0;
+}
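
A hedged sketch of the cork pattern these two functions implement: the workload is forked early and parks in read() on the 'go' pipe, the counters are set up while it waits, and writing one byte to cork_fd finally releases the execvp():

	if (perf_evlist__prepare_workload(evlist, &target, argv,
					  false, NULL) < 0)
		return -1;
	/* open, mmap and enable events here; the child is still corked */
	perf_evlist__start_workload(evlist);
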
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
+ return perf_evsel__parse_sample(evsel, event, sample);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
+{
+ struct perf_evsel *evsel;
+ size_t printed = 0;
+
+ evlist__for_each(evlist, evsel) {
+ printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
+ perf_evsel__name(evsel));
+ }
+
+ return printed + fprintf(fp, "\n");
+}
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ char sbuf[128];
+
+ switch (err) {
+ case ENOENT:
+ scnprintf(buf, size, "%s",
+ "Error:\tUnable to find debugfs\n"
+ "Hint:\tWas your kernel was compiled with debugfs support?\n"
+ "Hint:\tIs the debugfs filesystem mounted?\n"
+ "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+ break;
+ case EACCES:
+ scnprintf(buf, size,
+ "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
+ "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+ debugfs_mountpoint, debugfs_mountpoint);
+ break;
+ default:
+ scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+ break;
+ }
+
+ return 0;
+}
+
+int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ int printed, value;
+ char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+
+ switch (err) {
+ case EACCES:
+ case EPERM:
+ printed = scnprintf(buf, size,
+ "Error:\t%s.\n"
+ "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
+
+ value = perf_event_paranoid();
+
+ printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
+
+ if (value >= 2) {
+ printed += scnprintf(buf + printed, size - printed,
+ "For your workloads it needs to be <= 1\nHint:\t");
+ }
+ printed += scnprintf(buf + printed, size - printed,
+ "For system wide tracing it needs to be set to -1");
+
+ printed += scnprintf(buf + printed, size - printed,
+ ".\nHint:\tThe current value is %d.", value);
+ break;
+ default:
+ scnprintf(buf, size, "%s", emsg);
+ break;
+ }
+
+ return 0;
+}
+
+void perf_evlist__to_front(struct perf_evlist *evlist,
+ struct perf_evsel *move_evsel)
+{
+ struct perf_evsel *evsel, *n;
+ LIST_HEAD(move);
+
+ if (move_evsel == perf_evlist__first(evlist))
+ return;
+
+ evlist__for_each_safe(evlist, n, evsel) {
+ if (evsel->leader == move_evsel->leader)
+ list_move_tail(&evsel->node, &move);
+ }
+
+ list_splice(&move, &evlist->entries);
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
new file mode 100644
index 00000000000..f5173cd6369
--- /dev/null
+++ b/tools/perf/util/evlist.h
@@ -0,0 +1,265 @@
+#ifndef __PERF_EVLIST_H
+#define __PERF_EVLIST_H 1
+
+#include <linux/list.h>
+#include <stdio.h>
+#include "../perf.h"
+#include "event.h"
+#include "evsel.h"
+#include "util.h"
+#include <unistd.h>
+
+struct pollfd;
+struct thread_map;
+struct cpu_map;
+struct record_opts;
+
+#define PERF_EVLIST__HLIST_BITS 8
+#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+
+struct perf_mmap {
+ void *base;
+ int mask;
+ unsigned int prev;
+ char event_copy[PERF_SAMPLE_MAX_SIZE];
+};
+
+struct perf_evlist {
+ struct list_head entries;
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ int nr_entries;
+ int nr_groups;
+ int nr_fds;
+ int nr_mmaps;
+ size_t mmap_len;
+ int id_pos;
+ int is_pos;
+ u64 combined_sample_type;
+ struct {
+ int cork_fd;
+ pid_t pid;
+ } workload;
+ bool overwrite;
+ struct perf_mmap *mmap;
+ struct pollfd *pollfd;
+ struct thread_map *threads;
+ struct cpu_map *cpus;
+ struct perf_evsel *selected;
+};
+
+struct perf_evsel_str_handler {
+ const char *name;
+ void *handler;
+};
+
+struct perf_evlist *perf_evlist__new(void);
+struct perf_evlist *perf_evlist__new_default(void);
+void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evlist__exit(struct perf_evlist *evlist);
+void perf_evlist__delete(struct perf_evlist *evlist);
+
+void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
+int perf_evlist__add_default(struct perf_evlist *evlist);
+int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
+ struct perf_event_attr *attrs, size_t nr_attrs);
+
+#define perf_evlist__add_default_attrs(evlist, array) \
+ __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+
+int perf_evlist__add_newtp(struct perf_evlist *evlist,
+ const char *sys, const char *name, void *handler);
+
+int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name);
+
+void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int cpu, int thread, u64 id);
+
+void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+
+int perf_evlist__open(struct perf_evlist *evlist);
+void perf_evlist__close(struct perf_evlist *evlist);
+
+void perf_evlist__set_id_pos(struct perf_evlist *evlist);
+bool perf_can_sample_identifier(void);
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
+int record_opts__config(struct record_opts *opts);
+
+int perf_evlist__prepare_workload(struct perf_evlist *evlist,
+ struct target *target,
+ const char *argv[], bool pipe_output,
+ void (*exec_error)(int signo, siginfo_t *info,
+ void *ucontext));
+int perf_evlist__start_workload(struct perf_evlist *evlist);
+
+int perf_evlist__parse_mmap_pages(const struct option *opt,
+ const char *str,
+ int unset);
+
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+ bool overwrite);
+void perf_evlist__munmap(struct perf_evlist *evlist);
+
+void perf_evlist__disable(struct perf_evlist *evlist);
+void perf_evlist__enable(struct perf_evlist *evlist);
+
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
+void perf_evlist__set_selected(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
+static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
+ struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ evlist->cpus = cpus;
+ evlist->threads = threads;
+}
+
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
+int perf_evlist__apply_filters(struct perf_evlist *evlist);
+
+void __perf_evlist__set_leader(struct list_head *list);
+void perf_evlist__set_leader(struct perf_evlist *evlist);
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample);
+
+bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
+
+void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
+ struct list_head *list,
+ int nr_entries);
+
+static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.prev, struct perf_evsel, node);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
+int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
+
+static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+{
+ struct perf_event_mmap_page *pc = mm->base;
+ int head = ACCESS_ONCE(pc->data_head);
+ rmb();
+ return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md,
+ unsigned long tail)
+{
+ struct perf_event_mmap_page *pc = md->base;
+
+ /*
+ * ensure all reads are done before we write the tail out.
+ */
+ mb();
+ pc->data_tail = tail;
+}
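
These two helpers are the user-space half of the ring-buffer protocol: the rmb() after loading data_head keeps the subsequent data reads from being reordered before the head load, and the mb() before storing data_tail ensures those reads have completed before the kernel can observe the space as reclaimable and overwrite it.
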
+
+bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
+void perf_evlist__to_front(struct perf_evlist *evlist,
+ struct perf_evsel *move_evsel);
+
+/**
+ * __evlist__for_each - iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each(list, evsel) \
+ list_for_each_entry(evsel, list, node)
+
+/**
+ * evlist__for_each - iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each(evlist, evsel) \
+ __evlist__for_each(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_continue - continue iteration thru all the evsels
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_continue(list, evsel) \
+ list_for_each_entry_continue(evsel, list, node)
+
+/**
+ * evlist__for_each_continue - continue iteration thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each_continue(evlist, evsel) \
+ __evlist__for_each_continue(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * @list: list_head instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_reverse(list, evsel) \
+ list_for_each_entry_reverse(evsel, list, node)
+
+/**
+ * evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ */
+#define evlist__for_each_reverse(evlist, evsel) \
+ __evlist__for_each_reverse(&(evlist)->entries, evsel)
+
+/**
+ * __evlist__for_each_safe - safely iterate thru all the evsels
+ * @list: list_head instance to iterate
+ * @tmp: struct evsel temp iterator
+ * @evsel: struct evsel iterator
+ */
+#define __evlist__for_each_safe(list, tmp, evsel) \
+ list_for_each_entry_safe(evsel, tmp, list, node)
+
+/**
+ * evlist__for_each_safe - safely iterate thru all the evsels
+ * @evlist: evlist instance to iterate
+ * @evsel: struct evsel iterator
+ * @tmp: struct evsel temp iterator
+ */
+#define evlist__for_each_safe(evlist, tmp, evsel) \
+ __evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
+
+#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
new file mode 100644
index 00000000000..8606175fe1e
--- /dev/null
+++ b/tools/perf/util/evsel.c
@@ -0,0 +1,2036 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <byteswap.h>
+#include <linux/bitops.h>
+#include <api/fs/debugfs.h>
+#include <traceevent/event-parse.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <sys/resource.h>
+#include "asm/bug.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "util.h"
+#include "cpumap.h"
+#include "thread_map.h"
+#include "target.h"
+#include "perf_regs.h"
+#include "debug.h"
+#include "trace-event.h"
+
+static struct {
+ bool sample_id_all;
+ bool exclude_guest;
+ bool mmap2;
+} perf_missing_features;
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+
+int __perf_evsel__sample_size(u64 sample_type)
+{
+ u64 mask = sample_type & PERF_SAMPLE_MASK;
+ int size = 0;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (mask & (1ULL << i))
+ size++;
+ }
+
+ size *= sizeof(u64);
+
+ return size;
+}
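
For example, sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME sets three bits within PERF_SAMPLE_MASK, so the fixed portion of each sample is 3 * sizeof(u64) = 24 bytes.
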
+
+/**
+ * __perf_evsel__calc_id_pos - calculate id_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct
+ * sample_event.
+ */
+static int __perf_evsel__calc_id_pos(u64 sample_type)
+{
+ int idx = 0;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 0;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_IP)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_ADDR)
+ idx += 1;
+
+ return idx;
+}
+
+/**
+ * __perf_evsel__calc_is_pos - calculate is_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position (counting backwards) of the event id
+ * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. if
+ * sample_id_all is used there is an id sample appended to non-sample events.
+ */
+static int __perf_evsel__calc_is_pos(u64 sample_type)
+{
+ int idx = 1;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 1;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ idx += 1;
+
+ return idx;
+}
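
A worked example for the pair: with sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, id_pos is 2 (TID and TIME each precede the id in a sample), while is_pos is 1 (walking back from the end of a non-sample event, nothing follows the id, since neither CPU nor STREAM_ID is set).
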
+
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
+{
+ evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
+ evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
+}
+
+void hists__init(struct hists *hists)
+{
+ memset(hists, 0, sizeof(*hists));
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in = &hists->entries_in_array[0];
+ hists->entries_collapsed = RB_ROOT;
+ hists->entries = RB_ROOT;
+ pthread_mutex_init(&hists->lock, NULL);
+}
+
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit)
+{
+ if (!(evsel->attr.sample_type & bit)) {
+ evsel->attr.sample_type |= bit;
+ evsel->sample_size += sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
+ }
+}
+
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit)
+{
+ if (evsel->attr.sample_type & bit) {
+ evsel->attr.sample_type &= ~bit;
+ evsel->sample_size -= sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
+ }
+}
+
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool can_sample_identifier)
+{
+ if (can_sample_identifier) {
+ perf_evsel__reset_sample_bit(evsel, ID);
+ perf_evsel__set_sample_bit(evsel, IDENTIFIER);
+ } else {
+ perf_evsel__set_sample_bit(evsel, ID);
+ }
+ evsel->attr.read_format |= PERF_FORMAT_ID;
+}
+
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx)
+{
+ evsel->idx = idx;
+ evsel->attr = *attr;
+ evsel->leader = evsel;
+ evsel->unit = "";
+ evsel->scale = 1.0;
+ INIT_LIST_HEAD(&evsel->node);
+ hists__init(&evsel->hists);
+ evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
+ perf_evsel__calc_id_pos(evsel);
+}
+
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL)
+ perf_evsel__init(evsel, attr, idx);
+
+ return evsel;
+}
+
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL) {
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
+ };
+
+ if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
+ goto out_free;
+
+ evsel->tp_format = trace_event__tp_format(sys, name);
+ if (evsel->tp_format == NULL)
+ goto out_free;
+
+ event_attr_init(&attr);
+ attr.config = evsel->tp_format->id;
+ attr.sample_period = 1;
+ perf_evsel__init(evsel, &attr, idx);
+ }
+
+ return evsel;
+
+out_free:
+ zfree(&evsel->name);
+ free(evsel);
+ return NULL;
+}
+
+const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+ "cycles",
+ "instructions",
+ "cache-references",
+ "cache-misses",
+ "branches",
+ "branch-misses",
+ "bus-cycles",
+ "stalled-cycles-frontend",
+ "stalled-cycles-backend",
+ "ref-cycles",
+};
+
+static const char *__perf_evsel__hw_name(u64 config)
+{
+ if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
+ return perf_evsel__hw_names[config];
+
+ return "unknown-hardware";
+}
+
+static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int colon = 0, r = 0;
+ struct perf_event_attr *attr = &evsel->attr;
+ bool exclude_guest_default = false;
+
+#define MOD_PRINT(context, mod) do { \
+ if (!attr->exclude_##context) { \
+ if (!colon) colon = ++r; \
+ r += scnprintf(bf + r, size - r, "%c", mod); \
+ } } while(0)
+
+ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
+ MOD_PRINT(kernel, 'k');
+ MOD_PRINT(user, 'u');
+ MOD_PRINT(hv, 'h');
+ exclude_guest_default = true;
+ }
+
+ if (attr->precise_ip) {
+ if (!colon)
+ colon = ++r;
+ r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
+ exclude_guest_default = true;
+ }
+
+ if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+ MOD_PRINT(host, 'H');
+ MOD_PRINT(guest, 'G');
+ }
+#undef MOD_PRINT
+ if (colon)
+ bf[colon - 1] = ':';
+ return r;
+}
+
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+ "cpu-clock",
+ "task-clock",
+ "page-faults",
+ "context-switches",
+ "cpu-migrations",
+ "minor-faults",
+ "major-faults",
+ "alignment-faults",
+ "emulation-faults",
+ "dummy",
+};
+
+static const char *__perf_evsel__sw_name(u64 config)
+{
+ if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
+ return perf_evsel__sw_names[config];
+ return "unknown-software";
+}
+
+static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
+{
+ int r;
+
+ r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
+
+ if (type & HW_BREAKPOINT_R)
+ r += scnprintf(bf + r, size - r, "r");
+
+ if (type & HW_BREAKPOINT_W)
+ r += scnprintf(bf + r, size - r, "w");
+
+ if (type & HW_BREAKPOINT_X)
+ r += scnprintf(bf + r, size - r, "x");
+
+ return r;
+}
+
+static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "L1-dcache", "l1-d", "l1d", "L1-data", },
+ { "L1-icache", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2", },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
+ { "node", },
+};
+
+const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
+};
+
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+ [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+};
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
+{
+ if (perf_evsel__hw_cache_stat[type] & COP(op))
+ return true; /* valid */
+ else
+ return false; /* invalid */
+}
+
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+ char *bf, size_t size)
+{
+ if (result) {
+ return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
+ perf_evsel__hw_cache_op[op][0],
+ perf_evsel__hw_cache_result[result][0]);
+ }
+
+ return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
+ perf_evsel__hw_cache_op[op][1]);
+}
+
+static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
+{
+ u8 op, result, type = (config >> 0) & 0xff;
+ const char *err = "unknown-ext-hardware-cache-type";
+
+	if (type >= PERF_COUNT_HW_CACHE_MAX)
+ goto out_err;
+
+ op = (config >> 8) & 0xff;
+ err = "unknown-ext-hardware-cache-op";
+	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ goto out_err;
+
+ result = (config >> 16) & 0xff;
+ err = "unknown-ext-hardware-cache-result";
+	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ goto out_err;
+
+ err = "invalid-cache";
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ goto out_err;
+
+ return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
+out_err:
+ return scnprintf(bf, size, "%s", err);
+}
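
A worked decode: config = PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16), i.e. 0x10000, passes all three range checks plus the validity table and prints as "L1-dcache-load-misses".
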
+
+static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
+ return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+ return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+const char *perf_evsel__name(struct perf_evsel *evsel)
+{
+ char bf[128];
+
+ if (evsel->name)
+ return evsel->name;
+
+ switch (evsel->attr.type) {
+ case PERF_TYPE_RAW:
+ perf_evsel__raw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_HARDWARE:
+ perf_evsel__hw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_HW_CACHE:
+ perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_SOFTWARE:
+ perf_evsel__sw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_TRACEPOINT:
+ scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
+ break;
+
+ case PERF_TYPE_BREAKPOINT:
+ perf_evsel__bp_name(evsel, bf, sizeof(bf));
+ break;
+
+ default:
+ scnprintf(bf, sizeof(bf), "unknown attr type: %d",
+ evsel->attr.type);
+ break;
+ }
+
+ evsel->name = strdup(bf);
+
+ return evsel->name ?: "unknown";
+}
+
+const char *perf_evsel__group_name(struct perf_evsel *evsel)
+{
+ return evsel->group_name ?: "anon group";
+}
+
+int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
+{
+ int ret;
+ struct perf_evsel *pos;
+ const char *group_name = perf_evsel__group_name(evsel);
+
+ ret = scnprintf(buf, size, "%s", group_name);
+
+ ret += scnprintf(buf + ret, size - ret, " { %s",
+ perf_evsel__name(evsel));
+
+ for_each_group_member(pos, evsel)
+ ret += scnprintf(buf + ret, size - ret, ", %s",
+ perf_evsel__name(pos));
+
+ ret += scnprintf(buf + ret, size - ret, " }");
+
+ return ret;
+}
+
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+ struct record_opts *opts)
+{
+ bool function = perf_evsel__is_function_event(evsel);
+ struct perf_event_attr *attr = &evsel->attr;
+
+ perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
+ if (opts->call_graph == CALLCHAIN_DWARF) {
+ if (!function) {
+ perf_evsel__set_sample_bit(evsel, REGS_USER);
+ perf_evsel__set_sample_bit(evsel, STACK_USER);
+ attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_stack_user = opts->stack_dump_size;
+ attr->exclude_callchain_user = 1;
+ } else {
+ pr_info("Cannot use DWARF unwind for function trace event,"
+ " falling back to framepointers.\n");
+ }
+ }
+
+ if (function) {
+ pr_info("Disabling user space callchains for function trace event.\n");
+ attr->exclude_callchain_user = 1;
+ }
+}
+
+/*
+ * The enable_on_exec/disabled value strategy:
+ *
+ * 1) For any type of traced program:
+ * - all independent events and group leaders are disabled
+ * - all group members are enabled
+ *
+ * Group members are ruled by group leaders. They need to
+ * be enabled, because the group scheduling relies on that.
+ *
+ * 2) For traced programs executed by perf:
+ * - all independent events and group leaders have
+ * enable_on_exec set
+ * - we don't specifically enable or disable any event during
+ * the record command
+ *
+ * Independent events and group leaders are initially disabled
+ * and get enabled by exec. Group members are ruled by group
+ * leaders as stated in 1).
+ *
+ * 3) For traced programs attached by perf (pid/tid):
+ * - we specifically enable or disable all events during
+ * the record command
+ *
+ *   When attaching events to an already running traced program we
+ *   enable/disable events explicitly, as there is no
+ *   initial exec call for the traced program.
+ */
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
+{
+ struct perf_evsel *leader = evsel->leader;
+ struct perf_event_attr *attr = &evsel->attr;
+ int track = !evsel->idx; /* only the first counter needs these */
+ bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
+
+ attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
+ attr->inherit = !opts->no_inherit;
+
+ perf_evsel__set_sample_bit(evsel, IP);
+ perf_evsel__set_sample_bit(evsel, TID);
+
+ if (evsel->sample_read) {
+ perf_evsel__set_sample_bit(evsel, READ);
+
+ /*
+		 * We need an ID even in the case of a single event, because
+		 * PERF_SAMPLE_READ processes ID-specific data.
+ */
+ perf_evsel__set_sample_id(evsel, false);
+
+ /*
+		 * Apply the group format only if we belong to a group
+		 * with more than one member.
+ */
+ if (leader->nr_members > 1) {
+ attr->read_format |= PERF_FORMAT_GROUP;
+ attr->inherit = 0;
+ }
+ }
+
+ /*
+	 * We give some events a default sampling interval, but keep
+	 * it a weak assumption that the user can override.
+ */
+ if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
+ opts->user_interval != ULLONG_MAX)) {
+ if (opts->freq) {
+ perf_evsel__set_sample_bit(evsel, PERIOD);
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+ }
+
+ /*
+	 * Disable sampling for all group members other than
+	 * the leader when the leader 'leads' the sampling.
+ */
+ if ((leader != evsel) && leader->sample_read) {
+ attr->sample_freq = 0;
+ attr->sample_period = 0;
+ }
+
+ if (opts->no_samples)
+ attr->sample_freq = 0;
+
+ if (opts->inherit_stat)
+ attr->inherit_stat = 1;
+
+ if (opts->sample_address) {
+ perf_evsel__set_sample_bit(evsel, ADDR);
+ attr->mmap_data = track;
+ }
+
+ if (opts->call_graph_enabled)
+ perf_evsel__config_callgraph(evsel, opts);
+
+ if (target__has_cpu(&opts->target))
+ perf_evsel__set_sample_bit(evsel, CPU);
+
+ if (opts->period)
+ perf_evsel__set_sample_bit(evsel, PERIOD);
+
+ if (!perf_missing_features.sample_id_all &&
+ (opts->sample_time || !opts->no_inherit ||
+ target__has_cpu(&opts->target) || per_cpu))
+ perf_evsel__set_sample_bit(evsel, TIME);
+
+ if (opts->raw_samples) {
+ perf_evsel__set_sample_bit(evsel, TIME);
+ perf_evsel__set_sample_bit(evsel, RAW);
+ perf_evsel__set_sample_bit(evsel, CPU);
+ }
+
+ if (opts->sample_address)
+ perf_evsel__set_sample_bit(evsel, DATA_SRC);
+
+ if (opts->no_buffering) {
+ attr->watermark = 0;
+ attr->wakeup_events = 1;
+ }
+ if (opts->branch_stack) {
+ perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+ attr->branch_sample_type = opts->branch_stack;
+ }
+
+ if (opts->sample_weight)
+ perf_evsel__set_sample_bit(evsel, WEIGHT);
+
+ attr->mmap = track;
+ attr->mmap2 = track && !perf_missing_features.mmap2;
+ attr->comm = track;
+
+ if (opts->sample_transaction)
+ perf_evsel__set_sample_bit(evsel, TRANSACTION);
+
+ /*
+ * XXX see the function comment above
+ *
+ * Disabling only independent events or group leaders,
+ * keeping group members enabled.
+ */
+ if (perf_evsel__is_group_leader(evsel))
+ attr->disabled = 1;
+
+ /*
+ * Setting enable_on_exec for independent events and
+	 * group leaders for traced programs executed by perf.
+ */
+ if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
+ !opts->initial_delay)
+ attr->enable_on_exec = 1;
+}
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ int cpu, thread;
+ evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
+
+ if (evsel->fd) {
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ FD(evsel, cpu, thread) = -1;
+ }
+ }
+ }
+
+ return evsel->fd != NULL ? 0 : -ENOMEM;
+}
+
+static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
+ int ioc, void *arg)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ int fd = FD(evsel, cpu, thread),
+ err = ioctl(fd, ioc, arg);
+
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_SET_FILTER,
+ (void *)filter);
+}
+
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_ENABLE,
+ 0);
+}
+
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
+ if (evsel->sample_id == NULL)
+ return -ENOMEM;
+
+ evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
+ if (evsel->id == NULL) {
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
+{
+ memset(evsel->counts, 0, (sizeof(*evsel->counts) +
+ (ncpus * sizeof(struct perf_counts_values))));
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+{
+ evsel->counts = zalloc((sizeof(*evsel->counts) +
+ (ncpus * sizeof(struct perf_counts_values))));
+ return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_fd(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->fd);
+ evsel->fd = NULL;
+}
+
+void perf_evsel__free_id(struct perf_evsel *evsel)
+{
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ zfree(&evsel->id);
+}
+
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < ncpus; cpu++)
+ for (thread = 0; thread < nthreads; ++thread) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+}
+
+void perf_evsel__free_counts(struct perf_evsel *evsel)
+{
+ zfree(&evsel->counts);
+}
+
+void perf_evsel__exit(struct perf_evsel *evsel)
+{
+ assert(list_empty(&evsel->node));
+ perf_evsel__free_fd(evsel);
+ perf_evsel__free_id(evsel);
+}
+
+void perf_evsel__delete(struct perf_evsel *evsel)
+{
+ perf_evsel__exit(evsel);
+ close_cgroup(evsel->cgrp);
+ zfree(&evsel->group_name);
+ if (evsel->tp_format)
+ pevent_free_format(evsel->tp_format);
+ zfree(&evsel->name);
+ free(evsel);
+}
+
+static inline void compute_deltas(struct perf_evsel *evsel,
+ int cpu,
+ struct perf_counts_values *count)
+{
+ struct perf_counts_values tmp;
+
+ if (!evsel->prev_raw_counts)
+ return;
+
+ if (cpu == -1) {
+ tmp = evsel->prev_raw_counts->aggr;
+ evsel->prev_raw_counts->aggr = *count;
+ } else {
+ tmp = evsel->prev_raw_counts->cpu[cpu];
+ evsel->prev_raw_counts->cpu[cpu] = *count;
+ }
+
+ count->val = count->val - tmp.val;
+ count->ena = count->ena - tmp.ena;
+ count->run = count->run - tmp.run;
+}
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread, bool scale)
+{
+ struct perf_counts_values count;
+ size_t nv = scale ? 3 : 1;
+
+ if (FD(evsel, cpu, thread) < 0)
+ return -EINVAL;
+
+ if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
+ return -ENOMEM;
+
+ if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
+ return -errno;
+
+ compute_deltas(evsel, cpu, &count);
+
+ if (scale) {
+ if (count.run == 0)
+ count.val = 0;
+ else if (count.run < count.ena)
+ count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
+ } else
+ count.ena = count.run = 0;
+
+ evsel->counts->cpu[cpu] = count;
+ return 0;
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel,
+ int ncpus, int nthreads, bool scale)
+{
+ size_t nv = scale ? 3 : 1;
+ int cpu, thread;
+ struct perf_counts_values *aggr = &evsel->counts->aggr, count;
+
+ aggr->val = aggr->ena = aggr->run = 0;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ if (FD(evsel, cpu, thread) < 0)
+ continue;
+
+ if (readn(FD(evsel, cpu, thread),
+ &count, nv * sizeof(u64)) < 0)
+ return -errno;
+
+ aggr->val += count.val;
+ if (scale) {
+ aggr->ena += count.ena;
+ aggr->run += count.run;
+ }
+ }
+ }
+
+ compute_deltas(evsel, -1, aggr);
+
+ evsel->counts->scaled = 0;
+ if (scale) {
+ if (aggr->run == 0) {
+ evsel->counts->scaled = -1;
+ aggr->val = 0;
+ return 0;
+ }
+
+ if (aggr->run < aggr->ena) {
+ evsel->counts->scaled = 1;
+ aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
+ }
+ } else
+ aggr->ena = aggr->run = 0;
+
+ return 0;
+}
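
A numeric example of the scaling step: a counter multiplexed onto the PMU half of the time might report val = 1000, ena = 200000 and run = 100000; since run < ena, the aggregate is extrapolated to 1000 * 200000 / 100000 + 0.5, i.e. 2000, and counts->scaled is set to 1 to mark the value as an estimate.
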
+
+static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int fd;
+
+ if (perf_evsel__is_group_leader(evsel))
+ return -1;
+
+ /*
+	 * The leader must already be processed/open;
+	 * if not, it's a bug.
+ */
+ BUG_ON(!leader->fd);
+
+ fd = FD(leader, cpu, thread);
+ BUG_ON(fd == -1);
+
+ return fd;
+}
+
+#define __PRINT_ATTR(fmt, cast, field) \
+ fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field)
+
+#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field)
+#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field)
+#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field)
+#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field)
+
+#define PRINT_ATTR2N(name1, field1, name2, field2) \
+ fprintf(fp, " %-19s %u %-19s %u\n", \
+ name1, attr->field1, name2, attr->field2)
+
+#define PRINT_ATTR2(field1, field2) \
+ PRINT_ATTR2N(#field1, field1, #field2, field2)
+
+static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
+{
+ size_t ret = 0;
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+ ret += fprintf(fp, "perf_event_attr:\n");
+
+ ret += PRINT_ATTR_U32(type);
+ ret += PRINT_ATTR_U32(size);
+ ret += PRINT_ATTR_X64(config);
+ ret += PRINT_ATTR_U64(sample_period);
+ ret += PRINT_ATTR_U64(sample_freq);
+ ret += PRINT_ATTR_X64(sample_type);
+ ret += PRINT_ATTR_X64(read_format);
+
+ ret += PRINT_ATTR2(disabled, inherit);
+ ret += PRINT_ATTR2(pinned, exclusive);
+ ret += PRINT_ATTR2(exclude_user, exclude_kernel);
+ ret += PRINT_ATTR2(exclude_hv, exclude_idle);
+ ret += PRINT_ATTR2(mmap, comm);
+ ret += PRINT_ATTR2(freq, inherit_stat);
+ ret += PRINT_ATTR2(enable_on_exec, task);
+ ret += PRINT_ATTR2(watermark, precise_ip);
+ ret += PRINT_ATTR2(mmap_data, sample_id_all);
+ ret += PRINT_ATTR2(exclude_host, exclude_guest);
+ ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
+ "excl.callchain_user", exclude_callchain_user);
+ ret += PRINT_ATTR_U32(mmap2);
+
+ ret += PRINT_ATTR_U32(wakeup_events);
+ ret += PRINT_ATTR_U32(wakeup_watermark);
+ ret += PRINT_ATTR_X32(bp_type);
+ ret += PRINT_ATTR_X64(bp_addr);
+ ret += PRINT_ATTR_X64(config1);
+ ret += PRINT_ATTR_U64(bp_len);
+ ret += PRINT_ATTR_X64(config2);
+ ret += PRINT_ATTR_X64(branch_sample_type);
+ ret += PRINT_ATTR_X64(sample_regs_user);
+ ret += PRINT_ATTR_U32(sample_stack_user);
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+
+ return ret;
+}
+
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ int cpu, thread;
+ unsigned long flags = 0;
+ int pid = -1, err;
+ enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+
+ if (evsel->fd == NULL &&
+ perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+ return -ENOMEM;
+
+ if (evsel->cgrp) {
+ flags = PERF_FLAG_PID_CGROUP;
+ pid = evsel->cgrp->fd;
+ }
+
+fallback_missing_features:
+ if (perf_missing_features.mmap2)
+ evsel->attr.mmap2 = 0;
+ if (perf_missing_features.exclude_guest)
+ evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+retry_sample_id:
+ if (perf_missing_features.sample_id_all)
+ evsel->attr.sample_id_all = 0;
+
+ if (verbose >= 2)
+ perf_event_attr__fprintf(&evsel->attr, stderr);
+
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+
+ for (thread = 0; thread < threads->nr; thread++) {
+ int group_fd;
+
+ if (!evsel->cgrp)
+ pid = threads->map[thread];
+
+ group_fd = get_group_fd(evsel, cpu, thread);
+retry_open:
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pid, cpus->map[cpu], group_fd, flags);
+
+ FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+ pid,
+ cpus->map[cpu],
+ group_fd, flags);
+ if (FD(evsel, cpu, thread) < 0) {
+ err = -errno;
+ pr_debug2("sys_perf_event_open failed, error %d\n",
+ err);
+ goto try_fallback;
+ }
+ set_rlimit = NO_CHANGE;
+ }
+ }
+
+ return 0;
+
+try_fallback:
+ /*
+ * perf stat needs between 5 and 22 fds per CPU. When we run out
+	 * of them, try to increase the limits.
+ */
+ if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
+ struct rlimit l;
+ int old_errno = errno;
+
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (set_rlimit == NO_CHANGE)
+ l.rlim_cur = l.rlim_max;
+ else {
+ l.rlim_cur = l.rlim_max + 1000;
+ l.rlim_max = l.rlim_cur;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+ set_rlimit++;
+ errno = old_errno;
+ goto retry_open;
+ }
+ }
+ errno = old_errno;
+ }
+
+ if (err != -EINVAL || cpu > 0 || thread > 0)
+ goto out_close;
+
+ if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
+ perf_missing_features.mmap2 = true;
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.exclude_guest &&
+ (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+ perf_missing_features.exclude_guest = true;
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.sample_id_all) {
+ perf_missing_features.sample_id_all = true;
+ goto retry_sample_id;
+ }
+
+out_close:
+ do {
+ while (--thread >= 0) {
+ close(FD(evsel, cpu, thread));
+ FD(evsel, cpu, thread) = -1;
+ }
+ thread = threads->nr;
+ } while (--cpu >= 0);
+ return err;
+}
+
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ if (evsel->fd == NULL)
+ return;
+
+ perf_evsel__close_fd(evsel, ncpus, nthreads);
+ perf_evsel__free_fd(evsel);
+}
+
+static struct {
+ struct cpu_map map;
+ int cpus[1];
+} empty_cpu_map = {
+ .map.nr = 1,
+ .cpus = { -1, },
+};
+
+static struct {
+ struct thread_map map;
+ int threads[1];
+} empty_thread_map = {
+ .map.nr = 1,
+ .threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads)
+{
+ if (cpus == NULL) {
+ /* Work around old compiler warnings about strict aliasing */
+ cpus = &empty_cpu_map.map;
+ }
+
+ if (threads == NULL)
+ threads = &empty_thread_map.map;
+
+ return __perf_evsel__open(evsel, cpus, threads);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus)
+{
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
+
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads)
+{
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
+}
+
+static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
+ const union perf_event *event,
+ struct perf_sample *sample)
+{
+ u64 type = evsel->attr.sample_type;
+ const u64 *array = event->sample.array;
+ bool swapped = evsel->needs_swap;
+ union u64_swap u;
+
+ array += ((event->header.size -
+ sizeof(event->header)) / sizeof(u64)) - 1;
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ sample->id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ }
+
+ sample->cpu = u.val32[0];
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ sample->stream_id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ sample->id = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ sample->time = *array;
+ array--;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ sample->pid = u.val32[0];
+ sample->tid = u.val32[1];
+ array--;
+ }
+
+ return 0;
+}
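
For instance, with sample_id_all set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, a non-sample event carries three trailing u64s laid out as { pid/tid, time, id }; the parser above starts at the last word and walks backwards, picking up the id, then the time, then the packed pid/tid pair.
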
+
+static inline bool overflow(const void *endp, u16 max_size, const void *offset,
+ u64 size)
+{
+ return size > max_size || offset + size > endp;
+}
+
+#define OVERFLOW_CHECK(offset, size, max_size) \
+ do { \
+ if (overflow(endp, (max_size), (offset), (size))) \
+ return -EFAULT; \
+ } while (0)
+
+#define OVERFLOW_CHECK_u64(offset) \
+ OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
+
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *data)
+{
+ u64 type = evsel->attr.sample_type;
+ bool swapped = evsel->needs_swap;
+ const u64 *array;
+ u16 max_size = event->header.size;
+ const void *endp = (void *)event + max_size;
+ u64 sz;
+
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ memset(data, 0, sizeof(*data));
+ data->cpu = data->pid = data->tid = -1;
+ data->stream_id = data->id = data->time = -1ULL;
+ data->period = evsel->attr.sample_period;
+ data->weight = 0;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ if (!evsel->attr.sample_id_all)
+ return 0;
+ return perf_evsel__parse_id_sample(evsel, event, data);
+ }
+
+ array = event->sample.array;
+
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
+ if (evsel->sample_size + sizeof(event->header) > event->header.size)
+ return -EFAULT;
+
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+
+ data->pid = u.val32[0];
+ data->tid = u.val32[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ data->addr = 0;
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+
+ u.val64 = *array;
+ if (swapped) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ }
+
+ data->cpu = u.val32[0];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ u64 read_format = evsel->attr.read_format;
+
+ OVERFLOW_CHECK_u64(array);
+ if (read_format & PERF_FORMAT_GROUP)
+ data->read.group.nr = *array;
+ else
+ data->read.one.value = *array;
+
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_enabled = *array;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_running = *array;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ const u64 max_group_nr = UINT64_MAX /
+ sizeof(struct sample_read_value);
+
+ if (data->read.group.nr > max_group_nr)
+ return -EFAULT;
+ sz = data->read.group.nr *
+ sizeof(struct sample_read_value);
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->read.group.values =
+ (struct sample_read_value *)array;
+ array = (void *)array + sz;
+ } else {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.id = *array;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
+
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ if (data->callchain->nr > max_callchain_nr)
+ return -EFAULT;
+ sz = data->callchain->nr * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ OVERFLOW_CHECK_u64(array);
+ u.val64 = *array;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /* undo swap of u64, then swap on individual u32s */
+ u.val64 = bswap_64(u.val64);
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ }
+ data->raw_size = u.val32[0];
+ array = (void *)array + sizeof(u32);
+
+ OVERFLOW_CHECK(array, data->raw_size, max_size);
+ data->raw_data = (void *)array;
+ array = (void *)array + data->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ const u64 max_branch_nr = UINT64_MAX /
+ sizeof(struct branch_entry);
+
+ OVERFLOW_CHECK_u64(array);
+ data->branch_stack = (struct branch_stack *)array++;
+
+ if (data->branch_stack->nr > max_branch_nr)
+ return -EFAULT;
+ sz = data->branch_stack->nr * sizeof(struct branch_entry);
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ OVERFLOW_CHECK_u64(array);
+ data->user_regs.abi = *array;
+ array++;
+
+ if (data->user_regs.abi) {
+ u64 mask = evsel->attr.sample_regs_user;
+
+ sz = hweight_long(mask) * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->user_regs.mask = mask;
+ data->user_regs.regs = (u64 *)array;
+ array = (void *)array + sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ OVERFLOW_CHECK_u64(array);
+ sz = *array++;
+
+ data->user_stack.offset = ((char *)(array - 1)
+ - (char *) event);
+
+ if (!sz) {
+ data->user_stack.size = 0;
+ } else {
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->user_stack.data = (char *)array;
+ array = (void *)array + sz;
+ OVERFLOW_CHECK_u64(array);
+ data->user_stack.size = *array++;
+ if (WARN_ONCE(data->user_stack.size > sz,
+ "user stack dump failure\n"))
+ return -EFAULT;
+ }
+ }
+
+ data->weight = 0;
+ if (type & PERF_SAMPLE_WEIGHT) {
+ OVERFLOW_CHECK_u64(array);
+ data->weight = *array;
+ array++;
+ }
+
+ data->data_src = PERF_MEM_DATA_SRC_NONE;
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ OVERFLOW_CHECK_u64(array);
+ data->data_src = *array;
+ array++;
+ }
+
+ data->transaction = 0;
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ OVERFLOW_CHECK_u64(array);
+ data->transaction = *array;
+ array++;
+ }
+
+ return 0;
+}
+
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 read_format)
+{
+ size_t sz, result = sizeof(struct sample_event);
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_IP)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TIME)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ADDR)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_CPU)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_READ) {
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ result += sizeof(u64);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ result += sizeof(u32);
+ result += sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ result += sizeof(u64);
+ if (sz) {
+ result += sz;
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TRANSACTION)
+ result += sizeof(u64);
+
+ return result;
+}
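+
+/*
+ * Note: this sizing routine must stay field-for-field in sync with
+ * perf_evsel__parse_sample() above and perf_event__synthesize_sample()
+ * below; adding a PERF_SAMPLE_* bit to only one of them corrupts
+ * round-tripped events.
+ */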
+
+int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 read_format,
+ const struct perf_sample *sample,
+ bool swapped)
+{
+ u64 *array;
+ size_t sz;
+ /*
+ * used for cross-endian analysis. See git commit 65014ab3
+ * for why this goofiness is needed.
+ */
+ union u64_swap u;
+
+ array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_IP) {
+ *array = sample->ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u.val32[0] = sample->pid;
+ u.val32[1] = sample->tid;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ *array = sample->time;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ *array = sample->addr;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ *array = sample->id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ *array = sample->stream_id;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u.val32[0] = sample->cpu;
+ if (swapped) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ *array = sample->period;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ *array = sample->read.group.nr;
+ else
+ *array = sample->read.one.value;
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ *array = sample->read.time_enabled;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ *array = sample->read.time_running;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ memcpy(array, sample->read.group.values, sz);
+ array = (void *)array + sz;
+ } else {
+ *array = sample->read.one.id;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ memcpy(array, sample->callchain, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u.val32[0] = sample->raw_size;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array = (void *)array + sizeof(u32);
+
+ memcpy(array, sample->raw_data, sample->raw_size);
+ array = (void *)array + sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ memcpy(array, sample->branch_stack, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ *array++ = sample->user_regs.abi;
+ sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ memcpy(array, sample->user_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ *array++ = sz;
+ if (sz) {
+ memcpy(array, sample->user_stack.data, sz);
+ array = (void *)array + sz;
+ *array++ = sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ *array = sample->weight;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ *array = sample->data_src;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TRANSACTION) {
+ *array = sample->transaction;
+ array++;
+ }
+
+ return 0;
+}
+
+struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
+{
+ return pevent_find_field(evsel->tp_format, name);
+}
+
+void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ int offset;
+
+ if (!field)
+ return NULL;
+
+ offset = field->offset;
+
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(sample->raw_data + field->offset);
+ offset &= 0xffff;
+ }
+
+ return sample->raw_data + offset;
+}
+
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ void *ptr;
+ u64 value;
+
+ if (!field)
+ return 0;
+
+ ptr = sample->raw_data + field->offset;
+
+ switch (field->size) {
+ case 1:
+ return *(u8 *)ptr;
+ case 2:
+ value = *(u16 *)ptr;
+ break;
+ case 4:
+ value = *(u32 *)ptr;
+ break;
+ case 8:
+ value = *(u64 *)ptr;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!evsel->needs_swap)
+ return value;
+
+ switch (field->size) {
+ case 2:
+ return bswap_16(value);
+ case 4:
+ return bswap_32(value);
+ case 8:
+ return bswap_64(value);
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (!*first) {
+ ret += fprintf(fp, ",");
+ } else {
+ ret += fprintf(fp, ":");
+ *first = false;
+ }
+
+ va_start(args, fmt);
+ ret += vfprintf(fp, fmt, args);
+ va_end(args);
+ return ret;
+}
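+
+/*
+ * Output convention: the first field printed after the event name is
+ * introduced with ':' and every later one with ',', so the verbose
+ * listing reads as "name: field, field, ..." on a single line.
+ */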
+
+static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
+{
+ if (value == 0)
+ return 0;
+
+ return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
+}
+
+#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
+
+struct bit_names {
+ int bit;
+ const char *name;
+};
+
+static int bits__fprintf(FILE *fp, const char *field, u64 value,
+ struct bit_names *bits, bool *first)
+{
+ int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
+ bool first_bit = true;
+
+ do {
+ if (value & bits[i].bit) {
+ printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
+ first_bit = false;
+ }
+ } while (bits[++i].name != NULL);
+
+ return printed;
+}
+
+static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
+ bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
+ bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
+ bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER),
+ { .name = NULL, }
+ };
+#undef bit_name
+ return bits__fprintf(fp, "sample_type", value, bits, first);
+}
+
+static int read_format__fprintf(FILE *fp, bool *first, u64 value)
+{
+#define bit_name(n) { PERF_FORMAT_##n, #n }
+ struct bit_names bits[] = {
+ bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
+ bit_name(ID), bit_name(GROUP),
+ { .name = NULL, }
+ };
+#undef bit_name
+ return bits__fprintf(fp, "read_format", value, bits, first);
+}
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+ struct perf_attr_details *details, FILE *fp)
+{
+ bool first = true;
+ int printed = 0;
+
+ if (details->event_group) {
+ struct perf_evsel *pos;
+
+ if (!perf_evsel__is_group_leader(evsel))
+ return 0;
+
+ if (evsel->nr_members > 1)
+ printed += fprintf(fp, "%s{", evsel->group_name ?: "");
+
+ printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+ for_each_group_member(pos, evsel)
+ printed += fprintf(fp, ",%s", perf_evsel__name(pos));
+
+ if (evsel->nr_members > 1)
+ printed += fprintf(fp, "}");
+ goto out;
+ }
+
+ printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+
+ if (details->verbose || details->freq) {
+ printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
+ (u64)evsel->attr.sample_freq);
+ }
+
+ if (details->verbose) {
+ if_print(type);
+ if_print(config);
+ if_print(config1);
+ if_print(config2);
+ if_print(size);
+ printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
+ if (evsel->attr.read_format)
+ printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
+ if_print(disabled);
+ if_print(inherit);
+ if_print(pinned);
+ if_print(exclusive);
+ if_print(exclude_user);
+ if_print(exclude_kernel);
+ if_print(exclude_hv);
+ if_print(exclude_idle);
+ if_print(mmap);
+ if_print(mmap2);
+ if_print(comm);
+ if_print(freq);
+ if_print(inherit_stat);
+ if_print(enable_on_exec);
+ if_print(task);
+ if_print(watermark);
+ if_print(precise_ip);
+ if_print(mmap_data);
+ if_print(sample_id_all);
+ if_print(exclude_host);
+ if_print(exclude_guest);
+ if_print(__reserved_1);
+ if_print(wakeup_events);
+ if_print(bp_type);
+ if_print(branch_sample_type);
+ }
+out:
+ fputc('\n', fp);
+ return ++printed;
+}
+
+bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+ char *msg, size_t msgsize)
+{
+ if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
+ evsel->attr.type == PERF_TYPE_HARDWARE &&
+ evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ /*
+ * If it's cycles then fall back to hrtimer based
+ * cpu-clock-tick sw counter, which is always available even if
+ * no PMU support.
+ *
+ * PPC returns ENXIO until 2.6.37 (behavior changed with commit
+ * b0a873e).
+ */
+ scnprintf(msg, msgsize, "%s",
+"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
+
+ evsel->attr.type = PERF_TYPE_SOFTWARE;
+ evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
+
+ zfree(&evsel->name);
+ return true;
+ }
+
+ return false;
+}
+
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
+ int err, char *msg, size_t size)
+{
+ switch (err) {
+ case EPERM:
+ case EACCES:
+ return scnprintf(msg, size,
+ "You may not have permission to collect %sstats.\n"
+ "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
+ " -1 - Not paranoid at all\n"
+ " 0 - Disallow raw tracepoint access for unpriv\n"
+ " 1 - Disallow cpu events for unpriv\n"
+ " 2 - Disallow kernel profiling for unpriv",
+ target->system_wide ? "system-wide " : "");
+ case ENOENT:
+ return scnprintf(msg, size, "The %s event is not supported.",
+ perf_evsel__name(evsel));
+ case EMFILE:
+ return scnprintf(msg, size, "%s",
+ "Too many events are opened.\n"
+ "Try again after reducing the number of events.");
+ case ENODEV:
+ if (target->cpu_list)
+ return scnprintf(msg, size, "%s",
+ "No such device - did you specify an out-of-range profile CPU?\n");
+ break;
+ case EOPNOTSUPP:
+ if (evsel->attr.precise_ip)
+ return scnprintf(msg, size, "%s",
+ "\'precise\' request may not be supported. Try removing 'p' modifier.");
+#if defined(__i386__) || defined(__x86_64__)
+ if (evsel->attr.type == PERF_TYPE_HARDWARE)
+ return scnprintf(msg, size, "%s",
+ "No hardware sampling interrupt available.\n"
+ "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
+#endif
+ break;
+ default:
+ break;
+ }
+
+ return scnprintf(msg, size,
+			 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
+ "/bin/dmesg may provide additional information.\n"
+ "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
+ err, strerror(err), perf_evsel__name(evsel));
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
new file mode 100644
index 00000000000..a52e9a5bb2d
--- /dev/null
+++ b/tools/perf/util/evsel.h
@@ -0,0 +1,365 @@
+#ifndef __PERF_EVSEL_H
+#define __PERF_EVSEL_H 1
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include "xyarray.h"
+#include "cgroup.h"
+#include "hist.h"
+#include "symbol.h"
+
+struct perf_counts_values {
+ union {
+ struct {
+ u64 val;
+ u64 ena;
+ u64 run;
+ };
+ u64 values[3];
+ };
+};
+
+struct perf_counts {
+ s8 scaled;
+ struct perf_counts_values aggr;
+ struct perf_counts_values cpu[];
+};
+
+struct perf_evsel;
+
+/*
+ * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
+ * more than one entry in the evlist.
+ */
+struct perf_sample_id {
+ struct hlist_node node;
+ u64 id;
+ struct perf_evsel *evsel;
+
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
+};
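+
+/*
+ * A minimal lookup sketch (an assumption about the surrounding code: the
+ * evlist keeps these nodes in an hlist hashed by id):
+ *
+ *	hash = hash_64(sample->id, hash_bits);
+ *	hlist_for_each_entry(sid, &heads[hash], node)
+ *		if (sid->id == sample->id)
+ *			return sid->evsel;
+ */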
+
+/** struct perf_evsel - event selector
+ *
+ * @name - Can be set to retain the original event name passed by the user,
+ * so that when showing results in tools such as 'perf stat', we
+ * show the name used, not some alias.
+ * @id_pos: the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
+ * struct sample_event
+ * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
+ * is used there is an id sample appended to non-sample events
+ */
+struct perf_evsel {
+ struct list_head node;
+ struct perf_event_attr attr;
+ char *filter;
+ struct xyarray *fd;
+ struct xyarray *sample_id;
+ u64 *id;
+ struct perf_counts *counts;
+ struct perf_counts *prev_raw_counts;
+ int idx;
+ u32 ids;
+ struct hists hists;
+ char *name;
+ double scale;
+ const char *unit;
+ struct event_format *tp_format;
+ union {
+ void *priv;
+ off_t id_offset;
+ };
+ struct cgroup_sel *cgrp;
+ void *handler;
+ struct cpu_map *cpus;
+ unsigned int sample_size;
+ int id_pos;
+ int is_pos;
+ bool supported;
+ bool needs_swap;
+ /* parse modifier helper */
+ int exclude_GH;
+ int nr_members;
+ int sample_read;
+ struct perf_evsel *leader;
+ char *group_name;
+};
+
+union u64_swap {
+ u64 val64;
+ u32 val32[2];
+};
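+
+/*
+ * Type-punning helper for cross-endian traces: a sampled u64 can be
+ * byte-swapped as a whole and then re-read as its two embedded u32
+ * halves (pid/tid, cpu, raw_size) without aliasing trouble.
+ */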
+
+#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
+
+struct cpu_map;
+struct thread_map;
+struct perf_evlist;
+struct record_opts;
+
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
+
+static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
+{
+ return perf_evsel__new_idx(attr, 0);
+}
+
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
+
+static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
+{
+ return perf_evsel__newtp_idx(sys, name, 0);
+}
+
+struct event_format *event_format__new(const char *sys, const char *name);
+
+void perf_evsel__init(struct perf_evsel *evsel,
+ struct perf_event_attr *attr, int idx);
+void perf_evsel__exit(struct perf_evsel *evsel);
+void perf_evsel__delete(struct perf_evsel *evsel);
+
+void perf_evsel__config(struct perf_evsel *evsel,
+ struct record_opts *opts);
+
+int __perf_evsel__sample_size(u64 sample_type);
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
+
+#define PERF_EVSEL__MAX_ALIASES 8
+
+extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
+extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+ char *bf, size_t size);
+const char *perf_evsel__name(struct perf_evsel *evsel);
+
+const char *perf_evsel__group_name(struct perf_evsel *evsel);
+int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
+
+int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__free_fd(struct perf_evsel *evsel);
+void perf_evsel__free_id(struct perf_evsel *evsel);
+void perf_evsel__free_counts(struct perf_evsel *evsel);
+void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit);
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+ enum perf_event_sample_format bit);
+
+#define perf_evsel__set_sample_bit(evsel, bit) \
+ __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
+
+#define perf_evsel__reset_sample_bit(evsel, bit) \
+ __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
+
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool use_sample_identifier);
+
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter);
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+ struct cpu_map *cpus);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+ struct thread_map *threads);
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+ struct thread_map *threads);
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
+
+struct perf_sample;
+
+void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name);
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name);
+
+static inline char *perf_evsel__strval(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *name)
+{
+ return perf_evsel__rawptr(evsel, sample, name);
+}
+
+struct format_field;
+
+struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
+
+#define perf_evsel__match(evsel, t, c) \
+ (evsel->attr.type == PERF_TYPE_##t && \
+ evsel->attr.config == PERF_COUNT_##c)
+
+static inline bool perf_evsel__match2(struct perf_evsel *e1,
+ struct perf_evsel *e2)
+{
+ return (e1->attr.type == e2->attr.type) &&
+ (e1->attr.config == e2->attr.config);
+}
+
+#define perf_evsel__cmp(a, b) \
+ ((a) && \
+ (b) && \
+ (a)->attr.type == (b)->attr.type && \
+ (a)->attr.config == (b)->attr.config)
+
+int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread, bool scale);
+
+/**
+ * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
+ int cpu, int thread)
+{
+ return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
+}
+
+/**
+ * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
+ *
+ * @evsel - event selector to read value
+ * @cpu - CPU of interest
+ * @thread - thread of interest
+ */
+static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
+ int cpu, int thread)
+{
+ return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
+}
+
+int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
+ bool scale);
+
+/**
+ * perf_evsel__read - Read the aggregate results on all CPUs
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
+{
+ return __perf_evsel__read(evsel, ncpus, nthreads, false);
+}
+
+/**
+ * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
+ *
+ * @evsel - event selector to read value
+ * @ncpus - Number of cpus affected, from zero
+ * @nthreads - Number of threads affected, from zero
+ */
+static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
+ int ncpus, int nthreads)
+{
+ return __perf_evsel__read(evsel, ncpus, nthreads, true);
+}
+
+void hists__init(struct hists *hists);
+
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample);
+
+static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
+{
+ return list_entry(evsel->node.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
+{
+ return list_entry(evsel->node.prev, struct perf_evsel, node);
+}
+
+/**
+ * perf_evsel__is_group_leader - Return whether given evsel is a leader event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if @evsel is a group leader or a stand-alone event
+ */
+static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
+{
+ return evsel->leader == evsel;
+}
+
+/**
+ * perf_evsel__is_group_event - Return whether given evsel is a group event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true iff event group view is enabled and @evsel is an actual
+ * group leader which has other members in the group
+ */
+static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
+{
+ if (!symbol_conf.event_group)
+ return false;
+
+ return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
+}
+
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if event is function trace event
+ */
+static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
+ return evsel->name &&
+ !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
+struct perf_attr_details {
+ bool freq;
+ bool verbose;
+ bool event_group;
+};
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+ struct perf_attr_details *details, FILE *fp);
+
+bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+ char *msg, size_t msgsize);
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
+ int err, char *msg, size_t size);
+
+static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
+{
+ return evsel->idx - evsel->leader->idx;
+}
+
+#define for_each_group_member(_evsel, _leader) \
+for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
+ (_evsel) && (_evsel)->leader == (_leader); \
+ (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
+
+#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
new file mode 100644
index 00000000000..7adf4ad15d8
--- /dev/null
+++ b/tools/perf/util/exec_cmd.c
@@ -0,0 +1,148 @@
+#include "cache.h"
+#include "exec_cmd.h"
+#include "quote.h"
+
+#include <string.h>
+
+#define MAX_ARGS 32
+
+static const char *argv_exec_path;
+static const char *argv0_path;
+
+const char *system_path(const char *path)
+{
+ static const char *prefix = PREFIX;
+ struct strbuf d = STRBUF_INIT;
+
+ if (is_absolute_path(path))
+ return path;
+
+ strbuf_addf(&d, "%s/%s", prefix, path);
+ path = strbuf_detach(&d, NULL);
+ return path;
+}
+
+const char *perf_extract_argv0_path(const char *argv0)
+{
+ const char *slash;
+
+ if (!argv0 || !*argv0)
+ return NULL;
+ slash = argv0 + strlen(argv0);
+
+ while (argv0 <= slash && !is_dir_sep(*slash))
+ slash--;
+
+ if (slash >= argv0) {
+ argv0_path = strndup(argv0, slash - argv0);
+ return argv0_path ? slash + 1 : NULL;
+ }
+
+ return argv0;
+}
+
+void perf_set_argv_exec_path(const char *exec_path)
+{
+ argv_exec_path = exec_path;
+ /*
+ * Propagate this setting to external programs.
+ */
+ setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
+}
+
+
+/* Returns the highest-priority location to look for perf programs. */
+const char *perf_exec_path(void)
+{
+ const char *env;
+
+ if (argv_exec_path)
+ return argv_exec_path;
+
+ env = getenv(EXEC_PATH_ENVIRONMENT);
+	if (env && *env)
+		return env;
+
+ return system_path(PERF_EXEC_PATH);
+}
+
+static void add_path(struct strbuf *out, const char *path)
+{
+ if (path && *path) {
+ if (is_absolute_path(path))
+ strbuf_addstr(out, path);
+ else
+ strbuf_addstr(out, make_nonrelative_path(path));
+
+ strbuf_addch(out, PATH_SEP);
+ }
+}
+
+void setup_path(void)
+{
+ const char *old_path = getenv("PATH");
+ struct strbuf new_path = STRBUF_INIT;
+
+ add_path(&new_path, perf_exec_path());
+ add_path(&new_path, argv0_path);
+
+ if (old_path)
+ strbuf_addstr(&new_path, old_path);
+ else
+ strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin");
+
+ setenv("PATH", new_path.buf, 1);
+
+ strbuf_release(&new_path);
+}
+
+static const char **prepare_perf_cmd(const char **argv)
+{
+ int argc;
+ const char **nargv;
+
+ for (argc = 0; argv[argc]; argc++)
+ ; /* just counting */
+	nargv = malloc(sizeof(*nargv) * (argc + 2));
+	if (!nargv)
+		return NULL;
+
+ nargv[0] = "perf";
+ for (argc = 0; argv[argc]; argc++)
+ nargv[argc + 1] = argv[argc];
+ nargv[argc + 1] = NULL;
+ return nargv;
+}
+
+int execv_perf_cmd(const char **argv)
+{
+	const char **nargv = prepare_perf_cmd(argv);
+
+	if (!nargv)
+		return -1;
+
+	/* execvp() can only ever return if it fails */
+	execvp("perf", (char **)nargv);
+
+ free(nargv);
+ return -1;
+}
+
+
+int execl_perf_cmd(const char *cmd,...)
+{
+ int argc;
+ const char *argv[MAX_ARGS + 1];
+ const char *arg;
+ va_list param;
+
+ va_start(param, cmd);
+ argv[0] = cmd;
+ argc = 1;
+ while (argc < MAX_ARGS) {
+ arg = argv[argc++] = va_arg(param, char *);
+ if (!arg)
+ break;
+ }
+ va_end(param);
+ if (MAX_ARGS <= argc)
+ return error("too many args to run %s", cmd);
+
+ argv[argc] = NULL;
+ return execv_perf_cmd(argv);
+}
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
new file mode 100644
index 00000000000..bc4b915963f
--- /dev/null
+++ b/tools/perf/util/exec_cmd.h
@@ -0,0 +1,12 @@
+#ifndef __PERF_EXEC_CMD_H
+#define __PERF_EXEC_CMD_H
+
+extern void perf_set_argv_exec_path(const char *exec_path);
+extern const char *perf_extract_argv0_path(const char *path);
+extern const char *perf_exec_path(void);
+extern void setup_path(void);
+extern int execv_perf_cmd(const char **argv); /* NULL terminated */
+extern int execl_perf_cmd(const char *cmd, ...);
+extern const char *system_path(const char *path);
+
+#endif /* __PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
new file mode 100755
index 00000000000..36a885d2cd2
--- /dev/null
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+echo "/* Automatically generated by $0 */
+struct cmdname_help
+{
+ char name[16];
+ char help[80];
+};
+
+static struct cmdname_help common_cmds[] = {"
+
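+# For every command marked "common" in command-list.txt, pull the one-line
+# summary out of the NAME section of its Documentation page and emit it as
+# a {"cmd", "help"} initializer.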
+sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+
+echo "#ifdef HAVE_LIBELF_SUPPORT"
+sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "#endif /* HAVE_LIBELF_SUPPORT */"
+echo "};"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
new file mode 100644
index 00000000000..893f8e2df92
--- /dev/null
+++ b/tools/perf/util/header.c
@@ -0,0 +1,3071 @@
+#include "util.h"
+#include <sys/types.h>
+#include <byteswap.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <sys/utsname.h>
+
+#include "evlist.h"
+#include "evsel.h"
+#include "header.h"
+#include "../perf.h"
+#include "trace-event.h"
+#include "session.h"
+#include "symbol.h"
+#include "debug.h"
+#include "cpumap.h"
+#include "pmu.h"
+#include "vdso.h"
+#include "strbuf.h"
+#include "build-id.h"
+#include "data.h"
+
+static bool no_buildid_cache = false;
+
+static u32 header_argc;
+static const char **header_argv;
+
+/*
+ * magic2 = "PERFILE2"
+ * must be a numerical value so that its memory layout
+ * depends on the writer's endianness. That way we are
+ * able to detect the endianness when reading the
+ * perf.data file back.
+ *
+ * we check for legacy (PERFFILE) format.
+ */
+static const char *__perf_magic1 = "PERFFILE";
+static const u64 __perf_magic2 = 0x32454c4946524550ULL;
+static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
+
+#define PERF_MAGIC __perf_magic2
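+
+/*
+ * A minimal detection sketch (assumes the first u64 of the file header
+ * was already read into "magic"): a native-endian file matches
+ * __perf_magic2 directly, while a file written on an opposite-endian
+ * machine matches the byte-swapped constant and marks every following
+ * field as needing bswap:
+ *
+ *	if (magic == __perf_magic2)
+ *		ph->needs_swap = false;
+ *	else if (magic == __perf_magic2_sw)
+ *		ph->needs_swap = true;
+ *	else
+ *		return -1;	// neither PERFILE2 byte order
+ */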
+
+struct perf_file_attr {
+ struct perf_event_attr attr;
+ struct perf_file_section ids;
+};
+
+void perf_header__set_feat(struct perf_header *header, int feat)
+{
+ set_bit(feat, header->adds_features);
+}
+
+void perf_header__clear_feat(struct perf_header *header, int feat)
+{
+ clear_bit(feat, header->adds_features);
+}
+
+bool perf_header__has_feat(const struct perf_header *header, int feat)
+{
+ return test_bit(feat, header->adds_features);
+}
+
+static int do_write(int fd, const void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ return -errno;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+#define NAME_ALIGN 64
+
+static int write_padded(int fd, const void *bf, size_t count,
+ size_t count_aligned)
+{
+ static const char zero_buf[NAME_ALIGN];
+ int err = do_write(fd, bf, count);
+
+ if (!err)
+ err = do_write(fd, zero_buf, count_aligned - count);
+
+ return err;
+}
+
+static int do_write_string(int fd, const char *str)
+{
+ u32 len, olen;
+ int ret;
+
+ olen = strlen(str) + 1;
+ len = PERF_ALIGN(olen, NAME_ALIGN);
+
+ /* write len, incl. \0 */
+ ret = do_write(fd, &len, sizeof(len));
+ if (ret < 0)
+ return ret;
+
+ return write_padded(fd, str, olen, len);
+}
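+
+/*
+ * Worked example: writing "perf" emits a u32 len of 64 (5 bytes
+ * including the '\0', rounded up to NAME_ALIGN), then the 5 string
+ * bytes, then 59 zero bytes of padding, so the string payload is
+ * always a whole multiple of NAME_ALIGN bytes.
+ */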
+
+static char *do_read_string(int fd, struct perf_header *ph)
+{
+ ssize_t sz, ret;
+ u32 len;
+ char *buf;
+
+ sz = readn(fd, &len, sizeof(len));
+ if (sz < (ssize_t)sizeof(len))
+ return NULL;
+
+ if (ph->needs_swap)
+ len = bswap_32(len);
+
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ ret = readn(fd, buf, len);
+ if (ret == (ssize_t)len) {
+ /*
+ * strings are padded by zeroes
+ * thus the actual strlen of buf
+ * may be less than len
+ */
+ return buf;
+ }
+
+ free(buf);
+ return NULL;
+}
+
+int
+perf_header__set_cmdline(int argc, const char **argv)
+{
+ int i;
+
+ /*
+ * If header_argv has already been set, do not override it.
+ * This allows a command to set the cmdline, parse args and
+ * then call another builtin function that implements a
+ * command -- e.g., cmd_kvm calling cmd_record.
+ */
+ if (header_argv)
+ return 0;
+
+ header_argc = (u32)argc;
+
+ /* do not include NULL termination */
+ header_argv = calloc(argc, sizeof(char *));
+ if (!header_argv)
+ return -ENOMEM;
+
+ /*
+ * must copy argv contents because it gets moved
+ * around during option parsing
+ */
+ for (i = 0; i < argc ; i++)
+ header_argv[i] = argv[i];
+
+ return 0;
+}
+
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
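+
+/*
+ * Note on the macro shape: the dangling "else" keeps the caller-supplied
+ * loop body attached to the same "if", so the macro can be followed by a
+ * plain statement or block exactly like list_for_each_entry() itself.
+ */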
+
+static int write_buildid(const char *name, size_t name_len, u8 *build_id,
+ pid_t pid, u16 misc, int fd)
+{
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ len = name_len + 1;
+ len = PERF_ALIGN(len, NAME_ALIGN);
+
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+
+ return write_padded(fd, name, name_len + 1, len);
+}
+
+static int __dsos__write_buildid_table(struct list_head *head,
+ struct machine *machine,
+ pid_t pid, u16 misc, int fd)
+{
+ char nm[PATH_MAX];
+ struct dso *pos;
+
+ dsos__for_each_with_build_id(pos, head) {
+ int err;
+ const char *name;
+ size_t name_len;
+
+ if (!pos->hit)
+ continue;
+
+ if (is_vdso_map(pos->short_name)) {
+ name = (char *) VDSO__MAP_NAME;
+ name_len = sizeof(VDSO__MAP_NAME) + 1;
+ } else if (dso__is_kcore(pos)) {
+ machine__mmap_name(machine, nm, sizeof(nm));
+ name = nm;
+ name_len = strlen(nm) + 1;
+ } else {
+ name = pos->long_name;
+ name_len = pos->long_name_len + 1;
+ }
+
+ err = write_buildid(name, name_len, pos->build_id,
+ pid, misc, fd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int machine__write_buildid_table(struct machine *machine, int fd)
+{
+ int err;
+ u16 kmisc = PERF_RECORD_MISC_KERNEL,
+ umisc = PERF_RECORD_MISC_USER;
+
+ if (!machine__is_host(machine)) {
+ kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ umisc = PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+ machine->pid, kmisc, fd);
+ if (err == 0)
+ err = __dsos__write_buildid_table(&machine->user_dsos, machine,
+ machine->pid, umisc, fd);
+ return err;
+}
+
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
+{
+ struct perf_session *session = container_of(header,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int err = machine__write_buildid_table(&session->machines.host, fd);
+
+ if (err)
+ return err;
+
+ for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ err = machine__write_buildid_table(pos, fd);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms, bool is_vdso)
+{
+ const size_t size = PATH_MAX;
+ char *realname, *filename = zalloc(size),
+ *linkname = zalloc(size), *targetname;
+ int len, err = -1;
+ bool slash = is_kallsyms || is_vdso;
+
+ if (is_kallsyms) {
+ if (symbol_conf.kptr_restrict) {
+ pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
+ err = 0;
+ goto out_free;
+ }
+ realname = (char *) name;
+	} else {
+		realname = realpath(name, NULL);
+	}
+
+ if (realname == NULL || filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = scnprintf(filename, size, "%s%s%s",
+ debugdir, slash ? "/" : "",
+ is_vdso ? VDSO__MAP_NAME : realname);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+ snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+ if (access(filename, F_OK)) {
+ if (is_kallsyms) {
+ if (copyfile("/proc/kallsyms", filename))
+ goto out_free;
+ } else if (link(realname, filename) && copyfile(name, filename))
+ goto out_free;
+ }
+
+ len = scnprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ if (!is_kallsyms)
+ free(realname);
+ free(filename);
+ free(linkname);
+ return err;
+}
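+
+/*
+ * Resulting cache layout (paths illustrative): for an sbuild_id of
+ * "abcdef...", the object is stored as <debugdir>/<name>/abcdef... and
+ * a relative symlink <debugdir>/.build-id/ab/cdef... points back at it
+ * through "../..", keeping the whole tree relocatable.
+ */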
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+ const char *name, const char *debugdir,
+ bool is_kallsyms, bool is_vdso)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+ return build_id_cache__add_s(sbuild_id, debugdir, name,
+ is_kallsyms, is_vdso);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = zalloc(size),
+ *linkname = zalloc(size);
+ int err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, sbuild_id + 2);
+
+ if (access(linkname, F_OK))
+ goto out_free;
+
+ if (readlink(linkname, filename, size - 1) < 0)
+ goto out_free;
+
+ if (unlink(linkname))
+ goto out_free;
+
+ /*
+ * Since the link is relative, we must make it absolute:
+ */
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, filename);
+
+ if (unlink(linkname))
+ goto out_free;
+
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int dso__cache_build_id(struct dso *dso, struct machine *machine,
+ const char *debugdir)
+{
+ bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+ bool is_vdso = is_vdso_map(dso->short_name);
+ const char *name = dso->long_name;
+ char nm[PATH_MAX];
+
+ if (dso__is_kcore(dso)) {
+ is_kallsyms = true;
+ machine__mmap_name(machine, nm, sizeof(nm));
+ name = nm;
+ }
+ return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
+ debugdir, is_kallsyms, is_vdso);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head,
+ struct machine *machine, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, machine, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
+{
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+ debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
+ return ret;
+}
+
+static int perf_session__cache_build_ids(struct perf_session *session)
+{
+ struct rb_node *nd;
+ int ret;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ ret = machine__cache_build_ids(&session->machines.host, debugdir);
+
+ for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__cache_build_ids(pos, debugdir);
+ }
+ return ret ? -1 : 0;
+}
+
+static bool machine__read_build_ids(struct machine *machine, bool with_hits)
+{
+ bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
+ return ret;
+}
+
+static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
+{
+ struct rb_node *nd;
+ bool ret = machine__read_build_ids(&session->machines.host, with_hits);
+
+ for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__read_build_ids(pos, with_hits);
+ }
+
+ return ret;
+}
+
+static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
+{
+ return read_tracing_data(fd, &evlist->entries);
+}
+
+
+static int write_build_id(int fd, struct perf_header *h,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_session *session;
+ int err;
+
+ session = container_of(h, struct perf_session, header);
+
+ if (!perf_session__read_build_ids(session, true))
+ return -1;
+
+ err = dsos__write_buildid_table(h, fd);
+ if (err < 0) {
+ pr_debug("failed to write buildid table\n");
+ return err;
+ }
+ if (!no_buildid_cache)
+ perf_session__cache_build_ids(session);
+
+ return 0;
+}
+
+static int write_hostname(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.nodename);
+}
+
+static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.release);
+}
+
+static int write_arch(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct utsname uts;
+ int ret;
+
+ ret = uname(&uts);
+ if (ret < 0)
+ return -1;
+
+ return do_write_string(fd, uts.machine);
+}
+
+static int write_version(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return do_write_string(fd, perf_version_string);
+}
+
+static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+#ifndef CPUINFO_PROC
+#define CPUINFO_PROC NULL
+#endif
+ FILE *file;
+ char *buf = NULL;
+ char *s, *p;
+ const char *search = CPUINFO_PROC;
+ size_t len = 0;
+ int ret = -1;
+
+ if (!search)
+ return -1;
+
+ file = fopen("/proc/cpuinfo", "r");
+ if (!file)
+ return -1;
+
+ while (getline(&buf, &len, file) > 0) {
+ ret = strncmp(buf, search, strlen(search));
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ goto done;
+
+ s = buf;
+
+ p = strchr(buf, ':');
+ if (p && *(p+1) == ' ' && *(p+2))
+ s = p + 2;
+ p = strchr(s, '\n');
+ if (p)
+ *p = '\0';
+
+ /* squash extra space characters (branding string) */
+ p = s;
+ while (*p) {
+ if (isspace(*p)) {
+ char *r = p + 1;
+ char *q = r;
+ *p = ' ';
+ while (*q && isspace(*q))
+ q++;
+ if (q != (p+1))
+ while ((*r++ = *q++));
+ }
+ p++;
+ }
+ ret = do_write_string(fd, s);
+done:
+ free(buf);
+ fclose(file);
+ return ret;
+}
+
+static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ long nr;
+ u32 nrc, nra;
+ int ret;
+
+ nr = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr < 0)
+ return -1;
+
+ nrc = (u32)(nr & UINT_MAX);
+
+ nr = sysconf(_SC_NPROCESSORS_ONLN);
+ if (nr < 0)
+ return -1;
+
+ nra = (u32)(nr & UINT_MAX);
+
+ ret = do_write(fd, &nrc, sizeof(nrc));
+ if (ret < 0)
+ return ret;
+
+ return do_write(fd, &nra, sizeof(nra));
+}
+
+static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ u32 nre, nri, sz;
+ int ret;
+
+ nre = evlist->nr_entries;
+
+ /*
+ * write number of events
+ */
+ ret = do_write(fd, &nre, sizeof(nre));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * size of perf_event_attr struct
+ */
+ sz = (u32)sizeof(evsel->attr);
+ ret = do_write(fd, &sz, sizeof(sz));
+ if (ret < 0)
+ return ret;
+
+ evlist__for_each(evlist, evsel) {
+ ret = do_write(fd, &evsel->attr, sz);
+ if (ret < 0)
+ return ret;
+ /*
+ * write number of unique id per event
+ * there is one id per instance of an event
+ *
+ * copy into an nri to be independent of the
+		 * type of ids.
+ */
+ nri = evsel->ids;
+ ret = do_write(fd, &nri, sizeof(nri));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * write event string as passed on cmdline
+ */
+ ret = do_write_string(fd, perf_evsel__name(evsel));
+ if (ret < 0)
+ return ret;
+ /*
+ * write unique ids for this event
+ */
+ ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char buf[MAXPATHLEN];
+ char proc[32];
+ u32 i, n;
+ int ret;
+
+ /*
+	 * actual path to perf binary
+ */
+ sprintf(proc, "/proc/%d/exe", getpid());
+ ret = readlink(proc, buf, sizeof(buf));
+ if (ret <= 0)
+ return -1;
+
+ /* readlink() does not add null termination */
+ buf[ret] = '\0';
+
+ /* account for binary path */
+ n = header_argc + 1;
+
+ ret = do_write(fd, &n, sizeof(n));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(fd, buf);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0 ; i < header_argc; i++) {
+ ret = do_write_string(fd, header_argv[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+#define CORE_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+
+struct cpu_topo {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+static int build_cpu_topo(struct cpu_topo *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ sprintf(filename, CORE_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto try_threads;
+
+ sret = getline(&buf, &len, fp);
+ fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+ ret = 0;
+
+try_threads:
+ sprintf(filename, THRD_SIB_FMT, cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
+ }
+ ret = 0;
+done:
+	if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+static void free_cpu_topo(struct cpu_topo *tp)
+{
+ u32 i;
+
+ if (!tp)
+ return;
+
+ for (i = 0 ; i < tp->core_sib; i++)
+ zfree(&tp->core_siblings[i]);
+
+ for (i = 0 ; i < tp->thread_sib; i++)
+ zfree(&tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+static struct cpu_topo *build_cpu_topology(void)
+{
+ struct cpu_topo *tp;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+
+ ncpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (ncpus < 0)
+ return NULL;
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ return NULL;
+
+ tp = addr;
+
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ ret = build_cpu_topo(tp, i);
+ if (ret < 0)
+ break;
+ }
+ if (ret) {
+ free_cpu_topo(tp);
+ tp = NULL;
+ }
+ return tp;
+}
+
+static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct cpu_topo *tp;
+ u32 i;
+ int ret;
+
+ tp = build_cpu_topology();
+ if (!tp)
+ return -1;
+
+ ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < tp->core_sib; i++) {
+ ret = do_write_string(fd, tp->core_siblings[i]);
+ if (ret < 0)
+ goto done;
+ }
+ ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ ret = do_write_string(fd, tp->thread_siblings[i]);
+ if (ret < 0)
+ break;
+ }
+done:
+ free_cpu_topo(tp);
+ return ret;
+}
+
+
+
+static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char *buf = NULL;
+ FILE *fp;
+ size_t len = 0;
+ int ret = -1, n;
+ uint64_t mem;
+
+ fp = fopen("/proc/meminfo", "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ ret = strncmp(buf, "MemTotal:", 9);
+ if (!ret)
+ break;
+ }
+ if (!ret) {
+ n = sscanf(buf, "%*s %"PRIu64, &mem);
+ if (n == 1)
+ ret = do_write(fd, &mem, sizeof(mem));
+ }
+ free(buf);
+ fclose(fp);
+ return ret;
+}
+
+static int write_topo_node(int fd, int node)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ FILE *fp;
+	u64 mem_total = 0, mem_free = 0, mem;
+ int ret = -1;
+
+ sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
+ goto done;
+ if (!strcmp(field, "MemTotal:"))
+ mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ mem_free = mem;
+ }
+
+ fclose(fp);
+ fp = NULL;
+
+ ret = do_write(fd, &mem_total, sizeof(u64));
+ if (ret)
+ goto done;
+
+ ret = do_write(fd, &mem_free, sizeof(u64));
+ if (ret)
+ goto done;
+
+ ret = -1;
+ sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
+
+ fp = fopen(str, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ ret = do_write_string(fd, buf);
+done:
+ free(buf);
+ if (fp)
+ fclose(fp);
+ return ret;
+}
+
+static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char *buf = NULL;
+ size_t len = 0;
+ FILE *fp;
+ struct cpu_map *node_map = NULL;
+ char *c;
+ u32 nr, i, j;
+ int ret = -1;
+
+ fp = fopen("/sys/devices/system/node/online", "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto done;
+
+ nr = (u32)node_map->nr;
+
+ ret = do_write(fd, &nr, sizeof(nr));
+ if (ret < 0)
+ goto done;
+
+ for (i = 0; i < nr; i++) {
+ j = (u32)node_map->map[i];
+ ret = do_write(fd, &j, sizeof(j));
+ if (ret < 0)
+ break;
+
+ ret = write_topo_node(fd, i);
+ if (ret < 0)
+ break;
+ }
+done:
+ free(buf);
+ fclose(fp);
+ free(node_map);
+ return ret;
+}
+
+/*
+ * File format:
+ *
+ * struct pmu_mappings {
+ * u32 pmu_num;
+ * struct pmu_map {
+ * u32 type;
+ * char name[];
+ * }[pmu_num];
+ * };
+ */
+
+static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *pmu = NULL;
+ off_t offset = lseek(fd, 0, SEEK_CUR);
+ __u32 pmu_num = 0;
+ int ret;
+
+ /* write real pmu_num later */
+ ret = do_write(fd, &pmu_num, sizeof(pmu_num));
+ if (ret < 0)
+ return ret;
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name)
+ continue;
+ pmu_num++;
+
+ ret = do_write(fd, &pmu->type, sizeof(pmu->type));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(fd, pmu->name);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
+ /* discard all */
+ lseek(fd, offset, SEEK_SET);
+ return -1;
+ }
+
+ return 0;
+}
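+
+/*
+ * A matching reader sketch (assumptions: fd is positioned at this
+ * section and do_read_string() mirrors do_write_string()):
+ *
+ *	readn(fd, &pmu_num, sizeof(pmu_num));
+ *	while (pmu_num--) {
+ *		readn(fd, &type, sizeof(type));
+ *		name = do_read_string(fd, ph);
+ *		...
+ *	}
+ */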
+
+/*
+ * File format:
+ *
+ * struct group_descs {
+ * u32 nr_groups;
+ * struct group_desc {
+ * char name[];
+ * u32 leader_idx;
+ * u32 nr_members;
+ * }[nr_groups];
+ * };
+ */
+static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist)
+{
+ u32 nr_groups = evlist->nr_groups;
+ struct perf_evsel *evsel;
+ int ret;
+
+ ret = do_write(fd, &nr_groups, sizeof(nr_groups));
+ if (ret < 0)
+ return ret;
+
+ evlist__for_each(evlist, evsel) {
+ if (perf_evsel__is_group_leader(evsel) &&
+ evsel->nr_members > 1) {
+ const char *name = evsel->group_name ?: "{anon_group}";
+ u32 leader_idx = evsel->idx;
+ u32 nr_members = evsel->nr_members;
+
+ ret = do_write_string(fd, name);
+ if (ret < 0)
+ return ret;
+
+ ret = do_write(fd, &leader_idx, sizeof(leader_idx));
+ if (ret < 0)
+ return ret;
+
+ ret = do_write(fd, &nr_members, sizeof(nr_members));
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * default get_cpuid(): nothing gets recorded
+ * actual implementation must be in arch/$(ARCH)/util/header.c
+ */
+int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
+ size_t sz __maybe_unused)
+{
+ return -1;
+}
+
+static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ char buffer[64];
+ int ret;
+
+	ret = get_cpuid(buffer, sizeof(buffer));
+	if (ret)
+		return -1;
+
+	return do_write_string(fd, buffer);
+}
+
+static int write_branch_stack(int fd __maybe_unused,
+ struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+
+static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# hostname : %s\n", ph->env.hostname);
+}
+
+static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# os release : %s\n", ph->env.os_release);
+}
+
+static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# arch : %s\n", ph->env.arch);
+}
+
+static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
+}
+
+static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
+ fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
+}
+
+static void print_version(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# perf version : %s\n", ph->env.version);
+}
+
+static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ int nr, i;
+ char *str;
+
+ nr = ph->env.nr_cmdline;
+ str = ph->env.cmdline;
+
+ fprintf(fp, "# cmdline : ");
+
+ for (i = 0; i < nr; i++) {
+ fprintf(fp, "%s ", str);
+ str += strlen(str) + 1;
+ }
+ fputc('\n', fp);
+}
+
+static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ int nr, i;
+ char *str;
+
+ nr = ph->env.nr_sibling_cores;
+ str = ph->env.sibling_cores;
+
+ for (i = 0; i < nr; i++) {
+ fprintf(fp, "# sibling cores : %s\n", str);
+ str += strlen(str) + 1;
+ }
+
+ nr = ph->env.nr_sibling_threads;
+ str = ph->env.sibling_threads;
+
+ for (i = 0; i < nr; i++) {
+ fprintf(fp, "# sibling threads : %s\n", str);
+ str += strlen(str) + 1;
+ }
+}
+
+static void free_event_desc(struct perf_evsel *events)
+{
+ struct perf_evsel *evsel;
+
+ if (!events)
+ return;
+
+ for (evsel = events; evsel->attr.size; evsel++) {
+ zfree(&evsel->name);
+ zfree(&evsel->id);
+ }
+
+ free(events);
+}
+
+static struct perf_evsel *
+read_event_desc(struct perf_header *ph, int fd)
+{
+ struct perf_evsel *evsel, *events = NULL;
+ u64 *id;
+ void *buf = NULL;
+ u32 nre, sz, nr, i, j;
+ ssize_t ret;
+ size_t msz;
+
+ /* number of events */
+ ret = readn(fd, &nre, sizeof(nre));
+ if (ret != (ssize_t)sizeof(nre))
+ goto error;
+
+ if (ph->needs_swap)
+ nre = bswap_32(nre);
+
+ ret = readn(fd, &sz, sizeof(sz));
+ if (ret != (ssize_t)sizeof(sz))
+ goto error;
+
+ if (ph->needs_swap)
+ sz = bswap_32(sz);
+
+ /* buffer to hold the on-file attr struct */
+ buf = malloc(sz);
+ if (!buf)
+ goto error;
+
+ /* the last event terminates with evsel->attr.size == 0: */
+ events = calloc(nre + 1, sizeof(*events));
+ if (!events)
+ goto error;
+
+ msz = sizeof(evsel->attr);
+ if (sz < msz)
+ msz = sz;
+
+ for (i = 0, evsel = events; i < nre; evsel++, i++) {
+ evsel->idx = i;
+
+ /*
+ * Must read the entire on-file attr struct to
+ * stay in sync with its layout.
+ */
+ ret = readn(fd, buf, sz);
+ if (ret != (ssize_t)sz)
+ goto error;
+
+ if (ph->needs_swap)
+ perf_event__attr_swap(buf);
+
+ memcpy(&evsel->attr, buf, msz);
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != (ssize_t)sizeof(nr))
+ goto error;
+
+ if (ph->needs_swap) {
+ nr = bswap_32(nr);
+ evsel->needs_swap = true;
+ }
+
+ evsel->name = do_read_string(fd, ph);
+
+ if (!nr)
+ continue;
+
+ id = calloc(nr, sizeof(*id));
+ if (!id)
+ goto error;
+ evsel->ids = nr;
+ evsel->id = id;
+
+ for (j = 0 ; j < nr; j++) {
+ ret = readn(fd, id, sizeof(*id));
+ if (ret != (ssize_t)sizeof(*id))
+ goto error;
+ if (ph->needs_swap)
+ *id = bswap_64(*id);
+ id++;
+ }
+ }
+out:
+ free(buf);
+ return events;
+error:
+ if (events)
+ free_event_desc(events);
+ events = NULL;
+ goto out;
+}
+
+static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
+{
+ struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
+ u32 j;
+ u64 *id;
+
+ if (!events) {
+ fprintf(fp, "# event desc: not available or unable to read\n");
+ return;
+ }
+
+ for (evsel = events; evsel->attr.size; evsel++) {
+ fprintf(fp, "# event : name = %s, ", evsel->name);
+
+ fprintf(fp, "type = %d, config = 0x%"PRIx64
+ ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
+ evsel->attr.type,
+ (u64)evsel->attr.config,
+ (u64)evsel->attr.config1,
+ (u64)evsel->attr.config2);
+
+ fprintf(fp, ", excl_usr = %d, excl_kern = %d",
+ evsel->attr.exclude_user,
+ evsel->attr.exclude_kernel);
+
+ fprintf(fp, ", excl_host = %d, excl_guest = %d",
+ evsel->attr.exclude_host,
+ evsel->attr.exclude_guest);
+
+ fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
+
+ fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2);
+ fprintf(fp, ", attr_mmap = %d", evsel->attr.mmap);
+ fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data);
+ if (evsel->ids) {
+ fprintf(fp, ", id = {");
+ for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
+ if (j)
+ fputc(',', fp);
+ fprintf(fp, " %"PRIu64, *id);
+ }
+ fprintf(fp, " }");
+ }
+
+ fputc('\n', fp);
+ }
+
+ free_event_desc(events);
+}
+
+static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
+}
+
+static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ u32 nr, c, i;
+ char *str, *tmp;
+ uint64_t mem_total, mem_free;
+
+ /* nr nodes */
+ nr = ph->env.nr_numa_nodes;
+ str = ph->env.numa_nodes;
+
+ for (i = 0; i < nr; i++) {
+ /* node number */
+ c = strtoul(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ mem_total = strtoull(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ mem_free = strtoull(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
+ " free = %"PRIu64" kB\n",
+ c, mem_total, mem_free);
+
+ str = tmp + 1;
+ fprintf(fp, "# node%u cpu list : %s\n", c, str);
+
+ str += strlen(str) + 1;
+ }
+ return;
+error:
+ fprintf(fp, "# numa topology : not available\n");
+}
+
+static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
+}
+
+static void print_branch_stack(struct perf_header *ph __maybe_unused,
+ int fd __maybe_unused, FILE *fp)
+{
+ fprintf(fp, "# contains samples with branch stack\n");
+}
+
+static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ const char *delimiter = "# pmu mappings: ";
+ char *str, *tmp;
+ u32 pmu_num;
+ u32 type;
+
+ pmu_num = ph->env.nr_pmu_mappings;
+ if (!pmu_num) {
+ fprintf(fp, "# pmu mappings: not available\n");
+ return;
+ }
+
+ str = ph->env.pmu_mappings;
+
+ while (pmu_num) {
+ type = strtoul(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
+
+ delimiter = ", ";
+ str += strlen(str) + 1;
+ pmu_num--;
+ }
+
+ fprintf(fp, "\n");
+ return;
+
+error:
+ fprintf(fp, "# pmu mappings: unable to read\n");
+}
+
+static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ struct perf_session *session;
+ struct perf_evsel *evsel;
+ u32 nr = 0;
+
+ session = container_of(ph, struct perf_session, header);
+
+ evlist__for_each(session->evlist, evsel) {
+ if (perf_evsel__is_group_leader(evsel) &&
+ evsel->nr_members > 1) {
+ fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
+ perf_evsel__name(evsel));
+
+ nr = evsel->nr_members - 1;
+ } else if (nr) {
+ fprintf(fp, ",%s", perf_evsel__name(evsel));
+
+ if (--nr == 0)
+ fprintf(fp, "}\n");
+ }
+ }
+}
+
+static int __event_process_build_id(struct build_id_event *bev,
+ char *filename,
+ struct perf_session *session)
+{
+ int err = -1;
+ struct list_head *head;
+ struct machine *machine;
+ u16 misc;
+ struct dso *dso;
+ enum dso_kernel_type dso_type;
+
+ machine = perf_session__findnew_machine(session, bev->pid);
+ if (!machine)
+ goto out;
+
+ misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ switch (misc) {
+ case PERF_RECORD_MISC_KERNEL:
+ dso_type = DSO_TYPE_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ dso_type = DSO_TYPE_GUEST_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_USER:
+ case PERF_RECORD_MISC_GUEST_USER:
+ dso_type = DSO_TYPE_USER;
+ head = &machine->user_dsos;
+ break;
+ default:
+ goto out;
+ }
+
+ dso = __dsos__findnew(head, filename);
+ if (dso != NULL) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ dso__set_build_id(dso, &bev->build_id);
+
+ if (filename[0] == '[')
+ dso->kernel = dso_type;
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id),
+ sbuild_id);
+ pr_debug("build id event received for %s: %s\n",
+ dso->long_name, sbuild_id);
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(header, struct perf_session, header);
+ struct {
+ struct perf_event_header header;
+ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ char filename[0];
+ } old_bev;
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
+ return -1;
+
+ if (header->needs_swap)
+ perf_event_header__bswap(&old_bev.header);
+
+ len = old_bev.header.size - sizeof(old_bev);
+ if (readn(input, filename, len) != len)
+ return -1;
+
+ bev.header = old_bev.header;
+
+ /*
+ * The pid is the value missing from the old format, so we need to
+ * fill it in properly. The header.misc value gives us a good hint.
+ */
+ bev.pid = HOST_KERNEL_ID;
+ if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
+ bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
+ bev.pid = DEFAULT_GUEST_KERNEL_ID;
+
+ memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+
+ return 0;
+}
+
+static int perf_header__read_build_ids(struct perf_header *header,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(header, struct perf_session, header);
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size, orig_offset = offset;
+ int err = -1;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
+ goto out;
+
+ if (header->needs_swap)
+ perf_event_header__bswap(&bev.header);
+
+ len = bev.header.size - sizeof(bev);
+ if (readn(input, filename, len) != len)
+ goto out;
+ /*
+ * The a1645ce1 changeset:
+ *
+ * "perf: 'perf kvm' tool for monitoring guest performance from host"
+ *
+ * Added a field to struct build_id_event that broke the file
+ * format.
+ *
+ * Since the kernel build-id is the first entry, process the
+ * table using the old format if the well known
+ * '[kernel.kallsyms]' string for the kernel build-id has the
+ * first 4 characters chopped off (where the pid_t sits).
+ */
+ if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
+ if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
+ return -1;
+ return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
+ }
+
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+ err = 0;
+out:
+ return err;
+}
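+
+/*
+ * A sketch of why the quirk check above works, assuming the usual
+ * "[kernel.kallsyms]" name for the kernel build-id entry:
+ *
+ *    old on-disk record:  [header][build_id]["[kernel.kallsyms]"]
+ *    current structure:   [header][pid][build_id][filename]
+ *
+ * Reading an old record through the current struct shifts everything
+ * after the header by sizeof(pid_t) == 4, so the filename comes out
+ * with its first 4 characters chopped off: "nel.kallsyms]".
+ */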
+
+static int process_tracing_data(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph __maybe_unused,
+ int fd, void *data)
+{
+ ssize_t ret = trace_report(fd, data, false);
+ return ret < 0 ? -1 : 0;
+}
+
+static int process_build_id(struct perf_file_section *section,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
+ pr_debug("Failed to read buildids, continuing...\n");
+ return 0;
+}
+
+static int process_hostname(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.hostname = do_read_string(fd, ph);
+ return ph->env.hostname ? 0 : -ENOMEM;
+}
+
+static int process_osrelease(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.os_release = do_read_string(fd, ph);
+ return ph->env.os_release ? 0 : -ENOMEM;
+}
+
+static int process_version(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.version = do_read_string(fd, ph);
+ return ph->env.version ? 0 : -ENOMEM;
+}
+
+static int process_arch(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.arch = do_read_string(fd, ph);
+ return ph->env.arch ? 0 : -ENOMEM;
+}
+
+static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ u32 nr;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cpus_online = nr;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cpus_avail = nr;
+ return 0;
+}
+
+static int process_cpudesc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.cpu_desc = do_read_string(fd, ph);
+ return ph->env.cpu_desc ? 0 : -ENOMEM;
+}
+
+static int process_cpuid(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.cpuid = do_read_string(fd, ph);
+ return ph->env.cpuid ? 0 : -ENOMEM;
+}
+
+static int process_total_mem(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ uint64_t mem;
+ ssize_t ret;
+
+ ret = readn(fd, &mem, sizeof(mem));
+ if (ret != sizeof(mem))
+ return -1;
+
+ if (ph->needs_swap)
+ mem = bswap_64(mem);
+
+ ph->env.total_mem = mem;
+ return 0;
+}
+
+static struct perf_evsel *
+perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel->idx == idx)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+static void
+perf_evlist__set_event_name(struct perf_evlist *evlist,
+ struct perf_evsel *event)
+{
+ struct perf_evsel *evsel;
+
+ if (!event->name)
+ return;
+
+ evsel = perf_evlist__find_by_index(evlist, event->idx);
+ if (!evsel)
+ return;
+
+ if (evsel->name)
+ return;
+
+ evsel->name = strdup(event->name);
+}
+
+static int
+process_event_desc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *header, int fd,
+ void *data __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_evsel *evsel, *events = read_event_desc(header, fd);
+
+ if (!events)
+ return 0;
+
+ session = container_of(header, struct perf_session, header);
+ for (evsel = events; evsel->attr.size; evsel++)
+ perf_evlist__set_event_name(session->evlist, evsel);
+
+ free_event_desc(events);
+
+ return 0;
+}
+
+static int process_cmdline(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ char *str;
+ u32 nr, i;
+ struct strbuf sb;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cmdline = nr;
+ strbuf_init(&sb, 128);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.cmdline = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ u32 nr, i;
+ char *str;
+ struct strbuf sb;
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_sibling_cores = nr;
+ strbuf_init(&sb, 128);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.sibling_cores = strbuf_detach(&sb, NULL);
+
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_sibling_threads = nr;
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.sibling_threads = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_numa_topology(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ u32 nr, node, i;
+ char *str;
+ uint64_t mem_total, mem_free;
+ struct strbuf sb;
+
+ /* nr nodes */
+ ret = readn(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ goto error;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_numa_nodes = nr;
+ strbuf_init(&sb, 256);
+
+ for (i = 0; i < nr; i++) {
+ /* node number */
+ ret = readn(fd, &node, sizeof(node));
+ if (ret != sizeof(node))
+ goto error;
+
+ ret = readn(fd, &mem_total, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ ret = readn(fd, &mem_free, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ if (ph->needs_swap) {
+ node = bswap_32(node);
+ mem_total = bswap_64(mem_total);
+ mem_free = bswap_64(mem_free);
+ }
+
+ strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
+ node, mem_total, mem_free);
+
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.numa_nodes = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
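+
+/*
+ * The resulting env.numa_nodes string is a sequence of NUL-separated
+ * entries, e.g. (made-up numbers) for a single node 0 with CPUs 0-7:
+ *
+ *    "0:16303996:12345678:0-7\0"
+ *
+ * which is exactly what print_numa_topology() above parses back with
+ * strtoul()/strtoull() and the ':' delimiters.
+ */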
+
+static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ssize_t ret;
+ char *name;
+ u32 pmu_num;
+ u32 type;
+ struct strbuf sb;
+
+ ret = readn(fd, &pmu_num, sizeof(pmu_num));
+ if (ret != sizeof(pmu_num))
+ return -1;
+
+ if (ph->needs_swap)
+ pmu_num = bswap_32(pmu_num);
+
+ if (!pmu_num) {
+ pr_debug("pmu mappings not available\n");
+ return 0;
+ }
+
+ ph->env.nr_pmu_mappings = pmu_num;
+ strbuf_init(&sb, 128);
+
+ while (pmu_num) {
+ if (readn(fd, &type, sizeof(type)) != sizeof(type))
+ goto error;
+ if (ph->needs_swap)
+ type = bswap_32(type);
+
+ name = do_read_string(fd, ph);
+ if (!name)
+ goto error;
+
+ strbuf_addf(&sb, "%u:%s", type, name);
+ /* include a NULL character at the end */
+ strbuf_add(&sb, "", 1);
+
+ free(name);
+ pmu_num--;
+ }
+ ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_group_desc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ int ret = -1;
+ u32 i, nr, nr_groups;
+ struct perf_session *session;
+ struct perf_evsel *evsel, *leader = NULL;
+ struct group_desc {
+ char *name;
+ u32 leader_idx;
+ u32 nr_members;
+ } *desc;
+
+ if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
+ return -1;
+
+ if (ph->needs_swap)
+ nr_groups = bswap_32(nr_groups);
+
+ ph->env.nr_groups = nr_groups;
+ if (!nr_groups) {
+ pr_debug("group desc not available\n");
+ return 0;
+ }
+
+ desc = calloc(nr_groups, sizeof(*desc));
+ if (!desc)
+ return -1;
+
+ for (i = 0; i < nr_groups; i++) {
+ desc[i].name = do_read_string(fd, ph);
+ if (!desc[i].name)
+ goto out_free;
+
+ if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
+ goto out_free;
+
+ if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
+ goto out_free;
+
+ if (ph->needs_swap) {
+ desc[i].leader_idx = bswap_32(desc[i].leader_idx);
+ desc[i].nr_members = bswap_32(desc[i].nr_members);
+ }
+ }
+
+ /*
+ * Rebuild the group relationships based on the group descriptors.
+ */
+ session = container_of(ph, struct perf_session, header);
+ session->evlist->nr_groups = nr_groups;
+
+ i = nr = 0;
+ evlist__for_each(session->evlist, evsel) {
+ if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
+ /*
+ * Validate before dereferencing desc[i] any further: a
+ * new leader while the previous group is still open
+ * means the descriptors are corrupt.
+ */
+ if (nr > 0) {
+ pr_debug("invalid group desc\n");
+ goto out_free;
+ }
+
+ evsel->leader = evsel;
+ /* {anon_group} is a dummy name */
+ if (strcmp(desc[i].name, "{anon_group}")) {
+ evsel->group_name = desc[i].name;
+ desc[i].name = NULL;
+ }
+ evsel->nr_members = desc[i].nr_members;
+
+ leader = evsel;
+ nr = evsel->nr_members - 1;
+ i++;
+ } else if (nr) {
+ /* This is a group member */
+ evsel->leader = leader;
+
+ nr--;
+ }
+ }
+
+ if (i != nr_groups || nr != 0) {
+ pr_debug("invalid group desc\n");
+ goto out_free;
+ }
+
+ ret = 0;
+out_free:
+ for (i = 0; i < nr_groups; i++)
+ zfree(&desc[i].name);
+ free(desc);
+
+ return ret;
+}
+
+struct feature_ops {
+ int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
+ void (*print)(struct perf_header *h, int fd, FILE *fp);
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *h, int fd, void *data);
+ const char *name;
+ bool full_only;
+};
+
+#define FEAT_OPA(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func }
+#define FEAT_OPP(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, \
+ .process = process_##func }
+#define FEAT_OPF(n, func) \
+ [n] = { .name = #n, .write = write_##func, .print = print_##func, \
+ .process = process_##func, .full_only = true }
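+
+/*
+ * For reference, FEAT_OPP(HEADER_CPUID, cpuid) expands to the
+ * designated initializer:
+ *
+ *    [HEADER_CPUID] = { .name = "HEADER_CPUID", .write = write_cpuid,
+ *                       .print = print_cpuid, .process = process_cpuid }
+ */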
+
+/* feature_ops not implemented: */
+#define print_tracing_data NULL
+#define print_build_id NULL
+
+static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
+ FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
+ FEAT_OPP(HEADER_BUILD_ID, build_id),
+ FEAT_OPP(HEADER_HOSTNAME, hostname),
+ FEAT_OPP(HEADER_OSRELEASE, osrelease),
+ FEAT_OPP(HEADER_VERSION, version),
+ FEAT_OPP(HEADER_ARCH, arch),
+ FEAT_OPP(HEADER_NRCPUS, nrcpus),
+ FEAT_OPP(HEADER_CPUDESC, cpudesc),
+ FEAT_OPP(HEADER_CPUID, cpuid),
+ FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
+ FEAT_OPP(HEADER_EVENT_DESC, event_desc),
+ FEAT_OPP(HEADER_CMDLINE, cmdline),
+ FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
+ FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
+ FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
+ FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
+ FEAT_OPP(HEADER_GROUP_DESC, group_desc),
+};
+
+struct header_print_data {
+ FILE *fp;
+ bool full; /* extended list of headers */
+};
+
+static int perf_file_section__fprintf_info(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data)
+{
+ struct header_print_data *hd = data;
+
+ if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", section->offset, feat);
+ return 0;
+ }
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_warning("unknown feature %d\n", feat);
+ return 0;
+ }
+ if (!feat_ops[feat].print)
+ return 0;
+
+ if (!feat_ops[feat].full_only || hd->full)
+ feat_ops[feat].print(ph, fd, hd->fp);
+ else
+ fprintf(hd->fp, "# %s info available, use -I to display\n",
+ feat_ops[feat].name);
+
+ return 0;
+}
+
+int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
+{
+ struct header_print_data hd;
+ struct perf_header *header = &session->header;
+ int fd = perf_data_file__fd(session->file);
+ hd.fp = fp;
+ hd.full = full;
+
+ perf_header__process_sections(header, fd, &hd,
+ perf_file_section__fprintf_info);
+ return 0;
+}
+
+static int do_write_feat(int fd, struct perf_header *h, int type,
+ struct perf_file_section **p,
+ struct perf_evlist *evlist)
+{
+ int err;
+ int ret = 0;
+
+ if (perf_header__has_feat(h, type)) {
+ if (!feat_ops[type].write)
+ return -1;
+
+ (*p)->offset = lseek(fd, 0, SEEK_CUR);
+
+ err = feat_ops[type].write(fd, h, evlist);
+ if (err < 0) {
+ pr_debug("failed to write feature %d\n", type);
+
+ /* undo anything written */
+ lseek(fd, (*p)->offset, SEEK_SET);
+
+ return -1;
+ }
+ (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
+ (*p)++;
+ }
+ return ret;
+}
+
+static int perf_header__adds_write(struct perf_header *header,
+ struct perf_evlist *evlist, int fd)
+{
+ int nr_sections;
+ struct perf_file_section *feat_sec, *p;
+ int sec_size;
+ u64 sec_start;
+ int feat;
+ int err;
+
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
+ if (feat_sec == NULL)
+ return -ENOMEM;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ sec_start = header->feat_offset;
+ lseek(fd, sec_start + sec_size, SEEK_SET);
+
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (do_write_feat(fd, header, feat, &p, evlist))
+ perf_header__clear_feat(header, feat);
+ }
+
+ lseek(fd, sec_start, SEEK_SET);
+ /*
+ * May write more than needed due to dropped features, but
+ * this is okay: the reader will skip the missing entries.
+ */
+ err = do_write(fd, feat_sec, sec_size);
+ if (err < 0)
+ pr_debug("failed to write feature section\n");
+ free(feat_sec);
+ return err;
+}
+
+int perf_header__write_pipe(int fd)
+{
+ struct perf_pipe_file_header f_header;
+ int err;
+
+ f_header = (struct perf_pipe_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ };
+
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf pipe header\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header *header = &session->header;
+ struct perf_evsel *evsel;
+ u64 attr_offset;
+ int err;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+ evlist__for_each(session->evlist, evsel) {
+ evsel->id_offset = lseek(fd, 0, SEEK_CUR);
+ err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ }
+
+ attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ evlist__for_each(evlist, evsel) {
+ f_attr = (struct perf_file_attr){
+ .attr = evsel->attr,
+ .ids = {
+ .offset = evsel->id_offset,
+ .size = evsel->ids * sizeof(u64),
+ }
+ };
+ err = do_write(fd, &f_attr, sizeof(f_attr));
+ if (err < 0) {
+ pr_debug("failed to write perf header attribute\n");
+ return err;
+ }
+ }
+
+ if (!header->data_offset)
+ header->data_offset = lseek(fd, 0, SEEK_CUR);
+ header->feat_offset = header->data_offset + header->data_size;
+
+ if (at_exit) {
+ err = perf_header__adds_write(header, evlist, fd);
+ if (err < 0)
+ return err;
+ }
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = attr_offset,
+ .size = evlist->nr_entries * sizeof(f_attr),
+ },
+ .data = {
+ .offset = header->data_offset,
+ .size = header->data_size,
+ },
+ /* event_types is ignored, store zeros */
+ };
+
+ memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
+
+ lseek(fd, 0, SEEK_SET);
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ lseek(fd, header->data_offset + header->data_size, SEEK_SET);
+
+ return 0;
+}
+
+static int perf_header__getbuffer64(struct perf_header *header,
+ int fd, void *buf, size_t size)
+{
+ if (readn(fd, buf, size) <= 0)
+ return -1;
+
+ if (header->needs_swap)
+ mem_bswap_64(buf, size);
+
+ return 0;
+}
+
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data))
+{
+ struct perf_file_section *feat_sec, *sec;
+ int nr_sections;
+ int sec_size;
+ int feat;
+ int err;
+
+ nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
+ if (!feat_sec)
+ return -1;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ lseek(fd, header->feat_offset, SEEK_SET);
+
+ err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
+ if (err < 0)
+ goto out_free;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
+ err = process(sec++, header, feat, fd, data);
+ if (err < 0)
+ goto out_free;
+ }
+ err = 0;
+out_free:
+ free(feat_sec);
+ return err;
+}
+
+static const int attr_file_abi_sizes[] = {
+ [0] = PERF_ATTR_SIZE_VER0,
+ [1] = PERF_ATTR_SIZE_VER1,
+ [2] = PERF_ATTR_SIZE_VER2,
+ [3] = PERF_ATTR_SIZE_VER3,
+ 0,
+};
+
+/*
+ * In the legacy file format the magic number does not encode endianness;
+ * hdr_sz served that purpose instead. But given that hdr_sz varies with
+ * the ABI revision, we need to try all known sizes in both byte orders
+ * to detect the endianness.
+ */
+static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
+{
+ uint64_t ref_size, attr_size;
+ int i;
+
+ for (i = 0 ; attr_file_abi_sizes[i]; i++) {
+ ref_size = attr_file_abi_sizes[i]
+ + sizeof(struct perf_file_section);
+ if (hdr_sz != ref_size) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != ref_size)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
+ i,
+ ph->needs_swap);
+ return 0;
+ }
+ /* could not determine endianness */
+ return -1;
+}
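+
+/*
+ * Worked example (assuming PERF_ATTR_SIZE_VER0 == 64 and a 16 byte
+ * struct perf_file_section): a same-endian ABI0 file has
+ * hdr_sz == 64 + 16 == 80, matching ref_size directly. A cross-endian
+ * ABI0 file carries bswap_64(80) instead, which only matches after
+ * swapping, so needs_swap is set before the match is reported.
+ */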
+
+#define PERF_PIPE_HDR_VER0 16
+
+static const size_t attr_pipe_abi_sizes[] = {
+ [0] = PERF_PIPE_HDR_VER0,
+ 0,
+};
+
+/*
+ * The legacy pipe format implicitly assumes that the host recording the
+ * samples and the host parsing them share the same endianness. This is
+ * not always the case: the pipe output can be redirected into a file and
+ * analyzed on a different machine, with possibly a different endianness
+ * and different perf_event ABI revisions in the perf tool itself.
+ */
+static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
+{
+ u64 attr_size;
+ int i;
+
+ for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
+ if (hdr_sz != attr_pipe_abi_sizes[i]) {
+ attr_size = bswap_64(hdr_sz);
+ if (attr_size != hdr_sz)
+ continue;
+
+ ph->needs_swap = true;
+ }
+ pr_debug("Pipe ABI%d perf.data file detected\n", i);
+ return 0;
+ }
+ return -1;
+}
+
+bool is_perf_magic(u64 magic)
+{
+ if (!memcmp(&magic, __perf_magic1, sizeof(magic))
+ || magic == __perf_magic2
+ || magic == __perf_magic2_sw)
+ return true;
+
+ return false;
+}
+
+static int check_magic_endian(u64 magic, uint64_t hdr_sz,
+ bool is_pipe, struct perf_header *ph)
+{
+ int ret;
+
+ /* check for legacy format */
+ ret = memcmp(&magic, __perf_magic1, sizeof(magic));
+ if (ret == 0) {
+ ph->version = PERF_HEADER_VERSION_1;
+ pr_debug("legacy perf.data format\n");
+ if (is_pipe)
+ return try_all_pipe_abis(hdr_sz, ph);
+
+ return try_all_file_abis(hdr_sz, ph);
+ }
+ /*
+ * the new magic number serves two purposes:
+ * - unique number to identify actual perf.data files
+ * - encode endianness of file
+ */
+
+ /* check magic number with one endianness */
+ if (magic == __perf_magic2)
+ return 0;
+
+ /* check magic number with opposite endianness */
+ if (magic != __perf_magic2_sw)
+ return -1;
+
+ ph->needs_swap = true;
+ ph->version = PERF_HEADER_VERSION_2;
+
+ return 0;
+}
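+
+/*
+ * Note: __perf_magic2 is assumed here to be the byte pattern of the
+ * string "PERFILE2", so a v2 file written on an opposite-endian host
+ * shows up as __perf_magic2_sw and simply flips needs_swap instead of
+ * being rejected.
+ */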
+
+int perf_file_header__read(struct perf_file_header *header,
+ struct perf_header *ph, int fd)
+{
+ ssize_t ret;
+
+ lseek(fd, 0, SEEK_SET);
+
+ ret = readn(fd, header, sizeof(*header));
+ if (ret <= 0)
+ return -1;
+
+ if (check_magic_endian(header->magic,
+ header->attr_size, false, ph) < 0) {
+ pr_debug("magic/endian check failed\n");
+ return -1;
+ }
+
+ if (ph->needs_swap) {
+ mem_bswap_64(header, offsetof(struct perf_file_header,
+ adds_features));
+ }
+
+ if (header->size != sizeof(*header)) {
+ /* Support the previous format */
+ if (header->size == offsetof(typeof(*header), adds_features))
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ else
+ return -1;
+ } else if (ph->needs_swap) {
+ /*
+ * feature bitmap is declared as an array of unsigned longs --
+ * not good since its size can differ between the host that
+ * generated the data file and the host analyzing the file.
+ *
+ * We need to handle endianness, but we don't know the size of
+ * the unsigned long where the file was generated. Take a best
+ * guess at determining it: try 64-bit swap first (ie., file
+ * created on a 64-bit host), and check if the hostname feature
+ * bit is set (this feature bit is forced on as of fbe96f2).
+ * If the bit is not, undo the 64-bit swap and try a 32-bit
+ * swap. If the hostname bit is still not set (e.g., older data
+ * file), punt and fallback to the original behavior --
+ * clearing all feature bits and setting buildid.
+ */
+ mem_bswap_64(&header->adds_features,
+ BITS_TO_U64(HEADER_FEAT_BITS));
+
+ if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
+ /* unswap as u64 */
+ mem_bswap_64(&header->adds_features,
+ BITS_TO_U64(HEADER_FEAT_BITS));
+
+ /* unswap as u32 */
+ mem_bswap_32(&header->adds_features,
+ BITS_TO_U32(HEADER_FEAT_BITS));
+ }
+
+ if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
+ bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
+ set_bit(HEADER_BUILD_ID, header->adds_features);
+ }
+ }
+
+ memcpy(&ph->adds_features, &header->adds_features,
+ sizeof(ph->adds_features));
+
+ ph->data_offset = header->data.offset;
+ ph->data_size = header->data.size;
+ ph->feat_offset = header->data.offset + header->data.size;
+ return 0;
+}
+
+static int perf_file_section__process(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data)
+{
+ if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+ "%d, continuing...\n", section->offset, feat);
+ return 0;
+ }
+
+ if (feat >= HEADER_LAST_FEATURE) {
+ pr_debug("unknown feature %d, continuing...\n", feat);
+ return 0;
+ }
+
+ if (!feat_ops[feat].process)
+ return 0;
+
+ return feat_ops[feat].process(section, ph, fd, data);
+}
+
+static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
+ struct perf_header *ph, int fd,
+ bool repipe)
+{
+ ssize_t ret;
+
+ ret = readn(fd, header, sizeof(*header));
+ if (ret <= 0)
+ return -1;
+
+ if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
+ pr_debug("endian/magic failed\n");
+ return -1;
+ }
+
+ if (ph->needs_swap)
+ header->size = bswap_64(header->size);
+
+ if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int perf_header__read_pipe(struct perf_session *session)
+{
+ struct perf_header *header = &session->header;
+ struct perf_pipe_file_header f_header;
+
+ if (perf_file_header__read_pipe(&f_header, header,
+ perf_data_file__fd(session->file),
+ session->repipe) < 0) {
+ pr_debug("incompatible file format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int read_attr(int fd, struct perf_header *ph,
+ struct perf_file_attr *f_attr)
+{
+ struct perf_event_attr *attr = &f_attr->attr;
+ size_t sz, left;
+ size_t our_sz = sizeof(f_attr->attr);
+ ssize_t ret;
+
+ memset(f_attr, 0, sizeof(*f_attr));
+
+ /* read minimal guaranteed structure */
+ ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
+ if (ret <= 0) {
+ pr_debug("cannot read %d bytes of header attr\n",
+ PERF_ATTR_SIZE_VER0);
+ return -1;
+ }
+
+ /* on file perf_event_attr size */
+ sz = attr->size;
+
+ if (ph->needs_swap)
+ sz = bswap_32(sz);
+
+ if (sz == 0) {
+ /* assume ABI0 */
+ sz = PERF_ATTR_SIZE_VER0;
+ } else if (sz > our_sz) {
+ pr_debug("file uses a more recent and unsupported ABI"
+ " (%zu bytes extra)\n", sz - our_sz);
+ return -1;
+ }
+ /* what we have not yet read and that we know about */
+ left = sz - PERF_ATTR_SIZE_VER0;
+ if (left) {
+ void *ptr = attr;
+ ptr += PERF_ATTR_SIZE_VER0;
+
+ ret = readn(fd, ptr, left);
+ if (ret <= 0)
+ return -1;
+ }
+ /* read perf_file_section, ids are read in caller */
+ ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
+
+ return ret <= 0 ? -1 : 0;
+}
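+
+/*
+ * Example of the size handling above (assuming the usual ABI sizes,
+ * VER0 == 64 and VER1 == 72): a file written by an ABI1 tool carries
+ * attr->size == 72, so left == 8 and only those trailing 8 bytes are
+ * read beyond the guaranteed VER0 part. attr->size == 0 is treated as
+ * ABI0, and anything bigger than our struct is rejected.
+ */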
+
+static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
+ struct pevent *pevent)
+{
+ struct event_format *event;
+ char bf[128];
+
+ /* already prepared */
+ if (evsel->tp_format)
+ return 0;
+
+ if (pevent == NULL) {
+ pr_debug("broken or missing trace data\n");
+ return -1;
+ }
+
+ event = pevent_find_event(pevent, evsel->attr.config);
+ if (event == NULL)
+ return -1;
+
+ if (!evsel->name) {
+ snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+ evsel->name = strdup(bf);
+ if (evsel->name == NULL)
+ return -1;
+ }
+
+ evsel->tp_format = event;
+ return 0;
+}
+
+static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
+ struct pevent *pevent)
+{
+ struct perf_evsel *pos;
+
+ evlist__for_each(evlist, pos) {
+ if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
+ perf_evsel__prepare_tracepoint_event(pos, pevent))
+ return -1;
+ }
+
+ return 0;
+}
+
+int perf_session__read_header(struct perf_session *session)
+{
+ struct perf_data_file *file = session->file;
+ struct perf_header *header = &session->header;
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+ int nr_attrs, nr_ids, i, j;
+ int fd = perf_data_file__fd(file);
+
+ session->evlist = perf_evlist__new();
+ if (session->evlist == NULL)
+ return -ENOMEM;
+
+ if (perf_data_file__is_pipe(file))
+ return perf_header__read_pipe(session);
+
+ if (perf_file_header__read(&f_header, header, fd) < 0)
+ return -EINVAL;
+
+ /*
+ * Sanity check that perf.data was written cleanly; data size is
+ * initialized to 0 and updated only if the on_exit function is run.
+ * If data size is still 0 then the file contains only partial
+ * information. Just warn the user and process as much of it as we can.
+ */
+ if (f_header.data.size == 0) {
+ pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
+ "Was the 'perf record' command properly terminated?\n",
+ file->path);
+ }
+
+ nr_attrs = f_header.attrs.size / f_header.attr_size;
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_evsel *evsel;
+ off_t tmp;
+
+ if (read_attr(fd, header, &f_attr) < 0)
+ goto out_errno;
+
+ if (header->needs_swap)
+ perf_event__attr_swap(&f_attr.attr);
+
+ tmp = lseek(fd, 0, SEEK_CUR);
+ evsel = perf_evsel__new(&f_attr.attr);
+
+ if (evsel == NULL)
+ goto out_delete_evlist;
+
+ evsel->needs_swap = header->needs_swap;
+ /*
+ * Do it before so that if perf_evsel__alloc_id fails, this
+ * entry gets purged too at perf_evlist__delete().
+ */
+ perf_evlist__add(session->evlist, evsel);
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ /*
+ * We don't have the cpu and thread maps on the header, so
+ * for allocating the perf_sample_id table we fake 1 cpu and
+ * nr_ids threads.
+ */
+ if (perf_evsel__alloc_id(evsel, 1, nr_ids))
+ goto out_delete_evlist;
+
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
+ goto out_errno;
+
+ perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
+ }
+
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ symbol_conf.nr_events = nr_attrs;
+
+ perf_header__process_sections(header, fd, &session->tevent,
+ perf_file_section__process);
+
+ if (perf_evlist__prepare_tracepoint_events(session->evlist,
+ session->tevent.pevent))
+ goto out_delete_evlist;
+
+ return 0;
+out_errno:
+ return -errno;
+
+out_delete_evlist:
+ perf_evlist__delete(session->evlist);
+ session->evlist = NULL;
+ return -ENOMEM;
+}
+
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u32 ids, u64 *id,
+ perf_event__handler_t process)
+{
+ union perf_event *ev;
+ size_t size;
+ int err;
+
+ size = sizeof(struct perf_event_attr);
+ size = PERF_ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = malloc(size);
+
+ if (ev == NULL)
+ return -ENOMEM;
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
+
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = (u16)size;
+
+ if (ev->attr.header.size == size)
+ err = process(tool, ev, NULL, NULL);
+ else
+ err = -E2BIG;
+
+ free(ev);
+
+ return err;
+}
+
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process)
+{
+ struct perf_evsel *evsel;
+ int err = 0;
+
+ evlist__for_each(session->evlist, evsel) {
+ err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
+ evsel->id, process);
+ if (err) {
+ pr_debug("failed to create perf header attribute\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_evlist **pevlist)
+{
+ u32 i, ids, n_ids;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = *pevlist;
+
+ if (evlist == NULL) {
+ *pevlist = evlist = perf_evlist__new();
+ if (evlist == NULL)
+ return -ENOMEM;
+ }
+
+ evsel = perf_evsel__new(&event->attr.attr);
+ if (evsel == NULL)
+ return -ENOMEM;
+
+ perf_evlist__add(evlist, evsel);
+
+ ids = event->header.size;
+ ids -= (void *)&event->attr.id - (void *)event;
+ n_ids = ids / sizeof(u64);
+ /*
+ * We don't have the cpu and thread maps on the header, so
+ * for allocating the perf_sample_id table we fake 1 cpu and
+ * n_ids threads.
+ */
+ if (perf_evsel__alloc_id(evsel, 1, n_ids))
+ return -ENOMEM;
+
+ for (i = 0; i < n_ids; i++) {
+ perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
+ }
+
+ symbol_conf.nr_events = evlist->nr_entries;
+
+ return 0;
+}
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process)
+{
+ union perf_event ev;
+ struct tracing_data *tdata;
+ ssize_t size = 0, aligned_size = 0, padding;
+ int err __maybe_unused = 0;
+
+ /*
+ * We are going to store the size of the data followed
+ * by the data contents. Since fd is a pipe,
+ * we cannot seek back to store the size of the data once
+ * we know it. Instead we:
+ *
+ * - write the tracing data to the temp file
+ * - get/write the data size to pipe
+ * - write the tracing data from the temp file
+ * to the pipe
+ */
+ tdata = tracing_data_get(&evlist->entries, fd, true);
+ if (!tdata)
+ return -1;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
+ size = tdata->size;
+ aligned_size = PERF_ALIGN(size, sizeof(u64));
+ padding = aligned_size - size;
+ ev.tracing_data.header.size = sizeof(ev.tracing_data);
+ ev.tracing_data.size = aligned_size;
+
+ process(tool, &ev, NULL, NULL);
+
+ /*
+ * The put function will copy all the tracing data
+ * stored in temp file to the pipe.
+ */
+ tracing_data_put(tdata);
+
+ write_padded(fd, NULL, 0, padding);
+
+ return aligned_size;
+}
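+
+/*
+ * The resulting stream on the pipe for this record is, in order:
+ *
+ *    struct tracing_data_event   (type, header size, aligned data size)
+ *    the raw tracing data        (copied in by tracing_data_put())
+ *    zero padding                (up to the u64-aligned size)
+ *
+ * which is the layout perf_event__process_tracing_data() below expects.
+ */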
+
+int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ ssize_t size_read, padding, size = event->tracing_data.size;
+ int fd = perf_data_file__fd(session->file);
+ off_t offset = lseek(fd, 0, SEEK_CUR);
+ char buf[BUFSIZ];
+
+ /* setup for reading amidst mmap */
+ lseek(fd, offset + sizeof(struct tracing_data_event),
+ SEEK_SET);
+
+ size_read = trace_report(fd, &session->tevent,
+ session->repipe);
+ padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
+
+ if (readn(fd, buf, padding) < 0) {
+ pr_err("%s: reading input file", __func__);
+ return -1;
+ }
+ if (session->repipe) {
+ int retw = write(STDOUT_FILENO, buf, padding);
+ /* retw == padding also covers the padding == 0 case */
+ if (retw != padding) {
+ pr_err("%s: repiping tracing data padding", __func__);
+ return -1;
+ }
+ }
+
+ if (size_read + padding != size) {
+ pr_err("%s: tracing data size mismatch", __func__);
+ return -1;
+ }
+
+ perf_evlist__prepare_tracepoint_events(session->evlist,
+ session->tevent.pevent);
+
+ return size_read + padding;
+}
+
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ union perf_event ev;
+ size_t len;
+ int err = 0;
+
+ if (!pos->hit)
+ return err;
+
+ memset(&ev, 0, sizeof(ev));
+
+ len = pos->long_name_len + 1;
+ len = PERF_ALIGN(len, NAME_ALIGN);
+ memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
+ ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
+ ev.build_id.header.misc = misc;
+ ev.build_id.pid = machine->pid;
+ ev.build_id.header.size = sizeof(ev.build_id) + len;
+ memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+
+ err = process(tool, &ev, NULL, machine);
+
+ return err;
+}
+
+int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ __event_process_build_id(&event->build_id,
+ event->build_id.filename,
+ session);
+ return 0;
+}
+
+void disable_buildid_cache(void)
+{
+ no_buildid_cache = true;
+}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
new file mode 100644
index 00000000000..d08cfe49940
--- /dev/null
+++ b/tools/perf/util/header.h
@@ -0,0 +1,159 @@
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
+
+#include <linux/perf_event.h>
+#include <sys/types.h>
+#include <stdbool.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include "event.h"
+
+
+enum {
+ HEADER_RESERVED = 0, /* always cleared */
+ HEADER_FIRST_FEATURE = 1,
+ HEADER_TRACING_DATA = 1,
+ HEADER_BUILD_ID,
+
+ HEADER_HOSTNAME,
+ HEADER_OSRELEASE,
+ HEADER_VERSION,
+ HEADER_ARCH,
+ HEADER_NRCPUS,
+ HEADER_CPUDESC,
+ HEADER_CPUID,
+ HEADER_TOTAL_MEM,
+ HEADER_CMDLINE,
+ HEADER_EVENT_DESC,
+ HEADER_CPU_TOPOLOGY,
+ HEADER_NUMA_TOPOLOGY,
+ HEADER_BRANCH_STACK,
+ HEADER_PMU_MAPPINGS,
+ HEADER_GROUP_DESC,
+ HEADER_LAST_FEATURE,
+ HEADER_FEAT_BITS = 256,
+};
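+
+/*
+ * Note: these values index the adds_features bitmap stored in the
+ * perf.data header, so they are effectively on-disk ABI; new features
+ * should only be added immediately before HEADER_LAST_FEATURE.
+ */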
+
+enum perf_header_version {
+ PERF_HEADER_VERSION_1,
+ PERF_HEADER_VERSION_2,
+};
+
+struct perf_file_section {
+ u64 offset;
+ u64 size;
+};
+
+struct perf_file_header {
+ u64 magic;
+ u64 size;
+ u64 attr_size;
+ struct perf_file_section attrs;
+ struct perf_file_section data;
+ /* event_types is ignored */
+ struct perf_file_section event_types;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+
+struct perf_pipe_file_header {
+ u64 magic;
+ u64 size;
+};
+
+struct perf_header;
+
+int perf_file_header__read(struct perf_file_header *header,
+ struct perf_header *ph, int fd);
+
+struct perf_session_env {
+ char *hostname;
+ char *os_release;
+ char *version;
+ char *arch;
+ int nr_cpus_online;
+ int nr_cpus_avail;
+ char *cpu_desc;
+ char *cpuid;
+ unsigned long long total_mem;
+
+ int nr_cmdline;
+ int nr_sibling_cores;
+ int nr_sibling_threads;
+ int nr_numa_nodes;
+ int nr_pmu_mappings;
+ int nr_groups;
+ char *cmdline;
+ char *sibling_cores;
+ char *sibling_threads;
+ char *numa_nodes;
+ char *pmu_mappings;
+};
+
+struct perf_header {
+ enum perf_header_version version;
+ bool needs_swap;
+ u64 data_offset;
+ u64 data_size;
+ u64 feat_offset;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+ struct perf_session_env env;
+};
+
+struct perf_evlist;
+struct perf_session;
+
+int perf_session__read_header(struct perf_session *session);
+int perf_session__write_header(struct perf_session *session,
+ struct perf_evlist *evlist,
+ int fd, bool at_exit);
+int perf_header__write_pipe(int fd);
+
+void perf_header__set_feat(struct perf_header *header, int feat);
+void perf_header__clear_feat(struct perf_header *header, int feat);
+bool perf_header__has_feat(const struct perf_header *header, int feat);
+
+int perf_header__set_cmdline(int argc, const char **argv);
+
+int perf_header__process_sections(struct perf_header *header, int fd,
+ void *data,
+ int (*process)(struct perf_file_section *section,
+ struct perf_header *ph,
+ int feat, int fd, void *data));
+
+int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms, bool is_vdso);
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+
+int perf_event__synthesize_attr(struct perf_tool *tool,
+ struct perf_event_attr *attr, u32 ids, u64 *id,
+ perf_event__handler_t process);
+int perf_event__synthesize_attrs(struct perf_tool *tool,
+ struct perf_session *session,
+ perf_event__handler_t process);
+int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
+ struct perf_evlist **pevlist);
+
+int perf_event__synthesize_tracing_data(struct perf_tool *tool,
+ int fd, struct perf_evlist *evlist,
+ perf_event__handler_t process);
+int perf_event__process_tracing_data(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+
+int perf_event__synthesize_build_id(struct perf_tool *tool,
+ struct dso *pos, u16 misc,
+ perf_event__handler_t process,
+ struct machine *machine);
+int perf_event__process_build_id(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session);
+bool is_perf_magic(u64 magic);
+
+/*
+ * arch specific callback
+ */
+int get_cpuid(char *buffer, size_t sz);
+
+#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
new file mode 100644
index 00000000000..86c37c47226
--- /dev/null
+++ b/tools/perf/util/help.c
@@ -0,0 +1,339 @@
+#include "cache.h"
+#include "../builtin.h"
+#include "exec_cmd.h"
+#include "levenshtein.h"
+#include "help.h"
+#include <termios.h>
+
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
+{
+ struct cmdname *ent = malloc(sizeof(*ent) + len + 1);
+
+ ent->len = len;
+ memcpy(ent->name, name, len);
+ ent->name[len] = 0;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc);
+ cmds->names[cmds->cnt++] = ent;
+}
+
+static void clean_cmdnames(struct cmdnames *cmds)
+{
+ unsigned int i;
+
+ for (i = 0; i < cmds->cnt; ++i)
+ zfree(&cmds->names[i]);
+ zfree(&cmds->names);
+ cmds->cnt = 0;
+ cmds->alloc = 0;
+}
+
+static int cmdname_compare(const void *a_, const void *b_)
+{
+ struct cmdname *a = *(struct cmdname **)a_;
+ struct cmdname *b = *(struct cmdname **)b_;
+ return strcmp(a->name, b->name);
+}
+
+static void uniq(struct cmdnames *cmds)
+{
+ unsigned int i, j;
+
+ if (!cmds->cnt)
+ return;
+
+ for (i = j = 1; i < cmds->cnt; i++)
+ if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+ cmds->names[j++] = cmds->names[i];
+
+ cmds->cnt = j;
+}
+
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+{
+ size_t ci, cj, ei;
+ int cmp;
+
+ ci = cj = ei = 0;
+ while (ci < cmds->cnt && ei < excludes->cnt) {
+ cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
+ if (cmp < 0)
+ cmds->names[cj++] = cmds->names[ci++];
+ else if (cmp == 0)
+ ci++, ei++;
+ else if (cmp > 0)
+ ei++;
+ }
+
+ while (ci < cmds->cnt)
+ cmds->names[cj++] = cmds->names[ci++];
+
+ cmds->cnt = cj;
+}
+
+static void pretty_print_string_list(struct cmdnames *cmds, int longest)
+{
+ int cols = 1, rows;
+ int space = longest + 1; /* min 1 SP between words */
+ struct winsize win;
+ int max_cols;
+ int i, j;
+
+ get_term_dimensions(&win);
+ max_cols = win.ws_col - 1; /* don't print *on* the edge */
+
+ if (space < max_cols)
+ cols = max_cols / space;
+ rows = (cmds->cnt + cols - 1) / cols;
+
+ for (i = 0; i < rows; i++) {
+ printf(" ");
+
+ for (j = 0; j < cols; j++) {
+ unsigned int n = j * rows + i;
+ unsigned int size = space;
+
+ if (n >= cmds->cnt)
+ break;
+ if (j == cols-1 || n + rows >= cmds->cnt)
+ size = 1;
+ printf("%-*s", size, cmds->names[n]->name);
+ }
+ putchar('\n');
+ }
+}
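+
+/*
+ * Layout example (hypothetical numbers): 5 names with longest == 7
+ * give space == 8; on a 25-column terminal max_cols == 24, so
+ * cols == 3 and rows == 2. Names are emitted column-major via
+ * n = j * rows + i:
+ *
+ *    name0  name2  name4
+ *    name1  name3
+ */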
+
+static int is_executable(const char *name)
+{
+ struct stat st;
+
+ if (stat(name, &st) || /* stat, not lstat */
+ !S_ISREG(st.st_mode))
+ return 0;
+
+ return st.st_mode & S_IXUSR;
+}
+
+static void list_commands_in_dir(struct cmdnames *cmds,
+ const char *path,
+ const char *prefix)
+{
+ int prefix_len;
+ DIR *dir = opendir(path);
+ struct dirent *de;
+ struct strbuf buf = STRBUF_INIT;
+ int len;
+
+ if (!dir)
+ return;
+ if (!prefix)
+ prefix = "perf-";
+ prefix_len = strlen(prefix);
+
+ strbuf_addf(&buf, "%s/", path);
+ len = buf.len;
+
+ while ((de = readdir(dir)) != NULL) {
+ int entlen;
+
+ if (prefixcmp(de->d_name, prefix))
+ continue;
+
+ strbuf_setlen(&buf, len);
+ strbuf_addstr(&buf, de->d_name);
+ if (!is_executable(buf.buf))
+ continue;
+
+ entlen = strlen(de->d_name) - prefix_len;
+ if (has_extension(de->d_name, ".exe"))
+ entlen -= 4;
+
+ add_cmdname(cmds, de->d_name + prefix_len, entlen);
+ }
+ closedir(dir);
+ strbuf_release(&buf);
+}
+
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds)
+{
+ const char *env_path = getenv("PATH");
+ const char *exec_path = perf_exec_path();
+
+ if (exec_path) {
+ list_commands_in_dir(main_cmds, exec_path, prefix);
+ qsort(main_cmds->names, main_cmds->cnt,
+ sizeof(*main_cmds->names), cmdname_compare);
+ uniq(main_cmds);
+ }
+
+ if (env_path) {
+ char *paths, *path, *colon;
+ path = paths = strdup(env_path);
+ while (1) {
+ if ((colon = strchr(path, PATH_SEP)))
+ *colon = 0;
+ if (!exec_path || strcmp(path, exec_path))
+ list_commands_in_dir(other_cmds, path, prefix);
+
+ if (!colon)
+ break;
+ path = colon + 1;
+ }
+ free(paths);
+
+ qsort(other_cmds->names, other_cmds->cnt,
+ sizeof(*other_cmds->names), cmdname_compare);
+ uniq(other_cmds);
+ }
+ exclude_cmds(other_cmds, main_cmds);
+}
+
+void list_commands(const char *title, struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds)
+{
+ unsigned int i, longest = 0;
+
+ for (i = 0; i < main_cmds->cnt; i++)
+ if (longest < main_cmds->names[i]->len)
+ longest = main_cmds->names[i]->len;
+ for (i = 0; i < other_cmds->cnt; i++)
+ if (longest < other_cmds->names[i]->len)
+ longest = other_cmds->names[i]->len;
+
+ if (main_cmds->cnt) {
+ const char *exec_path = perf_exec_path();
+ printf("available %s in '%s'\n", title, exec_path);
+ printf("----------------");
+ mput_char('-', strlen(title) + strlen(exec_path));
+ putchar('\n');
+ pretty_print_string_list(main_cmds, longest);
+ putchar('\n');
+ }
+
+ if (other_cmds->cnt) {
+ printf("%s available from elsewhere on your $PATH\n", title);
+ printf("---------------------------------------");
+ mput_char('-', strlen(title));
+ putchar('\n');
+ pretty_print_string_list(other_cmds, longest);
+ putchar('\n');
+ }
+}
+
+int is_in_cmdlist(struct cmdnames *c, const char *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < c->cnt; i++)
+ if (!strcmp(s, c->names[i]->name))
+ return 1;
+ return 0;
+}
+
+static int autocorrect;
+static struct cmdnames aliases;
+
+static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "help.autocorrect"))
+ autocorrect = perf_config_int(var, value);
+ /* Also use aliases for command lookup */
+ if (!prefixcmp(var, "alias."))
+ add_cmdname(&aliases, var + 6, strlen(var + 6));
+
+ return perf_default_config(var, value, cb);
+}
+
+static int levenshtein_compare(const void *p1, const void *p2)
+{
+ const struct cmdname *const *c1 = p1, *const *c2 = p2;
+ const char *s1 = (*c1)->name, *s2 = (*c2)->name;
+ int l1 = (*c1)->len;
+ int l2 = (*c2)->len;
+ return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
+}
+
+static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+{
+ unsigned int i;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
+
+ for (i = 0; i < old->cnt; i++)
+ cmds->names[cmds->cnt++] = old->names[i];
+ zfree(&old->names);
+ old->cnt = 0;
+}
+
+const char *help_unknown_cmd(const char *cmd)
+{
+ unsigned int i, n = 0, best_similarity = 0;
+ struct cmdnames main_cmds, other_cmds;
+
+ memset(&main_cmds, 0, sizeof(main_cmds));
+ memset(&other_cmds, 0, sizeof(other_cmds));
+ memset(&aliases, 0, sizeof(aliases));
+
+ perf_config(perf_unknown_cmd_config, NULL);
+
+ load_command_list("perf-", &main_cmds, &other_cmds);
+
+ add_cmd_list(&main_cmds, &aliases);
+ add_cmd_list(&main_cmds, &other_cmds);
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), cmdname_compare);
+ uniq(&main_cmds);
+
+ if (main_cmds.cnt) {
+ /* This reuses cmdname->len for similarity index */
+ for (i = 0; i < main_cmds.cnt; ++i)
+ main_cmds.names[i]->len =
+ levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
+
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), levenshtein_compare);
+
+ best_similarity = main_cmds.names[0]->len;
+ n = 1;
+ while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+ ++n;
+ }
+
+ if (autocorrect && n == 1) {
+ const char *assumed = main_cmds.names[0]->name;
+
+ main_cmds.names[0] = NULL;
+ clean_cmdnames(&main_cmds);
+ fprintf(stderr, "WARNING: You called a perf program named '%s', "
+ "which does not exist.\n"
+ "Continuing under the assumption that you meant '%s'\n",
+ cmd, assumed);
+ if (autocorrect > 0) {
+ fprintf(stderr, "in %0.1f seconds automatically...\n",
+ (float)autocorrect/10.0);
+ poll(NULL, 0, autocorrect * 100);
+ }
+ return assumed;
+ }
+
+ fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
+
+ if (main_cmds.cnt && best_similarity < 6) {
+ fprintf(stderr, "\nDid you mean %s?\n",
+ n < 2 ? "this": "one of these");
+
+ for (i = 0; i < n; i++)
+ fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+ }
+
+ exit(1);
+}
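+
+/*
+ * Usage sketch (assumed config file syntax): with ~/.perfconfig
+ * containing
+ *
+ *    [help]
+ *        autocorrect = 10
+ *
+ * a typo with a single closest match, e.g. "perf stta", prints the
+ * warning above, waits 10 * 100ms == 1.0 seconds, then runs the
+ * assumed command.
+ */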
+
+int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused,
+ const char *prefix __maybe_unused)
+{
+ printf("perf version %s\n", perf_version_string);
+ return 0;
+}
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h
new file mode 100644
index 00000000000..7f5c6dedd71
--- /dev/null
+++ b/tools/perf/util/help.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_HELP_H
+#define __PERF_HELP_H
+
+struct cmdnames {
+ size_t alloc;
+ size_t cnt;
+ struct cmdname {
+ size_t len; /* also used for similarity index in help.c */
+ char name[FLEX_ARRAY];
+ } **names;
+};
+
+static inline void mput_char(char c, unsigned int num)
+{
+ while (num--)
+ putchar(c);
+}
+
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len);
+/* Here we require that excludes is a sorted list. */
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
+int is_in_cmdlist(struct cmdnames *c, const char *s);
+void list_commands(const char *title, struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+
+#endif /* __PERF_HELP_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
new file mode 100644
index 00000000000..30df6187ee0
--- /dev/null
+++ b/tools/perf/util/hist.c
@@ -0,0 +1,1414 @@
+#include "util.h"
+#include "build-id.h"
+#include "hist.h"
+#include "session.h"
+#include "sort.h"
+#include "evsel.h"
+#include "annotate.h"
+#include <math.h>
+
+static bool hists__filter_entry_by_dso(struct hists *hists,
+ struct hist_entry *he);
+static bool hists__filter_entry_by_thread(struct hists *hists,
+ struct hist_entry *he);
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he);
+
+struct callchain_param callchain_param = {
+ .mode = CHAIN_GRAPH_REL,
+ .min_percent = 0.5,
+ .order = ORDER_CALLEE,
+ .key = CCKEY_FUNCTION
+};
+
+u16 hists__col_len(struct hists *hists, enum hist_column col)
+{
+ return hists->col_len[col];
+}
+
+void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
+{
+ hists->col_len[col] = len;
+}
+
+bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
+{
+ if (len > hists__col_len(hists, col)) {
+ hists__set_col_len(hists, col, len);
+ return true;
+ }
+ return false;
+}
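+
+/*
+ * Note: column widths only ever grow; the bool return lets callers
+ * cascade, e.g. hists__calc_col_len() widens HISTC_THREAD only when
+ * the HISTC_COMM column actually grew.
+ */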
+
+void hists__reset_col_len(struct hists *hists)
+{
+ enum hist_column col;
+
+ for (col = 0; col < HISTC_NR_COLS; ++col)
+ hists__set_col_len(hists, col, 0);
+}
+
+static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
+{
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+
+ if (hists__col_len(hists, dso) < unresolved_col_width &&
+ !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ !symbol_conf.dso_list)
+ hists__set_col_len(hists, dso, unresolved_col_width);
+}
+
+void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+{
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+ int symlen;
+ u16 len;
+
+ /*
+ * +4 accounts for '[x] ' priv level info
+ * +2 accounts for 0x prefix on raw addresses
+ * +3 accounts for ' y ' symtab origin info
+ */
+ if (h->ms.sym) {
+ symlen = h->ms.sym->namelen + 4;
+ if (verbose)
+ symlen += BITS_PER_LONG / 4 + 2 + 3;
+ hists__new_col_len(hists, HISTC_SYMBOL, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO);
+ }
+
+ len = thread__comm_len(h->thread);
+ if (hists__new_col_len(hists, HISTC_COMM, len))
+ hists__set_col_len(hists, HISTC_THREAD, len + 6);
+
+ if (h->ms.map) {
+ len = dso__name_len(h->ms.map->dso);
+ hists__new_col_len(hists, HISTC_DSO, len);
+ }
+
+ if (h->parent)
+ hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
+
+ if (h->branch_info) {
+ if (h->branch_info->from.sym) {
+ symlen = (int)h->branch_info->from.sym->namelen + 4;
+ if (verbose)
+ symlen += BITS_PER_LONG / 4 + 2 + 3;
+ hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+
+ symlen = dso__name_len(h->branch_info->from.map->dso);
+ hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
+ }
+
+ if (h->branch_info->to.sym) {
+ symlen = (int)h->branch_info->to.sym->namelen + 4;
+ if (verbose)
+ symlen += BITS_PER_LONG / 4 + 2 + 3;
+ hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+
+ symlen = dso__name_len(h->branch_info->to.map->dso);
+ hists__new_col_len(hists, HISTC_DSO_TO, symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
+ }
+ }
+
+ if (h->mem_info) {
+ if (h->mem_info->daddr.sym) {
+ symlen = (int)h->mem_info->daddr.sym->namelen + 4
+ + unresolved_col_width + 2;
+ hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
+ symlen);
+ hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+ symlen + 1);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
+ symlen);
+ }
+ if (h->mem_info->daddr.map) {
+ symlen = dso__name_len(h->mem_info->daddr.map->dso);
+ hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
+ symlen);
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
+ }
+ } else {
+ symlen = unresolved_col_width + 4 + 2;
+ hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
+ hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
+ }
+
+ hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
+ hists__new_col_len(hists, HISTC_MEM_TLB, 22);
+ hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
+ hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
+ hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+
+ if (h->transaction)
+ hists__new_col_len(hists, HISTC_TRANSACTION,
+ hist_entry__transaction_len());
+}
+
+void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+ int row = 0;
+
+ hists__reset_col_len(hists);
+
+ while (next && row++ < max_rows) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ if (!n->filtered)
+ hists__calc_col_len(hists, n);
+ next = rb_next(&n->rb_node);
+ }
+}
+
+static void he_stat__add_cpumode_period(struct he_stat *he_stat,
+ unsigned int cpumode, u64 period)
+{
+ switch (cpumode) {
+ case PERF_RECORD_MISC_KERNEL:
+ he_stat->period_sys += period;
+ break;
+ case PERF_RECORD_MISC_USER:
+ he_stat->period_us += period;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ he_stat->period_guest_sys += period;
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ he_stat->period_guest_us += period;
+ break;
+ default:
+ break;
+ }
+}
+
+static void he_stat__add_period(struct he_stat *he_stat, u64 period,
+ u64 weight)
+{
+ he_stat->period += period;
+ he_stat->weight += weight;
+ he_stat->nr_events += 1;
+}
+
+static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
+{
+ dest->period += src->period;
+ dest->period_sys += src->period_sys;
+ dest->period_us += src->period_us;
+ dest->period_guest_sys += src->period_guest_sys;
+ dest->period_guest_us += src->period_guest_us;
+ dest->nr_events += src->nr_events;
+ dest->weight += src->weight;
+}
+
+static void he_stat__decay(struct he_stat *he_stat)
+{
+ he_stat->period = (he_stat->period * 7) / 8;
+ he_stat->nr_events = (he_stat->nr_events * 7) / 8;
+ /* XXX need decay for weight too? */
+}
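+
+/*
+ * Worked example: the 7/8 factor above is an exponential decay, so a
+ * period of 1000 becomes roughly 875, 765, 669, ... on successive
+ * hists__decay_entries() passes, until it reaches zero and the entry
+ * can be reclaimed below.
+ */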
+
+static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
+{
+ u64 prev_period = he->stat.period;
+ u64 diff;
+
+ if (prev_period == 0)
+ return true;
+
+ he_stat__decay(&he->stat);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__decay(he->stat_acc);
+
+ diff = prev_period - he->stat.period;
+
+ hists->stats.total_period -= diff;
+ if (!he->filtered)
+ hists->stats.total_non_filtered_period -= diff;
+
+ return he->stat.period == 0;
+}
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+ /*
+ * We may be annotating this, for instance, so keep it here in
+ * case it gets new samples; we'll eventually free it when the
+ * user stops browsing and it again gets fully decayed.
+ */
+ if (((zap_user && n->level == '.') ||
+ (zap_kernel && n->level != '.') ||
+ hists__decay_entry(hists, n)) &&
+ !n->used) {
+ rb_erase(&n->rb_node, &hists->entries);
+
+ if (sort__need_collapse)
+ rb_erase(&n->rb_node_in, &hists->entries_collapsed);
+
+ --hists->nr_entries;
+ if (!n->filtered)
+ --hists->nr_non_filtered_entries;
+
+ hist_entry__free(n);
+ }
+ }
+}
+
+/*
+ * histogram, sorted on item, collects periods
+ */
+
+static struct hist_entry *hist_entry__new(struct hist_entry *template,
+ bool sample_self)
+{
+ size_t callchain_size = 0;
+ struct hist_entry *he;
+
+ if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+ callchain_size = sizeof(struct callchain_root);
+
+ he = zalloc(sizeof(*he) + callchain_size);
+
+ if (he != NULL) {
+ *he = *template;
+
+ if (symbol_conf.cumulate_callchain) {
+ he->stat_acc = malloc(sizeof(he->stat));
+ if (he->stat_acc == NULL) {
+ free(he);
+ return NULL;
+ }
+ memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
+ if (!sample_self)
+ memset(&he->stat, 0, sizeof(he->stat));
+ }
+
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+
+ if (he->branch_info) {
+ /*
+ * This branch_info is (part of) an array allocated by
+ * sample__resolve_bstack() and will be freed after the
+ * new entries are added, so we need to save a copy.
+ */
+ he->branch_info = malloc(sizeof(*he->branch_info));
+ if (he->branch_info == NULL) {
+ free(he->stat_acc);
+ free(he);
+ return NULL;
+ }
+
+ memcpy(he->branch_info, template->branch_info,
+ sizeof(*he->branch_info));
+
+ if (he->branch_info->from.map)
+ he->branch_info->from.map->referenced = true;
+ if (he->branch_info->to.map)
+ he->branch_info->to.map->referenced = true;
+ }
+
+ if (he->mem_info) {
+ if (he->mem_info->iaddr.map)
+ he->mem_info->iaddr.map->referenced = true;
+ if (he->mem_info->daddr.map)
+ he->mem_info->daddr.map->referenced = true;
+ }
+
+ if (symbol_conf.use_callchain)
+ callchain_init(he->callchain);
+
+ INIT_LIST_HEAD(&he->pairs.node);
+ }
+
+ return he;
+}
+
+static u8 symbol__parent_filter(const struct symbol *parent)
+{
+ if (symbol_conf.exclude_other && parent == NULL)
+ return 1 << HIST_FILTER__PARENT;
+ return 0;
+}
+
+static struct hist_entry *add_hist_entry(struct hists *hists,
+ struct hist_entry *entry,
+ struct addr_location *al,
+ bool sample_self)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct hist_entry *he;
+ int64_t cmp;
+ u64 period = entry->stat.period;
+ u64 weight = entry->stat.weight;
+
+ p = &hists->entries_in->rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ he = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ /*
+ * Make sure that it receives arguments in the same order as
+ * hist_entry__collapse() so that we can use an appropriate
+ * function when searching for an entry regardless of which
+ * sort keys were used.
+ */
+ cmp = hist_entry__cmp(he, entry);
+
+ if (!cmp) {
+ if (sample_self)
+ he_stat__add_period(&he->stat, period, weight);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__add_period(he->stat_acc, period, weight);
+
+ /*
+ * This mem info was allocated from sample__resolve_mem
+ * and will not be used anymore.
+ */
+ zfree(&entry->mem_info);
+
+ /* If the map of an existing hist_entry has
+ * become out-of-date due to an exec() or
+ * similar, update it. Otherwise we will
+ * mis-adjust symbol addresses when computing
+ * the history counter to increment.
+ */
+ if (he->ms.map != entry->ms.map) {
+ he->ms.map = entry->ms.map;
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+ }
+ goto out;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ he = hist_entry__new(entry, sample_self);
+ if (!he)
+ return NULL;
+
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, hists->entries_in);
+out:
+ if (sample_self)
+ he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
+ return he;
+}
+
+struct hist_entry *__hists__add_entry(struct hists *hists,
+ struct addr_location *al,
+ struct symbol *sym_parent,
+ struct branch_info *bi,
+ struct mem_info *mi,
+ u64 period, u64 weight, u64 transaction,
+ bool sample_self)
+{
+ struct hist_entry entry = {
+ .thread = al->thread,
+ .comm = thread__comm(al->thread),
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
+ .cpu = al->cpu,
+ .cpumode = al->cpumode,
+ .ip = al->addr,
+ .level = al->level,
+ .stat = {
+ .nr_events = 1,
+ .period = period,
+ .weight = weight,
+ },
+ .parent = sym_parent,
+ .filtered = symbol__parent_filter(sym_parent) | al->filtered,
+ .hists = hists,
+ .branch_info = bi,
+ .mem_info = mi,
+ .transaction = transaction,
+ };
+
+ return add_hist_entry(hists, &entry, al, sample_self);
+}
+
+static int
+iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
+{
+ return 0;
+}
+
+static int
+iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
+{
+ return 0;
+}
+
+static int
+iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct perf_sample *sample = iter->sample;
+ struct mem_info *mi;
+
+ mi = sample__resolve_mem(sample, al);
+ if (mi == NULL)
+ return -ENOMEM;
+
+ iter->priv = mi;
+ return 0;
+}
+
+static int
+iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ u64 cost;
+ struct mem_info *mi = iter->priv;
+ struct hist_entry *he;
+
+ if (mi == NULL)
+ return -EINVAL;
+
+ cost = iter->sample->weight;
+ if (!cost)
+ cost = 1;
+
+ /*
+ * We must pass period=weight in order to get the correct
+ * sorting from hists__collapse_resort(), which is based
+ * solely on periods. We want the sorting to be done on
+ * nr_events * weight, and this is achieved indirectly by
+ * passing period=weight here and in he_stat__add_period().
+ */
+ he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
+ cost, cost, 0, true);
+ if (!he)
+ return -ENOMEM;
+
+ iter->he = he;
+ return 0;
+}
+
+static int
+iter_finish_mem_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ struct perf_evsel *evsel = iter->evsel;
+ struct hist_entry *he = iter->he;
+ int err = -EINVAL;
+
+ if (he == NULL)
+ goto out;
+
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+ err = hist_entry__append_callchain(he, iter->sample);
+
+out:
+ /*
+ * We don't need to free iter->priv (mem_info) here since
+ * the mem info was either already freed in add_hist_entry() or
+ * passed to a new hist entry by hist_entry__new().
+ */
+ iter->priv = NULL;
+
+ iter->he = NULL;
+ return err;
+}
+
+static int
+iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct branch_info *bi;
+ struct perf_sample *sample = iter->sample;
+
+ bi = sample__resolve_bstack(sample, al);
+ if (!bi)
+ return -ENOMEM;
+
+ iter->curr = 0;
+ iter->total = sample->branch_stack->nr;
+
+ iter->priv = bi;
+ return 0;
+}
+
+static int
+iter_add_single_branch_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ /* to avoid calling the callback function */
+ iter->he = NULL;
+
+ return 0;
+}
+
+static int
+iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct branch_info *bi = iter->priv;
+ int i = iter->curr;
+
+ if (bi == NULL)
+ return 0;
+
+ if (iter->curr >= iter->total)
+ return 0;
+
+ al->map = bi[i].to.map;
+ al->sym = bi[i].to.sym;
+ al->addr = bi[i].to.addr;
+ return 1;
+}
+
+static int
+iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct branch_info *bi;
+ struct perf_evsel *evsel = iter->evsel;
+ struct hist_entry *he = NULL;
+ int i = iter->curr;
+ int err = 0;
+
+ bi = iter->priv;
+
+ if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
+ goto out;
+
+ /*
+ * The report shows the percentage of total branches captured
+ * and not events sampled. Thus we use a pseudo period of 1.
+ */
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
+ 1, 1, 0, true);
+ if (he == NULL)
+ return -ENOMEM;
+
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+out:
+ iter->he = he;
+ iter->curr++;
+ return err;
+}
+
+static int
+iter_finish_branch_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ zfree(&iter->priv);
+ iter->he = NULL;
+
+ return iter->curr >= iter->total ? 0 : -1;
+}
+
+static int
+iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
+ struct addr_location *al __maybe_unused)
+{
+ return 0;
+}
+
+static int
+iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
+{
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+ struct hist_entry *he;
+
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+ sample->period, sample->weight,
+ sample->transaction, true);
+ if (he == NULL)
+ return -ENOMEM;
+
+ iter->he = he;
+ return 0;
+}
+
+static int
+iter_finish_normal_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ struct hist_entry *he = iter->he;
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+
+ if (he == NULL)
+ return 0;
+
+ iter->he = NULL;
+
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+ return hist_entry__append_callchain(he, sample);
+}
+
+static int
+iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ struct hist_entry **he_cache;
+
+ callchain_cursor_commit(&callchain_cursor);
+
+ /*
+ * This is for detecting cycles or recursion so that they're
+ * accumulated only once, to prevent entries from exceeding
+ * 100% overhead.
+ */
+ he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
+ if (he_cache == NULL)
+ return -ENOMEM;
+
+ iter->priv = he_cache;
+ iter->curr = 0;
+
+ return 0;
+}
+
+static int
+iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al)
+{
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+ struct hist_entry **he_cache = iter->priv;
+ struct hist_entry *he;
+ int err = 0;
+
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+ sample->period, sample->weight,
+ sample->transaction, true);
+ if (he == NULL)
+ return -ENOMEM;
+
+ iter->he = he;
+ he_cache[iter->curr++] = he;
+
+ callchain_append(he->callchain, &callchain_cursor, sample->period);
+
+ /*
+ * We need to re-initialize the cursor since callchain_append()
+ * advanced the cursor to the end.
+ */
+ callchain_cursor_commit(&callchain_cursor);
+
+ hists__inc_nr_samples(&evsel->hists, he->filtered);
+
+ return err;
+}
+
+static int
+iter_next_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al)
+{
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(&callchain_cursor);
+ if (node == NULL)
+ return 0;
+
+ return fill_callchain_info(al, node, iter->hide_unresolved);
+}
+
+static int
+iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al)
+{
+ struct perf_evsel *evsel = iter->evsel;
+ struct perf_sample *sample = iter->sample;
+ struct hist_entry **he_cache = iter->priv;
+ struct hist_entry *he;
+ struct hist_entry he_tmp = {
+ .cpu = al->cpu,
+ .thread = al->thread,
+ .comm = thread__comm(al->thread),
+ .ip = al->addr,
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
+ .parent = iter->parent,
+ };
+ int i;
+ struct callchain_cursor cursor;
+
+ callchain_cursor_snapshot(&cursor, &callchain_cursor);
+
+ callchain_cursor_advance(&callchain_cursor);
+
+ /*
+ * Check if there are duplicate entries in the callchain.
+ * It's possible that it has cycles or recursive calls.
+ */
+ for (i = 0; i < iter->curr; i++) {
+ if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
+ /* to avoid calling the callback function */
+ iter->he = NULL;
+ return 0;
+ }
+ }
+
+ he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
+ sample->period, sample->weight,
+ sample->transaction, false);
+ if (he == NULL)
+ return -ENOMEM;
+
+ iter->he = he;
+ he_cache[iter->curr++] = he;
+
+ callchain_append(he->callchain, &cursor, sample->period);
+ return 0;
+}
+
+static int
+iter_finish_cumulative_entry(struct hist_entry_iter *iter,
+ struct addr_location *al __maybe_unused)
+{
+ zfree(&iter->priv);
+ iter->he = NULL;
+
+ return 0;
+}
+
+const struct hist_iter_ops hist_iter_mem = {
+ .prepare_entry = iter_prepare_mem_entry,
+ .add_single_entry = iter_add_single_mem_entry,
+ .next_entry = iter_next_nop_entry,
+ .add_next_entry = iter_add_next_nop_entry,
+ .finish_entry = iter_finish_mem_entry,
+};
+
+const struct hist_iter_ops hist_iter_branch = {
+ .prepare_entry = iter_prepare_branch_entry,
+ .add_single_entry = iter_add_single_branch_entry,
+ .next_entry = iter_next_branch_entry,
+ .add_next_entry = iter_add_next_branch_entry,
+ .finish_entry = iter_finish_branch_entry,
+};
+
+const struct hist_iter_ops hist_iter_normal = {
+ .prepare_entry = iter_prepare_normal_entry,
+ .add_single_entry = iter_add_single_normal_entry,
+ .next_entry = iter_next_nop_entry,
+ .add_next_entry = iter_add_next_nop_entry,
+ .finish_entry = iter_finish_normal_entry,
+};
+
+const struct hist_iter_ops hist_iter_cumulative = {
+ .prepare_entry = iter_prepare_cumulative_entry,
+ .add_single_entry = iter_add_single_cumulative_entry,
+ .next_entry = iter_next_cumulative_entry,
+ .add_next_entry = iter_add_next_cumulative_entry,
+ .finish_entry = iter_finish_cumulative_entry,
+};
+
+int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ struct perf_evsel *evsel, struct perf_sample *sample,
+ int max_stack_depth, void *arg)
+{
+ int err, err2;
+
+ err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
+ max_stack_depth);
+ if (err)
+ return err;
+
+ iter->evsel = evsel;
+ iter->sample = sample;
+
+ err = iter->ops->prepare_entry(iter, al);
+ if (err)
+ goto out;
+
+ err = iter->ops->add_single_entry(iter, al);
+ if (err)
+ goto out;
+
+ if (iter->he && iter->add_entry_cb) {
+ err = iter->add_entry_cb(iter, al, true, arg);
+ if (err)
+ goto out;
+ }
+
+ while (iter->ops->next_entry(iter, al)) {
+ err = iter->ops->add_next_entry(iter, al);
+ if (err)
+ break;
+
+ if (iter->he && iter->add_entry_cb) {
+ err = iter->add_entry_cb(iter, al, false, arg);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ err2 = iter->ops->finish_entry(iter, al);
+ if (!err)
+ err = err2;
+
+ return err;
+}
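+
+/*
+ * Usage sketch (illustrative only; al, evsel and sample stand for a
+ * caller's already-resolved sample context):
+ *
+ *	struct hist_entry_iter iter = {
+ *		.ops = &hist_iter_normal,
+ *	};
+ *
+ *	if (symbol_conf.cumulate_callchain)
+ *		iter.ops = &hist_iter_cumulative;
+ *
+ *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
+ *				   PERF_MAX_STACK_DEPTH, NULL);
+ *
+ * prepare_entry and add_single_entry run once, next_entry/add_next_entry
+ * loop until the former returns 0, and finish_entry always runs.
+ */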
+
+int64_t
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp = 0;
+
+ perf_hpp__for_each_sort_list(fmt) {
+ if (perf_hpp__should_skip(fmt))
+ continue;
+
+ cmp = fmt->cmp(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp = 0;
+
+ perf_hpp__for_each_sort_list(fmt) {
+ if (perf_hpp__should_skip(fmt))
+ continue;
+
+ cmp = fmt->collapse(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+void hist_entry__free(struct hist_entry *he)
+{
+ zfree(&he->branch_info);
+ zfree(&he->mem_info);
+ zfree(&he->stat_acc);
+ free_srcline(he->srcline);
+ free(he);
+}
+
+/*
+ * collapse the histogram
+ */
+
+static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
+ struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ int64_t cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = hist_entry__collapse(iter, he);
+
+ if (!cmp) {
+ he_stat__add_stat(&iter->stat, &he->stat);
+ if (symbol_conf.cumulate_callchain)
+ he_stat__add_stat(iter->stat_acc, he->stat_acc);
+
+ if (symbol_conf.use_callchain) {
+ callchain_cursor_reset(&callchain_cursor);
+ callchain_merge(&callchain_cursor,
+ iter->callchain,
+ he->callchain);
+ }
+ hist_entry__free(he);
+ return false;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, root);
+ return true;
+}
+
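+/*
+ * entries_in_array[] is used as a double buffer: new entries keep being
+ * inserted into *entries_in while the resort drains the other root, and
+ * this helper flips entries_in under hists->lock.
+ */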
+static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+{
+ struct rb_root *root;
+
+ pthread_mutex_lock(&hists->lock);
+
+ root = hists->entries_in;
+ if (++hists->entries_in > &hists->entries_in_array[1])
+ hists->entries_in = &hists->entries_in_array[0];
+
+ pthread_mutex_unlock(&hists->lock);
+
+ return root;
+}
+
+static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
+{
+ hists__filter_entry_by_dso(hists, he);
+ hists__filter_entry_by_thread(hists, he);
+ hists__filter_entry_by_symbol(hists, he);
+}
+
+void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+ struct hist_entry *n;
+
+ if (!sort__need_collapse)
+ return;
+
+ root = hists__get_rotate_entries_in(hists);
+ next = rb_first(root);
+
+ while (next) {
+ if (session_done())
+ break;
+ n = rb_entry(next, struct hist_entry, rb_node_in);
+ next = rb_next(&n->rb_node_in);
+
+ rb_erase(&n->rb_node_in, root);
+ if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+ /*
+ * If it wasn't combined with one of the entries already
+ * collapsed, we need to apply the filters that may have
+ * been set by, say, the hist_browser.
+ */
+ hists__apply_filters(hists, n);
+ }
+ if (prog)
+ ui_progress__update(prog, 1);
+ }
+}
+
+static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
+{
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp = 0;
+
+ perf_hpp__for_each_sort_list(fmt) {
+ if (perf_hpp__should_skip(fmt))
+ continue;
+
+ cmp = fmt->sort(a, b);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+static void hists__reset_filter_stats(struct hists *hists)
+{
+ hists->nr_non_filtered_entries = 0;
+ hists->stats.total_non_filtered_period = 0;
+}
+
+void hists__reset_stats(struct hists *hists)
+{
+ hists->nr_entries = 0;
+ hists->stats.total_period = 0;
+
+ hists__reset_filter_stats(hists);
+}
+
+static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
+{
+ hists->nr_non_filtered_entries++;
+ hists->stats.total_non_filtered_period += h->stat.period;
+}
+
+void hists__inc_stats(struct hists *hists, struct hist_entry *h)
+{
+ if (!h->filtered)
+ hists__inc_filter_stats(hists, h);
+
+ hists->nr_entries++;
+ hists->stats.total_period += h->stat.period;
+}
+
+static void __hists__insert_output_entry(struct rb_root *entries,
+ struct hist_entry *he,
+ u64 min_callchain_hits)
+{
+ struct rb_node **p = &entries->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+
+ if (symbol_conf.use_callchain)
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (hist_entry__sort(he, iter) > 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, entries);
+}
+
+void hists__output_resort(struct hists *hists)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+ struct hist_entry *n;
+ u64 min_callchain_hits;
+
+ min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
+
+ if (sort__need_collapse)
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
+
+ next = rb_first(root);
+ hists->entries = RB_ROOT;
+
+ hists__reset_stats(hists);
+ hists__reset_col_len(hists);
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node_in);
+ next = rb_next(&n->rb_node_in);
+
+ __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
+ hists__inc_stats(hists, n);
+
+ if (!n->filtered)
+ hists__calc_col_len(hists, n);
+ }
+}
+
+static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
+ enum hist_filter filter)
+{
+ h->filtered &= ~(1 << filter);
+ if (h->filtered)
+ return;
+
+ /* force fold unfiltered entry for simplicity */
+ h->ms.unfolded = false;
+ h->row_offset = 0;
+
+ hists->stats.nr_non_filtered_samples += h->stat.nr_events;
+
+ hists__inc_filter_stats(hists, h);
+ hists__calc_col_len(hists, h);
+}
+
+
+static bool hists__filter_entry_by_dso(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->dso_filter != NULL &&
+ (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
+ he->filtered |= (1 << HIST_FILTER__DSO);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_dso(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->stats.nr_non_filtered_samples = 0;
+
+ hists__reset_filter_stats(hists);
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (symbol_conf.exclude_other && !h->parent)
+ continue;
+
+ if (hists__filter_entry_by_dso(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
+ }
+}
+
+static bool hists__filter_entry_by_thread(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->thread_filter != NULL &&
+ he->thread != hists->thread_filter) {
+ he->filtered |= (1 << HIST_FILTER__THREAD);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_thread(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->stats.nr_non_filtered_samples = 0;
+
+ hists__reset_filter_stats(hists);
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (hists__filter_entry_by_thread(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
+ }
+}
+
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+ struct hist_entry *he)
+{
+ if (hists->symbol_filter_str != NULL &&
+ (!he->ms.sym || strstr(he->ms.sym->name,
+ hists->symbol_filter_str) == NULL)) {
+ he->filtered |= (1 << HIST_FILTER__SYMBOL);
+ return true;
+ }
+
+ return false;
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+ struct rb_node *nd;
+
+ hists->stats.nr_non_filtered_samples = 0;
+
+ hists__reset_filter_stats(hists);
+ hists__reset_col_len(hists);
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (hists__filter_entry_by_symbol(hists, h))
+ continue;
+
+ hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+ }
+}
+
+void events_stats__inc(struct events_stats *stats, u32 type)
+{
+ ++stats->nr_events[0];
+ ++stats->nr_events[type];
+}
+
+void hists__inc_nr_events(struct hists *hists, u32 type)
+{
+ events_stats__inc(&hists->stats, type);
+}
+
+void hists__inc_nr_samples(struct hists *hists, bool filtered)
+{
+ events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
+ if (!filtered)
+ hists->stats.nr_non_filtered_samples++;
+}
+
+static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
+ struct hist_entry *pair)
+{
+ struct rb_root *root;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct hist_entry *he;
+ int64_t cmp;
+
+ if (sort__need_collapse)
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
+
+ p = &root->rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ he = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = hist_entry__collapse(he, pair);
+
+ if (!cmp)
+ goto out;
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ he = hist_entry__new(pair, true);
+ if (he) {
+ memset(&he->stat, 0, sizeof(he->stat));
+ he->hists = hists;
+ rb_link_node(&he->rb_node_in, parent, p);
+ rb_insert_color(&he->rb_node_in, root);
+ hists__inc_stats(hists, he);
+ he->dummy = true;
+ }
+out:
+ return he;
+}
+
+static struct hist_entry *hists__find_entry(struct hists *hists,
+ struct hist_entry *he)
+{
+ struct rb_node *n;
+
+ if (sort__need_collapse)
+ n = hists->entries_collapsed.rb_node;
+ else
+ n = hists->entries_in->rb_node;
+
+ while (n) {
+ struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
+ int64_t cmp = hist_entry__collapse(iter, he);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return iter;
+ }
+
+ return NULL;
+}
+
+/*
+ * Look for pairs to link to the leader buckets (hist_entries):
+ */
+void hists__match(struct hists *leader, struct hists *other)
+{
+ struct rb_root *root;
+ struct rb_node *nd;
+ struct hist_entry *pos, *pair;
+
+ if (sort__need_collapse)
+ root = &leader->entries_collapsed;
+ else
+ root = leader->entries_in;
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct hist_entry, rb_node_in);
+ pair = hists__find_entry(other, pos);
+
+ if (pair)
+ hist_entry__add_pair(pair, pos);
+ }
+}
+
+/*
+ * Look for entries in the other hists that are not present in the leader;
+ * if we find any, add a dummy entry on the leader hists, with period=0 and
+ * nr_events=0, to serve as the list header.
+ */
+int hists__link(struct hists *leader, struct hists *other)
+{
+ struct rb_root *root;
+ struct rb_node *nd;
+ struct hist_entry *pos, *pair;
+
+ if (sort__need_collapse)
+ root = &other->entries_collapsed;
+ else
+ root = other->entries_in;
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ pos = rb_entry(nd, struct hist_entry, rb_node_in);
+
+ if (!hist_entry__has_pairs(pos)) {
+ pair = hists__add_dummy_entry(leader, pos);
+ if (pair == NULL)
+ return -1;
+ hist_entry__add_pair(pos, pair);
+ }
+ }
+
+ return 0;
+}
+
+u64 hists__total_period(struct hists *hists)
+{
+ return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
+ hists->stats.total_period;
+}
+
+int parse_filter_percentage(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
+{
+ if (!strcmp(arg, "relative"))
+ symbol_conf.filter_relative = true;
+ else if (!strcmp(arg, "absolute"))
+ symbol_conf.filter_relative = false;
+ else
+ return -1;
+
+ return 0;
+}
+
+int perf_hist_config(const char *var, const char *value)
+{
+ if (!strcmp(var, "hist.percentage"))
+ return parse_filter_percentage(NULL, value, 0);
+
+ return 0;
+}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
new file mode 100644
index 00000000000..742f49a8572
--- /dev/null
+++ b/tools/perf/util/hist.h
@@ -0,0 +1,347 @@
+#ifndef __PERF_HIST_H
+#define __PERF_HIST_H
+
+#include <linux/types.h>
+#include <pthread.h>
+#include "callchain.h"
+#include "header.h"
+#include "color.h"
+#include "ui/progress.h"
+
+extern struct callchain_param callchain_param;
+
+struct hist_entry;
+struct addr_location;
+struct symbol;
+
+enum hist_filter {
+ HIST_FILTER__DSO,
+ HIST_FILTER__THREAD,
+ HIST_FILTER__PARENT,
+ HIST_FILTER__SYMBOL,
+ HIST_FILTER__GUEST,
+ HIST_FILTER__HOST,
+};
+
+/*
+ * The kernel collects the number of events it couldn't send in a stretch and
+ * when possible sends this number in a PERF_RECORD_LOST event. The number of
+ * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
+ * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
+ * the sum of all struct lost_event.lost fields reported.
+ *
+ * The total_period is needed because by default auto-freq is used, so
+ * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency doesn't give
+ * the total number of low level events; it is necessary to sum all the
+ * struct sample_event.period fields and stash the result in total_period.
+ */
+struct events_stats {
+ u64 total_period;
+ u64 total_non_filtered_period;
+ u64 total_lost;
+ u64 total_invalid_chains;
+ u32 nr_events[PERF_RECORD_HEADER_MAX];
+ u32 nr_non_filtered_samples;
+ u32 nr_lost_warned;
+ u32 nr_unknown_events;
+ u32 nr_invalid_chains;
+ u32 nr_unknown_id;
+ u32 nr_unprocessable_samples;
+};
+
+enum hist_column {
+ HISTC_SYMBOL,
+ HISTC_DSO,
+ HISTC_THREAD,
+ HISTC_COMM,
+ HISTC_PARENT,
+ HISTC_CPU,
+ HISTC_SRCLINE,
+ HISTC_MISPREDICT,
+ HISTC_IN_TX,
+ HISTC_ABORT,
+ HISTC_SYMBOL_FROM,
+ HISTC_SYMBOL_TO,
+ HISTC_DSO_FROM,
+ HISTC_DSO_TO,
+ HISTC_LOCAL_WEIGHT,
+ HISTC_GLOBAL_WEIGHT,
+ HISTC_MEM_DADDR_SYMBOL,
+ HISTC_MEM_DADDR_DSO,
+ HISTC_MEM_LOCKED,
+ HISTC_MEM_TLB,
+ HISTC_MEM_LVL,
+ HISTC_MEM_SNOOP,
+ HISTC_MEM_DCACHELINE,
+ HISTC_TRANSACTION,
+ HISTC_NR_COLS, /* Last entry */
+};
+
+struct thread;
+struct dso;
+
+struct hists {
+ struct rb_root entries_in_array[2];
+ struct rb_root *entries_in;
+ struct rb_root entries;
+ struct rb_root entries_collapsed;
+ u64 nr_entries;
+ u64 nr_non_filtered_entries;
+ const struct thread *thread_filter;
+ const struct dso *dso_filter;
+ const char *uid_filter_str;
+ const char *symbol_filter_str;
+ pthread_mutex_t lock;
+ struct events_stats stats;
+ u64 event_stream;
+ u16 col_len[HISTC_NR_COLS];
+};
+
+struct hist_entry_iter;
+
+struct hist_iter_ops {
+ int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*next_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *);
+ int (*finish_entry)(struct hist_entry_iter *, struct addr_location *);
+};
+
+struct hist_entry_iter {
+ int total;
+ int curr;
+
+ bool hide_unresolved;
+
+ struct perf_evsel *evsel;
+ struct perf_sample *sample;
+ struct hist_entry *he;
+ struct symbol *parent;
+ void *priv;
+
+ const struct hist_iter_ops *ops;
+ /* user-defined callback function (optional) */
+ int (*add_entry_cb)(struct hist_entry_iter *iter,
+ struct addr_location *al, bool single, void *arg);
+};
+
+extern const struct hist_iter_ops hist_iter_normal;
+extern const struct hist_iter_ops hist_iter_branch;
+extern const struct hist_iter_ops hist_iter_mem;
+extern const struct hist_iter_ops hist_iter_cumulative;
+
+struct hist_entry *__hists__add_entry(struct hists *hists,
+ struct addr_location *al,
+ struct symbol *parent,
+ struct branch_info *bi,
+ struct mem_info *mi, u64 period,
+ u64 weight, u64 transaction,
+ bool sample_self);
+int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ struct perf_evsel *evsel, struct perf_sample *sample,
+ int max_stack_depth, void *arg);
+
+int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
+int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
+int hist_entry__transaction_len(void);
+int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
+ struct hists *hists);
+void hist_entry__free(struct hist_entry *);
+
+void hists__output_resort(struct hists *hists);
+void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
+
+void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
+void hists__output_recalc_col_len(struct hists *hists, int max_rows);
+
+u64 hists__total_period(struct hists *hists);
+void hists__reset_stats(struct hists *hists);
+void hists__inc_stats(struct hists *hists, struct hist_entry *h);
+void hists__inc_nr_events(struct hists *hists, u32 type);
+void hists__inc_nr_samples(struct hists *hists, bool filtered);
+void events_stats__inc(struct events_stats *stats, u32 type);
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
+
+size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
+ int max_cols, float min_pcnt, FILE *fp);
+
+void hists__filter_by_dso(struct hists *hists);
+void hists__filter_by_thread(struct hists *hists);
+void hists__filter_by_symbol(struct hists *hists);
+
+static inline bool hists__has_filter(struct hists *hists)
+{
+ return hists->thread_filter || hists->dso_filter ||
+ hists->symbol_filter_str;
+}
+
+u16 hists__col_len(struct hists *hists, enum hist_column col);
+void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len);
+bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len);
+void hists__reset_col_len(struct hists *hists);
+void hists__calc_col_len(struct hists *hists, struct hist_entry *he);
+
+void hists__match(struct hists *leader, struct hists *other);
+int hists__link(struct hists *leader, struct hists *other);
+
+struct perf_hpp {
+ char *buf;
+ size_t size;
+ const char *sep;
+ void *ptr;
+};
+
+struct perf_hpp_fmt {
+ int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel);
+ int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel);
+ int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b);
+ int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b);
+ int64_t (*sort)(struct hist_entry *a, struct hist_entry *b);
+
+ struct list_head list;
+ struct list_head sort_list;
+ bool elide;
+};
+
+extern struct list_head perf_hpp__list;
+extern struct list_head perf_hpp__sort_list;
+
+#define perf_hpp__for_each_format(format) \
+ list_for_each_entry(format, &perf_hpp__list, list)
+
+#define perf_hpp__for_each_format_safe(format, tmp) \
+ list_for_each_entry_safe(format, tmp, &perf_hpp__list, list)
+
+#define perf_hpp__for_each_sort_list(format) \
+ list_for_each_entry(format, &perf_hpp__sort_list, sort_list)
+
+#define perf_hpp__for_each_sort_list_safe(format, tmp) \
+ list_for_each_entry_safe(format, tmp, &perf_hpp__sort_list, sort_list)
+
+extern struct perf_hpp_fmt perf_hpp__format[];
+
+enum {
+ /* Matches perf_hpp__format array. */
+ PERF_HPP__OVERHEAD,
+ PERF_HPP__OVERHEAD_SYS,
+ PERF_HPP__OVERHEAD_US,
+ PERF_HPP__OVERHEAD_GUEST_SYS,
+ PERF_HPP__OVERHEAD_GUEST_US,
+ PERF_HPP__OVERHEAD_ACC,
+ PERF_HPP__SAMPLES,
+ PERF_HPP__PERIOD,
+
+ PERF_HPP__MAX_INDEX
+};
+
+void perf_hpp__init(void);
+void perf_hpp__column_register(struct perf_hpp_fmt *format);
+void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
+void perf_hpp__column_enable(unsigned col);
+void perf_hpp__column_disable(unsigned col);
+void perf_hpp__cancel_cumulate(void);
+
+void perf_hpp__register_sort_field(struct perf_hpp_fmt *format);
+void perf_hpp__setup_output_field(void);
+void perf_hpp__reset_output_field(void);
+void perf_hpp__append_sort_keys(void);
+
+bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
+bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
+
+static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format)
+{
+ return format->elide;
+}
+
+void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists);
+
+typedef u64 (*hpp_field_fn)(struct hist_entry *he);
+typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
+typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
+
+int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, const char *fmt,
+ hpp_snprint_fn print_fn, bool fmt_percent);
+int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, const char *fmt,
+ hpp_snprint_fn print_fn, bool fmt_percent);
+
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
+{
+ hpp->buf += inc;
+ hpp->size -= inc;
+}
+
+static inline size_t perf_hpp__use_color(void)
+{
+ return !symbol_conf.field_sep;
+}
+
+static inline size_t perf_hpp__color_overhead(void)
+{
+ return perf_hpp__use_color() ?
+ (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
+ : 0;
+}
+
+struct perf_evlist;
+
+struct hist_browser_timer {
+ void (*timer)(void *arg);
+ void *arg;
+ int refresh;
+};
+
+#ifdef HAVE_SLANG_SUPPORT
+#include "../ui/keysyms.h"
+int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
+ struct hist_browser_timer *hbt);
+
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
+ struct hist_browser_timer *hbt,
+ float min_pcnt,
+ struct perf_session_env *env);
+int script_browse(const char *script_opt);
+#else
+static inline
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
+ const char *help __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused,
+ float min_pcnt __maybe_unused,
+ struct perf_session_env *env __maybe_unused)
+{
+ return 0;
+}
+
+static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused)
+{
+ return 0;
+}
+
+static inline int script_browse(const char *script_opt __maybe_unused)
+{
+ return 0;
+}
+
+#define K_LEFT -1000
+#define K_RIGHT -2000
+#define K_SWITCH_INPUT_DATA -3000
+#endif
+
+unsigned int hists__sort_list_width(struct hists *hists);
+
+struct option;
+int parse_filter_percentage(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused);
+int perf_hist_config(const char *var, const char *value);
+
+#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
new file mode 100644
index 00000000000..5c1d0d099f0
--- /dev/null
+++ b/tools/perf/util/hweight.c
@@ -0,0 +1,31 @@
+#include <linux/bitops.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
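+
+/*
+ * Worked example: hweight32(0xF0F0F0F0) reduces its 16 set bits in five
+ * folding steps and returns 16; hweight64(~0ULL) returns 64.
+ */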
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
new file mode 100644
index 00000000000..6789d788d49
--- /dev/null
+++ b/tools/perf/util/include/asm/alternative-asm.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
+#define _PERF_ASM_ALTERNATIVE_ASM_H
+
+/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
+
+#define altinstruction_entry #
+
+#endif
diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..ed538942523
--- /dev/null
+++ b/tools/perf/util/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h
new file mode 100644
index 00000000000..2a9bdc06630
--- /dev/null
+++ b/tools/perf/util/include/asm/byteorder.h
@@ -0,0 +1,2 @@
+#include <asm/types.h>
+#include "../../../../include/uapi/linux/swab.h"
diff --git a/tools/perf/util/include/asm/cpufeature.h b/tools/perf/util/include/asm/cpufeature.h
new file mode 100644
index 00000000000..acffd5e4d1d
--- /dev/null
+++ b/tools/perf/util/include/asm/cpufeature.h
@@ -0,0 +1,9 @@
+
+#ifndef PERF_CPUFEATURE_H
+#define PERF_CPUFEATURE_H
+
+/* cpufeature.h ... dummy header file for including arch/x86/lib/memcpy_64.S */
+
+#define X86_FEATURE_REP_GOOD 0
+
+#endif /* PERF_CPUFEATURE_H */
diff --git a/tools/perf/util/include/asm/dwarf2.h b/tools/perf/util/include/asm/dwarf2.h
new file mode 100644
index 00000000000..afe38199e92
--- /dev/null
+++ b/tools/perf/util/include/asm/dwarf2.h
@@ -0,0 +1,13 @@
+
+#ifndef PERF_DWARF2_H
+#define PERF_DWARF2_H
+
+/* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */
+
+#define CFI_STARTPROC
+#define CFI_ENDPROC
+#define CFI_REMEMBER_STATE
+#define CFI_RESTORE_STATE
+
+#endif /* PERF_DWARF2_H */
+
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 00000000000..d82b170bb21
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
new file mode 100644
index 00000000000..36cf26d434a
--- /dev/null
+++ b/tools/perf/util/include/asm/hweight.h
@@ -0,0 +1,8 @@
+#ifndef PERF_HWEIGHT_H
+#define PERF_HWEIGHT_H
+
+#include <linux/types.h>
+unsigned int hweight32(unsigned int w);
+unsigned long hweight64(__u64 w);
+
+#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h
new file mode 100644
index 00000000000..ed538942523
--- /dev/null
+++ b/tools/perf/util/include/asm/swab.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/tools/perf/util/include/asm/system.h b/tools/perf/util/include/asm/system.h
new file mode 100644
index 00000000000..710cecca972
--- /dev/null
+++ b/tools/perf/util/include/asm/system.h
@@ -0,0 +1 @@
+/* Empty */
diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h
new file mode 100644
index 00000000000..d0f72b8fcc3
--- /dev/null
+++ b/tools/perf/util/include/asm/uaccess.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_ASM_UACCESS_H_
+#define _PERF_ASM_UACCESS_H_
+
+#define __get_user(src, dest) \
+({ \
+ (src) = *dest; \
+ 0; \
+})
+
+#define get_user __get_user
+
+#define access_ok(type, addr, size) 1
+
+#endif
diff --git a/tools/perf/util/include/asm/unistd_32.h b/tools/perf/util/include/asm/unistd_32.h
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/tools/perf/util/include/asm/unistd_32.h
@@ -0,0 +1 @@
+
diff --git a/tools/perf/util/include/asm/unistd_64.h b/tools/perf/util/include/asm/unistd_64.h
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/tools/perf/util/include/asm/unistd_64.h
@@ -0,0 +1 @@
+
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
new file mode 100644
index 00000000000..8f149655f49
--- /dev/null
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_DWARF_REGS_H_
+#define _PERF_DWARF_REGS_H_
+
+#ifdef HAVE_DWARF_SUPPORT
+const char *get_arch_regstr(unsigned int n);
+#endif
+
+#endif
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
new file mode 100644
index 00000000000..01ffd12dc79
--- /dev/null
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -0,0 +1,49 @@
+#ifndef _PERF_BITOPS_H
+#define _PERF_BITOPS_H
+
+#include <string.h>
+#include <linux/bitops.h>
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+int __bitmap_weight(const unsigned long *bitmap, int bits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+( \
+ ((nbits) % BITS_PER_LONG) ? \
+ (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
+)
+
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline int bitmap_weight(const unsigned long *src, int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight(src, nbits);
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src1 | *src2;
+ else
+ __bitmap_or(dst, src1, src2, nbits);
+}
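+
+/*
+ * Usage sketch (assuming a 64-bit long, so BITS_TO_LONGS(128) == 2):
+ *
+ *	DECLARE_BITMAP(mask, 128);	// unsigned long mask[2]
+ *
+ *	bitmap_zero(mask, 128);
+ *	set_bit(3, mask);		// from <linux/bitops.h> above
+ *	set_bit(127, mask);
+ *	// bitmap_weight(mask, 128) == 2
+ */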
+
+#endif /* _PERF_BITOPS_H */
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
new file mode 100644
index 00000000000..dadfa7e5428
--- /dev/null
+++ b/tools/perf/util/include/linux/bitops.h
@@ -0,0 +1,160 @@
+#ifndef _PERF_LINUX_BITOPS_H_
+#define _PERF_LINUX_BITOPS_H_
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/hweight.h>
+
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
+#define BITS_PER_LONG __WORDSIZE
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but uses bit as the value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
+}
+
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+ return ((1UL << (nr % BITS_PER_LONG)) &
+ (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * __ffs - find first set bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bits are set, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
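+
+/*
+ * Worked example: __ffs() returns the zero-based index of the lowest set
+ * bit, so __ffs(0x1) == 0 and __ffs(0x18) == 3; __ffs(0) is undefined,
+ * as noted above.
+ */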
+
+typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
+
+/*
+ * Find the first set bit in a memory region.
+ */
+static inline unsigned long
+find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ long_alias_t *p = (long_alias_t *) addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+
+/*
+ * Find the next set bit in a memory region.
+ */
+static inline unsigned long
+find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
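+
+/*
+ * Usage sketch: these two helpers mostly exist to back for_each_set_bit()
+ * above, e.g.:
+ *
+ *	unsigned long bits[1] = { 0x29 };	// bits 0, 3 and 5 set
+ *	int bit;
+ *
+ *	for_each_set_bit(bit, bits, BITS_PER_LONG)
+ *		printf("%d ", bit);		// prints "0 3 5 "
+ */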
+
+#endif
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
new file mode 100644
index 00000000000..c10a35e1afb
--- /dev/null
+++ b/tools/perf/util/include/linux/const.h
@@ -0,0 +1 @@
+#include "../../../../include/uapi/linux/const.h"
diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h
new file mode 100644
index 00000000000..a53d4ee1e0b
--- /dev/null
+++ b/tools/perf/util/include/linux/ctype.h
@@ -0,0 +1 @@
+#include "../util.h"
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
new file mode 100644