Diffstat (limited to 'tools/perf/util/machine.c')
-rw-r--r--  tools/perf/util/machine.c  378
1 file changed, 211 insertions(+), 167 deletions(-)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6188d2876a7..c73e1fc12e5 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -9,6 +9,7 @@
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
+#include <symbol/kallsyms.h>
#include "unwind.h"
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
@@ -26,6 +27,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->pid = pid;
machine->symbol_filter = NULL;
+ machine->id_hdr_size = 0;
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
@@ -40,12 +42,29 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
return -ENOMEM;
snprintf(comm, sizeof(comm), "[guest/%d]", pid);
- thread__set_comm(thread, comm);
+ thread__set_comm(thread, comm, 0);
}
return 0;
}
+struct machine *machine__new_host(void)
+{
+ struct machine *machine = malloc(sizeof(*machine));
+
+ if (machine != NULL) {
+ machine__init(machine, "", HOST_KERNEL_ID);
+
+ if (machine__create_kernel_maps(machine) < 0)
+ goto out_delete;
+ }
+
+ return machine;
+out_delete:
+ free(machine);
+ return NULL;
+}
+
static void dsos__delete(struct list_head *dsos)
{
struct dso *pos, *n;
@@ -84,8 +103,7 @@ void machine__exit(struct machine *machine)
map_groups__exit(&machine->kmaps);
dsos__delete(&machine->user_dsos);
dsos__delete(&machine->kernel_dsos);
- free(machine->root_dir);
- machine->root_dir = NULL;
+ zfree(&machine->root_dir);
}
void machine__delete(struct machine *machine)
@@ -298,6 +316,17 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
rb_link_node(&th->rb_node, parent, p);
rb_insert_color(&th->rb_node, &machine->threads);
machine->last_match = th;
+
+ /*
+ * We have to initialize map_groups separately
+ * after rb tree is updated.
+ *
+ * The reason is that we call machine__findnew_thread
+ * within thread__init_map_groups to find the thread
+ * leader and that would screw up the rb tree.
+ */
+ if (thread__init_map_groups(th, machine))
+ return NULL;
}
return th;
@@ -309,12 +338,14 @@ struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
return __machine__findnew_thread(machine, pid, tid, true);
}
-struct thread *machine__find_thread(struct machine *machine, pid_t tid)
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+ pid_t tid)
{
- return __machine__findnew_thread(machine, 0, tid, false);
+ return __machine__findnew_thread(machine, pid, tid, false);
}
-int machine__process_comm_event(struct machine *machine, union perf_event *event)
+int machine__process_comm_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(machine,
event->comm.pid,
@@ -323,7 +354,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
if (dump_trace)
perf_event__fprintf_comm(event, stdout);
- if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
+ if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
@@ -332,7 +363,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
- union perf_event *event)
+ union perf_event *event, struct perf_sample *sample __maybe_unused)
{
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
event->lost.id, event->lost.lost);
@@ -465,49 +496,50 @@ struct process_args {
u64 start;
};
-static int symbol__in_kernel(void *arg, const char *name,
- char type __maybe_unused, u64 start)
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+ size_t bufsz)
{
- struct process_args *args = arg;
-
- if (strchr(name, '['))
- return 0;
-
- args->start = start;
- return 1;
+ if (machine__is_default_guest(machine))
+ scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+ else
+ scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass in NULL as
+ * symbol_name if it's not that important.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+ const char **symbol_name)
{
- const char *filename;
- char path[PATH_MAX];
- struct process_args args;
+ char filename[PATH_MAX];
+ int i;
+ const char *name;
+ u64 addr = 0;
- if (machine__is_host(machine)) {
- filename = "/proc/kallsyms";
- } else {
- if (machine__is_default_guest(machine))
- filename = (char *)symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
- }
+ machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
- if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
- return 0;
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr = kallsyms__get_function_start(filename, name);
+ if (addr)
+ break;
+ }
+
+ if (symbol_name)
+ *symbol_name = name;
- return args.start;
+ return addr;
}
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
- u64 start = machine__get_kernel_start_addr(machine);
+ u64 start = machine__get_kernel_start_addr(machine, NULL);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
@@ -547,11 +579,10 @@ void machine__destroy_kernel_maps(struct machine *machine)
* on one of them.
*/
if (type == MAP__FUNCTION) {
- free((char *)kmap->ref_reloc_sym->name);
- kmap->ref_reloc_sym->name = NULL;
- free(kmap->ref_reloc_sym);
- }
- kmap->ref_reloc_sym = NULL;
+ zfree((char **)&kmap->ref_reloc_sym->name);
+ zfree(&kmap->ref_reloc_sym);
+ } else
+ kmap->ref_reloc_sym = NULL;
}
map__delete(machine->vmlinux_maps[type]);
@@ -699,7 +730,7 @@ static char *get_kernel_version(const char *root_dir)
}
static int map_groups__set_modules_path_dir(struct map_groups *mg,
- const char *dir_name)
+ const char *dir_name, int depth)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
@@ -724,7 +755,15 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
!strcmp(dent->d_name, ".."))
continue;
- ret = map_groups__set_modules_path_dir(mg, path);
+ /* Do not follow top-level source and build symlinks */
+ if (depth == 0) {
+ if (!strcmp(dent->d_name, "source") ||
+ !strcmp(dent->d_name, "build"))
+ continue;
+ }
+
+ ret = map_groups__set_modules_path_dir(mg, path,
+ depth + 1);
if (ret < 0)
goto out;
} else {
@@ -749,8 +788,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
ret = -1;
goto out;
}
- dso__set_long_name(map->dso, long_name);
- map->dso->lname_alloc = 1;
+ dso__set_long_name(map->dso, long_name, true);
dso__kernel_module_get_build_id(map->dso, "");
}
}
@@ -769,87 +807,60 @@ static int machine__set_modules_path(struct machine *machine)
if (!version)
return -1;
- snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
machine->root_dir, version);
free(version);
- return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
-static int machine__create_modules(struct machine *machine)
+static int machine__create_module(void *arg, const char *name, u64 start)
{
- char *line = NULL;
- size_t n;
- FILE *file;
+ struct machine *machine = arg;
struct map *map;
+
+ map = machine__new_module(machine, start, name);
+ if (map == NULL)
+ return -1;
+
+ dso__kernel_module_get_build_id(map->dso, machine->root_dir);
+
+ return 0;
+}
+
+static int machine__create_modules(struct machine *machine)
+{
const char *modules;
char path[PATH_MAX];
- if (machine__is_default_guest(machine))
+ if (machine__is_default_guest(machine)) {
modules = symbol_conf.default_guest_modules;
- else {
- sprintf(path, "%s/proc/modules", machine->root_dir);
+ } else {
+ snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
modules = path;
}
if (symbol__restricted_filename(modules, "/proc/modules"))
return -1;
- file = fopen(modules, "r");
- if (file == NULL)
+ if (modules__parse(modules, machine, machine__create_module))
return -1;
- while (!feof(file)) {
- char name[PATH_MAX];
- u64 start;
- char *sep;
- int line_len;
-
- line_len = getline(&line, &n, file);
- if (line_len < 0)
- break;
-
- if (!line)
- goto out_failure;
-
- line[--line_len] = '\0'; /* \n */
-
- sep = strrchr(line, 'x');
- if (sep == NULL)
- continue;
-
- hex2u64(sep + 1, &start);
-
- sep = strchr(line, ' ');
- if (sep == NULL)
- continue;
-
- *sep = '\0';
-
- snprintf(name, sizeof(name), "[%s]", line);
- map = machine__new_module(machine, start, name);
- if (map == NULL)
- goto out_delete_line;
- dso__kernel_module_get_build_id(map->dso, machine->root_dir);
- }
+ if (!machine__set_modules_path(machine))
+ return 0;
- free(line);
- fclose(file);
+ pr_debug("Problems setting modules path maps, continuing anyway...\n");
- if (machine__set_modules_path(machine) < 0) {
- pr_debug("Problems setting modules path maps, continuing anyway...\n");
- }
return 0;
-
-out_delete_line:
- free(line);
-out_failure:
- return -1;
}
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
+ const char *name;
+ u64 addr = machine__get_kernel_start_addr(machine, &name);
+ if (!addr)
+ return -1;
if (kernel == NULL ||
__machine__create_kernel_maps(machine, kernel) < 0)
@@ -868,6 +879,13 @@ int machine__create_kernel_maps(struct machine *machine)
* Now that we have all the maps created, just set the ->end of them:
*/
map_groups__fixup_end(&machine->kmaps);
+
+ if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+ addr)) {
+ machine__destroy_kernel_maps(machine);
+ return -1;
+ }
+
return 0;
}
@@ -952,8 +970,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (name == NULL)
goto out_problem;
- map->dso->short_name = name;
- map->dso->sname_alloc = 1;
+ dso__set_short_name(map->dso, name, true);
map->end = map->start + event->mmap.len;
} else if (is_kernel_mmap) {
const char *symbol_name = (event->mmap.filename +
@@ -998,7 +1015,8 @@ out_problem:
}
int machine__process_mmap2_event(struct machine *machine,
- union perf_event *event)
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct thread *thread;
@@ -1018,7 +1036,7 @@ int machine__process_mmap2_event(struct machine *machine,
}
thread = machine__findnew_thread(machine, event->mmap2.pid,
- event->mmap2.pid);
+ event->mmap2.tid);
if (thread == NULL)
goto out_problem;
@@ -1032,6 +1050,8 @@ int machine__process_mmap2_event(struct machine *machine,
event->mmap2.pid, event->mmap2.maj,
event->mmap2.min, event->mmap2.ino,
event->mmap2.ino_generation,
+ event->mmap2.prot,
+ event->mmap2.flags,
event->mmap2.filename, type);
if (map == NULL)
@@ -1045,7 +1065,8 @@ out_problem:
return 0;
}
-int machine__process_mmap_event(struct machine *machine, union perf_event *event)
+int machine__process_mmap_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct thread *thread;
@@ -1065,7 +1086,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
}
thread = machine__findnew_thread(machine, event->mmap.pid,
- event->mmap.pid);
+ event->mmap.tid);
if (thread == NULL)
goto out_problem;
@@ -1076,7 +1097,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
map = map__new(&machine->user_dsos, event->mmap.start,
event->mmap.len, event->mmap.pgoff,
- event->mmap.pid, 0, 0, 0, 0,
+ event->mmap.pid, 0, 0, 0, 0, 0, 0,
event->mmap.filename,
type);
@@ -1102,9 +1123,12 @@ static void machine__remove_thread(struct machine *machine, struct thread *th)
list_add_tail(&th->node, &machine->dead_threads);
}
-int machine__process_fork_event(struct machine *machine, union perf_event *event)
+int machine__process_fork_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample)
{
- struct thread *thread = machine__find_thread(machine, event->fork.tid);
+ struct thread *thread = machine__find_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
@@ -1119,7 +1143,7 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
perf_event__fprintf_task(event, stdout);
if (thread == NULL || parent == NULL ||
- thread__fork(thread, parent) < 0) {
+ thread__fork(thread, parent, sample->time) < 0) {
dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
return -1;
}
@@ -1127,10 +1151,12 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
return 0;
}
-int machine__process_exit_event(struct machine *machine __maybe_unused,
- union perf_event *event)
+int machine__process_exit_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
{
- struct thread *thread = machine__find_thread(machine, event->fork.tid);
+ struct thread *thread = machine__find_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
if (dump_trace)
perf_event__fprintf_task(event, stdout);
@@ -1141,23 +1167,24 @@ int machine__process_exit_event(struct machine *machine __maybe_unused,
return 0;
}
-int machine__process_event(struct machine *machine, union perf_event *event)
+int machine__process_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample)
{
int ret;
switch (event->header.type) {
case PERF_RECORD_COMM:
- ret = machine__process_comm_event(machine, event); break;
+ ret = machine__process_comm_event(machine, event, sample); break;
case PERF_RECORD_MMAP:
- ret = machine__process_mmap_event(machine, event); break;
+ ret = machine__process_mmap_event(machine, event, sample); break;
case PERF_RECORD_MMAP2:
- ret = machine__process_mmap2_event(machine, event); break;
+ ret = machine__process_mmap2_event(machine, event, sample); break;
case PERF_RECORD_FORK:
- ret = machine__process_fork_event(machine, event); break;
+ ret = machine__process_fork_event(machine, event, sample); break;
case PERF_RECORD_EXIT:
- ret = machine__process_exit_event(machine, event); break;
+ ret = machine__process_exit_event(machine, event, sample); break;
case PERF_RECORD_LOST:
- ret = machine__process_lost_event(machine, event); break;
+ ret = machine__process_lost_event(machine, event, sample); break;
default:
ret = -1;
break;
@@ -1173,39 +1200,22 @@ static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
return 0;
}
-static const u8 cpumodes[] = {
- PERF_RECORD_MISC_USER,
- PERF_RECORD_MISC_KERNEL,
- PERF_RECORD_MISC_GUEST_USER,
- PERF_RECORD_MISC_GUEST_KERNEL
-};
-#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
-
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
- size_t i;
- u8 m;
memset(&al, 0, sizeof(al));
+ /*
+ * We cannot use the header.misc hint to determine whether a
+ * branch stack address is user, kernel, guest, hypervisor.
+ * Branches may straddle the kernel/user/hypervisor boundaries.
+ * Thus, we have to try consecutively until we find a match
+ * or else, the symbol is unknown
+ */
+ thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);
- for (i = 0; i < NCPUMODES; i++) {
- m = cpumodes[i];
- /*
- * We cannot use the header.misc hint to determine whether a
- * branch stack address is user, kernel, guest, hypervisor.
- * Branches may straddle the kernel/user/hypervisor boundaries.
- * Thus, we have to try consecutively until we find a match
- * or else, the symbol is unknown
- */
- thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
- ip, &al);
- if (al.sym)
- goto found;
- }
-found:
ams->addr = ip;
ams->al_addr = al.addr;
ams->sym = al.sym;
@@ -1227,37 +1237,35 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread,
ams->map = al.map;
}
-struct mem_info *machine__resolve_mem(struct machine *machine,
- struct thread *thr,
- struct perf_sample *sample,
- u8 cpumode)
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+ struct addr_location *al)
{
struct mem_info *mi = zalloc(sizeof(*mi));
if (!mi)
return NULL;
- ip__resolve_ams(machine, thr, &mi->iaddr, sample->ip);
- ip__resolve_data(machine, thr, cpumode, &mi->daddr, sample->addr);
+ ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
+ ip__resolve_data(al->machine, al->thread, al->cpumode,
+ &mi->daddr, sample->addr);
mi->data_src.val = sample->data_src;
return mi;
}
-struct branch_info *machine__resolve_bstack(struct machine *machine,
- struct thread *thr,
- struct branch_stack *bs)
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+ struct addr_location *al)
{
- struct branch_info *bi;
unsigned int i;
+ const struct branch_stack *bs = sample->branch_stack;
+ struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
- bi = calloc(bs->nr, sizeof(struct branch_info));
if (!bi)
return NULL;
for (i = 0; i < bs->nr; i++) {
- ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
- ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
+ ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
+ ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
bi[i].flags = bs->entries[i].flags;
}
return bi;
@@ -1267,10 +1275,12 @@ static int machine__resolve_callchain_sample(struct machine *machine,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent,
- struct addr_location *root_al)
+ struct addr_location *root_al,
+ int max_stack)
{
u8 cpumode = PERF_RECORD_MISC_USER;
- unsigned int i;
+ int chain_nr = min(max_stack, (int)chain->nr);
+ int i;
int err;
callchain_cursor_reset(&callchain_cursor);
@@ -1280,7 +1290,7 @@ static int machine__resolve_callchain_sample(struct machine *machine,
return 0;
}
- for (i = 0; i < chain->nr; i++) {
+ for (i = 0; i < chain_nr; i++) {
u64 ip;
struct addr_location al;
@@ -1313,7 +1323,7 @@ static int machine__resolve_callchain_sample(struct machine *machine,
continue;
}
- al.filtered = false;
+ al.filtered = 0;
thread__find_addr_location(thread, machine, cpumode,
MAP__FUNCTION, ip, &al);
if (al.sym != NULL) {
@@ -1327,8 +1337,6 @@ static int machine__resolve_callchain_sample(struct machine *machine,
*root_al = al;
callchain_cursor_reset(&callchain_cursor);
}
- if (!symbol_conf.use_callchain)
- break;
}
err = callchain_cursor_append(&callchain_cursor,
@@ -1352,12 +1360,14 @@ int machine__resolve_callchain(struct machine *machine,
struct thread *thread,
struct perf_sample *sample,
struct symbol **parent,
- struct addr_location *root_al)
+ struct addr_location *root_al,
+ int max_stack)
{
int ret;
ret = machine__resolve_callchain_sample(machine, thread,
- sample->callchain, parent, root_al);
+ sample->callchain, parent,
+ root_al, max_stack);
if (ret)
return ret;
@@ -1372,7 +1382,41 @@ int machine__resolve_callchain(struct machine *machine,
return 0;
return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
- thread, evsel->attr.sample_regs_user,
- sample);
+ thread, sample, max_stack);
}
+
+int machine__for_each_thread(struct machine *machine,
+ int (*fn)(struct thread *thread, void *p),
+ void *priv)
+{
+ struct rb_node *nd;
+ struct thread *thread;
+ int rc = 0;
+
+ for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
+ thread = rb_entry(nd, struct thread, rb_node);
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
+
+ list_for_each_entry(thread, &machine->dead_threads, node) {
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
+ return rc;
+}
+
+int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
+ struct target *target, struct thread_map *threads,
+ perf_event__handler_t process, bool data_mmap)
+{
+ if (target__has_task(target))
+ return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+ else if (target__has_cpu(target))
+ return perf_event__synthesize_threads(tool, process, machine, data_mmap);
+ /* command specified */
+ return 0;
+}
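
Not part of the patch above: a minimal sketch of how a caller might use the machine__for_each_thread() iterator added at the end of this diff. The count_thread() callback and machine__thread_count() helper are hypothetical names introduced here purely for illustration; only the iterator signature (int (*fn)(struct thread *thread, void *p), void *priv) is taken from the patch. The iterator walks the live-threads rb tree and then the dead_threads list, stopping early if the callback returns non-zero, which is why the callback below returns 0 to visit every thread.

static int count_thread(struct thread *thread __maybe_unused, void *p)
{
	int *count = p;

	(*count)++;
	return 0;		/* returning non-zero would abort the walk */
}

static int machine__thread_count(struct machine *machine)
{
	int count = 0;

	machine__for_each_thread(machine, count_thread, &count);
	return count;
}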