Diffstat (limited to 'tools')
792 files changed, 76744 insertions, 17363 deletions
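The first hunk below reworks the top-level tools/Makefile so that every tool is reached through a common descend helper and gains matching <tool>, <tool>_install and <tool>_clean targets. As a usage sketch based only on the help text and target names visible in that hunk (install locations, OUTPUT and CROSS_COMPILE settings are left to the local tree), the new cgroup tools would be driven from the top of the kernel source roughly like this:

    $ make -C tools/ cgroup            # build tools/cgroup
    $ make -C tools/ cgroup_install    # install the resulting binaries
    $ make -C tools/ cgroup_clean      # clean the build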
diff --git a/tools/Makefile b/tools/Makefile
index 3ae43947a17..9a617adc667 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -3,19 +3,24 @@ include scripts/Makefile.include
 help:
 	@echo 'Possible targets:'
 	@echo ''
+	@echo ' acpi - ACPI tools'
+	@echo ' cgroup - cgroup tools'
 	@echo ' cpupower - a tool for all things x86 CPU power'
 	@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
+	@echo ' hv - tools used when in Hyper-V clients'
 	@echo ' lguest - a minimal 32-bit x86 hypervisor'
 	@echo ' perf - Linux performance measurement and analysis tool'
 	@echo ' selftests - various kernel selftests'
 	@echo ' turbostat - Intel CPU idle stats and freq reporting tool'
 	@echo ' usb - USB testing tools'
 	@echo ' virtio - vhost test module'
+	@echo ' net - misc networking tools'
 	@echo ' vm - misc vm tools'
 	@echo ' x86_energy_perf_policy - Intel energy policy tool'
+	@echo ' tmon - thermal monitoring and tuning tool'
 	@echo ''
 	@echo 'You can do:'
-	@echo ' $$ make -C tools/<tool>_install'
+	@echo ' $$ make -C tools/ <tool>_install'
 	@echo ''
 	@echo ' from the kernel command line to build and install one of'
 	@echo ' the tools above'
@@ -30,48 +35,85 @@ help:
 	@echo ' the respective build directory.'
 	@echo ' clean: a summary clean target to clean _all_ folders'
+acpi: FORCE
+	$(call descend,power/$@)
+
 cpupower: FORCE
-	$(QUIET_SUBDIR0)power/$@/ $(QUIET_SUBDIR1)
+	$(call descend,power/$@)
+
+cgroup firewire hv guest usb virtio vm net: FORCE
+	$(call descend,$@)
+
+liblockdep: FORCE
+	$(call descend,lib/lockdep)
-firewire lguest perf usb virtio vm: FORCE
-	$(QUIET_SUBDIR0)$@/ $(QUIET_SUBDIR1)
+libapikfs: FORCE
+	$(call descend,lib/api)
+
+perf: libapikfs FORCE
+	$(call descend,$@)
 selftests: FORCE
-	$(QUIET_SUBDIR0)testing/$@/ $(QUIET_SUBDIR1)
+	$(call descend,testing/$@)
 turbostat x86_energy_perf_policy: FORCE
-	$(QUIET_SUBDIR0)power/x86/$@/ $(QUIET_SUBDIR1)
+	$(call descend,power/x86/$@)
+
+tmon: FORCE
+	$(call descend,thermal/$@)
+
+acpi_install:
+	$(call descend,power/$(@:_install=),install)
 cpupower_install:
-	$(QUIET_SUBDIR0)power/$(@:_install=)/ $(QUIET_SUBDIR1) install
+	$(call descend,power/$(@:_install=),install)
-firewire_install lguest_install perf_install usb_install virtio_install vm_install:
-	$(QUIET_SUBDIR0)$(@:_install=)/ $(QUIET_SUBDIR1) install
+cgroup_install firewire_install hv_install lguest_install perf_install usb_install virtio_install vm_install net_install:
+	$(call descend,$(@:_install=),install)
 selftests_install:
-	$(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) install
+	$(call descend,testing/$(@:_clean=),install)
 turbostat_install x86_energy_perf_policy_install:
-	$(QUIET_SUBDIR0)power/x86/$(@:_install=)/ $(QUIET_SUBDIR1) install
+	$(call descend,power/x86/$(@:_install=),install)
-install: cpupower_install firewire_install lguest_install perf_install \
-	selftests_install turbostat_install usb_install virtio_install \
-	vm_install x86_energy_perf_policy_install
+tmon_install:
+	$(call descend,thermal/$(@:_install=),install)
+
+install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
+	perf_install selftests_install turbostat_install usb_install \
+	virtio_install vm_install net_install x86_energy_perf_policy_install \
+	tmon
+
+acpi_clean:
+	$(call descend,power/acpi,clean)
 cpupower_clean:
-	$(QUIET_SUBDIR0)power/cpupower/ $(QUIET_SUBDIR1) clean
+	$(call descend,power/cpupower,clean)
+
+cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
+	$(call descend,$(@:_clean=),clean)
-firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
-	$(QUIET_SUBDIR0)$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+liblockdep_clean:
+	$(call descend,lib/lockdep,clean)
+
+libapikfs_clean:
+	$(call descend,lib/api,clean)
+
+perf_clean: libapikfs_clean
+	$(call descend,$(@:_clean=),clean)
 selftests_clean:
-	$(QUIET_SUBDIR0)testing/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+	$(call descend,testing/$(@:_clean=),clean)
 turbostat_clean x86_energy_perf_policy_clean:
-	$(QUIET_SUBDIR0)power/x86/$(@:_clean=)/ $(QUIET_SUBDIR1) clean
+	$(call descend,power/x86/$(@:_clean=),clean)
+
+tmon_clean:
+	$(call descend,thermal/tmon,clean)
-clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \
-	turbostat_clean usb_clean virtio_clean vm_clean \
-	x86_energy_perf_policy_clean
+clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean lguest_clean \
+	perf_clean selftests_clean turbostat_clean usb_clean virtio_clean \
+	vm_clean net_clean x86_energy_perf_policy_clean tmon_clean
 .PHONY: FORCE
diff --git a/tools/cgroup/.gitignore b/tools/cgroup/.gitignore
new file mode 100644
index 00000000000..633cd9b874f
--- /dev/null
+++ b/tools/cgroup/.gitignore
@@ -0,0 +1 @@
+cgroup_event_listener
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
new file mode 100644
index 00000000000..b4286196b76
--- /dev/null
+++ b/tools/cgroup/Makefile
@@ -0,0 +1,11 @@
+# Makefile for cgroup tools
+
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -Wall -Wextra
+
+all: cgroup_event_listener
+%: %.c
+	$(CC) $(CFLAGS) -o $@ $^
+
+clean:
+	$(RM) cgroup_event_listener
diff --git a/tools/cgroup/cgroup_event_listener.c b/tools/cgroup/cgroup_event_listener.c
new file mode 100644
index 00000000000..4eb5507205c
--- /dev/null
+++ b/tools/cgroup/cgroup_event_listener.c
@@ -0,0 +1,82 @@
+/*
+ * cgroup_event_listener.c - Simple listener of cgroup events
+ *
+ * Copyright (C) Kirill A.
Shutemov <kirill@shutemov.name> + */ + +#include <assert.h> +#include <err.h> +#include <errno.h> +#include <fcntl.h> +#include <libgen.h> +#include <limits.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> + +#include <sys/eventfd.h> + +#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>" + +int main(int argc, char **argv) +{ + int efd = -1; + int cfd = -1; + int event_control = -1; + char event_control_path[PATH_MAX]; + char line[LINE_MAX]; + int ret; + + if (argc != 3) + errx(1, "%s", USAGE_STR); + + cfd = open(argv[1], O_RDONLY); + if (cfd == -1) + err(1, "Cannot open %s", argv[1]); + + ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control", + dirname(argv[1])); + if (ret >= PATH_MAX) + errx(1, "Path to cgroup.event_control is too long"); + + event_control = open(event_control_path, O_WRONLY); + if (event_control == -1) + err(1, "Cannot open %s", event_control_path); + + efd = eventfd(0, 0); + if (efd == -1) + err(1, "eventfd() failed"); + + ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]); + if (ret >= LINE_MAX) + errx(1, "Arguments string is too long"); + + ret = write(event_control, line, strlen(line) + 1); + if (ret == -1) + err(1, "Cannot write to cgroup.event_control"); + + while (1) { + uint64_t result; + + ret = read(efd, &result, sizeof(result)); + if (ret == -1) { + if (errno == EINTR) + continue; + err(1, "Cannot read from eventfd"); + } + assert(ret == sizeof(result)); + + ret = access(event_control_path, W_OK); + if ((ret == -1) && (errno == ENOENT)) { + puts("The cgroup seems to have removed."); + break; + } + + if (ret == -1) + err(1, "cgroup.event_control is not accessible any more"); + + printf("%s %s: crossed\n", argv[1], argv[2]); + } + + return 0; +} diff --git a/tools/firewire/nosy-dump.c b/tools/firewire/nosy-dump.c index f93b776370b..3179c711bd6 100644 --- a/tools/firewire/nosy-dump.c +++ b/tools/firewire/nosy-dump.c @@ -150,6 +150,8 @@ subaction_create(uint32_t *data, size_t length) /* we put the ack in the subaction struct for easy access. */ sa = malloc(sizeof *sa - sizeof sa->packet + length); + if (!sa) + exit(EXIT_FAILURE); sa->ack = data[length / 4 - 1]; sa->length = length; memcpy(&sa->packet, data, length); @@ -180,6 +182,8 @@ link_transaction_lookup(int request_node, int response_node, int tlabel) } t = malloc(sizeof *t); + if (!t) + exit(EXIT_FAILURE); t->request_node = request_node; t->response_node = response_node; t->tlabel = tlabel; diff --git a/tools/hv/Makefile b/tools/hv/Makefile new file mode 100644 index 00000000000..bd22f786a60 --- /dev/null +++ b/tools/hv/Makefile @@ -0,0 +1,13 @@ +# Makefile for Hyper-V tools + +CC = $(CROSS_COMPILE)gcc +PTHREAD_LIBS = -lpthread +WARNINGS = -Wall -Wextra +CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) + +all: hv_kvp_daemon hv_vss_daemon +%: %.c + $(CC) $(CFLAGS) -o $@ $^ + +clean: + $(RM) hv_kvp_daemon hv_vss_daemon diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c new file mode 100644 index 00000000000..fba1c75aa48 --- /dev/null +++ b/tools/hv/hv_fcopy_daemon.c @@ -0,0 +1,197 @@ +/* + * An implementation of host to guest copy functionality for Linux. + * + * Copyright (C) 2014, Microsoft, Inc. + * + * Author : K. Y. Srinivasan <kys@microsoft.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + */ + + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/poll.h> +#include <linux/types.h> +#include <linux/kdev_t.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <string.h> +#include <ctype.h> +#include <errno.h> +#include <linux/hyperv.h> +#include <syslog.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <dirent.h> + +static int target_fd; +static char target_fname[W_MAX_PATH]; + +static int hv_start_fcopy(struct hv_start_fcopy *smsg) +{ + int error = HV_E_FAIL; + char *q, *p; + + /* + * If possile append a path seperator to the path. + */ + if (strlen((char *)smsg->path_name) < (W_MAX_PATH - 2)) + strcat((char *)smsg->path_name, "/"); + + p = (char *)smsg->path_name; + snprintf(target_fname, sizeof(target_fname), "%s/%s", + (char *)smsg->path_name, smsg->file_name); + + syslog(LOG_INFO, "Target file name: %s", target_fname); + /* + * Check to see if the path is already in place; if not, + * create if required. + */ + while ((q = strchr(p, '/')) != NULL) { + if (q == p) { + p++; + continue; + } + *q = '\0'; + if (access((char *)smsg->path_name, F_OK)) { + if (smsg->copy_flags & CREATE_PATH) { + if (mkdir((char *)smsg->path_name, 0755)) { + syslog(LOG_ERR, "Failed to create %s", + (char *)smsg->path_name); + goto done; + } + } else { + syslog(LOG_ERR, "Invalid path: %s", + (char *)smsg->path_name); + goto done; + } + } + p = q + 1; + *q = '/'; + } + + if (!access(target_fname, F_OK)) { + syslog(LOG_INFO, "File: %s exists", target_fname); + if (!(smsg->copy_flags & OVER_WRITE)) { + error = HV_ERROR_ALREADY_EXISTS; + goto done; + } + } + + target_fd = open(target_fname, O_RDWR | O_CREAT | O_CLOEXEC, 0744); + if (target_fd == -1) { + syslog(LOG_INFO, "Open Failed: %s", strerror(errno)); + goto done; + } + + error = 0; +done: + return error; +} + +static int hv_copy_data(struct hv_do_fcopy *cpmsg) +{ + ssize_t bytes_written; + + bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size, + cpmsg->offset); + + if (bytes_written != cpmsg->size) + return HV_E_FAIL; + + return 0; +} + +static int hv_copy_finished(void) +{ + close(target_fd); + return 0; +} +static int hv_copy_cancel(void) +{ + close(target_fd); + unlink(target_fname); + return 0; + +} + +int main(void) +{ + int fd, fcopy_fd, len; + int error; + int version = FCOPY_CURRENT_VERSION; + char *buffer[4096 * 2]; + struct hv_fcopy_hdr *in_msg; + + if (daemon(1, 0)) { + syslog(LOG_ERR, "daemon() failed; error: %s", strerror(errno)); + exit(EXIT_FAILURE); + } + + openlog("HV_FCOPY", 0, LOG_USER); + syslog(LOG_INFO, "HV_FCOPY starting; pid is:%d", getpid()); + + fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR); + + if (fcopy_fd < 0) { + syslog(LOG_ERR, "open /dev/vmbus/hv_fcopy failed; error: %d %s", + errno, strerror(errno)); + exit(EXIT_FAILURE); + } + + /* + * Register with the kernel. + */ + if ((write(fcopy_fd, &version, sizeof(int))) != sizeof(int)) { + syslog(LOG_ERR, "Registration failed: %s", strerror(errno)); + exit(EXIT_FAILURE); + } + + while (1) { + /* + * In this loop we process fcopy messages after the + * handshake is complete. 
+ */ + len = pread(fcopy_fd, buffer, (4096 * 2), 0); + if (len < 0) { + syslog(LOG_ERR, "pread failed: %s", strerror(errno)); + exit(EXIT_FAILURE); + } + in_msg = (struct hv_fcopy_hdr *)buffer; + + switch (in_msg->operation) { + case START_FILE_COPY: + error = hv_start_fcopy((struct hv_start_fcopy *)in_msg); + break; + case WRITE_TO_FILE: + error = hv_copy_data((struct hv_do_fcopy *)in_msg); + break; + case COMPLETE_FCOPY: + error = hv_copy_finished(); + break; + case CANCEL_FCOPY: + error = hv_copy_cancel(); + break; + + default: + syslog(LOG_ERR, "Unknown operation: %d", + in_msg->operation); + + } + + if (pwrite(fcopy_fd, &error, sizeof(int), 0) != sizeof(int)) { + syslog(LOG_ERR, "pwrite failed: %s", strerror(errno)); + exit(EXIT_FAILURE); + } + } +} diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index 5959affd882..4088b816a3e 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -26,7 +26,6 @@ #include <sys/socket.h> #include <sys/poll.h> #include <sys/utsname.h> -#include <linux/types.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> @@ -43,6 +42,7 @@ #include <sys/stat.h> #include <fcntl.h> #include <dirent.h> +#include <net/if.h> /* * KVP protocol: The user mode component first registers with the @@ -78,8 +78,6 @@ enum { DNS }; -static char kvp_send_buffer[4096]; -static char kvp_recv_buffer[4096 * 2]; static struct sockaddr_nl addr; static int in_hand_shake = 1; @@ -88,18 +86,24 @@ static char *os_major = ""; static char *os_minor = ""; static char *processor_arch; static char *os_build; +static char *os_version; static char *lic_version = "Unknown version"; +static char full_domain_name[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; static struct utsname uts_buf; /* * The location of the interface configuration file. 
*/ -#define KVP_CONFIG_LOC "/var/opt/" +#define KVP_CONFIG_LOC "/var/lib/hyperv" #define MAX_FILE_NAME 100 #define ENTRIES_PER_BLOCK 50 +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + struct kvp_record { char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; char value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; @@ -121,7 +125,8 @@ static void kvp_acquire_lock(int pool) fl.l_pid = getpid(); if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { - syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool); + syslog(LOG_ERR, "Failed to acquire the lock pool: %d; error: %d %s", pool, + errno, strerror(errno)); exit(EXIT_FAILURE); } } @@ -132,8 +137,8 @@ static void kvp_release_lock(int pool) fl.l_pid = getpid(); if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { - perror("fcntl"); - syslog(LOG_ERR, "Failed to release the lock pool: %d", pool); + syslog(LOG_ERR, "Failed to release the lock pool: %d; error: %d %s", pool, + errno, strerror(errno)); exit(EXIT_FAILURE); } } @@ -149,10 +154,11 @@ static void kvp_update_file(int pool) */ kvp_acquire_lock(pool); - filep = fopen(kvp_file_info[pool].fname, "w"); + filep = fopen(kvp_file_info[pool].fname, "we"); if (!filep) { + syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool, + errno, strerror(errno)); kvp_release_lock(pool); - syslog(LOG_ERR, "Failed to open file, pool: %d", pool); exit(EXIT_FAILURE); } @@ -180,10 +186,11 @@ static void kvp_update_mem_state(int pool) kvp_acquire_lock(pool); - filep = fopen(kvp_file_info[pool].fname, "r"); + filep = fopen(kvp_file_info[pool].fname, "re"); if (!filep) { + syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool, + errno, strerror(errno)); kvp_release_lock(pool); - syslog(LOG_ERR, "Failed to open file, pool: %d", pool); exit(EXIT_FAILURE); } for (;;) { @@ -232,9 +239,10 @@ static int kvp_file_init(void) int i; int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; - if (access("/var/opt/hyperv", F_OK)) { - if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) { - syslog(LOG_ERR, " Failed to create /var/opt/hyperv"); + if (access(KVP_CONFIG_LOC, F_OK)) { + if (mkdir(KVP_CONFIG_LOC, 0755 /* rwxr-xr-x */)) { + syslog(LOG_ERR, "Failed to create '%s'; error: %d %s", KVP_CONFIG_LOC, + errno, strerror(errno)); exit(EXIT_FAILURE); } } @@ -243,20 +251,23 @@ static int kvp_file_init(void) fname = kvp_file_info[i].fname; records_read = 0; num_blocks = 1; - sprintf(fname, "/var/opt/hyperv/.kvp_pool_%d", i); - fd = open(fname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR | S_IROTH); + sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i); + fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */); if (fd == -1) return 1; - filep = fopen(fname, "r"); - if (!filep) + filep = fopen(fname, "re"); + if (!filep) { + close(fd); return 1; + } record = malloc(alloc_unit * num_blocks); if (record == NULL) { fclose(filep); + close(fd); return 1; } for (;;) { @@ -280,6 +291,7 @@ static int kvp_file_init(void) num_blocks); if (record == NULL) { fclose(filep); + close(fd); return 1; } continue; @@ -297,7 +309,7 @@ static int kvp_file_init(void) return 0; } -static int kvp_key_delete(int pool, __u8 *key, int key_size) +static int kvp_key_delete(int pool, const char *key, int key_size) { int i; int j, k; @@ -340,7 +352,7 @@ static int kvp_key_delete(int pool, __u8 *key, int key_size) return 1; } -static int kvp_key_add_or_modify(int pool, __u8 *key, int key_size, __u8 *value, +static int kvp_key_add_or_modify(int pool, const char *key, int key_size, const char *value, int value_size) { int i; 
@@ -394,7 +406,7 @@ static int kvp_key_add_or_modify(int pool, __u8 *key, int key_size, __u8 *value, return 0; } -static int kvp_get_value(int pool, __u8 *key, int key_size, __u8 *value, +static int kvp_get_value(int pool, const char *key, int key_size, char *value, int value_size) { int i; @@ -426,8 +438,8 @@ static int kvp_get_value(int pool, __u8 *key, int key_size, __u8 *value, return 1; } -static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, - __u8 *value, int value_size) +static int kvp_pool_enumerate(int pool, int index, char *key, int key_size, + char *value, int value_size) { struct kvp_record *record; @@ -453,7 +465,9 @@ void kvp_get_os_info(void) char *p, buf[512]; uname(&uts_buf); - os_build = uts_buf.release; + os_version = uts_buf.release; + os_build = strdup(uts_buf.release); + os_name = uts_buf.sysname; processor_arch = uts_buf.machine; @@ -462,7 +476,7 @@ void kvp_get_os_info(void) * string to be of the form: x.y.z * Strip additional information we may have. */ - p = strchr(os_build, '-'); + p = strchr(os_version, '-'); if (p) *p = '\0'; @@ -757,7 +771,9 @@ static void kvp_process_ipconfig_file(char *cmd, break; x = strchr(p, '\n'); - *x = '\0'; + if (x) + *x = '\0'; + strcat(config_buf, p); strcat(config_buf, ";"); } @@ -879,7 +895,7 @@ static int kvp_process_ip_address(void *addrp, addr_length = INET6_ADDRSTRLEN; } - if ((length - *offset) < addr_length + 1) + if ((length - *offset) < addr_length + 2) return HV_E_FAIL; if (str == NULL) { strcpy(buffer, "inet_ntop failed\n"); @@ -887,11 +903,13 @@ static int kvp_process_ip_address(void *addrp, } if (*offset == 0) strcpy(buffer, tmp); - else + else { + strcat(buffer, ";"); strcat(buffer, tmp); - strcat(buffer, ";"); + } *offset += strlen(str) + 1; + return 0; } @@ -953,7 +971,9 @@ kvp_get_ip_info(int family, char *if_name, int op, * supported address families; if not we gather info on * the specified address family. 
*/ - if ((family != 0) && (curp->ifa_addr->sa_family != family)) { + if ((((family != 0) && + (curp->ifa_addr->sa_family != family))) || + (curp->ifa_flags & IFF_LOOPBACK)) { curp = curp->ifa_next; continue; } @@ -1004,9 +1024,10 @@ kvp_get_ip_info(int family, char *if_name, int op, if (sn_offset == 0) strcpy(sn_str, cidr_mask); - else + else { + strcat((char *)ip_buffer->sub_net, ";"); strcat(sn_str, cidr_mask); - strcat((char *)ip_buffer->sub_net, ";"); + } sn_offset += strlen(sn_str) + 1; } @@ -1154,16 +1175,13 @@ static int process_ip_string(FILE *f, char *ip_string, int type) snprintf(str, sizeof(str), "%s", "DNS"); break; } - if (i != 0) { - if (type != DNS) { - snprintf(sub_str, sizeof(sub_str), - "_%d", i++); - } else { - snprintf(sub_str, sizeof(sub_str), - "%d", ++i); - } - } else if (type == DNS) { + + if (type == DNS) { snprintf(sub_str, sizeof(sub_str), "%d", ++i); + } else if (type == GATEWAY && i == 0) { + ++i; + } else { + snprintf(sub_str, sizeof(sub_str), "%d", i++); } @@ -1183,17 +1201,13 @@ static int process_ip_string(FILE *f, char *ip_string, int type) snprintf(str, sizeof(str), "%s", "DNS"); break; } - if ((j != 0) || (type == DNS)) { - if (type != DNS) { - snprintf(sub_str, sizeof(sub_str), - "_%d", j++); - } else { - snprintf(sub_str, sizeof(sub_str), - "%d", ++i); - } - } else if (type == DNS) { - snprintf(sub_str, sizeof(sub_str), - "%d", ++i); + + if (type == DNS) { + snprintf(sub_str, sizeof(sub_str), "%d", ++i); + } else if (j == 0) { + ++j; + } else { + snprintf(sub_str, sizeof(sub_str), "_%d", j++); } } else { return HV_INVALIDARG; @@ -1236,18 +1250,19 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) * Here is the format of the ip configuration file: * * HWADDR=macaddr - * IF_NAME=interface name - * DHCP=yes (This is optional; if yes, DHCP is configured) + * DEVICE=interface name + * BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured + * or "none" if no boot-time protocol should be used) * - * IPADDR=ipaddr1 - * IPADDR_1=ipaddr2 - * IPADDR_x=ipaddry (where y = x + 1) + * IPADDR0=ipaddr1 + * IPADDR1=ipaddr2 + * IPADDRx=ipaddry (where y = x + 1) * - * NETMASK=netmask1 - * NETMASK_x=netmasky (where y = x + 1) + * NETMASK0=netmask1 + * NETMASKx=netmasky (where y = x + 1) * * GATEWAY=ipaddr1 - * GATEWAY_x=ipaddry (where y = x + 1) + * GATEWAYx=ipaddry (where y = x + 1) * * DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc) * @@ -1263,12 +1278,13 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) */ snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC, - "hyperv/ifcfg-", if_name); + "/ifcfg-", if_name); file = fopen(if_file, "w"); if (file == NULL) { - syslog(LOG_ERR, "Failed to open config file"); + syslog(LOG_ERR, "Failed to open config file; error: %d %s", + errno, strerror(errno)); return HV_E_FAIL; } @@ -1283,15 +1299,16 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) } error = kvp_write_file(file, "HWADDR", "", mac_addr); + free(mac_addr); if (error) goto setval_error; - error = kvp_write_file(file, "IF_NAME", "", if_name); + error = kvp_write_file(file, "DEVICE", "", if_name); if (error) goto setval_error; if (new_val->dhcp_enabled) { - error = kvp_write_file(file, "DHCP", "", "yes"); + error = kvp_write_file(file, "BOOTPROTO", "", "dhcp"); if (error) goto setval_error; @@ -1299,6 +1316,11 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) * We are done!. 
*/ goto setval_done; + + } else { + error = kvp_write_file(file, "BOOTPROTO", "", "none"); + if (error) + goto setval_error; } /* @@ -1323,7 +1345,6 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) goto setval_error; setval_done: - free(mac_addr); fclose(file); /* @@ -1332,18 +1353,21 @@ setval_done: */ snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig", if_file); - system(cmd); + if (system(cmd)) { + syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s", + cmd, errno, strerror(errno)); + return HV_E_FAIL; + } return 0; setval_error: syslog(LOG_ERR, "Failed to write config file"); - free(mac_addr); fclose(file); return error; } -static int +static void kvp_get_domain_name(char *buffer, int length) { struct addrinfo hints, *info ; @@ -1357,34 +1381,29 @@ kvp_get_domain_name(char *buffer, int length) error = getaddrinfo(buffer, NULL, &hints, &info); if (error != 0) { - strcpy(buffer, "getaddrinfo failed\n"); - return error; + snprintf(buffer, length, "getaddrinfo failed: 0x%x %s", + error, gai_strerror(error)); + return; } - strcpy(buffer, info->ai_canonname); + snprintf(buffer, length, "%s", info->ai_canonname); freeaddrinfo(info); - return error; } static int netlink_send(int fd, struct cn_msg *msg) { - struct nlmsghdr *nlh; + struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE }; unsigned int size; struct msghdr message; - char buffer[64]; struct iovec iov[2]; - size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len); + size = sizeof(struct cn_msg) + msg->len; - nlh = (struct nlmsghdr *)buffer; - nlh->nlmsg_seq = 0; - nlh->nlmsg_pid = getpid(); - nlh->nlmsg_type = NLMSG_DONE; - nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh)); - nlh->nlmsg_flags = 0; + nlh.nlmsg_pid = getpid(); + nlh.nlmsg_len = NLMSG_LENGTH(size); - iov[0].iov_base = nlh; - iov[0].iov_len = sizeof(*nlh); + iov[0].iov_base = &nlh; + iov[0].iov_len = sizeof(nlh); iov[1].iov_base = msg; iov[1].iov_len = size; @@ -1400,7 +1419,7 @@ netlink_send(int fd, struct cn_msg *msg) int main(void) { - int fd, len, sock_opt; + int fd, len, nl_group; int error; struct cn_msg *message; struct pollfd pfd; @@ -1414,14 +1433,29 @@ int main(void) int pool; char *if_name; struct hv_kvp_ipaddr_value *kvp_ip_val; + char *kvp_recv_buffer; + size_t kvp_recv_buffer_len; - daemon(1, 0); + if (daemon(1, 0)) + return 1; openlog("KVP", 0, LOG_USER); syslog(LOG_INFO, "KVP starting; pid is:%d", getpid()); + + kvp_recv_buffer_len = NLMSG_LENGTH(0) + sizeof(struct cn_msg) + sizeof(struct hv_kvp_msg); + kvp_recv_buffer = calloc(1, kvp_recv_buffer_len); + if (!kvp_recv_buffer) { + syslog(LOG_ERR, "Failed to allocate netlink buffer"); + exit(EXIT_FAILURE); + } /* * Retrieve OS release information. */ kvp_get_os_info(); + /* + * Cache Fully Qualified Domain Name because getaddrinfo takes an + * unpredictable amount of time to finish. 
+ */ + kvp_get_domain_name(full_domain_name, sizeof(full_domain_name)); if (kvp_file_init()) { syslog(LOG_ERR, "Failed to initialize the pools"); @@ -1430,27 +1464,34 @@ int main(void) fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); if (fd < 0) { - syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd); + syslog(LOG_ERR, "netlink socket creation failed; error: %d %s", errno, + strerror(errno)); exit(EXIT_FAILURE); } addr.nl_family = AF_NETLINK; addr.nl_pad = 0; addr.nl_pid = 0; - addr.nl_groups = CN_KVP_IDX; + addr.nl_groups = 0; error = bind(fd, (struct sockaddr *)&addr, sizeof(addr)); if (error < 0) { - syslog(LOG_ERR, "bind failed; error:%d", error); + syslog(LOG_ERR, "bind failed; error: %d %s", errno, strerror(errno)); + close(fd); + exit(EXIT_FAILURE); + } + nl_group = CN_KVP_IDX; + + if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) { + syslog(LOG_ERR, "setsockopt failed; error: %d %s", errno, strerror(errno)); close(fd); exit(EXIT_FAILURE); } - sock_opt = addr.nl_groups; - setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt)); + /* * Register ourselves with the kernel. */ - message = (struct cn_msg *)kvp_send_buffer; + message = (struct cn_msg *)kvp_recv_buffer; message->id.idx = CN_KVP_IDX; message->id.val = CN_KVP_VAL; @@ -1461,7 +1502,7 @@ int main(void) len = netlink_send(fd, message); if (len < 0) { - syslog(LOG_ERR, "netlink_send failed; error:%d", len); + syslog(LOG_ERR, "netlink_send failed; error: %d %s", errno, strerror(errno)); close(fd); exit(EXIT_FAILURE); } @@ -1473,19 +1514,38 @@ int main(void) socklen_t addr_l = sizeof(addr); pfd.events = POLLIN; pfd.revents = 0; - poll(&pfd, 1, -1); - len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0, + if (poll(&pfd, 1, -1) < 0) { + syslog(LOG_ERR, "poll failed; error: %d %s", errno, strerror(errno)); + if (errno == EINVAL) { + close(fd); + exit(EXIT_FAILURE); + } + else + continue; + } + + len = recvfrom(fd, kvp_recv_buffer, kvp_recv_buffer_len, 0, addr_p, &addr_l); - if (len < 0 || addr.nl_pid) { + if (len < 0) { syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", addr.nl_pid, errno, strerror(errno)); close(fd); return -1; } + if (addr.nl_pid) { + syslog(LOG_WARNING, "Received packet from untrusted pid:%u", + addr.nl_pid); + continue; + } + incoming_msg = (struct nlmsghdr *)kvp_recv_buffer; + + if (incoming_msg->nlmsg_type != NLMSG_DONE) + continue; + incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg); hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data; @@ -1614,8 +1674,7 @@ int main(void) switch (hv_msg->body.kvp_enum_data.index) { case FullyQualifiedDomainName: - kvp_get_domain_name(key_value, - HV_KVP_EXCHANGE_MAX_VALUE_SIZE); + strcpy(key_value, full_domain_name); strcpy(key_name, "FullyQualifiedDomainName"); break; case IntegrationServicesVersion: @@ -1649,7 +1708,7 @@ int main(void) strcpy(key_name, "OSMinorVersion"); break; case OSVersion: - strcpy(key_value, os_build); + strcpy(key_value, os_version); strcpy(key_name, "OSVersion"); break; case ProcessorArchitecture: @@ -1674,7 +1733,8 @@ kvp_done: len = netlink_send(fd, incoming_cn_msg); if (len < 0) { - syslog(LOG_ERR, "net_link send failed; error:%d", len); + syslog(LOG_ERR, "net_link send failed; error: %d %s", errno, + strerror(errno)); exit(EXIT_FAILURE); } } diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh index 3e9427e08d8..735aafd64a3 100755 --- a/tools/hv/hv_set_ifconfig.sh +++ b/tools/hv/hv_set_ifconfig.sh @@ -20,18 +20,19 @@ # Here is the format of the ip 
configuration file: # # HWADDR=macaddr -# IF_NAME=interface name -# DHCP=yes (This is optional; if yes, DHCP is configured) +# DEVICE=interface name +# BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured +# or "none" if no boot-time protocol should be used) # -# IPADDR=ipaddr1 -# IPADDR_1=ipaddr2 -# IPADDR_x=ipaddry (where y = x + 1) +# IPADDR0=ipaddr1 +# IPADDR1=ipaddr2 +# IPADDRx=ipaddry (where y = x + 1) # -# NETMASK=netmask1 -# NETMASK_x=netmasky (where y = x + 1) +# NETMASK0=netmask1 +# NETMASKx=netmasky (where y = x + 1) # # GATEWAY=ipaddr1 -# GATEWAY_x=ipaddry (where y = x + 1) +# GATEWAYx=ipaddry (where y = x + 1) # # DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc) # @@ -53,11 +54,6 @@ echo "NM_CONTROLLED=no" >> $1 echo "PEERDNS=yes" >> $1 echo "ONBOOT=yes" >> $1 -dhcp=$(grep "DHCP" $1 2>/dev/null) -if [ "$dhcp" != "" ]; -then -echo "BOOTPROTO=dhcp" >> $1; -fi cp $1 /etc/sysconfig/network-scripts/ @@ -65,4 +61,4 @@ cp $1 /etc/sysconfig/network-scripts/ interface=$(echo $1 | awk -F - '{ print $2 }') /sbin/ifdown $interface 2>/dev/null -/sbin/ifup $interfac 2>/dev/null +/sbin/ifup $interface 2>/dev/null diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c new file mode 100644 index 00000000000..6a213b8cd7b --- /dev/null +++ b/tools/hv/hv_vss_daemon.c @@ -0,0 +1,267 @@ +/* + * An implementation of the host initiated guest snapshot for Hyper-V. + * + * + * Copyright (C) 2013, Microsoft, Inc. + * Author : K. Y. Srinivasan <kys@microsoft.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + */ + + +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/poll.h> +#include <sys/ioctl.h> +#include <fcntl.h> +#include <stdio.h> +#include <mntent.h> +#include <stdlib.h> +#include <unistd.h> +#include <string.h> +#include <ctype.h> +#include <errno.h> +#include <arpa/inet.h> +#include <linux/fs.h> +#include <linux/connector.h> +#include <linux/hyperv.h> +#include <linux/netlink.h> +#include <syslog.h> + +static struct sockaddr_nl addr; + +#ifndef SOL_NETLINK +#define SOL_NETLINK 270 +#endif + + +static int vss_do_freeze(char *dir, unsigned int cmd, char *fs_op) +{ + int ret, fd = open(dir, O_RDONLY); + + if (fd < 0) + return 1; + ret = ioctl(fd, cmd, 0); + syslog(LOG_INFO, "VSS: %s of %s: %s\n", fs_op, dir, strerror(errno)); + close(fd); + return !!ret; +} + +static int vss_operate(int operation) +{ + char *fs_op; + char match[] = "/dev/"; + FILE *mounts; + struct mntent *ent; + unsigned int cmd; + int error = 0, root_seen = 0; + + switch (operation) { + case VSS_OP_FREEZE: + cmd = FIFREEZE; + fs_op = "freeze"; + break; + case VSS_OP_THAW: + cmd = FITHAW; + fs_op = "thaw"; + break; + default: + return -1; + } + + mounts = setmntent("/proc/mounts", "r"); + if (mounts == NULL) + return -1; + + while ((ent = getmntent(mounts))) { + if (strncmp(ent->mnt_fsname, match, strlen(match))) + continue; + if (strcmp(ent->mnt_type, "iso9660") == 0) + continue; + if (strcmp(ent->mnt_type, "vfat") == 0) + continue; + if (strcmp(ent->mnt_dir, "/") == 0) { + root_seen = 1; + continue; + } + error |= vss_do_freeze(ent->mnt_dir, cmd, fs_op); + } + endmntent(mounts); + + if (root_seen) { + error |= vss_do_freeze("/", cmd, fs_op); + } + + return error; +} + +static int netlink_send(int fd, struct cn_msg *msg) +{ + struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE }; + unsigned int size; + struct msghdr message; + struct iovec iov[2]; + + size = sizeof(struct cn_msg) + msg->len; + + nlh.nlmsg_pid = getpid(); + nlh.nlmsg_len = NLMSG_LENGTH(size); + + iov[0].iov_base = &nlh; + iov[0].iov_len = sizeof(nlh); + + iov[1].iov_base = msg; + iov[1].iov_len = size; + + memset(&message, 0, sizeof(message)); + message.msg_name = &addr; + message.msg_namelen = sizeof(addr); + message.msg_iov = iov; + message.msg_iovlen = 2; + + return sendmsg(fd, &message, 0); +} + +int main(void) +{ + int fd, len, nl_group; + int error; + struct cn_msg *message; + struct pollfd pfd; + struct nlmsghdr *incoming_msg; + struct cn_msg *incoming_cn_msg; + int op; + struct hv_vss_msg *vss_msg; + char *vss_recv_buffer; + size_t vss_recv_buffer_len; + + if (daemon(1, 0)) + return 1; + + openlog("Hyper-V VSS", 0, LOG_USER); + syslog(LOG_INFO, "VSS starting; pid is:%d", getpid()); + + vss_recv_buffer_len = NLMSG_LENGTH(0) + sizeof(struct cn_msg) + sizeof(struct hv_vss_msg); + vss_recv_buffer = calloc(1, vss_recv_buffer_len); + if (!vss_recv_buffer) { + syslog(LOG_ERR, "Failed to allocate netlink buffers"); + exit(EXIT_FAILURE); + } + + fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); + if (fd < 0) { + syslog(LOG_ERR, "netlink socket creation failed; error:%d %s", + errno, strerror(errno)); + exit(EXIT_FAILURE); + } + addr.nl_family = AF_NETLINK; + addr.nl_pad = 0; + addr.nl_pid = 0; + addr.nl_groups = 0; + + + error = bind(fd, (struct sockaddr *)&addr, sizeof(addr)); + if (error < 0) { + syslog(LOG_ERR, "bind failed; error:%d %s", errno, strerror(errno)); + close(fd); + exit(EXIT_FAILURE); + } + nl_group = CN_VSS_IDX; + if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) { + 
syslog(LOG_ERR, "setsockopt failed; error:%d %s", errno, strerror(errno)); + close(fd); + exit(EXIT_FAILURE); + } + /* + * Register ourselves with the kernel. + */ + message = (struct cn_msg *)vss_recv_buffer; + message->id.idx = CN_VSS_IDX; + message->id.val = CN_VSS_VAL; + message->ack = 0; + vss_msg = (struct hv_vss_msg *)message->data; + vss_msg->vss_hdr.operation = VSS_OP_REGISTER; + + message->len = sizeof(struct hv_vss_msg); + + len = netlink_send(fd, message); + if (len < 0) { + syslog(LOG_ERR, "netlink_send failed; error:%d %s", errno, strerror(errno)); + close(fd); + exit(EXIT_FAILURE); + } + + pfd.fd = fd; + + while (1) { + struct sockaddr *addr_p = (struct sockaddr *) &addr; + socklen_t addr_l = sizeof(addr); + pfd.events = POLLIN; + pfd.revents = 0; + + if (poll(&pfd, 1, -1) < 0) { + syslog(LOG_ERR, "poll failed; error:%d %s", errno, strerror(errno)); + if (errno == EINVAL) { + close(fd); + exit(EXIT_FAILURE); + } + else + continue; + } + + len = recvfrom(fd, vss_recv_buffer, vss_recv_buffer_len, 0, + addr_p, &addr_l); + + if (len < 0) { + syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", + addr.nl_pid, errno, strerror(errno)); + close(fd); + return -1; + } + + if (addr.nl_pid) { + syslog(LOG_WARNING, + "Received packet from untrusted pid:%u", + addr.nl_pid); + continue; + } + + incoming_msg = (struct nlmsghdr *)vss_recv_buffer; + + if (incoming_msg->nlmsg_type != NLMSG_DONE) + continue; + + incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg); + vss_msg = (struct hv_vss_msg *)incoming_cn_msg->data; + op = vss_msg->vss_hdr.operation; + error = HV_S_OK; + + switch (op) { + case VSS_OP_FREEZE: + case VSS_OP_THAW: + error = vss_operate(op); + if (error) + error = HV_E_FAIL; + break; + default: + syslog(LOG_ERR, "Illegal op:%d\n", op); + } + vss_msg->error = error; + len = netlink_send(fd, incoming_cn_msg); + if (len < 0) { + syslog(LOG_ERR, "net_link send failed; error:%d %s", + errno, strerror(errno)); + exit(EXIT_FAILURE); + } + } + +} diff --git a/tools/perf/util/include/asm/bug.h b/tools/include/asm/bug.h index 7fcc6810adc..9e5f4846967 100644 --- a/tools/perf/util/include/asm/bug.h +++ b/tools/include/asm/bug.h @@ -1,5 +1,7 @@ -#ifndef _PERF_ASM_GENERIC_BUG_H -#define _PERF_ASM_GENERIC_BUG_H +#ifndef _TOOLS_ASM_BUG_H +#define _TOOLS_ASM_BUG_H + +#include <linux/compiler.h> #define __WARN_printf(arg...) 
do { fprintf(stderr, arg); } while (0) @@ -19,4 +21,5 @@ __warned = 1; \ unlikely(__ret_warn_once); \ }) -#endif + +#endif /* _TOOLS_ASM_BUG_H */ diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h new file mode 100644 index 00000000000..88461f09cc8 --- /dev/null +++ b/tools/include/linux/compiler.h @@ -0,0 +1,40 @@ +#ifndef _TOOLS_LINUX_COMPILER_H_ +#define _TOOLS_LINUX_COMPILER_H_ + +#ifndef __always_inline +# define __always_inline inline __attribute__((always_inline)) +#endif + +#define __user + +#ifndef __attribute_const__ +# define __attribute_const__ +#endif + +#ifndef __maybe_unused +# define __maybe_unused __attribute__((unused)) +#endif + +#ifndef __packed +# define __packed __attribute__((__packed__)) +#endif + +#ifndef __force +# define __force +#endif + +#ifndef __weak +# define __weak __attribute__((weak)) +#endif + +#ifndef likely +# define likely(x) __builtin_expect(!!(x), 1) +#endif + +#ifndef unlikely +# define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) + +#endif /* _TOOLS_LINUX_COMPILER_H */ diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h new file mode 100644 index 00000000000..d07e586b9ba --- /dev/null +++ b/tools/include/linux/export.h @@ -0,0 +1,10 @@ +#ifndef _TOOLS_LINUX_EXPORT_H_ +#define _TOOLS_LINUX_EXPORT_H_ + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define EXPORT_SYMBOL_GPL_FUTURE(sym) +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) + +#endif diff --git a/tools/include/linux/hash.h b/tools/include/linux/hash.h new file mode 100644 index 00000000000..d026c657301 --- /dev/null +++ b/tools/include/linux/hash.h @@ -0,0 +1,5 @@ +#include "../../../include/linux/hash.h" + +#ifndef _TOOLS_LINUX_HASH_H +#define _TOOLS_LINUX_HASH_H +#endif diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h new file mode 100644 index 00000000000..b5cf25e05df --- /dev/null +++ b/tools/include/linux/types.h @@ -0,0 +1,75 @@ +#ifndef _TOOLS_LINUX_TYPES_H_ +#define _TOOLS_LINUX_TYPES_H_ + +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> + +#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ +#include <asm/types.h> + +struct page; +struct kmem_cache; + +typedef enum { + GFP_KERNEL, + GFP_ATOMIC, + __GFP_HIGHMEM, + __GFP_HIGH +} gfp_t; + +/* + * We define u64 as uint64_t for every architecture + * so that we can print it with "%"PRIx64 without getting warnings. 
+ * + * typedef __u64 u64; + * typedef __s64 s64; + */ +typedef uint64_t u64; +typedef int64_t s64; + +typedef __u32 u32; +typedef __s32 s32; + +typedef __u16 u16; +typedef __s16 s16; + +typedef __u8 u8; +typedef __s8 s8; + +#ifdef __CHECKER__ +#define __bitwise__ __attribute__((bitwise)) +#else +#define __bitwise__ +#endif +#ifdef __CHECK_ENDIAN__ +#define __bitwise __bitwise__ +#else +#define __bitwise +#endif + +#define __force +#define __user +#define __must_check +#define __cold + +typedef __u16 __bitwise __le16; +typedef __u16 __bitwise __be16; +typedef __u32 __bitwise __le32; +typedef __u32 __bitwise __be32; +typedef __u64 __bitwise __le64; +typedef __u64 __bitwise __be64; + +struct list_head { + struct list_head *next, *prev; +}; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +#endif /* _TOOLS_LINUX_TYPES_H_ */ diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h index f4912e2668b..84c17d83657 100644 --- a/tools/include/tools/be_byteshift.h +++ b/tools/include/tools/be_byteshift.h @@ -1,68 +1,68 @@ #ifndef _TOOLS_BE_BYTESHIFT_H #define _TOOLS_BE_BYTESHIFT_H -#include <linux/types.h> +#include <stdint.h> -static inline __u16 __get_unaligned_be16(const __u8 *p) +static inline uint16_t __get_unaligned_be16(const uint8_t *p) { return p[0] << 8 | p[1]; } -static inline __u32 __get_unaligned_be32(const __u8 *p) +static inline uint32_t __get_unaligned_be32(const uint8_t *p) { return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; } -static inline __u64 __get_unaligned_be64(const __u8 *p) +static inline uint64_t __get_unaligned_be64(const uint8_t *p) { - return (__u64)__get_unaligned_be32(p) << 32 | + return (uint64_t)__get_unaligned_be32(p) << 32 | __get_unaligned_be32(p + 4); } -static inline void __put_unaligned_be16(__u16 val, __u8 *p) +static inline void __put_unaligned_be16(uint16_t val, uint8_t *p) { *p++ = val >> 8; *p++ = val; } -static inline void __put_unaligned_be32(__u32 val, __u8 *p) +static inline void __put_unaligned_be32(uint32_t val, uint8_t *p) { __put_unaligned_be16(val >> 16, p); __put_unaligned_be16(val, p + 2); } -static inline void __put_unaligned_be64(__u64 val, __u8 *p) +static inline void __put_unaligned_be64(uint64_t val, uint8_t *p) { __put_unaligned_be32(val >> 32, p); __put_unaligned_be32(val, p + 4); } -static inline __u16 get_unaligned_be16(const void *p) +static inline uint16_t get_unaligned_be16(const void *p) { - return __get_unaligned_be16((const __u8 *)p); + return __get_unaligned_be16((const uint8_t *)p); } -static inline __u32 get_unaligned_be32(const void *p) +static inline uint32_t get_unaligned_be32(const void *p) { - return __get_unaligned_be32((const __u8 *)p); + return __get_unaligned_be32((const uint8_t *)p); } -static inline __u64 get_unaligned_be64(const void *p) +static inline uint64_t get_unaligned_be64(const void *p) { - return __get_unaligned_be64((const __u8 *)p); + return __get_unaligned_be64((const uint8_t *)p); } -static inline void put_unaligned_be16(__u16 val, void *p) +static inline void put_unaligned_be16(uint16_t val, void *p) { __put_unaligned_be16(val, p); } -static inline void put_unaligned_be32(__u32 val, void *p) +static inline void put_unaligned_be32(uint32_t val, void *p) { __put_unaligned_be32(val, p); } -static inline void put_unaligned_be64(__u64 val, void *p) +static inline void put_unaligned_be64(uint64_t val, void *p) { __put_unaligned_be64(val, p); } diff --git a/tools/include/tools/le_byteshift.h 
b/tools/include/tools/le_byteshift.h index c99d45a68bd..8fe9f2488ec 100644 --- a/tools/include/tools/le_byteshift.h +++ b/tools/include/tools/le_byteshift.h @@ -1,68 +1,68 @@ #ifndef _TOOLS_LE_BYTESHIFT_H #define _TOOLS_LE_BYTESHIFT_H -#include <linux/types.h> +#include <stdint.h> -static inline __u16 __get_unaligned_le16(const __u8 *p) +static inline uint16_t __get_unaligned_le16(const uint8_t *p) { return p[0] | p[1] << 8; } -static inline __u32 __get_unaligned_le32(const __u8 *p) +static inline uint32_t __get_unaligned_le32(const uint8_t *p) { return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; } -static inline __u64 __get_unaligned_le64(const __u8 *p) +static inline uint64_t __get_unaligned_le64(const uint8_t *p) { - return (__u64)__get_unaligned_le32(p + 4) << 32 | + return (uint64_t)__get_unaligned_le32(p + 4) << 32 | __get_unaligned_le32(p); } -static inline void __put_unaligned_le16(__u16 val, __u8 *p) +static inline void __put_unaligned_le16(uint16_t val, uint8_t *p) { *p++ = val; *p++ = val >> 8; } -static inline void __put_unaligned_le32(__u32 val, __u8 *p) +static inline void __put_unaligned_le32(uint32_t val, uint8_t *p) { __put_unaligned_le16(val >> 16, p + 2); __put_unaligned_le16(val, p); } -static inline void __put_unaligned_le64(__u64 val, __u8 *p) +static inline void __put_unaligned_le64(uint64_t val, uint8_t *p) { __put_unaligned_le32(val >> 32, p + 4); __put_unaligned_le32(val, p); } -static inline __u16 get_unaligned_le16(const void *p) +static inline uint16_t get_unaligned_le16(const void *p) { - return __get_unaligned_le16((const __u8 *)p); + return __get_unaligned_le16((const uint8_t *)p); } -static inline __u32 get_unaligned_le32(const void *p) +static inline uint32_t get_unaligned_le32(const void *p) { - return __get_unaligned_le32((const __u8 *)p); + return __get_unaligned_le32((const uint8_t *)p); } -static inline __u64 get_unaligned_le64(const void *p) +static inline uint64_t get_unaligned_le64(const void *p) { - return __get_unaligned_le64((const __u8 *)p); + return __get_unaligned_le64((const uint8_t *)p); } -static inline void put_unaligned_le16(__u16 val, void *p) +static inline void put_unaligned_le16(uint16_t val, void *p) { __put_unaligned_le16(val, p); } -static inline void put_unaligned_le32(__u32 val, void *p) +static inline void put_unaligned_le32(uint32_t val, void *p) { __put_unaligned_le32(val, p); } -static inline void put_unaligned_le64(__u64 val, void *p) +static inline void put_unaligned_le64(uint64_t val, void *p) { __put_unaligned_le64(val, p); } diff --git a/tools/lguest/Makefile b/tools/lguest/Makefile index 0ac34206f7a..97bca4871ea 100644 --- a/tools/lguest/Makefile +++ b/tools/lguest/Makefile @@ -1,5 +1,4 @@ # This creates the demonstration utility "lguest" which runs a Linux guest. -# Missing headers? 
Add "-I../../../include -I../../../arch/x86/include" CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE all: lguest diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index fd2f9221b24..32cf2ce15d6 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -42,14 +42,10 @@ #include <pwd.h> #include <grp.h> -#include <linux/virtio_config.h> -#include <linux/virtio_net.h> -#include <linux/virtio_blk.h> -#include <linux/virtio_console.h> -#include <linux/virtio_rng.h> -#include <linux/virtio_ring.h> -#include <asm/bootparam.h> -#include "../../include/linux/lguest_launcher.h" +#ifndef VIRTIO_F_ANY_LAYOUT +#define VIRTIO_F_ANY_LAYOUT 27 +#endif + /*L:110 * We can ignore the 43 include files we need for this program, but I do want * to draw attention to the use of kernel-style types. @@ -65,6 +61,15 @@ typedef uint16_t u16; typedef uint8_t u8; /*:*/ +#include <linux/virtio_config.h> +#include <linux/virtio_net.h> +#include <linux/virtio_blk.h> +#include <linux/virtio_console.h> +#include <linux/virtio_rng.h> +#include <linux/virtio_ring.h> +#include <asm/bootparam.h> +#include "../../include/linux/lguest_launcher.h" + #define BRIDGE_PFX "bridge:" #ifndef SIOCBRADDIF #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ @@ -177,30 +182,8 @@ static struct termios orig_term; * in precise order. */ #define wmb() __asm__ __volatile__("" : : : "memory") -#define mb() __asm__ __volatile__("" : : : "memory") - -/* - * Convert an iovec element to the given type. - * - * This is a fairly ugly trick: we need to know the size of the type and - * alignment requirement to check the pointer is kosher. It's also nice to - * have the name of the type in case we report failure. - * - * Typing those three things all the time is cumbersome and error prone, so we - * have a macro which sets them all up and passes to the real function. - */ -#define convert(iov, type) \ - ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) - -static void *_convert(struct iovec *iov, size_t size, size_t align, - const char *name) -{ - if (iov->iov_len != size) - errx(1, "Bad iovec size %zu for %s", iov->iov_len, name); - if ((unsigned long)iov->iov_base % align != 0) - errx(1, "Bad alignment %p for %s", iov->iov_base, name); - return iov->iov_base; -} +#define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") +#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory") /* Wrapper for the last available index. Makes it easier to change. */ #define lg_last_avail(vq) ((vq)->last_avail_idx) @@ -228,7 +211,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov) } /* Take len bytes from the front of this iovec. */ -static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len) +static void iov_consume(struct iovec iov[], unsigned num_iov, + void *dest, unsigned len) { unsigned int i; @@ -236,11 +220,16 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len) unsigned int used; used = iov[i].iov_len < len ? iov[i].iov_len : len; + if (dest) { + memcpy(dest, iov[i].iov_base, used); + dest += used; + } iov[i].iov_base += used; iov[i].iov_len -= used; len -= used; } - assert(len == 0); + if (len != 0) + errx(1, "iovec too short!"); } /* The device virtqueue descriptors are followed by feature bitmasks. 
*/ @@ -693,6 +682,12 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, errx(1, "Guest moved used index from %u to %u", last_avail, vq->vring.avail->idx); + /* + * Make sure we read the descriptor number *after* we read the ring + * update; don't let the cpu or compiler change the order. + */ + rmb(); + /* * Grab the next descriptor number they're advertising, and increment * the index we've seen. @@ -712,6 +707,12 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, i = head; /* + * We have to read the descriptor after we read the descriptor number, + * but there's a data dependency there so the CPU shouldn't reorder + * that: no rmb() required. + */ + + /* * If this is an indirect entry, then this buffer contains a descriptor * table which we handle as if it's any normal descriptor chain. */ @@ -864,7 +865,7 @@ static void console_output(struct virtqueue *vq) warn("Write to stdout gave %i (%d)", len, errno); break; } - iov_consume(iov, out, len); + iov_consume(iov, out, NULL, len); } /* @@ -1547,6 +1548,8 @@ static void setup_tun_net(char *arg) add_feature(dev, VIRTIO_NET_F_HOST_ECN); /* We handle indirect ring entries */ add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); + /* We're compliant with the damn spec. */ + add_feature(dev, VIRTIO_F_ANY_LAYOUT); set_config(dev, sizeof(conf), &conf); /* We don't need the socket any more; setup is done. */ @@ -1591,9 +1594,9 @@ static void blk_request(struct virtqueue *vq) { struct vblk_info *vblk = vq->dev->priv; unsigned int head, out_num, in_num, wlen; - int ret; + int ret, i; u8 *in; - struct virtio_blk_outhdr *out; + struct virtio_blk_outhdr out; struct iovec iov[vq->vring.num]; off64_t off; @@ -1603,32 +1606,36 @@ static void blk_request(struct virtqueue *vq) */ head = wait_for_vq_desc(vq, iov, &out_num, &in_num); - /* - * Every block request should contain at least one output buffer - * (detailing the location on disk and the type of request) and one - * input buffer (to hold the result). - */ - if (out_num == 0 || in_num == 0) - errx(1, "Bad virtblk cmd %u out=%u in=%u", - head, out_num, in_num); + /* Copy the output header from the front of the iov (adjusts iov) */ + iov_consume(iov, out_num, &out, sizeof(out)); + + /* Find and trim end of iov input array, for our status byte. */ + in = NULL; + for (i = out_num + in_num - 1; i >= out_num; i--) { + if (iov[i].iov_len > 0) { + in = iov[i].iov_base + iov[i].iov_len - 1; + iov[i].iov_len--; + break; + } + } + if (!in) + errx(1, "Bad virtblk cmd with no room for status"); - out = convert(&iov[0], struct virtio_blk_outhdr); - in = convert(&iov[out_num+in_num-1], u8); /* * For historical reasons, block operations are expressed in 512 byte * "sectors". */ - off = out->sector * 512; + off = out.sector * 512; /* * In general the virtio block driver is allowed to try SCSI commands. * It'd be nice if we supported eject, for example, but we don't. */ - if (out->type & VIRTIO_BLK_T_SCSI_CMD) { + if (out.type & VIRTIO_BLK_T_SCSI_CMD) { fprintf(stderr, "Scsi commands unsupported\n"); *in = VIRTIO_BLK_S_UNSUPP; wlen = sizeof(*in); - } else if (out->type & VIRTIO_BLK_T_OUT) { + } else if (out.type & VIRTIO_BLK_T_OUT) { /* * Write * @@ -1636,10 +1643,10 @@ static void blk_request(struct virtqueue *vq) * if they try to write past end. 
*/ if (lseek64(vblk->fd, off, SEEK_SET) != off) - err(1, "Bad seek to sector %llu", out->sector); + err(1, "Bad seek to sector %llu", out.sector); - ret = writev(vblk->fd, iov+1, out_num-1); - verbose("WRITE to sector %llu: %i\n", out->sector, ret); + ret = writev(vblk->fd, iov, out_num); + verbose("WRITE to sector %llu: %i\n", out.sector, ret); /* * Grr... Now we know how long the descriptor they sent was, we @@ -1655,7 +1662,7 @@ static void blk_request(struct virtqueue *vq) wlen = sizeof(*in); *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); - } else if (out->type & VIRTIO_BLK_T_FLUSH) { + } else if (out.type & VIRTIO_BLK_T_FLUSH) { /* Flush */ ret = fdatasync(vblk->fd); verbose("FLUSH fdatasync: %i\n", ret); @@ -1669,10 +1676,9 @@ static void blk_request(struct virtqueue *vq) * if they try to read past end. */ if (lseek64(vblk->fd, off, SEEK_SET) != off) - err(1, "Bad seek to sector %llu", out->sector); + err(1, "Bad seek to sector %llu", out.sector); - ret = readv(vblk->fd, iov+1, in_num-1); - verbose("READ from sector %llu: %i\n", out->sector, ret); + ret = readv(vblk->fd, iov + out_num, in_num); if (ret >= 0) { wlen = sizeof(*in) + ret; *in = VIRTIO_BLK_S_OK; @@ -1758,7 +1764,7 @@ static void rng_input(struct virtqueue *vq) len = readv(rng_info->rfd, iov, in_num); if (len <= 0) err(1, "Read from /dev/random gave %i", len); - iov_consume(iov, in_num, len); + iov_consume(iov, in_num, NULL, len); totlen += len; } diff --git a/tools/lguest/lguest.txt b/tools/lguest/lguest.txt index bff0c554485..06e1f464951 100644 --- a/tools/lguest/lguest.txt +++ b/tools/lguest/lguest.txt @@ -29,10 +29,6 @@ Running Lguest: You will need to configure your kernel with the following options: - "General setup": - "Prompt for development and/or incomplete code/drivers" = Y - (CONFIG_EXPERIMENTAL=y) - "Processor type and features": "Paravirtualized guest support" = Y "Lguest guest support" = Y @@ -43,10 +39,10 @@ Running Lguest: "Device Drivers": "Block devices" - "Virtio block driver (EXPERIMENTAL)" = M/Y + "Virtio block driver" = M/Y "Network device support" "Universal TUN/TAP device driver support" = M/Y - "Virtio network driver (EXPERIMENTAL)" = M/Y + "Virtio network driver" = M/Y (CONFIG_VIRTIO_BLK=m, CONFIG_VIRTIO_NET=m and CONFIG_TUN=m) "Virtualization" @@ -74,7 +70,7 @@ Running Lguest: - Run an lguest as root: - Documentation/virtual/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \ + tools/lguest/lguest 64 vmlinux --tunnet=192.168.19.1 \ --block=rootfile root=/dev/vda Explanation: diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile new file mode 100644 index 00000000000..ce00f7ee645 --- /dev/null +++ b/tools/lib/api/Makefile @@ -0,0 +1,44 @@ +include ../../scripts/Makefile.include +include ../../perf/config/utilities.mak # QUIET_CLEAN + +CC = $(CROSS_COMPILE)gcc +AR = $(CROSS_COMPILE)ar + +# guard against environment variables +LIB_H= +LIB_OBJS= + +LIB_H += fs/debugfs.h +LIB_H += fs/fs.h + +LIB_OBJS += $(OUTPUT)fs/debugfs.o +LIB_OBJS += $(OUTPUT)fs/fs.o + +LIBFILE = libapikfs.a + +CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC +EXTLIBS = -lelf -lpthread -lrt -lm +ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 +ALL_LDFLAGS = $(LDFLAGS) + +RM = rm -f + +$(LIBFILE): $(LIB_OBJS) + $(QUIET_AR)$(RM) $@ && $(AR) rcs $(OUTPUT)$@ $(LIB_OBJS) + +$(LIB_OBJS): $(LIB_H) + +libapi_dirs: + $(QUIET_MKDIR)mkdir -p $(OUTPUT)fs/ + +$(OUTPUT)%.o: %.c libapi_dirs + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) 
$< +$(OUTPUT)%.s: %.c libapi_dirs + $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< +$(OUTPUT)%.o: %.S libapi_dirs + $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< + +clean: + $(call QUIET_CLEAN, libapi) $(RM) $(LIB_OBJS) $(LIBFILE) + +.PHONY: clean diff --git a/tools/perf/util/debugfs.c b/tools/lib/api/fs/debugfs.c index dd8b19319c0..a74fba6d774 100644 --- a/tools/perf/util/debugfs.c +++ b/tools/lib/api/fs/debugfs.c @@ -1,36 +1,38 @@ -#include "util.h" -#include "debugfs.h" -#include "cache.h" - -#include <linux/kernel.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdbool.h> +#include <sys/vfs.h> #include <sys/mount.h> +#include <linux/kernel.h> + +#include "debugfs.h" -static int debugfs_premounted; char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug"; -char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; -static const char *debugfs_known_mountpoints[] = { - "/sys/kernel/debug/", - "/debug/", +static const char * const debugfs_known_mountpoints[] = { + "/sys/kernel/debug", + "/debug", 0, }; -static int debugfs_found; +static bool debugfs_found; /* find the path to the mounted debugfs */ const char *debugfs_find_mountpoint(void) { - const char **ptr; + const char * const *ptr; char type[100]; FILE *fp; if (debugfs_found) - return (const char *) debugfs_mountpoint; + return (const char *)debugfs_mountpoint; ptr = debugfs_known_mountpoints; while (*ptr) { if (debugfs_valid_mountpoint(*ptr) == 0) { - debugfs_found = 1; + debugfs_found = true; strcpy(debugfs_mountpoint, *ptr); return debugfs_mountpoint; } @@ -52,7 +54,7 @@ const char *debugfs_find_mountpoint(void) if (strcmp(type, "debugfs") != 0) return NULL; - debugfs_found = 1; + debugfs_found = true; return debugfs_mountpoint; } @@ -71,21 +73,12 @@ int debugfs_valid_mountpoint(const char *debugfs) return 0; } -static void debugfs_set_tracing_events_path(const char *mountpoint) -{ - snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", - mountpoint, "tracing/events"); -} - /* mount the debugfs somewhere if it's not mounted */ - char *debugfs_mount(const char *mountpoint) { /* see if it's already mounted */ - if (debugfs_find_mountpoint()) { - debugfs_premounted = 1; + if (debugfs_find_mountpoint()) goto out; - } /* if not mounted and no argument */ if (mountpoint == NULL) { @@ -100,15 +93,8 @@ char *debugfs_mount(const char *mountpoint) return NULL; /* save the mountpoint */ - debugfs_found = 1; + debugfs_found = true; strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); out: - debugfs_set_tracing_events_path(debugfs_mountpoint); return debugfs_mountpoint; } - -void debugfs_set_path(const char *mountpoint) -{ - snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint); - debugfs_set_tracing_events_path(mountpoint); -} diff --git a/tools/lib/api/fs/debugfs.h b/tools/lib/api/fs/debugfs.h new file mode 100644 index 00000000000..f19d3df9609 --- /dev/null +++ b/tools/lib/api/fs/debugfs.h @@ -0,0 +1,29 @@ +#ifndef __API_DEBUGFS_H__ +#define __API_DEBUGFS_H__ + +#define _STR(x) #x +#define STR(x) _STR(x) + +/* + * On most systems <limits.h> would have given us this, but not on some systems + * (e.g. GNU/Hurd). 
+ */ +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +#ifndef DEBUGFS_MAGIC +#define DEBUGFS_MAGIC 0x64626720 +#endif + +#ifndef PERF_DEBUGFS_ENVIRONMENT +#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR" +#endif + +const char *debugfs_find_mountpoint(void); +int debugfs_valid_mountpoint(const char *debugfs); +char *debugfs_mount(const char *mountpoint); + +extern char debugfs_mountpoint[]; + +#endif /* __API_DEBUGFS_H__ */ diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c new file mode 100644 index 00000000000..c1b49c36a95 --- /dev/null +++ b/tools/lib/api/fs/fs.c @@ -0,0 +1,165 @@ +/* TODO merge/factor in debugfs.c here */ + +#include <ctype.h> +#include <errno.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/vfs.h> + +#include "debugfs.h" +#include "fs.h" + +static const char * const sysfs__fs_known_mountpoints[] = { + "/sys", + 0, +}; + +static const char * const procfs__known_mountpoints[] = { + "/proc", + 0, +}; + +struct fs { + const char *name; + const char * const *mounts; + char path[PATH_MAX + 1]; + bool found; + long magic; +}; + +enum { + FS__SYSFS = 0, + FS__PROCFS = 1, +}; + +static struct fs fs__entries[] = { + [FS__SYSFS] = { + .name = "sysfs", + .mounts = sysfs__fs_known_mountpoints, + .magic = SYSFS_MAGIC, + }, + [FS__PROCFS] = { + .name = "proc", + .mounts = procfs__known_mountpoints, + .magic = PROC_SUPER_MAGIC, + }, +}; + +static bool fs__read_mounts(struct fs *fs) +{ + bool found = false; + char type[100]; + FILE *fp; + + fp = fopen("/proc/mounts", "r"); + if (fp == NULL) + return NULL; + + while (!found && + fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", + fs->path, type) == 2) { + + if (strcmp(type, fs->name) == 0) + found = true; + } + + fclose(fp); + return fs->found = found; +} + +static int fs__valid_mount(const char *fs, long magic) +{ + struct statfs st_fs; + + if (statfs(fs, &st_fs) < 0) + return -ENOENT; + else if (st_fs.f_type != magic) + return -ENOENT; + + return 0; +} + +static bool fs__check_mounts(struct fs *fs) +{ + const char * const *ptr; + + ptr = fs->mounts; + while (*ptr) { + if (fs__valid_mount(*ptr, fs->magic) == 0) { + fs->found = true; + strcpy(fs->path, *ptr); + return true; + } + ptr++; + } + + return false; +} + +static void mem_toupper(char *f, size_t len) +{ + while (len) { + *f = toupper(*f); + f++; + len--; + } +} + +/* + * Check for "NAME_PATH" environment variable to override fs location (for + * testing). This matches the recommendation in Documentation/sysfs-rules.txt + * for SYSFS_PATH. 
+ */ +static bool fs__env_override(struct fs *fs) +{ + char *override_path; + size_t name_len = strlen(fs->name); + /* name + "_PATH" + '\0' */ + char upper_name[name_len + 5 + 1]; + memcpy(upper_name, fs->name, name_len); + mem_toupper(upper_name, name_len); + strcpy(&upper_name[name_len], "_PATH"); + + override_path = getenv(upper_name); + if (!override_path) + return false; + + fs->found = true; + strncpy(fs->path, override_path, sizeof(fs->path)); + return true; +} + +static const char *fs__get_mountpoint(struct fs *fs) +{ + if (fs__env_override(fs)) + return fs->path; + + if (fs__check_mounts(fs)) + return fs->path; + + if (fs__read_mounts(fs)) + return fs->path; + + return NULL; +} + +static const char *fs__mountpoint(int idx) +{ + struct fs *fs = &fs__entries[idx]; + + if (fs->found) + return (const char *)fs->path; + + return fs__get_mountpoint(fs); +} + +#define FS__MOUNTPOINT(name, idx) \ +const char *name##__mountpoint(void) \ +{ \ + return fs__mountpoint(idx); \ +} + +FS__MOUNTPOINT(sysfs, FS__SYSFS); +FS__MOUNTPOINT(procfs, FS__PROCFS); diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h new file mode 100644 index 00000000000..cb7049551f3 --- /dev/null +++ b/tools/lib/api/fs/fs.h @@ -0,0 +1,14 @@ +#ifndef __API_FS__ +#define __API_FS__ + +#ifndef SYSFS_MAGIC +#define SYSFS_MAGIC 0x62656572 +#endif + +#ifndef PROC_SUPER_MAGIC +#define PROC_SUPER_MAGIC 0x9fa0 +#endif + +const char *sysfs__mountpoint(void); +const char *procfs__mountpoint(void); +#endif /* __API_FS__ */ diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile new file mode 100644 index 00000000000..52f9279c6c1 --- /dev/null +++ b/tools/lib/lockdep/Makefile @@ -0,0 +1,243 @@ +# file format version +FILE_VERSION = 1 + +LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion) + +# Makefiles suck: This macro sets a default value of $(2) for the +# variable named by $(1), unless the variable has been set by +# environment or command line. This is necessary for CC and AR +# because make sets default values, so the simpler ?= approach +# won't work as expected. +define allow-override + $(if $(or $(findstring environment,$(origin $(1))),\ + $(findstring command line,$(origin $(1)))),,\ + $(eval $(1) = $(2))) +endef + +# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix. +$(call allow-override,CC,$(CROSS_COMPILE)gcc) +$(call allow-override,AR,$(CROSS_COMPILE)ar) + +INSTALL = install + +# Use DESTDIR for installing into a different root directory. +# This is useful for building a package. The program will be +# installed in this directory as if it was the root directory. +# Then the build tool can move it later. 
+DESTDIR ?= +DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))' + +prefix ?= /usr/local +libdir_relative = lib +libdir = $(prefix)/$(libdir_relative) +bindir_relative = bin +bindir = $(prefix)/$(bindir_relative) + +export DESTDIR DESTDIR_SQ INSTALL + +# copy a bit from Linux kbuild + +ifeq ("$(origin V)", "command line") + VERBOSE = $(V) +endif +ifndef VERBOSE + VERBOSE = 0 +endif + +ifeq ("$(origin O)", "command line") + BUILD_OUTPUT := $(O) +endif + +ifeq ($(BUILD_SRC),) +ifneq ($(BUILD_OUTPUT),) + +define build_output + $(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \ + BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1 +endef + +saved-output := $(BUILD_OUTPUT) +BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd) +$(if $(BUILD_OUTPUT),, \ + $(error output directory "$(saved-output)" does not exist)) + +all: sub-make + +gui: force + $(call build_output, all_cmd) + +$(filter-out gui,$(MAKECMDGOALS)): sub-make + +sub-make: force + $(call build_output, $(MAKECMDGOALS)) + + +# Leave processing to above invocation of make +skip-makefile := 1 + +endif # BUILD_OUTPUT +endif # BUILD_SRC + +# We process the rest of the Makefile if this is the final invocation of make +ifeq ($(skip-makefile),) + +srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))) +objtree := $(realpath $(CURDIR)) +src := $(srctree) +obj := $(objtree) + +export prefix libdir bindir src obj + +# Shell quotes +libdir_SQ = $(subst ','\'',$(libdir)) +bindir_SQ = $(subst ','\'',$(bindir)) + +LIB_FILE = liblockdep.a liblockdep.so.$(LIBLOCKDEP_VERSION) +BIN_FILE = lockdep + +CONFIG_INCLUDES = +CONFIG_LIBS = +CONFIG_FLAGS = + +OBJ = $@ +N = + +export Q VERBOSE + +INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) + +# Set compile option CFLAGS if not set elsewhere +CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g + +override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ) + +ifeq ($(VERBOSE),1) + Q = + print_compile = + print_app_build = + print_fpic_compile = + print_shared_lib_compile = + print_install = +else + Q = @ + print_compile = echo ' CC '$(OBJ); + print_app_build = echo ' BUILD '$(OBJ); + print_fpic_compile = echo ' CC FPIC '$(OBJ); + print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ); + print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ); + print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2'; +endif + +do_fpic_compile = \ + ($(print_fpic_compile) \ + $(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@) + +do_app_build = \ + ($(print_app_build) \ + $(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS)) + +do_compile_shared_library = \ + ($(print_shared_lib_compile) \ + $(CC) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='"$@"';$(shell ln -s $@ liblockdep.so)) + +do_build_static_lib = \ + ($(print_static_lib_build) \ + $(RM) $@; $(AR) rcs $@ $^) + + +define do_compile + $(print_compile) \ + $(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@; +endef + +$(obj)/%.o: $(src)/%.c + $(Q)$(call do_compile) + +%.o: $(src)/%.c + $(Q)$(call do_compile) + +PEVENT_LIB_OBJS = common.o lockdep.o preload.o rbtree.o + +ALL_OBJS = $(PEVENT_LIB_OBJS) + +CMD_TARGETS = $(LIB_FILE) + +TARGETS = $(CMD_TARGETS) + + +all: all_cmd + +all_cmd: $(CMD_TARGETS) + +liblockdep.so.$(LIBLOCKDEP_VERSION): $(PEVENT_LIB_OBJS) + $(Q)$(do_compile_shared_library) + +liblockdep.a: $(PEVENT_LIB_OBJS) + $(Q)$(do_build_static_lib) + +$(PEVENT_LIB_OBJS): %.o: $(src)/%.c + $(Q)$(do_fpic_compile) + +## make deps + 
+all_objs := $(sort $(ALL_OBJS)) +all_deps := $(all_objs:%.o=.%.d) + +# let .d file also depends on the source and header files +define check_deps + @set -e; $(RM) $@; \ + $(CC) -MM $(CFLAGS) $< > $@.$$$$; \ + sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \ + $(RM) $@.$$$$ +endef + +$(all_deps): .%.d: $(src)/%.c + $(Q)$(call check_deps) + +$(all_objs) : %.o : .%.d + +dep_includes := $(wildcard $(all_deps)) + +ifneq ($(dep_includes),) + include $(dep_includes) +endif + +### Detect environment changes +TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE) + +tags: force + $(RM) tags + find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \ + --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/' + +TAGS: force + $(RM) TAGS + find . -name '*.[ch]' | xargs etags \ + --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' + +define do_install + $(print_install) \ + if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ + fi; \ + $(INSTALL) $1 '$(DESTDIR_SQ)$2' +endef + +install_lib: all_cmd + $(Q)$(call do_install,$(LIB_FILE),$(libdir_SQ)) + $(Q)$(call do_install,$(BIN_FILE),$(bindir_SQ)) + +install: install_lib + +clean: + $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d + $(RM) tags TAGS + +endif # skip-makefile + +PHONY += force +force: + +# Declare the contents of the .PHONY variable as phony. We keep that +# information in a variable so we can use it in if_changed and friends. +.PHONY: $(PHONY) diff --git a/tools/lib/lockdep/common.c b/tools/lib/lockdep/common.c new file mode 100644 index 00000000000..8ef602f18a3 --- /dev/null +++ b/tools/lib/lockdep/common.c @@ -0,0 +1,33 @@ +#include <stddef.h> +#include <stdbool.h> +#include <linux/compiler.h> +#include <linux/lockdep.h> +#include <unistd.h> +#include <sys/syscall.h> + +static __thread struct task_struct current_obj; + +/* lockdep wants these */ +bool debug_locks = true; +bool debug_locks_silent; + +__attribute__((constructor)) static void liblockdep_init(void) +{ + lockdep_init(); +} + +__attribute__((destructor)) static void liblockdep_exit(void) +{ + debug_check_no_locks_held(¤t_obj); +} + +struct task_struct *__curr(void) +{ + if (current_obj.pid == 0) { + /* Makes lockdep output pretty */ + prctl(PR_GET_NAME, current_obj.comm); + current_obj.pid = syscall(__NR_gettid); + } + + return ¤t_obj; +} diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h new file mode 100644 index 00000000000..0bda630027c --- /dev/null +++ b/tools/lib/lockdep/include/liblockdep/common.h @@ -0,0 +1,50 @@ +#ifndef _LIBLOCKDEP_COMMON_H +#define _LIBLOCKDEP_COMMON_H + +#include <pthread.h> + +#define NR_LOCKDEP_CACHING_CLASSES 2 +#define MAX_LOCKDEP_SUBCLASSES 8UL + +#ifndef CALLER_ADDR0 +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#endif + +#ifndef _RET_IP_ +#define _RET_IP_ CALLER_ADDR0 +#endif + +#ifndef _THIS_IP_ +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) +#endif + +struct lockdep_subclass_key { + char __one_byte; +}; + +struct lock_class_key { + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; +}; + +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + +void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass); +void lock_acquire(struct lockdep_map *lock, unsigned int subclass, + int 
trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); +void lock_release(struct lockdep_map *lock, int nested, + unsigned long ip); + +#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ + { .name = (_name), .key = (void *)(_key), } + +#endif diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h new file mode 100644 index 00000000000..ee53a42818c --- /dev/null +++ b/tools/lib/lockdep/include/liblockdep/mutex.h @@ -0,0 +1,70 @@ +#ifndef _LIBLOCKDEP_MUTEX_H +#define _LIBLOCKDEP_MUTEX_H + +#include <pthread.h> +#include "common.h" + +struct liblockdep_pthread_mutex { + pthread_mutex_t mutex; + struct lockdep_map dep_map; +}; + +typedef struct liblockdep_pthread_mutex liblockdep_pthread_mutex_t; + +#define LIBLOCKDEP_PTHREAD_MUTEX_INITIALIZER(mtx) \ + (const struct liblockdep_pthread_mutex) { \ + .mutex = PTHREAD_MUTEX_INITIALIZER, \ + .dep_map = STATIC_LOCKDEP_MAP_INIT(#mtx, &((&(mtx))->dep_map)), \ +} + +static inline int __mutex_init(liblockdep_pthread_mutex_t *lock, + const char *name, + struct lock_class_key *key, + const pthread_mutexattr_t *__mutexattr) +{ + lockdep_init_map(&lock->dep_map, name, key, 0); + return pthread_mutex_init(&lock->mutex, __mutexattr); +} + +#define liblockdep_pthread_mutex_init(mutex, mutexattr) \ +({ \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key, (mutexattr)); \ +}) + +static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock) +{ + lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); + return pthread_mutex_lock(&lock->mutex); +} + +static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock) +{ + lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_); + return pthread_mutex_unlock(&lock->mutex); +} + +static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock) +{ + lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); + return pthread_mutex_trylock(&lock->mutex) == 0 ? 
1 : 0; +} + +static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock) +{ + return pthread_mutex_destroy(&lock->mutex); +} + +#ifdef __USE_LIBLOCKDEP + +#define pthread_mutex_t liblockdep_pthread_mutex_t +#define pthread_mutex_init liblockdep_pthread_mutex_init +#define pthread_mutex_lock liblockdep_pthread_mutex_lock +#define pthread_mutex_unlock liblockdep_pthread_mutex_unlock +#define pthread_mutex_trylock liblockdep_pthread_mutex_trylock +#define pthread_mutex_destroy liblockdep_pthread_mutex_destroy + +#endif + +#endif diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h new file mode 100644 index 00000000000..4ec03f86155 --- /dev/null +++ b/tools/lib/lockdep/include/liblockdep/rwlock.h @@ -0,0 +1,86 @@ +#ifndef _LIBLOCKDEP_RWLOCK_H +#define _LIBLOCKDEP_RWLOCK_H + +#include <pthread.h> +#include "common.h" + +struct liblockdep_pthread_rwlock { + pthread_rwlock_t rwlock; + struct lockdep_map dep_map; +}; + +typedef struct liblockdep_pthread_rwlock liblockdep_pthread_rwlock_t; + +#define LIBLOCKDEP_PTHREAD_RWLOCK_INITIALIZER(rwl) \ + (struct liblockdep_pthread_rwlock) { \ + .rwlock = PTHREAD_RWLOCK_INITIALIZER, \ + .dep_map = STATIC_LOCKDEP_MAP_INIT(#rwl, &((&(rwl))->dep_map)), \ +} + +static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock, + const char *name, + struct lock_class_key *key, + const pthread_rwlockattr_t *attr) +{ + lockdep_init_map(&lock->dep_map, name, key, 0); + + return pthread_rwlock_init(&lock->rwlock, attr); +} + +#define liblockdep_pthread_rwlock_init(lock, attr) \ +({ \ + static struct lock_class_key __key; \ + \ + __rwlock_init((lock), #lock, &__key, (attr)); \ +}) + +static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock) +{ + lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_); + return pthread_rwlock_rdlock(&lock->rwlock); + +} + +static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock) +{ + lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_); + return pthread_rwlock_unlock(&lock->rwlock); +} + +static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock) +{ + lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); + return pthread_rwlock_wrlock(&lock->rwlock); +} + +static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock) +{ + lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_); + return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0; +} + +static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock) +{ + lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); + return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 
1 : 0; +} + +static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock) +{ + return pthread_rwlock_destroy(&lock->rwlock); +} + +#ifdef __USE_LIBLOCKDEP + +#define pthread_rwlock_t liblockdep_pthread_rwlock_t +#define pthread_rwlock_init liblockdep_pthread_rwlock_init +#define pthread_rwlock_rdlock liblockdep_pthread_rwlock_rdlock +#define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock +#define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock +#define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock +#define pthread_rwlock_trywlock liblockdep_pthread_rwlock_trywlock +#define pthread_rwlock_destroy liblockdep_rwlock_destroy + +#endif + +#endif diff --git a/tools/lib/lockdep/lockdep b/tools/lib/lockdep/lockdep new file mode 100755 index 00000000000..49af9fe19f5 --- /dev/null +++ b/tools/lib/lockdep/lockdep @@ -0,0 +1,3 @@ +#!/bin/bash + +LD_PRELOAD="./liblockdep.so $LD_PRELOAD" "$@" diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c new file mode 100644 index 00000000000..f42b7e9aa48 --- /dev/null +++ b/tools/lib/lockdep/lockdep.c @@ -0,0 +1,2 @@ +#include <linux/lockdep.h> +#include "../../../kernel/locking/lockdep.c" diff --git a/tools/lib/lockdep/lockdep_internals.h b/tools/lib/lockdep/lockdep_internals.h new file mode 100644 index 00000000000..29d0c954cc2 --- /dev/null +++ b/tools/lib/lockdep/lockdep_internals.h @@ -0,0 +1 @@ +#include "../../../kernel/locking/lockdep_internals.h" diff --git a/tools/lib/lockdep/lockdep_states.h b/tools/lib/lockdep/lockdep_states.h new file mode 100644 index 00000000000..248d235efda --- /dev/null +++ b/tools/lib/lockdep/lockdep_states.h @@ -0,0 +1 @@ +#include "../../../kernel/locking/lockdep_states.h" diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c new file mode 100644 index 00000000000..6f803609e49 --- /dev/null +++ b/tools/lib/lockdep/preload.c @@ -0,0 +1,445 @@ +#define _GNU_SOURCE +#include <pthread.h> +#include <stdio.h> +#include <dlfcn.h> +#include <stdlib.h> +#include <sysexits.h> +#include "include/liblockdep/mutex.h" +#include "../../../include/linux/rbtree.h" + +/** + * struct lock_lookup - liblockdep's view of a single unique lock + * @orig: pointer to the original pthread lock, used for lookups + * @dep_map: lockdep's dep_map structure + * @key: lockdep's key structure + * @node: rb-tree node used to store the lock in a global tree + * @name: a unique name for the lock + */ +struct lock_lookup { + void *orig; /* Original pthread lock, used for lookups */ + struct lockdep_map dep_map; /* Since all locks are dynamic, we need + * a dep_map and a key for each lock */ + /* + * Wait, there's no support for key classes? Yup :( + * Most big projects wrap the pthread api with their own calls to + * be compatible with different locking methods. This means that + * "classes" will be brokes since the function that creates all + * locks will point to a generic locking function instead of the + * actual code that wants to do the locking. 
+ */ + struct lock_class_key key; + struct rb_node node; +#define LIBLOCKDEP_MAX_LOCK_NAME 22 + char name[LIBLOCKDEP_MAX_LOCK_NAME]; +}; + +/* This is where we store our locks */ +static struct rb_root locks = RB_ROOT; +static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER; + +/* pthread mutex API */ + +#ifdef __GLIBC__ +extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr); +extern int __pthread_mutex_lock(pthread_mutex_t *mutex); +extern int __pthread_mutex_trylock(pthread_mutex_t *mutex); +extern int __pthread_mutex_unlock(pthread_mutex_t *mutex); +extern int __pthread_mutex_destroy(pthread_mutex_t *mutex); +#else +#define __pthread_mutex_init NULL +#define __pthread_mutex_lock NULL +#define __pthread_mutex_trylock NULL +#define __pthread_mutex_unlock NULL +#define __pthread_mutex_destroy NULL +#endif +static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex, + const pthread_mutexattr_t *attr) = __pthread_mutex_init; +static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock; +static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock; +static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock; +static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy; + +/* pthread rwlock API */ + +#ifdef __GLIBC__ +extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr); +extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock); +extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock); +extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock); +extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock); +extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock); +extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock); +#else +#define __pthread_rwlock_init NULL +#define __pthread_rwlock_destroy NULL +#define __pthread_rwlock_wrlock NULL +#define __pthread_rwlock_trywrlock NULL +#define __pthread_rwlock_rdlock NULL +#define __pthread_rwlock_tryrdlock NULL +#define __pthread_rwlock_unlock NULL +#endif + +static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock, + const pthread_rwlockattr_t *attr) = __pthread_rwlock_init; +static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy; +static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock; +static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock; +static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock; +static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock; +static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock; + +enum { none, prepare, done, } __init_state; +static void init_preload(void); +static void try_init_preload(void) +{ + if (__init_state != done) + init_preload(); +} + +static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent) +{ + struct rb_node **node = &locks.rb_node; + struct lock_lookup *l; + + *parent = NULL; + + while (*node) { + l = rb_entry(*node, struct lock_lookup, node); + + *parent = *node; + if (lock < l->orig) + node = &l->node.rb_left; + else if (lock > l->orig) + node = &l->node.rb_right; + else + return node; + } + + return node; +} + +#ifndef LIBLOCKDEP_STATIC_ENTRIES +#define LIBLOCKDEP_STATIC_ENTRIES 1024 +#endif + +#define ARRAY_SIZE(arr) (sizeof(arr) 
/ sizeof((arr)[0])) + +static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES]; +static int __locks_nr; + +static inline bool is_static_lock(struct lock_lookup *lock) +{ + return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks); +} + +static struct lock_lookup *alloc_lock(void) +{ + if (__init_state != done) { + /* + * Some programs attempt to initialize and use locks in their + * allocation path. This means that a call to malloc() would + * result in locks being initialized and locked. + * + * Why is it an issue for us? dlsym() below will try allocating + * to give us the original function. Since this allocation will + * result in a locking operations, we have to let pthread deal + * with it, but we can't! we don't have the pointer to the + * original API since we're inside dlsym() trying to get it + */ + + int idx = __locks_nr++; + if (idx >= ARRAY_SIZE(__locks)) { + fprintf(stderr, + "LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n"); + exit(EX_UNAVAILABLE); + } + return __locks + idx; + } + + return malloc(sizeof(struct lock_lookup)); +} + +static inline void free_lock(struct lock_lookup *lock) +{ + if (likely(!is_static_lock(lock))) + free(lock); +} + +/** + * __get_lock - find or create a lock instance + * @lock: pointer to a pthread lock function + * + * Try to find an existing lock in the rbtree using the provided pointer. If + * one wasn't found - create it. + */ +static struct lock_lookup *__get_lock(void *lock) +{ + struct rb_node **node, *parent; + struct lock_lookup *l; + + ll_pthread_rwlock_rdlock(&locks_rwlock); + node = __get_lock_node(lock, &parent); + ll_pthread_rwlock_unlock(&locks_rwlock); + if (*node) { + return rb_entry(*node, struct lock_lookup, node); + } + + /* We didn't find the lock, let's create it */ + l = alloc_lock(); + if (l == NULL) + return NULL; + + l->orig = lock; + /* + * Currently the name of the lock is the ptr value of the pthread lock, + * while not optimal, it makes debugging a bit easier. + * + * TODO: Get the real name of the lock using libdwarf + */ + sprintf(l->name, "%p", lock); + lockdep_init_map(&l->dep_map, l->name, &l->key, 0); + + ll_pthread_rwlock_wrlock(&locks_rwlock); + /* This might have changed since the last time we fetched it */ + node = __get_lock_node(lock, &parent); + rb_link_node(&l->node, parent, node); + rb_insert_color(&l->node, &locks); + ll_pthread_rwlock_unlock(&locks_rwlock); + + return l; +} + +static void __del_lock(struct lock_lookup *lock) +{ + ll_pthread_rwlock_wrlock(&locks_rwlock); + rb_erase(&lock->node, &locks); + ll_pthread_rwlock_unlock(&locks_rwlock); + free_lock(lock); +} + +int pthread_mutex_init(pthread_mutex_t *mutex, + const pthread_mutexattr_t *attr) +{ + int r; + + /* + * We keep trying to init our preload module because there might be + * code in init sections that tries to touch locks before we are + * initialized, in that case we'll need to manually call preload + * to get us going. + * + * Funny enough, kernel's lockdep had the same issue, and used + * (almost) the same solution. See look_up_lock_class() in + * kernel/locking/lockdep.c for details. + */ + try_init_preload(); + + r = ll_pthread_mutex_init(mutex, attr); + if (r == 0) + /* + * We do a dummy initialization here so that lockdep could + * warn us if something fishy is going on - such as + * initializing a held lock. 
+ */ + __get_lock(mutex); + + return r; +} + +int pthread_mutex_lock(pthread_mutex_t *mutex) +{ + int r; + + try_init_preload(); + + lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, + (unsigned long)_RET_IP_); + /* + * Here's the thing with pthread mutexes: unlike the kernel variant, + * they can fail. + * + * This means that the behaviour here is a bit different from what's + * going on in the kernel: there we just tell lockdep that we took the + * lock before actually taking it, but here we must deal with the case + * that locking failed. + * + * To do that we'll "release" the lock if locking failed - this way + * we'll get lockdep doing the correct checks when we try to take + * the lock, and if that fails - we'll be back to the correct + * state by releasing it. + */ + r = ll_pthread_mutex_lock(mutex); + if (r) + lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_mutex_trylock(pthread_mutex_t *mutex) +{ + int r; + + try_init_preload(); + + lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); + r = ll_pthread_mutex_trylock(mutex); + if (r) + lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_mutex_unlock(pthread_mutex_t *mutex) +{ + int r; + + try_init_preload(); + + lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); + /* + * Just like taking a lock, only in reverse! + * + * If we fail releasing the lock, tell lockdep we're holding it again. + */ + r = ll_pthread_mutex_unlock(mutex); + if (r) + lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_mutex_destroy(pthread_mutex_t *mutex) +{ + try_init_preload(); + + /* + * Let's see if we're releasing a lock that's held. + * + * TODO: Hook into free() and add that check there as well. 
+ */ + debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex)); + __del_lock(__get_lock(mutex)); + return ll_pthread_mutex_destroy(mutex); +} + +/* This is the rwlock part, very similar to what happened with mutex above */ +int pthread_rwlock_init(pthread_rwlock_t *rwlock, + const pthread_rwlockattr_t *attr) +{ + int r; + + try_init_preload(); + + r = ll_pthread_rwlock_init(rwlock, attr); + if (r == 0) + __get_lock(rwlock); + + return r; +} + +int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) +{ + try_init_preload(); + + debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock)); + __del_lock(__get_lock(rwlock)); + return ll_pthread_rwlock_destroy(rwlock); +} + +int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) +{ + int r; + + init_preload(); + + lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_); + r = ll_pthread_rwlock_rdlock(rwlock); + if (r) + lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) +{ + int r; + + init_preload(); + + lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_); + r = ll_pthread_rwlock_tryrdlock(rwlock); + if (r) + lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) +{ + int r; + + init_preload(); + + lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); + r = ll_pthread_rwlock_trywrlock(rwlock); + if (r) + lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) +{ + int r; + + init_preload(); + + lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); + r = ll_pthread_rwlock_wrlock(rwlock); + if (r) + lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); + + return r; +} + +int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) +{ + int r; + + init_preload(); + + lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); + r = ll_pthread_rwlock_unlock(rwlock); + if (r) + lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); + + return r; +} + +__attribute__((constructor)) static void init_preload(void) +{ + if (__init_state == done) + return; + +#ifndef __GLIBC__ + __init_state = prepare; + + ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init"); + ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock"); + ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock"); + ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock"); + ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy"); + + ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init"); + ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy"); + ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock"); + ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock"); + ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock"); + ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock"); + ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock"); +#endif + + lockdep_init(); + + __init_state = done; +} diff --git a/tools/lib/lockdep/rbtree.c b/tools/lib/lockdep/rbtree.c new file mode 100644 index 00000000000..f7f43033c8b --- /dev/null +++ b/tools/lib/lockdep/rbtree.c @@ -0,0 +1 @@ +#include 
"../../../lib/rbtree.c" diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh new file mode 100755 index 00000000000..5334ad9d39b --- /dev/null +++ b/tools/lib/lockdep/run_tests.sh @@ -0,0 +1,27 @@ +#! /bin/bash + +make &> /dev/null + +for i in `ls tests/*.c`; do + testname=$(basename -s .c "$i") + gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null + echo -ne "$testname... " + if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then + echo "PASSED!" + else + echo "FAILED!" + fi + rm tests/$testname +done + +for i in `ls tests/*.c`; do + testname=$(basename -s .c "$i") + gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null + echo -ne "(PRELOAD) $testname... " + if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then + echo "PASSED!" + else + echo "FAILED!" + fi + rm tests/$testname +done diff --git a/tools/lib/lockdep/tests/AA.c b/tools/lib/lockdep/tests/AA.c new file mode 100644 index 00000000000..0f782ff404a --- /dev/null +++ b/tools/lib/lockdep/tests/AA.c @@ -0,0 +1,13 @@ +#include <liblockdep/mutex.h> + +void main(void) +{ + pthread_mutex_t a, b; + + pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + + pthread_mutex_lock(&a); + pthread_mutex_lock(&b); + pthread_mutex_lock(&a); +} diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c new file mode 100644 index 00000000000..07f0e29d548 --- /dev/null +++ b/tools/lib/lockdep/tests/ABBA.c @@ -0,0 +1,13 @@ +#include <liblockdep/mutex.h> +#include "common.h" + +void main(void) +{ + pthread_mutex_t a, b; + + pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + + LOCK_UNLOCK_2(a, b); + LOCK_UNLOCK_2(b, a); +} diff --git a/tools/lib/lockdep/tests/ABBCCA.c b/tools/lib/lockdep/tests/ABBCCA.c new file mode 100644 index 00000000000..843db09ac66 --- /dev/null +++ b/tools/lib/lockdep/tests/ABBCCA.c @@ -0,0 +1,15 @@ +#include <liblockdep/mutex.h> +#include "common.h" + +void main(void) +{ + pthread_mutex_t a, b, c; + + pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + pthread_mutex_init(&c, NULL); + + LOCK_UNLOCK_2(a, b); + LOCK_UNLOCK_2(b, c); + LOCK_UNLOCK_2(c, a); +} diff --git a/tools/lib/lockdep/tests/ABBCCDDA.c b/tools/lib/lockdep/tests/ABBCCDDA.c new file mode 100644 index 00000000000..33620e268f8 --- /dev/null +++ b/tools/lib/lockdep/tests/ABBCCDDA.c @@ -0,0 +1,17 @@ +#include <liblockdep/mutex.h> +#include "common.h" + +void main(void) +{ + pthread_mutex_t a, b, c, d; + + pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + pthread_mutex_init(&c, NULL); + pthread_mutex_init(&d, NULL); + + LOCK_UNLOCK_2(a, b); + LOCK_UNLOCK_2(b, c); + LOCK_UNLOCK_2(c, d); + LOCK_UNLOCK_2(d, a); +} diff --git a/tools/lib/lockdep/tests/ABCABC.c b/tools/lib/lockdep/tests/ABCABC.c new file mode 100644 index 00000000000..3fee51e3a68 --- /dev/null +++ b/tools/lib/lockdep/tests/ABCABC.c @@ -0,0 +1,15 @@ +#include <liblockdep/mutex.h> +#include "common.h" + +void main(void) +{ + pthread_mutex_t a, b, c; + + pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + pthread_mutex_init(&c, NULL); + + LOCK_UNLOCK_2(a, b); + LOCK_UNLOCK_2(c, a); + LOCK_UNLOCK_2(b, c); +} diff --git a/tools/lib/lockdep/tests/ABCDBCDA.c b/tools/lib/lockdep/tests/ABCDBCDA.c new file mode 100644 index 00000000000..427ba562c75 --- /dev/null +++ b/tools/lib/lockdep/tests/ABCDBCDA.c @@ -0,0 +1,17 @@ +#include <liblockdep/mutex.h> +#include "common.h" + +void main(void) +{ + pthread_mutex_t a, b, c, d; + + 
pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + pthread_mutex_init(&c, NULL); + pthread_mutex_init(&d, NULL); + + LOCK_UNLOCK_2(a, b); + LOCK_UNLOCK_2(c, d); + LOCK_UNLOCK_2(b, c); + LOCK_UNLOCK_2(d, a); +} diff --git a/tools/lib/lockdep/tests/ABCDBDDA.c b/tools/lib/lockdep/tests/ABCDBDDA.c new file mode 100644 index 00000000000..680c6cf3e91 --- /dev/null +++ b/tools/lib/lockdep/tests/ABCDBDDA.c @@ -0,0 +1,17 @@ +#include <liblockdep/mutex.h> +#include "common.h" + +void main(void) +{ + pthread_mutex_t a, b, c, d; + + pthread_mutex_init(&a, NULL); + pthread_mutex_init(&b, NULL); + pthread_mutex_init(&c, NULL); + pthread_mutex_init(&d, NULL); + + LOCK_UNLOCK_2(a, b); + LOCK_UNLOCK_2(c, d); + LOCK_UNLOCK_2(b, d); + LOCK_UNLOCK_2(d, a); +} diff --git a/tools/lib/lockdep/tests/WW.c b/tools/lib/lockdep/tests/WW.c new file mode 100644 index 00000000000..d44f77d7102 --- /dev/null +++ b/tools/lib/lockdep/tests/WW.c @@ -0,0 +1,13 @@ +#include <liblockdep/rwlock.h> + +void main(void) +{ + pthread_rwlock_t a, b; + + pthread_rwlock_init(&a, NULL); + pthread_rwlock_init(&b, NULL); + + pthread_rwlock_wrlock(&a); + pthread_rwlock_rdlock(&b); + pthread_rwlock_wrlock(&a); +} diff --git a/tools/lib/lockdep/tests/common.h b/tools/lib/lockdep/tests/common.h new file mode 100644 index 00000000000..d89e94d47d8 --- /dev/null +++ b/tools/lib/lockdep/tests/common.h @@ -0,0 +1,12 @@ +#ifndef _LIBLOCKDEP_TEST_COMMON_H +#define _LIBLOCKDEP_TEST_COMMON_H + +#define LOCK_UNLOCK_2(a, b) \ + do { \ + pthread_mutex_lock(&(a)); \ + pthread_mutex_lock(&(b)); \ + pthread_mutex_unlock(&(b)); \ + pthread_mutex_unlock(&(a)); \ + } while(0) + +#endif diff --git a/tools/lib/lockdep/tests/unlock_balance.c b/tools/lib/lockdep/tests/unlock_balance.c new file mode 100644 index 00000000000..0bc62de686f --- /dev/null +++ b/tools/lib/lockdep/tests/unlock_balance.c @@ -0,0 +1,12 @@ +#include <liblockdep/mutex.h> + +void main(void) +{ + pthread_mutex_t a; + + pthread_mutex_init(&a, NULL); + + pthread_mutex_lock(&a); + pthread_mutex_unlock(&a); + pthread_mutex_unlock(&a); +} diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h new file mode 100644 index 00000000000..d82b170bb21 --- /dev/null +++ b/tools/lib/lockdep/uinclude/asm/hash.h @@ -0,0 +1,6 @@ +#ifndef __ASM_GENERIC_HASH_H +#define __ASM_GENERIC_HASH_H + +/* Stub */ + +#endif /* __ASM_GENERIC_HASH_H */ diff --git a/tools/lib/lockdep/uinclude/asm/hweight.h b/tools/lib/lockdep/uinclude/asm/hweight.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/asm/hweight.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/asm/sections.h b/tools/lib/lockdep/uinclude/asm/sections.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/asm/sections.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/bitops.h b/tools/lib/lockdep/uinclude/linux/bitops.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/bitops.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/compiler.h b/tools/lib/lockdep/uinclude/linux/compiler.h new file mode 100644 index 00000000000..7ac838a1f19 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/compiler.h @@ -0,0 +1,7 @@ +#ifndef _LIBLOCKDEP_LINUX_COMPILER_H_ +#define _LIBLOCKDEP_LINUX_COMPILER_H_ + +#define __used __attribute__((__unused__)) +#define unlikely + +#endif diff --git 
a/tools/lib/lockdep/uinclude/linux/debug_locks.h b/tools/lib/lockdep/uinclude/linux/debug_locks.h new file mode 100644 index 00000000000..f38eb64df79 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/debug_locks.h @@ -0,0 +1,12 @@ +#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_ +#define _LIBLOCKDEP_DEBUG_LOCKS_H_ + +#include <stddef.h> +#include <linux/compiler.h> + +#define DEBUG_LOCKS_WARN_ON(x) (x) + +extern bool debug_locks; +extern bool debug_locks_silent; + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/delay.h b/tools/lib/lockdep/uinclude/linux/delay.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/delay.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/ftrace.h b/tools/lib/lockdep/uinclude/linux/ftrace.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/ftrace.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/gfp.h b/tools/lib/lockdep/uinclude/linux/gfp.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/gfp.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/hardirq.h b/tools/lib/lockdep/uinclude/linux/hardirq.h new file mode 100644 index 00000000000..c8f3f8f5872 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/hardirq.h @@ -0,0 +1,11 @@ +#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_ +#define _LIBLOCKDEP_LINUX_HARDIRQ_H_ + +#define SOFTIRQ_BITS 0UL +#define HARDIRQ_BITS 0UL +#define SOFTIRQ_SHIFT 0UL +#define HARDIRQ_SHIFT 0UL +#define hardirq_count() 0UL +#define softirq_count() 0UL + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/hash.h b/tools/lib/lockdep/uinclude/linux/hash.h new file mode 100644 index 00000000000..0f8479858dc --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/hash.h @@ -0,0 +1 @@ +#include "../../../include/linux/hash.h" diff --git a/tools/lib/lockdep/uinclude/linux/interrupt.h b/tools/lib/lockdep/uinclude/linux/interrupt.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/interrupt.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/irqflags.h b/tools/lib/lockdep/uinclude/linux/irqflags.h new file mode 100644 index 00000000000..6cc296f0fad --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/irqflags.h @@ -0,0 +1,38 @@ +#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ +#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ + +# define trace_hardirq_context(p) 0 +# define trace_softirq_context(p) 0 +# define trace_hardirqs_enabled(p) 0 +# define trace_softirqs_enabled(p) 0 +# define trace_hardirq_enter() do { } while (0) +# define trace_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS + +# define stop_critical_timings() do { } while (0) +# define start_critical_timings() do { } while (0) + +#define raw_local_irq_disable() do { } while (0) +#define raw_local_irq_enable() do { } while (0) +#define raw_local_irq_save(flags) ((flags) = 0) +#define raw_local_irq_restore(flags) do { } while (0) +#define raw_local_save_flags(flags) ((flags) = 0) +#define raw_irqs_disabled_flags(flags) do { } while (0) +#define raw_irqs_disabled() 0 +#define raw_safe_halt() + +#define local_irq_enable() do { } while (0) +#define local_irq_disable() do { } while (0) +#define local_irq_save(flags) ((flags) = 0) +#define 
local_irq_restore(flags) do { } while (0) +#define local_save_flags(flags) ((flags) = 0) +#define irqs_disabled() (1) +#define irqs_disabled_flags(flags) (0) +#define safe_halt() do { } while (0) + +#define trace_lock_release(x, y) +#define trace_lock_acquire(a, b, c, d, e, f, g) + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/kallsyms.h b/tools/lib/lockdep/uinclude/linux/kallsyms.h new file mode 100644 index 00000000000..b0f2dbdf1a1 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/kallsyms.h @@ -0,0 +1,32 @@ +#ifndef _LIBLOCKDEP_LINUX_KALLSYMS_H_ +#define _LIBLOCKDEP_LINUX_KALLSYMS_H_ + +#include <linux/kernel.h> +#include <stdio.h> + +#define KSYM_NAME_LEN 128 + +struct module; + +static inline const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf) +{ + return NULL; +} + +#include <execinfo.h> +#include <stdlib.h> +static inline void print_ip_sym(unsigned long ip) +{ + char **name; + + name = backtrace_symbols((void **)&ip, 1); + + printf("%s\n", *name); + + free(name); +} + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/kern_levels.h b/tools/lib/lockdep/uinclude/linux/kern_levels.h new file mode 100644 index 00000000000..3b9bade2869 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/kern_levels.h @@ -0,0 +1,25 @@ +#ifndef __KERN_LEVELS_H__ +#define __KERN_LEVELS_H__ + +#define KERN_SOH "" /* ASCII Start Of Header */ +#define KERN_SOH_ASCII '' + +#define KERN_EMERG KERN_SOH "" /* system is unusable */ +#define KERN_ALERT KERN_SOH "" /* action must be taken immediately */ +#define KERN_CRIT KERN_SOH "" /* critical conditions */ +#define KERN_ERR KERN_SOH "" /* error conditions */ +#define KERN_WARNING KERN_SOH "" /* warning conditions */ +#define KERN_NOTICE KERN_SOH "" /* normal but significant condition */ +#define KERN_INFO KERN_SOH "" /* informational */ +#define KERN_DEBUG KERN_SOH "" /* debug-level messages */ + +#define KERN_DEFAULT KERN_SOH "" /* the default kernel loglevel */ + +/* + * Annotation for a "continued" line of log printout (only done after a + * line that had no enclosing \n). Only to be used by core/arch code + * during early bootup (a continued line is not SMP-safe otherwise). + */ +#define KERN_CONT "" + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h new file mode 100644 index 00000000000..a11e3c357be --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/kernel.h @@ -0,0 +1,44 @@ +#ifndef _LIBLOCKDEP_LINUX_KERNEL_H_ +#define _LIBLOCKDEP_LINUX_KERNEL_H_ + +#include <linux/export.h> +#include <linux/types.h> +#include <linux/rcu.h> +#include <linux/hardirq.h> +#include <linux/kern_levels.h> + +#ifndef container_of +#define container_of(ptr, type, member) ({ \ + const typeof(((type *)0)->member) * __mptr = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); }) +#endif + +#define max(x, y) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? 
_max1 : _max2; }) + +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#define WARN_ON(x) (x) +#define WARN_ON_ONCE(x) (x) +#define likely(x) (x) +#define WARN(x, y, z) (x) +#define uninitialized_var(x) x +#define __init +#define noinline +#define list_add_tail_rcu list_add_tail + +#ifndef CALLER_ADDR0 +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#endif + +#ifndef _RET_IP_ +#define _RET_IP_ CALLER_ADDR0 +#endif + +#ifndef _THIS_IP_ +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) +#endif + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/kmemcheck.h b/tools/lib/lockdep/uinclude/linux/kmemcheck.h new file mode 100644 index 00000000000..94d598bc6ab --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/kmemcheck.h @@ -0,0 +1,8 @@ +#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_ +#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_ + +static inline void kmemcheck_mark_initialized(void *address, unsigned int n) +{ +} + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/linkage.h b/tools/lib/lockdep/uinclude/linux/linkage.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/linkage.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/list.h b/tools/lib/lockdep/uinclude/linux/list.h new file mode 100644 index 00000000000..6e9ef31ed82 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/list.h @@ -0,0 +1 @@ +#include "../../../include/linux/list.h" diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h new file mode 100644 index 00000000000..c1552c28507 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/lockdep.h @@ -0,0 +1,58 @@ +#ifndef _LIBLOCKDEP_LOCKDEP_H_ +#define _LIBLOCKDEP_LOCKDEP_H_ + +#include <sys/prctl.h> +#include <sys/syscall.h> +#include <string.h> +#include <limits.h> +#include <linux/utsname.h> + + +#define MAX_LOCK_DEPTH 2000UL + +#define asmlinkage +#define __visible + +#include "../../../include/linux/lockdep.h" + +struct task_struct { + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[MAX_LOCK_DEPTH]; + gfp_t lockdep_reclaim_gfp; + int pid; + char comm[17]; +}; + +extern struct task_struct *__curr(void); + +#define current (__curr()) + +#define debug_locks_off() 1 +#define task_pid_nr(tsk) ((tsk)->pid) + +#define KSYM_NAME_LEN 128 +#define printk printf + +#define list_del_rcu list_del + +#define atomic_t unsigned long +#define atomic_inc(x) ((*(x))++) + +static struct new_utsname *init_utsname(void) +{ + static struct new_utsname n = (struct new_utsname) { + .release = "liblockdep", + .version = LIBLOCKDEP_VERSION, + }; + + return &n; +} + +#define print_tainted() "" +#define static_obj(x) 1 + +#define debug_show_all_locks() + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/module.h b/tools/lib/lockdep/uinclude/linux/module.h new file mode 100644 index 00000000000..09c7a7be8cc --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/module.h @@ -0,0 +1,6 @@ +#ifndef _LIBLOCKDEP_LINUX_MODULE_H_ +#define _LIBLOCKDEP_LINUX_MODULE_H_ + +#define module_param(name, type, perm) + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/mutex.h b/tools/lib/lockdep/uinclude/linux/mutex.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/mutex.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/poison.h b/tools/lib/lockdep/uinclude/linux/poison.h new 
file mode 100644 index 00000000000..0c27bdf1423 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/poison.h @@ -0,0 +1 @@ +#include "../../../include/linux/poison.h" diff --git a/tools/lib/lockdep/uinclude/linux/prefetch.h b/tools/lib/lockdep/uinclude/linux/prefetch.h new file mode 100644 index 00000000000..d73fe6f850a --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/prefetch.h @@ -0,0 +1,6 @@ +#ifndef _LIBLOCKDEP_LINUX_PREFETCH_H_ +#define _LIBLOCKDEP_LINUX_PREFETCH_H + +static inline void prefetch(void *a __attribute__((unused))) { } + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/proc_fs.h b/tools/lib/lockdep/uinclude/linux/proc_fs.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/proc_fs.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/rbtree.h b/tools/lib/lockdep/uinclude/linux/rbtree.h new file mode 100644 index 00000000000..965901db486 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/rbtree.h @@ -0,0 +1 @@ +#include "../../../include/linux/rbtree.h" diff --git a/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h b/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h new file mode 100644 index 00000000000..c3759477379 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h @@ -0,0 +1,2 @@ +#define __always_inline +#include "../../../include/linux/rbtree_augmented.h" diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h new file mode 100644 index 00000000000..042ee8e463c --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/rcu.h @@ -0,0 +1,21 @@ +#ifndef _LIBLOCKDEP_RCU_H_ +#define _LIBLOCKDEP_RCU_H_ + +int rcu_scheduler_active; + +static inline int rcu_lockdep_current_cpu_online(void) +{ + return 1; +} + +static inline int rcu_is_cpu_idle(void) +{ + return 1; +} + +static inline bool rcu_is_watching(void) +{ + return false; +} + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/seq_file.h b/tools/lib/lockdep/uinclude/linux/seq_file.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/seq_file.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/lockdep/uinclude/linux/spinlock.h b/tools/lib/lockdep/uinclude/linux/spinlock.h new file mode 100644 index 00000000000..68c1aa2bcba --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/spinlock.h @@ -0,0 +1,25 @@ +#ifndef _LIBLOCKDEP_SPINLOCK_H_ +#define _LIBLOCKDEP_SPINLOCK_H_ + +#include <pthread.h> +#include <stdbool.h> + +#define arch_spinlock_t pthread_mutex_t +#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER + +static inline void arch_spin_lock(arch_spinlock_t *mutex) +{ + pthread_mutex_lock(mutex); +} + +static inline void arch_spin_unlock(arch_spinlock_t *mutex) +{ + pthread_mutex_unlock(mutex); +} + +static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) +{ + return true; +} + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/stacktrace.h b/tools/lib/lockdep/uinclude/linux/stacktrace.h new file mode 100644 index 00000000000..39aecc6b19d --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/stacktrace.h @@ -0,0 +1,32 @@ +#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_ +#define _LIBLOCKDEP_LINUX_STACKTRACE_H_ + +#include <execinfo.h> + +struct stack_trace { + unsigned int nr_entries, max_entries; + unsigned long *entries; + int skip; +}; + +static inline void print_stack_trace(struct stack_trace *trace, int spaces) +{ + backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); +} + 
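The stack-trace stubs in this uinclude header emulate the kernel interface with glibc's backtrace(3) and backtrace_symbols_fd(3). A minimal, self-contained sketch of that mechanism (assuming glibc; the helper name below is illustrative, and symbol names only resolve when the binary is linked with -rdynamic, as the liblockdep CFLAGS above already pass):

#include <execinfo.h>

/* Roughly what the stubs amount to: capture the current return
 * addresses, then write symbolized frames to stdout (fd 1). */
static void show_current_stack(void)
{
	void *entries[64];                    /* plays the role of stack_trace.entries */
	int nr = backtrace(entries, 64);      /* capture, as save_stack_trace() does  */

	backtrace_symbols_fd(entries, nr, 1); /* print, as print_stack_trace() does   */
}

int main(void)
{
	show_current_stack();
	return 0;
}

Nothing beyond <execinfo.h> is needed, which is why the header can stay this small.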
+#define save_stack_trace(trace) \ + ((trace)->nr_entries = \ + backtrace((void **)(trace)->entries, (trace)->max_entries)) + +static inline int dump_stack(void) +{ + void *array[64]; + size_t size; + + size = backtrace(array, 64); + backtrace_symbols_fd(array, size, 1); + + return 0; +} + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/stringify.h b/tools/lib/lockdep/uinclude/linux/stringify.h new file mode 100644 index 00000000000..05dfcd1ac11 --- /dev/null +++ b/tools/lib/lockdep/uinclude/linux/stringify.h @@ -0,0 +1,7 @@ +#ifndef _LIBLOCKDEP_LINUX_STRINGIFY_H_ +#define _LIBLOCKDEP_LINUX_STRINGIFY_H_ + +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) + +#endif diff --git a/tools/lib/lockdep/uinclude/trace/events/lock.h b/tools/lib/lockdep/uinclude/trace/events/lock.h new file mode 100644 index 00000000000..fab00ff936d --- /dev/null +++ b/tools/lib/lockdep/uinclude/trace/events/lock.h @@ -0,0 +1,3 @@ + +/* empty file */ + diff --git a/tools/lib/symbol/kallsyms.c b/tools/lib/symbol/kallsyms.c new file mode 100644 index 00000000000..18bc271a4bb --- /dev/null +++ b/tools/lib/symbol/kallsyms.c @@ -0,0 +1,58 @@ +#include "symbol/kallsyms.h" +#include <stdio.h> +#include <stdlib.h> + +int kallsyms__parse(const char *filename, void *arg, + int (*process_symbol)(void *arg, const char *name, + char type, u64 start)) +{ + char *line = NULL; + size_t n; + int err = -1; + FILE *file = fopen(filename, "r"); + + if (file == NULL) + goto out_failure; + + err = 0; + + while (!feof(file)) { + u64 start; + int line_len, len; + char symbol_type; + char *symbol_name; + + line_len = getline(&line, &n, file); + if (line_len < 0 || !line) + break; + + line[--line_len] = '\0'; /* \n */ + + len = hex2u64(line, &start); + + len++; + if (len + 2 >= line_len) + continue; + + symbol_type = line[len]; + len += 2; + symbol_name = line + len; + len = line_len - len; + + if (len >= KSYM_NAME_LEN) { + err = -1; + break; + } + + err = process_symbol(arg, symbol_name, symbol_type, start); + if (err) + break; + } + + free(line); + fclose(file); + return err; + +out_failure: + return -1; +} diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h new file mode 100644 index 00000000000..6084f5e18b3 --- /dev/null +++ b/tools/lib/symbol/kallsyms.h @@ -0,0 +1,24 @@ +#ifndef __TOOLS_KALLSYMS_H_ +#define __TOOLS_KALLSYMS_H_ 1 + +#include <elf.h> +#include <linux/ctype.h> +#include <linux/types.h> + +#ifndef KSYM_NAME_LEN +#define KSYM_NAME_LEN 256 +#endif + +static inline u8 kallsyms2elf_type(char type) +{ + if (type == 'W') + return STB_WEAK; + + return isupper(type) ? 
STB_GLOBAL : STB_LOCAL; +} + +int kallsyms__parse(const char *filename, void *arg, + int (*process_symbol)(void *arg, const char *name, + char type, u64 start)); + +#endif /* __TOOLS_KALLSYMS_H_ */ diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index 04d959fa022..005c9cc0693 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile @@ -39,15 +39,36 @@ bindir_relative = bin bindir = $(prefix)/$(bindir_relative) man_dir = $(prefix)/share/man man_dir_SQ = '$(subst ','\'',$(man_dir))' -html_install = $(prefix)/share/kernelshark/html -html_install_SQ = '$(subst ','\'',$(html_install))' -img_install = $(prefix)/share/kernelshark/html/images -img_install_SQ = '$(subst ','\'',$(img_install))' -export man_dir man_dir_SQ html_install html_install_SQ INSTALL -export img_install img_install_SQ +export man_dir man_dir_SQ INSTALL export DESTDIR DESTDIR_SQ +set_plugin_dir := 1 + +# Set plugin_dir to preffered global plugin location +# If we install under $HOME directory we go under +# $(HOME)/.traceevent/plugins +# +# We dont set PLUGIN_DIR in case we install under $HOME +# directory, because by default the code looks under: +# $(HOME)/.traceevent/plugins by default. +# +ifeq ($(plugin_dir),) +ifeq ($(prefix),$(HOME)) +override plugin_dir = $(HOME)/.traceevent/plugins +set_plugin_dir := 0 +else +override plugin_dir = $(prefix)/lib/traceevent/plugins +endif +endif + +ifeq ($(set_plugin_dir),1) +PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)" +PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))' +endif + +include $(if $(BUILD_SRC),$(BUILD_SRC)/)../../scripts/Makefile.include + # copy a bit from Linux kbuild ifeq ("$(origin V)", "command line") @@ -62,24 +83,16 @@ ifeq ("$(origin O)", "command line") endif ifeq ($(BUILD_SRC),) -ifneq ($(BUILD_OUTPUT),) +ifneq ($(OUTPUT),) define build_output - $(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \ - BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1 + $(if $(VERBOSE:1=),@)+$(MAKE) -C $(OUTPUT) \ + BUILD_SRC=$(CURDIR)/ -f $(CURDIR)/Makefile $1 endef -saved-output := $(BUILD_OUTPUT) -BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd) -$(if $(BUILD_OUTPUT),, \ - $(error output directory "$(saved-output)" does not exist)) - all: sub-make -gui: force - $(call build_output, all_cmd) - -$(filter-out gui,$(MAKECMDGOALS)): sub-make +$(MAKECMDGOALS): sub-make sub-make: force $(call build_output, $(MAKECMDGOALS)) @@ -88,7 +101,7 @@ sub-make: force # Leave processing to above invocation of make skip-makefile := 1 -endif # BUILD_OUTPUT +endif # OUTPUT endif # BUILD_SRC # We process the rest of the Makefile if this is the final invocation of make @@ -104,6 +117,7 @@ export prefix bindir src obj # Shell quotes bindir_SQ = $(subst ','\'',$(bindir)) bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) +plugin_dir_SQ = $(subst ','\'',$(plugin_dir)) LIB_FILE = libtraceevent.a libtraceevent.so @@ -122,7 +136,7 @@ export Q VERBOSE EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) -INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES) +INCLUDES = -I. 
-I $(srctree)/../../include $(CONFIG_INCLUDES) # Set compile option CFLAGS if not set elsewhere CFLAGS ?= -g -Wall @@ -133,41 +147,14 @@ override CFLAGS += $(udis86-flags) -D_GNU_SOURCE ifeq ($(VERBOSE),1) Q = - print_compile = - print_app_build = - print_fpic_compile = - print_shared_lib_compile = - print_plugin_obj_compile = - print_plugin_build = - print_install = else Q = @ - print_compile = echo ' CC '$(OBJ); - print_app_build = echo ' BUILD '$(OBJ); - print_fpic_compile = echo ' CC FPIC '$(OBJ); - print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ); - print_plugin_obj_compile = echo ' CC PLUGIN OBJ '$(OBJ); - print_plugin_build = echo ' CC PLUGI '$(OBJ); - print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ); - print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2'; endif -do_fpic_compile = \ - ($(print_fpic_compile) \ - $(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@) - -do_app_build = \ - ($(print_app_build) \ - $(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS)) - do_compile_shared_library = \ ($(print_shared_lib_compile) \ $(CC) --shared $^ -o $@) -do_compile_plugin_obj = \ - ($(print_plugin_obj_compile) \ - $(CC) -c $(CFLAGS) -fPIC -o $@ $<) - do_plugin_build = \ ($(print_plugin_build) \ $(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<) @@ -177,22 +164,37 @@ do_build_static_lib = \ $(RM) $@; $(AR) rcs $@ $^) -define do_compile - $(print_compile) \ - $(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@; -endef +do_compile = $(QUIET_CC)$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@; $(obj)/%.o: $(src)/%.c - $(Q)$(call do_compile) + $(call do_compile) %.o: $(src)/%.c - $(Q)$(call do_compile) + $(call do_compile) + +PEVENT_LIB_OBJS = event-parse.o +PEVENT_LIB_OBJS += event-plugin.o +PEVENT_LIB_OBJS += trace-seq.o +PEVENT_LIB_OBJS += parse-filter.o +PEVENT_LIB_OBJS += parse-utils.o +PEVENT_LIB_OBJS += kbuffer-parse.o + +PLUGIN_OBJS = plugin_jbd2.o +PLUGIN_OBJS += plugin_hrtimer.o +PLUGIN_OBJS += plugin_kmem.o +PLUGIN_OBJS += plugin_kvm.o +PLUGIN_OBJS += plugin_mac80211.o +PLUGIN_OBJS += plugin_sched_switch.o +PLUGIN_OBJS += plugin_function.o +PLUGIN_OBJS += plugin_xen.o +PLUGIN_OBJS += plugin_scsi.o +PLUGIN_OBJS += plugin_cfg80211.o -PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o +PLUGINS := $(PLUGIN_OBJS:.o=.so) -ALL_OBJS = $(PEVENT_LIB_OBJS) +ALL_OBJS = $(PEVENT_LIB_OBJS) $(PLUGIN_OBJS) -CMD_TARGETS = $(LIB_FILE) +CMD_TARGETS = $(LIB_FILE) $(PLUGINS) TARGETS = $(CMD_TARGETS) @@ -202,32 +204,40 @@ all: all_cmd all_cmd: $(CMD_TARGETS) libtraceevent.so: $(PEVENT_LIB_OBJS) - $(Q)$(do_compile_shared_library) + $(QUIET_LINK)$(CC) --shared $^ -o $@ libtraceevent.a: $(PEVENT_LIB_OBJS) - $(Q)$(do_build_static_lib) + $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ + +plugins: $(PLUGINS) $(PEVENT_LIB_OBJS): %.o: $(src)/%.c TRACEEVENT-CFLAGS - $(Q)$(do_fpic_compile) + $(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@ + +$(PLUGIN_OBJS): %.o : $(src)/%.c + $(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) -fPIC -o $@ $< + +$(PLUGINS): %.so: %.o + $(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $< define make_version.h - (echo '/* This file is automatically generated. Do not modify. */'; \ - echo \#define VERSION_CODE $(shell \ - expr $(VERSION) \* 256 + $(PATCHLEVEL)); \ - echo '#define EXTRAVERSION ' $(EXTRAVERSION); \ - echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \ - echo '#define FILE_VERSION '$(FILE_VERSION); \ - ) > $1 + (echo '/* This file is automatically generated. Do not modify. 
*/'; \ + echo \#define VERSION_CODE $(shell \ + expr $(VERSION) \* 256 + $(PATCHLEVEL)); \ + echo '#define EXTRAVERSION ' $(EXTRAVERSION); \ + echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \ + echo '#define FILE_VERSION '$(FILE_VERSION); \ + ) > $1 endef define update_version.h - ($(call make_version.h, $@.tmp); \ - if [ -r $@ ] && cmp -s $@ $@.tmp; then \ - rm -f $@.tmp; \ - else \ - echo ' UPDATE $@'; \ - mv -f $@.tmp $@; \ - fi); + ($(call make_version.h, $@.tmp); \ + if [ -r $@ ] && cmp -s $@ $@.tmp; then \ + rm -f $@.tmp; \ + else \ + echo ' UPDATE $@'; \ + mv -f $@.tmp $@; \ + fi); endef ep_version.h: force @@ -236,13 +246,13 @@ ep_version.h: force VERSION_FILES = ep_version.h define update_dir - (echo $1 > $@.tmp; \ - if [ -r $@ ] && cmp -s $@ $@.tmp; then \ - rm -f $@.tmp; \ - else \ - echo ' UPDATE $@'; \ - mv -f $@.tmp $@; \ - fi); + (echo $1 > $@.tmp; \ + if [ -r $@ ] && cmp -s $@ $@.tmp; then \ + rm -f $@.tmp; \ + else \ + echo ' UPDATE $@'; \ + mv -f $@.tmp $@; \ + fi); endef ## make deps @@ -252,15 +262,12 @@ all_deps := $(all_objs:%.o=.%.d) # let .d file also depends on the source and header files define check_deps - @set -e; $(RM) $@; \ - $(CC) -M $(CFLAGS) $< > $@.$$$$; \ - sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \ - $(RM) $@.$$$$ + @set -e; $(RM) $@; \ + $(CC) -MM $(CFLAGS) $< > $@.$$$$; \ + sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \ + $(RM) $@.$$$$ endef -$(gui_deps): ks_version.h -$(non_gui_deps): tc_version.h - $(all_deps): .%.d: $(src)/%.c $(Q)$(call check_deps) @@ -278,7 +285,7 @@ TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE) TRACEEVENT-CFLAGS: force @FLAGS='$(TRACK_CFLAGS)'; \ if test x"$$FLAGS" != x"`cat TRACEEVENT-CFLAGS 2>/dev/null`" ; then \ - echo 1>&2 " * new build flags or cross compiler"; \ + echo 1>&2 " FLAGS: * new build flags or cross compiler"; \ echo "$$FLAGS" >TRACEEVENT-CFLAGS; \ fi @@ -293,27 +300,41 @@ TAGS: force --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/' define do_install - $(print_install) \ if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ fi; \ $(INSTALL) $1 '$(DESTDIR_SQ)$2' endef -install_lib: all_cmd install_plugins install_python - $(Q)$(call do_install,$(LIB_FILE),$(bindir_SQ)) +define do_install_plugins + for plugin in $1; do \ + $(call do_install,$$plugin,$(plugin_dir_SQ)); \ + done +endef + +install_lib: all_cmd install_plugins + $(call QUIET_INSTALL, $(LIB_FILE)) \ + $(call do_install,$(LIB_FILE),$(bindir_SQ)) + +install_plugins: $(PLUGINS) + $(call QUIET_INSTALL, trace_plugins) \ + $(call do_install_plugins, $(PLUGINS)) install: install_lib clean: - $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d - $(RM) TRACEEVENT-CFLAGS tags TAGS + $(call QUIET_CLEAN, libtraceevent) \ + $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \ + $(RM) TRACEEVENT-CFLAGS tags TAGS endif # skip-makefile -PHONY += force +PHONY += force plugins force: +plugins: + @echo > /dev/null + # Declare the contents of the .PHONY variable as phony. We keep that # information in a variable so we can use it in if_changed and friends. .PHONY: $(PHONY) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index f2989c525e4..93825a17dcc 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see <http://www.gnu.org/licenses> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -51,6 +50,18 @@ static int show_warning = 1; warning(fmt, ##__VA_ARGS__); \ } while (0) +#define do_warning_event(event, fmt, ...) \ + do { \ + if (!show_warning) \ + continue; \ + \ + if (event) \ + warning("[%s:%s] " fmt, event->system, \ + event->name, ##__VA_ARGS__); \ + else \ + warning(fmt, ##__VA_ARGS__); \ + } while (0) + static void init_input_buf(const char *buf, unsigned long long size) { input_buf = buf; @@ -174,7 +185,7 @@ static int cmdline_init(struct pevent *pevent) return 0; } -static char *find_cmdline(struct pevent *pevent, int pid) +static const char *find_cmdline(struct pevent *pevent, int pid) { const struct cmdline *comm; struct cmdline key; @@ -306,6 +317,11 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid) return 0; } +void pevent_register_trace_clock(struct pevent *pevent, char *trace_clock) +{ + pevent->trace_clock = trace_clock; +} + struct func_map { unsigned long long addr; char *func; @@ -600,10 +616,11 @@ find_printk(struct pevent *pevent, unsigned long long addr) * This registers a string by the address it was stored in the kernel. * The @fmt passed in is duplicated. */ -int pevent_register_print_string(struct pevent *pevent, char *fmt, +int pevent_register_print_string(struct pevent *pevent, const char *fmt, unsigned long long addr) { struct printk_list *item = malloc(sizeof(*item)); + char *p; if (!item) return -1; @@ -611,10 +628,21 @@ int pevent_register_print_string(struct pevent *pevent, char *fmt, item->next = pevent->printklist; item->addr = addr; + /* Strip off quotes and '\n' from the end */ + if (fmt[0] == '"') + fmt++; item->printk = strdup(fmt); if (!item->printk) goto out_free; + p = item->printk + strlen(item->printk) - 1; + if (*p == '"') + *p = 0; + + p -= 2; + if (strcmp(p, "\\n") == 0) + *p = 0; + pevent->printklist = item; pevent->printk_count++; @@ -737,6 +765,9 @@ static void free_arg(struct print_arg *arg) case PRINT_BSTRING: free(arg->string.string); break; + case PRINT_BITMASK: + free(arg->bitmask.bitmask); + break; case PRINT_DYNAMIC_ARRAY: free(arg->dynarray.index); break; @@ -1224,6 +1255,34 @@ static int field_is_long(struct format_field *field) return 0; } +static unsigned int type_size(const char *name) +{ + /* This covers all FIELD_IS_STRING types. 
*/ + static struct { + const char *type; + unsigned int size; + } table[] = { + { "u8", 1 }, + { "u16", 2 }, + { "u32", 4 }, + { "u64", 8 }, + { "s8", 1 }, + { "s16", 2 }, + { "s32", 4 }, + { "s64", 8 }, + { "char", 1 }, + { }, + }; + int i; + + for (i = 0; table[i].type; i++) { + if (!strcmp(table[i].type, name)) + return table[i].size; + } + + return 0; +} + static int event_read_fields(struct event_format *event, struct format_field **fields) { struct format_field *field = NULL; @@ -1233,6 +1292,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f int count = 0; do { + unsigned int size_dynamic = 0; + type = read_token(&token); if (type == EVENT_NEWLINE) { free_token(token); @@ -1309,7 +1370,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f } if (!field->type) { - do_warning("%s: no type found", __func__); + do_warning_event(event, "%s: no type found", __func__); goto fail; } field->name = last_token; @@ -1356,7 +1417,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f free_token(token); type = read_token(&token); if (type == EVENT_NONE) { - do_warning("failed to find token"); + do_warning_event(event, "failed to find token"); goto fail; } } @@ -1391,6 +1452,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f field->type = new_type; strcat(field->type, " "); strcat(field->type, field->name); + size_dynamic = type_size(field->name); free_token(field->name); strcat(field->type, brackets); field->name = token; @@ -1463,7 +1525,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f if (read_expect_type(EVENT_ITEM, &token)) goto fail; - /* add signed type */ + if (strtoul(token, NULL, 0)) + field->flags |= FIELD_IS_SIGNED; free_token(token); if (read_expected(EVENT_OP, ";") < 0) @@ -1478,10 +1541,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f if (field->flags & FIELD_IS_ARRAY) { if (field->arraylen) field->elementsize = field->size / field->arraylen; + else if (field->flags & FIELD_IS_DYNAMIC) + field->elementsize = size_dynamic; else if (field->flags & FIELD_IS_STRING) field->elementsize = 1; - else - field->elementsize = event->pevent->long_size; + else if (field->flags & FIELD_IS_LONG) + field->elementsize = event->pevent ? + event->pevent->long_size : + sizeof(long); } else field->elementsize = field->size; @@ -1554,6 +1621,24 @@ process_arg(struct event_format *event, struct print_arg *arg, char **tok) static enum event_type process_op(struct event_format *event, struct print_arg *arg, char **tok); +/* + * For __print_symbolic() and __print_flags, we need to completely + * evaluate the first argument, which defines what to print next. 
+ */ +static enum event_type +process_field_arg(struct event_format *event, struct print_arg *arg, char **tok) +{ + enum event_type type; + + type = process_arg(event, arg, tok); + + while (type == EVENT_OP) { + type = process_op(event, arg, tok); + } + + return type; +} + static enum event_type process_cond(struct event_format *event, struct print_arg *top, char **tok) { @@ -1566,7 +1651,7 @@ process_cond(struct event_format *event, struct print_arg *top, char **tok) right = alloc_arg(); if (!arg || !left || !right) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); /* arg will be freed at out_free */ free_arg(left); free_arg(right); @@ -1616,7 +1701,7 @@ process_array(struct event_format *event, struct print_arg *top, char **tok) arg = alloc_arg(); if (!arg) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); /* '*tok' is set to top->op.op. No need to free. */ *tok = NULL; return EVENT_ERROR; @@ -1722,7 +1807,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) if (arg->type == PRINT_OP && !arg->op.left) { /* handle single op */ if (token[1]) { - do_warning("bad op token %s", token); + do_warning_event(event, "bad op token %s", token); goto out_free; } switch (token[0]) { @@ -1732,7 +1817,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) case '-': break; default: - do_warning("bad op token %s", token); + do_warning_event(event, "bad op token %s", token); goto out_free; } @@ -1785,6 +1870,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) strcmp(token, "/") == 0 || strcmp(token, "<") == 0 || strcmp(token, ">") == 0 || + strcmp(token, "<=") == 0 || + strcmp(token, ">=") == 0 || strcmp(token, "==") == 0 || strcmp(token, "!=") == 0) { @@ -1816,7 +1903,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) char *new_atom; if (left->type != PRINT_ATOM) { - do_warning("bad pointer type"); + do_warning_event(event, "bad pointer type"); goto out_free; } new_atom = realloc(left->atom.atom, @@ -1858,7 +1945,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) type = process_array(event, arg, tok); } else { - do_warning("unknown op '%s'", token); + do_warning_event(event, "unknown op '%s'", token); event->flags |= EVENT_FL_FAILED; /* the arg is now the left side */ goto out_free; @@ -1879,7 +1966,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) return type; out_warn_free: - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); out_free: free_token(token); *tok = NULL; @@ -2184,6 +2271,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val) case PRINT_FIELD ... PRINT_SYMBOL: case PRINT_STRING: case PRINT_BSTRING: + case PRINT_BITMASK: default: do_warning("invalid eval type %d", arg->type); ret = 0; @@ -2212,6 +2300,7 @@ static char *arg_eval (struct print_arg *arg) case PRINT_FIELD ... 
PRINT_SYMBOL: case PRINT_STRING: case PRINT_BSTRING: + case PRINT_BITMASK: default: do_warning("invalid eval type %d", arg->type); break; @@ -2313,11 +2402,11 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) field = alloc_arg(); if (!field) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); goto out_free; } - type = process_arg(event, field, &token); + type = process_field_arg(event, field, &token); /* Handle operations in the first argument */ while (type == EVENT_OP) @@ -2366,11 +2455,12 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) field = alloc_arg(); if (!field) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); goto out_free; } - type = process_arg(event, field, &token); + type = process_field_arg(event, field, &token); + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free_field; @@ -2404,7 +2494,7 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok) field = alloc_arg(); if (!field) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); goto out_free; } @@ -2419,7 +2509,7 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok) field = alloc_arg(); if (!field) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", __func__); *tok = NULL; return EVENT_ERROR; } @@ -2481,8 +2571,8 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char ** free_token(token); arg = alloc_arg(); - if (!field) { - do_warning("%s: not enough memory!", __func__); + if (!arg) { + do_warning_event(event, "%s: not enough memory!", __func__); *tok = NULL; return EVENT_ERROR; } @@ -2541,13 +2631,14 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok) /* prevous must be an atom */ if (arg->type != PRINT_ATOM) { - do_warning("previous needed to be PRINT_ATOM"); + do_warning_event(event, "previous needed to be PRINT_ATOM"); goto out_free; } item_arg = alloc_arg(); if (!item_arg) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", + __func__); goto out_free; } @@ -2597,6 +2688,35 @@ process_str(struct event_format *event __maybe_unused, struct print_arg *arg, return EVENT_ERROR; } +static enum event_type +process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg, + char **tok) +{ + enum event_type type; + char *token; + + if (read_expect_type(EVENT_ITEM, &token) < 0) + goto out_free; + + arg->type = PRINT_BITMASK; + arg->bitmask.bitmask = token; + arg->bitmask.offset = -1; + + if (read_expected(EVENT_DELIM, ")") < 0) + goto out_err; + + type = read_token(&token); + *tok = token; + + return type; + + out_free: + free_token(token); + out_err: + *tok = NULL; + return EVENT_ERROR; +} + static struct pevent_function_handler * find_func_handler(struct pevent *pevent, char *func_name) { @@ -2637,7 +2757,6 @@ process_func_handler(struct event_format *event, struct pevent_function_handler struct print_arg *farg; enum event_type type; char *token; - char *test; int i; arg->type = PRINT_FUNC; @@ -2649,20 +2768,27 @@ process_func_handler(struct event_format *event, struct pevent_function_handler for (i = 0; i < func->nr_args; i++) { farg = alloc_arg(); if (!farg) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not 
enough memory!", + __func__); return EVENT_ERROR; } type = process_arg(event, farg, &token); - if (i < (func->nr_args - 1)) - test = ","; - else - test = ")"; - - if (test_type_token(type, token, EVENT_DELIM, test)) { - free_arg(farg); - free_token(token); - return EVENT_ERROR; + if (i < (func->nr_args - 1)) { + if (type != EVENT_DELIM || strcmp(token, ",") != 0) { + do_warning_event(event, + "Error: function '%s()' expects %d arguments but event %s only uses %d", + func->name, func->nr_args, + event->name, i + 1); + goto err; + } + } else { + if (type != EVENT_DELIM || strcmp(token, ")") != 0) { + do_warning_event(event, + "Error: function '%s()' only expects %d arguments but event %s has more", + func->name, func->nr_args, event->name); + goto err; + } } *next_arg = farg; @@ -2674,6 +2800,11 @@ process_func_handler(struct event_format *event, struct pevent_function_handler *tok = token; return type; + +err: + free_arg(farg); + free_token(token); + return EVENT_ERROR; } static enum event_type @@ -2700,6 +2831,10 @@ process_function(struct event_format *event, struct print_arg *arg, free_token(token); return process_str(event, arg, tok); } + if (strcmp(token, "__get_bitmask") == 0) { + free_token(token); + return process_bitmask(event, arg, tok); + } if (strcmp(token, "__get_dynamic_array") == 0) { free_token(token); return process_dynamic_array(event, arg, tok); @@ -2711,7 +2846,7 @@ process_function(struct event_format *event, struct print_arg *arg, return process_func_handler(event, func, arg, tok); } - do_warning("function %s not defined", token); + do_warning_event(event, "function %s not defined", token); free_token(token); return EVENT_ERROR; } @@ -2797,7 +2932,7 @@ process_arg_token(struct event_format *event, struct print_arg *arg, case EVENT_ERROR ... EVENT_NEWLINE: default: - do_warning("unexpected type %d", type); + do_warning_event(event, "unexpected type %d", type); return EVENT_ERROR; } *tok = token; @@ -2820,7 +2955,8 @@ static int event_read_print_args(struct event_format *event, struct print_arg ** arg = alloc_arg(); if (!arg) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", + __func__); return -1; } @@ -3226,6 +3362,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg return eval_type(val, arg, 0); case PRINT_STRING: case PRINT_BSTRING: + case PRINT_BITMASK: return 0; case PRINT_FUNC: { struct trace_seq s; @@ -3381,17 +3518,31 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg goto out_warning_op; } break; + case PRINT_DYNAMIC_ARRAY: + /* Without [], we pass the address to the dynamic data */ + offset = pevent_read_number(pevent, + data + arg->dynarray.field->offset, + arg->dynarray.field->size); + /* + * The actual length of the dynamic array is stored + * in the top half of the field, and the offset + * is in the bottom half of the 32 bit field. 
+ */ + offset &= 0xffff; + val = (unsigned long long)((unsigned long)data + offset); + break; default: /* not sure what to do there */ return 0; } return val; out_warning_op: - do_warning("%s: unknown op '%s'", __func__, arg->op.op); + do_warning_event(event, "%s: unknown op '%s'", __func__, arg->op.op); return 0; out_warning_field: - do_warning("%s: field %s not found", __func__, arg->field.name); + do_warning_event(event, "%s: field %s not found", + __func__, arg->field.name); return 0; } @@ -3444,6 +3595,60 @@ static void print_str_to_seq(struct trace_seq *s, const char *format, trace_seq_printf(s, format, str); } +static void print_bitmask_to_seq(struct pevent *pevent, + struct trace_seq *s, const char *format, + int len_arg, const void *data, int size) +{ + int nr_bits = size * 8; + int str_size = (nr_bits + 3) / 4; + int len = 0; + char buf[3]; + char *str; + int index; + int i; + + /* + * The kernel likes to put in commas every 32 bits, we + * can do the same. + */ + str_size += (nr_bits - 1) / 32; + + str = malloc(str_size + 1); + if (!str) { + do_warning("%s: not enough memory!", __func__); + return; + } + str[str_size] = 0; + + /* Start out with -2 for the two chars per byte */ + for (i = str_size - 2; i >= 0; i -= 2) { + /* + * data points to a bit mask of size bytes. + * In the kernel, this is an array of long words, thus + * endianess is very important. + */ + if (pevent->file_bigendian) + index = size - (len + 1); + else + index = len; + + snprintf(buf, 3, "%02x", *((unsigned char *)data + index)); + memcpy(str + i, buf, 2); + len++; + if (!(len & 3) && i > 0) { + i--; + str[i] = ','; + } + } + + if (len_arg >= 0) + trace_seq_printf(s, format, len_arg, str); + else + trace_seq_printf(s, format, str); + + free(str); +} + static void print_str_arg(struct trace_seq *s, void *data, int size, struct event_format *event, const char *format, int len_arg, struct print_arg *arg) @@ -3451,6 +3656,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, struct pevent *pevent = event->pevent; struct print_flag_sym *flag; struct format_field *field; + struct printk_map *printk; unsigned long long val, fval; unsigned long addr; char *str; @@ -3486,12 +3692,18 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, if (!(field->flags & FIELD_IS_ARRAY) && field->size == pevent->long_size) { addr = *(unsigned long *)(data + field->offset); - trace_seq_printf(s, "%lx", addr); + /* Check if it matches a print format */ + printk = find_printk(pevent, addr); + if (printk) + trace_seq_puts(s, printk->printk); + else + trace_seq_printf(s, "%lx", addr); break; } str = malloc(len + 1); if (!str) { - do_warning("%s: not enough memory!", __func__); + do_warning_event(event, "%s: not enough memory!", + __func__); return; } memcpy(str, data + field->offset, len); @@ -3528,15 +3740,23 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, } break; case PRINT_HEX: - field = arg->hex.field->field.field; - if (!field) { - str = arg->hex.field->field.name; - field = pevent_find_any_field(event, str); - if (!field) - goto out_warning_field; - arg->hex.field->field.field = field; + if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) { + unsigned long offset; + offset = pevent_read_number(pevent, + data + arg->hex.field->dynarray.field->offset, + arg->hex.field->dynarray.field->size); + hex = data + (offset & 0xffff); + } else { + field = arg->hex.field->field.field; + if (!field) { + str = arg->hex.field->field.name; + field = pevent_find_any_field(event, str); + 
if (!field) + goto out_warning_field; + arg->hex.field->field.field = field; + } + hex = data + field->offset; } - hex = data + field->offset; len = eval_num_arg(data, size, event, arg->hex.size); for (i = 0; i < len; i++) { if (i) @@ -3564,6 +3784,23 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, case PRINT_BSTRING: print_str_to_seq(s, format, len_arg, arg->string.string); break; + case PRINT_BITMASK: { + int bitmask_offset; + int bitmask_size; + + if (arg->bitmask.offset == -1) { + struct format_field *f; + + f = pevent_find_any_field(event, arg->bitmask.bitmask); + arg->bitmask.offset = f->offset; + } + bitmask_offset = data2host4(pevent, data + arg->bitmask.offset); + bitmask_size = bitmask_offset >> 16; + bitmask_offset &= 0xffff; + print_bitmask_to_seq(pevent, s, format, len_arg, + data + bitmask_offset, bitmask_size); + break; + } case PRINT_OP: /* * The only op for string should be ? : @@ -3589,7 +3826,8 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, return; out_warning_field: - do_warning("%s: field %s not found", __func__, arg->field.name); + do_warning_event(event, "%s: field %s not found", + __func__, arg->field.name); } static unsigned long long @@ -3634,14 +3872,16 @@ process_defined_func(struct trace_seq *s, void *data, int size, trace_seq_terminate(&str); string = malloc(sizeof(*string)); if (!string) { - do_warning("%s(%d): malloc str", __func__, __LINE__); + do_warning_event(event, "%s(%d): malloc str", + __func__, __LINE__); goto out_free; } string->next = strings; string->str = strdup(str.buffer); if (!string->str) { free(string); - do_warning("%s(%d): malloc str", __func__, __LINE__); + do_warning_event(event, "%s(%d): malloc str", + __func__, __LINE__); goto out_free; } args[i] = (uintptr_t)string->str; @@ -3653,7 +3893,7 @@ process_defined_func(struct trace_seq *s, void *data, int size, * Something went totally wrong, this is not * an input error, something in this code broke. 
*/ - do_warning("Unexpected end of arguments\n"); + do_warning_event(event, "Unexpected end of arguments\n"); goto out_free; } farg = farg->next; @@ -3703,12 +3943,12 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc if (!field) { field = pevent_find_field(event, "buf"); if (!field) { - do_warning("can't find buffer field for binary printk"); + do_warning_event(event, "can't find buffer field for binary printk"); return NULL; } ip_field = pevent_find_field(event, "ip"); if (!ip_field) { - do_warning("can't find ip field for binary printk"); + do_warning_event(event, "can't find ip field for binary printk"); return NULL; } pevent->bprint_buf_field = field; @@ -3722,7 +3962,8 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc */ args = alloc_arg(); if (!args) { - do_warning("%s(%d): not enough memory!", __func__, __LINE__); + do_warning_event(event, "%s(%d): not enough memory!", + __func__, __LINE__); return NULL; } arg = args; @@ -3734,8 +3975,8 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc if (asprintf(&arg->atom.atom, "%lld", ip) < 0) goto out_free; - /* skip the first "%pf : " */ - for (ptr = fmt + 6, bptr = data + field->offset; + /* skip the first "%pf: " */ + for (ptr = fmt + 5, bptr = data + field->offset; bptr < data + size && *ptr; ptr++) { int ls = 0; @@ -3788,7 +4029,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc bptr += vsize; arg = alloc_arg(); if (!arg) { - do_warning("%s(%d): not enough memory!", + do_warning_event(event, "%s(%d): not enough memory!", __func__, __LINE__); goto out_free; } @@ -3811,7 +4052,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc case 's': arg = alloc_arg(); if (!arg) { - do_warning("%s(%d): not enough memory!", + do_warning_event(event, "%s(%d): not enough memory!", __func__, __LINE__); goto out_free; } @@ -3845,14 +4086,13 @@ get_bprint_format(void *data, int size __maybe_unused, struct format_field *field; struct printk_map *printk; char *format; - char *p; field = pevent->bprint_fmt_field; if (!field) { field = pevent_find_field(event, "fmt"); if (!field) { - do_warning("can't find format field for binary printk"); + do_warning_event(event, "can't find format field for binary printk"); return NULL; } pevent->bprint_fmt_field = field; @@ -3862,25 +4102,13 @@ get_bprint_format(void *data, int size __maybe_unused, printk = find_printk(pevent, addr); if (!printk) { - if (asprintf(&format, "%%pf : (NO FORMAT FOUND at %llx)\n", addr) < 0) + if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0) return NULL; return format; } - p = printk->printk; - /* Remove any quotes. 
*/ - if (*p == '"') - p++; - if (asprintf(&format, "%s : %s", "%pf", p) < 0) + if (asprintf(&format, "%s: %s", "%pf", printk->printk) < 0) return NULL; - /* remove ending quotes and new line since we will add one too */ - p = format + strlen(format) - 1; - if (*p == '"') - *p = 0; - - p -= 2; - if (strcmp(p, "\\n") == 0) - *p = 0; return format; } @@ -3889,7 +4117,7 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, struct event_format *event, struct print_arg *arg) { unsigned char *buf; - char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"; + const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"; if (arg->type == PRINT_FUNC) { process_defined_func(s, data, size, event, arg); @@ -3908,8 +4136,8 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, arg->field.field = pevent_find_any_field(event, arg->field.name); if (!arg->field.field) { - do_warning("%s: field %s not found", - __func__, arg->field.name); + do_warning_event(event, "%s: field %s not found", + __func__, arg->field.name); return; } } @@ -3926,12 +4154,13 @@ static int is_printable_array(char *p, unsigned int len) unsigned int i; for (i = 0; i < len && p[i]; i++) - if (!isprint(p[i])) + if (!isprint(p[i]) && !isspace(p[i])) return 0; return 1; } -static void print_event_fields(struct trace_seq *s, void *data, int size, +static void print_event_fields(struct trace_seq *s, void *data, + int size __maybe_unused, struct event_format *event) { struct format_field *field; @@ -4011,6 +4240,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event unsigned long long val; struct func_map *func; const char *saveptr; + struct trace_seq p; char *bprint_fmt = NULL; char format[32]; int show_func; @@ -4079,7 +4309,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event case '*': /* The argument is the length. 
*/ if (!arg) { - do_warning("no argument match"); + do_warning_event(event, "no argument match"); event->flags |= EVENT_FL_FAILED; goto out_failed; } @@ -4116,7 +4346,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event case 'X': case 'u': if (!arg) { - do_warning("no argument match"); + do_warning_event(event, "no argument match"); event->flags |= EVENT_FL_FAILED; goto out_failed; } @@ -4126,7 +4356,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event /* should never happen */ if (len > 31) { - do_warning("bad format!"); + do_warning_event(event, "bad format!"); event->flags |= EVENT_FL_FAILED; len = 31; } @@ -4193,13 +4423,13 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event trace_seq_printf(s, format, (long long)val); break; default: - do_warning("bad count (%d)", ls); + do_warning_event(event, "bad count (%d)", ls); event->flags |= EVENT_FL_FAILED; } break; case 's': if (!arg) { - do_warning("no matching argument"); + do_warning_event(event, "no matching argument"); event->flags |= EVENT_FL_FAILED; goto out_failed; } @@ -4209,7 +4439,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event /* should never happen */ if (len > 31) { - do_warning("bad format!"); + do_warning_event(event, "bad format!"); event->flags |= EVENT_FL_FAILED; len = 31; } @@ -4218,8 +4448,13 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event format[len] = 0; if (!len_as_arg) len_arg = -1; - print_str_arg(s, data, size, event, + /* Use helper trace_seq */ + trace_seq_init(&p); + print_str_arg(&p, data, size, event, format, len_arg, arg); + trace_seq_terminate(&p); + trace_seq_puts(s, p.buffer); + trace_seq_destroy(&p); arg = arg->next; break; default: @@ -4390,11 +4625,11 @@ void pevent_event_info(struct trace_seq *s, struct event_format *event, { int print_pretty = 1; - if (event->pevent->print_raw) + if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW)) print_event_fields(s, record->data, record->size, event); else { - if (event->handler) + if (event->handler && !(event->flags & EVENT_FL_NOHANDLE)) print_pretty = event->handler(s, record, event, event->context); @@ -4405,10 +4640,23 @@ void pevent_event_info(struct trace_seq *s, struct event_format *event, trace_seq_terminate(s); } +static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock) +{ + if (!use_trace_clock) + return true; + + if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global") + || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf")) + return true; + + /* trace_clock is setting in tsc or counter mode */ + return false; +} + void pevent_print_event(struct pevent *pevent, struct trace_seq *s, - struct pevent_record *record) + struct pevent_record *record, bool use_trace_clock) { - static char *spaces = " "; /* 20 spaces */ + static const char *spaces = " "; /* 20 spaces */ struct event_format *event; unsigned long secs; unsigned long usecs; @@ -4419,9 +4667,14 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s, int pid; int len; int p; + bool use_usec_format; - secs = record->ts / NSECS_PER_SEC; - nsecs = record->ts - secs * NSECS_PER_SEC; + use_usec_format = is_timestamp_in_us(pevent->trace_clock, + use_trace_clock); + if (use_usec_format) { + secs = record->ts / NSECS_PER_SEC; + nsecs = record->ts - secs * NSECS_PER_SEC; + } if (record->size < 0) { do_warning("ug! 
negative record size %d", record->size); @@ -4446,15 +4699,20 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s, } else trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu); - if (pevent->flags & PEVENT_NSEC_OUTPUT) { - usecs = nsecs; - p = 9; - } else { - usecs = (nsecs + 500) / NSECS_PER_USEC; - p = 6; - } + if (use_usec_format) { + if (pevent->flags & PEVENT_NSEC_OUTPUT) { + usecs = nsecs; + p = 9; + } else { + usecs = (nsecs + 500) / NSECS_PER_USEC; + p = 6; + } - trace_seq_printf(s, " %5lu.%0*lu: %s: ", secs, p, usecs, event->name); + trace_seq_printf(s, " %5lu.%0*lu: %s: ", + secs, p, usecs, event->name); + } else + trace_seq_printf(s, " %12llu: %s: ", + record->ts, event->name); /* Space out the event names evenly. */ len = strlen(event->name); @@ -4674,6 +4932,9 @@ static void print_args(struct print_arg *args) case PRINT_BSTRING: printf("__get_str(%s)", args->string.string); break; + case PRINT_BITMASK: + printf("__get_bitmask(%s)", args->bitmask.bitmask); + break; case PRINT_TYPE: printf("(%s)", args->typecast.type); print_args(args->typecast.item); @@ -5005,8 +5266,38 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp, return ret; } +static enum pevent_errno +__pevent_parse_event(struct pevent *pevent, + struct event_format **eventp, + const char *buf, unsigned long size, + const char *sys) +{ + int ret = __pevent_parse_format(eventp, pevent, buf, size, sys); + struct event_format *event = *eventp; + + if (event == NULL) + return ret; + + if (pevent && add_event(pevent, event)) { + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + goto event_add_failed; + } + +#define PRINT_ARGS 0 + if (PRINT_ARGS && event->print_fmt.args) + print_args(event->print_fmt.args); + + return 0; + +event_add_failed: + pevent_free_format(event); + return ret; +} + /** * pevent_parse_format - parse the event format + * @pevent: the handle to the pevent + * @eventp: returned format * @buf: the buffer storing the event format string * @size: the size of @buf * @sys: the system the event belongs to @@ -5018,10 +5309,12 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp, * * /sys/kernel/debug/tracing/events/.../.../format */ -enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, +enum pevent_errno pevent_parse_format(struct pevent *pevent, + struct event_format **eventp, + const char *buf, unsigned long size, const char *sys) { - return __pevent_parse_format(eventp, NULL, buf, size, sys); + return __pevent_parse_event(pevent, eventp, buf, size, sys); } /** @@ -5042,25 +5335,7 @@ enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, unsigned long size, const char *sys) { struct event_format *event = NULL; - int ret = __pevent_parse_format(&event, pevent, buf, size, sys); - - if (event == NULL) - return ret; - - if (add_event(pevent, event)) { - ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; - goto event_add_failed; - } - -#define PRINT_ARGS 0 - if (PRINT_ARGS && event->print_fmt.args) - print_args(event->print_fmt.args); - - return 0; - -event_add_failed: - pevent_free_format(event); - return ret; + return __pevent_parse_event(pevent, &event, buf, size, sys); } #undef _PE @@ -5070,8 +5345,8 @@ static const char * const pevent_error_str[] = { }; #undef _PE -int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, - char *buf, size_t buflen) +int pevent_strerror(struct pevent *pevent __maybe_unused, + enum pevent_errno errnum, char *buf, size_t buflen) { int idx; const char *msg; @@ -5092,21 
+5367,7 @@ int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, idx = errnum - __PEVENT_ERRNO__START - 1; msg = pevent_error_str[idx]; - - switch (errnum) { - case PEVENT_ERRNO__MEM_ALLOC_FAILED: - case PEVENT_ERRNO__PARSE_EVENT_FAILED: - case PEVENT_ERRNO__READ_ID_FAILED: - case PEVENT_ERRNO__READ_FORMAT_FAILED: - case PEVENT_ERRNO__READ_PRINT_FAILED: - case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED: - snprintf(buf, buflen, "%s", msg); - break; - - default: - /* cannot reach here */ - break; - } + snprintf(buf, buflen, "%s", msg); return 0; } @@ -5287,6 +5548,48 @@ int pevent_print_num_field(struct trace_seq *s, const char *fmt, return -1; } +/** + * pevent_print_func_field - print a field and a format for function pointers + * @s: The seq to print to + * @fmt: The printf format to print the field with. + * @event: the event that the field is for + * @name: The name of the field + * @record: The record with the field name. + * @err: print default error if failed. + * + * Returns: 0 on success, -1 field not found, or 1 if buffer is full. + */ +int pevent_print_func_field(struct trace_seq *s, const char *fmt, + struct event_format *event, const char *name, + struct pevent_record *record, int err) +{ + struct format_field *field = pevent_find_field(event, name); + struct pevent *pevent = event->pevent; + unsigned long long val; + struct func_map *func; + char tmp[128]; + + if (!field) + goto failed; + + if (pevent_read_number_field(field, record->data, &val)) + goto failed; + + func = find_func(pevent, val); + + if (func) + snprintf(tmp, 128, "%s/0x%llx", func->func, func->addr - val); + else + sprintf(tmp, "0x%08llx", val); + + return trace_seq_printf(s, fmt, tmp); + + failed: + if (err) + trace_seq_printf(s, "CAN'T FIND FIELD \"%s\"", name); + return -1; +} + static void free_func_handle(struct pevent_function_handler *func) { struct pevent_func_params *params; @@ -5362,7 +5665,7 @@ int pevent_register_print_function(struct pevent *pevent, if (type == PEVENT_FUNC_ARG_VOID) break; - if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) { + if (type >= PEVENT_FUNC_ARG_MAX_TYPES) { do_warning("Invalid argument type %d", type); ret = PEVENT_ERRNO__INVALID_ARG_TYPE; goto out_free; @@ -5395,6 +5698,52 @@ int pevent_register_print_function(struct pevent *pevent, } /** + * pevent_unregister_print_function - unregister a helper function + * @pevent: the handle to the pevent + * @func: the function to process the helper function + * @name: the name of the helper function + * + * This function removes existing print handler for function @name. + * + * Returns 0 if the handler was removed successully, -1 otherwise. 
+ */ +int pevent_unregister_print_function(struct pevent *pevent, + pevent_func_handler func, char *name) +{ + struct pevent_function_handler *func_handle; + + func_handle = find_func_handler(pevent, name); + if (func_handle && func_handle->func == func) { + remove_func_handler(pevent, name); + return 0; + } + return -1; +} + +static struct event_format *pevent_search_event(struct pevent *pevent, int id, + const char *sys_name, + const char *event_name) +{ + struct event_format *event; + + if (id >= 0) { + /* search by id */ + event = pevent_find_event(pevent, id); + if (!event) + return NULL; + if (event_name && (strcmp(event_name, event->name) != 0)) + return NULL; + if (sys_name && (strcmp(sys_name, event->system) != 0)) + return NULL; + } else { + event = pevent_find_event_by_name(pevent, sys_name, event_name); + if (!event) + return NULL; + } + return event; +} + +/** * pevent_register_event_handler - register a way to parse an event * @pevent: the handle to the pevent * @id: the id of the event to register @@ -5411,28 +5760,16 @@ int pevent_register_print_function(struct pevent *pevent, * If @id is >= 0, then it is used to find the event. * else @sys_name and @event_name are used. */ -int pevent_register_event_handler(struct pevent *pevent, - int id, char *sys_name, char *event_name, - pevent_event_handler_func func, - void *context) +int pevent_register_event_handler(struct pevent *pevent, int id, + const char *sys_name, const char *event_name, + pevent_event_handler_func func, void *context) { struct event_format *event; struct event_handler *handle; - if (id >= 0) { - /* search by id */ - event = pevent_find_event(pevent, id); - if (!event) - goto not_found; - if (event_name && (strcmp(event_name, event->name) != 0)) - goto not_found; - if (sys_name && (strcmp(sys_name, event->system) != 0)) - goto not_found; - } else { - event = pevent_find_event_by_name(pevent, sys_name, event_name); - if (!event) - goto not_found; - } + event = pevent_search_event(pevent, id, sys_name, event_name); + if (event == NULL) + goto not_found; pr_stat("overriding event (%d) %s:%s with new print handler", event->id, event->system, event->name); @@ -5472,6 +5809,79 @@ int pevent_register_event_handler(struct pevent *pevent, return -1; } +static int handle_matches(struct event_handler *handler, int id, + const char *sys_name, const char *event_name, + pevent_event_handler_func func, void *context) +{ + if (id >= 0 && id != handler->id) + return 0; + + if (event_name && (strcmp(event_name, handler->event_name) != 0)) + return 0; + + if (sys_name && (strcmp(sys_name, handler->sys_name) != 0)) + return 0; + + if (func != handler->func || context != handler->context) + return 0; + + return 1; +} + +/** + * pevent_unregister_event_handler - unregister an existing event handler + * @pevent: the handle to the pevent + * @id: the id of the event to unregister + * @sys_name: the system name the handler belongs to + * @event_name: the name of the event handler + * @func: the function to call to parse the event information + * @context: the data to be passed to @func + * + * This function removes existing event handler (parser). + * + * If @id is >= 0, then it is used to find the event. + * else @sys_name and @event_name are used. + * + * Returns 0 if handler was removed successfully, -1 if event was not found. 
+ */ +int pevent_unregister_event_handler(struct pevent *pevent, int id, + const char *sys_name, const char *event_name, + pevent_event_handler_func func, void *context) +{ + struct event_format *event; + struct event_handler *handle; + struct event_handler **next; + + event = pevent_search_event(pevent, id, sys_name, event_name); + if (event == NULL) + goto not_found; + + if (event->handler == func && event->context == context) { + pr_stat("removing override handler for event (%d) %s:%s. Going back to default handler.", + event->id, event->system, event->name); + + event->handler = NULL; + event->context = NULL; + return 0; + } + +not_found: + for (next = &pevent->handlers; *next; next = &(*next)->next) { + handle = *next; + if (handle_matches(handle, id, sys_name, event_name, + func, context)) + break; + } + + if (!(*next)) + return -1; + + *next = handle->next; + free_handler(handle); + + return 0; +} + /** * pevent_alloc - create a pevent handle */ @@ -5560,7 +5970,7 @@ void pevent_free(struct pevent *pevent) } if (pevent->func_map) { - for (i = 0; i < pevent->func_count; i++) { + for (i = 0; i < (int)pevent->func_count; i++) { free(pevent->func_map[i].func); free(pevent->func_map[i].mod); } @@ -5582,7 +5992,7 @@ void pevent_free(struct pevent *pevent) } if (pevent->printk_map) { - for (i = 0; i < pevent->printk_count; i++) + for (i = 0; i < (int)pevent->printk_count; i++) free(pevent->printk_map[i].printk); free(pevent->printk_map); } diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 24a4bbabc5d..7a3873ff9a4 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -13,16 +13,17 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see <http://www.gnu.org/licenses> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #ifndef _PARSE_EVENTS_H #define _PARSE_EVENTS_H +#include <stdbool.h> #include <stdarg.h> #include <regex.h> +#include <string.h> #ifndef __maybe_unused #define __maybe_unused __attribute__((unused)) @@ -57,6 +58,12 @@ struct pevent_record { #endif }; +enum trace_seq_fail { + TRACE_SEQ__GOOD, + TRACE_SEQ__BUFFER_POISONED, + TRACE_SEQ__MEM_ALLOC_FAILED, +}; + /* * Trace sequences are used to allow a function to call several other functions * to create a string of data to use (up to a max of PAGE_SIZE). @@ -67,9 +74,11 @@ struct trace_seq { unsigned int buffer_size; unsigned int len; unsigned int readpos; + enum trace_seq_fail state; }; void trace_seq_init(struct trace_seq *s); +void trace_seq_reset(struct trace_seq *s); void trace_seq_destroy(struct trace_seq *s); extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
@@ -96,10 +105,10 @@ typedef int (*pevent_event_handler_func)(struct trace_seq *s, void *context); typedef int (*pevent_plugin_load_func)(struct pevent *pevent); -typedef int (*pevent_plugin_unload_func)(void); +typedef int (*pevent_plugin_unload_func)(struct pevent *pevent); -struct plugin_option { - struct plugin_option *next; +struct pevent_plugin_option { + struct pevent_plugin_option *next; void *handle; char *file; char *name; @@ -121,12 +130,12 @@ struct plugin_option { * PEVENT_PLUGIN_UNLOADER: (optional) * The function called just before unloading * - * int PEVENT_PLUGIN_UNLOADER(void) + * int PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) * * PEVENT_PLUGIN_OPTIONS: (optional) * Plugin options that can be set before loading * - * struct plugin_option PEVENT_PLUGIN_OPTIONS[] = { + * struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = { * { * .name = "option-name", * .plugin_alias = "overide-file-name", (optional) @@ -199,6 +208,11 @@ struct print_arg_string { int offset; }; +struct print_arg_bitmask { + char *bitmask; + int offset; +}; + struct print_arg_field { char *name; struct format_field *field; @@ -265,6 +279,7 @@ enum print_arg_type { PRINT_DYNAMIC_ARRAY, PRINT_OP, PRINT_FUNC, + PRINT_BITMASK, }; struct print_arg { @@ -279,6 +294,7 @@ struct print_arg { struct print_arg_hex hex; struct print_arg_func func; struct print_arg_string string; + struct print_arg_bitmask bitmask; struct print_arg_op op; struct print_arg_dynarray dynarray; }; @@ -307,6 +323,8 @@ enum { EVENT_FL_ISBPRINT = 0x04, EVENT_FL_ISFUNCENT = 0x10, EVENT_FL_ISFUNCRET = 0x20, + EVENT_FL_NOHANDLE = 0x40, + EVENT_FL_PRINTRAW = 0x80, EVENT_FL_FAILED = 0x80000000 }; @@ -343,6 +361,8 @@ enum pevent_func_arg_type { enum pevent_flag { PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */ + PEVENT_DISABLE_SYS_PLUGINS = 1 << 1, + PEVENT_DISABLE_PLUGINS = 1 << 2, }; #define PEVENT_ERRORS \ @@ -352,12 +372,35 @@ enum pevent_flag { _PE(READ_FORMAT_FAILED, "failed to read event format"), \ _PE(READ_PRINT_FAILED, "failed to read event print fmt"), \ _PE(OLD_FTRACE_ARG_FAILED,"failed to allocate field name for ftrace"),\ - _PE(INVALID_ARG_TYPE, "invalid argument type") + _PE(INVALID_ARG_TYPE, "invalid argument type"), \ + _PE(INVALID_EXP_TYPE, "invalid expression type"), \ + _PE(INVALID_OP_TYPE, "invalid operator type"), \ + _PE(INVALID_EVENT_NAME, "invalid event name"), \ + _PE(EVENT_NOT_FOUND, "no event found"), \ + _PE(SYNTAX_ERROR, "syntax error"), \ + _PE(ILLEGAL_RVALUE, "illegal rvalue"), \ + _PE(ILLEGAL_LVALUE, "illegal lvalue for string comparison"), \ + _PE(INVALID_REGEX, "regex did not compute"), \ + _PE(ILLEGAL_STRING_CMP, "illegal comparison for string"), \ + _PE(ILLEGAL_INTEGER_CMP,"illegal comparison for integer"), \ + _PE(REPARENT_NOT_OP, "cannot reparent other than OP"), \ + _PE(REPARENT_FAILED, "failed to reparent filter OP"), \ + _PE(BAD_FILTER_ARG, "bad arg in filter tree"), \ + _PE(UNEXPECTED_TYPE, "unexpected type (not a value)"), \ + _PE(ILLEGAL_TOKEN, "illegal token"), \ + _PE(INVALID_PAREN, "open parenthesis cannot come here"), \ + _PE(UNBALANCED_PAREN, "unbalanced number of parenthesis"), \ + _PE(UNKNOWN_TOKEN, "unknown token"), \ + _PE(FILTER_NOT_FOUND, "no filter found"), \ + _PE(NOT_A_NUMBER, "must have number field"), \ + _PE(NO_FILTER, "no filters exists"), \ + _PE(FILTER_MISS, "record does not match to filter") #undef _PE #define _PE(__code, __str) PEVENT_ERRNO__ ## __code enum pevent_errno { PEVENT_ERRNO__SUCCESS = 0, + PEVENT_ERRNO__FILTER_MATCH = PEVENT_ERRNO__SUCCESS, /* * Choose an arbitrary 
negative big number not to clash with standard @@ -374,6 +417,22 @@ enum pevent_errno { }; #undef _PE +struct plugin_list; + +#define INVALID_PLUGIN_LIST_OPTION ((char **)((unsigned long)-1)) + +struct plugin_list *traceevent_load_plugins(struct pevent *pevent); +void traceevent_unload_plugins(struct plugin_list *plugin_list, + struct pevent *pevent); +char **traceevent_plugin_list_options(void); +void traceevent_plugin_free_options_list(char **list); +int traceevent_plugin_add_options(const char *name, + struct pevent_plugin_option *options); +void traceevent_plugin_remove_options(struct pevent_plugin_option *options); +void traceevent_print_plugins(struct trace_seq *s, + const char *prefix, const char *suffix, + const struct plugin_list *list); + struct cmdline; struct cmdline_list; struct func_map; @@ -400,6 +459,7 @@ struct pevent { int cpus; int long_size; + int page_size; struct cmdline *cmdlines; struct cmdline_list *cmdlist; @@ -449,6 +509,8 @@ struct pevent { /* cache */ struct event_format *last_event; + + char *trace_clock; }; static inline void pevent_set_flag(struct pevent *pevent, int flag) @@ -516,6 +578,15 @@ __data2host8(struct pevent *pevent, unsigned long long data) __data2host8(pevent, __val); \ }) +static inline int traceevent_host_bigendian(void) +{ + unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 }; + unsigned int val; + + memcpy(&val, str, 4); + return val == 0x01020304; +} + /* taken from kernel/trace/trace.h */ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, @@ -526,21 +597,24 @@ enum trace_flag_type { }; int pevent_register_comm(struct pevent *pevent, const char *comm, int pid); +void pevent_register_trace_clock(struct pevent *pevent, char *trace_clock); int pevent_register_function(struct pevent *pevent, char *name, unsigned long long addr, char *mod); -int pevent_register_print_string(struct pevent *pevent, char *fmt, +int pevent_register_print_string(struct pevent *pevent, const char *fmt, unsigned long long addr); int pevent_pid_is_registered(struct pevent *pevent, int pid); void pevent_print_event(struct pevent *pevent, struct trace_seq *s, - struct pevent_record *record); + struct pevent_record *record, bool use_trace_clock); int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size, int long_size); enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, unsigned long size, const char *sys); -enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, +enum pevent_errno pevent_parse_format(struct pevent *pevent, + struct event_format **eventp, + const char *buf, unsigned long size, const char *sys); void pevent_free_format(struct event_format *event); @@ -562,12 +636,22 @@ int pevent_print_num_field(struct trace_seq *s, const char *fmt, struct event_format *event, const char *name, struct pevent_record *record, int err); -int pevent_register_event_handler(struct pevent *pevent, int id, char *sys_name, char *event_name, +int pevent_print_func_field(struct trace_seq *s, const char *fmt, + struct event_format *event, const char *name, + struct pevent_record *record, int err); + +int pevent_register_event_handler(struct pevent *pevent, int id, + const char *sys_name, const char *event_name, pevent_event_handler_func func, void *context); +int pevent_unregister_event_handler(struct pevent *pevent, int id, + const char *sys_name, const char *event_name, + pevent_event_handler_func func, void *context); int pevent_register_print_function(struct pevent *pevent, pevent_func_handler func, enum 
pevent_func_arg_type ret_type, char *name, ...); +int pevent_unregister_print_function(struct pevent *pevent, + pevent_func_handler func, char *name); struct format_field *pevent_find_common_field(struct event_format *event, const char *name); struct format_field *pevent_find_field(struct event_format *event, const char *name); @@ -620,6 +704,16 @@ static inline void pevent_set_long_size(struct pevent *pevent, int long_size) pevent->long_size = long_size; } +static inline int pevent_get_page_size(struct pevent *pevent) +{ + return pevent->page_size; +} + +static inline void pevent_set_page_size(struct pevent *pevent, int _page_size) +{ + pevent->page_size = _page_size; +} + static inline int pevent_is_file_bigendian(struct pevent *pevent) { return pevent->file_bigendian; @@ -789,18 +883,22 @@ struct filter_type { struct filter_arg *filter; }; +#define PEVENT_FILTER_ERROR_BUFSZ 1024 + struct event_filter { struct pevent *pevent; int filters; struct filter_type *event_filters; + char error_buffer[PEVENT_FILTER_ERROR_BUFSZ]; }; struct event_filter *pevent_filter_alloc(struct pevent *pevent); -#define FILTER_NONE -2 -#define FILTER_NOEXIST -1 -#define FILTER_MISS 0 -#define FILTER_MATCH 1 +/* for backward compatibility */ +#define FILTER_NONE PEVENT_ERRNO__NO_FILTER +#define FILTER_NOEXIST PEVENT_ERRNO__FILTER_NOT_FOUND +#define FILTER_MISS PEVENT_ERRNO__FILTER_MISS +#define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH enum filter_trivial_type { FILTER_TRIVIAL_FALSE, @@ -808,20 +906,21 @@ enum filter_trivial_type { FILTER_TRIVIAL_BOTH, }; -int pevent_filter_add_filter_str(struct event_filter *filter, - const char *filter_str, - char **error_str); +enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter, + const char *filter_str); +enum pevent_errno pevent_filter_match(struct event_filter *filter, + struct pevent_record *record); -int pevent_filter_match(struct event_filter *filter, - struct pevent_record *record); +int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err, + char *buf, size_t buflen); int pevent_event_filtered(struct event_filter *filter, int event_id); void pevent_filter_reset(struct event_filter *filter); -void pevent_filter_clear_trivial(struct event_filter *filter, +int pevent_filter_clear_trivial(struct event_filter *filter, enum filter_trivial_type type); void pevent_filter_free(struct event_filter *filter); diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c new file mode 100644 index 00000000000..136162c03af --- /dev/null +++ b/tools/lib/traceevent/event-plugin.c @@ -0,0 +1,416 @@ +/* + * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <stdio.h> +#include <string.h> +#include <dlfcn.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <dirent.h> +#include "event-parse.h" +#include "event-utils.h" + +#define LOCAL_PLUGIN_DIR ".traceevent/plugins" + +static struct registered_plugin_options { + struct registered_plugin_options *next; + struct pevent_plugin_option *options; +} *registered_options; + +static struct trace_plugin_options { + struct trace_plugin_options *next; + char *plugin; + char *option; + char *value; +} *trace_plugin_options; + +struct plugin_list { + struct plugin_list *next; + char *name; + void *handle; +}; + +/** + * traceevent_plugin_list_options - get list of plugin options + * + * Returns an array of char strings that list the currently registered + * plugin options in the format of <plugin>:<option>. This list can be + * used by toggling the option. + * + * Returns NULL if there's no options registered. On error it returns + * INVALID_PLUGIN_LIST_OPTION + * + * Must be freed with traceevent_plugin_free_options_list(). + */ +char **traceevent_plugin_list_options(void) +{ + struct registered_plugin_options *reg; + struct pevent_plugin_option *op; + char **list = NULL; + char *name; + int count = 0; + + for (reg = registered_options; reg; reg = reg->next) { + for (op = reg->options; op->name; op++) { + char *alias = op->plugin_alias ? op->plugin_alias : op->file; + char **temp = list; + + name = malloc(strlen(op->name) + strlen(alias) + 2); + if (!name) + goto err; + + sprintf(name, "%s:%s", alias, op->name); + list = realloc(list, count + 2); + if (!list) { + list = temp; + free(name); + goto err; + } + list[count++] = name; + list[count] = NULL; + } + } + return list; + + err: + while (--count >= 0) + free(list[count]); + free(list); + + return INVALID_PLUGIN_LIST_OPTION; +} + +void traceevent_plugin_free_options_list(char **list) +{ + int i; + + if (!list) + return; + + if (list == INVALID_PLUGIN_LIST_OPTION) + return; + + for (i = 0; list[i]; i++) + free(list[i]); + + free(list); +} + +static int +update_option(const char *file, struct pevent_plugin_option *option) +{ + struct trace_plugin_options *op; + char *plugin; + + if (option->plugin_alias) { + plugin = strdup(option->plugin_alias); + if (!plugin) + return -1; + } else { + char *p; + plugin = strdup(file); + if (!plugin) + return -1; + p = strstr(plugin, "."); + if (p) + *p = '\0'; + } + + /* first look for named options */ + for (op = trace_plugin_options; op; op = op->next) { + if (!op->plugin) + continue; + if (strcmp(op->plugin, plugin) != 0) + continue; + if (strcmp(op->option, option->name) != 0) + continue; + + option->value = op->value; + option->set ^= 1; + goto out; + } + + /* first look for unnamed options */ + for (op = trace_plugin_options; op; op = op->next) { + if (op->plugin) + continue; + if (strcmp(op->option, option->name) != 0) + continue; + + option->value = op->value; + option->set ^= 1; + break; + } + + out: + free(plugin); + return 0; +} + +/** + * traceevent_plugin_add_options - Add a set of options by a plugin + * @name: The name of the plugin adding the options + * @options: The set of options being loaded + * + * Sets the options with the values that have been added by user. 
+ */ +int traceevent_plugin_add_options(const char *name, + struct pevent_plugin_option *options) +{ + struct registered_plugin_options *reg; + + reg = malloc(sizeof(*reg)); + if (!reg) + return -1; + reg->next = registered_options; + reg->options = options; + registered_options = reg; + + while (options->name) { + update_option(name, options); + options++; + } + return 0; +} + +/** + * traceevent_plugin_remove_options - remove plugin options that were registered + * @options: Options to removed that were registered with traceevent_plugin_add_options + */ +void traceevent_plugin_remove_options(struct pevent_plugin_option *options) +{ + struct registered_plugin_options **last; + struct registered_plugin_options *reg; + + for (last = &registered_options; *last; last = &(*last)->next) { + if ((*last)->options == options) { + reg = *last; + *last = reg->next; + free(reg); + return; + } + } +} + +/** + * traceevent_print_plugins - print out the list of plugins loaded + * @s: the trace_seq descripter to write to + * @prefix: The prefix string to add before listing the option name + * @suffix: The suffix string ot append after the option name + * @list: The list of plugins (usually returned by traceevent_load_plugins() + * + * Writes to the trace_seq @s the list of plugins (files) that is + * returned by traceevent_load_plugins(). Use @prefix and @suffix for formating: + * @prefix = " ", @suffix = "\n". + */ +void traceevent_print_plugins(struct trace_seq *s, + const char *prefix, const char *suffix, + const struct plugin_list *list) +{ + while (list) { + trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix); + list = list->next; + } +} + +static void +load_plugin(struct pevent *pevent, const char *path, + const char *file, void *data) +{ + struct plugin_list **plugin_list = data; + pevent_plugin_load_func func; + struct plugin_list *list; + const char *alias; + char *plugin; + void *handle; + + plugin = malloc(strlen(path) + strlen(file) + 2); + if (!plugin) { + warning("could not allocate plugin memory\n"); + return; + } + + strcpy(plugin, path); + strcat(plugin, "/"); + strcat(plugin, file); + + handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL); + if (!handle) { + warning("could not load plugin '%s'\n%s\n", + plugin, dlerror()); + goto out_free; + } + + alias = dlsym(handle, PEVENT_PLUGIN_ALIAS_NAME); + if (!alias) + alias = file; + + func = dlsym(handle, PEVENT_PLUGIN_LOADER_NAME); + if (!func) { + warning("could not find func '%s' in plugin '%s'\n%s\n", + PEVENT_PLUGIN_LOADER_NAME, plugin, dlerror()); + goto out_free; + } + + list = malloc(sizeof(*list)); + if (!list) { + warning("could not allocate plugin memory\n"); + goto out_free; + } + + list->next = *plugin_list; + list->handle = handle; + list->name = plugin; + *plugin_list = list; + + pr_stat("registering plugin: %s", plugin); + func(pevent); + return; + + out_free: + free(plugin); +} + +static void +load_plugins_dir(struct pevent *pevent, const char *suffix, + const char *path, + void (*load_plugin)(struct pevent *pevent, + const char *path, + const char *name, + void *data), + void *data) +{ + struct dirent *dent; + struct stat st; + DIR *dir; + int ret; + + ret = stat(path, &st); + if (ret < 0) + return; + + if (!S_ISDIR(st.st_mode)) + return; + + dir = opendir(path); + if (!dir) + return; + + while ((dent = readdir(dir))) { + const char *name = dent->d_name; + + if (strcmp(name, ".") == 0 || + strcmp(name, "..") == 0) + continue; + + /* Only load plugins that end in suffix */ + if (strcmp(name + (strlen(name) - strlen(suffix)),
suffix) != 0) + continue; + + load_plugin(pevent, path, name, data); + } + + closedir(dir); +} + +static void +load_plugins(struct pevent *pevent, const char *suffix, + void (*load_plugin)(struct pevent *pevent, + const char *path, + const char *name, + void *data), + void *data) +{ + char *home; + char *path; + char *envdir; + + if (pevent->flags & PEVENT_DISABLE_PLUGINS) + return; + + /* + * If a system plugin directory was defined, + * check that first. + */ +#ifdef PLUGIN_DIR + if (!(pevent->flags & PEVENT_DISABLE_SYS_PLUGINS)) + load_plugins_dir(pevent, suffix, PLUGIN_DIR, + load_plugin, data); +#endif + + /* + * Next let the environment-set plugin directory + * override the system defaults. + */ + envdir = getenv("TRACEEVENT_PLUGIN_DIR"); + if (envdir) + load_plugins_dir(pevent, suffix, envdir, load_plugin, data); + + /* + * Now let the home directory override the environment + * or system defaults. + */ + home = getenv("HOME"); + if (!home) + return; + + path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2); + if (!path) { + warning("could not allocate plugin memory\n"); + return; + } + + strcpy(path, home); + strcat(path, "/"); + strcat(path, LOCAL_PLUGIN_DIR); + + load_plugins_dir(pevent, suffix, path, load_plugin, data); + + free(path); +} + +struct plugin_list* +traceevent_load_plugins(struct pevent *pevent) +{ + struct plugin_list *list = NULL; + + load_plugins(pevent, ".so", load_plugin, &list); + return list; +} + +void +traceevent_unload_plugins(struct plugin_list *plugin_list, struct pevent *pevent) +{ + pevent_plugin_unload_func func; + struct plugin_list *list; + + while (plugin_list) { + list = plugin_list; + plugin_list = list->next; + func = dlsym(list->handle, PEVENT_PLUGIN_UNLOADER_NAME); + if (func) + func(pevent); + dlclose(list->handle); + free(list->name); + free(list); + } +} diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h index bc075006966..d1dc2170e40 100644 --- a/tools/lib/traceevent/event-utils.h +++ b/tools/lib/traceevent/event-utils.h @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * License along with this program; if not, see <http://www.gnu.org/licenses> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -24,18 +23,14 @@ #include <ctype.h> /* Can be overridden */ -void die(const char *fmt, ...); -void *malloc_or_die(unsigned int size); void warning(const char *fmt, ...); void pr_stat(const char *fmt, ...); void vpr_stat(const char *fmt, va_list ap); /* Always available */ -void __die(const char *fmt, ...); void __warning(const char *fmt, ...); void __pr_stat(const char *fmt, ...); -void __vdie(const char *fmt, ...); void __vwarning(const char *fmt, ...); void __vpr_stat(const char *fmt, ...); diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c new file mode 100644 index 00000000000..dcc665228c7 --- /dev/null +++ b/tools/lib/traceevent/kbuffer-parse.c @@ -0,0 +1,732 @@ +/* + * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "kbuffer.h" + +#define MISSING_EVENTS (1 << 31) +#define MISSING_STORED (1 << 30) + +#define COMMIT_MASK ((1 << 27) - 1) + +enum { + KBUFFER_FL_HOST_BIG_ENDIAN = (1<<0), + KBUFFER_FL_BIG_ENDIAN = (1<<1), + KBUFFER_FL_LONG_8 = (1<<2), + KBUFFER_FL_OLD_FORMAT = (1<<3), +}; + +#define ENDIAN_MASK (KBUFFER_FL_HOST_BIG_ENDIAN | KBUFFER_FL_BIG_ENDIAN) + +/** kbuffer + * @timestamp - timestamp of current event + * @lost_events - # of lost events between this subbuffer and previous + * @flags - special flags of the kbuffer + * @subbuffer - pointer to the sub-buffer page + * @data - pointer to the start of data on the sub-buffer page + * @index - index from @data to the @curr event data + * @curr - offset from @data to the start of current event + * (includes metadata) + * @next - offset from @data to the start of next event + * @size - The size of data on @data + * @start - The offset from @subbuffer where @data lives + * + * @read_4 - Function to read 4 raw bytes (may swap) + * @read_8 - Function to read 8 raw bytes (may swap) + * @read_long - Function to read a long word (4 or 8 bytes with needed swap) + */ +struct kbuffer { + unsigned long long timestamp; + long long lost_events; + unsigned long flags; + void *subbuffer; + void *data; + unsigned int index; + unsigned int curr; + unsigned int next; + unsigned int size; + unsigned int start; + + unsigned int (*read_4)(void *ptr); + unsigned long long (*read_8)(void *ptr); + unsigned long long (*read_long)(struct kbuffer *kbuf, void *ptr); + int (*next_event)(struct kbuffer *kbuf); +}; + +static void *zmalloc(size_t size) +{ + return calloc(1, size); +} + 
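The kbuffer reader added above is driven through the small API declared in kbuffer.h later in this patch. A minimal usage sketch (not part of the patch): it assumes the caller has already read one raw sub-buffer page into memory, for example from per_cpu/cpu0/trace_pipe_raw, and knows the trace's word size and endianness; both are hard-coded below for illustration, and dump_subbuffer() is a hypothetical helper name.

#include <stdio.h>
#include "kbuffer.h"

/*
 * Sketch: iterate over every event in one raw sub-buffer page.
 * Word size and endianness are assumed here (8-byte little-endian);
 * a real caller would take them from the trace metadata.
 */
int dump_subbuffer(void *page)
{
	struct kbuffer *kbuf;
	unsigned long long ts;
	void *event;

	kbuf = kbuffer_alloc(KBUFFER_LSIZE_8, KBUFFER_ENDIAN_LITTLE);
	if (!kbuf)
		return -1;

	if (kbuffer_load_subbuffer(kbuf, page) < 0) {
		kbuffer_free(kbuf);
		return -1;
	}

	/* kbuffer_read_event() returns the current event, NULL once the page is exhausted */
	for (event = kbuffer_read_event(kbuf, &ts); event;
	     event = kbuffer_next_event(kbuf, &ts))
		printf("event at offset %d, payload %d bytes, ts %llu\n",
		       kbuffer_curr_offset(kbuf),
		       kbuffer_event_size(kbuf), ts);

	kbuffer_free(kbuf);
	return 0;
}

kbuffer_load_subbuffer() already positions the reader at the first record, so kbuffer_read_event() serves for the first iteration and kbuffer_next_event() advances through the rest.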
+static int host_is_bigendian(void) +{ + unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 }; + unsigned int *ptr; + + ptr = (unsigned int *)str; + return *ptr == 0x01020304; +} + +static int do_swap(struct kbuffer *kbuf) +{ + return ((kbuf->flags & KBUFFER_FL_HOST_BIG_ENDIAN) + kbuf->flags) & + ENDIAN_MASK; +} + +static unsigned long long __read_8(void *ptr) +{ + unsigned long long data = *(unsigned long long *)ptr; + + return data; +} + +static unsigned long long __read_8_sw(void *ptr) +{ + unsigned long long data = *(unsigned long long *)ptr; + unsigned long long swap; + + swap = ((data & 0xffULL) << 56) | + ((data & (0xffULL << 8)) << 40) | + ((data & (0xffULL << 16)) << 24) | + ((data & (0xffULL << 24)) << 8) | + ((data & (0xffULL << 32)) >> 8) | + ((data & (0xffULL << 40)) >> 24) | + ((data & (0xffULL << 48)) >> 40) | + ((data & (0xffULL << 56)) >> 56); + + return swap; +} + +static unsigned int __read_4(void *ptr) +{ + unsigned int data = *(unsigned int *)ptr; + + return data; +} + +static unsigned int __read_4_sw(void *ptr) +{ + unsigned int data = *(unsigned int *)ptr; + unsigned int swap; + + swap = ((data & 0xffULL) << 24) | + ((data & (0xffULL << 8)) << 8) | + ((data & (0xffULL << 16)) >> 8) | + ((data & (0xffULL << 24)) >> 24); + + return swap; +} + +static unsigned long long read_8(struct kbuffer *kbuf, void *ptr) +{ + return kbuf->read_8(ptr); +} + +static unsigned int read_4(struct kbuffer *kbuf, void *ptr) +{ + return kbuf->read_4(ptr); +} + +static unsigned long long __read_long_8(struct kbuffer *kbuf, void *ptr) +{ + return kbuf->read_8(ptr); +} + +static unsigned long long __read_long_4(struct kbuffer *kbuf, void *ptr) +{ + return kbuf->read_4(ptr); +} + +static unsigned long long read_long(struct kbuffer *kbuf, void *ptr) +{ + return kbuf->read_long(kbuf, ptr); +} + +static int calc_index(struct kbuffer *kbuf, void *ptr) +{ + return (unsigned long)ptr - (unsigned long)kbuf->data; +} + +static int __next_event(struct kbuffer *kbuf); + +/** + * kbuffer_alloc - allocat a new kbuffer + * @size; enum to denote size of word + * @endian: enum to denote endianness + * + * Allocates and returns a new kbuffer. + */ +struct kbuffer * +kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian) +{ + struct kbuffer *kbuf; + int flags = 0; + + switch (size) { + case KBUFFER_LSIZE_4: + break; + case KBUFFER_LSIZE_8: + flags |= KBUFFER_FL_LONG_8; + break; + default: + return NULL; + } + + switch (endian) { + case KBUFFER_ENDIAN_LITTLE: + break; + case KBUFFER_ENDIAN_BIG: + flags |= KBUFFER_FL_BIG_ENDIAN; + break; + default: + return NULL; + } + + kbuf = zmalloc(sizeof(*kbuf)); + if (!kbuf) + return NULL; + + kbuf->flags = flags; + + if (host_is_bigendian()) + kbuf->flags |= KBUFFER_FL_HOST_BIG_ENDIAN; + + if (do_swap(kbuf)) { + kbuf->read_8 = __read_8_sw; + kbuf->read_4 = __read_4_sw; + } else { + kbuf->read_8 = __read_8; + kbuf->read_4 = __read_4; + } + + if (kbuf->flags & KBUFFER_FL_LONG_8) + kbuf->read_long = __read_long_8; + else + kbuf->read_long = __read_long_4; + + /* May be changed by kbuffer_set_old_format() */ + kbuf->next_event = __next_event; + + return kbuf; +} + +/** kbuffer_free - free an allocated kbuffer + * @kbuf: The kbuffer to free + * + * Can take NULL as a parameter. 
+ */ +void kbuffer_free(struct kbuffer *kbuf) +{ + free(kbuf); +} + +static unsigned int type4host(struct kbuffer *kbuf, + unsigned int type_len_ts) +{ + if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN) + return (type_len_ts >> 29) & 3; + else + return type_len_ts & 3; +} + +static unsigned int len4host(struct kbuffer *kbuf, + unsigned int type_len_ts) +{ + if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN) + return (type_len_ts >> 27) & 7; + else + return (type_len_ts >> 2) & 7; +} + +static unsigned int type_len4host(struct kbuffer *kbuf, + unsigned int type_len_ts) +{ + if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN) + return (type_len_ts >> 27) & ((1 << 5) - 1); + else + return type_len_ts & ((1 << 5) - 1); +} + +static unsigned int ts4host(struct kbuffer *kbuf, + unsigned int type_len_ts) +{ + if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN) + return type_len_ts & ((1 << 27) - 1); + else + return type_len_ts >> 5; +} + +/* + * Linux 2.6.30 and earlier (not much ealier) had a different + * ring buffer format. It should be obsolete, but we handle it anyway. + */ +enum old_ring_buffer_type { + OLD_RINGBUF_TYPE_PADDING, + OLD_RINGBUF_TYPE_TIME_EXTEND, + OLD_RINGBUF_TYPE_TIME_STAMP, + OLD_RINGBUF_TYPE_DATA, +}; + +static unsigned int old_update_pointers(struct kbuffer *kbuf) +{ + unsigned long long extend; + unsigned int type_len_ts; + unsigned int type; + unsigned int len; + unsigned int delta; + unsigned int length; + void *ptr = kbuf->data + kbuf->curr; + + type_len_ts = read_4(kbuf, ptr); + ptr += 4; + + type = type4host(kbuf, type_len_ts); + len = len4host(kbuf, type_len_ts); + delta = ts4host(kbuf, type_len_ts); + + switch (type) { + case OLD_RINGBUF_TYPE_PADDING: + kbuf->next = kbuf->size; + return 0; + + case OLD_RINGBUF_TYPE_TIME_EXTEND: + extend = read_4(kbuf, ptr); + extend <<= TS_SHIFT; + extend += delta; + delta = extend; + ptr += 4; + break; + + case OLD_RINGBUF_TYPE_TIME_STAMP: + /* should never happen! 
*/ + kbuf->curr = kbuf->size; + kbuf->next = kbuf->size; + kbuf->index = kbuf->size; + return -1; + default: + if (len) + length = len * 4; + else { + length = read_4(kbuf, ptr); + length -= 4; + ptr += 4; + } + break; + } + + kbuf->timestamp += delta; + kbuf->index = calc_index(kbuf, ptr); + kbuf->next = kbuf->index + length; + + return type; +} + +static int __old_next_event(struct kbuffer *kbuf) +{ + int type; + + do { + kbuf->curr = kbuf->next; + if (kbuf->next >= kbuf->size) + return -1; + type = old_update_pointers(kbuf); + } while (type == OLD_RINGBUF_TYPE_TIME_EXTEND || type == OLD_RINGBUF_TYPE_PADDING); + + return 0; +} + +static unsigned int +translate_data(struct kbuffer *kbuf, void *data, void **rptr, + unsigned long long *delta, int *length) +{ + unsigned long long extend; + unsigned int type_len_ts; + unsigned int type_len; + + type_len_ts = read_4(kbuf, data); + data += 4; + + type_len = type_len4host(kbuf, type_len_ts); + *delta = ts4host(kbuf, type_len_ts); + + switch (type_len) { + case KBUFFER_TYPE_PADDING: + *length = read_4(kbuf, data); + data += *length; + break; + + case KBUFFER_TYPE_TIME_EXTEND: + extend = read_4(kbuf, data); + data += 4; + extend <<= TS_SHIFT; + extend += *delta; + *delta = extend; + *length = 0; + break; + + case KBUFFER_TYPE_TIME_STAMP: + data += 12; + *length = 0; + break; + case 0: + *length = read_4(kbuf, data) - 4; + *length = (*length + 3) & ~3; + data += 4; + break; + default: + *length = type_len * 4; + break; + } + + *rptr = data; + + return type_len; +} + +static unsigned int update_pointers(struct kbuffer *kbuf) +{ + unsigned long long delta; + unsigned int type_len; + int length; + void *ptr = kbuf->data + kbuf->curr; + + type_len = translate_data(kbuf, ptr, &ptr, &delta, &length); + + kbuf->timestamp += delta; + kbuf->index = calc_index(kbuf, ptr); + kbuf->next = kbuf->index + length; + + return type_len; +} + +/** + * kbuffer_translate_data - read raw data to get a record + * @swap: Set to 1 if bytes in words need to be swapped when read + * @data: The raw data to read + * @size: Address to store the size of the event data. + * + * Returns a pointer to the event data. To determine the entire + * record size (record metadata + data) just add the difference between + * @data and the returned value to @size. + */ +void *kbuffer_translate_data(int swap, void *data, unsigned int *size) +{ + unsigned long long delta; + struct kbuffer kbuf; + int type_len; + int length; + void *ptr; + + if (swap) { + kbuf.read_8 = __read_8_sw; + kbuf.read_4 = __read_4_sw; + kbuf.flags = host_is_bigendian() ? 0 : KBUFFER_FL_BIG_ENDIAN; + } else { + kbuf.read_8 = __read_8; + kbuf.read_4 = __read_4; + kbuf.flags = host_is_bigendian() ? 
KBUFFER_FL_BIG_ENDIAN: 0; + } + + type_len = translate_data(&kbuf, data, &ptr, &delta, &length); + switch (type_len) { + case KBUFFER_TYPE_PADDING: + case KBUFFER_TYPE_TIME_EXTEND: + case KBUFFER_TYPE_TIME_STAMP: + return NULL; + }; + + *size = length; + + return ptr; +} + +static int __next_event(struct kbuffer *kbuf) +{ + int type; + + do { + kbuf->curr = kbuf->next; + if (kbuf->next >= kbuf->size) + return -1; + type = update_pointers(kbuf); + } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING); + + return 0; +} + +static int next_event(struct kbuffer *kbuf) +{ + return kbuf->next_event(kbuf); +} + +/** + * kbuffer_next_event - increment the current pointer + * @kbuf: The kbuffer to read + * @ts: Address to store the next record's timestamp (may be NULL to ignore) + * + * Increments the pointers into the subbuffer of the kbuffer to point to the + * next event so that the next kbuffer_read_event() will return a + * new event. + * + * Returns the data of the next event if a new event exists on the subbuffer, + * NULL otherwise. + */ +void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts) +{ + int ret; + + if (!kbuf || !kbuf->subbuffer) + return NULL; + + ret = next_event(kbuf); + if (ret < 0) + return NULL; + + if (ts) + *ts = kbuf->timestamp; + + return kbuf->data + kbuf->index; +} + +/** + * kbuffer_load_subbuffer - load a new subbuffer into the kbuffer + * @kbuf: The kbuffer to load + * @subbuffer: The subbuffer to load into @kbuf. + * + * Load a new subbuffer (page) into @kbuf. This will reset all + * the pointers and update the @kbuf timestamp. The next read will + * return the first event on @subbuffer. + * + * Returns 0 on succes, -1 otherwise. + */ +int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer) +{ + unsigned long long flags; + void *ptr = subbuffer; + + if (!kbuf || !subbuffer) + return -1; + + kbuf->subbuffer = subbuffer; + + kbuf->timestamp = read_8(kbuf, ptr); + ptr += 8; + + kbuf->curr = 0; + + if (kbuf->flags & KBUFFER_FL_LONG_8) + kbuf->start = 16; + else + kbuf->start = 12; + + kbuf->data = subbuffer + kbuf->start; + + flags = read_long(kbuf, ptr); + kbuf->size = (unsigned int)flags & COMMIT_MASK; + + if (flags & MISSING_EVENTS) { + if (flags & MISSING_STORED) { + ptr = kbuf->data + kbuf->size; + kbuf->lost_events = read_long(kbuf, ptr); + } else + kbuf->lost_events = -1; + } else + kbuf->lost_events = 0; + + kbuf->index = 0; + kbuf->next = 0; + + next_event(kbuf); + + return 0; +} + +/** + * kbuffer_read_event - read the next event in the kbuffer subbuffer + * @kbuf: The kbuffer to read from + * @ts: The address to store the timestamp of the event (may be NULL to ignore) + * + * Returns a pointer to the data part of the current event. + * NULL if no event is left on the subbuffer. + */ +void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts) +{ + if (!kbuf || !kbuf->subbuffer) + return NULL; + + if (kbuf->curr >= kbuf->size) + return NULL; + + if (ts) + *ts = kbuf->timestamp; + return kbuf->data + kbuf->index; +} + +/** + * kbuffer_timestamp - Return the timestamp of the current event + * @kbuf: The kbuffer to read from + * + * Returns the timestamp of the current (next) event. 
+ */ +unsigned long long kbuffer_timestamp(struct kbuffer *kbuf) +{ + return kbuf->timestamp; +} + +/** + * kbuffer_read_at_offset - read the event that is at offset + * @kbuf: The kbuffer to read from + * @offset: The offset into the subbuffer + * @ts: The address to store the timestamp of the event (may be NULL to ignore) + * + * The @offset must be an index from the @kbuf subbuffer beginning. + * If @offset is bigger than the stored subbuffer, NULL will be returned. + * + * Returns the data of the record that is at @offset. Note, @offset does + * not need to be the start of the record, the offset just needs to be + * in the record (or beginning of it). + * + * Note, the kbuf timestamp and pointers are updated to the + * returned record. That is, kbuffer_read_event() will return the same + * data and timestamp, and kbuffer_next_event() will increment from + * this record. + */ +void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset, + unsigned long long *ts) +{ + void *data; + + if (offset < kbuf->start) + offset = 0; + else + offset -= kbuf->start; + + /* Reset the buffer */ + kbuffer_load_subbuffer(kbuf, kbuf->subbuffer); + + while (kbuf->curr < offset) { + data = kbuffer_next_event(kbuf, ts); + if (!data) + break; + } + + return data; +} + +/** + * kbuffer_subbuffer_size - the size of the loaded subbuffer + * @kbuf: The kbuffer to read from + * + * Returns the size of the subbuffer. Note, this size is + * where the last event resides. The stored subbuffer may actually be + * bigger due to padding and such. + */ +int kbuffer_subbuffer_size(struct kbuffer *kbuf) +{ + return kbuf->size; +} + +/** + * kbuffer_curr_index - Return the index of the record + * @kbuf: The kbuffer to read from + * + * Returns the index from the start of the data part of + * the subbuffer to the current location. Note this is not + * from the start of the subbuffer. An index of zero will + * point to the first record. Use kbuffer_curr_offset() for + * the actually offset (that can be used by kbuffer_read_at_offset()) + */ +int kbuffer_curr_index(struct kbuffer *kbuf) +{ + return kbuf->curr; +} + +/** + * kbuffer_curr_offset - Return the offset of the record + * @kbuf: The kbuffer to read from + * + * Returns the offset from the start of the subbuffer to the + * current location. + */ +int kbuffer_curr_offset(struct kbuffer *kbuf) +{ + return kbuf->curr + kbuf->start; +} + +/** + * kbuffer_event_size - return the size of the event data + * @kbuf: The kbuffer to read + * + * Returns the size of the event data (the payload not counting + * the meta data of the record) of the current event. + */ +int kbuffer_event_size(struct kbuffer *kbuf) +{ + return kbuf->next - kbuf->index; +} + +/** + * kbuffer_curr_size - return the size of the entire record + * @kbuf: The kbuffer to read + * + * Returns the size of the entire record (meta data and payload) + * of the current event. + */ +int kbuffer_curr_size(struct kbuffer *kbuf) +{ + return kbuf->next - kbuf->curr; +} + +/** + * kbuffer_missed_events - return the # of missed events from last event. + * @kbuf: The kbuffer to read from + * + * Returns the # of missed events (if recorded) before the current + * event. Note, only events on the beginning of a subbuffer can + * have missed events, all other events within the buffer will be + * zero. 
+ */ +int kbuffer_missed_events(struct kbuffer *kbuf) +{ + /* Only the first event can have missed events */ + if (kbuf->curr) + return 0; + + return kbuf->lost_events; +} + +/** + * kbuffer_set_old_forma - set the kbuffer to use the old format parsing + * @kbuf: The kbuffer to set + * + * This is obsolete (or should be). The first kernels to use the + * new ring buffer had a slightly different ring buffer format + * (2.6.30 and earlier). It is still somewhat supported by kbuffer, + * but should not be counted on in the future. + */ +void kbuffer_set_old_format(struct kbuffer *kbuf) +{ + kbuf->flags |= KBUFFER_FL_OLD_FORMAT; + + kbuf->next_event = __old_next_event; +} diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h new file mode 100644 index 00000000000..c831f64b17a --- /dev/null +++ b/tools/lib/traceevent/kbuffer.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2012 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#ifndef _KBUFFER_H +#define _KBUFFER_H + +#ifndef TS_SHIFT +#define TS_SHIFT 27 +#endif + +enum kbuffer_endian { + KBUFFER_ENDIAN_BIG, + KBUFFER_ENDIAN_LITTLE, +}; + +enum kbuffer_long_size { + KBUFFER_LSIZE_4, + KBUFFER_LSIZE_8, +}; + +enum { + KBUFFER_TYPE_PADDING = 29, + KBUFFER_TYPE_TIME_EXTEND = 30, + KBUFFER_TYPE_TIME_STAMP = 31, +}; + +struct kbuffer; + +struct kbuffer *kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian); +void kbuffer_free(struct kbuffer *kbuf); +int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer); +void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts); +void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts); +unsigned long long kbuffer_timestamp(struct kbuffer *kbuf); + +void *kbuffer_translate_data(int swap, void *data, unsigned int *size); + +void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset, unsigned long long *ts); + +int kbuffer_curr_index(struct kbuffer *kbuf); + +int kbuffer_curr_offset(struct kbuffer *kbuf); +int kbuffer_curr_size(struct kbuffer *kbuf); +int kbuffer_event_size(struct kbuffer *kbuf); +int kbuffer_missed_events(struct kbuffer *kbuf); +int kbuffer_subbuffer_size(struct kbuffer *kbuf); + +void kbuffer_set_old_format(struct kbuffer *kbuf); + +#endif /* _K_BUFFER_H */ diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c index 5ea4326ad11..b50234402fc 100644 --- a/tools/lib/traceevent/parse-filter.c +++ b/tools/lib/traceevent/parse-filter.c @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see <http://www.gnu.org/licenses> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -39,41 +38,31 @@ struct event_list { struct event_format *event; }; -#define MAX_ERR_STR_SIZE 256 - -static void show_error(char **error_str, const char *fmt, ...) +static void show_error(char *error_buf, const char *fmt, ...) { unsigned long long index; const char *input; - char *error; va_list ap; int len; int i; - if (!error_str) - return; - input = pevent_get_input_buf(); index = pevent_get_input_buf_ptr(); len = input ? strlen(input) : 0; - error = malloc_or_die(MAX_ERR_STR_SIZE + (len*2) + 3); - if (len) { - strcpy(error, input); - error[len] = '\n'; + strcpy(error_buf, input); + error_buf[len] = '\n'; for (i = 1; i < len && i < index; i++) - error[len+i] = ' '; - error[len + i] = '^'; - error[len + i + 1] = '\n'; + error_buf[len+i] = ' '; + error_buf[len + i] = '^'; + error_buf[len + i + 1] = '\n'; len += i+2; } va_start(ap, fmt); - vsnprintf(error + len, MAX_ERR_STR_SIZE, fmt, ap); + vsnprintf(error_buf + len, PEVENT_FILTER_ERROR_BUFSZ - len, fmt, ap); va_end(ap); - - *error_str = error; } static void free_token(char *token) @@ -96,7 +85,11 @@ static enum event_type read_token(char **tok) (strcmp(token, "=") == 0 || strcmp(token, "!") == 0) && pevent_peek_char() == '~') { /* append it */ - *tok = malloc_or_die(3); + *tok = malloc(3); + if (*tok == NULL) { + free_token(token); + return EVENT_ERROR; + } sprintf(*tok, "%c%c", *token, '~'); free_token(token); /* Now remove the '~' from the buffer */ @@ -148,11 +141,13 @@ add_filter_type(struct event_filter *filter, int id) if (filter_type) return filter_type; - filter->event_filters = realloc(filter->event_filters, - sizeof(*filter->event_filters) * - (filter->filters + 1)); - if (!filter->event_filters) - die("Could not allocate filter"); + filter_type = realloc(filter->event_filters, + sizeof(*filter->event_filters) * + (filter->filters + 1)); + if (!filter_type) + return NULL; + + filter->event_filters = filter_type; for (i = 0; i < filter->filters; i++) { if (filter->event_filters[i].event_id > id) @@ -183,7 +178,10 @@ struct event_filter *pevent_filter_alloc(struct pevent *pevent) { struct event_filter *filter; - filter = malloc_or_die(sizeof(*filter)); + filter = malloc(sizeof(*filter)); + if (filter == NULL) + return NULL; + memset(filter, 0, sizeof(*filter)); filter->pevent = pevent; pevent_ref(pevent); @@ -193,12 +191,7 @@ struct event_filter *pevent_filter_alloc(struct pevent *pevent) static struct filter_arg *allocate_arg(void) { - struct filter_arg *arg; - - arg = malloc_or_die(sizeof(*arg)); - memset(arg, 0, sizeof(*arg)); - - return arg; + return calloc(1, sizeof(struct filter_arg)); } static void free_arg(struct filter_arg *arg) @@ -243,15 +236,19 @@ static void free_arg(struct filter_arg *arg) free(arg); } -static void add_event(struct event_list **events, +static int add_event(struct event_list **events, struct event_format *event) { struct event_list *list; - list = malloc_or_die(sizeof(*list)); + list = malloc(sizeof(*list)); + if (list == NULL) + return -1; + list->next = *events; *events = list; list->event = event; + return 0; } static int event_match(struct event_format *event, @@ -266,7 +263,7 @@ static int event_match(struct 
event_format *event, !regexec(ereg, event->name, 0, NULL, 0); } -static int +static enum pevent_errno find_event(struct pevent *pevent, struct event_list **events, char *sys_name, char *event_name) { @@ -274,6 +271,7 @@ find_event(struct pevent *pevent, struct event_list **events, regex_t ereg; regex_t sreg; int match = 0; + int fail = 0; char *reg; int ret; int i; @@ -284,23 +282,31 @@ find_event(struct pevent *pevent, struct event_list **events, sys_name = NULL; } - reg = malloc_or_die(strlen(event_name) + 3); + reg = malloc(strlen(event_name) + 3); + if (reg == NULL) + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + sprintf(reg, "^%s$", event_name); ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB); free(reg); if (ret) - return -1; + return PEVENT_ERRNO__INVALID_EVENT_NAME; if (sys_name) { - reg = malloc_or_die(strlen(sys_name) + 3); + reg = malloc(strlen(sys_name) + 3); + if (reg == NULL) { + regfree(&ereg); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } + sprintf(reg, "^%s$", sys_name); ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB); free(reg); if (ret) { regfree(&ereg); - return -1; + return PEVENT_ERRNO__INVALID_EVENT_NAME; } } @@ -308,7 +314,10 @@ find_event(struct pevent *pevent, struct event_list **events, event = pevent->events[i]; if (event_match(event, sys_name ? &sreg : NULL, &ereg)) { match = 1; - add_event(events, event); + if (add_event(events, event) < 0) { + fail = 1; + break; + } } } @@ -317,7 +326,9 @@ find_event(struct pevent *pevent, struct event_list **events, regfree(&sreg); if (!match) - return -1; + return PEVENT_ERRNO__EVENT_NOT_FOUND; + if (fail) + return PEVENT_ERRNO__MEM_ALLOC_FAILED; return 0; } @@ -333,14 +344,18 @@ static void free_events(struct event_list *events) } } -static struct filter_arg * +static enum pevent_errno create_arg_item(struct event_format *event, const char *token, - enum event_type type, char **error_str) + enum event_type type, struct filter_arg **parg, char *error_str) { struct format_field *field; struct filter_arg *arg; arg = allocate_arg(); + if (arg == NULL) { + show_error(error_str, "failed to allocate filter arg"); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } switch (type) { @@ -350,8 +365,11 @@ create_arg_item(struct event_format *event, const char *token, arg->value.type = type == EVENT_DQUOTE ? 
FILTER_STRING : FILTER_CHAR; arg->value.str = strdup(token); - if (!arg->value.str) - die("malloc string"); + if (!arg->value.str) { + free_arg(arg); + show_error(error_str, "failed to allocate string filter arg"); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } break; case EVENT_ITEM: /* if it is a number, then convert it */ @@ -378,11 +396,11 @@ create_arg_item(struct event_format *event, const char *token, break; default: free_arg(arg); - show_error(error_str, "expected a value but found %s", - token); - return NULL; + show_error(error_str, "expected a value but found %s", token); + return PEVENT_ERRNO__UNEXPECTED_TYPE; } - return arg; + *parg = arg; + return 0; } static struct filter_arg * @@ -391,6 +409,9 @@ create_arg_op(enum filter_op_type btype) struct filter_arg *arg; arg = allocate_arg(); + if (!arg) + return NULL; + arg->type = FILTER_ARG_OP; arg->op.type = btype; @@ -403,6 +424,9 @@ create_arg_exp(enum filter_exp_type etype) struct filter_arg *arg; arg = allocate_arg(); + if (!arg) + return NULL; + arg->type = FILTER_ARG_EXP; arg->op.type = etype; @@ -415,6 +439,9 @@ create_arg_cmp(enum filter_exp_type etype) struct filter_arg *arg; arg = allocate_arg(); + if (!arg) + return NULL; + /* Use NUM and change if necessary */ arg->type = FILTER_ARG_NUM; arg->op.type = etype; @@ -422,8 +449,8 @@ create_arg_cmp(enum filter_exp_type etype) return arg; } -static int add_right(struct filter_arg *op, struct filter_arg *arg, - char **error_str) +static enum pevent_errno +add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str) { struct filter_arg *left; char *str; @@ -454,9 +481,8 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg, case FILTER_ARG_FIELD: break; default: - show_error(error_str, - "Illegal rvalue"); - return -1; + show_error(error_str, "Illegal rvalue"); + return PEVENT_ERRNO__ILLEGAL_RVALUE; } /* @@ -503,7 +529,7 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg, if (left->type != FILTER_ARG_FIELD) { show_error(error_str, "Illegal lvalue for string comparison"); - return -1; + return PEVENT_ERRNO__ILLEGAL_LVALUE; } /* Make sure this is a valid string compare */ @@ -522,25 +548,31 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg, show_error(error_str, "RegEx '%s' did not compute", str); - return -1; + return PEVENT_ERRNO__INVALID_REGEX; } break; default: show_error(error_str, "Illegal comparison for string"); - return -1; + return PEVENT_ERRNO__ILLEGAL_STRING_CMP; } op->type = FILTER_ARG_STR; op->str.type = op_type; op->str.field = left->field.field; op->str.val = strdup(str); - if (!op->str.val) - die("malloc string"); + if (!op->str.val) { + show_error(error_str, "Failed to allocate string filter"); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } /* * Need a buffer to copy data for tests */ - op->str.buffer = malloc_or_die(op->str.field->size + 1); + op->str.buffer = malloc(op->str.field->size + 1); + if (!op->str.buffer) { + show_error(error_str, "Failed to allocate string filter"); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } /* Null terminate this buffer */ op->str.buffer[op->str.field->size] = 0; @@ -558,7 +590,7 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg, case FILTER_CMP_NOT_REGEX: show_error(error_str, "Op not allowed with integers"); - return -1; + return PEVENT_ERRNO__ILLEGAL_INTEGER_CMP; default: break; @@ -578,9 +610,8 @@ static int add_right(struct filter_arg *op, struct filter_arg *arg, return 0; out_fail: - show_error(error_str, - "Syntax error"); - return -1; + 
show_error(error_str, "Syntax error"); + return PEVENT_ERRNO__SYNTAX_ERROR; } static struct filter_arg * @@ -593,7 +624,7 @@ rotate_op_right(struct filter_arg *a, struct filter_arg *b) return arg; } -static int add_left(struct filter_arg *op, struct filter_arg *arg) +static enum pevent_errno add_left(struct filter_arg *op, struct filter_arg *arg) { switch (op->type) { case FILTER_ARG_EXP: @@ -612,11 +643,11 @@ static int add_left(struct filter_arg *op, struct filter_arg *arg) /* left arg of compares must be a field */ if (arg->type != FILTER_ARG_FIELD && arg->type != FILTER_ARG_BOOLEAN) - return -1; + return PEVENT_ERRNO__INVALID_ARG_TYPE; op->num.left = arg; break; default: - return -1; + return PEVENT_ERRNO__INVALID_ARG_TYPE; } return 0; } @@ -729,15 +760,18 @@ enum filter_vals { FILTER_VAL_TRUE, }; -void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child, - struct filter_arg *arg) +static enum pevent_errno +reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child, + struct filter_arg *arg, char *error_str) { struct filter_arg *other_child; struct filter_arg **ptr; if (parent->type != FILTER_ARG_OP && - arg->type != FILTER_ARG_OP) - die("can not reparent other than OP"); + arg->type != FILTER_ARG_OP) { + show_error(error_str, "can not reparent other than OP"); + return PEVENT_ERRNO__REPARENT_NOT_OP; + } /* Get the sibling */ if (old_child->op.right == arg) { @@ -746,8 +780,10 @@ void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child, } else if (old_child->op.left == arg) { ptr = &old_child->op.left; other_child = old_child->op.right; - } else - die("Error in reparent op, find other child"); + } else { + show_error(error_str, "Error in reparent op, find other child"); + return PEVENT_ERRNO__REPARENT_FAILED; + } /* Detach arg from old_child */ *ptr = NULL; @@ -758,23 +794,29 @@ void reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child, *parent = *arg; /* Free arg without recussion */ free(arg); - return; + return 0; } if (parent->op.right == old_child) ptr = &parent->op.right; else if (parent->op.left == old_child) ptr = &parent->op.left; - else - die("Error in reparent op"); + else { + show_error(error_str, "Error in reparent op"); + return PEVENT_ERRNO__REPARENT_FAILED; + } + *ptr = arg; free_arg(old_child); + return 0; } -enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg) +/* Returns either filter_vals (success) or pevent_errno (failfure) */ +static int test_arg(struct filter_arg *parent, struct filter_arg *arg, + char *error_str) { - enum filter_vals lval, rval; + int lval, rval; switch (arg->type) { @@ -789,63 +831,68 @@ enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg) return FILTER_VAL_NORM; case FILTER_ARG_EXP: - lval = test_arg(arg, arg->exp.left); + lval = test_arg(arg, arg->exp.left, error_str); if (lval != FILTER_VAL_NORM) return lval; - rval = test_arg(arg, arg->exp.right); + rval = test_arg(arg, arg->exp.right, error_str); if (rval != FILTER_VAL_NORM) return rval; return FILTER_VAL_NORM; case FILTER_ARG_NUM: - lval = test_arg(arg, arg->num.left); + lval = test_arg(arg, arg->num.left, error_str); if (lval != FILTER_VAL_NORM) return lval; - rval = test_arg(arg, arg->num.right); + rval = test_arg(arg, arg->num.right, error_str); if (rval != FILTER_VAL_NORM) return rval; return FILTER_VAL_NORM; case FILTER_ARG_OP: if (arg->op.type != FILTER_OP_NOT) { - lval = test_arg(arg, arg->op.left); + lval = test_arg(arg, arg->op.left, error_str); switch (lval) { 
case FILTER_VAL_NORM: break; case FILTER_VAL_TRUE: if (arg->op.type == FILTER_OP_OR) return FILTER_VAL_TRUE; - rval = test_arg(arg, arg->op.right); + rval = test_arg(arg, arg->op.right, error_str); if (rval != FILTER_VAL_NORM) return rval; - reparent_op_arg(parent, arg, arg->op.right); - return FILTER_VAL_NORM; + return reparent_op_arg(parent, arg, arg->op.right, + error_str); case FILTER_VAL_FALSE: if (arg->op.type == FILTER_OP_AND) return FILTER_VAL_FALSE; - rval = test_arg(arg, arg->op.right); + rval = test_arg(arg, arg->op.right, error_str); if (rval != FILTER_VAL_NORM) return rval; - reparent_op_arg(parent, arg, arg->op.right); - return FILTER_VAL_NORM; + return reparent_op_arg(parent, arg, arg->op.right, + error_str); + + default: + return lval; } } - rval = test_arg(arg, arg->op.right); + rval = test_arg(arg, arg->op.right, error_str); switch (rval) { case FILTER_VAL_NORM: + default: break; + case FILTER_VAL_TRUE: if (arg->op.type == FILTER_OP_OR) return FILTER_VAL_TRUE; if (arg->op.type == FILTER_OP_NOT) return FILTER_VAL_FALSE; - reparent_op_arg(parent, arg, arg->op.left); - return FILTER_VAL_NORM; + return reparent_op_arg(parent, arg, arg->op.left, + error_str); case FILTER_VAL_FALSE: if (arg->op.type == FILTER_OP_AND) @@ -853,41 +900,56 @@ enum filter_vals test_arg(struct filter_arg *parent, struct filter_arg *arg) if (arg->op.type == FILTER_OP_NOT) return FILTER_VAL_TRUE; - reparent_op_arg(parent, arg, arg->op.left); - return FILTER_VAL_NORM; + return reparent_op_arg(parent, arg, arg->op.left, + error_str); } - return FILTER_VAL_NORM; + return rval; default: - die("bad arg in filter tree"); + show_error(error_str, "bad arg in filter tree"); + return PEVENT_ERRNO__BAD_FILTER_ARG; } return FILTER_VAL_NORM; } /* Remove any unknown event fields */ -static struct filter_arg *collapse_tree(struct filter_arg *arg) +static int collapse_tree(struct filter_arg *arg, + struct filter_arg **arg_collapsed, char *error_str) { - enum filter_vals ret; + int ret; - ret = test_arg(arg, arg); + ret = test_arg(arg, arg, error_str); switch (ret) { case FILTER_VAL_NORM: - return arg; + break; case FILTER_VAL_TRUE: case FILTER_VAL_FALSE: free_arg(arg); arg = allocate_arg(); - arg->type = FILTER_ARG_BOOLEAN; - arg->boolean.value = ret == FILTER_VAL_TRUE; + if (arg) { + arg->type = FILTER_ARG_BOOLEAN; + arg->boolean.value = ret == FILTER_VAL_TRUE; + } else { + show_error(error_str, "Failed to allocate filter arg"); + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + } + break; + + default: + /* test_arg() already set the error_str */ + free_arg(arg); + arg = NULL; + break; } - return arg; + *arg_collapsed = arg; + return ret; } -static int +static enum pevent_errno process_filter(struct event_format *event, struct filter_arg **parg, - char **error_str, int not) + char *error_str, int not) { enum event_type type; char *token = NULL; @@ -899,7 +961,7 @@ process_filter(struct event_format *event, struct filter_arg **parg, enum filter_op_type btype; enum filter_exp_type etype; enum filter_cmp_type ctype; - int ret; + enum pevent_errno ret; *parg = NULL; @@ -910,8 +972,8 @@ process_filter(struct event_format *event, struct filter_arg **parg, case EVENT_SQUOTE: case EVENT_DQUOTE: case EVENT_ITEM: - arg = create_arg_item(event, token, type, error_str); - if (!arg) + ret = create_arg_item(event, token, type, &arg, error_str); + if (ret < 0) goto fail; if (!left_item) left_item = arg; @@ -924,20 +986,20 @@ process_filter(struct event_format *event, struct filter_arg **parg, if (not) { arg = NULL; if (current_op) - goto 
fail_print; + goto fail_syntax; free(token); *parg = current_exp; return 0; } } else - goto fail_print; + goto fail_syntax; arg = NULL; break; case EVENT_DELIM: if (*token == ',') { - show_error(error_str, - "Illegal token ','"); + show_error(error_str, "Illegal token ','"); + ret = PEVENT_ERRNO__ILLEGAL_TOKEN; goto fail; } @@ -945,19 +1007,23 @@ process_filter(struct event_format *event, struct filter_arg **parg, if (left_item) { show_error(error_str, "Open paren can not come after item"); + ret = PEVENT_ERRNO__INVALID_PAREN; goto fail; } if (current_exp) { show_error(error_str, "Open paren can not come after expression"); + ret = PEVENT_ERRNO__INVALID_PAREN; goto fail; } ret = process_filter(event, &arg, error_str, 0); - if (ret != 1) { - if (ret == 0) + if (ret != PEVENT_ERRNO__UNBALANCED_PAREN) { + if (ret == 0) { show_error(error_str, "Unbalanced number of '('"); + ret = PEVENT_ERRNO__UNBALANCED_PAREN; + } goto fail; } ret = 0; @@ -965,7 +1031,7 @@ process_filter(struct event_format *event, struct filter_arg **parg, /* A not wants just one expression */ if (not) { if (current_op) - goto fail_print; + goto fail_syntax; *parg = arg; return 0; } @@ -980,19 +1046,19 @@ process_filter(struct event_format *event, struct filter_arg **parg, } else { /* ')' */ if (!current_op && !current_exp) - goto fail_print; + goto fail_syntax; /* Make sure everything is finished at this level */ if (current_exp && !check_op_done(current_exp)) - goto fail_print; + goto fail_syntax; if (current_op && !check_op_done(current_op)) - goto fail_print; + goto fail_syntax; if (current_op) *parg = current_op; else *parg = current_exp; - return 1; + return PEVENT_ERRNO__UNBALANCED_PAREN; } break; @@ -1004,21 +1070,22 @@ process_filter(struct event_format *event, struct filter_arg **parg, case OP_BOOL: /* Logic ops need a left expression */ if (!current_exp && !current_op) - goto fail_print; + goto fail_syntax; /* fall through */ case OP_NOT: /* logic only processes ops and exp */ if (left_item) - goto fail_print; + goto fail_syntax; break; case OP_EXP: case OP_CMP: if (!left_item) - goto fail_print; + goto fail_syntax; break; case OP_NONE: show_error(error_str, "Unknown op token %s", token); + ret = PEVENT_ERRNO__UNKNOWN_TOKEN; goto fail; } @@ -1026,6 +1093,8 @@ process_filter(struct event_format *event, struct filter_arg **parg, switch (op_type) { case OP_BOOL: arg = create_arg_op(btype); + if (arg == NULL) + goto fail_alloc; if (current_op) ret = add_left(arg, current_op); else @@ -1036,6 +1105,8 @@ process_filter(struct event_format *event, struct filter_arg **parg, case OP_NOT: arg = create_arg_op(btype); + if (arg == NULL) + goto fail_alloc; if (current_op) ret = add_right(current_op, arg, error_str); if (ret < 0) @@ -1055,6 +1126,8 @@ process_filter(struct event_format *event, struct filter_arg **parg, arg = create_arg_exp(etype); else arg = create_arg_cmp(ctype); + if (arg == NULL) + goto fail_alloc; if (current_op) ret = add_right(current_op, arg, error_str); @@ -1063,7 +1136,7 @@ process_filter(struct event_format *event, struct filter_arg **parg, ret = add_left(arg, left_item); if (ret < 0) { arg = NULL; - goto fail_print; + goto fail_syntax; } current_exp = arg; break; @@ -1072,57 +1145,64 @@ process_filter(struct event_format *event, struct filter_arg **parg, } arg = NULL; if (ret < 0) - goto fail_print; + goto fail_syntax; break; case EVENT_NONE: break; + case EVENT_ERROR: + goto fail_alloc; default: - goto fail_print; + goto fail_syntax; } } while (type != EVENT_NONE); if (!current_op && !current_exp) - 
goto fail_print; + goto fail_syntax; if (!current_op) current_op = current_exp; - current_op = collapse_tree(current_op); + ret = collapse_tree(current_op, parg, error_str); + if (ret < 0) + goto fail; *parg = current_op; return 0; - fail_print: + fail_alloc: + show_error(error_str, "failed to allocate filter arg"); + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + goto fail; + fail_syntax: show_error(error_str, "Syntax error"); + ret = PEVENT_ERRNO__SYNTAX_ERROR; fail: free_arg(current_op); free_arg(current_exp); free_arg(arg); free(token); - return -1; + return ret; } -static int +static enum pevent_errno process_event(struct event_format *event, const char *filter_str, - struct filter_arg **parg, char **error_str) + struct filter_arg **parg, char *error_str) { int ret; pevent_buffer_init(filter_str, strlen(filter_str)); ret = process_filter(event, parg, error_str, 0); - if (ret == 1) { - show_error(error_str, - "Unbalanced number of ')'"); - return -1; - } if (ret < 0) return ret; /* If parg is NULL, then make it into FALSE */ if (!*parg) { *parg = allocate_arg(); + if (*parg == NULL) + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + (*parg)->type = FILTER_ARG_BOOLEAN; (*parg)->boolean.value = FILTER_FALSE; } @@ -1130,13 +1210,13 @@ process_event(struct event_format *event, const char *filter_str, return 0; } -static int filter_event(struct event_filter *filter, - struct event_format *event, - const char *filter_str, char **error_str) +static enum pevent_errno +filter_event(struct event_filter *filter, struct event_format *event, + const char *filter_str, char *error_str) { struct filter_type *filter_type; struct filter_arg *arg; - int ret; + enum pevent_errno ret; if (filter_str) { ret = process_event(event, filter_str, &arg, error_str); @@ -1146,11 +1226,17 @@ static int filter_event(struct event_filter *filter, } else { /* just add a TRUE arg */ arg = allocate_arg(); + if (arg == NULL) + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + arg->type = FILTER_ARG_BOOLEAN; arg->boolean.value = FILTER_TRUE; } filter_type = add_filter_type(filter, event->id); + if (filter_type == NULL) + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + if (filter_type->filter) free_arg(filter_type->filter); filter_type->filter = arg; @@ -1158,22 +1244,24 @@ static int filter_event(struct event_filter *filter, return 0; } +static void filter_init_error_buf(struct event_filter *filter) +{ + /* clear buffer to reset show error */ + pevent_buffer_init("", 0); + filter->error_buffer[0] = '\0'; +} + /** * pevent_filter_add_filter_str - add a new filter * @filter: the event filter to add to * @filter_str: the filter string that contains the filter - * @error_str: string containing reason for failed filter - * - * Returns 0 if the filter was successfully added - * -1 if there was an error. * - * On error, if @error_str points to a string pointer, - * it is set to the reason that the filter failed. - * This string must be freed with "free". + * Returns 0 if the filter was successfully added or a + * negative error code. Use pevent_filter_strerror() to see + * actual error message in case of error. 
*/ -int pevent_filter_add_filter_str(struct event_filter *filter, - const char *filter_str, - char **error_str) +enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter, + const char *filter_str) { struct pevent *pevent = filter->pevent; struct event_list *event; @@ -1184,15 +1272,11 @@ int pevent_filter_add_filter_str(struct event_filter *filter, char *event_name = NULL; char *sys_name = NULL; char *sp; - int rtn = 0; + enum pevent_errno rtn = 0; /* PEVENT_ERRNO__SUCCESS */ int len; int ret; - /* clear buffer to reset show error */ - pevent_buffer_init("", 0); - - if (error_str) - *error_str = NULL; + filter_init_error_buf(filter); filter_start = strchr(filter_str, ':'); if (filter_start) @@ -1200,7 +1284,6 @@ int pevent_filter_add_filter_str(struct event_filter *filter, else len = strlen(filter_str); - do { next_event = strchr(filter_str, ','); if (next_event && @@ -1211,7 +1294,12 @@ int pevent_filter_add_filter_str(struct event_filter *filter, else len = strlen(filter_str); - this_event = malloc_or_die(len + 1); + this_event = malloc(len + 1); + if (this_event == NULL) { + /* This can only happen when events is NULL, but still */ + free_events(events); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } memcpy(this_event, filter_str, len); this_event[len] = 0; @@ -1224,27 +1312,18 @@ int pevent_filter_add_filter_str(struct event_filter *filter, event_name = strtok_r(NULL, "/", &sp); if (!sys_name) { - show_error(error_str, "No filter found"); /* This can only happen when events is NULL, but still */ free_events(events); free(this_event); - return -1; + return PEVENT_ERRNO__FILTER_NOT_FOUND; } /* Find this event */ ret = find_event(pevent, &events, strim(sys_name), strim(event_name)); if (ret < 0) { - if (event_name) - show_error(error_str, - "No event found under '%s.%s'", - sys_name, event_name); - else - show_error(error_str, - "No event found under '%s'", - sys_name); free_events(events); free(this_event); - return -1; + return ret; } free(this_event); } while (filter_str); @@ -1256,7 +1335,7 @@ int pevent_filter_add_filter_str(struct event_filter *filter, /* filter starts here */ for (event = events; event; event = event->next) { ret = filter_event(filter, event->event, filter_start, - error_str); + filter->error_buffer); /* Failures are returned if a parse error happened */ if (ret < 0) rtn = ret; @@ -1264,8 +1343,10 @@ int pevent_filter_add_filter_str(struct event_filter *filter, if (ret >= 0 && pevent->test_filters) { char *test; test = pevent_filter_make_string(filter, event->event->id); - printf(" '%s: %s'\n", event->event->name, test); - free(test); + if (test) { + printf(" '%s: %s'\n", event->event->name, test); + free(test); + } } } @@ -1283,6 +1364,32 @@ static void free_filter_type(struct filter_type *filter_type) } /** + * pevent_filter_strerror - fill error message in a buffer + * @filter: the event filter contains error + * @err: the error code + * @buf: the buffer to be filled in + * @buflen: the size of the buffer + * + * Returns 0 if message was filled successfully, -1 if error + */ +int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err, + char *buf, size_t buflen) +{ + if (err <= __PEVENT_ERRNO__START || err >= __PEVENT_ERRNO__END) + return -1; + + if (strlen(filter->error_buffer) > 0) { + size_t len = snprintf(buf, buflen, "%s", filter->error_buffer); + + if (len > buflen) + return -1; + return 0; + } + + return pevent_strerror(filter->pevent, err, buf, buflen); +} + +/** * pevent_filter_remove_event - remove a filter for an 
event * @filter: the event filter to remove from * @event_id: the event to remove a filter for @@ -1375,6 +1482,9 @@ static int copy_filter_type(struct event_filter *filter, if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) { /* Add trivial event */ arg = allocate_arg(); + if (arg == NULL) + return -1; + arg->type = FILTER_ARG_BOOLEAN; if (strcmp(str, "TRUE") == 0) arg->boolean.value = 1; @@ -1382,6 +1492,9 @@ static int copy_filter_type(struct event_filter *filter, arg->boolean.value = 0; filter_type = add_filter_type(filter, event->id); + if (filter_type == NULL) + return -1; + filter_type->filter = arg; free(str); @@ -1483,8 +1596,10 @@ int pevent_update_trivial(struct event_filter *dest, struct event_filter *source * @type: remove only true, false, or both * * Removes filters that only contain a TRUE or FALES boolean arg. + * + * Returns 0 on success and -1 if there was a problem. */ -void pevent_filter_clear_trivial(struct event_filter *filter, +int pevent_filter_clear_trivial(struct event_filter *filter, enum filter_trivial_type type) { struct filter_type *filter_type; @@ -1493,13 +1608,15 @@ void pevent_filter_clear_trivial(struct event_filter *filter, int i; if (!filter->filters) - return; + return 0; /* * Two steps, first get all ids with trivial filters. * then remove those ids. */ for (i = 0; i < filter->filters; i++) { + int *new_ids; + filter_type = &filter->event_filters[i]; if (filter_type->filter->type != FILTER_ARG_BOOLEAN) continue; @@ -1514,19 +1631,24 @@ void pevent_filter_clear_trivial(struct event_filter *filter, break; } - ids = realloc(ids, sizeof(*ids) * (count + 1)); - if (!ids) - die("Can't allocate ids"); + new_ids = realloc(ids, sizeof(*ids) * (count + 1)); + if (!new_ids) { + free(ids); + return -1; + } + + ids = new_ids; ids[count++] = filter_type->event_id; } if (!count) - return; + return 0; for (i = 0; i < count; i++) pevent_filter_remove_event(filter, ids[i]); free(ids); + return 0; } /** @@ -1566,8 +1688,8 @@ int pevent_filter_event_has_trivial(struct event_filter *filter, } } -static int test_filter(struct event_format *event, - struct filter_arg *arg, struct pevent_record *record); +static int test_filter(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err); static const char * get_comm(struct event_format *event, struct pevent_record *record) @@ -1613,15 +1735,24 @@ get_value(struct event_format *event, } static unsigned long long -get_arg_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record); +get_arg_value(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err); static unsigned long long -get_exp_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record) +get_exp_value(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err) { unsigned long long lval, rval; - lval = get_arg_value(event, arg->exp.left, record); - rval = get_arg_value(event, arg->exp.right, record); + lval = get_arg_value(event, arg->exp.left, record, err); + rval = get_arg_value(event, arg->exp.right, record, err); + + if (*err) { + /* + * There was an error, no need to process anymore. 
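+	 * Once set, *err is never overwritten (every assignment is
+	 * guarded by "if (!*err)") and test_filter() returns early when
+	 * it is non-zero, so the top-level caller only needs to check
+	 * the error once.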
+ */ + return 0; + } switch (arg->exp.type) { case FILTER_EXP_ADD: @@ -1656,39 +1787,51 @@ get_exp_value(struct event_format *event, struct filter_arg *arg, struct pevent_ case FILTER_EXP_NOT: default: - die("error in exp"); + if (!*err) + *err = PEVENT_ERRNO__INVALID_EXP_TYPE; } return 0; } static unsigned long long -get_arg_value(struct event_format *event, struct filter_arg *arg, struct pevent_record *record) +get_arg_value(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err) { switch (arg->type) { case FILTER_ARG_FIELD: return get_value(event, arg->field.field, record); case FILTER_ARG_VALUE: - if (arg->value.type != FILTER_NUMBER) - die("must have number field!"); + if (arg->value.type != FILTER_NUMBER) { + if (!*err) + *err = PEVENT_ERRNO__NOT_A_NUMBER; + } return arg->value.val; case FILTER_ARG_EXP: - return get_exp_value(event, arg, record); + return get_exp_value(event, arg, record, err); default: - die("oops in filter"); + if (!*err) + *err = PEVENT_ERRNO__INVALID_ARG_TYPE; } return 0; } -static int test_num(struct event_format *event, - struct filter_arg *arg, struct pevent_record *record) +static int test_num(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err) { unsigned long long lval, rval; - lval = get_arg_value(event, arg->num.left, record); - rval = get_arg_value(event, arg->num.right, record); + lval = get_arg_value(event, arg->num.left, record, err); + rval = get_arg_value(event, arg->num.right, record, err); + + if (*err) { + /* + * There was an error, no need to process anymore. + */ + return 0; + } switch (arg->num.type) { case FILTER_CMP_EQ: @@ -1710,7 +1853,8 @@ static int test_num(struct event_format *event, return lval <= rval; default: - /* ?? */ + if (!*err) + *err = PEVENT_ERRNO__ILLEGAL_INTEGER_CMP; return 0; } } @@ -1757,8 +1901,8 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r return val; } -static int test_str(struct event_format *event, - struct filter_arg *arg, struct pevent_record *record) +static int test_str(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err) { const char *val; @@ -1782,48 +1926,57 @@ static int test_str(struct event_format *event, return regexec(&arg->str.reg, val, 0, NULL, 0); default: - /* ?? */ + if (!*err) + *err = PEVENT_ERRNO__ILLEGAL_STRING_CMP; return 0; } } -static int test_op(struct event_format *event, - struct filter_arg *arg, struct pevent_record *record) +static int test_op(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err) { switch (arg->op.type) { case FILTER_OP_AND: - return test_filter(event, arg->op.left, record) && - test_filter(event, arg->op.right, record); + return test_filter(event, arg->op.left, record, err) && + test_filter(event, arg->op.right, record, err); case FILTER_OP_OR: - return test_filter(event, arg->op.left, record) || - test_filter(event, arg->op.right, record); + return test_filter(event, arg->op.left, record, err) || + test_filter(event, arg->op.right, record, err); case FILTER_OP_NOT: - return !test_filter(event, arg->op.right, record); + return !test_filter(event, arg->op.right, record, err); default: - /* ?? 
*/ + if (!*err) + *err = PEVENT_ERRNO__INVALID_OP_TYPE; return 0; } } -static int test_filter(struct event_format *event, - struct filter_arg *arg, struct pevent_record *record) +static int test_filter(struct event_format *event, struct filter_arg *arg, + struct pevent_record *record, enum pevent_errno *err) { + if (*err) { + /* + * There was an error, no need to process anymore. + */ + return 0; + } + switch (arg->type) { case FILTER_ARG_BOOLEAN: /* easy case */ return arg->boolean.value; case FILTER_ARG_OP: - return test_op(event, arg, record); + return test_op(event, arg, record, err); case FILTER_ARG_NUM: - return test_num(event, arg, record); + return test_num(event, arg, record, err); case FILTER_ARG_STR: - return test_str(event, arg, record); + return test_str(event, arg, record, err); case FILTER_ARG_EXP: case FILTER_ARG_VALUE: @@ -1832,11 +1985,11 @@ static int test_filter(struct event_format *event, * Expressions, fields and values evaluate * to true if they return non zero */ - return !!get_arg_value(event, arg, record); + return !!get_arg_value(event, arg, record, err); default: - die("oops!"); - /* ?? */ + if (!*err) + *err = PEVENT_ERRNO__INVALID_ARG_TYPE; return 0; } } @@ -1849,8 +2002,7 @@ static int test_filter(struct event_format *event, * Returns 1 if filter found for @event_id * otherwise 0; */ -int pevent_event_filtered(struct event_filter *filter, - int event_id) +int pevent_event_filtered(struct event_filter *filter, int event_id) { struct filter_type *filter_type; @@ -1867,31 +2019,38 @@ int pevent_event_filtered(struct event_filter *filter, * @filter: filter struct with filter information * @record: the record to test against the filter * - * Returns: - * 1 - filter found for event and @record matches - * 0 - filter found for event and @record does not match - * -1 - no filter found for @record's event - * -2 - if no filters exist + * Returns: match result or error code (prefixed with PEVENT_ERRNO__) + * FILTER_MATCH - filter found for event and @record matches + * FILTER_MISS - filter found for event and @record does not match + * FILTER_NOT_FOUND - no filter found for @record's event + * NO_FILTER - if no filters exist + * otherwise - error occurred during test */ -int pevent_filter_match(struct event_filter *filter, - struct pevent_record *record) +enum pevent_errno pevent_filter_match(struct event_filter *filter, + struct pevent_record *record) { struct pevent *pevent = filter->pevent; struct filter_type *filter_type; int event_id; + int ret; + enum pevent_errno err = 0; + + filter_init_error_buf(filter); if (!filter->filters) - return FILTER_NONE; + return PEVENT_ERRNO__NO_FILTER; event_id = pevent_data_type(pevent, record); filter_type = find_filter_type(filter, event_id); - if (!filter_type) - return FILTER_NOEXIST; + return PEVENT_ERRNO__FILTER_NOT_FOUND; + + ret = test_filter(filter_type->event, filter_type->filter, record, &err); + if (err) + return err; - return test_filter(filter_type->event, filter_type->filter, record) ? - FILTER_MATCH : FILTER_MISS; + return ret ? 
PEVENT_ERRNO__FILTER_MATCH : PEVENT_ERRNO__FILTER_MISS; } static char *op_to_str(struct event_filter *filter, struct filter_arg *arg) @@ -1903,7 +2062,6 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg) int left_val = -1; int right_val = -1; int val; - int len; switch (arg->op.type) { case FILTER_OP_AND: @@ -1950,11 +2108,7 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg) default: break; } - str = malloc_or_die(6); - if (val) - strcpy(str, "TRUE"); - else - strcpy(str, "FALSE"); + asprintf(&str, val ? "TRUE" : "FALSE"); break; } } @@ -1972,10 +2126,7 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg) break; } - len = strlen(left) + strlen(right) + strlen(op) + 10; - str = malloc_or_die(len); - snprintf(str, len, "(%s) %s (%s)", - left, op, right); + asprintf(&str, "(%s) %s (%s)", left, op, right); break; case FILTER_OP_NOT: @@ -1991,16 +2142,10 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg) right_val = 0; if (right_val >= 0) { /* just return the opposite */ - str = malloc_or_die(6); - if (right_val) - strcpy(str, "FALSE"); - else - strcpy(str, "TRUE"); + asprintf(&str, right_val ? "FALSE" : "TRUE"); break; } - len = strlen(right) + strlen(op) + 3; - str = malloc_or_die(len); - snprintf(str, len, "%s(%s)", op, right); + asprintf(&str, "%s(%s)", op, right); break; default: @@ -2014,11 +2159,9 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg) static char *val_to_str(struct event_filter *filter, struct filter_arg *arg) { - char *str; - - str = malloc_or_die(30); + char *str = NULL; - snprintf(str, 30, "%lld", arg->value.val); + asprintf(&str, "%lld", arg->value.val); return str; } @@ -2034,7 +2177,6 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg) char *rstr; char *op; char *str = NULL; - int len; lstr = arg_to_str(filter, arg->exp.left); rstr = arg_to_str(filter, arg->exp.right); @@ -2073,12 +2215,11 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg) op = "^"; break; default: - die("oops in exp"); + op = "[ERROR IN EXPRESSION TYPE]"; + break; } - len = strlen(op) + strlen(lstr) + strlen(rstr) + 4; - str = malloc_or_die(len); - snprintf(str, len, "%s %s %s", lstr, op, rstr); + asprintf(&str, "%s %s %s", lstr, op, rstr); out: free(lstr); free(rstr); @@ -2092,7 +2233,6 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg) char *rstr; char *str = NULL; char *op = NULL; - int len; lstr = arg_to_str(filter, arg->num.left); rstr = arg_to_str(filter, arg->num.right); @@ -2123,10 +2263,7 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg) if (!op) op = "<="; - len = strlen(lstr) + strlen(op) + strlen(rstr) + 4; - str = malloc_or_die(len); - sprintf(str, "%s %s %s", lstr, op, rstr); - + asprintf(&str, "%s %s %s", lstr, op, rstr); break; default: @@ -2144,7 +2281,6 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg) { char *str = NULL; char *op = NULL; - int len; switch (arg->str.type) { case FILTER_CMP_MATCH: @@ -2162,12 +2298,8 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg) if (!op) op = "!~"; - len = strlen(arg->str.field->name) + strlen(op) + - strlen(arg->str.val) + 6; - str = malloc_or_die(len); - snprintf(str, len, "%s %s \"%s\"", - arg->str.field->name, - op, arg->str.val); + asprintf(&str, "%s %s \"%s\"", + arg->str.field->name, op, arg->str.val); break; default: @@ -2179,15 
+2311,11 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg) static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg) { - char *str; + char *str = NULL; switch (arg->type) { case FILTER_ARG_BOOLEAN: - str = malloc_or_die(6); - if (arg->boolean.value) - strcpy(str, "TRUE"); - else - strcpy(str, "FALSE"); + asprintf(&str, arg->boolean.value ? "TRUE" : "FALSE"); return str; case FILTER_ARG_OP: @@ -2222,7 +2350,7 @@ static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg) * * Returns a string that displays the filter contents. * This string must be freed with free(str). - * NULL is returned if no filter is found. + * NULL is returned if no filter is found or allocation failed. */ char * pevent_filter_make_string(struct event_filter *filter, int event_id) diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c index f023a133abb..eda07fa31dc 100644 --- a/tools/lib/traceevent/parse-utils.c +++ b/tools/lib/traceevent/parse-utils.c @@ -1,3 +1,22 @@ +/* + * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -6,40 +25,6 @@ #define __weak __attribute__((weak)) -void __vdie(const char *fmt, va_list ap) -{ - int ret = errno; - - if (errno) - perror("trace-cmd"); - else - ret = -1; - - fprintf(stderr, " "); - vfprintf(stderr, fmt, ap); - - fprintf(stderr, "\n"); - exit(ret); -} - -void __die(const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - __vdie(fmt, ap); - va_end(ap); -} - -void __weak die(const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - __vdie(fmt, ap); - va_end(ap); -} - void __vwarning(const char *fmt, va_list ap) { if (errno) @@ -98,13 +83,3 @@ void __weak pr_stat(const char *fmt, ...) __vpr_stat(fmt, ap); va_end(ap); } - -void __weak *malloc_or_die(unsigned int size) -{ - void *data; - - data = malloc(size); - if (!data) - die("malloc"); - return data; -} diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugin_cfg80211.c new file mode 100644 index 00000000000..c066b25905f --- /dev/null +++ b/tools/lib/traceevent/plugin_cfg80211.c @@ -0,0 +1,30 @@ +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include <endian.h> +#include "event-parse.h" + +static unsigned long long +process___le16_to_cpup(struct trace_seq *s, + unsigned long long *args) +{ + uint16_t *val = (uint16_t *) (unsigned long) args[0]; + return val ? 
(long long) le16toh(*val) : 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_print_function(pevent, + process___le16_to_cpup, + PEVENT_FUNC_ARG_INT, + "__le16_to_cpup", + PEVENT_FUNC_ARG_PTR, + PEVENT_FUNC_ARG_VOID); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_print_function(pevent, process___le16_to_cpup, + "__le16_to_cpup"); +} diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c new file mode 100644 index 00000000000..a00ec190821 --- /dev/null +++ b/tools/lib/traceevent/plugin_function.c @@ -0,0 +1,194 @@ +/* + * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "event-parse.h" +#include "event-utils.h" + +static struct func_stack { + int size; + char **stack; +} *fstack; + +static int cpus = -1; + +#define STK_BLK 10 + +struct pevent_plugin_option plugin_options[] = +{ + { + .name = "parent", + .plugin_alias = "ftrace", + .description = + "Print parent of functions for function events", + }, + { + .name = "indent", + .plugin_alias = "ftrace", + .description = + "Try to show function call indents, based on parents", + .set = 1, + }, + { + .name = NULL, + } +}; + +static struct pevent_plugin_option *ftrace_parent = &plugin_options[0]; +static struct pevent_plugin_option *ftrace_indent = &plugin_options[1]; + +static void add_child(struct func_stack *stack, const char *child, int pos) +{ + int i; + + if (!child) + return; + + if (pos < stack->size) + free(stack->stack[pos]); + else { + char **ptr; + + ptr = realloc(stack->stack, sizeof(char *) * + (stack->size + STK_BLK)); + if (!ptr) { + warning("could not allocate plugin memory\n"); + return; + } + + stack->stack = ptr; + + for (i = stack->size; i < stack->size + STK_BLK; i++) + stack->stack[i] = NULL; + stack->size += STK_BLK; + } + + stack->stack[pos] = strdup(child); +} + +static int add_and_get_index(const char *parent, const char *child, int cpu) +{ + int i; + + if (cpu < 0) + return 0; + + if (cpu > cpus) { + struct func_stack *ptr; + + ptr = realloc(fstack, sizeof(*fstack) * (cpu + 1)); + if (!ptr) { + warning("could not allocate plugin memory\n"); + return 0; + } + + fstack = ptr; + + /* Account for holes in the cpu count */ + for (i = cpus + 1; i <= cpu; i++) + memset(&fstack[i], 0, sizeof(fstack[i])); + cpus = cpu; + } + + for (i = 0; i < fstack[cpu].size && fstack[cpu].stack[i]; i++) { + if (strcmp(parent, fstack[cpu].stack[i]) == 0) { + add_child(&fstack[cpu], child, i+1); + return i; + } + } + + /* Not found */ + add_child(&fstack[cpu], parent, 0); + add_child(&fstack[cpu], child, 1); + return 0; +} + +static int 
function_handler(struct trace_seq *s, struct pevent_record *record, + struct event_format *event, void *context) +{ + struct pevent *pevent = event->pevent; + unsigned long long function; + unsigned long long pfunction; + const char *func; + const char *parent; + int index; + + if (pevent_get_field_val(s, event, "ip", record, &function, 1)) + return trace_seq_putc(s, '!'); + + func = pevent_find_function(pevent, function); + + if (pevent_get_field_val(s, event, "parent_ip", record, &pfunction, 1)) + return trace_seq_putc(s, '!'); + + parent = pevent_find_function(pevent, pfunction); + + if (parent && ftrace_indent->set) + index = add_and_get_index(parent, func, record->cpu); + + trace_seq_printf(s, "%*s", index*3, ""); + + if (func) + trace_seq_printf(s, "%s", func); + else + trace_seq_printf(s, "0x%llx", function); + + if (ftrace_parent->set) { + trace_seq_printf(s, " <-- "); + if (parent) + trace_seq_printf(s, "%s", parent); + else + trace_seq_printf(s, "0x%llx", pfunction); + } + + return 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_event_handler(pevent, -1, "ftrace", "function", + function_handler, NULL); + + traceevent_plugin_add_options("ftrace", plugin_options); + + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + int i, x; + + pevent_unregister_event_handler(pevent, -1, "ftrace", "function", + function_handler, NULL); + + for (i = 0; i <= cpus; i++) { + for (x = 0; x < fstack[i].size && fstack[i].stack[x]; x++) + free(fstack[i].stack[x]); + free(fstack[i].stack); + } + + traceevent_plugin_remove_options(plugin_options); + + free(fstack); + fstack = NULL; + cpus = -1; +} diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c new file mode 100644 index 00000000000..12bf14cc115 --- /dev/null +++ b/tools/lib/traceevent/plugin_hrtimer.c @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "event-parse.h" + +static int timer_expire_handler(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + trace_seq_printf(s, "hrtimer="); + + if (pevent_print_num_field(s, "0x%llx", event, "timer", + record, 0) == -1) + pevent_print_num_field(s, "0x%llx", event, "hrtimer", + record, 1); + + trace_seq_printf(s, " now="); + + pevent_print_num_field(s, "%llu", event, "now", record, 1); + + pevent_print_func_field(s, " function=%s", event, "function", + record, 0); + return 0; +} + +static int timer_start_handler(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + trace_seq_printf(s, "hrtimer="); + + if (pevent_print_num_field(s, "0x%llx", event, "timer", + record, 0) == -1) + pevent_print_num_field(s, "0x%llx", event, "hrtimer", + record, 1); + + pevent_print_func_field(s, " function=%s", event, "function", + record, 0); + + trace_seq_printf(s, " expires="); + pevent_print_num_field(s, "%llu", event, "expires", record, 1); + + trace_seq_printf(s, " softexpires="); + pevent_print_num_field(s, "%llu", event, "softexpires", record, 1); + return 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_event_handler(pevent, -1, + "timer", "hrtimer_expire_entry", + timer_expire_handler, NULL); + + pevent_register_event_handler(pevent, -1, "timer", "hrtimer_start", + timer_start_handler, NULL); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_event_handler(pevent, -1, + "timer", "hrtimer_expire_entry", + timer_expire_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "timer", "hrtimer_start", + timer_start_handler, NULL); +} diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c new file mode 100644 index 00000000000..0db714c721b --- /dev/null +++ b/tools/lib/traceevent/plugin_jbd2.c @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "event-parse.h" + +#define MINORBITS 20 +#define MINORMASK ((1U << MINORBITS) - 1) + +#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) +#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) + +static unsigned long long +process_jbd2_dev_to_name(struct trace_seq *s, + unsigned long long *args) +{ + unsigned int dev = args[0]; + + trace_seq_printf(s, "%d:%d", MAJOR(dev), MINOR(dev)); + return 0; +} + +static unsigned long long +process_jiffies_to_msecs(struct trace_seq *s, + unsigned long long *args) +{ + unsigned long long jiffies = args[0]; + + trace_seq_printf(s, "%lld", jiffies); + return jiffies; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_print_function(pevent, + process_jbd2_dev_to_name, + PEVENT_FUNC_ARG_STRING, + "jbd2_dev_to_name", + PEVENT_FUNC_ARG_INT, + PEVENT_FUNC_ARG_VOID); + + pevent_register_print_function(pevent, + process_jiffies_to_msecs, + PEVENT_FUNC_ARG_LONG, + "jiffies_to_msecs", + PEVENT_FUNC_ARG_LONG, + PEVENT_FUNC_ARG_VOID); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_print_function(pevent, process_jbd2_dev_to_name, + "jbd2_dev_to_name"); + + pevent_unregister_print_function(pevent, process_jiffies_to_msecs, + "jiffies_to_msecs"); +} diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c new file mode 100644 index 00000000000..70650ff48d7 --- /dev/null +++ b/tools/lib/traceevent/plugin_kmem.c @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "event-parse.h" + +static int call_site_handler(struct trace_seq *s, struct pevent_record *record, + struct event_format *event, void *context) +{ + struct format_field *field; + unsigned long long val, addr; + void *data = record->data; + const char *func; + + field = pevent_find_field(event, "call_site"); + if (!field) + return 1; + + if (pevent_read_number_field(field, data, &val)) + return 1; + + func = pevent_find_function(event->pevent, val); + if (!func) + return 1; + + addr = pevent_find_function_address(event->pevent, val); + + trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr)); + return 1; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_event_handler(pevent, -1, "kmem", "kfree", + call_site_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kmem", "kmalloc", + call_site_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kmem", "kmalloc_node", + call_site_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc", + call_site_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kmem", + "kmem_cache_alloc_node", + call_site_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kmem", "kmem_cache_free", + call_site_handler, NULL); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_event_handler(pevent, -1, "kmem", "kfree", + call_site_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kmem", "kmalloc", + call_site_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node", + call_site_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc", + call_site_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kmem", + "kmem_cache_alloc_node", + call_site_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free", + call_site_handler, NULL); +} diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c new file mode 100644 index 00000000000..9e0e8c61b43 --- /dev/null +++ b/tools/lib/traceevent/plugin_kvm.c @@ -0,0 +1,465 @@ +/* + * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> + +#include "event-parse.h" + +#ifdef HAVE_UDIS86 + +#include <udis86.h> + +static ud_t ud; + +static void init_disassembler(void) +{ + ud_init(&ud); + ud_set_syntax(&ud, UD_SYN_ATT); +} + +static const char *disassemble(unsigned char *insn, int len, uint64_t rip, + int cr0_pe, int eflags_vm, + int cs_d, int cs_l) +{ + int mode; + + if (!cr0_pe) + mode = 16; + else if (eflags_vm) + mode = 16; + else if (cs_l) + mode = 64; + else if (cs_d) + mode = 32; + else + mode = 16; + + ud_set_pc(&ud, rip); + ud_set_mode(&ud, mode); + ud_set_input_buffer(&ud, insn, len); + ud_disassemble(&ud); + return ud_insn_asm(&ud); +} + +#else + +static void init_disassembler(void) +{ +} + +static const char *disassemble(unsigned char *insn, int len, uint64_t rip, + int cr0_pe, int eflags_vm, + int cs_d, int cs_l) +{ + static char out[15*3+1]; + int i; + + for (i = 0; i < len; ++i) + sprintf(out + i * 3, "%02x ", insn[i]); + out[len*3-1] = '\0'; + return out; +} + +#endif + + +#define VMX_EXIT_REASONS \ + _ER(EXCEPTION_NMI, 0) \ + _ER(EXTERNAL_INTERRUPT, 1) \ + _ER(TRIPLE_FAULT, 2) \ + _ER(PENDING_INTERRUPT, 7) \ + _ER(NMI_WINDOW, 8) \ + _ER(TASK_SWITCH, 9) \ + _ER(CPUID, 10) \ + _ER(HLT, 12) \ + _ER(INVD, 13) \ + _ER(INVLPG, 14) \ + _ER(RDPMC, 15) \ + _ER(RDTSC, 16) \ + _ER(VMCALL, 18) \ + _ER(VMCLEAR, 19) \ + _ER(VMLAUNCH, 20) \ + _ER(VMPTRLD, 21) \ + _ER(VMPTRST, 22) \ + _ER(VMREAD, 23) \ + _ER(VMRESUME, 24) \ + _ER(VMWRITE, 25) \ + _ER(VMOFF, 26) \ + _ER(VMON, 27) \ + _ER(CR_ACCESS, 28) \ + _ER(DR_ACCESS, 29) \ + _ER(IO_INSTRUCTION, 30) \ + _ER(MSR_READ, 31) \ + _ER(MSR_WRITE, 32) \ + _ER(MWAIT_INSTRUCTION, 36) \ + _ER(MONITOR_INSTRUCTION, 39) \ + _ER(PAUSE_INSTRUCTION, 40) \ + _ER(MCE_DURING_VMENTRY, 41) \ + _ER(TPR_BELOW_THRESHOLD, 43) \ + _ER(APIC_ACCESS, 44) \ + _ER(EOI_INDUCED, 45) \ + _ER(EPT_VIOLATION, 48) \ + _ER(EPT_MISCONFIG, 49) \ + _ER(INVEPT, 50) \ + _ER(PREEMPTION_TIMER, 52) \ + _ER(WBINVD, 54) \ + _ER(XSETBV, 55) \ + _ER(APIC_WRITE, 56) \ + _ER(INVPCID, 58) + +#define SVM_EXIT_REASONS \ + _ER(EXIT_READ_CR0, 0x000) \ + _ER(EXIT_READ_CR3, 0x003) \ + _ER(EXIT_READ_CR4, 0x004) \ + _ER(EXIT_READ_CR8, 0x008) \ + _ER(EXIT_WRITE_CR0, 0x010) \ + _ER(EXIT_WRITE_CR3, 0x013) \ + _ER(EXIT_WRITE_CR4, 0x014) \ + _ER(EXIT_WRITE_CR8, 0x018) \ + _ER(EXIT_READ_DR0, 0x020) \ + _ER(EXIT_READ_DR1, 0x021) \ + _ER(EXIT_READ_DR2, 0x022) \ + _ER(EXIT_READ_DR3, 0x023) \ + _ER(EXIT_READ_DR4, 0x024) \ + _ER(EXIT_READ_DR5, 0x025) \ + _ER(EXIT_READ_DR6, 0x026) \ + _ER(EXIT_READ_DR7, 0x027) \ + _ER(EXIT_WRITE_DR0, 0x030) \ + _ER(EXIT_WRITE_DR1, 0x031) \ + _ER(EXIT_WRITE_DR2, 0x032) \ + _ER(EXIT_WRITE_DR3, 0x033) \ + _ER(EXIT_WRITE_DR4, 0x034) \ + _ER(EXIT_WRITE_DR5, 0x035) \ + _ER(EXIT_WRITE_DR6, 0x036) \ + _ER(EXIT_WRITE_DR7, 0x037) \ + _ER(EXIT_EXCP_BASE, 0x040) \ + _ER(EXIT_INTR, 0x060) \ + _ER(EXIT_NMI, 0x061) \ + _ER(EXIT_SMI, 0x062) \ + _ER(EXIT_INIT, 0x063) \ + _ER(EXIT_VINTR, 0x064) \ + _ER(EXIT_CR0_SEL_WRITE, 0x065) \ + _ER(EXIT_IDTR_READ, 0x066) \ + _ER(EXIT_GDTR_READ, 0x067) \ + _ER(EXIT_LDTR_READ, 0x068) \ + _ER(EXIT_TR_READ, 0x069) \ + _ER(EXIT_IDTR_WRITE, 0x06a) \ + _ER(EXIT_GDTR_WRITE, 0x06b) \ + _ER(EXIT_LDTR_WRITE, 0x06c) \ + _ER(EXIT_TR_WRITE, 0x06d) \ + _ER(EXIT_RDTSC, 0x06e) \ + 
_ER(EXIT_RDPMC, 0x06f) \ + _ER(EXIT_PUSHF, 0x070) \ + _ER(EXIT_POPF, 0x071) \ + _ER(EXIT_CPUID, 0x072) \ + _ER(EXIT_RSM, 0x073) \ + _ER(EXIT_IRET, 0x074) \ + _ER(EXIT_SWINT, 0x075) \ + _ER(EXIT_INVD, 0x076) \ + _ER(EXIT_PAUSE, 0x077) \ + _ER(EXIT_HLT, 0x078) \ + _ER(EXIT_INVLPG, 0x079) \ + _ER(EXIT_INVLPGA, 0x07a) \ + _ER(EXIT_IOIO, 0x07b) \ + _ER(EXIT_MSR, 0x07c) \ + _ER(EXIT_TASK_SWITCH, 0x07d) \ + _ER(EXIT_FERR_FREEZE, 0x07e) \ + _ER(EXIT_SHUTDOWN, 0x07f) \ + _ER(EXIT_VMRUN, 0x080) \ + _ER(EXIT_VMMCALL, 0x081) \ + _ER(EXIT_VMLOAD, 0x082) \ + _ER(EXIT_VMSAVE, 0x083) \ + _ER(EXIT_STGI, 0x084) \ + _ER(EXIT_CLGI, 0x085) \ + _ER(EXIT_SKINIT, 0x086) \ + _ER(EXIT_RDTSCP, 0x087) \ + _ER(EXIT_ICEBP, 0x088) \ + _ER(EXIT_WBINVD, 0x089) \ + _ER(EXIT_MONITOR, 0x08a) \ + _ER(EXIT_MWAIT, 0x08b) \ + _ER(EXIT_MWAIT_COND, 0x08c) \ + _ER(EXIT_NPF, 0x400) \ + _ER(EXIT_ERR, -1) + +#define _ER(reason, val) { #reason, val }, +struct str_values { + const char *str; + int val; +}; + +static struct str_values vmx_exit_reasons[] = { + VMX_EXIT_REASONS + { NULL, -1} +}; + +static struct str_values svm_exit_reasons[] = { + SVM_EXIT_REASONS + { NULL, -1} +}; + +static struct isa_exit_reasons { + unsigned isa; + struct str_values *strings; +} isa_exit_reasons[] = { + { .isa = 1, .strings = vmx_exit_reasons }, + { .isa = 2, .strings = svm_exit_reasons }, + { } +}; + +static const char *find_exit_reason(unsigned isa, int val) +{ + struct str_values *strings = NULL; + int i; + + for (i = 0; isa_exit_reasons[i].strings; ++i) + if (isa_exit_reasons[i].isa == isa) { + strings = isa_exit_reasons[i].strings; + break; + } + if (!strings) + return "UNKNOWN-ISA"; + for (i = 0; strings[i].val >= 0; i++) + if (strings[i].val == val) + break; + if (strings[i].str) + return strings[i].str; + return "UNKNOWN"; +} + +static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record, + struct event_format *event, void *context) +{ + unsigned long long isa; + unsigned long long val; + unsigned long long info1 = 0, info2 = 0; + + if (pevent_get_field_val(s, event, "exit_reason", record, &val, 1) < 0) + return -1; + + if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0) + isa = 1; + + trace_seq_printf(s, "reason %s", find_exit_reason(isa, val)); + + pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1); + + if (pevent_get_field_val(s, event, "info1", record, &info1, 0) >= 0 + && pevent_get_field_val(s, event, "info2", record, &info2, 0) >= 0) + trace_seq_printf(s, " info %llx %llx", info1, info2); + + return 0; +} + +#define KVM_EMUL_INSN_F_CR0_PE (1 << 0) +#define KVM_EMUL_INSN_F_EFL_VM (1 << 1) +#define KVM_EMUL_INSN_F_CS_D (1 << 2) +#define KVM_EMUL_INSN_F_CS_L (1 << 3) + +static int kvm_emulate_insn_handler(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + unsigned long long rip, csbase, len, flags, failed; + int llen; + uint8_t *insn; + const char *disasm; + + if (pevent_get_field_val(s, event, "rip", record, &rip, 1) < 0) + return -1; + + if (pevent_get_field_val(s, event, "csbase", record, &csbase, 1) < 0) + return -1; + + if (pevent_get_field_val(s, event, "len", record, &len, 1) < 0) + return -1; + + if (pevent_get_field_val(s, event, "flags", record, &flags, 1) < 0) + return -1; + + if (pevent_get_field_val(s, event, "failed", record, &failed, 1) < 0) + return -1; + + insn = pevent_get_field_raw(s, event, "insn", record, &llen, 1); + if (!insn) + return -1; + + disasm = disassemble(insn, len, rip, + flags & KVM_EMUL_INSN_F_CR0_PE, + 
flags & KVM_EMUL_INSN_F_EFL_VM, + flags & KVM_EMUL_INSN_F_CS_D, + flags & KVM_EMUL_INSN_F_CS_L); + + trace_seq_printf(s, "%llx:%llx: %s%s", csbase, rip, disasm, + failed ? " FAIL" : ""); + return 0; +} + +union kvm_mmu_page_role { + unsigned word; + struct { + unsigned glevels:4; + unsigned level:4; + unsigned quadrant:2; + unsigned pad_for_nice_hex_output:6; + unsigned direct:1; + unsigned access:3; + unsigned invalid:1; + unsigned cr4_pge:1; + unsigned nxe:1; + }; +}; + +static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record, + struct event_format *event, void *context) +{ + unsigned long long val; + static const char *access_str[] = { + "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" + }; + union kvm_mmu_page_role role; + + if (pevent_get_field_val(s, event, "role", record, &val, 1) < 0) + return -1; + + role.word = (int)val; + + /* + * We can only use the structure if file is of the same + * endianess. + */ + if (pevent_is_file_bigendian(event->pevent) == + pevent_is_host_bigendian(event->pevent)) { + + trace_seq_printf(s, "%u/%u q%u%s %s%s %spge %snxe", + role.level, + role.glevels, + role.quadrant, + role.direct ? " direct" : "", + access_str[role.access], + role.invalid ? " invalid" : "", + role.cr4_pge ? "" : "!", + role.nxe ? "" : "!"); + } else + trace_seq_printf(s, "WORD: %08x", role.word); + + pevent_print_num_field(s, " root %u ", event, + "root_count", record, 1); + + if (pevent_get_field_val(s, event, "unsync", record, &val, 1) < 0) + return -1; + + trace_seq_printf(s, "%s%c", val ? "unsync" : "sync", 0); + return 0; +} + +static int kvm_mmu_get_page_handler(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + unsigned long long val; + + if (pevent_get_field_val(s, event, "created", record, &val, 1) < 0) + return -1; + + trace_seq_printf(s, "%s ", val ? 
"new" : "existing"); + + if (pevent_get_field_val(s, event, "gfn", record, &val, 1) < 0) + return -1; + + trace_seq_printf(s, "sp gfn %llx ", val); + return kvm_mmu_print_role(s, record, event, context); +} + +#define PT_WRITABLE_SHIFT 1 +#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT) + +static unsigned long long +process_is_writable_pte(struct trace_seq *s, unsigned long long *args) +{ + unsigned long pte = args[0]; + return pte & PT_WRITABLE_MASK; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + init_disassembler(); + + pevent_register_event_handler(pevent, -1, "kvm", "kvm_exit", + kvm_exit_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn", + kvm_emulate_insn_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page", + kvm_mmu_get_page_handler, NULL); + + pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page", + kvm_mmu_print_role, NULL); + + pevent_register_event_handler(pevent, -1, + "kvmmmu", "kvm_mmu_unsync_page", + kvm_mmu_print_role, NULL); + + pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page", + kvm_mmu_print_role, NULL); + + pevent_register_event_handler(pevent, -1, "kvmmmu", + "kvm_mmu_prepare_zap_page", kvm_mmu_print_role, + NULL); + + pevent_register_print_function(pevent, + process_is_writable_pte, + PEVENT_FUNC_ARG_INT, + "is_writable_pte", + PEVENT_FUNC_ARG_LONG, + PEVENT_FUNC_ARG_VOID); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_exit", + kvm_exit_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn", + kvm_emulate_insn_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page", + kvm_mmu_get_page_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page", + kvm_mmu_print_role, NULL); + + pevent_unregister_event_handler(pevent, -1, + "kvmmmu", "kvm_mmu_unsync_page", + kvm_mmu_print_role, NULL); + + pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page", + kvm_mmu_print_role, NULL); + + pevent_unregister_event_handler(pevent, -1, "kvmmmu", + "kvm_mmu_prepare_zap_page", kvm_mmu_print_role, + NULL); + + pevent_unregister_print_function(pevent, process_is_writable_pte, + "is_writable_pte"); +} diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c new file mode 100644 index 00000000000..7e15a0f1c2f --- /dev/null +++ b/tools/lib/traceevent/plugin_mac80211.c @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "event-parse.h" + +#define INDENT 65 + +static void print_string(struct trace_seq *s, struct event_format *event, + const char *name, const void *data) +{ + struct format_field *f = pevent_find_field(event, name); + int offset; + int length; + + if (!f) { + trace_seq_printf(s, "NOTFOUND:%s", name); + return; + } + + offset = f->offset; + length = f->size; + + if (!strncmp(f->type, "__data_loc", 10)) { + unsigned long long v; + if (pevent_read_number_field(f, data, &v)) { + trace_seq_printf(s, "invalid_data_loc"); + return; + } + offset = v & 0xffff; + length = v >> 16; + } + + trace_seq_printf(s, "%.*s", length, (char *)data + offset); +} + +#define SF(fn) pevent_print_num_field(s, fn ":%d", event, fn, record, 0) +#define SFX(fn) pevent_print_num_field(s, fn ":%#x", event, fn, record, 0) +#define SP() trace_seq_putc(s, ' ') + +static int drv_bss_info_changed(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + void *data = record->data; + + print_string(s, event, "wiphy_name", data); + trace_seq_printf(s, " vif:"); + print_string(s, event, "vif_name", data); + pevent_print_num_field(s, "(%d)", event, "vif_type", record, 1); + + trace_seq_printf(s, "\n%*s", INDENT, ""); + SF("assoc"); SP(); + SF("aid"); SP(); + SF("cts"); SP(); + SF("shortpre"); SP(); + SF("shortslot"); SP(); + SF("dtimper"); SP(); + trace_seq_printf(s, "\n%*s", INDENT, ""); + SF("bcnint"); SP(); + SFX("assoc_cap"); SP(); + SFX("basic_rates"); SP(); + SF("enable_beacon"); + trace_seq_printf(s, "\n%*s", INDENT, ""); + SF("ht_operation_mode"); + + return 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_event_handler(pevent, -1, "mac80211", + "drv_bss_info_changed", + drv_bss_info_changed, NULL); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_event_handler(pevent, -1, "mac80211", + "drv_bss_info_changed", + drv_bss_info_changed, NULL); +} diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c new file mode 100644 index 00000000000..f1ce6006525 --- /dev/null +++ b/tools/lib/traceevent/plugin_sched_switch.c @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License (not later!) + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "event-parse.h" + +static void write_state(struct trace_seq *s, int val) +{ + const char states[] = "SDTtZXxW"; + int found = 0; + int i; + + for (i = 0; i < (sizeof(states) - 1); i++) { + if (!(val & (1 << i))) + continue; + + if (found) + trace_seq_putc(s, '|'); + + found = 1; + trace_seq_putc(s, states[i]); + } + + if (!found) + trace_seq_putc(s, 'R'); +} + +static void write_and_save_comm(struct format_field *field, + struct pevent_record *record, + struct trace_seq *s, int pid) +{ + const char *comm; + int len; + + comm = (char *)(record->data + field->offset); + len = s->len; + trace_seq_printf(s, "%.*s", + field->size, comm); + + /* make sure the comm has a \0 at the end. */ + trace_seq_terminate(s); + comm = &s->buffer[len]; + + /* Help out the comm to ids. This will handle dups */ + pevent_register_comm(field->event->pevent, comm, pid); +} + +static int sched_wakeup_handler(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + struct format_field *field; + unsigned long long val; + + if (pevent_get_field_val(s, event, "pid", record, &val, 1)) + return trace_seq_putc(s, '!'); + + field = pevent_find_any_field(event, "comm"); + if (field) { + write_and_save_comm(field, record, s, val); + trace_seq_putc(s, ':'); + } + trace_seq_printf(s, "%lld", val); + + if (pevent_get_field_val(s, event, "prio", record, &val, 0) == 0) + trace_seq_printf(s, " [%lld]", val); + + if (pevent_get_field_val(s, event, "success", record, &val, 1) == 0) + trace_seq_printf(s, " success=%lld", val); + + if (pevent_get_field_val(s, event, "target_cpu", record, &val, 0) == 0) + trace_seq_printf(s, " CPU:%03llu", val); + + return 0; +} + +static int sched_switch_handler(struct trace_seq *s, + struct pevent_record *record, + struct event_format *event, void *context) +{ + struct format_field *field; + unsigned long long val; + + if (pevent_get_field_val(s, event, "prev_pid", record, &val, 1)) + return trace_seq_putc(s, '!'); + + field = pevent_find_any_field(event, "prev_comm"); + if (field) { + write_and_save_comm(field, record, s, val); + trace_seq_putc(s, ':'); + } + trace_seq_printf(s, "%lld ", val); + + if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0) + trace_seq_printf(s, "[%lld] ", val); + + if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0) + write_state(s, val); + + trace_seq_puts(s, " ==> "); + + if (pevent_get_field_val(s, event, "next_pid", record, &val, 1)) + return trace_seq_putc(s, '!'); + + field = pevent_find_any_field(event, "next_comm"); + if (field) { + write_and_save_comm(field, record, s, val); + trace_seq_putc(s, ':'); + } + trace_seq_printf(s, "%lld", val); + + if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0) + trace_seq_printf(s, " [%lld]", val); + + return 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_event_handler(pevent, -1, "sched", "sched_switch", + sched_switch_handler, NULL); + + pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup", + sched_wakeup_handler, NULL); + + pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup_new", + sched_wakeup_handler, NULL); + return 0; +} + +void 
PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_event_handler(pevent, -1, "sched", "sched_switch", + sched_switch_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "sched", "sched_wakeup", + sched_wakeup_handler, NULL); + + pevent_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new", + sched_wakeup_handler, NULL); +} diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c new file mode 100644 index 00000000000..eda326fc862 --- /dev/null +++ b/tools/lib/traceevent/plugin_scsi.c @@ -0,0 +1,429 @@ +#include <stdio.h> +#include <string.h> +#include <inttypes.h> +#include "event-parse.h" + +typedef unsigned long sector_t; +typedef uint64_t u64; +typedef unsigned int u32; + +/* + * SCSI opcodes + */ +#define TEST_UNIT_READY 0x00 +#define REZERO_UNIT 0x01 +#define REQUEST_SENSE 0x03 +#define FORMAT_UNIT 0x04 +#define READ_BLOCK_LIMITS 0x05 +#define REASSIGN_BLOCKS 0x07 +#define INITIALIZE_ELEMENT_STATUS 0x07 +#define READ_6 0x08 +#define WRITE_6 0x0a +#define SEEK_6 0x0b +#define READ_REVERSE 0x0f +#define WRITE_FILEMARKS 0x10 +#define SPACE 0x11 +#define INQUIRY 0x12 +#define RECOVER_BUFFERED_DATA 0x14 +#define MODE_SELECT 0x15 +#define RESERVE 0x16 +#define RELEASE 0x17 +#define COPY 0x18 +#define ERASE 0x19 +#define MODE_SENSE 0x1a +#define START_STOP 0x1b +#define RECEIVE_DIAGNOSTIC 0x1c +#define SEND_DIAGNOSTIC 0x1d +#define ALLOW_MEDIUM_REMOVAL 0x1e + +#define READ_FORMAT_CAPACITIES 0x23 +#define SET_WINDOW 0x24 +#define READ_CAPACITY 0x25 +#define READ_10 0x28 +#define WRITE_10 0x2a +#define SEEK_10 0x2b +#define POSITION_TO_ELEMENT 0x2b +#define WRITE_VERIFY 0x2e +#define VERIFY 0x2f +#define SEARCH_HIGH 0x30 +#define SEARCH_EQUAL 0x31 +#define SEARCH_LOW 0x32 +#define SET_LIMITS 0x33 +#define PRE_FETCH 0x34 +#define READ_POSITION 0x34 +#define SYNCHRONIZE_CACHE 0x35 +#define LOCK_UNLOCK_CACHE 0x36 +#define READ_DEFECT_DATA 0x37 +#define MEDIUM_SCAN 0x38 +#define COMPARE 0x39 +#define COPY_VERIFY 0x3a +#define WRITE_BUFFER 0x3b +#define READ_BUFFER 0x3c +#define UPDATE_BLOCK 0x3d +#define READ_LONG 0x3e +#define WRITE_LONG 0x3f +#define CHANGE_DEFINITION 0x40 +#define WRITE_SAME 0x41 +#define UNMAP 0x42 +#define READ_TOC 0x43 +#define READ_HEADER 0x44 +#define GET_EVENT_STATUS_NOTIFICATION 0x4a +#define LOG_SELECT 0x4c +#define LOG_SENSE 0x4d +#define XDWRITEREAD_10 0x53 +#define MODE_SELECT_10 0x55 +#define RESERVE_10 0x56 +#define RELEASE_10 0x57 +#define MODE_SENSE_10 0x5a +#define PERSISTENT_RESERVE_IN 0x5e +#define PERSISTENT_RESERVE_OUT 0x5f +#define VARIABLE_LENGTH_CMD 0x7f +#define REPORT_LUNS 0xa0 +#define SECURITY_PROTOCOL_IN 0xa2 +#define MAINTENANCE_IN 0xa3 +#define MAINTENANCE_OUT 0xa4 +#define MOVE_MEDIUM 0xa5 +#define EXCHANGE_MEDIUM 0xa6 +#define READ_12 0xa8 +#define WRITE_12 0xaa +#define READ_MEDIA_SERIAL_NUMBER 0xab +#define WRITE_VERIFY_12 0xae +#define VERIFY_12 0xaf +#define SEARCH_HIGH_12 0xb0 +#define SEARCH_EQUAL_12 0xb1 +#define SEARCH_LOW_12 0xb2 +#define SECURITY_PROTOCOL_OUT 0xb5 +#define READ_ELEMENT_STATUS 0xb8 +#define SEND_VOLUME_TAG 0xb6 +#define WRITE_LONG_2 0xea +#define EXTENDED_COPY 0x83 +#define RECEIVE_COPY_RESULTS 0x84 +#define ACCESS_CONTROL_IN 0x86 +#define ACCESS_CONTROL_OUT 0x87 +#define READ_16 0x88 +#define WRITE_16 0x8a +#define READ_ATTRIBUTE 0x8c +#define WRITE_ATTRIBUTE 0x8d +#define VERIFY_16 0x8f +#define SYNCHRONIZE_CACHE_16 0x91 +#define WRITE_SAME_16 0x93 +#define SERVICE_ACTION_IN 0x9e +/* values for service action in */ +#define SAI_READ_CAPACITY_16 0x10 
+#define SAI_GET_LBA_STATUS 0x12 +/* values for VARIABLE_LENGTH_CMD service action codes + * see spc4r17 Section D.3.5, table D.7 and D.8 */ +#define VLC_SA_RECEIVE_CREDENTIAL 0x1800 +/* values for maintenance in */ +#define MI_REPORT_IDENTIFYING_INFORMATION 0x05 +#define MI_REPORT_TARGET_PGS 0x0a +#define MI_REPORT_ALIASES 0x0b +#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c +#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d +#define MI_REPORT_PRIORITY 0x0e +#define MI_REPORT_TIMESTAMP 0x0f +#define MI_MANAGEMENT_PROTOCOL_IN 0x10 +/* value for MI_REPORT_TARGET_PGS ext header */ +#define MI_EXT_HDR_PARAM_FMT 0x20 +/* values for maintenance out */ +#define MO_SET_IDENTIFYING_INFORMATION 0x06 +#define MO_SET_TARGET_PGS 0x0a +#define MO_CHANGE_ALIASES 0x0b +#define MO_SET_PRIORITY 0x0e +#define MO_SET_TIMESTAMP 0x0f +#define MO_MANAGEMENT_PROTOCOL_OUT 0x10 +/* values for variable length command */ +#define XDREAD_32 0x03 +#define XDWRITE_32 0x04 +#define XPWRITE_32 0x06 +#define XDWRITEREAD_32 0x07 +#define READ_32 0x09 +#define VERIFY_32 0x0a +#define WRITE_32 0x0b +#define WRITE_SAME_32 0x0d + +#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) +#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9]) + +static const char * +scsi_trace_misc(struct trace_seq *, unsigned char *, int); + +static const char * +scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len; + sector_t lba = 0, txlen = 0; + + lba |= ((cdb[1] & 0x1F) << 16); + lba |= (cdb[2] << 8); + lba |= cdb[3]; + txlen = cdb[4]; + + trace_seq_printf(p, "lba=%llu txlen=%llu", + (unsigned long long)lba, (unsigned long long)txlen); + trace_seq_putc(p, 0); + return ret; +} + +static const char * +scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len; + sector_t lba = 0, txlen = 0; + + lba |= (cdb[2] << 24); + lba |= (cdb[3] << 16); + lba |= (cdb[4] << 8); + lba |= cdb[5]; + txlen |= (cdb[7] << 8); + txlen |= cdb[8]; + + trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", + (unsigned long long)lba, (unsigned long long)txlen, + cdb[1] >> 5); + + if (cdb[0] == WRITE_SAME) + trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); + + trace_seq_putc(p, 0); + return ret; +} + +static const char * +scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len; + sector_t lba = 0, txlen = 0; + + lba |= (cdb[2] << 24); + lba |= (cdb[3] << 16); + lba |= (cdb[4] << 8); + lba |= cdb[5]; + txlen |= (cdb[6] << 24); + txlen |= (cdb[7] << 16); + txlen |= (cdb[8] << 8); + txlen |= cdb[9]; + + trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", + (unsigned long long)lba, (unsigned long long)txlen, + cdb[1] >> 5); + trace_seq_putc(p, 0); + return ret; +} + +static const char * +scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len; + sector_t lba = 0, txlen = 0; + + lba |= ((u64)cdb[2] << 56); + lba |= ((u64)cdb[3] << 48); + lba |= ((u64)cdb[4] << 40); + lba |= ((u64)cdb[5] << 32); + lba |= (cdb[6] << 24); + lba |= (cdb[7] << 16); + lba |= (cdb[8] << 8); + lba |= cdb[9]; + txlen |= (cdb[10] << 24); + txlen |= (cdb[11] << 16); + txlen |= (cdb[12] << 8); + txlen |= cdb[13]; + + trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", + (unsigned long long)lba, (unsigned long long)txlen, + cdb[1] >> 5); + + if (cdb[0] == WRITE_SAME_16) + trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1); + + trace_seq_putc(p, 0); + return ret; +} + +static const char * 
+scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len, *cmd; + sector_t lba = 0, txlen = 0; + u32 ei_lbrt = 0; + + switch (SERVICE_ACTION32(cdb)) { + case READ_32: + cmd = "READ"; + break; + case VERIFY_32: + cmd = "VERIFY"; + break; + case WRITE_32: + cmd = "WRITE"; + break; + case WRITE_SAME_32: + cmd = "WRITE_SAME"; + break; + default: + trace_seq_printf(p, "UNKNOWN"); + goto out; + } + + lba |= ((u64)cdb[12] << 56); + lba |= ((u64)cdb[13] << 48); + lba |= ((u64)cdb[14] << 40); + lba |= ((u64)cdb[15] << 32); + lba |= (cdb[16] << 24); + lba |= (cdb[17] << 16); + lba |= (cdb[18] << 8); + lba |= cdb[19]; + ei_lbrt |= (cdb[20] << 24); + ei_lbrt |= (cdb[21] << 16); + ei_lbrt |= (cdb[22] << 8); + ei_lbrt |= cdb[23]; + txlen |= (cdb[28] << 24); + txlen |= (cdb[29] << 16); + txlen |= (cdb[30] << 8); + txlen |= cdb[31]; + + trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u", + cmd, (unsigned long long)lba, + (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt); + + if (SERVICE_ACTION32(cdb) == WRITE_SAME_32) + trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); + +out: + trace_seq_putc(p, 0); + return ret; +} + +static const char * +scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len; + unsigned int regions = cdb[7] << 8 | cdb[8]; + + trace_seq_printf(p, "regions=%u", (regions - 8) / 16); + trace_seq_putc(p, 0); + return ret; +} + +static const char * +scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len, *cmd; + sector_t lba = 0; + u32 alloc_len = 0; + + switch (SERVICE_ACTION16(cdb)) { + case SAI_READ_CAPACITY_16: + cmd = "READ_CAPACITY_16"; + break; + case SAI_GET_LBA_STATUS: + cmd = "GET_LBA_STATUS"; + break; + default: + trace_seq_printf(p, "UNKNOWN"); + goto out; + } + + lba |= ((u64)cdb[2] << 56); + lba |= ((u64)cdb[3] << 48); + lba |= ((u64)cdb[4] << 40); + lba |= ((u64)cdb[5] << 32); + lba |= (cdb[6] << 24); + lba |= (cdb[7] << 16); + lba |= (cdb[8] << 8); + lba |= cdb[9]; + alloc_len |= (cdb[10] << 24); + alloc_len |= (cdb[11] << 16); + alloc_len |= (cdb[12] << 8); + alloc_len |= cdb[13]; + + trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, + (unsigned long long)lba, alloc_len); + +out: + trace_seq_putc(p, 0); + return ret; +} + +static const char * +scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) +{ + switch (SERVICE_ACTION32(cdb)) { + case READ_32: + case VERIFY_32: + case WRITE_32: + case WRITE_SAME_32: + return scsi_trace_rw32(p, cdb, len); + default: + return scsi_trace_misc(p, cdb, len); + } +} + +static const char * +scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) +{ + const char *ret = p->buffer + p->len; + + trace_seq_printf(p, "-"); + trace_seq_putc(p, 0); + return ret; +} + +const char * +scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) +{ + switch (cdb[0]) { + case READ_6: + case WRITE_6: + return scsi_trace_rw6(p, cdb, len); + case READ_10: + case VERIFY: + case WRITE_10: + case WRITE_SAME: + return scsi_trace_rw10(p, cdb, len); + case READ_12: + case VERIFY_12: + case WRITE_12: + return scsi_trace_rw12(p, cdb, len); + case READ_16: + case VERIFY_16: + case WRITE_16: + case WRITE_SAME_16: + return scsi_trace_rw16(p, cdb, len); + case UNMAP: + return scsi_trace_unmap(p, cdb, len); + case SERVICE_ACTION_IN: + return scsi_trace_service_action_in(p, cdb, len); + case VARIABLE_LENGTH_CMD: + return scsi_trace_varlen(p, cdb, 
len); + default: + return scsi_trace_misc(p, cdb, len); + } +} + +unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s, + unsigned long long *args) +{ + scsi_trace_parse_cdb(s, (unsigned char *) (unsigned long) args[1], args[2]); + return 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_print_function(pevent, + process_scsi_trace_parse_cdb, + PEVENT_FUNC_ARG_STRING, + "scsi_trace_parse_cdb", + PEVENT_FUNC_ARG_PTR, + PEVENT_FUNC_ARG_PTR, + PEVENT_FUNC_ARG_INT, + PEVENT_FUNC_ARG_VOID); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_print_function(pevent, process_scsi_trace_parse_cdb, + "scsi_trace_parse_cdb"); +} diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugin_xen.c new file mode 100644 index 00000000000..3a413eaada6 --- /dev/null +++ b/tools/lib/traceevent/plugin_xen.c @@ -0,0 +1,136 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "event-parse.h" + +#define __HYPERVISOR_set_trap_table 0 +#define __HYPERVISOR_mmu_update 1 +#define __HYPERVISOR_set_gdt 2 +#define __HYPERVISOR_stack_switch 3 +#define __HYPERVISOR_set_callbacks 4 +#define __HYPERVISOR_fpu_taskswitch 5 +#define __HYPERVISOR_sched_op_compat 6 +#define __HYPERVISOR_dom0_op 7 +#define __HYPERVISOR_set_debugreg 8 +#define __HYPERVISOR_get_debugreg 9 +#define __HYPERVISOR_update_descriptor 10 +#define __HYPERVISOR_memory_op 12 +#define __HYPERVISOR_multicall 13 +#define __HYPERVISOR_update_va_mapping 14 +#define __HYPERVISOR_set_timer_op 15 +#define __HYPERVISOR_event_channel_op_compat 16 +#define __HYPERVISOR_xen_version 17 +#define __HYPERVISOR_console_io 18 +#define __HYPERVISOR_physdev_op_compat 19 +#define __HYPERVISOR_grant_table_op 20 +#define __HYPERVISOR_vm_assist 21 +#define __HYPERVISOR_update_va_mapping_otherdomain 22 +#define __HYPERVISOR_iret 23 /* x86 only */ +#define __HYPERVISOR_vcpu_op 24 +#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ +#define __HYPERVISOR_mmuext_op 26 +#define __HYPERVISOR_acm_op 27 +#define __HYPERVISOR_nmi_op 28 +#define __HYPERVISOR_sched_op 29 +#define __HYPERVISOR_callback_op 30 +#define __HYPERVISOR_xenoprof_op 31 +#define __HYPERVISOR_event_channel_op 32 +#define __HYPERVISOR_physdev_op 33 +#define __HYPERVISOR_hvm_op 34 +#define __HYPERVISOR_tmem_op 38 + +/* Architecture-specific hypercall definitions. */ +#define __HYPERVISOR_arch_0 48 +#define __HYPERVISOR_arch_1 49 +#define __HYPERVISOR_arch_2 50 +#define __HYPERVISOR_arch_3 51 +#define __HYPERVISOR_arch_4 52 +#define __HYPERVISOR_arch_5 53 +#define __HYPERVISOR_arch_6 54 +#define __HYPERVISOR_arch_7 55 + +#define N(x) [__HYPERVISOR_##x] = "("#x")" +static const char *xen_hypercall_names[] = { + N(set_trap_table), + N(mmu_update), + N(set_gdt), + N(stack_switch), + N(set_callbacks), + N(fpu_taskswitch), + N(sched_op_compat), + N(dom0_op), + N(set_debugreg), + N(get_debugreg), + N(update_descriptor), + N(memory_op), + N(multicall), + N(update_va_mapping), + N(set_timer_op), + N(event_channel_op_compat), + N(xen_version), + N(console_io), + N(physdev_op_compat), + N(grant_table_op), + N(vm_assist), + N(update_va_mapping_otherdomain), + N(iret), + N(vcpu_op), + N(set_segment_base), + N(mmuext_op), + N(acm_op), + N(nmi_op), + N(sched_op), + N(callback_op), + N(xenoprof_op), + N(event_channel_op), + N(physdev_op), + N(hvm_op), + +/* Architecture-specific hypercall definitions. 
*/ + N(arch_0), + N(arch_1), + N(arch_2), + N(arch_3), + N(arch_4), + N(arch_5), + N(arch_6), + N(arch_7), +}; +#undef N + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +static const char *xen_hypercall_name(unsigned op) +{ + if (op < ARRAY_SIZE(xen_hypercall_names) && + xen_hypercall_names[op] != NULL) + return xen_hypercall_names[op]; + + return ""; +} + +unsigned long long process_xen_hypercall_name(struct trace_seq *s, + unsigned long long *args) +{ + unsigned int op = args[0]; + + trace_seq_printf(s, "%s", xen_hypercall_name(op)); + return 0; +} + +int PEVENT_PLUGIN_LOADER(struct pevent *pevent) +{ + pevent_register_print_function(pevent, + process_xen_hypercall_name, + PEVENT_FUNC_ARG_STRING, + "xen_hypercall_name", + PEVENT_FUNC_ARG_INT, + PEVENT_FUNC_ARG_VOID); + return 0; +} + +void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent) +{ + pevent_unregister_print_function(pevent, process_xen_hypercall_name, + "xen_hypercall_name"); +} diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c index b1ccc923e8a..ec3bd16a548 100644 --- a/tools/lib/traceevent/trace-seq.c +++ b/tools/lib/traceevent/trace-seq.c @@ -13,8 +13,7 @@ * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * License along with this program; if not, see <http://www.gnu.org/licenses> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ @@ -23,6 +22,7 @@ #include <string.h> #include <stdarg.h> +#include <asm/bug.h> #include "event-parse.h" #include "event-utils.h" @@ -33,10 +33,21 @@ #define TRACE_SEQ_POISON ((void *)0xdeadbeef) #define TRACE_SEQ_CHECK(s) \ do { \ - if ((s)->buffer == TRACE_SEQ_POISON) \ - die("Usage of trace_seq after it was destroyed"); \ + if (WARN_ONCE((s)->buffer == TRACE_SEQ_POISON, \ + "Usage of trace_seq after it was destroyed")) \ + (s)->state = TRACE_SEQ__BUFFER_POISONED; \ } while (0) +#define TRACE_SEQ_CHECK_RET_N(s, n) \ +do { \ + TRACE_SEQ_CHECK(s); \ + if ((s)->state != TRACE_SEQ__GOOD) \ + return n; \ +} while (0) + +#define TRACE_SEQ_CHECK_RET(s) TRACE_SEQ_CHECK_RET_N(s, ) +#define TRACE_SEQ_CHECK_RET0(s) TRACE_SEQ_CHECK_RET_N(s, 0) + /** * trace_seq_init - initialize the trace_seq structure * @s: a pointer to the trace_seq structure to initialize @@ -46,7 +57,24 @@ void trace_seq_init(struct trace_seq *s) s->len = 0; s->readpos = 0; s->buffer_size = TRACE_SEQ_BUF_SIZE; - s->buffer = malloc_or_die(s->buffer_size); + s->buffer = malloc(s->buffer_size); + if (s->buffer != NULL) + s->state = TRACE_SEQ__GOOD; + else + s->state = TRACE_SEQ__MEM_ALLOC_FAILED; +} + +/** + * trace_seq_reset - re-initialize the trace_seq structure + * @s: a pointer to the trace_seq structure to reset + */ +void trace_seq_reset(struct trace_seq *s) +{ + if (!s) + return; + TRACE_SEQ_CHECK(s); + s->len = 0; + s->readpos = 0; } /** @@ -59,17 +87,23 @@ void trace_seq_destroy(struct trace_seq *s) { if (!s) return; - TRACE_SEQ_CHECK(s); + TRACE_SEQ_CHECK_RET(s); free(s->buffer); s->buffer = TRACE_SEQ_POISON; } static void expand_buffer(struct trace_seq *s) { + char *buf; + + buf = realloc(s->buffer, s->buffer_size + TRACE_SEQ_BUF_SIZE); + if (WARN_ONCE(!buf, "Can't allocate trace_seq buffer memory")) { + s->state = TRACE_SEQ__MEM_ALLOC_FAILED; + return; + } + + s->buffer = buf; s->buffer_size += TRACE_SEQ_BUF_SIZE; - s->buffer = 
realloc(s->buffer, s->buffer_size); - if (!s->buffer) - die("Can't allocate trace_seq buffer memory"); } /** @@ -93,9 +127,9 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) int len; int ret; - TRACE_SEQ_CHECK(s); - try_again: + TRACE_SEQ_CHECK_RET0(s); + len = (s->buffer_size - 1) - s->len; va_start(ap, fmt); @@ -129,9 +163,9 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) int len; int ret; - TRACE_SEQ_CHECK(s); - try_again: + TRACE_SEQ_CHECK_RET0(s); + len = (s->buffer_size - 1) - s->len; ret = vsnprintf(s->buffer + s->len, len, fmt, args); @@ -160,13 +194,15 @@ int trace_seq_puts(struct trace_seq *s, const char *str) { int len; - TRACE_SEQ_CHECK(s); + TRACE_SEQ_CHECK_RET0(s); len = strlen(str); while (len > ((s->buffer_size - 1) - s->len)) expand_buffer(s); + TRACE_SEQ_CHECK_RET0(s); + memcpy(s->buffer + s->len, str, len); s->len += len; @@ -175,11 +211,13 @@ int trace_seq_puts(struct trace_seq *s, const char *str) int trace_seq_putc(struct trace_seq *s, unsigned char c) { - TRACE_SEQ_CHECK(s); + TRACE_SEQ_CHECK_RET0(s); while (s->len >= (s->buffer_size - 1)) expand_buffer(s); + TRACE_SEQ_CHECK_RET0(s); + s->buffer[s->len++] = c; return 1; @@ -187,7 +225,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c) void trace_seq_terminate(struct trace_seq *s) { - TRACE_SEQ_CHECK(s); + TRACE_SEQ_CHECK_RET(s); /* There's always one character left on the buffer */ s->buffer[s->len] = 0; @@ -196,5 +234,16 @@ void trace_seq_terminate(struct trace_seq *s) int trace_seq_do_printf(struct trace_seq *s) { TRACE_SEQ_CHECK(s); - return printf("%.*s", s->len, s->buffer); + + switch (s->state) { + case TRACE_SEQ__GOOD: + return printf("%.*s", s->len, s->buffer); + case TRACE_SEQ__BUFFER_POISONED: + puts("Usage of trace_seq after it was destroyed"); + break; + case TRACE_SEQ__MEM_ALLOC_FAILED: + puts("Can't allocate trace_seq buffer memory"); + break; + } + return -1; } diff --git a/tools/net/Makefile b/tools/net/Makefile new file mode 100644 index 00000000000..ee577ea03ba --- /dev/null +++ b/tools/net/Makefile @@ -0,0 +1,34 @@ +prefix = /usr + +CC = gcc +LEX = flex +YACC = bison + +%.yacc.c: %.y + $(YACC) -o $@ -d $< + +%.lex.c: %.l + $(LEX) -o $@ $< + +all : bpf_jit_disasm bpf_dbg bpf_asm + +bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' +bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl +bpf_jit_disasm : bpf_jit_disasm.o + +bpf_dbg : CFLAGS = -Wall -O2 +bpf_dbg : LDLIBS = -lreadline +bpf_dbg : bpf_dbg.o + +bpf_asm : CFLAGS = -Wall -O2 -I. +bpf_asm : LDLIBS = +bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o +bpf_exp.lex.o : bpf_exp.yacc.c + +clean : + rm -rf *.o bpf_jit_disasm bpf_dbg bpf_asm bpf_exp.yacc.* bpf_exp.lex.* + +install : + install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm + install bpf_dbg $(prefix)/bin/bpf_dbg + install bpf_asm $(prefix)/bin/bpf_asm diff --git a/tools/net/bpf_asm.c b/tools/net/bpf_asm.c new file mode 100644 index 00000000000..c15aef097b0 --- /dev/null +++ b/tools/net/bpf_asm.c @@ -0,0 +1,52 @@ +/* + * Minimal BPF assembler + * + * Instead of libpcap high-level filter expressions, it can be quite + * useful to define filters in low-level BPF assembler (that is kept + * close to Steven McCanne and Van Jacobson's original BPF paper). + * In particular for BPF JIT implementors, JIT security auditors, or + * just for defining BPF expressions that contain extensions which are + * not supported by compilers. 
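+ *
+ * A filter in this low-level syntax is plain text, one instruction per
+ * line. As a minimal sketch, an ARP-only filter (similar to the example
+ * discussed in Documentation/networking/filter.txt) could read:
+ *
+ *   ldh [12]
+ *   jne #0x806, drop
+ *   ret #-1
+ *   drop: ret #0
+ *
+ * i.e. load the EtherType halfword at offset 12, branch to 'drop'
+ * unless it equals 0x806 (ARP), and accept the full packet otherwise.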
+ * + * How to get into it: + * + * 1) read Documentation/networking/filter.txt + * 2) Run `bpf_asm [-c] <filter-prog file>` to translate into binary + * blob that is loadable with xt_bpf, cls_bpf et al. Note: -c will + * pretty print a C-like construct. + * + * Copyright 2013 Daniel Borkmann <borkmann@redhat.com> + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +#include <stdbool.h> +#include <stdio.h> +#include <string.h> + +extern void bpf_asm_compile(FILE *fp, bool cstyle); + +int main(int argc, char **argv) +{ + FILE *fp = stdin; + bool cstyle = false; + int i; + + for (i = 1; i < argc; i++) { + if (!strncmp("-c", argv[i], 2)) { + cstyle = true; + continue; + } + + fp = fopen(argv[i], "r"); + if (!fp) { + fp = stdin; + continue; + } + + break; + } + + bpf_asm_compile(fp, cstyle); + + return 0; +} diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c new file mode 100644 index 00000000000..9a287bec695 --- /dev/null +++ b/tools/net/bpf_dbg.c @@ -0,0 +1,1395 @@ +/* + * Minimal BPF debugger + * + * Minimal BPF debugger that mimics the kernel's engine (w/o extensions) + * and allows for single stepping through selected packets from a pcap + * with a provided user filter in order to facilitate verification of a + * BPF program. Besides others, this is useful to verify BPF programs + * before attaching to a live system, and can be used in socket filters, + * cls_bpf, xt_bpf, team driver and e.g. PTP code; in particular when a + * single more complex BPF program is being used. Reasons for a more + * complex BPF program are likely primarily to optimize execution time + * for making a verdict when multiple simple BPF programs are combined + * into one in order to prevent parsing same headers multiple times. + * + * More on how to debug BPF opcodes see Documentation/networking/filter.txt + * which is the main document on BPF. Mini howto for getting started: + * + * 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>'): + * 2) > load bpf 6,40 0 0 12,21 0 3 20... 
(output from `bpf_asm` or + * `tcpdump -iem1 -ddd port 22 | tr '\n' ','` to load as filter) + * 3) > load pcap foo.pcap + * 4) > run <n>/disassemble/dump/quit (self-explanatory) + * 5) > breakpoint 2 (sets bp at loaded BPF insns 2, do `run` then; + * multiple bps can be set, of course, a call to `breakpoint` + * w/o args shows currently loaded bps, `breakpoint reset` for + * resetting all breakpoints) + * 6) > select 3 (`run` etc will start from the 3rd packet in the pcap) + * 7) > step [-<n>, +<n>] (performs single stepping through the BPF) + * + * Copyright 2013 Daniel Borkmann <borkmann@redhat.com> + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +#include <stdio.h> +#include <unistd.h> +#include <stdlib.h> +#include <ctype.h> +#include <stdbool.h> +#include <stdarg.h> +#include <setjmp.h> +#include <linux/filter.h> +#include <linux/if_packet.h> +#include <readline/readline.h> +#include <readline/history.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <errno.h> +#include <signal.h> +#include <arpa/inet.h> +#include <net/ethernet.h> + +#define TCPDUMP_MAGIC 0xa1b2c3d4 + +#define BPF_LDX_B (BPF_LDX | BPF_B) +#define BPF_LDX_W (BPF_LDX | BPF_W) +#define BPF_JMP_JA (BPF_JMP | BPF_JA) +#define BPF_JMP_JEQ (BPF_JMP | BPF_JEQ) +#define BPF_JMP_JGT (BPF_JMP | BPF_JGT) +#define BPF_JMP_JGE (BPF_JMP | BPF_JGE) +#define BPF_JMP_JSET (BPF_JMP | BPF_JSET) +#define BPF_ALU_ADD (BPF_ALU | BPF_ADD) +#define BPF_ALU_SUB (BPF_ALU | BPF_SUB) +#define BPF_ALU_MUL (BPF_ALU | BPF_MUL) +#define BPF_ALU_DIV (BPF_ALU | BPF_DIV) +#define BPF_ALU_MOD (BPF_ALU | BPF_MOD) +#define BPF_ALU_NEG (BPF_ALU | BPF_NEG) +#define BPF_ALU_AND (BPF_ALU | BPF_AND) +#define BPF_ALU_OR (BPF_ALU | BPF_OR) +#define BPF_ALU_XOR (BPF_ALU | BPF_XOR) +#define BPF_ALU_LSH (BPF_ALU | BPF_LSH) +#define BPF_ALU_RSH (BPF_ALU | BPF_RSH) +#define BPF_MISC_TAX (BPF_MISC | BPF_TAX) +#define BPF_MISC_TXA (BPF_MISC | BPF_TXA) +#define BPF_LD_B (BPF_LD | BPF_B) +#define BPF_LD_H (BPF_LD | BPF_H) +#define BPF_LD_W (BPF_LD | BPF_W) + +#ifndef array_size +# define array_size(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef __check_format_printf +# define __check_format_printf(pos_fmtstr, pos_fmtargs) \ + __attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs)))) +#endif + +enum { + CMD_OK, + CMD_ERR, + CMD_EX, +}; + +struct shell_cmd { + const char *name; + int (*func)(char *args); +}; + +struct pcap_filehdr { + uint32_t magic; + uint16_t version_major; + uint16_t version_minor; + int32_t thiszone; + uint32_t sigfigs; + uint32_t snaplen; + uint32_t linktype; +}; + +struct pcap_timeval { + int32_t tv_sec; + int32_t tv_usec; +}; + +struct pcap_pkthdr { + struct pcap_timeval ts; + uint32_t caplen; + uint32_t len; +}; + +struct bpf_regs { + uint32_t A; + uint32_t X; + uint32_t M[BPF_MEMWORDS]; + uint32_t R; + bool Rs; + uint16_t Pc; +}; + +static struct sock_filter bpf_image[BPF_MAXINSNS + 1]; +static unsigned int bpf_prog_len = 0; + +static int bpf_breakpoints[64]; +static struct bpf_regs bpf_regs[BPF_MAXINSNS + 1]; +static struct bpf_regs bpf_curr; +static unsigned int bpf_regs_len = 0; + +static int pcap_fd = -1; +static unsigned int pcap_packet = 0; +static size_t pcap_map_size = 0; +static char *pcap_ptr_va_start, *pcap_ptr_va_curr; + +static const char * const op_table[] = { + [BPF_ST] = "st", + [BPF_STX] = "stx", + [BPF_LD_B] = "ldb", + [BPF_LD_H] = "ldh", + [BPF_LD_W] = "ld", + [BPF_LDX] = "ldx", + [BPF_LDX_B] = "ldxb", + 
[BPF_JMP_JA] = "ja", + [BPF_JMP_JEQ] = "jeq", + [BPF_JMP_JGT] = "jgt", + [BPF_JMP_JGE] = "jge", + [BPF_JMP_JSET] = "jset", + [BPF_ALU_ADD] = "add", + [BPF_ALU_SUB] = "sub", + [BPF_ALU_MUL] = "mul", + [BPF_ALU_DIV] = "div", + [BPF_ALU_MOD] = "mod", + [BPF_ALU_NEG] = "neg", + [BPF_ALU_AND] = "and", + [BPF_ALU_OR] = "or", + [BPF_ALU_XOR] = "xor", + [BPF_ALU_LSH] = "lsh", + [BPF_ALU_RSH] = "rsh", + [BPF_MISC_TAX] = "tax", + [BPF_MISC_TXA] = "txa", + [BPF_RET] = "ret", +}; + +static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...) +{ + int ret; + va_list vl; + + va_start(vl, fmt); + ret = vfprintf(rl_outstream, fmt, vl); + va_end(vl); + + return ret; +} + +static int matches(const char *cmd, const char *pattern) +{ + int len = strlen(cmd); + + if (len > strlen(pattern)) + return -1; + + return memcmp(pattern, cmd, len); +} + +static void hex_dump(const uint8_t *buf, size_t len) +{ + int i; + + rl_printf("%3u: ", 0); + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + rl_printf("\n%3u: ", i); + rl_printf("%02x ", buf[i]); + } + rl_printf("\n"); +} + +static bool bpf_prog_loaded(void) +{ + if (bpf_prog_len == 0) + rl_printf("no bpf program loaded!\n"); + + return bpf_prog_len > 0; +} + +static void bpf_disasm(const struct sock_filter f, unsigned int i) +{ + const char *op, *fmt; + int val = f.k; + char buf[256]; + + switch (f.code) { + case BPF_RET | BPF_K: + op = op_table[BPF_RET]; + fmt = "#%#x"; + break; + case BPF_RET | BPF_A: + op = op_table[BPF_RET]; + fmt = "a"; + break; + case BPF_RET | BPF_X: + op = op_table[BPF_RET]; + fmt = "x"; + break; + case BPF_MISC_TAX: + op = op_table[BPF_MISC_TAX]; + fmt = ""; + break; + case BPF_MISC_TXA: + op = op_table[BPF_MISC_TXA]; + fmt = ""; + break; + case BPF_ST: + op = op_table[BPF_ST]; + fmt = "M[%d]"; + break; + case BPF_STX: + op = op_table[BPF_STX]; + fmt = "M[%d]"; + break; + case BPF_LD_W | BPF_ABS: + op = op_table[BPF_LD_W]; + fmt = "[%d]"; + break; + case BPF_LD_H | BPF_ABS: + op = op_table[BPF_LD_H]; + fmt = "[%d]"; + break; + case BPF_LD_B | BPF_ABS: + op = op_table[BPF_LD_B]; + fmt = "[%d]"; + break; + case BPF_LD_W | BPF_LEN: + op = op_table[BPF_LD_W]; + fmt = "#len"; + break; + case BPF_LD_W | BPF_IND: + op = op_table[BPF_LD_W]; + fmt = "[x+%d]"; + break; + case BPF_LD_H | BPF_IND: + op = op_table[BPF_LD_H]; + fmt = "[x+%d]"; + break; + case BPF_LD_B | BPF_IND: + op = op_table[BPF_LD_B]; + fmt = "[x+%d]"; + break; + case BPF_LD | BPF_IMM: + op = op_table[BPF_LD_W]; + fmt = "#%#x"; + break; + case BPF_LDX | BPF_IMM: + op = op_table[BPF_LDX]; + fmt = "#%#x"; + break; + case BPF_LDX_B | BPF_MSH: + op = op_table[BPF_LDX_B]; + fmt = "4*([%d]&0xf)"; + break; + case BPF_LD | BPF_MEM: + op = op_table[BPF_LD_W]; + fmt = "M[%d]"; + break; + case BPF_LDX | BPF_MEM: + op = op_table[BPF_LDX]; + fmt = "M[%d]"; + break; + case BPF_JMP_JA: + op = op_table[BPF_JMP_JA]; + fmt = "%d"; + val = i + 1 + f.k; + break; + case BPF_JMP_JGT | BPF_X: + op = op_table[BPF_JMP_JGT]; + fmt = "x"; + break; + case BPF_JMP_JGT | BPF_K: + op = op_table[BPF_JMP_JGT]; + fmt = "#%#x"; + break; + case BPF_JMP_JGE | BPF_X: + op = op_table[BPF_JMP_JGE]; + fmt = "x"; + break; + case BPF_JMP_JGE | BPF_K: + op = op_table[BPF_JMP_JGE]; + fmt = "#%#x"; + break; + case BPF_JMP_JEQ | BPF_X: + op = op_table[BPF_JMP_JEQ]; + fmt = "x"; + break; + case BPF_JMP_JEQ | BPF_K: + op = op_table[BPF_JMP_JEQ]; + fmt = "#%#x"; + break; + case BPF_JMP_JSET | BPF_X: + op = op_table[BPF_JMP_JSET]; + fmt = "x"; + break; + case BPF_JMP_JSET | BPF_K: + op = op_table[BPF_JMP_JSET]; + fmt = 
"#%#x"; + break; + case BPF_ALU_NEG: + op = op_table[BPF_ALU_NEG]; + fmt = ""; + break; + case BPF_ALU_LSH | BPF_X: + op = op_table[BPF_ALU_LSH]; + fmt = "x"; + break; + case BPF_ALU_LSH | BPF_K: + op = op_table[BPF_ALU_LSH]; + fmt = "#%d"; + break; + case BPF_ALU_RSH | BPF_X: + op = op_table[BPF_ALU_RSH]; + fmt = "x"; + break; + case BPF_ALU_RSH | BPF_K: + op = op_table[BPF_ALU_RSH]; + fmt = "#%d"; + break; + case BPF_ALU_ADD | BPF_X: + op = op_table[BPF_ALU_ADD]; + fmt = "x"; + break; + case BPF_ALU_ADD | BPF_K: + op = op_table[BPF_ALU_ADD]; + fmt = "#%d"; + break; + case BPF_ALU_SUB | BPF_X: + op = op_table[BPF_ALU_SUB]; + fmt = "x"; + break; + case BPF_ALU_SUB | BPF_K: + op = op_table[BPF_ALU_SUB]; + fmt = "#%d"; + break; + case BPF_ALU_MUL | BPF_X: + op = op_table[BPF_ALU_MUL]; + fmt = "x"; + break; + case BPF_ALU_MUL | BPF_K: + op = op_table[BPF_ALU_MUL]; + fmt = "#%d"; + break; + case BPF_ALU_DIV | BPF_X: + op = op_table[BPF_ALU_DIV]; + fmt = "x"; + break; + case BPF_ALU_DIV | BPF_K: + op = op_table[BPF_ALU_DIV]; + fmt = "#%d"; + break; + case BPF_ALU_MOD | BPF_X: + op = op_table[BPF_ALU_MOD]; + fmt = "x"; + break; + case BPF_ALU_MOD | BPF_K: + op = op_table[BPF_ALU_MOD]; + fmt = "#%d"; + break; + case BPF_ALU_AND | BPF_X: + op = op_table[BPF_ALU_AND]; + fmt = "x"; + break; + case BPF_ALU_AND | BPF_K: + op = op_table[BPF_ALU_AND]; + fmt = "#%#x"; + break; + case BPF_ALU_OR | BPF_X: + op = op_table[BPF_ALU_OR]; + fmt = "x"; + break; + case BPF_ALU_OR | BPF_K: + op = op_table[BPF_ALU_OR]; + fmt = "#%#x"; + break; + case BPF_ALU_XOR | BPF_X: + op = op_table[BPF_ALU_XOR]; + fmt = "x"; + break; + case BPF_ALU_XOR | BPF_K: + op = op_table[BPF_ALU_XOR]; + fmt = "#%#x"; + break; + default: + op = "nosup"; + fmt = "%#x"; + val = f.code; + break; + } + + memset(buf, 0, sizeof(buf)); + snprintf(buf, sizeof(buf), fmt, val); + buf[sizeof(buf) - 1] = 0; + + if ((BPF_CLASS(f.code) == BPF_JMP && BPF_OP(f.code) != BPF_JA)) + rl_printf("l%d:\t%s %s, l%d, l%d\n", i, op, buf, + i + 1 + f.jt, i + 1 + f.jf); + else + rl_printf("l%d:\t%s %s\n", i, op, buf); +} + +static void bpf_dump_curr(struct bpf_regs *r, struct sock_filter *f) +{ + int i, m = 0; + + rl_printf("pc: [%u]\n", r->Pc); + rl_printf("code: [%u] jt[%u] jf[%u] k[%u]\n", + f->code, f->jt, f->jf, f->k); + rl_printf("curr: "); + bpf_disasm(*f, r->Pc); + + if (f->jt || f->jf) { + rl_printf("jt: "); + bpf_disasm(*(f + f->jt + 1), r->Pc + f->jt + 1); + rl_printf("jf: "); + bpf_disasm(*(f + f->jf + 1), r->Pc + f->jf + 1); + } + + rl_printf("A: [%#08x][%u]\n", r->A, r->A); + rl_printf("X: [%#08x][%u]\n", r->X, r->X); + if (r->Rs) + rl_printf("ret: [%#08x][%u]!\n", r->R, r->R); + + for (i = 0; i < BPF_MEMWORDS; i++) { + if (r->M[i]) { + m++; + rl_printf("M[%d]: [%#08x][%u]\n", i, r->M[i], r->M[i]); + } + } + if (m == 0) + rl_printf("M[0,%d]: [%#08x][%u]\n", BPF_MEMWORDS - 1, 0, 0); +} + +static void bpf_dump_pkt(uint8_t *pkt, uint32_t pkt_caplen, uint32_t pkt_len) +{ + if (pkt_caplen != pkt_len) + rl_printf("cap: %u, len: %u\n", pkt_caplen, pkt_len); + else + rl_printf("len: %u\n", pkt_len); + + hex_dump(pkt, pkt_caplen); +} + +static void bpf_disasm_all(const struct sock_filter *f, unsigned int len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + bpf_disasm(f[i], i); +} + +static void bpf_dump_all(const struct sock_filter *f, unsigned int len) +{ + unsigned int i; + + rl_printf("/* { op, jt, jf, k }, */\n"); + for (i = 0; i < len; i++) + rl_printf("{ %#04x, %2u, %2u, %#010x },\n", + f[i].code, f[i].jt, f[i].jf, f[i].k); +} + +static bool 
bpf_runnable(struct sock_filter *f, unsigned int len) +{ + int sock, ret, i; + struct sock_fprog bpf = { + .filter = f, + .len = len, + }; + + sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock < 0) { + rl_printf("cannot open socket!\n"); + return false; + } + ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)); + close(sock); + if (ret < 0) { + rl_printf("program not allowed to run by kernel!\n"); + return false; + } + for (i = 0; i < len; i++) { + if (BPF_CLASS(f[i].code) == BPF_LD && + f[i].k > SKF_AD_OFF) { + rl_printf("extensions currently not supported!\n"); + return false; + } + } + + return true; +} + +static void bpf_reset_breakpoints(void) +{ + int i; + + for (i = 0; i < array_size(bpf_breakpoints); i++) + bpf_breakpoints[i] = -1; +} + +static void bpf_set_breakpoints(unsigned int where) +{ + int i; + bool set = false; + + for (i = 0; i < array_size(bpf_breakpoints); i++) { + if (bpf_breakpoints[i] == (int) where) { + rl_printf("breakpoint already set!\n"); + set = true; + break; + } + + if (bpf_breakpoints[i] == -1 && set == false) { + bpf_breakpoints[i] = where; + set = true; + } + } + + if (!set) + rl_printf("too many breakpoints set, reset first!\n"); +} + +static void bpf_dump_breakpoints(void) +{ + int i; + + rl_printf("breakpoints: "); + + for (i = 0; i < array_size(bpf_breakpoints); i++) { + if (bpf_breakpoints[i] < 0) + continue; + rl_printf("%d ", bpf_breakpoints[i]); + } + + rl_printf("\n"); +} + +static void bpf_reset(void) +{ + bpf_regs_len = 0; + + memset(bpf_regs, 0, sizeof(bpf_regs)); + memset(&bpf_curr, 0, sizeof(bpf_curr)); +} + +static void bpf_safe_regs(void) +{ + memcpy(&bpf_regs[bpf_regs_len++], &bpf_curr, sizeof(bpf_curr)); +} + +static bool bpf_restore_regs(int off) +{ + unsigned int index = bpf_regs_len - 1 + off; + + if (index == 0) { + bpf_reset(); + return true; + } else if (index < bpf_regs_len) { + memcpy(&bpf_curr, &bpf_regs[index], sizeof(bpf_curr)); + bpf_regs_len = index; + return true; + } else { + rl_printf("reached bottom of register history stack!\n"); + return false; + } +} + +static uint32_t extract_u32(uint8_t *pkt, uint32_t off) +{ + uint32_t r; + + memcpy(&r, &pkt[off], sizeof(r)); + + return ntohl(r); +} + +static uint16_t extract_u16(uint8_t *pkt, uint32_t off) +{ + uint16_t r; + + memcpy(&r, &pkt[off], sizeof(r)); + + return ntohs(r); +} + +static uint8_t extract_u8(uint8_t *pkt, uint32_t off) +{ + return pkt[off]; +} + +static void set_return(struct bpf_regs *r) +{ + r->R = 0; + r->Rs = true; +} + +static void bpf_single_step(struct bpf_regs *r, struct sock_filter *f, + uint8_t *pkt, uint32_t pkt_caplen, + uint32_t pkt_len) +{ + uint32_t K = f->k; + int d; + + switch (f->code) { + case BPF_RET | BPF_K: + r->R = K; + r->Rs = true; + break; + case BPF_RET | BPF_A: + r->R = r->A; + r->Rs = true; + break; + case BPF_RET | BPF_X: + r->R = r->X; + r->Rs = true; + break; + case BPF_MISC_TAX: + r->X = r->A; + break; + case BPF_MISC_TXA: + r->A = r->X; + break; + case BPF_ST: + r->M[K] = r->A; + break; + case BPF_STX: + r->M[K] = r->X; + break; + case BPF_LD_W | BPF_ABS: + d = pkt_caplen - K; + if (d >= sizeof(uint32_t)) + r->A = extract_u32(pkt, K); + else + set_return(r); + break; + case BPF_LD_H | BPF_ABS: + d = pkt_caplen - K; + if (d >= sizeof(uint16_t)) + r->A = extract_u16(pkt, K); + else + set_return(r); + break; + case BPF_LD_B | BPF_ABS: + d = pkt_caplen - K; + if (d >= sizeof(uint8_t)) + r->A = extract_u8(pkt, K); + else + set_return(r); + break; + case BPF_LD_W | BPF_IND: + d = pkt_caplen - (r->X + K); + if (d 
>= sizeof(uint32_t)) + r->A = extract_u32(pkt, r->X + K); + break; + case BPF_LD_H | BPF_IND: + d = pkt_caplen - (r->X + K); + if (d >= sizeof(uint16_t)) + r->A = extract_u16(pkt, r->X + K); + else + set_return(r); + break; + case BPF_LD_B | BPF_IND: + d = pkt_caplen - (r->X + K); + if (d >= sizeof(uint8_t)) + r->A = extract_u8(pkt, r->X + K); + else + set_return(r); + break; + case BPF_LDX_B | BPF_MSH: + d = pkt_caplen - K; + if (d >= sizeof(uint8_t)) { + r->X = extract_u8(pkt, K); + r->X = (r->X & 0xf) << 2; + } else + set_return(r); + break; + case BPF_LD_W | BPF_LEN: + r->A = pkt_len; + break; + case BPF_LDX_W | BPF_LEN: + r->A = pkt_len; + break; + case BPF_LD | BPF_IMM: + r->A = K; + break; + case BPF_LDX | BPF_IMM: + r->X = K; + break; + case BPF_LD | BPF_MEM: + r->A = r->M[K]; + break; + case BPF_LDX | BPF_MEM: + r->X = r->M[K]; + break; + case BPF_JMP_JA: + r->Pc += K; + break; + case BPF_JMP_JGT | BPF_X: + r->Pc += r->A > r->X ? f->jt : f->jf; + break; + case BPF_JMP_JGT | BPF_K: + r->Pc += r->A > K ? f->jt : f->jf; + break; + case BPF_JMP_JGE | BPF_X: + r->Pc += r->A >= r->X ? f->jt : f->jf; + break; + case BPF_JMP_JGE | BPF_K: + r->Pc += r->A >= K ? f->jt : f->jf; + break; + case BPF_JMP_JEQ | BPF_X: + r->Pc += r->A == r->X ? f->jt : f->jf; + break; + case BPF_JMP_JEQ | BPF_K: + r->Pc += r->A == K ? f->jt : f->jf; + break; + case BPF_JMP_JSET | BPF_X: + r->Pc += r->A & r->X ? f->jt : f->jf; + break; + case BPF_JMP_JSET | BPF_K: + r->Pc += r->A & K ? f->jt : f->jf; + break; + case BPF_ALU_NEG: + r->A = -r->A; + break; + case BPF_ALU_LSH | BPF_X: + r->A <<= r->X; + break; + case BPF_ALU_LSH | BPF_K: + r->A <<= K; + break; + case BPF_ALU_RSH | BPF_X: + r->A >>= r->X; + break; + case BPF_ALU_RSH | BPF_K: + r->A >>= K; + break; + case BPF_ALU_ADD | BPF_X: + r->A += r->X; + break; + case BPF_ALU_ADD | BPF_K: + r->A += K; + break; + case BPF_ALU_SUB | BPF_X: + r->A -= r->X; + break; + case BPF_ALU_SUB | BPF_K: + r->A -= K; + break; + case BPF_ALU_MUL | BPF_X: + r->A *= r->X; + break; + case BPF_ALU_MUL | BPF_K: + r->A *= K; + break; + case BPF_ALU_DIV | BPF_X: + case BPF_ALU_MOD | BPF_X: + if (r->X == 0) { + set_return(r); + break; + } + goto do_div; + case BPF_ALU_DIV | BPF_K: + case BPF_ALU_MOD | BPF_K: + if (K == 0) { + set_return(r); + break; + } +do_div: + switch (f->code) { + case BPF_ALU_DIV | BPF_X: + r->A /= r->X; + break; + case BPF_ALU_DIV | BPF_K: + r->A /= K; + break; + case BPF_ALU_MOD | BPF_X: + r->A %= r->X; + break; + case BPF_ALU_MOD | BPF_K: + r->A %= K; + break; + } + break; + case BPF_ALU_AND | BPF_X: + r->A &= r->X; + break; + case BPF_ALU_AND | BPF_K: + r->A &= K; + break; + case BPF_ALU_OR | BPF_X: + r->A |= r->X; + break; + case BPF_ALU_OR | BPF_K: + r->A |= K; + break; + case BPF_ALU_XOR | BPF_X: + r->A ^= r->X; + break; + case BPF_ALU_XOR | BPF_K: + r->A ^= K; + break; + } +} + +static bool bpf_pc_has_breakpoint(uint16_t pc) +{ + int i; + + for (i = 0; i < array_size(bpf_breakpoints); i++) { + if (bpf_breakpoints[i] < 0) + continue; + if (bpf_breakpoints[i] == pc) + return true; + } + + return false; +} + +static bool bpf_handle_breakpoint(struct bpf_regs *r, struct sock_filter *f, + uint8_t *pkt, uint32_t pkt_caplen, + uint32_t pkt_len) +{ + rl_printf("-- register dump --\n"); + bpf_dump_curr(r, &f[r->Pc]); + rl_printf("-- packet dump --\n"); + bpf_dump_pkt(pkt, pkt_caplen, pkt_len); + rl_printf("(breakpoint)\n"); + return true; +} + +static int bpf_run_all(struct sock_filter *f, uint16_t bpf_len, uint8_t *pkt, + uint32_t pkt_caplen, uint32_t pkt_len) +{ + 
bool stop = false; + + while (bpf_curr.Rs == false && stop == false) { + bpf_safe_regs(); + + if (bpf_pc_has_breakpoint(bpf_curr.Pc)) + stop = bpf_handle_breakpoint(&bpf_curr, f, pkt, + pkt_caplen, pkt_len); + + bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen, + pkt_len); + bpf_curr.Pc++; + } + + return stop ? -1 : bpf_curr.R; +} + +static int bpf_run_stepping(struct sock_filter *f, uint16_t bpf_len, + uint8_t *pkt, uint32_t pkt_caplen, + uint32_t pkt_len, int next) +{ + bool stop = false; + int i = 1; + + while (bpf_curr.Rs == false && stop == false) { + bpf_safe_regs(); + + if (i++ == next) + stop = bpf_handle_breakpoint(&bpf_curr, f, pkt, + pkt_caplen, pkt_len); + + bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen, + pkt_len); + bpf_curr.Pc++; + } + + return stop ? -1 : bpf_curr.R; +} + +static bool pcap_loaded(void) +{ + if (pcap_fd < 0) + rl_printf("no pcap file loaded!\n"); + + return pcap_fd >= 0; +} + +static struct pcap_pkthdr *pcap_curr_pkt(void) +{ + return (void *) pcap_ptr_va_curr; +} + +static bool pcap_next_pkt(void) +{ + struct pcap_pkthdr *hdr = pcap_curr_pkt(); + + if (pcap_ptr_va_curr + sizeof(*hdr) - + pcap_ptr_va_start >= pcap_map_size) + return false; + if (hdr->caplen == 0 || hdr->len == 0 || hdr->caplen > hdr->len) + return false; + if (pcap_ptr_va_curr + sizeof(*hdr) + hdr->caplen - + pcap_ptr_va_start >= pcap_map_size) + return false; + + pcap_ptr_va_curr += (sizeof(*hdr) + hdr->caplen); + return true; +} + +static void pcap_reset_pkt(void) +{ + pcap_ptr_va_curr = pcap_ptr_va_start + sizeof(struct pcap_filehdr); +} + +static int try_load_pcap(const char *file) +{ + struct pcap_filehdr *hdr; + struct stat sb; + int ret; + + pcap_fd = open(file, O_RDONLY); + if (pcap_fd < 0) { + rl_printf("cannot open pcap [%s]!\n", strerror(errno)); + return CMD_ERR; + } + + ret = fstat(pcap_fd, &sb); + if (ret < 0) { + rl_printf("cannot fstat pcap file!\n"); + return CMD_ERR; + } + + if (!S_ISREG(sb.st_mode)) { + rl_printf("not a regular pcap file, duh!\n"); + return CMD_ERR; + } + + pcap_map_size = sb.st_size; + if (pcap_map_size <= sizeof(struct pcap_filehdr)) { + rl_printf("pcap file too small!\n"); + return CMD_ERR; + } + + pcap_ptr_va_start = mmap(NULL, pcap_map_size, PROT_READ, + MAP_SHARED | MAP_LOCKED, pcap_fd, 0); + if (pcap_ptr_va_start == MAP_FAILED) { + rl_printf("mmap of file failed!"); + return CMD_ERR; + } + + hdr = (void *) pcap_ptr_va_start; + if (hdr->magic != TCPDUMP_MAGIC) { + rl_printf("wrong pcap magic!\n"); + return CMD_ERR; + } + + pcap_reset_pkt(); + + return CMD_OK; + +} + +static void try_close_pcap(void) +{ + if (pcap_fd >= 0) { + munmap(pcap_ptr_va_start, pcap_map_size); + close(pcap_fd); + + pcap_ptr_va_start = pcap_ptr_va_curr = NULL; + pcap_map_size = 0; + pcap_packet = 0; + pcap_fd = -1; + } +} + +static int cmd_load_bpf(char *bpf_string) +{ + char sp, *token, separator = ','; + unsigned short bpf_len, i = 0; + struct sock_filter tmp; + + bpf_prog_len = 0; + memset(bpf_image, 0, sizeof(bpf_image)); + + if (sscanf(bpf_string, "%hu%c", &bpf_len, &sp) != 2 || + sp != separator || bpf_len > BPF_MAXINSNS || bpf_len == 0) { + rl_printf("syntax error in head length encoding!\n"); + return CMD_ERR; + } + + token = bpf_string; + while ((token = strchr(token, separator)) && (++token)[0]) { + if (i >= bpf_len) { + rl_printf("program exceeds encoded length!\n"); + return CMD_ERR; + } + + if (sscanf(token, "%hu %hhu %hhu %u,", + &tmp.code, &tmp.jt, &tmp.jf, &tmp.k) != 4) { + rl_printf("syntax error at instruction %d!\n", i); + return 
CMD_ERR; + } + + bpf_image[i].code = tmp.code; + bpf_image[i].jt = tmp.jt; + bpf_image[i].jf = tmp.jf; + bpf_image[i].k = tmp.k; + + i++; + } + + if (i != bpf_len) { + rl_printf("syntax error exceeding encoded length!\n"); + return CMD_ERR; + } else + bpf_prog_len = bpf_len; + if (!bpf_runnable(bpf_image, bpf_prog_len)) + bpf_prog_len = 0; + + return CMD_OK; +} + +static int cmd_load_pcap(char *file) +{ + char *file_trim, *tmp; + + file_trim = strtok_r(file, " ", &tmp); + if (file_trim == NULL) + return CMD_ERR; + + try_close_pcap(); + + return try_load_pcap(file_trim); +} + +static int cmd_load(char *arg) +{ + char *subcmd, *cont, *tmp = strdup(arg); + int ret = CMD_OK; + + subcmd = strtok_r(tmp, " ", &cont); + if (subcmd == NULL) + goto out; + if (matches(subcmd, "bpf") == 0) { + bpf_reset(); + bpf_reset_breakpoints(); + + ret = cmd_load_bpf(cont); + } else if (matches(subcmd, "pcap") == 0) { + ret = cmd_load_pcap(cont); + } else { +out: + rl_printf("bpf <code>: load bpf code\n"); + rl_printf("pcap <file>: load pcap file\n"); + ret = CMD_ERR; + } + + free(tmp); + return ret; +} + +static int cmd_step(char *num) +{ + struct pcap_pkthdr *hdr; + int steps, ret; + + if (!bpf_prog_loaded() || !pcap_loaded()) + return CMD_ERR; + + steps = strtol(num, NULL, 10); + if (steps == 0 || strlen(num) == 0) + steps = 1; + if (steps < 0) { + if (!bpf_restore_regs(steps)) + return CMD_ERR; + steps = 1; + } + + hdr = pcap_curr_pkt(); + ret = bpf_run_stepping(bpf_image, bpf_prog_len, + (uint8_t *) hdr + sizeof(*hdr), + hdr->caplen, hdr->len, steps); + if (ret >= 0 || bpf_curr.Rs) { + bpf_reset(); + if (!pcap_next_pkt()) { + rl_printf("(going back to first packet)\n"); + pcap_reset_pkt(); + } else { + rl_printf("(next packet)\n"); + } + } + + return CMD_OK; +} + +static int cmd_select(char *num) +{ + unsigned int which, i; + bool have_next = true; + + if (!pcap_loaded() || strlen(num) == 0) + return CMD_ERR; + + which = strtoul(num, NULL, 10); + if (which == 0) { + rl_printf("packet count starts with 1, clamping!\n"); + which = 1; + } + + pcap_reset_pkt(); + bpf_reset(); + + for (i = 0; i < which && (have_next = pcap_next_pkt()); i++) + /* noop */; + if (!have_next || pcap_curr_pkt() == NULL) { + rl_printf("no packet #%u available!\n", which); + pcap_reset_pkt(); + return CMD_ERR; + } + + return CMD_OK; +} + +static int cmd_breakpoint(char *subcmd) +{ + if (!bpf_prog_loaded()) + return CMD_ERR; + if (strlen(subcmd) == 0) + bpf_dump_breakpoints(); + else if (matches(subcmd, "reset") == 0) + bpf_reset_breakpoints(); + else { + unsigned int where = strtoul(subcmd, NULL, 10); + + if (where < bpf_prog_len) { + bpf_set_breakpoints(where); + rl_printf("breakpoint at: "); + bpf_disasm(bpf_image[where], where); + } + } + + return CMD_OK; +} + +static int cmd_run(char *num) +{ + static uint32_t pass = 0, fail = 0; + bool has_limit = true; + int pkts = 0, i = 0; + + if (!bpf_prog_loaded() || !pcap_loaded()) + return CMD_ERR; + + pkts = strtol(num, NULL, 10); + if (pkts == 0 || strlen(num) == 0) + has_limit = false; + + do { + struct pcap_pkthdr *hdr = pcap_curr_pkt(); + int ret = bpf_run_all(bpf_image, bpf_prog_len, + (uint8_t *) hdr + sizeof(*hdr), + hdr->caplen, hdr->len); + if (ret > 0) + pass++; + else if (ret == 0) + fail++; + else + return CMD_OK; + bpf_reset(); + } while (pcap_next_pkt() && (!has_limit || (has_limit && ++i < pkts))); + + rl_printf("bpf passes:%u fails:%u\n", pass, fail); + + pcap_reset_pkt(); + bpf_reset(); + + pass = fail = 0; + return CMD_OK; +} + +static int cmd_disassemble(char 
*line_string) +{ + bool single_line = false; + unsigned long line; + + if (!bpf_prog_loaded()) + return CMD_ERR; + if (strlen(line_string) > 0 && + (line = strtoul(line_string, NULL, 10)) < bpf_prog_len) + single_line = true; + if (single_line) + bpf_disasm(bpf_image[line], line); + else + bpf_disasm_all(bpf_image, bpf_prog_len); + + return CMD_OK; +} + +static int cmd_dump(char *dontcare) +{ + if (!bpf_prog_loaded()) + return CMD_ERR; + + bpf_dump_all(bpf_image, bpf_prog_len); + + return CMD_OK; +} + +static int cmd_quit(char *dontcare) +{ + return CMD_EX; +} + +static const struct shell_cmd cmds[] = { + { .name = "load", .func = cmd_load }, + { .name = "select", .func = cmd_select }, + { .name = "step", .func = cmd_step }, + { .name = "run", .func = cmd_run }, + { .name = "breakpoint", .func = cmd_breakpoint }, + { .name = "disassemble", .func = cmd_disassemble }, + { .name = "dump", .func = cmd_dump }, + { .name = "quit", .func = cmd_quit }, +}; + +static int execf(char *arg) +{ + char *cmd, *cont, *tmp = strdup(arg); + int i, ret = 0, len; + + cmd = strtok_r(tmp, " ", &cont); + if (cmd == NULL) + goto out; + len = strlen(cmd); + for (i = 0; i < array_size(cmds); i++) { + if (len != strlen(cmds[i].name)) + continue; + if (strncmp(cmds[i].name, cmd, len) == 0) { + ret = cmds[i].func(cont); + break; + } + } +out: + free(tmp); + return ret; +} + +static char *shell_comp_gen(const char *buf, int state) +{ + static int list_index, len; + + if (!state) { + list_index = 0; + len = strlen(buf); + } + + for (; list_index < array_size(cmds); ) { + const char *name = cmds[list_index].name; + + list_index++; + if (strncmp(name, buf, len) == 0) + return strdup(name); + } + + return NULL; +} + +static char **shell_completion(const char *buf, int start, int end) +{ + char **matches = NULL; + + if (start == 0) + matches = rl_completion_matches(buf, shell_comp_gen); + + return matches; +} + +static void intr_shell(int sig) +{ + if (rl_end) + rl_kill_line(-1, 0); + + rl_crlf(); + rl_refresh_line(0, 0); + rl_free_line_state(); +} + +static void init_shell(FILE *fin, FILE *fout) +{ + char file[128]; + + snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME")); + read_history(file); + + rl_instream = fin; + rl_outstream = fout; + + rl_readline_name = "bpf_dbg"; + rl_terminal_name = getenv("TERM"); + + rl_catch_signals = 0; + rl_catch_sigwinch = 1; + + rl_attempted_completion_function = shell_completion; + + rl_bind_key('\t', rl_complete); + + rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap); + rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap); + + snprintf(file, sizeof(file), "%s/.bpf_dbg_init", getenv("HOME")); + rl_read_init_file(file); + + rl_prep_terminal(0); + rl_set_signals(); + + signal(SIGINT, intr_shell); +} + +static void exit_shell(FILE *fin, FILE *fout) +{ + char file[128]; + + snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME")); + write_history(file); + + clear_history(); + rl_deprep_terminal(); + + try_close_pcap(); + + if (fin != stdin) + fclose(fin); + if (fout != stdout) + fclose(fout); +} + +static int run_shell_loop(FILE *fin, FILE *fout) +{ + char *buf; + + init_shell(fin, fout); + + while ((buf = readline("> ")) != NULL) { + int ret = execf(buf); + if (ret == CMD_EX) + break; + if (ret == CMD_OK && strlen(buf) > 0) + add_history(buf); + + free(buf); + } + + exit_shell(fin, fout); + return 0; +} + +int main(int argc, char **argv) +{ + FILE *fin = NULL, *fout = NULL; + + if (argc >= 2) + fin = fopen(argv[1], "r"); + if (argc >= 3) + fout = 
fopen(argv[2], "w"); + + return run_shell_loop(fin ? : stdin, fout ? : stdout); +} diff --git a/tools/net/bpf_exp.l b/tools/net/bpf_exp.l new file mode 100644 index 00000000000..833a96611da --- /dev/null +++ b/tools/net/bpf_exp.l @@ -0,0 +1,144 @@ +/* + * BPF asm code lexer + * + * This program is free software; you can distribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Syntax kept close to: + * + * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new + * architecture for user-level packet capture. In Proceedings of the + * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993 + * Conference Proceedings (USENIX'93). USENIX Association, Berkeley, + * CA, USA, 2-2. + * + * Copyright 2013 Daniel Borkmann <borkmann@redhat.com> + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +%{ + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> + +#include "bpf_exp.yacc.h" + +extern void yyerror(const char *str); + +%} + +%option align +%option ecs + +%option nounput +%option noreject +%option noinput +%option noyywrap + +%option 8bit +%option caseless +%option yylineno + +%% + +"ldb" { return OP_LDB; } +"ldh" { return OP_LDH; } +"ld" { return OP_LD; } +"ldi" { return OP_LDI; } +"ldx" { return OP_LDX; } +"ldxi" { return OP_LDXI; } +"ldxb" { return OP_LDXB; } +"st" { return OP_ST; } +"stx" { return OP_STX; } +"jmp" { return OP_JMP; } +"ja" { return OP_JMP; } +"jeq" { return OP_JEQ; } +"jneq" { return OP_JNEQ; } +"jne" { return OP_JNEQ; } +"jlt" { return OP_JLT; } +"jle" { return OP_JLE; } +"jgt" { return OP_JGT; } +"jge" { return OP_JGE; } +"jset" { return OP_JSET; } +"add" { return OP_ADD; } +"sub" { return OP_SUB; } +"mul" { return OP_MUL; } +"div" { return OP_DIV; } +"mod" { return OP_MOD; } +"neg" { return OP_NEG; } +"and" { return OP_AND; } +"xor" { return OP_XOR; } +"or" { return OP_OR; } +"lsh" { return OP_LSH; } +"rsh" { return OP_RSH; } +"ret" { return OP_RET; } +"tax" { return OP_TAX; } +"txa" { return OP_TXA; } + +"#"?("len") { return K_PKT_LEN; } +"#"?("proto") { return K_PROTO; } +"#"?("type") { return K_TYPE; } +"#"?("poff") { return K_POFF; } +"#"?("ifidx") { return K_IFIDX; } +"#"?("nla") { return K_NLATTR; } +"#"?("nlan") { return K_NLATTR_NEST; } +"#"?("mark") { return K_MARK; } +"#"?("queue") { return K_QUEUE; } +"#"?("hatype") { return K_HATYPE; } +"#"?("rxhash") { return K_RXHASH; } +"#"?("cpu") { return K_CPU; } +"#"?("vlan_tci") { return K_VLANT; } +"#"?("vlan_pr") { return K_VLANP; } +"#"?("rand") { return K_RAND; } + +":" { return ':'; } +"," { return ','; } +"#" { return '#'; } +"%" { return '%'; } +"[" { return '['; } +"]" { return ']'; } +"(" { return '('; } +")" { return ')'; } +"x" { return 'x'; } +"a" { return 'a'; } +"+" { return '+'; } +"M" { return 'M'; } +"*" { return '*'; } +"&" { return '&'; } + +([0][x][a-fA-F0-9]+) { + yylval.number = strtoul(yytext, NULL, 16); + return number; + } +([0][b][0-1]+) { + yylval.number = strtol(yytext + 2, NULL, 2); + return number; + } +(([0])|([-+]?[1-9][0-9]*)) { + yylval.number = strtol(yytext, NULL, 10); + return number; + } +([0][0-9]+) { + yylval.number = strtol(yytext + 1, NULL, 8); + return number; + } +[a-zA-Z_][a-zA-Z0-9_]+ { + yylval.label = strdup(yytext); + return label; + } + +"/*"([^\*]|\*[^/])*"*/" { /* NOP */ } +";"[^\n]* { /* NOP */ } +^#.* { /* NOP */ } +[ \t]+ { /* NOP */ } +[ \n]+ { /* NOP 
*/ } + +. { + printf("unknown character \'%s\'", yytext); + yyerror("lex unknown character"); + } + +%% diff --git a/tools/net/bpf_exp.y b/tools/net/bpf_exp.y new file mode 100644 index 00000000000..e6306c51c26 --- /dev/null +++ b/tools/net/bpf_exp.y @@ -0,0 +1,771 @@ +/* + * BPF asm code parser + * + * This program is free software; you can distribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Syntax kept close to: + * + * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new + * architecture for user-level packet capture. In Proceedings of the + * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993 + * Conference Proceedings (USENIX'93). USENIX Association, Berkeley, + * CA, USA, 2-2. + * + * Copyright 2013 Daniel Borkmann <borkmann@redhat.com> + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +%{ + +#include <stdio.h> +#include <string.h> +#include <stdint.h> +#include <stdlib.h> +#include <stdbool.h> +#include <unistd.h> +#include <errno.h> +#include <assert.h> +#include <linux/filter.h> + +#include "bpf_exp.yacc.h" + +enum jmp_type { JTL, JFL, JKL }; + +extern FILE *yyin; +extern int yylex(void); +extern void yyerror(const char *str); + +extern void bpf_asm_compile(FILE *fp, bool cstyle); +static void bpf_set_curr_instr(uint16_t op, uint8_t jt, uint8_t jf, uint32_t k); +static void bpf_set_curr_label(char *label); +static void bpf_set_jmp_label(char *label, enum jmp_type type); + +%} + +%union { + char *label; + uint32_t number; +} + +%token OP_LDB OP_LDH OP_LD OP_LDX OP_ST OP_STX OP_JMP OP_JEQ OP_JGT OP_JGE +%token OP_JSET OP_ADD OP_SUB OP_MUL OP_DIV OP_AND OP_OR OP_XOR OP_LSH OP_RSH +%token OP_RET OP_TAX OP_TXA OP_LDXB OP_MOD OP_NEG OP_JNEQ OP_JLT OP_JLE OP_LDI +%token OP_LDXI + +%token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE +%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF K_RAND + +%token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%' + +%token number label + +%type <label> label +%type <number> number + +%% + +prog + : line + | prog line + ; + +line + : instr + | labelled_instr + ; + +labelled_instr + : labelled instr + ; + +instr + : ldb + | ldh + | ld + | ldi + | ldx + | ldxi + | st + | stx + | jmp + | jeq + | jneq + | jlt + | jle + | jgt + | jge + | jset + | add + | sub + | mul + | div + | mod + | neg + | and + | or + | xor + | lsh + | rsh + | ret + | tax + | txa + ; + +labelled + : label ':' { bpf_set_curr_label($1); } + ; + +ldb + : OP_LDB '[' 'x' '+' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $5); } + | OP_LDB '[' '%' 'x' '+' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $6); } + | OP_LDB '[' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, $3); } + | OP_LDB K_PROTO { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PROTOCOL); } + | OP_LDB K_TYPE { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PKTTYPE); } + | OP_LDB K_IFIDX { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_IFINDEX); } + | OP_LDB K_NLATTR { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_NLATTR); } + | OP_LDB K_NLATTR_NEST { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_NLATTR_NEST); } + | OP_LDB K_MARK { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 
0, + SKF_AD_OFF + SKF_AD_MARK); } + | OP_LDB K_QUEUE { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_QUEUE); } + | OP_LDB K_HATYPE { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_HATYPE); } + | OP_LDB K_RXHASH { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RXHASH); } + | OP_LDB K_CPU { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_CPU); } + | OP_LDB K_VLANT { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_VLAN_TAG); } + | OP_LDB K_VLANP { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); } + | OP_LDB K_POFF { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PAY_OFFSET); } + | OP_LDB K_RAND { + bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RANDOM); } + ; + +ldh + : OP_LDH '[' 'x' '+' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $5); } + | OP_LDH '[' '%' 'x' '+' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $6); } + | OP_LDH '[' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, $3); } + | OP_LDH K_PROTO { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PROTOCOL); } + | OP_LDH K_TYPE { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PKTTYPE); } + | OP_LDH K_IFIDX { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_IFINDEX); } + | OP_LDH K_NLATTR { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_NLATTR); } + | OP_LDH K_NLATTR_NEST { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_NLATTR_NEST); } + | OP_LDH K_MARK { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_MARK); } + | OP_LDH K_QUEUE { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_QUEUE); } + | OP_LDH K_HATYPE { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_HATYPE); } + | OP_LDH K_RXHASH { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RXHASH); } + | OP_LDH K_CPU { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_CPU); } + | OP_LDH K_VLANT { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_VLAN_TAG); } + | OP_LDH K_VLANP { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); } + | OP_LDH K_POFF { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PAY_OFFSET); } + | OP_LDH K_RAND { + bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RANDOM); } + ; + +ldi + : OP_LDI '#' number { + bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); } + | OP_LDI number { + bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $2); } + ; + +ld + : OP_LD '#' number { + bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); } + | OP_LD K_PKT_LEN { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_LEN, 0, 0, 0); } + | OP_LD K_PROTO { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PROTOCOL); } + | OP_LD K_TYPE { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PKTTYPE); } + | OP_LD K_IFIDX { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_IFINDEX); } + | OP_LD K_NLATTR { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_NLATTR); } + | OP_LD K_NLATTR_NEST { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + 
SKF_AD_OFF + SKF_AD_NLATTR_NEST); } + | OP_LD K_MARK { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_MARK); } + | OP_LD K_QUEUE { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_QUEUE); } + | OP_LD K_HATYPE { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_HATYPE); } + | OP_LD K_RXHASH { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RXHASH); } + | OP_LD K_CPU { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_CPU); } + | OP_LD K_VLANT { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_VLAN_TAG); } + | OP_LD K_VLANP { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT); } + | OP_LD K_POFF { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_PAY_OFFSET); } + | OP_LD K_RAND { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, + SKF_AD_OFF + SKF_AD_RANDOM); } + | OP_LD 'M' '[' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); } + | OP_LD '[' 'x' '+' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_IND, 0, 0, $5); } + | OP_LD '[' '%' 'x' '+' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_IND, 0, 0, $6); } + | OP_LD '[' number ']' { + bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, $3); } + ; + +ldxi + : OP_LDXI '#' number { + bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $3); } + | OP_LDXI number { + bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $2); } + ; + +ldx + : OP_LDX '#' number { + bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $3); } + | OP_LDX K_PKT_LEN { + bpf_set_curr_instr(BPF_LDX | BPF_W | BPF_LEN, 0, 0, 0); } + | OP_LDX 'M' '[' number ']' { + bpf_set_curr_instr(BPF_LDX | BPF_MEM, 0, 0, $4); } + | OP_LDXB number '*' '(' '[' number ']' '&' number ')' { + if ($2 != 4 || $9 != 0xf) { + fprintf(stderr, "ldxb offset not supported!\n"); + exit(0); + } else { + bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } } + | OP_LDX number '*' '(' '[' number ']' '&' number ')' { + if ($2 != 4 || $9 != 0xf) { + fprintf(stderr, "ldxb offset not supported!\n"); + exit(0); + } else { + bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } } + ; + +st + : OP_ST 'M' '[' number ']' { + bpf_set_curr_instr(BPF_ST, 0, 0, $4); } + ; + +stx + : OP_STX 'M' '[' number ']' { + bpf_set_curr_instr(BPF_STX, 0, 0, $4); } + ; + +jmp + : OP_JMP label { + bpf_set_jmp_label($2, JKL); + bpf_set_curr_instr(BPF_JMP | BPF_JA, 0, 0, 0); } + ; + +jeq + : OP_JEQ '#' number ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); } + | OP_JEQ 'x' ',' label ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_jmp_label($6, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); } + | OP_JEQ '%' 'x' ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); } + | OP_JEQ '#' number ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); } + | OP_JEQ 'x' ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); } + | OP_JEQ '%' 'x' ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); } + ; + +jneq + : OP_JNEQ '#' number ',' label { + bpf_set_jmp_label($5, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); } + | OP_JNEQ 'x' ',' label { + 
bpf_set_jmp_label($4, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); } + | OP_JNEQ '%' 'x' ',' label { + bpf_set_jmp_label($5, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); } + ; + +jlt + : OP_JLT '#' number ',' label { + bpf_set_jmp_label($5, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); } + | OP_JLT 'x' ',' label { + bpf_set_jmp_label($4, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); } + | OP_JLT '%' 'x' ',' label { + bpf_set_jmp_label($5, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); } + ; + +jle + : OP_JLE '#' number ',' label { + bpf_set_jmp_label($5, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); } + | OP_JLE 'x' ',' label { + bpf_set_jmp_label($4, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); } + | OP_JLE '%' 'x' ',' label { + bpf_set_jmp_label($5, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); } + ; + +jgt + : OP_JGT '#' number ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); } + | OP_JGT 'x' ',' label ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_jmp_label($6, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); } + | OP_JGT '%' 'x' ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); } + | OP_JGT '#' number ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); } + | OP_JGT 'x' ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); } + | OP_JGT '%' 'x' ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); } + ; + +jge + : OP_JGE '#' number ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); } + | OP_JGE 'x' ',' label ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_jmp_label($6, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); } + | OP_JGE '%' 'x' ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); } + | OP_JGE '#' number ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); } + | OP_JGE 'x' ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); } + | OP_JGE '%' 'x' ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); } + ; + +jset + : OP_JSET '#' number ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_K, 0, 0, $3); } + | OP_JSET 'x' ',' label ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_jmp_label($6, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); } + | OP_JSET '%' 'x' ',' label ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_jmp_label($7, JFL); + bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); } + | OP_JSET '#' number ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_K, 0, 0, $3); } + | OP_JSET 'x' ',' label { + bpf_set_jmp_label($4, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); } + | OP_JSET '%' 'x' ',' label { + bpf_set_jmp_label($5, JTL); + bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); } + ; + +add + 
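+	/*
+	 * The ALU rules that follow (add, sub, mul, div, mod, and, or,
+	 * xor, lsh, rsh) all share one pattern: a '#'number operand
+	 * selects the BPF_K (immediate) encoding, while a bare 'x' or
+	 * '%x' operand selects the BPF_X (index register) encoding.
+	 */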
: OP_ADD '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_K, 0, 0, $3); } + | OP_ADD 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_X, 0, 0, 0); } + | OP_ADD '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_X, 0, 0, 0); } + ; + +sub + : OP_SUB '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_K, 0, 0, $3); } + | OP_SUB 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_X, 0, 0, 0); } + | OP_SUB '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_X, 0, 0, 0); } + ; + +mul + : OP_MUL '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_K, 0, 0, $3); } + | OP_MUL 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_X, 0, 0, 0); } + | OP_MUL '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_X, 0, 0, 0); } + ; + +div + : OP_DIV '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_K, 0, 0, $3); } + | OP_DIV 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_X, 0, 0, 0); } + | OP_DIV '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_X, 0, 0, 0); } + ; + +mod + : OP_MOD '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_K, 0, 0, $3); } + | OP_MOD 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_X, 0, 0, 0); } + | OP_MOD '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_X, 0, 0, 0); } + ; + +neg + : OP_NEG { + bpf_set_curr_instr(BPF_ALU | BPF_NEG, 0, 0, 0); } + ; + +and + : OP_AND '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_K, 0, 0, $3); } + | OP_AND 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_X, 0, 0, 0); } + | OP_AND '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_X, 0, 0, 0); } + ; + +or + : OP_OR '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_K, 0, 0, $3); } + | OP_OR 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_X, 0, 0, 0); } + | OP_OR '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_X, 0, 0, 0); } + ; + +xor + : OP_XOR '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_K, 0, 0, $3); } + | OP_XOR 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_X, 0, 0, 0); } + | OP_XOR '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_X, 0, 0, 0); } + ; + +lsh + : OP_LSH '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_K, 0, 0, $3); } + | OP_LSH 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_X, 0, 0, 0); } + | OP_LSH '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_X, 0, 0, 0); } + ; + +rsh + : OP_RSH '#' number { + bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_K, 0, 0, $3); } + | OP_RSH 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_X, 0, 0, 0); } + | OP_RSH '%' 'x' { + bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_X, 0, 0, 0); } + ; + +ret + : OP_RET 'a' { + bpf_set_curr_instr(BPF_RET | BPF_A, 0, 0, 0); } + | OP_RET '%' 'a' { + bpf_set_curr_instr(BPF_RET | BPF_A, 0, 0, 0); } + | OP_RET 'x' { + bpf_set_curr_instr(BPF_RET | BPF_X, 0, 0, 0); } + | OP_RET '%' 'x' { + bpf_set_curr_instr(BPF_RET | BPF_X, 0, 0, 0); } + | OP_RET '#' number { + bpf_set_curr_instr(BPF_RET | BPF_K, 0, 0, $3); } + ; + +tax + : OP_TAX { + bpf_set_curr_instr(BPF_MISC | BPF_TAX, 0, 0, 0); } + ; + +txa + : OP_TXA { + bpf_set_curr_instr(BPF_MISC | BPF_TXA, 0, 0, 0); } + ; + +%% + +static int curr_instr = 0; +static struct sock_filter out[BPF_MAXINSNS]; +static char **labels, **labels_jt, **labels_jf, **labels_k; + +static void bpf_assert_max(void) +{ + if (curr_instr >= BPF_MAXINSNS) { + fprintf(stderr, "only max %u insns allowed!\n", BPF_MAXINSNS); + exit(0); + } +} + +static void bpf_set_curr_instr(uint16_t code, uint8_t jt, uint8_t jf, + uint32_t k) +{ + bpf_assert_max(); + out[curr_instr].code = 
code; + out[curr_instr].jt = jt; + out[curr_instr].jf = jf; + out[curr_instr].k = k; + curr_instr++; +} + +static void bpf_set_curr_label(char *label) +{ + bpf_assert_max(); + labels[curr_instr] = label; +} + +static void bpf_set_jmp_label(char *label, enum jmp_type type) +{ + bpf_assert_max(); + switch (type) { + case JTL: + labels_jt[curr_instr] = label; + break; + case JFL: + labels_jf[curr_instr] = label; + break; + case JKL: + labels_k[curr_instr] = label; + break; + } +} + +static int bpf_find_insns_offset(const char *label) +{ + int i, max = curr_instr, ret = -ENOENT; + + for (i = 0; i < max; i++) { + if (labels[i] && !strcmp(label, labels[i])) { + ret = i; + break; + } + } + + if (ret == -ENOENT) { + fprintf(stderr, "no such label \'%s\'!\n", label); + exit(0); + } + + return ret; +} + +static void bpf_stage_1_insert_insns(void) +{ + yyparse(); +} + +static void bpf_reduce_k_jumps(void) +{ + int i; + + for (i = 0; i < curr_instr; i++) { + if (labels_k[i]) { + int off = bpf_find_insns_offset(labels_k[i]); + out[i].k = (uint32_t) (off - i - 1); + } + } +} + +static void bpf_reduce_jt_jumps(void) +{ + int i; + + for (i = 0; i < curr_instr; i++) { + if (labels_jt[i]) { + int off = bpf_find_insns_offset(labels_jt[i]); + out[i].jt = (uint8_t) (off - i -1); + } + } +} + +static void bpf_reduce_jf_jumps(void) +{ + int i; + + for (i = 0; i < curr_instr; i++) { + if (labels_jf[i]) { + int off = bpf_find_insns_offset(labels_jf[i]); + out[i].jf = (uint8_t) (off - i - 1); + } + } +} + +static void bpf_stage_2_reduce_labels(void) +{ + bpf_reduce_k_jumps(); + bpf_reduce_jt_jumps(); + bpf_reduce_jf_jumps(); +} + +static void bpf_pretty_print_c(void) +{ + int i; + + for (i = 0; i < curr_instr; i++) + printf("{ %#04x, %2u, %2u, %#010x },\n", out[i].code, + out[i].jt, out[i].jf, out[i].k); +} + +static void bpf_pretty_print(void) +{ + int i; + + printf("%u,", curr_instr); + for (i = 0; i < curr_instr; i++) + printf("%u %u %u %u,", out[i].code, + out[i].jt, out[i].jf, out[i].k); + printf("\n"); +} + +static void bpf_init(void) +{ + memset(out, 0, sizeof(out)); + + labels = calloc(BPF_MAXINSNS, sizeof(*labels)); + assert(labels); + labels_jt = calloc(BPF_MAXINSNS, sizeof(*labels_jt)); + assert(labels_jt); + labels_jf = calloc(BPF_MAXINSNS, sizeof(*labels_jf)); + assert(labels_jf); + labels_k = calloc(BPF_MAXINSNS, sizeof(*labels_k)); + assert(labels_k); +} + +static void bpf_destroy_labels(void) +{ + int i; + + for (i = 0; i < curr_instr; i++) { + free(labels_jf[i]); + free(labels_jt[i]); + free(labels_k[i]); + free(labels[i]); + } +} + +static void bpf_destroy(void) +{ + bpf_destroy_labels(); + free(labels_jt); + free(labels_jf); + free(labels_k); + free(labels); +} + +void bpf_asm_compile(FILE *fp, bool cstyle) +{ + yyin = fp; + + bpf_init(); + bpf_stage_1_insert_insns(); + bpf_stage_2_reduce_labels(); + bpf_destroy(); + + if (cstyle) + bpf_pretty_print_c(); + else + bpf_pretty_print(); + + if (fp != stdin) + fclose(yyin); +} + +void yyerror(const char *str) +{ + exit(1); +} diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c new file mode 100644 index 00000000000..c5baf9c591b --- /dev/null +++ b/tools/net/bpf_jit_disasm.c @@ -0,0 +1,197 @@ +/* + * Minimal BPF JIT image disassembler + * + * Disassembles BPF JIT compiler emitted opcodes back to asm insn's for + * debugging or verification purposes. + * + * To get the disassembly of the JIT code, do the following: + * + * 1) `echo 2 > /proc/sys/net/core/bpf_jit_enable` + * 2) Load a BPF filter (e.g. 
`tcpdump -p -n -s 0 -i eth1 host 192.168.20.0/24`) + * 3) Run e.g. `bpf_jit_disasm -o` to read out the last JIT code + * + * Copyright 2013 Daniel Borkmann <borkmann@redhat.com> + * Licensed under the GNU General Public License, version 2.0 (GPLv2) + */ + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <unistd.h> +#include <string.h> +#include <bfd.h> +#include <dis-asm.h> +#include <sys/klog.h> +#include <sys/types.h> +#include <regex.h> + +static void get_exec_path(char *tpath, size_t size) +{ + char *path; + ssize_t len; + + snprintf(tpath, size, "/proc/%d/exe", (int) getpid()); + tpath[size - 1] = 0; + + path = strdup(tpath); + assert(path); + + len = readlink(path, tpath, size); + tpath[len] = 0; + + free(path); +} + +static void get_asm_insns(uint8_t *image, size_t len, int opcodes) +{ + int count, i, pc = 0; + char tpath[256]; + struct disassemble_info info; + disassembler_ftype disassemble; + bfd *bfdf; + + memset(tpath, 0, sizeof(tpath)); + get_exec_path(tpath, sizeof(tpath)); + + bfdf = bfd_openr(tpath, NULL); + assert(bfdf); + assert(bfd_check_format(bfdf, bfd_object)); + + init_disassemble_info(&info, stdout, (fprintf_ftype) fprintf); + info.arch = bfd_get_arch(bfdf); + info.mach = bfd_get_mach(bfdf); + info.buffer = image; + info.buffer_length = len; + + disassemble_init_for_target(&info); + + disassemble = disassembler(bfdf); + assert(disassemble); + + do { + printf("%4x:\t", pc); + + count = disassemble(pc, &info); + + if (opcodes) { + printf("\n\t"); + for (i = 0; i < count; ++i) + printf("%02x ", (uint8_t) image[pc + i]); + } + printf("\n"); + + pc += count; + } while(count > 0 && pc < len); + + bfd_close(bfdf); +} + +static char *get_klog_buff(int *klen) +{ + int ret, len = klogctl(10, NULL, 0); + char *buff = malloc(len); + + assert(buff && klen); + ret = klogctl(3, buff, len); + assert(ret >= 0); + *klen = ret; + + return buff; +} + +static void put_klog_buff(char *buff) +{ + free(buff); +} + +static int get_last_jit_image(char *haystack, size_t hlen, + uint8_t *image, size_t ilen) +{ + char *ptr, *pptr, *tmp; + off_t off = 0; + int ret, flen, proglen, pass, ulen = 0; + regmatch_t pmatch[1]; + unsigned long base; + regex_t regex; + + if (hlen == 0) + return 0; + + ret = regcomp(®ex, "flen=[[:alnum:]]+ proglen=[[:digit:]]+ " + "pass=[[:digit:]]+ image=[[:xdigit:]]+", REG_EXTENDED); + assert(ret == 0); + + ptr = haystack; + while (1) { + ret = regexec(®ex, ptr, 1, pmatch, 0); + if (ret == 0) { + ptr += pmatch[0].rm_eo; + off += pmatch[0].rm_eo; + assert(off < hlen); + } else + break; + } + + ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so); + ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx", + &flen, &proglen, &pass, &base); + if (ret != 4) + return 0; + + tmp = ptr = haystack + off; + while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) { + tmp = NULL; + if (!strstr(ptr, "JIT code")) + continue; + pptr = ptr; + while ((ptr = strstr(pptr, ":"))) + pptr = ptr + 1; + ptr = pptr; + do { + image[ulen++] = (uint8_t) strtoul(pptr, &pptr, 16); + if (ptr == pptr || ulen >= ilen) { + ulen--; + break; + } + ptr = pptr; + } while (1); + } + + assert(ulen == proglen); + printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n", + proglen, pass, flen); + printf("%lx + <x>:\n", base); + + regfree(®ex); + return ulen; +} + +int main(int argc, char **argv) +{ + int len, klen, opcodes = 0; + char *kbuff; + static uint8_t image[32768]; + + if (argc > 1) { + if (!strncmp("-o", argv[argc - 1], 2)) { + opcodes = 1; + } 
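+		/* with -o, get_asm_insns() also dumps the raw opcode bytes */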
else { + printf("usage: bpf_jit_disasm [-o: show opcodes]\n"); + exit(0); + } + } + + bfd_init(); + memset(image, 0, sizeof(image)); + + kbuff = get_klog_buff(&klen); + + len = get_last_jit_image(kbuff, klen, image, sizeof(image)); + if (len > 0) + get_asm_insns(image, len, opcodes); + + put_klog_buff(kbuff); + + return 0; +} diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index 8f8fbc227a4..782d86e961b 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -13,6 +13,7 @@ perf*.html common-cmds.h perf.data perf.data.old +output.svg perf-archive tags TAGS diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index 9f2e44f2b17..3ba1c0b0990 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -1,9 +1,5 @@ -OUTPUT := ./ -ifeq ("$(origin O)", "command line") - ifneq ($(O),) - OUTPUT := $(O)/ - endif -endif +include ../../scripts/Makefile.include +include ../config/utilities.mak MAN1_TXT= \ $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \ @@ -64,6 +60,7 @@ MAKEINFO=makeinfo INSTALL_INFO=install-info DOCBOOK2X_TEXI=docbook2x-texi DBLATEX=dblatex +XMLTO=xmlto ifndef PERL_PATH PERL_PATH = /usr/bin/perl endif @@ -71,6 +68,16 @@ endif -include ../config.mak.autogen -include ../config.mak +_tmp_tool_path := $(call get-executable,$(ASCIIDOC)) +ifeq ($(_tmp_tool_path),) + missing_tools = $(ASCIIDOC) +endif + +_tmp_tool_path := $(call get-executable,$(XMLTO)) +ifeq ($(_tmp_tool_path),) + missing_tools += $(XMLTO) +endif + # # For asciidoc ... # -7.1.2, no extra settings are needed. @@ -137,17 +144,18 @@ NO_SUBDIR = : endif ifneq ($(findstring $(MAKEFLAGS),s),s) -ifndef V - QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@; - QUIET_XMLTO = @echo ' ' XMLTO $@; - QUIET_DB2TEXI = @echo ' ' DB2TEXI $@; - QUIET_MAKEINFO = @echo ' ' MAKEINFO $@; - QUIET_DBLATEX = @echo ' ' DBLATEX $@; - QUIET_XSLTPROC = @echo ' ' XSLTPROC $@; - QUIET_GEN = @echo ' ' GEN $@; +ifneq ($(V),1) + QUIET_ASCIIDOC = @echo ' ASCIIDOC '$@; + QUIET_XMLTO = @echo ' XMLTO '$@; + QUIET_DB2TEXI = @echo ' DB2TEXI '$@; + QUIET_MAKEINFO = @echo ' MAKEINFO '$@; + QUIET_DBLATEX = @echo ' DBLATEX '$@; + QUIET_XSLTPROC = @echo ' XSLTPROC '$@; + QUIET_GEN = @echo ' GEN '$@; QUIET_STDERR = 2> /dev/null QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + QUIET_SUBDIR1 = ;$(NO_SUBDIR) \ + echo ' SUBDIR ' $$subdir; \ $(MAKE) $(PRINT_DIR) -C $$subdir export V endif @@ -170,35 +178,49 @@ pdf: $(OUTPUT)user-manual.pdf install: install-man -install-man: man - $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir) -# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir) -# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir) - $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir) -# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir) -# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) +check-man-tools: +ifdef missing_tools + $(error "You need to install $(missing_tools) for man pages") +endif + +do-install-man: man + $(call QUIET_INSTALL, Documentation-man) \ + $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir); \ +# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir); \ +# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir); \ + $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir); \ +# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir); \ +# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir) + +install-man: check-man-tools man + +ifdef missing_tools + DO_INSTALL_MAN = $(warning Please install $(missing_tools) to have the man pages installed) +else + DO_INSTALL_MAN = do-install-man +endif + 
+try-install-man: $(DO_INSTALL_MAN) install-info: info - $(INSTALL) -d -m 755 $(DESTDIR)$(infodir) - $(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir) + $(call QUIET_INSTALL, Documentation-info) \ + $(INSTALL) -d -m 755 $(DESTDIR)$(infodir); \ + $(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir); \ if test -r $(DESTDIR)$(infodir)/dir; then \ - $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\ - $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\ + $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\ + $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\ else \ echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \ fi install-pdf: pdf - $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir) - $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir) + $(call QUIET_INSTALL, Documentation-pdf) \ + $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir); \ + $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir) #install-html: html # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) -$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE - $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE - --include $(OUTPUT)PERF-VERSION-FILE # # Determine "include::" file references in asciidoc files. @@ -228,15 +250,17 @@ $(OUTPUT)cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT) $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \ date >$@ +CLEAN_FILES = \ + $(MAN_XML) $(addsuffix +,$(MAN_XML)) \ + $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) \ + $(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7) \ + $(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++ \ + $(OUTPUT)perf.info $(OUTPUT)perfman.info \ + $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep \ + $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt \ + $(cmds_txt) $(OUTPUT)*.made clean: - $(RM) $(MAN_XML) $(addsuffix +,$(MAN_XML)) - $(RM) $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) - $(RM) $(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7) - $(RM) $(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++ - $(RM) $(OUTPUT)perf.info $(OUTPUT)perfman.info - $(RM) $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep - $(RM) $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt - $(RM) $(cmds_txt) $(OUTPUT)*.made + $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES) $(MAN_HTML): $(OUTPUT)%.html : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ @@ -246,7 +270,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt $(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml $(QUIET_XMLTO)$(RM) $@ && \ - xmlto -o $(OUTPUT) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< + $(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $< $(OUTPUT)%.xml : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ @@ -317,5 +341,3 @@ $(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt #quick-install-html: # '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir) - -.PHONY: .FORCE-PERF-VERSION-FILE diff --git a/tools/perf/Documentation/android.txt b/tools/perf/Documentation/android.txt new file mode 100644 index 00000000000..8484c3a04a6 --- /dev/null +++ b/tools/perf/Documentation/android.txt @@ -0,0 +1,78 @@ +How to compile perf for Android +========================================= + +I. Set the Android NDK environment +------------------------------------------------ + +(a). Use the Android NDK +------------------------------------------------ +1. You need to download and install the Android Native Development Kit (NDK). 
+Set the NDK variable to point to the path where you installed the NDK: + export NDK=/path/to/android-ndk + +2. Set cross-compiling environment variables for NDK toolchain and sysroot. +For arm: + export NDK_TOOLCHAIN=${NDK}/toolchains/arm-linux-androideabi-4.6/prebuilt/linux-x86/bin/arm-linux-androideabi- + export NDK_SYSROOT=${NDK}/platforms/android-9/arch-arm +For x86: + export NDK_TOOLCHAIN=${NDK}/toolchains/x86-4.6/prebuilt/linux-x86/bin/i686-linux-android- + export NDK_SYSROOT=${NDK}/platforms/android-9/arch-x86 + +This method is not working for Android NDK versions up to Revision 8b. +perf uses some bionic enhancements that are not included in these NDK versions. +You can use method (b) described below instead. + +(b). Use the Android source tree +----------------------------------------------- +1. Download the master branch of the Android source tree. +Set the environment for the target you want using: + source build/envsetup.sh + lunch + +2. Build your own NDK sysroot to contain latest bionic changes and set the +NDK sysroot environment variable. + cd ${ANDROID_BUILD_TOP}/ndk +For arm: + ./build/tools/build-ndk-sysroot.sh --abi=arm + export NDK_SYSROOT=${ANDROID_BUILD_TOP}/ndk/build/platforms/android-3/arch-arm +For x86: + ./build/tools/build-ndk-sysroot.sh --abi=x86 + export NDK_SYSROOT=${ANDROID_BUILD_TOP}/ndk/build/platforms/android-3/arch-x86 + +3. Set the NDK toolchain environment variable. +For arm: + export NDK_TOOLCHAIN=${ANDROID_TOOLCHAIN}/arm-linux-androideabi- +For x86: + export NDK_TOOLCHAIN=${ANDROID_TOOLCHAIN}/i686-linux-android- + +II. Compile perf for Android +------------------------------------------------ +You need to run make with the NDK toolchain and sysroot defined above: +For arm: + make ARCH=arm CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}" +For x86: + make ARCH=x86 CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}" + +III. Install perf +----------------------------------------------- +You need to connect to your Android device/emulator using adb. +Install perf using: + adb push perf /data/perf + +If you also want to use perf-archive you need busybox tools for Android. +For installing perf-archive, you first need to replace #!/bin/bash with #!/system/bin/sh: + sed 's/#!\/bin\/bash/#!\/system\/bin\/sh/g' perf-archive >> /tmp/perf-archive + chmod +x /tmp/perf-archive + adb push /tmp/perf-archive /data/perf-archive + +IV. Environment settings for running perf +------------------------------------------------ +Some perf features need environment variables to run properly. +You need to set these before running perf on the target: + adb shell + # PERF_PAGER=cat + +IV. Run perf +------------------------------------------------ +Run perf on your device/emulator to which you previously connected using adb: + # ./data/perf diff --git a/tools/perf/Documentation/examples.txt b/tools/perf/Documentation/examples.txt index 77f95276242..a4e39215648 100644 --- a/tools/perf/Documentation/examples.txt +++ b/tools/perf/Documentation/examples.txt @@ -66,7 +66,7 @@ Furthermore, these tracepoints can be used to sample the workload as well. For example the page allocations done by a 'git gc' can be captured the following way: - titan:~/git> perf record -f -e kmem:mm_page_alloc -c 1 ./git gc + titan:~/git> perf record -e kmem:mm_page_alloc -c 1 ./git gc Counting objects: 1148, done. Delta compression using up to 2 threads. Compressing objects: 100% (450/450), done. 
@@ -120,7 +120,7 @@ Furthermore, call-graph sampling can be done too, of page allocations - to see precisely what kind of page allocations there are: - titan:~/git> perf record -f -g -e kmem:mm_page_alloc -c 1 ./git gc + titan:~/git> perf record -g -e kmem:mm_page_alloc -c 1 ./git gc Counting objects: 1148, done. Delta compression using up to 2 threads. Compressing objects: 100% (450/450), done. diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index c8ffd9fd5c6..e9cd39a92dc 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -61,11 +61,13 @@ OPTIONS --stdio:: Use the stdio interface. ---tui:: Use the TUI interface Use of --tui requires a tty, if one is not +--tui:: Use the TUI interface. Use of --tui requires a tty, if one is not present, as when piping to other commands, the stdio interface is used. This interfaces starts by centering on the line with more samples, TAB/UNTAB cycles through the lines with more samples. +--gtk:: Use the GTK interface. + -C:: --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of @@ -88,6 +90,12 @@ OPTIONS --objdump=<path>:: Path to objdump binary. +--skip-missing:: + Skip symbols that cannot be annotated. + +--group:: + Show event group information together + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/Documentation/perf-archive.txt b/tools/perf/Documentation/perf-archive.txt index fae174dc7d0..ac6ecbb3e66 100644 --- a/tools/perf/Documentation/perf-archive.txt +++ b/tools/perf/Documentation/perf-archive.txt @@ -12,9 +12,9 @@ SYNOPSIS DESCRIPTION ----------- -This command runs runs perf-buildid-list --with-hits, and collects the files -with the buildids found so that analisys of perf.data contents can be possible -on another machine. +This command runs perf-buildid-list --with-hits, and collects the files with the +buildids found so that analysis of perf.data contents can be possible on another +machine. SEE ALSO diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt index 7065cd6fbdf..4464ad770d5 100644 --- a/tools/perf/Documentation/perf-bench.txt +++ b/tools/perf/Documentation/perf-bench.txt @@ -48,6 +48,12 @@ SUBSYSTEM 'mem':: Memory access performance. +'numa':: + NUMA scheduling and MM benchmarks. + +'futex':: + Futex stressing benchmarks. + 'all':: All benchmark subsystems. @@ -187,6 +193,22 @@ Show only the result with page faults before memset. --no-prefault:: Show only the result without page faults before memset. +SUITES FOR 'numa' +~~~~~~~~~~~~~~~~~ +*mem*:: +Suite for evaluating NUMA workloads. + +SUITES FOR 'futex' +~~~~~~~~~~~~~~~~~~ +*hash*:: +Suite for evaluating hash tables. + +*wake*:: +Suite for evaluating wake calls. + +*requeue*:: +Suite for evaluating requeue calls. + SEE ALSO -------- linkperf:perf[1] diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt index c1057701a7d..fd77d81ea74 100644 --- a/tools/perf/Documentation/perf-buildid-cache.txt +++ b/tools/perf/Documentation/perf-buildid-cache.txt @@ -21,9 +21,29 @@ OPTIONS -a:: --add=:: Add specified file to the cache. +-k:: +--kcore:: + Add specified kcore file to the cache. For the current host that is + /proc/kcore which requires root permissions to read. 
Be aware that + running 'perf buildid-cache' as root may update root's build-id cache + not the user's. Use the -v option to see where the file is created. + Note that the copied file contains only code sections not the whole core + image. Note also that files "kallsyms" and "modules" must also be in the + same directory and are also copied. All 3 files are created with read + permissions for root only. kcore will not be added if there is already a + kcore in the cache (with the same build-id) that has the same modules at + the same addresses. Use the -v option to see if a copy of kcore is + actually made. -r:: --remove=:: Remove specified file from the cache. +-M:: +--missing=:: + List missing build ids in the cache for the specified file. +-u:: +--update:: + Update specified file of the cache. It can be used to update kallsyms + kernel dso to vmlinux in order to support annotation. -v:: --verbose:: Be more verbose. diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt index ab7f667de1b..b3b8abae62b 100644 --- a/tools/perf/Documentation/perf-diff.txt +++ b/tools/perf/Documentation/perf-diff.txt @@ -3,17 +3,17 @@ perf-diff(1) NAME ---- -perf-diff - Read two perf.data files and display the differential profile +perf-diff - Read perf.data files and display the differential profile SYNOPSIS -------- [verse] -'perf diff' [oldfile] [newfile] +'perf diff' [baseline file] [data file1] [[data file2] ... ] DESCRIPTION ----------- -This command displays the performance difference amongst two perf.data files -captured via perf record. +This command displays the performance difference amongst two or more perf.data +files captured via perf record. If no parameters are passed it will assume perf.data.old and perf.data. @@ -22,10 +22,6 @@ specified perf.data files. OPTIONS ------- --M:: ---displacement:: - Show position displacement relative to baseline. - -D:: --dump-raw-trace:: Dump raw trace in ASCII. @@ -37,21 +33,25 @@ OPTIONS -d:: --dsos=:: Only consider symbols in these dsos. CSV that understands - file://filename entries. + file://filename entries. This option will affect the percentage + of the Baseline/Delta column. See --percentage for more info. -C:: --comms=:: Only consider symbols in these comms. CSV that understands - file://filename entries. + file://filename entries. This option will affect the percentage + of the Baseline/Delta column. See --percentage for more info. -S:: --symbols=:: Only consider these symbols. CSV that understands - file://filename entries. + file://filename entries. This option will affect the percentage + of the Baseline/Delta column. See --percentage for more info. -s:: --sort=:: - Sort by key(s): pid, comm, dso, symbol. + Sort by key(s): pid, comm, dso, symbol, cpu, parent, srcline. + Please see description of --sort in the perf-report man page. -t:: --field-separator=:: @@ -72,6 +72,135 @@ OPTIONS --symfs=<directory>:: Look for files with symbols relative to this directory. +-b:: +--baseline-only:: + Show only items with match in baseline. + +-c:: +--compute:: + Differential computation selection - delta,ratio,wdiff (default is delta). + See COMPARISON METHODS section for more info. + +-p:: +--period:: + Show period values for both compared hist entries. + +-F:: +--formula:: + Show formula for given computation. + +-o:: +--order:: + Specify compute sorting column number. + +--percentage:: + Determine how to display the overhead percentage of filtered entries. 
+ Filters can be applied by --comms, --dsos and/or --symbols options. + + "relative" means it's relative to filtered entries only so that the + sum of shown entries will be always 100%. "absolute" means it retains + the original value before and after the filter is applied. + +COMPARISON +---------- +The comparison is governed by the baseline file. The baseline perf.data +file is iterated for samples. All other perf.data files specified on +the command line are searched for the baseline sample pair. If the pair +is found, specified computation is made and result is displayed. + +All samples from non-baseline perf.data files, that do not match any +baseline entry, are displayed with empty space within baseline column +and possible computation results (delta) in their related column. + +Example files samples: +- file A with samples f1, f2, f3, f4, f6 +- file B with samples f2, f4, f5 +- file C with samples f1, f2, f5 + +Example output: + x - computation takes place for pair + b - baseline sample percentage + +- perf diff A B C + + baseline/A compute/B compute/C samples + --------------------------------------- + b x f1 + b x x f2 + b f3 + b x f4 + b f6 + x x f5 + +- perf diff B A C + + baseline/B compute/A compute/C samples + --------------------------------------- + b x x f2 + b x f4 + b x f5 + x x f1 + x f3 + x f6 + +- perf diff C B A + + baseline/C compute/B compute/A samples + --------------------------------------- + b x f1 + b x x f2 + b x f5 + x f3 + x x f4 + x f6 + +COMPARISON METHODS +------------------ +delta +~~~~~ +If specified the 'Delta' column is displayed with value 'd' computed as: + + d = A->period_percent - B->period_percent + +with: + - A/B being matching hist entry from data/baseline file specified + (or perf.data/perf.data.old) respectively. + + - period_percent being the % of the hist entry period value within + single data file + + - with filtering by -C, -d and/or -S, period_percent might be changed + relative to how entries are filtered. Use --percentage=absolute to + prevent such fluctuation. + +ratio +~~~~~ +If specified the 'Ratio' column is displayed with value 'r' computed as: + + r = A->period / B->period + +with: + - A/B being matching hist entry from data/baseline file specified + (or perf.data/perf.data.old) respectively. + + - period being the hist entry period value + +wdiff:WEIGHT-B,WEIGHT-A +~~~~~~~~~~~~~~~~~~~~~~~ +If specified the 'Weighted diff' column is displayed with value 'd' computed as: + + d = B->period * WEIGHT-A - A->period * WEIGHT-B + + - A/B being matching hist entry from data/baseline file specified + (or perf.data/perf.data.old) respectively. + + - period being the hist entry period value + + - WEIGHT-A/WEIGHT-B being user suplied weights in the the '-c' option + behind ':' separator like '-c wdiff:1,2'. + - WIEGHT-A being the weight of the data file + - WIEGHT-B being the weight of the baseline data file + SEE ALSO -------- -linkperf:perf-record[1] +linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt index 15217345c2f..1ceb3700ffb 100644 --- a/tools/perf/Documentation/perf-evlist.txt +++ b/tools/perf/Documentation/perf-evlist.txt @@ -28,6 +28,10 @@ OPTIONS --verbose=:: Show all fields. +-g:: +--group:: + Show event group information. 
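+
+A rough illustration, assuming the events were grouped at record time
+with the '{...}' syntax accepted by -e:
+
+  perf record -e '{cycles,instructions}' -a sleep 1
+  perf evlist -g
+
+The output should then reflect which events belong to the same group.
+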
+ SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-list[1], diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt index 025630d43cd..a00a34276c5 100644 --- a/tools/perf/Documentation/perf-inject.txt +++ b/tools/perf/Documentation/perf-inject.txt @@ -29,6 +29,17 @@ OPTIONS -v:: --verbose:: Be more verbose. +-i:: +--input=:: + Input file name. (default: stdin) +-o:: +--output=:: + Output file name. (default: stdout) +-s:: +--sched-stat:: + Merge sched_stat and sched_switch for getting events where and how long + tasks slept. sched_switch contains a callchain where a task slept and + sched_stat contains a timeslice how long a task slept. SEE ALSO -------- diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt index 326f2cb333c..52276a6d2b7 100644 --- a/tools/perf/Documentation/perf-kvm.txt +++ b/tools/perf/Documentation/perf-kvm.txt @@ -10,9 +10,10 @@ SYNOPSIS [verse] 'perf kvm' [--host] [--guest] [--guestmount=<path> [--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]] - {top|record|report|diff|buildid-list} + {top|record|report|diff|buildid-list} [<options>] 'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path> - | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat} + | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat} [<options>] +'perf kvm stat [record|report|live] [<options>] DESCRIPTION ----------- @@ -23,10 +24,17 @@ There are a couple of variants of perf kvm: of an arbitrary workload. 'perf kvm record <command>' to record the performance counter profile - of an arbitrary workload and save it into a perf data file. If both - --host and --guest are input, the perf data file name is perf.data.kvm. - If there is no --host but --guest, the file name is perf.data.guest. - If there is no --guest but --host, the file name is perf.data.host. + of an arbitrary workload and save it into a perf data file. We set the + default behavior of perf kvm as --guest, so if neither --host nor --guest + is input, the perf data file name is perf.data.guest. If --host is input, + the perf data file name is perf.data.kvm. If you want to record data into + perf.data.host, please input --host --no-guest. The behaviors are shown as + following: + Default('') -> perf.data.guest + --host -> perf.data.kvm + --guest -> perf.data.guest + --host --guest -> perf.data.kvm + --host --no-guest -> perf.data.host 'perf kvm report' to display the performance counter profile information recorded via perf kvm record. @@ -36,7 +44,9 @@ There are a couple of variants of perf kvm: 'perf kvm buildid-list' to display the buildids found in a perf data file, so that other tools can be used to fetch packages with matching symbol tables - for use by perf report. + for use by perf report. As buildid is read from /sys/kernel/notes in os, then + if you want to list the buildid for guest, please make sure your perf data file + was captured with --guestmount in perf kvm record. 'perf kvm stat <command>' to run a command and gather performance counter statistics. @@ -50,17 +60,21 @@ There are a couple of variants of perf kvm: 'perf kvm stat report' reports statistical data which includes events handled time, samples, and so on. + 'perf kvm stat live' reports statistical data in a live mode (similar to + record + report but with statistical data updated live at a given display + rate). + OPTIONS ------- -i:: ---input=:: +--input=<path>:: Input file name. 
-o:: ---output:: +--output=<path>:: Output file name. ---host=:: +--host:: Collect host side performance profile. ---guest=:: +--guest:: Collect guest side performance profile. --guestmount=<path>:: Guest os root file system mount directory. Users mounts guest os @@ -79,19 +93,61 @@ OPTIONS kernel module information. Users copy it out from guest os. --guestvmlinux=<path>:: Guest os kernel vmlinux. +-v:: +--verbose:: + Be more verbose (show counter open errors, etc). STAT REPORT OPTIONS ------------------- --vcpu=<value>:: analyze events which occures on this vcpu. (default: all vcpus) ---events=<value>:: - events to be analyzed. Possible values: vmexit, mmio, ioport. +--event=<value>:: + event to be analyzed. Possible values: vmexit, mmio, ioport. (default: vmexit) -k:: --key=<value>:: Sorting key. Possible values: sample (default, sort by samples number), time (sort by average time). +-p:: +--pid=:: + Analyze events only for given process ID(s) (comma separated list). + +STAT LIVE OPTIONS +----------------- +-d:: +--display:: + Time in seconds between display updates + +-m:: +--mmap-pages=:: + Number of mmap data pages (must be a power of two) or size + specification with appended unit character - B/K/M/G. The + size is rounded up to have nearest pages power of two value. + +-a:: +--all-cpus:: + System-wide collection from all CPUs. + +-p:: +--pid=:: + Analyze events only for given process ID(s) (comma separated list). + +--vcpu=<value>:: + analyze events which occures on this vcpu. (default: all vcpus) + + +--event=<value>:: + event to be analyzed. Possible values: vmexit, mmio, ioport. + (default: vmexit) + +-k:: +--key=<value>:: + Sorting key. Possible values: sample (default, sort by samples + number), time (sort by average time). + +--duration=<value>:: + Show events other than HLT that take longer than duration usecs. SEE ALSO -------- diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index d1e39dc8c81..6fce6a62220 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -8,7 +8,7 @@ perf-list - List all symbolic event types SYNOPSIS -------- [verse] -'perf list' [hw|sw|cache|tracepoint|event_glob] +'perf list' [hw|sw|cache|tracepoint|pmu|event_glob] DESCRIPTION ----------- @@ -29,6 +29,8 @@ counted. The following modifiers exist: G - guest counting (in KVM guests) H - host counting (not in KVM guests) p - precise level + S - read sample value (PERF_SAMPLE_READ) + D - pin the event to the PMU The 'p' modifier can be used for specifying how precise the instruction address should be. The 'p' modifier can be specified multiple times: @@ -104,6 +106,8 @@ To limit the list use: 'subsys_glob:event_glob' to filter by tracepoint subsystems such as sched, block, etc. +. 'pmu' to print the kernel supplied PMU events. + . If none of the above is matched, it will apply the supplied glob to all events, printing the ones that match. diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt index c7f5f55634a..ab25be28c9d 100644 --- a/tools/perf/Documentation/perf-lock.txt +++ b/tools/perf/Documentation/perf-lock.txt @@ -48,7 +48,7 @@ REPORT OPTIONS -k:: --key=<value>:: Sorting key. Possible values: acquired (default), contended, - wait_total, wait_max, wait_min. + avg_wait, wait_total, wait_max, wait_min. 
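+
+For example, to sort by the average wait time (the workload here is
+arbitrary):
+
+  perf lock record sleep 1
+  perf lock report -k avg_wait
+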
INFO OPTIONS ------------ diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt new file mode 100644 index 00000000000..1d78a4064da --- /dev/null +++ b/tools/perf/Documentation/perf-mem.txt @@ -0,0 +1,52 @@ +perf-mem(1) +=========== + +NAME +---- +perf-mem - Profile memory accesses + +SYNOPSIS +-------- +[verse] +'perf mem' [<options>] (record [<command>] | report) + +DESCRIPTION +----------- +"perf mem -t <TYPE> record" runs a command and gathers memory operation data +from it, into perf.data. Perf record options are accepted and are passed through. + +"perf mem -t <TYPE> report" displays the result. It invokes perf report with the +right set of options to display a memory access profile. + +Note that on Intel systems the memory latency reported is the use-latency, +not the pure load (or store latency). Use latency includes any pipeline +queueing delays in addition to the memory subsystem latency. + +OPTIONS +------- +<command>...:: + Any command you can specify in a shell. + +-t:: +--type=:: + Select the memory operation type: load or store (default: load) + +-D:: +--dump-raw-samples=:: + Dump the raw decoded samples on the screen in a format that is easy to parse with + one sample per line. + +-x:: +--field-separator:: + Specify the field separator used when dump raw samples (-D option). By default, + The separator is the space character. + +-C:: +--cpu-list:: + Restrict dump of raw samples to those provided via this option. Note that the same + option can be passed in record mode. It will be interpreted the same way as perf + record. + +SEE ALSO +-------- +linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index b715cb71592..1513935c399 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -136,6 +136,8 @@ Each probe argument follows below syntax. 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.) 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type. +On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid. + LINE SYNTAX ----------- Line range is described by following syntax. diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index b38a1f9ad46..d460049cae8 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -57,6 +57,8 @@ OPTIONS -t:: --tid=:: Record events on existing thread ID (comma separated list). + This option also disables inheritance by default. Enable it by adding + --inherit. -u:: --uid=:: @@ -65,16 +67,9 @@ OPTIONS -r:: --realtime=:: Collect data with this RT SCHED_FIFO priority. --D:: ---no-delay:: - Collect data without buffering. --A:: ---append:: - Append to the output file to do incremental profiling. --f:: ---force:: - Overwrite existing data file. 
(deprecated) +--no-buffering:: + Collect data without buffering. -c:: --count=:: @@ -93,11 +88,25 @@ OPTIONS -m:: --mmap-pages=:: - Number of mmap data pages. Must be a power of two. + Number of mmap data pages (must be a power of two) or size + specification with appended unit character - B/K/M/G. The + size is rounded up to have nearest pages power of two value. -g:: + Enables call-graph (stack chain/backtrace) recording. + --call-graph:: - Do call-graph (stack chain/backtrace) recording. + Setup and enable call-graph (stack chain/backtrace) recording, + implies -g. + + Allows specifying "fp" (frame pointer) or "dwarf" + (DWARF's CFI - Call Frame Information) as the method to collect + the information used to show the call graphs. + + In some systems, where binaries are build with gcc + --fomit-frame-pointer, using the "fp" method will produce bogus + call graphs, using "dwarf", if available (perf tools linked to + the libunwind library) should be used instead. -q:: --quiet:: @@ -172,16 +181,39 @@ following filters are defined: - u: only when the branch target is at the user level - k: only when the branch target is in the kernel - hv: only when the target is at the hypervisor level + - in_tx: only when the target is in a hardware transaction + - no_tx: only when the target is not in a hardware transaction + - abort_tx: only when the target is a hardware transaction abort + - cond: conditional branches + -The option requires at least one branch type among any, any_call, any_ret, ind_call. -The privilege levels may be ommitted, in which case, the privilege levels of the associated +The option requires at least one branch type among any, any_call, any_ret, ind_call, cond. +The privilege levels may be omitted, in which case, the privilege levels of the associated event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege levels are subject to permissions. When sampling on multiple events, branch stack sampling is enabled for all the sampling events. The sampled branch type is the same for all events. The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k Note that this feature may not be available on all processors. +--weight:: +Enable weightened sampling. An additional weight is recorded per sample and can be +displayed with the weight and local_weight sort keys. This currently works for TSX +abort events and some memory events in precise mode on modern Intel CPUs. + +--transaction:: +Record transaction flags for transaction related events. + +--per-thread:: +Use per-thread mmaps. By default per-cpu mmaps are created. This option +overrides that and uses per-thread mmaps. A side-effect of that is that +inheritance is automatically disabled. --per-thread is ignored with a warning +if combined with -a or -C options. + +-D:: +--delay=:: +After starting the program, wait msecs before measuring. This is useful to +filter out the startup phase of the program, which is often very different. + SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-list[1] diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index f4d91bebd59..d2b59af62bc 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -25,10 +25,6 @@ OPTIONS --verbose:: Be more verbose. (show symbol address, etc) --d:: ---dsos=:: - Only consider symbols in these dsos. CSV that understands - file://filename entries. 
-n:: --show-nr-samples:: Show the number of samples for each symbol @@ -42,11 +38,18 @@ OPTIONS -c:: --comms=:: Only consider symbols in these comms. CSV that understands - file://filename entries. + file://filename entries. This option will affect the percentage of + the overhead column. See --percentage for more info. +-d:: +--dsos=:: + Only consider symbols in these dsos. CSV that understands + file://filename entries. This option will affect the percentage of + the overhead column. See --percentage for more info. -S:: --symbols=:: Only consider these symbols. CSV that understands - file://filename entries. + file://filename entries. This option will affect the percentage of + the overhead column. See --percentage for more info. --symbol-filter=:: Only show symbols that match (partially) with this filter. @@ -57,11 +60,85 @@ OPTIONS -s:: --sort=:: - Sort by key(s): pid, comm, dso, symbol, parent, srcline. + Sort histogram entries by given key(s) - multiple keys can be specified + in CSV format. Following sort keys are available: + pid, comm, dso, symbol, parent, cpu, srcline, weight, local_weight. + + Each key has following meaning: + + - comm: command (name) of the task which can be read via /proc/<pid>/comm + - pid: command and tid of the task + - dso: name of library or module executed at the time of sample + - symbol: name of function executed at the time of sample + - parent: name of function matched to the parent regex filter. Unmatched + entries are displayed as "[other]". + - cpu: cpu number the task ran at the time of sample + - srcline: filename and line number executed at the time of sample. The + DWARF debugging info must be provided. + - weight: Event specific weight, e.g. memory latency or transaction + abort cost. This is the global weight. + - local_weight: Local weight version of the weight above. + - transaction: Transaction abort flags. + - overhead: Overhead percentage of sample + - overhead_sys: Overhead percentage of sample running in system mode + - overhead_us: Overhead percentage of sample running in user mode + - overhead_guest_sys: Overhead percentage of sample running in system mode + on guest machine + - overhead_guest_us: Overhead percentage of sample running in user mode on + guest machine + - sample: Number of sample + - period: Raw number of event count of sample + + By default, comm, dso and symbol keys are used. + (i.e. --sort comm,dso,symbol) + + If --branch-stack option is used, following sort keys are also + available: + dso_from, dso_to, symbol_from, symbol_to, mispredict. + + - dso_from: name of library or module branched from + - dso_to: name of library or module branched to + - symbol_from: name of function branched from + - symbol_to: name of function branched to + - mispredict: "N" for predicted branch, "Y" for mispredicted branch + - in_tx: branch in TSX transaction + - abort: TSX transaction abort. + + And default sort keys are changed to comm, dso_from, symbol_from, dso_to + and symbol_to, see '--branch-stack'. + +-F:: +--fields=:: + Specify output field - multiple keys can be specified in CSV format. + Following fields are available: + overhead, overhead_sys, overhead_us, overhead_children, sample and period. + Also it can contain any sort key(s). + + By default, every sort keys not specified in -F will be appended + automatically. + + If --mem-mode option is used, following sort keys are also available + (incompatible with --branch-stack): + symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline. 
+ + - symbol_daddr: name of data symbol being executed on at the time of sample + - dso_daddr: name of library or module containing the data being executed + on at the time of sample + - locked: whether the bus was locked at the time of sample + - tlb: type of tlb access for the data at the time of sample + - mem: type of memory access for the data at the time of sample + - snoop: type of snoop (if any) for the data at the time of sample + - dcacheline: the cacheline the data address is on at the time of sample + + And default sort keys are changed to local_weight, mem, sym, dso, + symbol_daddr, dso_daddr, snoop, tlb, locked, see '--mem-mode'. -p:: --parent=<regex>:: - regex filter to identify parent, see: '--sort parent' + A regex filter to identify parent. The parent is a caller of this + function and searched through the callchain, thus it requires callchain + information recorded. The pattern is in the exteneded regex format and + defaults to "\^sys_|^do_page_fault", see '--sort parent'. -x:: --exclude-other:: @@ -74,7 +151,6 @@ OPTIONS -t:: --field-separator=:: - Use a special separator character and don't pad with spaces, replacing all occurrences of this separator in symbol names (and other output) with a '.' character, that thus it's the only non valid separator. @@ -83,7 +159,7 @@ OPTIONS --dump-raw-trace:: Dump raw trace in ASCII. --g [type,min[,limit],order]:: +-g [type,min[,limit],order[,key]]:: --call-graph:: Display call chains using type, min percent threshold, optional print limit and order. @@ -97,12 +173,34 @@ OPTIONS - callee: callee based call graph. - caller: inverted caller based call graph. - Default: fractal,0.5,callee. + key can be: + - function: compare on functions + - address: compare on individual code addresses + + Default: fractal,0.5,callee,function. + +--children:: + Accumulate callchain of children to parent entry so that then can + show up in the output. The output will have a new "Children" column + and will be sorted on the data. It requires callchains are recorded. + +--max-stack:: + Set the stack depth limit when parsing the callchain, anything + beyond the specified depth will be ignored. This is a trade-off + between information loss and faster processing especially for + workloads that can have a very long callchain stack. + + Default: 127 -G:: --inverted:: alias for inverted caller based call graph. +--ignore-callees=<regex>:: + Ignore callees of the function(s) matching the given regex. + This has the effect of collecting the callers of each such + function into one place in the call-graph tree. + --pretty=<key>:: Pretty printing style. key: normal, raw @@ -171,6 +269,42 @@ OPTIONS --objdump=<path>:: Path to objdump binary. +--group:: + Show event group information together. + +--demangle:: + Demangle symbol names to human readable form. It's enabled by default, + disable with --no-demangle. + +--mem-mode:: + Use the data addresses of samples in addition to instruction addresses + to build the histograms. To generate meaningful output, the perf.data + file must have been obtained using perf record -d -W and using a + special event -e cpu/mem-loads/ or -e cpu/mem-stores/. See + 'perf mem' for simpler access. + +--percent-limit:: + Do not show entries which have an overhead under that percent. + (Default: 0). + +--percentage:: + Determine how to display the overhead percentage of filtered entries. + Filters can be applied by --comms, --dsos and/or --symbols options and + Zoom operations on the TUI (thread, dso, etc). 
+ + "relative" means it's relative to filtered entries only so that the + sum of shown entries will be always 100%. "absolute" means it retains + the original value before and after the filter is applied. + +--header:: + Show header information in the perf.data file. This includes + various information like hostname, OS and perf version, cpu/mem + info, perf command line, event list and so on. Currently only + --stdio output supports this feature. + +--header-only:: + Show only perf.data header (forces --stdio). + SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-annotate[1] diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt index a4027f221a5..9f1f054b843 100644 --- a/tools/perf/Documentation/perf-script-python.txt +++ b/tools/perf/Documentation/perf-script-python.txt @@ -336,7 +336,6 @@ scripts listed by the 'perf script -l' command e.g.: ---- root@tropicana:~# perf script -l List of available trace scripts: - workqueue-stats workqueue stats (ins/exe/create/destroy) wakeup-latency system-wide min/max/avg wakeup latency rw-by-file <comm> r/w activity for a program, by file rw-by-pid system-wide r/w activity @@ -402,7 +401,6 @@ should show a new entry for your script: ---- root@tropicana:~# perf script -l List of available trace scripts: - workqueue-stats workqueue stats (ins/exe/create/destroy) wakeup-latency system-wide min/max/avg wakeup latency rw-by-file <comm> r/w activity for a program, by file rw-by-pid system-wide r/w activity diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index e9cbfcddfa3..05f9a0a6784 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -115,7 +115,7 @@ OPTIONS -f:: --fields:: Comma separated list of fields to print. Options are: - comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff. + comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, srcline. Field list can be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace @@ -203,6 +203,18 @@ OPTIONS --show-kernel-path:: Try to resolve the path of [kernel.kallsyms] +--show-task-events + Display task related events (e.g. FORK, COMM, EXIT). + +--show-mmap-events + Display mmap related events (e.g. MMAP, MMAP2). + +--header + Show perf.data header. + +--header-only + Show only perf.data header. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-script-perl[1], diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 2fa173b5197..29ee857c09c 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -52,7 +52,7 @@ OPTIONS -r:: --repeat=<n>:: - repeat command and print average + stddev (max: 100) + repeat command and print average + stddev (max: 100). 0 means forever. -B:: --big-num:: @@ -108,7 +108,39 @@ with it. --append may be used here. 
 Examples:
 3>results perf stat --log-fd 3 -- $cmd
 3>>results perf stat --log-fd 3 --append -- $cmd
+--pre::
+--post::
+	Pre and post measurement hooks, e.g.:
+perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage
+
+-I msecs::
+--interval-print msecs::
+	Print count deltas every N milliseconds (minimum: 100ms)
+	example: perf stat -I 1000 -e cycles -a sleep 5
+
+--per-socket::
+Aggregate counts per processor socket for system-wide mode measurements. This
+is a useful mode to detect imbalance between sockets. To enable this mode,
+use --per-socket in addition to -a. (system-wide). The output includes the
+socket number and the number of online processors on that socket. This is
+useful to gauge the amount of aggregation.
+
+--per-core::
+Aggregate counts per physical processor for system-wide mode measurements. This
+is a useful mode to detect imbalance between physical cores. To enable this mode,
+use --per-core in addition to -a. (system-wide). The output includes the
+core number and the number of online logical processors on that physical processor.
+
+-D msecs::
+--delay msecs::
+After starting the program, wait msecs before measuring. This is useful to
+filter out the startup phase of the program, which is often very different.
+
+-T::
+--transaction::
+
+Print statistics of transactional execution if supported.
 EXAMPLES
 --------
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index b24ac40fcd5..d1d3e5121f8 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -23,6 +23,10 @@ from 'perf test list'.
 OPTIONS
 -------
+-s::
+--skip::
+	Tests to skip (comma separated numeric list).
+
 -v::
 --verbose::
	Be more verbose.
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 1632b0efc75..5e0f986dff3 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -8,7 +8,7 @@ perf-timechart - Tool to visualize total system behavior during a workload
 SYNOPSIS
 --------
 [verse]
-'perf timechart' {record}
+'perf timechart' [<timechart options>] {record} [<record options>]
 DESCRIPTION
 -----------
@@ -20,8 +20,8 @@ There are two variants of perf timechart:
 'perf timechart' to turn a trace into a Scalable Vector Graphics file,
 that can be viewed with popular SVG viewers such as 'Inkscape'.
-OPTIONS
--------
+TIMECHART OPTIONS
+-----------------
 -o::
 --output=::
	Select the output file (default: output.svg)
@@ -34,12 +34,58 @@ OPTIONS
 -P::
 --power-only::
	Only output the CPU power section of the diagram
+-T::
+--tasks-only::
+	Don't output processor state transitions
 -p::
 --process::
	Select the processes to display, by name or PID
 --symfs=<directory>::
	Look for files with symbols relative to this directory.
+-n::
+--proc-num::
+	Print task info for at least given number of tasks.
+-t::
+--topology::
+	Sort CPUs according to topology.
+--highlight=<duration_nsecs|task_name>::
+	Highlight tasks (using different color) that run more than given
+	duration or tasks with given name. If number is given it's interpreted
+	as number of nanoseconds. If non-numeric string is given it's
+	interpreted as task name.
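As a hedged sketch of the two --highlight forms documented above (the 10ms threshold and the 'gcc' task name are illustrative values, not defaults):

----
# highlight tasks that ran for more than 10 ms; the value is in nanoseconds
$ perf timechart --highlight 10000000 -o output.svg
# highlight by task name instead
$ perf timechart --highlight gcc -o output.svg
----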
+ +RECORD OPTIONS +-------------- +-P:: +--power-only:: + Record only power-related events +-T:: +--tasks-only:: + Record only tasks-related events +-g:: +--callchain:: + Do call-graph (stack chain/backtrace) recording + +EXAMPLES +-------- + +$ perf timechart record git pull + + [ perf record: Woken up 13 times to write data ] + [ perf record: Captured and wrote 4.253 MB perf.data (~185801 samples) ] + +$ perf timechart + + Written 10.2 seconds of trace to output.svg. + +Record system-wide timechart: + + $ perf timechart record + + then generate timechart and highlight 'gcc' tasks: + + $ perf timechart --highlight gcc SEE ALSO -------- diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt index 5b80d84d6b4..180ae02137a 100644 --- a/tools/perf/Documentation/perf-top.txt +++ b/tools/perf/Documentation/perf-top.txt @@ -50,7 +50,6 @@ Default is to monitor all CPUS. --count-filter=<count>:: Only display functions with more events than this. --g:: --group:: Put the counters into a counter group. @@ -60,7 +59,7 @@ Default is to monitor all CPUS. -i:: --inherit:: - Child tasks inherit counters, only makes sens with -p option. + Child tasks do not inherit counters. -k <path>:: --vmlinux=<path>:: @@ -68,7 +67,9 @@ Default is to monitor all CPUS. -m <pages>:: --mmap-pages=<pages>:: - Number of mmapped data pages. + Number of mmap data pages (must be a power of two) or size + specification with appended unit character - B/K/M/G. The + size is rounded up to have nearest pages power of two value. -p <pid>:: --pid=<pid>:: @@ -86,7 +87,6 @@ Default is to monitor all CPUS. --realtime=<priority>:: Collect data with this RT SCHED_FIFO priority. --s <symbol>:: --sym-annotate=<symbol>:: Annotate this symbol. @@ -112,7 +112,18 @@ Default is to monitor all CPUS. -s:: --sort:: - Sort by key(s): pid, comm, dso, symbol, parent, srcline. + Sort by key(s): pid, comm, dso, symbol, parent, srcline, weight, + local_weight, abort, in_tx, transaction, overhead, sample, period. + Please see description of --sort in the perf-report man page. + +--fields=:: + Specify output field - multiple keys can be specified in CSV format. + Following fields are available: + overhead, overhead_sys, overhead_us, overhead_children, sample and period. + Also it can contain any sort key(s). + + By default, every sort keys not specified in --field will be appended + automatically. -n:: --show-nr-samples:: @@ -122,13 +133,16 @@ Default is to monitor all CPUS. Show a column with the sum of periods. --dsos:: - Only consider symbols in these dsos. + Only consider symbols in these dsos. This option will affect the + percentage of the overhead column. See --percentage for more info. --comms:: - Only consider symbols in these comms. + Only consider symbols in these comms. This option will affect the + percentage of the overhead column. See --percentage for more info. --symbols:: - Only consider these symbols. + Only consider these symbols. This option will affect the + percentage of the overhead column. See --percentage for more info. -M:: --disassembler-style=:: Set disassembler style for objdump. @@ -140,20 +154,44 @@ Default is to monitor all CPUS. --asm-raw:: Show raw instruction encoding of assembly instructions. --G [type,min,order]:: +-g:: + Enables call-graph (stack chain/backtrace) recording. + --call-graph:: - Display call chains using type, min percent threshold and order. - type can be either: - - flat: single column, linear exposure of call chains. 
-	- graph: use a graph tree, displaying absolute overhead rates.
-	- fractal: like graph, but displays relative rates. Each branch of
-	  the tree is considered as a new profiled object.
+	Setup and enable call-graph (stack chain/backtrace) recording,
+	implies -g.
+
+--children::
+	Accumulate callchain of children to parent entry so that they can
+	show up in the output. The output will have a new "Children" column
+	and will be sorted on the data. It requires the -g/--call-graph option
+	to be enabled.
+
+--max-stack::
+	Set the stack depth limit when parsing the callchain, anything
+	beyond the specified depth will be ignored. This is a trade-off
+	between information loss and faster processing especially for
+	workloads that can have a very long callchain stack.
+
+	Default: 127
+
+--ignore-callees=<regex>::
+	Ignore callees of the function(s) matching the given regex.
+	This has the effect of collecting the callers of each such
+	function into one place in the call-graph tree.
+
+--percent-limit::
+	Do not show entries which have an overhead under that percent.
+	(Default: 0).
-	order can be either:
-	- callee: callee based call graph.
-	- caller: inverted caller based call graph.
+--percentage::
+	Determine how to display the overhead percentage of filtered entries.
+	Filters can be applied by --comms, --dsos and/or --symbols options and
+	Zoom operations on the TUI (thread, dso, etc).
-	Default: fractal,0.5,callee.
+	"relative" means it's relative to filtered entries only so that the
+	sum of shown entries will be always 100%. "absolute" means it retains
+	the original value before and after the filter is applied.
 INTERACTIVE PROMPTING KEYS
 --------------------------
@@ -190,4 +228,4 @@ Pressing any unmapped key displays a menu, and prompts for input.
 SEE ALSO
 --------
-linkperf:perf-stat[1], linkperf:perf-list[1]
+linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 3a2ae37310a..fae38d9a44a 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -9,6 +9,7 @@ SYNOPSIS
 --------
 [verse]
 'perf trace'
+'perf trace record'
 DESCRIPTION
 -----------
@@ -16,38 +17,96 @@ This command will show the events associated with the target, initially
 syscalls, but other system events like pagefaults, task lifetime events,
 scheduling events, etc.
-Initially this is a live mode only tool, but eventually will work with
-perf.data files like the other tools, allowing a detached 'record' from
-analysis phases.
+This is a live mode tool in addition to working with perf.data files like
+the other perf tools. Files can be generated using the 'perf record' command
+but the session needs to include the raw_syscalls events (-e 'raw_syscalls:*').
+Alternatively, 'perf trace record' can be used as a shortcut to
+automatically include the raw_syscalls events when writing events to a file.
+
+The following options apply to perf trace; options to perf trace record are
+found in the perf record man page.
 OPTIONS
 -------
+-a::
 --all-cpus::
	System-wide collection from all CPUs.
+-e::
+--expr::
+	List of events to show, currently only syscall names.
+	Prefixing with ! shows all syscalls but the ones specified. You may
+	need to escape it.
+
+-o::
+--output=::
+	Output file name.
+
 -p::
 --pid=::
	Record events on existing process ID (comma separated list).
+-t::
 --tid=::
	Record events on existing thread ID (comma separated list).
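A hedged sketch of the record/analyze split and the filtering options described above (the syscall names and the PID are placeholders, not defaults):

----
# record a session; 'perf trace record' adds the raw_syscalls:* events automatically
$ perf trace record -- sleep 1
# live mode: show only the read and write syscalls of an existing process
$ perf trace -e read,write -p 1234
----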
+-u:: --uid=:: Record events in threads owned by uid. Name or number. +-v:: +--verbose=:: + Verbosity level. + +-i:: --no-inherit:: Child tasks do not inherit counters. +-m:: --mmap-pages=:: - Number of mmap data pages. Must be a power of two. + Number of mmap data pages (must be a power of two) or size + specification with appended unit character - B/K/M/G. The + size is rounded up to have nearest pages power of two value. +-C:: --cpu:: Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. In per-thread mode with inheritance mode on (default), Events are captured only when the thread executes on the designated CPUs. Default is to monitor all CPUs. +--duration: + Show only events that had a duration greater than N.M ms. + +--sched: + Accrue thread runtime and provide a summary at the end of the session. + +-i +--input + Process events from a given perf data file. + +-T +--time + Print full timestamp rather time relative to first sample. + +--comm:: + Show process COMM right beside its ID, on by default, disable with --no-comm. + +-s:: +--summary:: + Show only a summary of syscalls by thread with min, max, and average times + (in msec) and relative stddev. + +-S:: +--with-summary:: + Show all syscalls followed by a summary by thread with min, max, and + average times (in msec) and relative stddev. + +--tool_stats:: + Show tool stats such as number of times fd->pathname was discovered thru + hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-script[1] diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 80db3f4bcf7..45da209b6ed 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -1,6 +1,14 @@ tools/perf tools/scripts tools/lib/traceevent +tools/lib/api +tools/lib/symbol/kallsyms.c +tools/lib/symbol/kallsyms.h +tools/include/asm/bug.h +tools/include/linux/compiler.h +tools/include/linux/hash.h +tools/include/linux/export.h +tools/include/linux/types.h include/linux/const.h include/linux/perf_event.h include/linux/rbtree.h @@ -11,11 +19,21 @@ lib/rbtree.c include/linux/swab.h arch/*/include/asm/unistd*.h arch/*/include/asm/perf_regs.h +arch/*/include/uapi/asm/unistd*.h +arch/*/include/uapi/asm/perf_regs.h arch/*/lib/memcpy*.S arch/*/lib/memset*.S include/linux/poison.h include/linux/magic.h include/linux/hw_breakpoint.h +include/linux/rbtree_augmented.h +include/uapi/linux/perf_event.h +include/uapi/linux/const.h +include/uapi/linux/swab.h +include/uapi/linux/hw_breakpoint.h arch/x86/include/asm/svm.h arch/x86/include/asm/vmx.h arch/x86/include/asm/kvm_host.h +arch/x86/include/uapi/asm/svm.h +arch/x86/include/uapi/asm/vmx.h +arch/x86/include/uapi/asm/kvm.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 00deed4d615..cb2e5868c8e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -1,1079 +1,90 @@ -include ../scripts/Makefile.include - -# The default target of this Makefile is... -all: - -include config/utilities.mak - -# Define V to have a more verbose compile. -# -# Define O to save output files in a separate directory. -# -# Define ARCH as name of target architecture if you want cross-builds. -# -# Define CROSS_COMPILE as prefix name of compiler if you want cross-builds. -# -# Define NO_LIBPERL to disable perl script extension. # -# Define NO_LIBPYTHON to disable python script extension. 
+# This is a simple wrapper Makefile that calls the main Makefile.perf +# with a -j option to do parallel builds # -# Define PYTHON to point to the python binary if the default -# `python' is not correct; for example: PYTHON=python2 +# If you want to invoke the perf build in some non-standard way then +# you can use the 'make -f Makefile.perf' method to invoke it. # -# Define PYTHON_CONFIG to point to the python-config binary if -# the default `$(PYTHON)-config' is not correct. -# -# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 -# -# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. -# -# Define LDFLAGS=-static to build a static binary. -# -# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. -# -# Define NO_DWARF if you do not want debug-info analysis feature at all. + # -# Define WERROR=0 to disable treating any warnings as errors. +# Clear out the built-in rules GNU make defines by default (such as .o targets), +# so that we pass through all targets to Makefile.perf: # -# Define NO_NEWT if you do not want TUI support. +.SUFFIXES: + # -# Define NO_GTK2 if you do not want GTK+ GUI support. +# We don't want to pass along options like -j: # -# Define NO_DEMANGLE if you do not want C++ symbol demangling. +unexport MAKEFLAGS + # -# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds) +# Do a parallel build with multiple jobs, based on the number of CPUs online +# in this system: 'make -j8' on a 8-CPU system, etc. # -# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf -# backtrace post unwind. +# (To override it, run 'make JOBS=1' and similar.) # -# Define NO_BACKTRACE if you do not want stack backtrace debug feature - -$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE - @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) --include $(OUTPUT)PERF-VERSION-FILE - -uname_M := $(shell uname -m 2>/dev/null || echo not) - -ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ - -e s/arm.*/arm/ -e s/sa110/arm/ \ - -e s/s390x/s390/ -e s/parisc64/parisc/ \ - -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ - -e s/sh[234].*/sh/ ) -NO_PERF_REGS := 1 - -CC = $(CROSS_COMPILE)gcc -AR = $(CROSS_COMPILE)ar - -# Additional ARCH settings for x86 -ifeq ($(ARCH),i386) - override ARCH := x86 - NO_PERF_REGS := 0 - LIBUNWIND_LIBS = -lunwind -lunwind-x86 -endif -ifeq ($(ARCH),x86_64) - override ARCH := x86 - IS_X86_64 := 0 - ifeq (, $(findstring m32,$(EXTRA_CFLAGS))) - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1) - endif - ifeq (${IS_X86_64}, 1) - RAW_ARCH := x86_64 - ARCH_CFLAGS := -DARCH_X86_64 - ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S - endif - NO_PERF_REGS := 0 - LIBUNWIND_LIBS = -lunwind -lunwind-x86_64 +ifeq ($(JOBS),) + JOBS := $(shell grep -c ^processor /proc/cpuinfo 2>/dev/null) + ifeq ($(JOBS),) + JOBS := 1 + endif endif -# Treat warnings as errors unless directed not to -ifneq ($(WERROR),0) - CFLAGS_WERROR := -Werror +# +# Only pass canonical directory names as the output directory: +# +ifneq ($(O),) + FULL_O := $(shell readlink -f $(O) || echo $(O)) endif +# +# Only accept the 'DEBUG' variable from the command line: +# ifeq ("$(origin DEBUG)", "command line") - PERF_DEBUG = $(DEBUG) -endif -ifndef PERF_DEBUG - CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2 -endif - -ifdef PARSER_DEBUG - PARSER_DEBUG_BISON := -t - PARSER_DEBUG_FLEX := -d - PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG -endif - -CFLAGS = -fno-omit-frame-pointer 
-ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) -EXTLIBS = -lpthread -lrt -lelf -lm -ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -ALL_LDFLAGS = $(LDFLAGS) -STRIP ?= strip - -# Among the variables below, these: -# perfexecdir -# template_dir -# mandir -# infodir -# htmldir -# ETC_PERFCONFIG (but not sysconfdir) -# can be specified as a relative path some/where/else; -# this is interpreted as relative to $(prefix) and "perf" at -# runtime figures out where they are based on the path to the executable. -# This can help installing the suite in a relocatable way. - -# Make the path relative to DESTDIR, not to prefix -ifndef DESTDIR -prefix = $(HOME) -endif -bindir_relative = bin -bindir = $(prefix)/$(bindir_relative) -mandir = share/man -infodir = share/info -perfexecdir = libexec/perf-core -sharedir = $(prefix)/share -template_dir = share/perf-core/templates -htmldir = share/doc/perf-doc -ifeq ($(prefix),/usr) -sysconfdir = /etc -ETC_PERFCONFIG = $(sysconfdir)/perfconfig -else -sysconfdir = $(prefix)/etc -ETC_PERFCONFIG = etc/perfconfig -endif -lib = lib - -export prefix bindir sharedir sysconfdir - -RM = rm -f -MKDIR = mkdir -FIND = find -INSTALL = install - -# sparse is architecture-neutral, which means that we need to tell it -# explicitly what architecture to check for. Fix this up for yours.. -SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ - --include config/feature-tests.mak - -ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) - CFLAGS := $(CFLAGS) -fstack-protector-all -endif - -ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y) - CFLAGS := $(CFLAGS) -Wstack-protector -endif - -ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y) - CFLAGS := $(CFLAGS) -Wvolatile-register-var -endif - -### --- END CONFIGURATION SECTION --- - -BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -BASIC_LDFLAGS = - -# Guard against environment variables -BUILTIN_OBJS = -LIB_H = -LIB_OBJS = -PYRF_OBJS = -SCRIPT_SH = - -SCRIPT_SH += perf-archive.sh - -grep-libs = $(filter -l%,$(1)) -strip-libs = $(filter-out -l%,$(1)) - -TRACE_EVENT_DIR = ../lib/traceevent/ - -ifneq ($(OUTPUT),) - TE_PATH=$(OUTPUT) + ifeq ($(DEBUG),) + override DEBUG = 0 + else + SET_DEBUG = "DEBUG=$(DEBUG)" + endif else - TE_PATH=$(TRACE_EVENT_DIR) + override DEBUG = 0 endif -LIBTRACEEVENT = $(TE_PATH)libtraceevent.a -TE_LIB := -L$(TE_PATH) -ltraceevent - -PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) -PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py +define print_msg + @printf ' BUILD: Doing '\''make \033[33m-j'$(JOBS)'\033[m'\'' parallel build\n' +endef -export LIBTRACEEVENT +define make + @$(MAKE) -f Makefile.perf --no-print-directory -j$(JOBS) O=$(FULL_O) $(SET_DEBUG) $@ +endef -$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) - $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ - --quiet build_ext; \ - mkdir -p $(OUTPUT)python && \ - cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/ # -# No Perl scripts right now: +# Needed if no target specified: +# (Except for tags and TAGS targets. The reason is that the +# Makefile does not treat tags/TAGS as targets but as files +# and thus won't rebuilt them once they are in place.) 
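For orientation, a hedged sketch of what the $(make) macro above boils down to; the job count, output directory and DEBUG setting are example values, not fixed defaults:

----
# roughly the command the wrapper issues on an 8-CPU machine
$ make -f Makefile.perf --no-print-directory -j8 O=/tmp/pbuild DEBUG=1 all
# or let the wrapper pick the job count itself
$ make -C tools/perf DEBUG=1
----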
# - -SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) +all tags TAGS: + $(print_msg) + $(make) # -# Single 'perf' binary right now: +# The clean target is not really parallel, don't print the jobs info: # -PROGRAMS += $(OUTPUT)perf - -LANG_BINDINGS = - -# what 'all' will build and 'install' will install, in perfexecdir -ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) - -# what 'all' will build but not install in perfexecdir -OTHER_PROGRAMS = $(OUTPUT)perf - -# Set paths to tools early so that they can be used for version tests. -ifndef SHELL_PATH - SHELL_PATH = /bin/sh -endif -ifndef PERL_PATH - PERL_PATH = /usr/bin/perl -endif - -export PERL_PATH - -FLEX = flex -BISON= bison - -$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c - $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c - -$(OUTPUT)util/parse-events-bison.c: util/parse-events.y - $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c - -$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c - $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c - -$(OUTPUT)util/pmu-bison.c: util/pmu.y - $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c - -$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c -$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c - -LIB_FILE=$(OUTPUT)libperf.a - -LIB_H += ../../include/uapi/linux/perf_event.h -LIB_H += ../../include/linux/rbtree.h -LIB_H += ../../include/linux/list.h -LIB_H += ../../include/uapi/linux/const.h -LIB_H += ../../include/linux/hash.h -LIB_H += ../../include/linux/stringify.h -LIB_H += util/include/linux/bitmap.h -LIB_H += util/include/linux/bitops.h -LIB_H += util/include/linux/compiler.h -LIB_H += util/include/linux/const.h -LIB_H += util/include/linux/ctype.h -LIB_H += util/include/linux/kernel.h -LIB_H += util/include/linux/list.h -LIB_H += util/include/linux/export.h -LIB_H += util/include/linux/magic.h -LIB_H += util/include/linux/poison.h -LIB_H += util/include/linux/prefetch.h -LIB_H += util/include/linux/rbtree.h -LIB_H += util/include/linux/rbtree_augmented.h -LIB_H += util/include/linux/string.h -LIB_H += util/include/linux/types.h -LIB_H += util/include/linux/linkage.h -LIB_H += util/include/asm/asm-offsets.h -LIB_H += util/include/asm/bug.h -LIB_H += util/include/asm/byteorder.h -LIB_H += util/include/asm/hweight.h -LIB_H += util/include/asm/swab.h -LIB_H += util/include/asm/system.h -LIB_H += util/include/asm/uaccess.h -LIB_H += util/include/dwarf-regs.h -LIB_H += util/include/asm/dwarf2.h -LIB_H += util/include/asm/cpufeature.h -LIB_H += util/include/asm/unistd_32.h -LIB_H += util/include/asm/unistd_64.h -LIB_H += perf.h -LIB_H += util/annotate.h -LIB_H += util/cache.h -LIB_H += util/callchain.h -LIB_H += util/build-id.h -LIB_H += util/debug.h -LIB_H += util/debugfs.h -LIB_H += util/sysfs.h -LIB_H += util/pmu.h -LIB_H += util/event.h -LIB_H += util/evsel.h -LIB_H += util/evlist.h -LIB_H += util/exec_cmd.h -LIB_H += util/types.h -LIB_H += util/levenshtein.h -LIB_H += util/map.h -LIB_H += util/parse-options.h -LIB_H += util/parse-events.h -LIB_H += util/quote.h -LIB_H += util/util.h -LIB_H += util/xyarray.h -LIB_H += util/header.h -LIB_H += util/help.h -LIB_H += util/session.h -LIB_H += util/strbuf.h -LIB_H += util/strlist.h -LIB_H += util/strfilter.h -LIB_H += 
util/svghelper.h -LIB_H += util/tool.h -LIB_H += util/run-command.h -LIB_H += util/sigchain.h -LIB_H += util/symbol.h -LIB_H += util/color.h -LIB_H += util/values.h -LIB_H += util/sort.h -LIB_H += util/hist.h -LIB_H += util/thread.h -LIB_H += util/thread_map.h -LIB_H += util/trace-event.h -LIB_H += util/probe-finder.h -LIB_H += util/dwarf-aux.h -LIB_H += util/probe-event.h -LIB_H += util/pstack.h -LIB_H += util/cpumap.h -LIB_H += util/top.h -LIB_H += $(ARCH_INCLUDE) -LIB_H += util/cgroup.h -LIB_H += $(TRACE_EVENT_DIR)event-parse.h -LIB_H += util/target.h -LIB_H += util/rblist.h -LIB_H += util/intlist.h -LIB_H += util/perf_regs.h -LIB_H += util/unwind.h -LIB_H += ui/helpline.h -LIB_H += util/vdso.h - -LIB_OBJS += $(OUTPUT)util/abspath.o -LIB_OBJS += $(OUTPUT)util/alias.o -LIB_OBJS += $(OUTPUT)util/annotate.o -LIB_OBJS += $(OUTPUT)util/build-id.o -LIB_OBJS += $(OUTPUT)util/config.o -LIB_OBJS += $(OUTPUT)util/ctype.o -LIB_OBJS += $(OUTPUT)util/debugfs.o -LIB_OBJS += $(OUTPUT)util/sysfs.o -LIB_OBJS += $(OUTPUT)util/pmu.o -LIB_OBJS += $(OUTPUT)util/environment.o -LIB_OBJS += $(OUTPUT)util/event.o -LIB_OBJS += $(OUTPUT)util/evlist.o -LIB_OBJS += $(OUTPUT)util/evsel.o -LIB_OBJS += $(OUTPUT)util/exec_cmd.o -LIB_OBJS += $(OUTPUT)util/help.o -LIB_OBJS += $(OUTPUT)util/levenshtein.o -LIB_OBJS += $(OUTPUT)util/parse-options.o -LIB_OBJS += $(OUTPUT)util/parse-events.o -LIB_OBJS += $(OUTPUT)util/parse-events-test.o -LIB_OBJS += $(OUTPUT)util/path.o -LIB_OBJS += $(OUTPUT)util/rbtree.o -LIB_OBJS += $(OUTPUT)util/bitmap.o -LIB_OBJS += $(OUTPUT)util/hweight.o -LIB_OBJS += $(OUTPUT)util/run-command.o -LIB_OBJS += $(OUTPUT)util/quote.o -LIB_OBJS += $(OUTPUT)util/strbuf.o -LIB_OBJS += $(OUTPUT)util/string.o -LIB_OBJS += $(OUTPUT)util/strlist.o -LIB_OBJS += $(OUTPUT)util/strfilter.o -LIB_OBJS += $(OUTPUT)util/top.o -LIB_OBJS += $(OUTPUT)util/usage.o -LIB_OBJS += $(OUTPUT)util/wrapper.o -LIB_OBJS += $(OUTPUT)util/sigchain.o -LIB_OBJS += $(OUTPUT)util/symbol.o -LIB_OBJS += $(OUTPUT)util/symbol-elf.o -LIB_OBJS += $(OUTPUT)util/dso-test-data.o -LIB_OBJS += $(OUTPUT)util/color.o -LIB_OBJS += $(OUTPUT)util/pager.o -LIB_OBJS += $(OUTPUT)util/header.o -LIB_OBJS += $(OUTPUT)util/callchain.o -LIB_OBJS += $(OUTPUT)util/values.o -LIB_OBJS += $(OUTPUT)util/debug.o -LIB_OBJS += $(OUTPUT)util/map.o -LIB_OBJS += $(OUTPUT)util/pstack.o -LIB_OBJS += $(OUTPUT)util/session.o -LIB_OBJS += $(OUTPUT)util/thread.o -LIB_OBJS += $(OUTPUT)util/thread_map.o -LIB_OBJS += $(OUTPUT)util/trace-event-parse.o -LIB_OBJS += $(OUTPUT)util/parse-events-flex.o -LIB_OBJS += $(OUTPUT)util/parse-events-bison.o -LIB_OBJS += $(OUTPUT)util/pmu-flex.o -LIB_OBJS += $(OUTPUT)util/pmu-bison.o -LIB_OBJS += $(OUTPUT)util/trace-event-read.o -LIB_OBJS += $(OUTPUT)util/trace-event-info.o -LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o -LIB_OBJS += $(OUTPUT)util/svghelper.o -LIB_OBJS += $(OUTPUT)util/sort.o -LIB_OBJS += $(OUTPUT)util/hist.o -LIB_OBJS += $(OUTPUT)util/probe-event.o -LIB_OBJS += $(OUTPUT)util/util.o -LIB_OBJS += $(OUTPUT)util/xyarray.o -LIB_OBJS += $(OUTPUT)util/cpumap.o -LIB_OBJS += $(OUTPUT)util/cgroup.o -LIB_OBJS += $(OUTPUT)util/target.o -LIB_OBJS += $(OUTPUT)util/rblist.o -LIB_OBJS += $(OUTPUT)util/intlist.o -LIB_OBJS += $(OUTPUT)util/vdso.o -LIB_OBJS += $(OUTPUT)util/stat.o - -LIB_OBJS += $(OUTPUT)ui/helpline.o -LIB_OBJS += $(OUTPUT)ui/hist.o -LIB_OBJS += $(OUTPUT)ui/stdio/hist.o - -BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o -BUILTIN_OBJS += $(OUTPUT)builtin-bench.o -# Benchmark modules -BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o 
-BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o -ifeq ($(RAW_ARCH),x86_64) -BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o -BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o -endif -BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o -BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o - -BUILTIN_OBJS += $(OUTPUT)builtin-diff.o -BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o -BUILTIN_OBJS += $(OUTPUT)builtin-help.o -BUILTIN_OBJS += $(OUTPUT)builtin-sched.o -BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o -BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o -BUILTIN_OBJS += $(OUTPUT)builtin-list.o -BUILTIN_OBJS += $(OUTPUT)builtin-record.o -BUILTIN_OBJS += $(OUTPUT)builtin-report.o -BUILTIN_OBJS += $(OUTPUT)builtin-stat.o -BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o -BUILTIN_OBJS += $(OUTPUT)builtin-top.o -BUILTIN_OBJS += $(OUTPUT)builtin-script.o -BUILTIN_OBJS += $(OUTPUT)builtin-probe.o -BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o -BUILTIN_OBJS += $(OUTPUT)builtin-lock.o -BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o -BUILTIN_OBJS += $(OUTPUT)builtin-test.o -BUILTIN_OBJS += $(OUTPUT)builtin-inject.o - -PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT) +clean: + $(make) # -# Platform specific tweaks +# The build-test target is not really parallel, don't print the jobs info: # +build-test: + @$(MAKE) -f tests/make --no-print-directory -# We choose to avoid "if .. else if .. else .. endif endif" -# because maintaining the nesting to match is a pain. If -# we had "elif" things would have been much nicer... - --include config.mak.autogen --include config.mak - -ifdef NO_LIBELF - NO_DWARF := 1 - NO_DEMANGLE := 1 - NO_LIBUNWIND := 1 -else -FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y) - FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS) - ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y) - msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); - else - NO_LIBELF := 1 - NO_DWARF := 1 - NO_DEMANGLE := 1 - endif -else - FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS) - ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y) - msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev); - NO_DWARF := 1 - endif # Dwarf support -endif # SOURCE_LIBELF -endif # NO_LIBELF - -ifndef NO_LIBUNWIND -# for linking with debug library, run like: -# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/ -ifdef LIBUNWIND_DIR - LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include - LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib -endif - -FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(ALL_CFLAGS) $(LIBUNWIND_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS) -ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND)),y) - msg := $(warning No libunwind found, disabling post unwind support. 
Please install libunwind-dev[el] >= 0.99); - NO_LIBUNWIND := 1 -endif # Libunwind support -endif # NO_LIBUNWIND - --include arch/$(ARCH)/Makefile - -ifneq ($(OUTPUT),) - BASIC_CFLAGS += -I$(OUTPUT) -endif - -ifdef NO_LIBELF -EXTLIBS := $(filter-out -lelf,$(EXTLIBS)) - -# Remove ELF/DWARF dependent codes -LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS)) -LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS)) -LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS)) -LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS)) - -BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS)) - -# Use minimal symbol handling -LIB_OBJS += $(OUTPUT)util/symbol-minimal.o - -else # NO_LIBELF -BASIC_CFLAGS += -DLIBELF_SUPPORT - -ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y) - BASIC_CFLAGS += -DLIBELF_MMAP -endif - -ifndef NO_DWARF -ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) - msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); -else - BASIC_CFLAGS += -DDWARF_SUPPORT - EXTLIBS += -lelf -ldw - LIB_OBJS += $(OUTPUT)util/probe-finder.o - LIB_OBJS += $(OUTPUT)util/dwarf-aux.o -endif # PERF_HAVE_DWARF_REGS -endif # NO_DWARF -endif # NO_LIBELF - -ifndef NO_LIBUNWIND - BASIC_CFLAGS += -DLIBUNWIND_SUPPORT - EXTLIBS += $(LIBUNWIND_LIBS) - BASIC_CFLAGS := $(LIBUNWIND_CFLAGS) $(BASIC_CFLAGS) - BASIC_LDFLAGS := $(LIBUNWIND_LDFLAGS) $(BASIC_LDFLAGS) - LIB_OBJS += $(OUTPUT)util/unwind.o -endif - -ifndef NO_LIBAUDIT - FLAGS_LIBAUDIT = $(ALL_CFLAGS) $(ALL_LDFLAGS) -laudit - ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT)),y) - msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev); - else - BASIC_CFLAGS += -DLIBAUDIT_SUPPORT - BUILTIN_OBJS += $(OUTPUT)builtin-trace.o - EXTLIBS += -laudit - endif -endif - -ifndef NO_NEWT - FLAGS_NEWT=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lnewt - ifneq ($(call try-cc,$(SOURCE_NEWT),$(FLAGS_NEWT)),y) - msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev); - else - # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h - BASIC_CFLAGS += -I/usr/include/slang - BASIC_CFLAGS += -DNEWT_SUPPORT - EXTLIBS += -lnewt -lslang - LIB_OBJS += $(OUTPUT)ui/setup.o - LIB_OBJS += $(OUTPUT)ui/browser.o - LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o - LIB_OBJS += $(OUTPUT)ui/browsers/hists.o - LIB_OBJS += $(OUTPUT)ui/browsers/map.o - LIB_OBJS += $(OUTPUT)ui/progress.o - LIB_OBJS += $(OUTPUT)ui/util.o - LIB_OBJS += $(OUTPUT)ui/tui/setup.o - LIB_OBJS += $(OUTPUT)ui/tui/util.o - LIB_OBJS += $(OUTPUT)ui/tui/helpline.o - LIB_H += ui/browser.h - LIB_H += ui/browsers/map.h - LIB_H += ui/keysyms.h - LIB_H += ui/libslang.h - LIB_H += ui/progress.h - LIB_H += ui/util.h - LIB_H += ui/ui.h - endif -endif - -ifndef NO_GTK2 - FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null) - ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y) - msg := $(warning GTK2 not found, disables GTK2 support. 
Please install gtk2-devel or libgtk2.0-dev); - else - ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y) - BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR - endif - BASIC_CFLAGS += -DGTK2_SUPPORT - BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null) - EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null) - LIB_OBJS += $(OUTPUT)ui/gtk/browser.o - LIB_OBJS += $(OUTPUT)ui/gtk/setup.o - LIB_OBJS += $(OUTPUT)ui/gtk/util.o - LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o - # Make sure that it'd be included only once. - ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),) - LIB_OBJS += $(OUTPUT)ui/setup.o - LIB_OBJS += $(OUTPUT)ui/util.o - endif - endif -endif - -ifdef NO_LIBPERL - BASIC_CFLAGS += -DNO_LIBPERL -else - PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) - PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) - PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) - PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` - FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) - - ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y) - BASIC_CFLAGS += -DNO_LIBPERL - else - ALL_LDFLAGS += $(PERL_EMBED_LDFLAGS) - EXTLIBS += $(PERL_EMBED_LIBADD) - LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o - LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o - endif -endif - -disable-python = $(eval $(disable-python_code)) -define disable-python_code - BASIC_CFLAGS += -DNO_LIBPYTHON - $(if $(1),$(warning No $(1) was found)) - $(warning Python support won't be built) -endef - -override PYTHON := \ - $(call get-executable-or-default,PYTHON,python) - -ifndef PYTHON - $(call disable-python,python interpreter) - python-clean := -else - - PYTHON_WORD := $(call shell-wordify,$(PYTHON)) - - # python extension build directories - PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ - PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ - PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ - export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP - - python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so - - ifdef NO_LIBPYTHON - $(call disable-python) - else - - override PYTHON_CONFIG := \ - $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) - - ifndef PYTHON_CONFIG - $(call disable-python,python-config tool) - else - - PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) - - PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) - PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) - FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) - - ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) - $(call disable-python,Python.h (for Python 2.x)) - else - - ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y) - $(warning Python 3 is not yet supported; please set) - $(warning PYTHON and/or PYTHON_CONFIG appropriately.) 
- $(warning If you also have Python 2 installed, then) - $(warning try something like:) - $(warning $(and ,)) - $(warning $(and ,) make PYTHON=python2) - $(warning $(and ,)) - $(warning Otherwise, disable Python support entirely:) - $(warning $(and ,)) - $(warning $(and ,) make NO_LIBPYTHON=1) - $(warning $(and ,)) - $(error $(and ,)) - else - ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) - EXTLIBS += $(PYTHON_EMBED_LIBADD) - LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o - LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o - LANG_BINDINGS += $(OUTPUT)python/perf.so - endif - - endif - endif - endif -endif - -ifdef NO_DEMANGLE - BASIC_CFLAGS += -DNO_DEMANGLE -else - ifdef HAVE_CPLUS_DEMANGLE - EXTLIBS += -liberty - BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE - else - FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd - has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD)) - ifeq ($(has_bfd),y) - EXTLIBS += -lbfd - else - FLAGS_BFD_IBERTY=$(FLAGS_BFD) -liberty - has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY)) - ifeq ($(has_bfd_iberty),y) - EXTLIBS += -lbfd -liberty - else - FLAGS_BFD_IBERTY_Z=$(FLAGS_BFD_IBERTY) -lz - has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z)) - ifeq ($(has_bfd_iberty_z),y) - EXTLIBS += -lbfd -liberty -lz - else - FLAGS_CPLUS_DEMANGLE=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -liberty - has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE)) - ifeq ($(has_cplus_demangle),y) - EXTLIBS += -liberty - BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE - else - msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling) - BASIC_CFLAGS += -DNO_DEMANGLE - endif - endif - endif - endif - endif -endif - -ifeq ($(NO_PERF_REGS),0) - ifeq ($(ARCH),x86) - LIB_H += arch/x86/include/perf_regs.h - endif - BASIC_CFLAGS += -DHAVE_PERF_REGS -endif - -ifndef NO_STRLCPY - ifeq ($(call try-cc,$(SOURCE_STRLCPY),),y) - BASIC_CFLAGS += -DHAVE_STRLCPY - endif -endif - -ifndef NO_BACKTRACE - ifeq ($(call try-cc,$(SOURCE_BACKTRACE),),y) - BASIC_CFLAGS += -DBACKTRACE_SUPPORT - endif -endif - -ifdef ASCIIDOC8 - export ASCIIDOC8 -endif - -# Shell quote (do not use $(call) to accommodate ancient setups); - -ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) - -DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) -bindir_SQ = $(subst ','\'',$(bindir)) -bindir_relative_SQ = $(subst ','\'',$(bindir_relative)) -mandir_SQ = $(subst ','\'',$(mandir)) -infodir_SQ = $(subst ','\'',$(infodir)) -perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) -template_dir_SQ = $(subst ','\'',$(template_dir)) -htmldir_SQ = $(subst ','\'',$(htmldir)) -prefix_SQ = $(subst ','\'',$(prefix)) -sysconfdir_SQ = $(subst ','\'',$(sysconfdir)) - -SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) - -LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group - -ALL_CFLAGS += $(BASIC_CFLAGS) -ALL_CFLAGS += $(ARCH_CFLAGS) -ALL_LDFLAGS += $(BASIC_LDFLAGS) - -export INSTALL SHELL_PATH - - -### Build rules - -SHELL = $(SHELL_PATH) - -all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) - -please_set_SHELL_PATH_to_a_more_modern_shell: - @$$(:) - -shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell - -strip: $(PROGRAMS) $(OUTPUT)perf - $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf - -$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \ - 
'-DPERF_HTML_PATH="$(htmldir_SQ)"' \ - $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@ - -$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) $(ALL_LDFLAGS) $(OUTPUT)perf.o \ - $(BUILTIN_OBJS) $(LIBS) -o $@ - -$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ - '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ - '-DPERF_MAN_PATH="$(mandir_SQ)"' \ - '-DPERF_INFO_PATH="$(infodir_SQ)"' $< - -$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ - '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ - '-DPERF_MAN_PATH="$(mandir_SQ)"' \ - '-DPERF_INFO_PATH="$(infodir_SQ)"' $< - -$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt - -$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt) - $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ - -$(SCRIPTS) : % : %.sh - $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@' - -# These can record PERF_VERSION -$(OUTPUT)perf.o perf.spec \ - $(SCRIPTS) \ - : $(OUTPUT)PERF-VERSION-FILE - -.SUFFIXES: -.SUFFIXES: .o .c .S .s - -# These two need to be here so that when O= is not used they take precedence -# over the general rule for .o - -$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -w $< - -$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $< - -$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< -$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $< -$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -S $(ALL_CFLAGS) $< -$(OUTPUT)%.o: %.S - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< -$(OUTPUT)%.s: %.S - $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $< - -$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ - '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ - '-DBINDIR="$(bindir_relative_SQ)"' \ - '-DPREFIX="$(prefix_SQ)"' \ - $< - -$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< - -$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< - -$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< - -$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< - -$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< - -$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< - -$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Wno-redundant-decls $< - -$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< - -$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: 
scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< - -$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< - -$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< - -$(OUTPUT)perf-%: %.o $(PERFLIBS) - $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) - -$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) -$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) - -# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So -# we depend the various files onto their directories. -DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h -$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS))) -# In the second step, we make a rule to actually create these directories -$(sort $(dir $(DIRECTORY_DEPS))): - $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null - -$(LIB_FILE): $(LIB_OBJS) - $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) - -# libtraceevent.a -$(LIBTRACEEVENT): - $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a - -$(LIBTRACEEVENT)-clean: - $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean - -help: - @echo 'Perf make targets:' - @echo ' doc - make *all* documentation (see below)' - @echo ' man - make manpage documentation (access with man <foo>)' - @echo ' html - make html documentation' - @echo ' info - make GNU info documentation (access with info <foo>)' - @echo ' pdf - make pdf documentation' - @echo ' TAGS - use etags to make tag information for source browsing' - @echo ' tags - use ctags to make tag information for source browsing' - @echo ' cscope - use cscope to make interactive browsing database' - @echo '' - @echo 'Perf install targets:' - @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed' - @echo ' HINT: use "make prefix=<path> <install target>" to install to a particular' - @echo ' path like make prefix=/usr/local install install-doc' - @echo ' install - install compiled binaries' - @echo ' install-doc - install *all* documentation' - @echo ' install-man - install manpage documentation' - @echo ' install-html - install html documentation' - @echo ' install-info - install GNU info documentation' - @echo ' install-pdf - install pdf documentation' - @echo '' - @echo ' quick-install-doc - alias for quick-install-man' - @echo ' quick-install-man - install the documentation quickly' - @echo ' quick-install-html - install the html documentation quickly' - @echo '' - @echo 'Perf maintainer targets:' - @echo ' clean - clean all binary objects and build output' - -doc: - $(MAKE) -C Documentation all - -man: - $(MAKE) -C Documentation man - -html: - $(MAKE) -C Documentation html - -info: - $(MAKE) -C Documentation info - -pdf: - $(MAKE) -C Documentation pdf - -TAGS: - $(RM) TAGS - $(FIND) . -name '*.[hcS]' -print | xargs etags -a - -tags: - $(RM) tags - $(FIND) . 
-name '*.[hcS]' -print | xargs ctags -a - -cscope: - $(RM) cscope* - $(FIND) . -name '*.[hcS]' -print | xargs cscope -b - -### Detect prefix changes -TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\ - $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) - -$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS - @FLAGS='$(TRACK_CFLAGS)'; \ - if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \ - echo 1>&2 " * new build flags or prefix"; \ - echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \ - fi - -### Testing rules - -# GNU make supports exporting all variables by "export" without parameters. -# However, the environment gets quite big, and some programs have problems -# with that. - -check: $(OUTPUT)common-cmds.h - if sparse; \ - then \ - for i in *.c */*.c; \ - do \ - sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ - done; \ - else \ - exit 1; \ - fi - -### Installation rules - -ifneq ($(filter /%,$(firstword $(perfexecdir))),) -perfexec_instdir = $(perfexecdir) -else -perfexec_instdir = $(prefix)/$(perfexecdir) -endif -perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) - -install: all - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' - $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' - $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' - $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' - $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' - $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' - $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' - $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' - $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d' - $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf' - -install-python_ext: - $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' - -install-doc: - $(MAKE) -C Documentation install - -install-man: - $(MAKE) -C Documentation install-man - -install-html: - $(MAKE) -C Documentation install-html - -install-info: - $(MAKE) -C Documentation install-info - -install-pdf: - $(MAKE) -C Documentation install-pdf - -quick-install-doc: - $(MAKE) -C Documentation quick-install - -quick-install-man: - $(MAKE) -C Documentation quick-install-man - -quick-install-html: - $(MAKE) -C Documentation quick-install-html - -### Cleaning rules - -clean: $(LIBTRACEEVENT)-clean - $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) - $(RM) $(ALL_PROGRAMS) perf - $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* - $(MAKE) -C Documentation/ clean - $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS - $(RM) $(OUTPUT)util/*-bison* - $(RM) $(OUTPUT)util/*-flex* - $(python-clean) +# +# All other targets get passed through: +# +%: + $(print_msg) + 
$(make) -.PHONY: all install clean strip $(LIBTRACEEVENT) -.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell -.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS +.PHONY: tags TAGS diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf new file mode 100644 index 00000000000..9670a16fa57 --- /dev/null +++ b/tools/perf/Makefile.perf @@ -0,0 +1,938 @@ +include ../scripts/Makefile.include + +# The default target of this Makefile is... +all: + +include config/utilities.mak + +# Define V to have a more verbose compile. +# +# Define VF to have a more verbose feature check output. +# +# Define O to save output files in a separate directory. +# +# Define ARCH as name of target architecture if you want cross-builds. +# +# Define CROSS_COMPILE as prefix name of compiler if you want cross-builds. +# +# Define NO_LIBPERL to disable perl script extension. +# +# Define NO_LIBPYTHON to disable python script extension. +# +# Define PYTHON to point to the python binary if the default +# `python' is not correct; for example: PYTHON=python2 +# +# Define PYTHON_CONFIG to point to the python-config binary if +# the default `$(PYTHON)-config' is not correct. +# +# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 +# +# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. +# +# Define LDFLAGS=-static to build a static binary. +# +# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. +# +# Define NO_DWARF if you do not want debug-info analysis feature at all. +# +# Define WERROR=0 to disable treating any warnings as errors. +# +# Define NO_NEWT if you do not want TUI support. (deprecated) +# +# Define NO_SLANG if you do not want TUI support. +# +# Define NO_GTK2 if you do not want GTK+ GUI support. +# +# Define NO_DEMANGLE if you do not want C++ symbol demangling. +# +# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds) +# +# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf +# backtrace post unwind. +# +# Define NO_BACKTRACE if you do not want stack backtrace debug feature +# +# Define NO_LIBNUMA if you do not want numa perf benchmark +# +# Define NO_LIBAUDIT if you do not want libaudit support +# +# Define NO_LIBBIONIC if you do not want bionic support +# +# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support +# for dwarf backtrace post unwind. 
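A hedged example of combining a few of the knobs documented in the comment block above; which features get disabled, and the cross toolchain prefix, are assumptions for illustration only:

----
# build without the GTK+ GUI and perl scripting, and do not treat warnings as errors
$ make -C tools/perf NO_GTK2=1 NO_LIBPERL=1 WERROR=0
# cross-build sketch
$ make -C tools/perf ARCH=arm CROSS_COMPILE=arm-linux-gnueabi-
----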
+ +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(shell pwd))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +#$(info Determined 'srctree' to be $(srctree)) +endif + +ifneq ($(objtree),) +#$(info Determined 'objtree' to be $(objtree)) +endif + +ifneq ($(OUTPUT),) +#$(info Determined 'OUTPUT' to be $(OUTPUT)) +endif + +$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD + @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) + @touch $(OUTPUT)PERF-VERSION-FILE + +CC = $(CROSS_COMPILE)gcc +AR = $(CROSS_COMPILE)ar +PKG_CONFIG = $(CROSS_COMPILE)pkg-config + +RM = rm -f +LN = ln -f +MKDIR = mkdir +FIND = find +INSTALL = install +FLEX = flex +BISON = bison +STRIP = strip + +LIB_DIR = $(srctree)/tools/lib/api/ +TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/ + +# include config/Makefile by default and rule out +# non-config cases +config := 1 + +NON_CONFIG_TARGETS := clean TAGS tags cscope help + +ifdef MAKECMDGOALS +ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),) + config := 0 +endif +endif + +ifeq ($(config),1) +include config/Makefile +endif + +export prefix bindir sharedir sysconfdir DESTDIR + +# sparse is architecture-neutral, which means that we need to tell it +# explicitly what architecture to check for. Fix this up for yours.. +SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ + +# Guard against environment variables +BUILTIN_OBJS = +LIB_H = +LIB_OBJS = +GTK_OBJS = +PYRF_OBJS = +SCRIPT_SH = + +SCRIPT_SH += perf-archive.sh + +grep-libs = $(filter -l%,$(1)) +strip-libs = $(filter-out -l%,$(1)) + +ifneq ($(OUTPUT),) + TE_PATH=$(OUTPUT) +ifneq ($(subdir),) + LIB_PATH=$(OUTPUT)/../lib/api/ +else + LIB_PATH=$(OUTPUT) +endif +else + TE_PATH=$(TRACE_EVENT_DIR) + LIB_PATH=$(LIB_DIR) +endif + +LIBTRACEEVENT = $(TE_PATH)libtraceevent.a +export LIBTRACEEVENT + +LIBAPIKFS = $(LIB_PATH)libapikfs.a +export LIBAPIKFS + +# python extension build directories +PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ +PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ +PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ +export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP + +python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so + +PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) +PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPIKFS) + +$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) + $(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \ + --quiet build_ext; \ + mkdir -p $(OUTPUT)python && \ + cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/ +# +# No Perl scripts right now: +# + +SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) + +# +# Single 'perf' binary right now: +# +PROGRAMS += $(OUTPUT)perf + +# what 'all' will build and 'install' will install, in perfexecdir +ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) + +# what 'all' will build but not install in perfexecdir +OTHER_PROGRAMS = $(OUTPUT)perf + +# Set paths to tools early so that they can be used for version tests. 
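The $(OUTPUT)python/perf.so rule above builds perf's Python binding; a hedged sketch of exercising it from an in-tree build (the paths and the python2 interpreter are assumptions):

----
$ make -C tools/perf python/perf.so
$ PYTHONPATH=tools/perf/python python2 -c 'import perf; print(perf.__file__)'
----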
+ifndef SHELL_PATH + SHELL_PATH = /bin/sh +endif +ifndef PERL_PATH + PERL_PATH = /usr/bin/perl +endif + +export PERL_PATH + +$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c + $(QUIET_FLEX)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l + +$(OUTPUT)util/parse-events-bison.c: util/parse-events.y + $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_ + +$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c + $(QUIET_FLEX)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l + +$(OUTPUT)util/pmu-bison.c: util/pmu.y + $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_ + +$(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c +$(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c + +LIB_FILE=$(OUTPUT)libperf.a + +LIB_H += ../lib/symbol/kallsyms.h +LIB_H += ../../include/uapi/linux/perf_event.h +LIB_H += ../../include/linux/rbtree.h +LIB_H += ../../include/linux/list.h +LIB_H += ../../include/uapi/linux/const.h +LIB_H += ../include/linux/hash.h +LIB_H += ../../include/linux/stringify.h +LIB_H += util/include/linux/bitmap.h +LIB_H += util/include/linux/bitops.h +LIB_H += ../include/linux/compiler.h +LIB_H += util/include/linux/const.h +LIB_H += util/include/linux/ctype.h +LIB_H += util/include/linux/kernel.h +LIB_H += util/include/linux/list.h +LIB_H += ../include/linux/export.h +LIB_H += util/include/linux/poison.h +LIB_H += util/include/linux/rbtree.h +LIB_H += util/include/linux/rbtree_augmented.h +LIB_H += util/include/linux/string.h +LIB_H += ../include/linux/types.h +LIB_H += util/include/linux/linkage.h +LIB_H += util/include/asm/asm-offsets.h +LIB_H += ../include/asm/bug.h +LIB_H += util/include/asm/byteorder.h +LIB_H += util/include/asm/hweight.h +LIB_H += util/include/asm/swab.h +LIB_H += util/include/asm/system.h +LIB_H += util/include/asm/uaccess.h +LIB_H += util/include/dwarf-regs.h +LIB_H += util/include/asm/dwarf2.h +LIB_H += util/include/asm/cpufeature.h +LIB_H += util/include/asm/unistd_32.h +LIB_H += util/include/asm/unistd_64.h +LIB_H += perf.h +LIB_H += util/annotate.h +LIB_H += util/cache.h +LIB_H += util/callchain.h +LIB_H += util/build-id.h +LIB_H += util/debug.h +LIB_H += util/pmu.h +LIB_H += util/event.h +LIB_H += util/evsel.h +LIB_H += util/evlist.h +LIB_H += util/exec_cmd.h +LIB_H += util/levenshtein.h +LIB_H += util/machine.h +LIB_H += util/map.h +LIB_H += util/parse-options.h +LIB_H += util/parse-events.h +LIB_H += util/quote.h +LIB_H += util/util.h +LIB_H += util/xyarray.h +LIB_H += util/header.h +LIB_H += util/help.h +LIB_H += util/session.h +LIB_H += util/strbuf.h +LIB_H += util/strlist.h +LIB_H += util/strfilter.h +LIB_H += util/svghelper.h +LIB_H += util/tool.h +LIB_H += util/run-command.h +LIB_H += util/sigchain.h +LIB_H += util/dso.h +LIB_H += util/symbol.h +LIB_H += util/color.h +LIB_H += util/values.h +LIB_H += util/sort.h +LIB_H += util/hist.h +LIB_H += util/comm.h +LIB_H += util/thread.h +LIB_H += util/thread_map.h +LIB_H += util/trace-event.h +LIB_H += util/probe-finder.h +LIB_H += util/dwarf-aux.h +LIB_H += util/probe-event.h +LIB_H += util/pstack.h +LIB_H += util/cpumap.h +LIB_H += util/top.h +LIB_H += $(ARCH_INCLUDE) +LIB_H += util/cgroup.h +LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h +LIB_H += util/target.h +LIB_H += util/rblist.h +LIB_H += util/intlist.h +LIB_H += 
util/perf_regs.h +LIB_H += util/unwind.h +LIB_H += util/vdso.h +LIB_H += ui/helpline.h +LIB_H += ui/progress.h +LIB_H += ui/util.h +LIB_H += ui/ui.h +LIB_H += util/data.h + +LIB_OBJS += $(OUTPUT)util/abspath.o +LIB_OBJS += $(OUTPUT)util/alias.o +LIB_OBJS += $(OUTPUT)util/annotate.o +LIB_OBJS += $(OUTPUT)util/build-id.o +LIB_OBJS += $(OUTPUT)util/config.o +LIB_OBJS += $(OUTPUT)util/ctype.o +LIB_OBJS += $(OUTPUT)util/pmu.o +LIB_OBJS += $(OUTPUT)util/environment.o +LIB_OBJS += $(OUTPUT)util/event.o +LIB_OBJS += $(OUTPUT)util/evlist.o +LIB_OBJS += $(OUTPUT)util/evsel.o +LIB_OBJS += $(OUTPUT)util/exec_cmd.o +LIB_OBJS += $(OUTPUT)util/help.o +LIB_OBJS += $(OUTPUT)util/kallsyms.o +LIB_OBJS += $(OUTPUT)util/levenshtein.o +LIB_OBJS += $(OUTPUT)util/parse-options.o +LIB_OBJS += $(OUTPUT)util/parse-events.o +LIB_OBJS += $(OUTPUT)util/path.o +LIB_OBJS += $(OUTPUT)util/rbtree.o +LIB_OBJS += $(OUTPUT)util/bitmap.o +LIB_OBJS += $(OUTPUT)util/hweight.o +LIB_OBJS += $(OUTPUT)util/run-command.o +LIB_OBJS += $(OUTPUT)util/quote.o +LIB_OBJS += $(OUTPUT)util/strbuf.o +LIB_OBJS += $(OUTPUT)util/string.o +LIB_OBJS += $(OUTPUT)util/strlist.o +LIB_OBJS += $(OUTPUT)util/strfilter.o +LIB_OBJS += $(OUTPUT)util/top.o +LIB_OBJS += $(OUTPUT)util/usage.o +LIB_OBJS += $(OUTPUT)util/wrapper.o +LIB_OBJS += $(OUTPUT)util/sigchain.o +LIB_OBJS += $(OUTPUT)util/dso.o +LIB_OBJS += $(OUTPUT)util/symbol.o +LIB_OBJS += $(OUTPUT)util/symbol-elf.o +LIB_OBJS += $(OUTPUT)util/color.o +LIB_OBJS += $(OUTPUT)util/pager.o +LIB_OBJS += $(OUTPUT)util/header.o +LIB_OBJS += $(OUTPUT)util/callchain.o +LIB_OBJS += $(OUTPUT)util/values.o +LIB_OBJS += $(OUTPUT)util/debug.o +LIB_OBJS += $(OUTPUT)util/machine.o +LIB_OBJS += $(OUTPUT)util/map.o +LIB_OBJS += $(OUTPUT)util/pstack.o +LIB_OBJS += $(OUTPUT)util/session.o +LIB_OBJS += $(OUTPUT)util/comm.o +LIB_OBJS += $(OUTPUT)util/thread.o +LIB_OBJS += $(OUTPUT)util/thread_map.o +LIB_OBJS += $(OUTPUT)util/trace-event-parse.o +LIB_OBJS += $(OUTPUT)util/parse-events-flex.o +LIB_OBJS += $(OUTPUT)util/parse-events-bison.o +LIB_OBJS += $(OUTPUT)util/pmu-flex.o +LIB_OBJS += $(OUTPUT)util/pmu-bison.o +LIB_OBJS += $(OUTPUT)util/trace-event-read.o +LIB_OBJS += $(OUTPUT)util/trace-event-info.o +LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o +LIB_OBJS += $(OUTPUT)util/trace-event.o +LIB_OBJS += $(OUTPUT)util/svghelper.o +LIB_OBJS += $(OUTPUT)util/sort.o +LIB_OBJS += $(OUTPUT)util/hist.o +LIB_OBJS += $(OUTPUT)util/probe-event.o +LIB_OBJS += $(OUTPUT)util/util.o +LIB_OBJS += $(OUTPUT)util/xyarray.o +LIB_OBJS += $(OUTPUT)util/cpumap.o +LIB_OBJS += $(OUTPUT)util/cgroup.o +LIB_OBJS += $(OUTPUT)util/target.o +LIB_OBJS += $(OUTPUT)util/rblist.o +LIB_OBJS += $(OUTPUT)util/intlist.o +LIB_OBJS += $(OUTPUT)util/vdso.o +LIB_OBJS += $(OUTPUT)util/stat.o +LIB_OBJS += $(OUTPUT)util/record.o +LIB_OBJS += $(OUTPUT)util/srcline.o +LIB_OBJS += $(OUTPUT)util/data.o + +LIB_OBJS += $(OUTPUT)ui/setup.o +LIB_OBJS += $(OUTPUT)ui/helpline.o +LIB_OBJS += $(OUTPUT)ui/progress.o +LIB_OBJS += $(OUTPUT)ui/util.o +LIB_OBJS += $(OUTPUT)ui/hist.o +LIB_OBJS += $(OUTPUT)ui/stdio/hist.o + +LIB_OBJS += $(OUTPUT)arch/common.o + +LIB_OBJS += $(OUTPUT)tests/parse-events.o +LIB_OBJS += $(OUTPUT)tests/dso-data.o +LIB_OBJS += $(OUTPUT)tests/attr.o +LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o +LIB_OBJS += $(OUTPUT)tests/open-syscall.o +LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o +LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o +LIB_OBJS += $(OUTPUT)tests/mmap-basic.o +LIB_OBJS += $(OUTPUT)tests/perf-record.o +LIB_OBJS += $(OUTPUT)tests/rdpmc.o 
+LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o +LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o +LIB_OBJS += $(OUTPUT)tests/pmu.o +LIB_OBJS += $(OUTPUT)tests/hists_common.o +LIB_OBJS += $(OUTPUT)tests/hists_link.o +LIB_OBJS += $(OUTPUT)tests/hists_filter.o +LIB_OBJS += $(OUTPUT)tests/hists_output.o +LIB_OBJS += $(OUTPUT)tests/hists_cumulate.o +LIB_OBJS += $(OUTPUT)tests/python-use.o +LIB_OBJS += $(OUTPUT)tests/bp_signal.o +LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o +LIB_OBJS += $(OUTPUT)tests/task-exit.o +LIB_OBJS += $(OUTPUT)tests/sw-clock.o +ifeq ($(ARCH),x86) +LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o +endif +LIB_OBJS += $(OUTPUT)tests/code-reading.o +LIB_OBJS += $(OUTPUT)tests/sample-parsing.o +LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o +ifndef NO_DWARF_UNWIND +ifeq ($(ARCH),$(filter $(ARCH),x86 arm)) +LIB_OBJS += $(OUTPUT)tests/dwarf-unwind.o +endif +endif +LIB_OBJS += $(OUTPUT)tests/mmap-thread-lookup.o +LIB_OBJS += $(OUTPUT)tests/thread-mg-share.o + +BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o +BUILTIN_OBJS += $(OUTPUT)builtin-bench.o +# Benchmark modules +BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o +BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o +ifeq ($(RAW_ARCH),x86_64) +BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o +BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o +endif +BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o +BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o +BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o +BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o +BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o + +BUILTIN_OBJS += $(OUTPUT)builtin-diff.o +BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o +BUILTIN_OBJS += $(OUTPUT)builtin-help.o +BUILTIN_OBJS += $(OUTPUT)builtin-sched.o +BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o +BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o +BUILTIN_OBJS += $(OUTPUT)builtin-list.o +BUILTIN_OBJS += $(OUTPUT)builtin-record.o +BUILTIN_OBJS += $(OUTPUT)builtin-report.o +BUILTIN_OBJS += $(OUTPUT)builtin-stat.o +BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o +BUILTIN_OBJS += $(OUTPUT)builtin-top.o +BUILTIN_OBJS += $(OUTPUT)builtin-script.o +BUILTIN_OBJS += $(OUTPUT)builtin-probe.o +BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o +BUILTIN_OBJS += $(OUTPUT)builtin-lock.o +BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o +BUILTIN_OBJS += $(OUTPUT)builtin-inject.o +BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o +BUILTIN_OBJS += $(OUTPUT)builtin-mem.o + +PERFLIBS = $(LIB_FILE) $(LIBAPIKFS) $(LIBTRACEEVENT) + +# We choose to avoid "if .. else if .. else .. endif endif" +# because maintaining the nesting to match is a pain. If +# we had "elif" things would have been much nicer... 
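A small sketch of the construct the preceding comment refers to, using placeholder switches FEATURE_A and FEATURE_B rather than anything from this Makefile: without an elif keyword, spelling out "if .. else if .. else" costs one extra nesting level, and one extra endif to keep matched, per alternative.

    # nested "if .. else if .. else" written with plain ifdef/else/endif
    ifdef FEATURE_A
      CFLAGS += -DA
    else
      ifdef FEATURE_B
        CFLAGS += -DB
      else
        CFLAGS += -DDEFAULT
      endif
    endif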
+ +-include arch/$(ARCH)/Makefile + +ifneq ($(OUTPUT),) + CFLAGS += -I$(OUTPUT) +endif + +ifdef NO_LIBELF +EXTLIBS := $(filter-out -lelf,$(EXTLIBS)) + +# Remove ELF/DWARF dependent codes +LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS)) +LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS)) +LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS)) +LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS)) + +BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS)) + +# Use minimal symbol handling +LIB_OBJS += $(OUTPUT)util/symbol-minimal.o + +else # NO_LIBELF +ifndef NO_DWARF + LIB_OBJS += $(OUTPUT)util/probe-finder.o + LIB_OBJS += $(OUTPUT)util/dwarf-aux.o +endif # NO_DWARF +endif # NO_LIBELF + +ifndef NO_LIBDW_DWARF_UNWIND + LIB_OBJS += $(OUTPUT)util/unwind-libdw.o + LIB_H += util/unwind-libdw.h +endif + +ifndef NO_LIBUNWIND + LIB_OBJS += $(OUTPUT)util/unwind-libunwind.o +endif +LIB_OBJS += $(OUTPUT)tests/keep-tracking.o + +ifndef NO_LIBAUDIT + BUILTIN_OBJS += $(OUTPUT)builtin-trace.o +endif + +ifndef NO_SLANG + LIB_OBJS += $(OUTPUT)ui/browser.o + LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o + LIB_OBJS += $(OUTPUT)ui/browsers/hists.o + LIB_OBJS += $(OUTPUT)ui/browsers/map.o + LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o + LIB_OBJS += $(OUTPUT)ui/browsers/header.o + LIB_OBJS += $(OUTPUT)ui/tui/setup.o + LIB_OBJS += $(OUTPUT)ui/tui/util.o + LIB_OBJS += $(OUTPUT)ui/tui/helpline.o + LIB_OBJS += $(OUTPUT)ui/tui/progress.o + LIB_H += ui/tui/tui.h + LIB_H += ui/browser.h + LIB_H += ui/browsers/map.h + LIB_H += ui/keysyms.h + LIB_H += ui/libslang.h +endif + +ifndef NO_GTK2 + ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so + + GTK_OBJS += $(OUTPUT)ui/gtk/browser.o + GTK_OBJS += $(OUTPUT)ui/gtk/hists.o + GTK_OBJS += $(OUTPUT)ui/gtk/setup.o + GTK_OBJS += $(OUTPUT)ui/gtk/util.o + GTK_OBJS += $(OUTPUT)ui/gtk/helpline.o + GTK_OBJS += $(OUTPUT)ui/gtk/progress.o + GTK_OBJS += $(OUTPUT)ui/gtk/annotate.o + +install-gtk: $(OUTPUT)libperf-gtk.so + $(call QUIET_INSTALL, 'GTK UI') \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \ + $(INSTALL) $(OUTPUT)libperf-gtk.so '$(DESTDIR_SQ)$(libdir_SQ)' +endif + +ifndef NO_LIBPERL + LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o + LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o +endif + +ifndef NO_LIBPYTHON + LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o + LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o +endif + +ifeq ($(NO_PERF_REGS),0) + ifeq ($(ARCH),x86) + LIB_H += arch/x86/include/perf_regs.h + endif + LIB_OBJS += $(OUTPUT)util/perf_regs.o +endif + +ifndef NO_LIBNUMA + BUILTIN_OBJS += $(OUTPUT)bench/numa.o +endif + +ifdef ASCIIDOC8 + export ASCIIDOC8 +endif + +LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group + +export INSTALL SHELL_PATH + +### Build rules + +SHELL = $(SHELL_PATH) + +all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) + +please_set_SHELL_PATH_to_a_more_modern_shell: + @$$(:) + +shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell + +strip: $(PROGRAMS) $(OUTPUT)perf + $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf + +$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \ + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ + $(CFLAGS) -c $(filter %.c,$^) -o $@ + +$(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OUTPUT)perf.o 
\ + $(BUILTIN_OBJS) $(LIBS) -o $@ + +$(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H) + $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $< + +$(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS) + $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS) + +$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ + '-DPERF_MAN_PATH="$(mandir_SQ)"' \ + '-DPERF_INFO_PATH="$(infodir_SQ)"' $< + +$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ + '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ + '-DPERF_MAN_PATH="$(mandir_SQ)"' \ + '-DPERF_INFO_PATH="$(infodir_SQ)"' $< + +$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt + +$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt) + $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ + +$(SCRIPTS) : % : %.sh + $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@' + +# These can record PERF_VERSION +$(OUTPUT)perf.o perf.spec \ + $(SCRIPTS) \ + : $(OUTPUT)PERF-VERSION-FILE + +.SUFFIXES: + +# +# If a target does not match any of the later rules then prefix it by $(OUTPUT) +# This makes targets like 'make O=/tmp/perf perf.o' work in a natural way. +# +ifneq ($(OUTPUT),) +%.o: $(OUTPUT)%.o + @echo " # Redirected target $@ => $(OUTPUT)$@" +util/%.o: $(OUTPUT)util/%.o + @echo " # Redirected target $@ => $(OUTPUT)$@" +bench/%.o: $(OUTPUT)bench/%.o + @echo " # Redirected target $@ => $(OUTPUT)$@" +tests/%.o: $(OUTPUT)tests/%.o + @echo " # Redirected target $@ => $(OUTPUT)$@" +endif + +# These two need to be here so that when O= is not used they take precedence +# over the general rule for .o + +$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -w $< + +$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $< + +$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $< +$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $< +$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -S $(CFLAGS) $< +$(OUTPUT)%.o: %.S + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $< +$(OUTPUT)%.s: %.S + $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $< + +$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ + '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ + '-DPREFIX="$(prefix_SQ)"' \ + $< + +$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ + '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \ + $< + +$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ + -DPYTHONPATH='"$(OUTPUT)python"' \ + -DPYTHON='"$(PYTHON_WORD)"' \ + $< + +$(OUTPUT)tests/dwarf-unwind.o: tests/dwarf-unwind.c + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -fno-optimize-sibling-calls $< + +$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +$(OUTPUT)ui/setup.o: ui/setup.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DLIBDIR='"$(libdir_SQ)"' $< + +$(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< + +$(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS + 
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< + +$(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< + +$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< + +$(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< + +$(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $< + +$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $< + +$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $< + +$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $< + +$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< + +$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< + +$(OUTPUT)perf-%: %.o $(PERFLIBS) + $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) + +$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) +$(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) + +# we compile into subdirectories. if the target directory is not the source directory, they might not exists. So +# we depend the various files onto their directories. 
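The two comment lines just above describe the standard GNU make order-only prerequisite idiom that the DIRECTORY_DEPS/OUTPUT_DIRECTORIES rules below implement: each generated file lists its output directory after a '|', so make creates the directory before building the file, while later changes to the directory's timestamp never force a rebuild. A minimal generic sketch of the idiom, using placeholder names OBJS and OUT_DIRS rather than the variables actually defined below:

    OUT_DIRS := $(sort $(dir $(OBJS)))

    # order-only prerequisite: directories are created first,
    # but their timestamps never trigger object rebuilds
    $(OBJS): | $(OUT_DIRS)

    $(OUT_DIRS):
            mkdir -p $@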
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(GTK_OBJS) +DIRECTORY_DEPS += $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h +# no need to add flex objects, because they depend on bison ones +DIRECTORY_DEPS += $(OUTPUT)util/parse-events-bison.c +DIRECTORY_DEPS += $(OUTPUT)util/pmu-bison.c + +OUTPUT_DIRECTORIES := $(sort $(dir $(DIRECTORY_DEPS))) + +$(DIRECTORY_DEPS): | $(OUTPUT_DIRECTORIES) +# In the second step, we make a rule to actually create these directories +$(OUTPUT_DIRECTORIES): + $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null + +$(LIB_FILE): $(LIB_OBJS) + $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) + +# libtraceevent.a +TE_SOURCES = $(wildcard $(TRACE_EVENT_DIR)*.[ch]) + +LIBTRACEEVENT_FLAGS = $(QUIET_SUBDIR1) O=$(OUTPUT) +LIBTRACEEVENT_FLAGS += CFLAGS="-g -Wall $(EXTRA_CFLAGS)" +LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) + +$(LIBTRACEEVENT): $(TE_SOURCES) $(OUTPUT)PERF-CFLAGS + $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) libtraceevent.a plugins + +$(LIBTRACEEVENT)-clean: + $(call QUIET_CLEAN, libtraceevent) + @$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null + +install-traceevent-plugins: $(LIBTRACEEVENT) + $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) install_plugins + +LIBAPIKFS_SOURCES = $(wildcard $(LIB_PATH)fs/*.[ch]) + +# if subdir is set, we've been called from above so target has been built +# already +$(LIBAPIKFS): $(LIBAPIKFS_SOURCES) +ifeq ($(subdir),) + $(QUIET_SUBDIR0)$(LIB_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libapikfs.a +endif + +$(LIBAPIKFS)-clean: +ifeq ($(subdir),) + $(call QUIET_CLEAN, libapikfs) + @$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null +endif + +help: + @echo 'Perf make targets:' + @echo ' doc - make *all* documentation (see below)' + @echo ' man - make manpage documentation (access with man <foo>)' + @echo ' html - make html documentation' + @echo ' info - make GNU info documentation (access with info <foo>)' + @echo ' pdf - make pdf documentation' + @echo ' TAGS - use etags to make tag information for source browsing' + @echo ' tags - use ctags to make tag information for source browsing' + @echo ' cscope - use cscope to make interactive browsing database' + @echo '' + @echo 'Perf install targets:' + @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed' + @echo ' HINT: use "prefix" or "DESTDIR" to install to a particular' + @echo ' path like "make prefix=/usr/local install install-doc"' + @echo ' install - install compiled binaries' + @echo ' install-doc - install *all* documentation' + @echo ' install-man - install manpage documentation' + @echo ' install-html - install html documentation' + @echo ' install-info - install GNU info documentation' + @echo ' install-pdf - install pdf documentation' + @echo '' + @echo ' quick-install-doc - alias for quick-install-man' + @echo ' quick-install-man - install the documentation quickly' + @echo ' quick-install-html - install the html documentation quickly' + @echo '' + @echo 'Perf maintainer targets:' + @echo ' clean - clean all binary objects and build output' + + +DOC_TARGETS := doc man html info pdf + +INSTALL_DOC_TARGETS := $(patsubst %,install-%,$(DOC_TARGETS)) try-install-man +INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html + +# 'make doc' should call 'make -C Documentation all' +$(DOC_TARGETS): + $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all) + +TAG_FOLDERS= . 
../lib/traceevent ../lib/api ../lib/symbol +TAG_FILES= ../../include/uapi/linux/perf_event.h + +TAGS: + $(QUIET_GEN)$(RM) TAGS; \ + $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs etags -a $(TAG_FILES) + +tags: + $(QUIET_GEN)$(RM) tags; \ + $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs ctags -a $(TAG_FILES) + +cscope: + $(QUIET_GEN)$(RM) cscope*; \ + $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES) + +### Detect prefix changes +TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\ + $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ):$(plugindir_SQ) + +$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS + @FLAGS='$(TRACK_CFLAGS)'; \ + if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \ + echo 1>&2 " FLAGS: * new build flags or prefix"; \ + echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \ + fi + +### Testing rules + +# GNU make supports exporting all variables by "export" without parameters. +# However, the environment gets quite big, and some programs have problems +# with that. + +check: $(OUTPUT)common-cmds.h + if sparse; \ + then \ + for i in *.c */*.c; \ + do \ + sparse $(CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ + done; \ + else \ + exit 1; \ + fi + +### Installation rules + +install-gtk: + +install-bin: all install-gtk + $(call QUIET_INSTALL, binaries) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \ + $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \ + $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace' + $(call QUIET_INSTALL, libexec) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' + $(call QUIET_INSTALL, perf-archive) \ + $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' +ifndef NO_LIBPERL + $(call QUIET_INSTALL, perl-scripts) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \ + $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \ + $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'; \ + $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' +endif +ifndef NO_LIBPYTHON + $(call QUIET_INSTALL, python-scripts) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \ + $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \ + $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \ + $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' +endif + $(call QUIET_INSTALL, perf_completion-script) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \ + $(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf' + $(call QUIET_INSTALL, tests) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \ + $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \ + $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' + +install: install-bin try-install-man install-traceevent-plugins + +install-python_ext: + $(PYTHON_WORD) 
util/setup.py --quiet install --root='/$(DESTDIR_SQ)' + +# 'make install-doc' should call 'make -C Documentation install' +$(INSTALL_DOC_TARGETS): + $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=) + +### Cleaning rules + +# +# This is here, not in config/Makefile, because config/Makefile does +# not get included for the clean target: +# +config-clean: + $(call QUIET_CLEAN, config) + @$(MAKE) -C config/feature-checks clean >/dev/null + +clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean + $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS) + $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf + $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* + $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean + $(python-clean) + +# +# Trick: if ../../.git does not exist - we are building out of tree for example, +# then force version regeneration: +# +ifeq ($(wildcard ../../.git/HEAD),) + GIT-HEAD-PHONY = ../../.git/HEAD +else + GIT-HEAD-PHONY = +endif + +.PHONY: all install clean config-clean strip install-gtk +.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell +.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope .FORCE-PERF-CFLAGS + diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile index 15130b50dfe..09d62153d38 100644 --- a/tools/perf/arch/arm/Makefile +++ b/tools/perf/arch/arm/Makefile @@ -2,3 +2,13 @@ ifndef NO_DWARF PERF_HAVE_DWARF_REGS := 1 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o endif +ifndef NO_LIBUNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o +endif +ifndef NO_LIBDW_DWARF_UNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libdw.o +endif +ifndef NO_DWARF_UNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/regs_load.o +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/dwarf-unwind.o +endif diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h new file mode 100644 index 00000000000..f619c9c5a4b --- /dev/null +++ b/tools/perf/arch/arm/include/perf_regs.h @@ -0,0 +1,59 @@ +#ifndef ARCH_PERF_REGS_H +#define ARCH_PERF_REGS_H + +#include <stdlib.h> +#include <linux/types.h> +#include <asm/perf_regs.h> + +void perf_regs_load(u64 *regs); + +#define PERF_REGS_MASK ((1ULL << PERF_REG_ARM_MAX) - 1) +#define PERF_REGS_MAX PERF_REG_ARM_MAX +#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32 + +#define PERF_REG_IP PERF_REG_ARM_PC +#define PERF_REG_SP PERF_REG_ARM_SP + +static inline const char *perf_reg_name(int id) +{ + switch (id) { + case PERF_REG_ARM_R0: + return "r0"; + case PERF_REG_ARM_R1: + return "r1"; + case PERF_REG_ARM_R2: + return "r2"; + case PERF_REG_ARM_R3: + return "r3"; + case PERF_REG_ARM_R4: + return "r4"; + case PERF_REG_ARM_R5: + return "r5"; + case PERF_REG_ARM_R6: + return "r6"; + case PERF_REG_ARM_R7: + return "r7"; + case PERF_REG_ARM_R8: + return "r8"; + case PERF_REG_ARM_R9: + return "r9"; + case PERF_REG_ARM_R10: + return "r10"; + case PERF_REG_ARM_FP: + return "fp"; + case PERF_REG_ARM_IP: + return "ip"; + case PERF_REG_ARM_SP: + return "sp"; + case PERF_REG_ARM_LR: + return "lr"; + case PERF_REG_ARM_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c 
b/tools/perf/arch/arm/tests/dwarf-unwind.c new file mode 100644 index 00000000000..9f870d27cb3 --- /dev/null +++ b/tools/perf/arch/arm/tests/dwarf-unwind.c @@ -0,0 +1,60 @@ +#include <string.h> +#include "perf_regs.h" +#include "thread.h" +#include "map.h" +#include "event.h" +#include "tests/tests.h" + +#define STACK_SIZE 8192 + +static int sample_ustack(struct perf_sample *sample, + struct thread *thread, u64 *regs) +{ + struct stack_dump *stack = &sample->user_stack; + struct map *map; + unsigned long sp; + u64 stack_size, *buf; + + buf = malloc(STACK_SIZE); + if (!buf) { + pr_debug("failed to allocate sample uregs data\n"); + return -1; + } + + sp = (unsigned long) regs[PERF_REG_ARM_SP]; + + map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); + if (!map) { + pr_debug("failed to get stack map\n"); + free(buf); + return -1; + } + + stack_size = map->end - sp; + stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size; + + memcpy(buf, (void *) sp, stack_size); + stack->data = (char *) buf; + stack->size = stack_size; + return 0; +} + +int test__arch_unwind_sample(struct perf_sample *sample, + struct thread *thread) +{ + struct regs_dump *regs = &sample->user_regs; + u64 *buf; + + buf = calloc(1, sizeof(u64) * PERF_REGS_MAX); + if (!buf) { + pr_debug("failed to allocate sample uregs data\n"); + return -1; + } + + perf_regs_load(buf); + regs->abi = PERF_SAMPLE_REGS_ABI; + regs->regs = buf; + regs->mask = PERF_REGS_MASK; + + return sample_ustack(sample, thread, buf); +} diff --git a/tools/perf/arch/arm/tests/regs_load.S b/tools/perf/arch/arm/tests/regs_load.S new file mode 100644 index 00000000000..e09e983946f --- /dev/null +++ b/tools/perf/arch/arm/tests/regs_load.S @@ -0,0 +1,58 @@ +#include <linux/linkage.h> + +#define R0 0x00 +#define R1 0x08 +#define R2 0x10 +#define R3 0x18 +#define R4 0x20 +#define R5 0x28 +#define R6 0x30 +#define R7 0x38 +#define R8 0x40 +#define R9 0x48 +#define SL 0x50 +#define FP 0x58 +#define IP 0x60 +#define SP 0x68 +#define LR 0x70 +#define PC 0x78 + +/* + * Implementation of void perf_regs_load(u64 *regs); + * + * This functions fills in the 'regs' buffer from the actual registers values, + * in the way the perf built-in unwinding test expects them: + * - the PC at the time at the call to this function. Since this function + * is called using a bl instruction, the PC value is taken from LR. + * The built-in unwinding test then unwinds the call stack from the dwarf + * information in unwind__get_entries. + * + * Notes: + * - the 8 bytes stride in the registers offsets comes from the fact + * that the registers are stored in an u64 array (u64 *regs), + * - the regs buffer needs to be zeroed before the call to this function, + * in this case using a calloc in dwarf-unwind.c. + */ + +.text +.type perf_regs_load,%function +ENTRY(perf_regs_load) + str r0, [r0, #R0] + str r1, [r0, #R1] + str r2, [r0, #R2] + str r3, [r0, #R3] + str r4, [r0, #R4] + str r5, [r0, #R5] + str r6, [r0, #R6] + str r7, [r0, #R7] + str r8, [r0, #R8] + str r9, [r0, #R9] + str sl, [r0, #SL] + str fp, [r0, #FP] + str ip, [r0, #IP] + str sp, [r0, #SP] + str lr, [r0, #LR] + str lr, [r0, #PC] // store pc as lr in order to skip the call + // to this function + mov pc, lr +ENDPROC(perf_regs_load) diff --git a/tools/perf/arch/arm/util/dwarf-regs.c b/tools/perf/arch/arm/util/dwarf-regs.c index e8d5c551c69..33ec5b339da 100644 --- a/tools/perf/arch/arm/util/dwarf-regs.c +++ b/tools/perf/arch/arm/util/dwarf-regs.c @@ -8,10 +8,7 @@ * published by the Free Software Foundation. 
*/ -#include <stdlib.h> -#ifndef __UCLIBC__ -#include <libio.h> -#endif +#include <stddef.h> #include <dwarf-regs.h> struct pt_regs_dwarfnum { diff --git a/tools/perf/arch/arm/util/unwind-libdw.c b/tools/perf/arch/arm/util/unwind-libdw.c new file mode 100644 index 00000000000..b4176c60117 --- /dev/null +++ b/tools/perf/arch/arm/util/unwind-libdw.c @@ -0,0 +1,36 @@ +#include <elfutils/libdwfl.h> +#include "../../util/unwind-libdw.h" +#include "../../util/perf_regs.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) +{ + struct unwind_info *ui = arg; + struct regs_dump *user_regs = &ui->sample->user_regs; + Dwarf_Word dwarf_regs[PERF_REG_ARM_MAX]; + +#define REG(r) ({ \ + Dwarf_Word val = 0; \ + perf_reg_value(&val, user_regs, PERF_REG_ARM_##r); \ + val; \ +}) + + dwarf_regs[0] = REG(R0); + dwarf_regs[1] = REG(R1); + dwarf_regs[2] = REG(R2); + dwarf_regs[3] = REG(R3); + dwarf_regs[4] = REG(R4); + dwarf_regs[5] = REG(R5); + dwarf_regs[6] = REG(R6); + dwarf_regs[7] = REG(R7); + dwarf_regs[8] = REG(R8); + dwarf_regs[9] = REG(R9); + dwarf_regs[10] = REG(R10); + dwarf_regs[11] = REG(FP); + dwarf_regs[12] = REG(IP); + dwarf_regs[13] = REG(SP); + dwarf_regs[14] = REG(LR); + dwarf_regs[15] = REG(PC); + + return dwfl_thread_state_registers(thread, 0, PERF_REG_ARM_MAX, + dwarf_regs); +} diff --git a/tools/perf/arch/arm/util/unwind-libunwind.c b/tools/perf/arch/arm/util/unwind-libunwind.c new file mode 100644 index 00000000000..729ed69a666 --- /dev/null +++ b/tools/perf/arch/arm/util/unwind-libunwind.c @@ -0,0 +1,48 @@ + +#include <errno.h> +#include <libunwind.h> +#include "perf_regs.h" +#include "../../util/unwind.h" + +int libunwind__arch_reg_id(int regnum) +{ + switch (regnum) { + case UNW_ARM_R0: + return PERF_REG_ARM_R0; + case UNW_ARM_R1: + return PERF_REG_ARM_R1; + case UNW_ARM_R2: + return PERF_REG_ARM_R2; + case UNW_ARM_R3: + return PERF_REG_ARM_R3; + case UNW_ARM_R4: + return PERF_REG_ARM_R4; + case UNW_ARM_R5: + return PERF_REG_ARM_R5; + case UNW_ARM_R6: + return PERF_REG_ARM_R6; + case UNW_ARM_R7: + return PERF_REG_ARM_R7; + case UNW_ARM_R8: + return PERF_REG_ARM_R8; + case UNW_ARM_R9: + return PERF_REG_ARM_R9; + case UNW_ARM_R10: + return PERF_REG_ARM_R10; + case UNW_ARM_R11: + return PERF_REG_ARM_FP; + case UNW_ARM_R12: + return PERF_REG_ARM_IP; + case UNW_ARM_R13: + return PERF_REG_ARM_SP; + case UNW_ARM_R14: + return PERF_REG_ARM_LR; + case UNW_ARM_R15: + return PERF_REG_ARM_PC; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + + return -EINVAL; +} diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile new file mode 100644 index 00000000000..67e9b3d38e8 --- /dev/null +++ b/tools/perf/arch/arm64/Makefile @@ -0,0 +1,7 @@ +ifndef NO_DWARF +PERF_HAVE_DWARF_REGS := 1 +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o +endif +ifndef NO_LIBUNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o +endif diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h new file mode 100644 index 00000000000..e9441b9e2a3 --- /dev/null +++ b/tools/perf/arch/arm64/include/perf_regs.h @@ -0,0 +1,88 @@ +#ifndef ARCH_PERF_REGS_H +#define ARCH_PERF_REGS_H + +#include <stdlib.h> +#include <linux/types.h> +#include <asm/perf_regs.h> + +#define PERF_REGS_MASK ((1ULL << PERF_REG_ARM64_MAX) - 1) +#define PERF_REG_IP PERF_REG_ARM64_PC +#define PERF_REG_SP PERF_REG_ARM64_SP + +static inline const char *perf_reg_name(int id) +{ + switch (id) { + case PERF_REG_ARM64_X0: + return "x0"; + 
case PERF_REG_ARM64_X1: + return "x1"; + case PERF_REG_ARM64_X2: + return "x2"; + case PERF_REG_ARM64_X3: + return "x3"; + case PERF_REG_ARM64_X4: + return "x4"; + case PERF_REG_ARM64_X5: + return "x5"; + case PERF_REG_ARM64_X6: + return "x6"; + case PERF_REG_ARM64_X7: + return "x7"; + case PERF_REG_ARM64_X8: + return "x8"; + case PERF_REG_ARM64_X9: + return "x9"; + case PERF_REG_ARM64_X10: + return "x10"; + case PERF_REG_ARM64_X11: + return "x11"; + case PERF_REG_ARM64_X12: + return "x12"; + case PERF_REG_ARM64_X13: + return "x13"; + case PERF_REG_ARM64_X14: + return "x14"; + case PERF_REG_ARM64_X15: + return "x15"; + case PERF_REG_ARM64_X16: + return "x16"; + case PERF_REG_ARM64_X17: + return "x17"; + case PERF_REG_ARM64_X18: + return "x18"; + case PERF_REG_ARM64_X19: + return "x19"; + case PERF_REG_ARM64_X20: + return "x20"; + case PERF_REG_ARM64_X21: + return "x21"; + case PERF_REG_ARM64_X22: + return "x22"; + case PERF_REG_ARM64_X23: + return "x23"; + case PERF_REG_ARM64_X24: + return "x24"; + case PERF_REG_ARM64_X25: + return "x25"; + case PERF_REG_ARM64_X26: + return "x26"; + case PERF_REG_ARM64_X27: + return "x27"; + case PERF_REG_ARM64_X28: + return "x28"; + case PERF_REG_ARM64_X29: + return "x29"; + case PERF_REG_ARM64_SP: + return "sp"; + case PERF_REG_ARM64_LR: + return "lr"; + case PERF_REG_ARM64_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/arm64/util/dwarf-regs.c b/tools/perf/arch/arm64/util/dwarf-regs.c new file mode 100644 index 00000000000..d49efeb8172 --- /dev/null +++ b/tools/perf/arch/arm64/util/dwarf-regs.c @@ -0,0 +1,80 @@ +/* + * Mapping of DWARF debug register numbers into register names. + * + * Copyright (C) 2010 Will Deacon, ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <stddef.h> +#include <dwarf-regs.h> + +struct pt_regs_dwarfnum { + const char *name; + unsigned int dwarfnum; +}; + +#define STR(s) #s +#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num} +#define GPR_DWARFNUM_NAME(num) \ + {.name = STR(%x##num), .dwarfnum = num} +#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0} + +/* + * Reference: + * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0057b/IHI0057B_aadwarf64.pdf + */ +static const struct pt_regs_dwarfnum regdwarfnum_table[] = { + GPR_DWARFNUM_NAME(0), + GPR_DWARFNUM_NAME(1), + GPR_DWARFNUM_NAME(2), + GPR_DWARFNUM_NAME(3), + GPR_DWARFNUM_NAME(4), + GPR_DWARFNUM_NAME(5), + GPR_DWARFNUM_NAME(6), + GPR_DWARFNUM_NAME(7), + GPR_DWARFNUM_NAME(8), + GPR_DWARFNUM_NAME(9), + GPR_DWARFNUM_NAME(10), + GPR_DWARFNUM_NAME(11), + GPR_DWARFNUM_NAME(12), + GPR_DWARFNUM_NAME(13), + GPR_DWARFNUM_NAME(14), + GPR_DWARFNUM_NAME(15), + GPR_DWARFNUM_NAME(16), + GPR_DWARFNUM_NAME(17), + GPR_DWARFNUM_NAME(18), + GPR_DWARFNUM_NAME(19), + GPR_DWARFNUM_NAME(20), + GPR_DWARFNUM_NAME(21), + GPR_DWARFNUM_NAME(22), + GPR_DWARFNUM_NAME(23), + GPR_DWARFNUM_NAME(24), + GPR_DWARFNUM_NAME(25), + GPR_DWARFNUM_NAME(26), + GPR_DWARFNUM_NAME(27), + GPR_DWARFNUM_NAME(28), + GPR_DWARFNUM_NAME(29), + REG_DWARFNUM_NAME("%lr", 30), + REG_DWARFNUM_NAME("%sp", 31), + REG_DWARFNUM_END, +}; + +/** + * get_arch_regstr() - lookup register name from it's DWARF register number + * @n: the DWARF register number + * + * get_arch_regstr() returns the name of the register in struct + * regdwarfnum_table from it's DWARF register number. If the register is not + * found in the table, this returns NULL; + */ +const char *get_arch_regstr(unsigned int n) +{ + const struct pt_regs_dwarfnum *roff; + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (roff->dwarfnum == n) + return roff->name; + return NULL; +} diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c new file mode 100644 index 00000000000..436ee43859d --- /dev/null +++ b/tools/perf/arch/arm64/util/unwind-libunwind.c @@ -0,0 +1,82 @@ + +#include <errno.h> +#include <libunwind.h> +#include "perf_regs.h" +#include "../../util/unwind.h" + +int libunwind__arch_reg_id(int regnum) +{ + switch (regnum) { + case UNW_AARCH64_X0: + return PERF_REG_ARM64_X0; + case UNW_AARCH64_X1: + return PERF_REG_ARM64_X1; + case UNW_AARCH64_X2: + return PERF_REG_ARM64_X2; + case UNW_AARCH64_X3: + return PERF_REG_ARM64_X3; + case UNW_AARCH64_X4: + return PERF_REG_ARM64_X4; + case UNW_AARCH64_X5: + return PERF_REG_ARM64_X5; + case UNW_AARCH64_X6: + return PERF_REG_ARM64_X6; + case UNW_AARCH64_X7: + return PERF_REG_ARM64_X7; + case UNW_AARCH64_X8: + return PERF_REG_ARM64_X8; + case UNW_AARCH64_X9: + return PERF_REG_ARM64_X9; + case UNW_AARCH64_X10: + return PERF_REG_ARM64_X10; + case UNW_AARCH64_X11: + return PERF_REG_ARM64_X11; + case UNW_AARCH64_X12: + return PERF_REG_ARM64_X12; + case UNW_AARCH64_X13: + return PERF_REG_ARM64_X13; + case UNW_AARCH64_X14: + return PERF_REG_ARM64_X14; + case UNW_AARCH64_X15: + return PERF_REG_ARM64_X15; + case UNW_AARCH64_X16: + return PERF_REG_ARM64_X16; + case UNW_AARCH64_X17: + return PERF_REG_ARM64_X17; + case UNW_AARCH64_X18: + return PERF_REG_ARM64_X18; + case UNW_AARCH64_X19: + return PERF_REG_ARM64_X19; + case UNW_AARCH64_X20: + return PERF_REG_ARM64_X20; + case UNW_AARCH64_X21: + return PERF_REG_ARM64_X21; + case UNW_AARCH64_X22: + return PERF_REG_ARM64_X22; + case UNW_AARCH64_X23: + return PERF_REG_ARM64_X23; + case 
UNW_AARCH64_X24: + return PERF_REG_ARM64_X24; + case UNW_AARCH64_X25: + return PERF_REG_ARM64_X25; + case UNW_AARCH64_X26: + return PERF_REG_ARM64_X26; + case UNW_AARCH64_X27: + return PERF_REG_ARM64_X27; + case UNW_AARCH64_X28: + return PERF_REG_ARM64_X28; + case UNW_AARCH64_X29: + return PERF_REG_ARM64_X29; + case UNW_AARCH64_X30: + return PERF_REG_ARM64_LR; + case UNW_AARCH64_SP: + return PERF_REG_ARM64_SP; + case UNW_AARCH64_PC: + return PERF_REG_ARM64_PC; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + + return -EINVAL; +} diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c new file mode 100644 index 00000000000..42faf369211 --- /dev/null +++ b/tools/perf/arch/common.c @@ -0,0 +1,211 @@ +#include <stdio.h> +#include <sys/utsname.h> +#include "common.h" +#include "../util/debug.h" + +const char *const arm_triplets[] = { + "arm-eabi-", + "arm-linux-androideabi-", + "arm-unknown-linux-", + "arm-unknown-linux-gnu-", + "arm-unknown-linux-gnueabi-", + NULL +}; + +const char *const powerpc_triplets[] = { + "powerpc-unknown-linux-gnu-", + "powerpc64-unknown-linux-gnu-", + NULL +}; + +const char *const s390_triplets[] = { + "s390-ibm-linux-", + NULL +}; + +const char *const sh_triplets[] = { + "sh-unknown-linux-gnu-", + "sh64-unknown-linux-gnu-", + NULL +}; + +const char *const sparc_triplets[] = { + "sparc-unknown-linux-gnu-", + "sparc64-unknown-linux-gnu-", + NULL +}; + +const char *const x86_triplets[] = { + "x86_64-pc-linux-gnu-", + "x86_64-unknown-linux-gnu-", + "i686-pc-linux-gnu-", + "i586-pc-linux-gnu-", + "i486-pc-linux-gnu-", + "i386-pc-linux-gnu-", + "i686-linux-android-", + "i686-android-linux-", + NULL +}; + +const char *const mips_triplets[] = { + "mips-unknown-linux-gnu-", + "mipsel-linux-android-", + NULL +}; + +static bool lookup_path(char *name) +{ + bool found = false; + char *path, *tmp; + char buf[PATH_MAX]; + char *env = getenv("PATH"); + + if (!env) + return false; + + env = strdup(env); + if (!env) + return false; + + path = strtok_r(env, ":", &tmp); + while (path) { + scnprintf(buf, sizeof(buf), "%s/%s", path, name); + if (access(buf, F_OK) == 0) { + found = true; + break; + } + path = strtok_r(NULL, ":", &tmp); + } + free(env); + return found; +} + +static int lookup_triplets(const char *const *triplets, const char *name) +{ + int i; + char buf[PATH_MAX]; + + for (i = 0; triplets[i] != NULL; i++) { + scnprintf(buf, sizeof(buf), "%s%s", triplets[i], name); + if (lookup_path(buf)) + return i; + } + return -1; +} + +/* + * Return architecture name in a normalized form. + * The conversion logic comes from the Makefile. 
+ */ +static const char *normalize_arch(char *arch) +{ + if (!strcmp(arch, "x86_64")) + return "x86"; + if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6') + return "x86"; + if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5)) + return "sparc"; + if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) + return "arm"; + if (!strncmp(arch, "s390", 4)) + return "s390"; + if (!strncmp(arch, "parisc", 6)) + return "parisc"; + if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3)) + return "powerpc"; + if (!strncmp(arch, "mips", 4)) + return "mips"; + if (!strncmp(arch, "sh", 2) && isdigit(arch[2])) + return "sh"; + + return arch; +} + +static int perf_session_env__lookup_binutils_path(struct perf_session_env *env, + const char *name, + const char **path) +{ + int idx; + const char *arch, *cross_env; + struct utsname uts; + const char *const *path_list; + char *buf = NULL; + + arch = normalize_arch(env->arch); + + if (uname(&uts) < 0) + goto out; + + /* + * We don't need to try to find objdump path for native system. + * Just use default binutils path (e.g.: "objdump"). + */ + if (!strcmp(normalize_arch(uts.machine), arch)) + goto out; + + cross_env = getenv("CROSS_COMPILE"); + if (cross_env) { + if (asprintf(&buf, "%s%s", cross_env, name) < 0) + goto out_error; + if (buf[0] == '/') { + if (access(buf, F_OK) == 0) + goto out; + goto out_error; + } + if (lookup_path(buf)) + goto out; + zfree(&buf); + } + + if (!strcmp(arch, "arm")) + path_list = arm_triplets; + else if (!strcmp(arch, "powerpc")) + path_list = powerpc_triplets; + else if (!strcmp(arch, "sh")) + path_list = sh_triplets; + else if (!strcmp(arch, "s390")) + path_list = s390_triplets; + else if (!strcmp(arch, "sparc")) + path_list = sparc_triplets; + else if (!strcmp(arch, "x86")) + path_list = x86_triplets; + else if (!strcmp(arch, "mips")) + path_list = mips_triplets; + else { + ui__error("binutils for %s not supported.\n", arch); + goto out_error; + } + + idx = lookup_triplets(path_list, name); + if (idx < 0) { + ui__error("Please install %s for %s.\n" + "You can add it to PATH, set CROSS_COMPILE or " + "override the default using --%s.\n", + name, arch, name); + goto out_error; + } + + if (asprintf(&buf, "%s%s", path_list[idx], name) < 0) + goto out_error; + +out: + *path = buf; + return 0; +out_error: + free(buf); + *path = NULL; + return -1; +} + +int perf_session_env__lookup_objdump(struct perf_session_env *env) +{ + /* + * For live mode, env->arch will be NULL and we can use + * the native objdump tool. + */ + if (env->arch == NULL) + return 0; + + return perf_session_env__lookup_binutils_path(env, "objdump", + &objdump_path); +} diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h new file mode 100644 index 00000000000..ede246eda9b --- /dev/null +++ b/tools/perf/arch/common.h @@ -0,0 +1,10 @@ +#ifndef ARCH_PERF_COMMON_H +#define ARCH_PERF_COMMON_H + +#include "../util/session.h" + +extern const char *objdump_path; + +int perf_session_env__lookup_objdump(struct perf_session_env *env); + +#endif /* ARCH_PERF_COMMON_H */ diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c index 7cdd61d0e27..733151cdf46 100644 --- a/tools/perf/arch/powerpc/util/dwarf-regs.c +++ b/tools/perf/arch/powerpc/util/dwarf-regs.c @@ -9,10 +9,7 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include <stdlib.h> -#ifndef __UCLIBC__ -#include <libio.h> -#endif +#include <stddef.h> #include <dwarf-regs.h> diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c index e19653e025f..0469df02ee6 100644 --- a/tools/perf/arch/s390/util/dwarf-regs.c +++ b/tools/perf/arch/s390/util/dwarf-regs.c @@ -6,7 +6,7 @@ * */ -#include <libio.h> +#include <stddef.h> #include <dwarf-regs.h> #define NUM_GPRS 16 diff --git a/tools/perf/arch/sh/util/dwarf-regs.c b/tools/perf/arch/sh/util/dwarf-regs.c index a11edb007a6..0d0897f57a1 100644 --- a/tools/perf/arch/sh/util/dwarf-regs.c +++ b/tools/perf/arch/sh/util/dwarf-regs.c @@ -19,7 +19,7 @@ * */ -#include <libio.h> +#include <stddef.h> #include <dwarf-regs.h> /* diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c index 0ab88483720..92eda412fed 100644 --- a/tools/perf/arch/sparc/util/dwarf-regs.c +++ b/tools/perf/arch/sparc/util/dwarf-regs.c @@ -9,7 +9,7 @@ * 2 of the License, or (at your option) any later version. */ -#include <libio.h> +#include <stddef.h> #include <dwarf-regs.h> #define SPARC_MAX_REGS 96 diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile index 815841c04eb..1641542e363 100644 --- a/tools/perf/arch/x86/Makefile +++ b/tools/perf/arch/x86/Makefile @@ -3,6 +3,15 @@ PERF_HAVE_DWARF_REGS := 1 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o endif ifndef NO_LIBUNWIND -LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o +endif +ifndef NO_LIBDW_DWARF_UNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libdw.o +endif +ifndef NO_DWARF_UNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/regs_load.o +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/dwarf-unwind.o endif LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o +LIB_H += arch/$(ARCH)/util/tsc.h diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h index 46fc9f15c6b..7df517acfef 100644 --- a/tools/perf/arch/x86/include/perf_regs.h +++ b/tools/perf/arch/x86/include/perf_regs.h @@ -2,17 +2,23 @@ #define ARCH_PERF_REGS_H #include <stdlib.h> -#include "../../util/types.h" -#include "../../../../../arch/x86/include/asm/perf_regs.h" +#include <linux/types.h> +#include <asm/perf_regs.h> -#ifndef ARCH_X86_64 +void perf_regs_load(u64 *regs); + +#ifndef HAVE_ARCH_X86_64_SUPPORT #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1) +#define PERF_REGS_MAX PERF_REG_X86_32_MAX +#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32 #else #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ (1ULL << PERF_REG_X86_ES) | \ (1ULL << PERF_REG_X86_FS) | \ (1ULL << PERF_REG_X86_GS)) #define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT) +#define PERF_REGS_MAX PERF_REG_X86_64_MAX +#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 #endif #define PERF_REG_IP PERF_REG_X86_IP #define PERF_REG_SP PERF_REG_X86_SP @@ -52,7 +58,7 @@ static inline const char *perf_reg_name(int id) return "FS"; case PERF_REG_X86_GS: return "GS"; -#ifdef ARCH_X86_64 +#ifdef HAVE_ARCH_X86_64_SUPPORT case PERF_REG_X86_R8: return "R8"; case PERF_REG_X86_R9: @@ -69,7 +75,7 @@ static inline const char *perf_reg_name(int id) return "R14"; case PERF_REG_X86_R15: return "R15"; -#endif /* ARCH_X86_64 */ +#endif /* HAVE_ARCH_X86_64_SUPPORT */ default: return NULL; } diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c new file mode 100644 index 
00000000000..9f89f899ccc --- /dev/null +++ b/tools/perf/arch/x86/tests/dwarf-unwind.c @@ -0,0 +1,60 @@ +#include <string.h> +#include "perf_regs.h" +#include "thread.h" +#include "map.h" +#include "event.h" +#include "tests/tests.h" + +#define STACK_SIZE 8192 + +static int sample_ustack(struct perf_sample *sample, + struct thread *thread, u64 *regs) +{ + struct stack_dump *stack = &sample->user_stack; + struct map *map; + unsigned long sp; + u64 stack_size, *buf; + + buf = malloc(STACK_SIZE); + if (!buf) { + pr_debug("failed to allocate sample uregs data\n"); + return -1; + } + + sp = (unsigned long) regs[PERF_REG_X86_SP]; + + map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); + if (!map) { + pr_debug("failed to get stack map\n"); + free(buf); + return -1; + } + + stack_size = map->end - sp; + stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size; + + memcpy(buf, (void *) sp, stack_size); + stack->data = (char *) buf; + stack->size = stack_size; + return 0; +} + +int test__arch_unwind_sample(struct perf_sample *sample, + struct thread *thread) +{ + struct regs_dump *regs = &sample->user_regs; + u64 *buf; + + buf = malloc(sizeof(u64) * PERF_REGS_MAX); + if (!buf) { + pr_debug("failed to allocate sample uregs data\n"); + return -1; + } + + perf_regs_load(buf); + regs->abi = PERF_SAMPLE_REGS_ABI; + regs->regs = buf; + regs->mask = PERF_REGS_MASK; + + return sample_ustack(sample, thread, buf); +} diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S new file mode 100644 index 00000000000..60875d5c556 --- /dev/null +++ b/tools/perf/arch/x86/tests/regs_load.S @@ -0,0 +1,98 @@ +#include <linux/linkage.h> + +#define AX 0 +#define BX 1 * 8 +#define CX 2 * 8 +#define DX 3 * 8 +#define SI 4 * 8 +#define DI 5 * 8 +#define BP 6 * 8 +#define SP 7 * 8 +#define IP 8 * 8 +#define FLAGS 9 * 8 +#define CS 10 * 8 +#define SS 11 * 8 +#define DS 12 * 8 +#define ES 13 * 8 +#define FS 14 * 8 +#define GS 15 * 8 +#define R8 16 * 8 +#define R9 17 * 8 +#define R10 18 * 8 +#define R11 19 * 8 +#define R12 20 * 8 +#define R13 21 * 8 +#define R14 22 * 8 +#define R15 23 * 8 + +.text +#ifdef HAVE_ARCH_X86_64_SUPPORT +ENTRY(perf_regs_load) + movq %rax, AX(%rdi) + movq %rbx, BX(%rdi) + movq %rcx, CX(%rdi) + movq %rdx, DX(%rdi) + movq %rsi, SI(%rdi) + movq %rdi, DI(%rdi) + movq %rbp, BP(%rdi) + + leaq 8(%rsp), %rax /* exclude this call. */ + movq %rax, SP(%rdi) + + movq 0(%rsp), %rax + movq %rax, IP(%rdi) + + movq $0, FLAGS(%rdi) + movq $0, CS(%rdi) + movq $0, SS(%rdi) + movq $0, DS(%rdi) + movq $0, ES(%rdi) + movq $0, FS(%rdi) + movq $0, GS(%rdi) + + movq %r8, R8(%rdi) + movq %r9, R9(%rdi) + movq %r10, R10(%rdi) + movq %r11, R11(%rdi) + movq %r12, R12(%rdi) + movq %r13, R13(%rdi) + movq %r14, R14(%rdi) + movq %r15, R15(%rdi) + ret +ENDPROC(perf_regs_load) +#else +ENTRY(perf_regs_load) + push %edi + movl 8(%esp), %edi + movl %eax, AX(%edi) + movl %ebx, BX(%edi) + movl %ecx, CX(%edi) + movl %edx, DX(%edi) + movl %esi, SI(%edi) + pop %eax + movl %eax, DI(%edi) + movl %ebp, BP(%edi) + + leal 4(%esp), %eax /* exclude this call. */ + movl %eax, SP(%edi) + + movl 0(%esp), %eax + movl %eax, IP(%edi) + + movl $0, FLAGS(%edi) + movl $0, CS(%edi) + movl $0, SS(%edi) + movl $0, DS(%edi) + movl $0, ES(%edi) + movl $0, FS(%edi) + movl $0, GS(%edi) + ret +ENDPROC(perf_regs_load) +#endif + +/* + * We need to provide note.GNU-stack section, saying that we want + * NOT executable stack. 
Otherwise the final linking will assume that + * the ELF stack should not be restricted at all and set it RWX. + */ +.section .note.GNU-stack,"",@progbits diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c index a794d308192..be22dd46323 100644 --- a/tools/perf/arch/x86/util/dwarf-regs.c +++ b/tools/perf/arch/x86/util/dwarf-regs.c @@ -20,7 +20,7 @@ * */ -#include <libio.h> +#include <stddef.h> #include <dwarf-regs.h> /* diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c new file mode 100644 index 00000000000..40021fa3129 --- /dev/null +++ b/tools/perf/arch/x86/util/tsc.c @@ -0,0 +1,59 @@ +#include <stdbool.h> +#include <errno.h> + +#include <linux/perf_event.h> + +#include "../../perf.h" +#include <linux/types.h> +#include "../../util/debug.h" +#include "tsc.h" + +u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc) +{ + u64 t, quot, rem; + + t = ns - tc->time_zero; + quot = t / tc->time_mult; + rem = t % tc->time_mult; + return (quot << tc->time_shift) + + (rem << tc->time_shift) / tc->time_mult; +} + +u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc) +{ + u64 quot, rem; + + quot = cyc >> tc->time_shift; + rem = cyc & ((1 << tc->time_shift) - 1); + return tc->time_zero + quot * tc->time_mult + + ((rem * tc->time_mult) >> tc->time_shift); +} + +int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, + struct perf_tsc_conversion *tc) +{ + bool cap_user_time_zero; + u32 seq; + int i = 0; + + while (1) { + seq = pc->lock; + rmb(); + tc->time_mult = pc->time_mult; + tc->time_shift = pc->time_shift; + tc->time_zero = pc->time_zero; + cap_user_time_zero = pc->cap_user_time_zero; + rmb(); + if (pc->lock == seq && !(seq & 1)) + break; + if (++i > 10000) { + pr_debug("failed to get perf_event_mmap_page lock\n"); + return -EINVAL; + } + } + + if (!cap_user_time_zero) + return -EOPNOTSUPP; + + return 0; +} diff --git a/tools/perf/arch/x86/util/tsc.h b/tools/perf/arch/x86/util/tsc.h new file mode 100644 index 00000000000..2affe0366b5 --- /dev/null +++ b/tools/perf/arch/x86/util/tsc.h @@ -0,0 +1,20 @@ +#ifndef TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ +#define TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ + +#include <linux/types.h> + +struct perf_tsc_conversion { + u16 time_shift; + u32 time_mult; + u64 time_zero; +}; + +struct perf_event_mmap_page; + +int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, + struct perf_tsc_conversion *tc); + +u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc); +u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc); + +#endif /* TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ */ diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c new file mode 100644 index 00000000000..c4b72176ca8 --- /dev/null +++ b/tools/perf/arch/x86/util/unwind-libdw.c @@ -0,0 +1,51 @@ +#include <elfutils/libdwfl.h> +#include "../../util/unwind-libdw.h" +#include "../../util/perf_regs.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) +{ + struct unwind_info *ui = arg; + struct regs_dump *user_regs = &ui->sample->user_regs; + Dwarf_Word dwarf_regs[17]; + unsigned nregs; + +#define REG(r) ({ \ + Dwarf_Word val = 0; \ + perf_reg_value(&val, user_regs, PERF_REG_X86_##r); \ + val; \ +}) + + if (user_regs->abi == PERF_SAMPLE_REGS_ABI_32) { + dwarf_regs[0] = REG(AX); + dwarf_regs[1] = REG(CX); + dwarf_regs[2] = REG(DX); + dwarf_regs[3] = REG(BX); + dwarf_regs[4] = REG(SP); + dwarf_regs[5] = REG(BP); + dwarf_regs[6] = REG(SI); + dwarf_regs[7] = 
REG(DI); + dwarf_regs[8] = REG(IP); + nregs = 9; + } else { + dwarf_regs[0] = REG(AX); + dwarf_regs[1] = REG(DX); + dwarf_regs[2] = REG(CX); + dwarf_regs[3] = REG(BX); + dwarf_regs[4] = REG(SI); + dwarf_regs[5] = REG(DI); + dwarf_regs[6] = REG(BP); + dwarf_regs[7] = REG(SP); + dwarf_regs[8] = REG(R8); + dwarf_regs[9] = REG(R9); + dwarf_regs[10] = REG(R10); + dwarf_regs[11] = REG(R11); + dwarf_regs[12] = REG(R12); + dwarf_regs[13] = REG(R13); + dwarf_regs[14] = REG(R14); + dwarf_regs[15] = REG(R15); + dwarf_regs[16] = REG(IP); + nregs = 17; + } + + return dwfl_thread_state_registers(thread, 0, nregs, dwarf_regs); +} diff --git a/tools/perf/arch/x86/util/unwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c index 78d956eff96..3261f68c6a7 100644 --- a/tools/perf/arch/x86/util/unwind.c +++ b/tools/perf/arch/x86/util/unwind-libunwind.c @@ -4,8 +4,8 @@ #include "perf_regs.h" #include "../../util/unwind.h" -#ifdef ARCH_X86_64 -int unwind__arch_reg_id(int regnum) +#ifdef HAVE_ARCH_X86_64_SUPPORT +int libunwind__arch_reg_id(int regnum) { int id; @@ -69,7 +69,7 @@ int unwind__arch_reg_id(int regnum) return id; } #else -int unwind__arch_reg_id(int regnum) +int libunwind__arch_reg_id(int regnum) { int id; @@ -108,4 +108,4 @@ int unwind__arch_reg_id(int regnum) return id; } -#endif /* ARCH_X86_64 */ +#endif /* HAVE_ARCH_X86_64_SUPPORT */ diff --git a/tools/perf/bash_completion b/tools/perf/bash_completion deleted file mode 100644 index 56e6a12aab5..00000000000 --- a/tools/perf/bash_completion +++ /dev/null @@ -1,62 +0,0 @@ -# perf completion - -function_exists() -{ - declare -F $1 > /dev/null - return $? -} - -function_exists __ltrim_colon_completions || -__ltrim_colon_completions() -{ - if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then - # Remove colon-word prefix from COMPREPLY items - local colon_word=${1%${1##*:}} - local i=${#COMPREPLY[*]} - while [[ $((--i)) -ge 0 ]]; do - COMPREPLY[$i]=${COMPREPLY[$i]#"$colon_word"} - done - fi -} - -have perf && -_perf() -{ - local cur prev cmd - - COMPREPLY=() - if function_exists _get_comp_words_by_ref; then - _get_comp_words_by_ref -n : cur prev - else - cur=$(_get_cword :) - prev=${COMP_WORDS[COMP_CWORD-1]} - fi - - cmd=${COMP_WORDS[0]} - - # List perf subcommands or long options - if [ $COMP_CWORD -eq 1 ]; then - if [[ $cur == --* ]]; then - COMPREPLY=( $( compgen -W '--help --version \ - --exec-path --html-path --paginate --no-pager \ - --perf-dir --work-tree --debugfs-dir' -- "$cur" ) ) - else - cmds=$($cmd --list-cmds) - COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) ) - fi - # List possible events for -e option - elif [[ $prev == "-e" && "${COMP_WORDS[1]}" == @(record|stat|top) ]]; then - evts=$($cmd list --raw-dump) - COMPREPLY=( $( compgen -W '$evts' -- "$cur" ) ) - __ltrim_colon_completions $cur - # List long option names - elif [[ $cur == --* ]]; then - subcmd=${COMP_WORDS[1]} - opts=$($cmd $subcmd --list-opts) - COMPREPLY=( $( compgen -W '$opts' -- "$cur" ) ) - # Fall down to list regular files - else - _filedir - fi -} && -complete -F _perf perf diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index 8f89998eeaf..eba46709b27 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -1,11 +1,39 @@ #ifndef BENCH_H #define BENCH_H +/* + * The madvise transparent hugepage constants were added in glibc + * 2.13. For compatibility with older versions of glibc, define these + * tokens if they are not already defined. 
+ * + * PA-RISC uses different madvise values from other architectures and + * needs to be special-cased. + */ +#ifdef __hppa__ +# ifndef MADV_HUGEPAGE +# define MADV_HUGEPAGE 67 +# endif +# ifndef MADV_NOHUGEPAGE +# define MADV_NOHUGEPAGE 68 +# endif +#else +# ifndef MADV_HUGEPAGE +# define MADV_HUGEPAGE 14 +# endif +# ifndef MADV_NOHUGEPAGE +# define MADV_NOHUGEPAGE 15 +# endif +#endif + +extern int bench_numa(int argc, const char **argv, const char *prefix); extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __maybe_unused); extern int bench_mem_memset(int argc, const char **argv, const char *prefix); +extern int bench_futex_hash(int argc, const char **argv, const char *prefix); +extern int bench_futex_wake(int argc, const char **argv, const char *prefix); +extern int bench_futex_requeue(int argc, const char **argv, const char *prefix); #define BENCH_FORMAT_DEFAULT_STR "default" #define BENCH_FORMAT_DEFAULT 0 diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c new file mode 100644 index 00000000000..a84206e9c4a --- /dev/null +++ b/tools/perf/bench/futex-hash.c @@ -0,0 +1,212 @@ +/* + * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com> + * + * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing. + * + * This program is particularly useful for measuring the kernel's futex hash + * table/function implementation. In order for it to make sense, use with as + * many threads and futexes as possible. + */ + +#include "../perf.h" +#include "../util/util.h" +#include "../util/stat.h" +#include "../util/parse-options.h" +#include "../util/header.h" +#include "bench.h" +#include "futex.h" + +#include <err.h> +#include <stdlib.h> +#include <sys/time.h> +#include <pthread.h> + +static unsigned int nthreads = 0; +static unsigned int nsecs = 10; +/* amount of futexes per thread */ +static unsigned int nfutexes = 1024; +static bool fshared = false, done = false, silent = false; + +struct timeval start, end, runtime; +static pthread_mutex_t thread_lock; +static unsigned int threads_starting; +static struct stats throughput_stats; +static pthread_cond_t thread_parent, thread_worker; + +struct worker { + int tid; + u_int32_t *futex; + pthread_t thread; + unsigned long ops; +}; + +static const struct option options[] = { + OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"), + OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"), + OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"), + OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"), + OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"), + OPT_END() +}; + +static const char * const bench_futex_hash_usage[] = { + "perf bench futex hash <options>", + NULL +}; + +static void *workerfn(void *arg) +{ + int ret; + unsigned int i; + struct worker *w = (struct worker *) arg; + + pthread_mutex_lock(&thread_lock); + threads_starting--; + if (!threads_starting) + pthread_cond_signal(&thread_parent); + pthread_cond_wait(&thread_worker, &thread_lock); + pthread_mutex_unlock(&thread_lock); + + do { + for (i = 0; i < nfutexes; i++, w->ops++) { + /* + * We want the futex calls to fail in order to stress + * the hashing of uaddr and not measure other steps, + * such as internal waitqueue handling, thus enlarging + * the 
critical region protected by hb->lock. + */ + ret = futex_wait(&w->futex[i], 1234, NULL, + fshared ? 0 : FUTEX_PRIVATE_FLAG); + if (!silent && + (!ret || errno != EAGAIN || errno != EWOULDBLOCK)) + warn("Non-expected futex return call"); + } + } while (!done); + + return NULL; +} + +static void toggle_done(int sig __maybe_unused, + siginfo_t *info __maybe_unused, + void *uc __maybe_unused) +{ + /* inform all threads that we're done for the day */ + done = true; + gettimeofday(&end, NULL); + timersub(&end, &start, &runtime); +} + +static void print_summary(void) +{ + unsigned long avg = avg_stats(&throughput_stats); + double stddev = stddev_stats(&throughput_stats); + + printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n", + !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg), + (int) runtime.tv_sec); +} + +int bench_futex_hash(int argc, const char **argv, + const char *prefix __maybe_unused) +{ + int ret = 0; + cpu_set_t cpu; + struct sigaction act; + unsigned int i, ncpus; + pthread_attr_t thread_attr; + struct worker *worker = NULL; + + argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0); + if (argc) { + usage_with_options(bench_futex_hash_usage, options); + exit(EXIT_FAILURE); + } + + ncpus = sysconf(_SC_NPROCESSORS_ONLN); + + sigfillset(&act.sa_mask); + act.sa_sigaction = toggle_done; + sigaction(SIGINT, &act, NULL); + + if (!nthreads) /* default to the number of CPUs */ + nthreads = ncpus; + + worker = calloc(nthreads, sizeof(*worker)); + if (!worker) + goto errmem; + + printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n", + getpid(), nthreads, nfutexes, fshared ? "shared":"private", nsecs); + + init_stats(&throughput_stats); + pthread_mutex_init(&thread_lock, NULL); + pthread_cond_init(&thread_parent, NULL); + pthread_cond_init(&thread_worker, NULL); + + threads_starting = nthreads; + pthread_attr_init(&thread_attr); + gettimeofday(&start, NULL); + for (i = 0; i < nthreads; i++) { + worker[i].tid = i; + worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex)); + if (!worker[i].futex) + goto errmem; + + CPU_ZERO(&cpu); + CPU_SET(i % ncpus, &cpu); + + ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu); + if (ret) + err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); + + ret = pthread_create(&worker[i].thread, &thread_attr, workerfn, + (void *)(struct worker *) &worker[i]); + if (ret) + err(EXIT_FAILURE, "pthread_create"); + + } + pthread_attr_destroy(&thread_attr); + + pthread_mutex_lock(&thread_lock); + while (threads_starting) + pthread_cond_wait(&thread_parent, &thread_lock); + pthread_cond_broadcast(&thread_worker); + pthread_mutex_unlock(&thread_lock); + + sleep(nsecs); + toggle_done(0, NULL, NULL); + + for (i = 0; i < nthreads; i++) { + ret = pthread_join(worker[i].thread, NULL); + if (ret) + err(EXIT_FAILURE, "pthread_join"); + } + + /* cleanup & report results */ + pthread_cond_destroy(&thread_parent); + pthread_cond_destroy(&thread_worker); + pthread_mutex_destroy(&thread_lock); + + for (i = 0; i < nthreads; i++) { + unsigned long t = worker[i].ops/runtime.tv_sec; + update_stats(&throughput_stats, t); + if (!silent) { + if (nfutexes == 1) + printf("[thread %2d] futex: %p [ %ld ops/sec ]\n", + worker[i].tid, &worker[i].futex[0], t); + else + printf("[thread %2d] futexes: %p ... 
%p [ %ld ops/sec ]\n", + worker[i].tid, &worker[i].futex[0], + &worker[i].futex[nfutexes-1], t); + } + + free(worker[i].futex); + } + + print_summary(); + + free(worker); + return ret; +errmem: + err(EXIT_FAILURE, "calloc"); +} diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c new file mode 100644 index 00000000000..a16255876f1 --- /dev/null +++ b/tools/perf/bench/futex-requeue.c @@ -0,0 +1,211 @@ +/* + * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com> + * + * futex-requeue: Block a bunch of threads on futex1 and requeue them + * on futex2, N at a time. + * + * This program is particularly useful to measure the latency of nthread + * requeues without waking up any tasks -- thus mimicking a regular futex_wait. + */ + +#include "../perf.h" +#include "../util/util.h" +#include "../util/stat.h" +#include "../util/parse-options.h" +#include "../util/header.h" +#include "bench.h" +#include "futex.h" + +#include <err.h> +#include <stdlib.h> +#include <sys/time.h> +#include <pthread.h> + +static u_int32_t futex1 = 0, futex2 = 0; + +/* + * How many tasks to requeue at a time. + * Default to 1 in order to make the kernel work more. + */ +static unsigned int nrequeue = 1; + +/* + * There can be significant variance from run to run, + * the more repeats, the more exact the overall avg and + * the better idea of the futex latency. + */ +static unsigned int repeat = 10; + +static pthread_t *worker; +static bool done = 0, silent = 0; +static pthread_mutex_t thread_lock; +static pthread_cond_t thread_parent, thread_worker; +static struct stats requeuetime_stats, requeued_stats; +static unsigned int ncpus, threads_starting, nthreads = 0; + +static const struct option options[] = { + OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"), + OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"), + OPT_UINTEGER('r', "repeat", &repeat, "Specify amount of times to repeat the run"), + OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"), + OPT_END() +}; + +static const char * const bench_futex_requeue_usage[] = { + "perf bench futex requeue <options>", + NULL +}; + +static void print_summary(void) +{ + double requeuetime_avg = avg_stats(&requeuetime_stats); + double requeuetime_stddev = stddev_stats(&requeuetime_stats); + unsigned int requeued_avg = avg_stats(&requeued_stats); + + printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n", + requeued_avg, + nthreads, + requeuetime_avg/1e3, + rel_stddev_stats(requeuetime_stddev, requeuetime_avg)); +} + +static void *workerfn(void *arg __maybe_unused) +{ + pthread_mutex_lock(&thread_lock); + threads_starting--; + if (!threads_starting) + pthread_cond_signal(&thread_parent); + pthread_cond_wait(&thread_worker, &thread_lock); + pthread_mutex_unlock(&thread_lock); + + futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG); + return NULL; +} + +static void block_threads(pthread_t *w, + pthread_attr_t thread_attr) +{ + cpu_set_t cpu; + unsigned int i; + + threads_starting = nthreads; + + /* create and block all threads */ + for (i = 0; i < nthreads; i++) { + CPU_ZERO(&cpu); + CPU_SET(i % ncpus, &cpu); + + if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu)) + err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); + + if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) + err(EXIT_FAILURE, "pthread_create"); + } +} + +static void toggle_done(int sig __maybe_unused, + siginfo_t *info __maybe_unused, + void *uc __maybe_unused) +{ + done = true; +} 
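The three futex benchmarks all share the same start-gate: each worker decrements threads_starting under thread_lock, the last one signals the parent, and the parent then releases every worker at once with a broadcast. A stand-alone sketch of that pattern (hypothetical names; a 'released' flag is added so the sketch is robust against spurious wakeups; build with -pthread):

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t parent_cv = PTHREAD_COND_INITIALIZER;   /* workers -> parent */
static pthread_cond_t worker_cv = PTHREAD_COND_INITIALIZER;   /* parent -> workers */
static int threads_starting = NTHREADS;
static int released;

static void *worker(void *arg)
{
        long id = (long)arg;

        pthread_mutex_lock(&lock);
        if (--threads_starting == 0)
                pthread_cond_signal(&parent_cv);      /* last one in wakes the parent */
        while (!released)
                pthread_cond_wait(&worker_cv, &lock); /* block until the gate opens */
        pthread_mutex_unlock(&lock);

        printf("worker %ld running\n", id);
        return NULL;
}

int main(void)
{
        pthread_t tid[NTHREADS];
        long i;

        for (i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);

        /* Wait until every worker has checked in, then release them together. */
        pthread_mutex_lock(&lock);
        while (threads_starting)
                pthread_cond_wait(&parent_cv, &lock);
        released = 1;
        pthread_cond_broadcast(&worker_cv);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        return 0;
}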
+ +int bench_futex_requeue(int argc, const char **argv, + const char *prefix __maybe_unused) +{ + int ret = 0; + unsigned int i, j; + struct sigaction act; + pthread_attr_t thread_attr; + + argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0); + if (argc) + goto err; + + ncpus = sysconf(_SC_NPROCESSORS_ONLN); + + sigfillset(&act.sa_mask); + act.sa_sigaction = toggle_done; + sigaction(SIGINT, &act, NULL); + + if (!nthreads) + nthreads = ncpus; + + worker = calloc(nthreads, sizeof(*worker)); + if (!worker) + err(EXIT_FAILURE, "calloc"); + + printf("Run summary [PID %d]: Requeuing %d threads (from %p to %p), " + "%d at a time.\n\n", + getpid(), nthreads, &futex1, &futex2, nrequeue); + + init_stats(&requeued_stats); + init_stats(&requeuetime_stats); + pthread_attr_init(&thread_attr); + pthread_mutex_init(&thread_lock, NULL); + pthread_cond_init(&thread_parent, NULL); + pthread_cond_init(&thread_worker, NULL); + + for (j = 0; j < repeat && !done; j++) { + unsigned int nrequeued = 0; + struct timeval start, end, runtime; + + /* create, launch & block all threads */ + block_threads(worker, thread_attr); + + /* make sure all threads are already blocked */ + pthread_mutex_lock(&thread_lock); + while (threads_starting) + pthread_cond_wait(&thread_parent, &thread_lock); + pthread_cond_broadcast(&thread_worker); + pthread_mutex_unlock(&thread_lock); + + usleep(100000); + + /* Ok, all threads are patiently blocked, start requeueing */ + gettimeofday(&start, NULL); + for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) + /* + * Do not wakeup any tasks blocked on futex1, allowing + * us to really measure futex_wait functionality. + */ + futex_cmp_requeue(&futex1, 0, &futex2, 0, nrequeue, + FUTEX_PRIVATE_FLAG); + gettimeofday(&end, NULL); + timersub(&end, &start, &runtime); + + update_stats(&requeued_stats, nrequeued); + update_stats(&requeuetime_stats, runtime.tv_usec); + + if (!silent) { + printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n", + j + 1, nrequeued, nthreads, runtime.tv_usec/1e3); + } + + /* everybody should be blocked on futex2, wake'em up */ + nrequeued = futex_wake(&futex2, nthreads, FUTEX_PRIVATE_FLAG); + if (nthreads != nrequeued) + warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads); + + for (i = 0; i < nthreads; i++) { + ret = pthread_join(worker[i], NULL); + if (ret) + err(EXIT_FAILURE, "pthread_join"); + } + + } + + /* cleanup & report results */ + pthread_cond_destroy(&thread_parent); + pthread_cond_destroy(&thread_worker); + pthread_mutex_destroy(&thread_lock); + pthread_attr_destroy(&thread_attr); + + print_summary(); + + free(worker); + return ret; +err: + usage_with_options(bench_futex_requeue_usage, options); + exit(EXIT_FAILURE); +} diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c new file mode 100644 index 00000000000..d096169b161 --- /dev/null +++ b/tools/perf/bench/futex-wake.c @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com> + * + * futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time. + * + * This program is particularly useful to measure the latency of nthread wakeups + * in non-error situations: all waiters are queued and all wake calls wakeup + * one or more tasks, and thus the waitqueue is never empty. 
+ */ + +#include "../perf.h" +#include "../util/util.h" +#include "../util/stat.h" +#include "../util/parse-options.h" +#include "../util/header.h" +#include "bench.h" +#include "futex.h" + +#include <err.h> +#include <stdlib.h> +#include <sys/time.h> +#include <pthread.h> + +/* all threads will block on the same futex */ +static u_int32_t futex1 = 0; + +/* + * How many wakeups to do at a time. + * Default to 1 in order to make the kernel work more. + */ +static unsigned int nwakes = 1; + +/* + * There can be significant variance from run to run, + * the more repeats, the more exact the overall avg and + * the better idea of the futex latency. + */ +static unsigned int repeat = 10; + +pthread_t *worker; +static bool done = 0, silent = 0; +static pthread_mutex_t thread_lock; +static pthread_cond_t thread_parent, thread_worker; +static struct stats waketime_stats, wakeup_stats; +static unsigned int ncpus, threads_starting, nthreads = 0; + +static const struct option options[] = { + OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"), + OPT_UINTEGER('w', "nwakes", &nwakes, "Specify amount of threads to wake at once"), + OPT_UINTEGER('r', "repeat", &repeat, "Specify amount of times to repeat the run"), + OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"), + OPT_END() +}; + +static const char * const bench_futex_wake_usage[] = { + "perf bench futex wake <options>", + NULL +}; + +static void *workerfn(void *arg __maybe_unused) +{ + pthread_mutex_lock(&thread_lock); + threads_starting--; + if (!threads_starting) + pthread_cond_signal(&thread_parent); + pthread_cond_wait(&thread_worker, &thread_lock); + pthread_mutex_unlock(&thread_lock); + + futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG); + return NULL; +} + +static void print_summary(void) +{ + double waketime_avg = avg_stats(&waketime_stats); + double waketime_stddev = stddev_stats(&waketime_stats); + unsigned int wakeup_avg = avg_stats(&wakeup_stats); + + printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n", + wakeup_avg, + nthreads, + waketime_avg/1e3, + rel_stddev_stats(waketime_stddev, waketime_avg)); +} + +static void block_threads(pthread_t *w, + pthread_attr_t thread_attr) +{ + cpu_set_t cpu; + unsigned int i; + + threads_starting = nthreads; + + /* create and block all threads */ + for (i = 0; i < nthreads; i++) { + CPU_ZERO(&cpu); + CPU_SET(i % ncpus, &cpu); + + if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu)) + err(EXIT_FAILURE, "pthread_attr_setaffinity_np"); + + if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) + err(EXIT_FAILURE, "pthread_create"); + } +} + +static void toggle_done(int sig __maybe_unused, + siginfo_t *info __maybe_unused, + void *uc __maybe_unused) +{ + done = true; +} + +int bench_futex_wake(int argc, const char **argv, + const char *prefix __maybe_unused) +{ + int ret = 0; + unsigned int i, j; + struct sigaction act; + pthread_attr_t thread_attr; + + argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0); + if (argc) { + usage_with_options(bench_futex_wake_usage, options); + exit(EXIT_FAILURE); + } + + ncpus = sysconf(_SC_NPROCESSORS_ONLN); + + sigfillset(&act.sa_mask); + act.sa_sigaction = toggle_done; + sigaction(SIGINT, &act, NULL); + + if (!nthreads) + nthreads = ncpus; + + worker = calloc(nthreads, sizeof(*worker)); + if (!worker) + err(EXIT_FAILURE, "calloc"); + + printf("Run summary [PID %d]: blocking on %d threads (at futex %p), " + "waking up %d at a time.\n\n", + getpid(), nthreads, &futex1, nwakes); + + 
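The futex_wait()/futex_wake() helpers these benchmarks call are thin wrappers around the raw futex(2) syscall (they are added in bench/futex.h later in this patch). A minimal stand-alone wait/wake sketch, with hypothetical names and only the essential error handling (build with -pthread):

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static uint32_t futex_word;     /* 0 = "not ready", 1 = "ready" */

static long sys_futex(uint32_t *uaddr, int op, uint32_t val)
{
        /* Unused timeout/uaddr2/val3 arguments are passed as NULL/0. */
        return syscall(SYS_futex, uaddr, op | FUTEX_PRIVATE_FLAG, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
        (void)arg;
        /* Sleep in the kernel while the word still holds the expected value 0. */
        while (__atomic_load_n(&futex_word, __ATOMIC_ACQUIRE) == 0)
                sys_futex(&futex_word, FUTEX_WAIT, 0);  /* returns EAGAIN if it changed already */
        printf("waiter released\n");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        sleep(1);                                       /* let the waiter block */

        __atomic_store_n(&futex_word, 1, __ATOMIC_RELEASE);
        sys_futex(&futex_word, FUTEX_WAKE, 1);          /* wake up to one waiter */

        pthread_join(t, NULL);
        return 0;
}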
init_stats(&wakeup_stats); + init_stats(&waketime_stats); + pthread_attr_init(&thread_attr); + pthread_mutex_init(&thread_lock, NULL); + pthread_cond_init(&thread_parent, NULL); + pthread_cond_init(&thread_worker, NULL); + + for (j = 0; j < repeat && !done; j++) { + unsigned int nwoken = 0; + struct timeval start, end, runtime; + + /* create, launch & block all threads */ + block_threads(worker, thread_attr); + + /* make sure all threads are already blocked */ + pthread_mutex_lock(&thread_lock); + while (threads_starting) + pthread_cond_wait(&thread_parent, &thread_lock); + pthread_cond_broadcast(&thread_worker); + pthread_mutex_unlock(&thread_lock); + + usleep(100000); + + /* Ok, all threads are patiently blocked, start waking folks up */ + gettimeofday(&start, NULL); + while (nwoken != nthreads) + nwoken += futex_wake(&futex1, nwakes, FUTEX_PRIVATE_FLAG); + gettimeofday(&end, NULL); + timersub(&end, &start, &runtime); + + update_stats(&wakeup_stats, nwoken); + update_stats(&waketime_stats, runtime.tv_usec); + + if (!silent) { + printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n", + j + 1, nwoken, nthreads, runtime.tv_usec/1e3); + } + + for (i = 0; i < nthreads; i++) { + ret = pthread_join(worker[i], NULL); + if (ret) + err(EXIT_FAILURE, "pthread_join"); + } + + } + + /* cleanup & report results */ + pthread_cond_destroy(&thread_parent); + pthread_cond_destroy(&thread_worker); + pthread_mutex_destroy(&thread_lock); + pthread_attr_destroy(&thread_attr); + + print_summary(); + + free(worker); + return ret; +} diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h new file mode 100644 index 00000000000..71f2844cf97 --- /dev/null +++ b/tools/perf/bench/futex.h @@ -0,0 +1,71 @@ +/* + * Glibc independent futex library for testing kernel functionality. + * Shamelessly stolen from Darren Hart <dvhltc@us.ibm.com> + * http://git.kernel.org/cgit/linux/kernel/git/dvhart/futextest.git/ + */ + +#ifndef _FUTEX_H +#define _FUTEX_H + +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <linux/futex.h> + +/** + * futex() - SYS_futex syscall wrapper + * @uaddr: address of first futex + * @op: futex op code + * @val: typically expected value of uaddr, but varies by op + * @timeout: typically an absolute struct timespec (except where noted + * otherwise). Overloaded by some ops + * @uaddr2: address of second futex for some ops\ + * @val3: varies by op + * @opflags: flags to be bitwise OR'd with op, such as FUTEX_PRIVATE_FLAG + * + * futex() is used by all the following futex op wrappers. It can also be + * used for misuse and abuse testing. Generally, the specific op wrappers + * should be used instead. It is a macro instead of an static inline function as + * some of the types over overloaded (timeout is used for nr_requeue for + * example). + * + * These argument descriptions are the defaults for all + * like-named arguments in the following wrappers except where noted below. 
+ */ +#define futex(uaddr, op, val, timeout, uaddr2, val3, opflags) \ + syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3) + +/** + * futex_wait() - block on uaddr with optional timeout + * @timeout: relative timeout + */ +static inline int +futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflags) +{ + return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags); +} + +/** + * futex_wake() - wake one or more tasks blocked on uaddr + * @nr_wake: wake up to this many tasks + */ +static inline int +futex_wake(u_int32_t *uaddr, int nr_wake, int opflags) +{ + return futex(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags); +} + +/** +* futex_cmp_requeue() - requeue tasks from uaddr to uaddr2 +* @nr_wake: wake up to this many tasks +* @nr_requeue: requeue up to this many tasks +*/ +static inline int +futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wake, + int nr_requeue, int opflags) +{ + return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2, + val, opflags); +} + +#endif /* _FUTEX_H */ diff --git a/tools/perf/bench/mem-memcpy-arch.h b/tools/perf/bench/mem-memcpy-arch.h index a72e36cb539..57b4ed87145 100644 --- a/tools/perf/bench/mem-memcpy-arch.h +++ b/tools/perf/bench/mem-memcpy-arch.h @@ -1,5 +1,5 @@ -#ifdef ARCH_X86_64 +#ifdef HAVE_ARCH_X86_64_SUPPORT #define MEMCPY_FN(fn, name, desc) \ extern void *fn(void *, const void *, size_t); diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 93c83e3cb4a..5ce71d3b72c 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -58,7 +58,7 @@ struct routine routines[] = { { "default", "Default memcpy() provided by glibc", memcpy }, -#ifdef ARCH_X86_64 +#ifdef HAVE_ARCH_X86_64_SUPPORT #define MEMCPY_FN(fn, name, desc) { name, desc, fn }, #include "mem-memcpy-x86-64-asm-def.h" @@ -111,12 +111,14 @@ static double timeval2double(struct timeval *ts) static void alloc_mem(void **dst, void **src, size_t length) { *dst = zalloc(length); - if (!dst) + if (!*dst) die("memory allocation failed - maybe length is too large?\n"); *src = zalloc(length); - if (!src) + if (!*src) die("memory allocation failed - maybe length is too large?\n"); + /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */ + memset(*src, 0, length); } static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault) diff --git a/tools/perf/bench/mem-memset-arch.h b/tools/perf/bench/mem-memset-arch.h index a040fa77665..633800cb0dc 100644 --- a/tools/perf/bench/mem-memset-arch.h +++ b/tools/perf/bench/mem-memset-arch.h @@ -1,5 +1,5 @@ -#ifdef ARCH_X86_64 +#ifdef HAVE_ARCH_X86_64_SUPPORT #define MEMSET_FN(fn, name, desc) \ extern void *fn(void *, int, size_t); diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c index c6e4bc52349..9af79d2b18e 100644 --- a/tools/perf/bench/mem-memset.c +++ b/tools/perf/bench/mem-memset.c @@ -58,7 +58,7 @@ static const struct routine routines[] = { { "default", "Default memset() provided by glibc", memset }, -#ifdef ARCH_X86_64 +#ifdef HAVE_ARCH_X86_64_SUPPORT #define MEMSET_FN(fn, name, desc) { name, desc, fn }, #include "mem-memset-x86-64-asm-def.h" @@ -111,7 +111,7 @@ static double timeval2double(struct timeval *ts) static void alloc_mem(void **dst, size_t length) { *dst = zalloc(length); - if (!dst) + if (!*dst) die("memory allocation failed - maybe length is too large?\n"); } diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c new file mode 100644 index 
00000000000..ebfa163b80b --- /dev/null +++ b/tools/perf/bench/numa.c @@ -0,0 +1,1744 @@ +/* + * numa.c + * + * numa: Simulate NUMA-sensitive workload and measure their NUMA performance + */ + +#include "../perf.h" +#include "../builtin.h" +#include "../util/util.h" +#include "../util/parse-options.h" + +#include "bench.h" + +#include <errno.h> +#include <sched.h> +#include <stdio.h> +#include <assert.h> +#include <malloc.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <pthread.h> +#include <sys/mman.h> +#include <sys/time.h> +#include <sys/wait.h> +#include <sys/prctl.h> +#include <sys/types.h> + +#include <numa.h> +#include <numaif.h> + +/* + * Regular printout to the terminal, supressed if -q is specified: + */ +#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0) + +/* + * Debug printf: + */ +#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0) + +struct thread_data { + int curr_cpu; + cpu_set_t bind_cpumask; + int bind_node; + u8 *process_data; + int process_nr; + int thread_nr; + int task_nr; + unsigned int loops_done; + u64 val; + u64 runtime_ns; + pthread_mutex_t *process_lock; +}; + +/* Parameters set by options: */ + +struct params { + /* Startup synchronization: */ + bool serialize_startup; + + /* Task hierarchy: */ + int nr_proc; + int nr_threads; + + /* Working set sizes: */ + const char *mb_global_str; + const char *mb_proc_str; + const char *mb_proc_locked_str; + const char *mb_thread_str; + + double mb_global; + double mb_proc; + double mb_proc_locked; + double mb_thread; + + /* Access patterns to the working set: */ + bool data_reads; + bool data_writes; + bool data_backwards; + bool data_zero_memset; + bool data_rand_walk; + u32 nr_loops; + u32 nr_secs; + u32 sleep_usecs; + + /* Working set initialization: */ + bool init_zero; + bool init_random; + bool init_cpu0; + + /* Misc options: */ + int show_details; + int run_all; + int thp; + + long bytes_global; + long bytes_process; + long bytes_process_locked; + long bytes_thread; + + int nr_tasks; + bool show_quiet; + + bool show_convergence; + bool measure_convergence; + + int perturb_secs; + int nr_cpus; + int nr_nodes; + + /* Affinity options -C and -N: */ + char *cpu_list_str; + char *node_list_str; +}; + + +/* Global, read-writable area, accessible to all processes and threads: */ + +struct global_info { + u8 *data; + + pthread_mutex_t startup_mutex; + int nr_tasks_started; + + pthread_mutex_t startup_done_mutex; + + pthread_mutex_t start_work_mutex; + int nr_tasks_working; + + pthread_mutex_t stop_work_mutex; + u64 bytes_done; + + struct thread_data *threads; + + /* Convergence latency measurement: */ + bool all_converged; + bool stop_work; + + int print_once; + + struct params p; +}; + +static struct global_info *g = NULL; + +static int parse_cpus_opt(const struct option *opt, const char *arg, int unset); +static int parse_nodes_opt(const struct option *opt, const char *arg, int unset); + +struct params p0; + +static const struct option options[] = { + OPT_INTEGER('p', "nr_proc" , &p0.nr_proc, "number of processes"), + OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"), + + OPT_STRING('G', "mb_global" , &p0.mb_global_str, "MB", "global memory (MBs)"), + OPT_STRING('P', "mb_proc" , &p0.mb_proc_str, "MB", "process memory (MBs)"), + OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"), + OPT_STRING('T', 
"mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"), + + OPT_UINTEGER('l', "nr_loops" , &p0.nr_loops, "max number of loops to run"), + OPT_UINTEGER('s', "nr_secs" , &p0.nr_secs, "max number of seconds to run"), + OPT_UINTEGER('u', "usleep" , &p0.sleep_usecs, "usecs to sleep per loop iteration"), + + OPT_BOOLEAN('R', "data_reads" , &p0.data_reads, "access the data via writes (can be mixed with -W)"), + OPT_BOOLEAN('W', "data_writes" , &p0.data_writes, "access the data via writes (can be mixed with -R)"), + OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards, "access the data backwards as well"), + OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"), + OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk, "access the data with random (32bit LFSR) walk"), + + + OPT_BOOLEAN('z', "init_zero" , &p0.init_zero, "bzero the initial allocations"), + OPT_BOOLEAN('I', "init_random" , &p0.init_random, "randomize the contents of the initial allocations"), + OPT_BOOLEAN('0', "init_cpu0" , &p0.init_cpu0, "do the initial allocations on CPU#0"), + OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs, "perturb thread 0/0 every X secs, to test convergence stability"), + + OPT_INCR ('d', "show_details" , &p0.show_details, "Show details"), + OPT_INCR ('a', "all" , &p0.run_all, "Run all tests in the suite"), + OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"), + OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"), + OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"), + OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"), + OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"), + + /* Special option string parsing callbacks: */ + OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]", + "bind the first N tasks to these specific cpus (the rest is unbound)", + parse_cpus_opt), + OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]", + "bind the first N tasks to these specific memory nodes (the rest is unbound)", + parse_nodes_opt), + OPT_END() +}; + +static const char * const bench_numa_usage[] = { + "perf bench numa <options>", + NULL +}; + +static const char * const numa_usage[] = { + "perf bench numa mem [<options>]", + NULL +}; + +static cpu_set_t bind_to_cpu(int target_cpu) +{ + cpu_set_t orig_mask, mask; + int ret; + + ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); + BUG_ON(ret); + + CPU_ZERO(&mask); + + if (target_cpu == -1) { + int cpu; + + for (cpu = 0; cpu < g->p.nr_cpus; cpu++) + CPU_SET(cpu, &mask); + } else { + BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus); + CPU_SET(target_cpu, &mask); + } + + ret = sched_setaffinity(0, sizeof(mask), &mask); + BUG_ON(ret); + + return orig_mask; +} + +static cpu_set_t bind_to_node(int target_node) +{ + int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; + cpu_set_t orig_mask, mask; + int cpu; + int ret; + + BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); + BUG_ON(!cpus_per_node); + + ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); + BUG_ON(ret); + + CPU_ZERO(&mask); + + if (target_node == -1) { + for (cpu = 0; cpu < g->p.nr_cpus; cpu++) + CPU_SET(cpu, &mask); + } else { + int cpu_start = (target_node + 0) * cpus_per_node; + int cpu_stop = (target_node + 1) * cpus_per_node; + + BUG_ON(cpu_stop > g->p.nr_cpus); + + for (cpu = cpu_start; cpu < cpu_stop; cpu++) + CPU_SET(cpu, &mask); + } + + ret = sched_setaffinity(0, 
sizeof(mask), &mask); + BUG_ON(ret); + + return orig_mask; +} + +static void bind_to_cpumask(cpu_set_t mask) +{ + int ret; + + ret = sched_setaffinity(0, sizeof(mask), &mask); + BUG_ON(ret); +} + +static void mempol_restore(void) +{ + int ret; + + ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1); + + BUG_ON(ret); +} + +static void bind_to_memnode(int node) +{ + unsigned long nodemask; + int ret; + + if (node == -1) + return; + + BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)); + nodemask = 1L << node; + + ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8); + dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret); + + BUG_ON(ret); +} + +#define HPSIZE (2*1024*1024) + +#define set_taskname(fmt...) \ +do { \ + char name[20]; \ + \ + snprintf(name, 20, fmt); \ + prctl(PR_SET_NAME, name); \ +} while (0) + +static u8 *alloc_data(ssize_t bytes0, int map_flags, + int init_zero, int init_cpu0, int thp, int init_random) +{ + cpu_set_t orig_mask; + ssize_t bytes; + u8 *buf; + int ret; + + if (!bytes0) + return NULL; + + /* Allocate and initialize all memory on CPU#0: */ + if (init_cpu0) { + orig_mask = bind_to_node(0); + bind_to_memnode(0); + } + + bytes = bytes0 + HPSIZE; + + buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0); + BUG_ON(buf == (void *)-1); + + if (map_flags == MAP_PRIVATE) { + if (thp > 0) { + ret = madvise(buf, bytes, MADV_HUGEPAGE); + if (ret && !g->print_once) { + g->print_once = 1; + printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n"); + } + } + if (thp < 0) { + ret = madvise(buf, bytes, MADV_NOHUGEPAGE); + if (ret && !g->print_once) { + g->print_once = 1; + printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n"); + } + } + } + + if (init_zero) { + bzero(buf, bytes); + } else { + /* Initialize random contents, different in each word: */ + if (init_random) { + u64 *wbuf = (void *)buf; + long off = rand(); + long i; + + for (i = 0; i < bytes/8; i++) + wbuf[i] = i + off; + } + } + + /* Align to 2MB boundary: */ + buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1)); + + /* Restore affinity: */ + if (init_cpu0) { + bind_to_cpumask(orig_mask); + mempol_restore(); + } + + return buf; +} + +static void free_data(void *data, ssize_t bytes) +{ + int ret; + + if (!data) + return; + + ret = munmap(data, bytes); + BUG_ON(ret); +} + +/* + * Create a shared memory buffer that can be shared between processes, zeroed: + */ +static void * zalloc_shared_data(ssize_t bytes) +{ + return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0, g->p.thp, g->p.init_random); +} + +/* + * Create a shared memory buffer that can be shared between processes: + */ +static void * setup_shared_data(ssize_t bytes) +{ + return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); +} + +/* + * Allocate process-local memory - this will either be shared between + * threads of this process, or only be accessed by this thread: + */ +static void * setup_private_data(ssize_t bytes) +{ + return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random); +} + +/* + * Return a process-shared (global) mutex: + */ +static void init_global_mutex(pthread_mutex_t *mutex) +{ + pthread_mutexattr_t attr; + + pthread_mutexattr_init(&attr); + pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED); + pthread_mutex_init(mutex, &attr); +} + +static int parse_cpu_list(const char *arg) +{ + p0.cpu_list_str = strdup(arg); + + dprintf("got CPU 
list: {%s}\n", p0.cpu_list_str); + + return 0; +} + +static int parse_setup_cpu_list(void) +{ + struct thread_data *td; + char *str0, *str; + int t; + + if (!g->p.cpu_list_str) + return 0; + + dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); + + str0 = str = strdup(g->p.cpu_list_str); + t = 0; + + BUG_ON(!str); + + tprintf("# binding tasks to CPUs:\n"); + tprintf("# "); + + while (true) { + int bind_cpu, bind_cpu_0, bind_cpu_1; + char *tok, *tok_end, *tok_step, *tok_len, *tok_mul; + int bind_len; + int step; + int mul; + + tok = strsep(&str, ","); + if (!tok) + break; + + tok_end = strstr(tok, "-"); + + dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end); + if (!tok_end) { + /* Single CPU specified: */ + bind_cpu_0 = bind_cpu_1 = atol(tok); + } else { + /* CPU range specified (for example: "5-11"): */ + bind_cpu_0 = atol(tok); + bind_cpu_1 = atol(tok_end + 1); + } + + step = 1; + tok_step = strstr(tok, "#"); + if (tok_step) { + step = atol(tok_step + 1); + BUG_ON(step <= 0 || step >= g->p.nr_cpus); + } + + /* + * Mask length. + * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4', + * where the _4 means the next 4 CPUs are allowed. + */ + bind_len = 1; + tok_len = strstr(tok, "_"); + if (tok_len) { + bind_len = atol(tok_len + 1); + BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus); + } + + /* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */ + mul = 1; + tok_mul = strstr(tok, "x"); + if (tok_mul) { + mul = atol(tok_mul + 1); + BUG_ON(mul <= 0); + } + + dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul); + + if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) { + printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus); + return -1; + } + + BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0); + BUG_ON(bind_cpu_0 > bind_cpu_1); + + for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) { + int i; + + for (i = 0; i < mul; i++) { + int cpu; + + if (t >= g->p.nr_tasks) { + printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu); + goto out; + } + td = g->threads + t; + + if (t) + tprintf(","); + if (bind_len > 1) { + tprintf("%2d/%d", bind_cpu, bind_len); + } else { + tprintf("%2d", bind_cpu); + } + + CPU_ZERO(&td->bind_cpumask); + for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) { + BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus); + CPU_SET(cpu, &td->bind_cpumask); + } + t++; + } + } + } +out: + + tprintf("\n"); + + if (t < g->p.nr_tasks) + printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t); + + free(str0); + return 0; +} + +static int parse_cpus_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (!arg) + return -1; + + return parse_cpu_list(arg); +} + +static int parse_node_list(const char *arg) +{ + p0.node_list_str = strdup(arg); + + dprintf("got NODE list: {%s}\n", p0.node_list_str); + + return 0; +} + +static int parse_setup_node_list(void) +{ + struct thread_data *td; + char *str0, *str; + int t; + + if (!g->p.node_list_str) + return 0; + + dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks); + + str0 = str = strdup(g->p.node_list_str); + t = 0; + + BUG_ON(!str); + + tprintf("# binding tasks to NODEs:\n"); + tprintf("# "); + + while (true) { + int bind_node, bind_node_0, bind_node_1; + char *tok, *tok_end, *tok_step, *tok_mul; + int step; + int mul; + + tok = strsep(&str, ","); + if (!tok) + break; + + tok_end = strstr(tok, "-"); + + dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end); + if (!tok_end) { + /* Single NODE specified: */ + 
bind_node_0 = bind_node_1 = atol(tok); + } else { + /* NODE range specified (for example: "5-11"): */ + bind_node_0 = atol(tok); + bind_node_1 = atol(tok_end + 1); + } + + step = 1; + tok_step = strstr(tok, "#"); + if (tok_step) { + step = atol(tok_step + 1); + BUG_ON(step <= 0 || step >= g->p.nr_nodes); + } + + /* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */ + mul = 1; + tok_mul = strstr(tok, "x"); + if (tok_mul) { + mul = atol(tok_mul + 1); + BUG_ON(mul <= 0); + } + + dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step); + + if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) { + printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes); + return -1; + } + + BUG_ON(bind_node_0 < 0 || bind_node_1 < 0); + BUG_ON(bind_node_0 > bind_node_1); + + for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) { + int i; + + for (i = 0; i < mul; i++) { + if (t >= g->p.nr_tasks) { + printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); + goto out; + } + td = g->threads + t; + + if (!t) + tprintf(" %2d", bind_node); + else + tprintf(",%2d", bind_node); + + td->bind_node = bind_node; + t++; + } + } + } +out: + + tprintf("\n"); + + if (t < g->p.nr_tasks) + printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t); + + free(str0); + return 0; +} + +static int parse_nodes_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (!arg) + return -1; + + return parse_node_list(arg); + + return 0; +} + +#define BIT(x) (1ul << x) + +static inline uint32_t lfsr_32(uint32_t lfsr) +{ + const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31); + return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps); +} + +/* + * Make sure there's real data dependency to RAM (when read + * accesses are enabled), so the compiler, the CPU and the + * kernel (KSM, zero page, etc.) cannot optimize away RAM + * accesses: + */ +static inline u64 access_data(u64 *data __attribute__((unused)), u64 val) +{ + if (g->p.data_reads) + val += *data; + if (g->p.data_writes) + *data = val + 1; + return val; +} + +/* + * The worker process does two types of work, a forwards going + * loop and a backwards going loop. + * + * We do this so that on multiprocessor systems we do not create + * a 'train' of processing, with highly synchronized processes, + * skewing the whole benchmark. 
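For --data_rand_walk, do_work() below steps the 32-bit Galois LFSR above once per window and uses the result to pick a pseudo-random 1024-word window in the working set, which keeps the walk cheap, deterministic and hard for the CPU or kernel to optimize away. A stand-alone sketch of that offset generation (sizes and seed are made up):

#include <stdio.h>
#include <stdint.h>

#define BIT(x) (1ul << (x))

static inline uint32_t lfsr_32(uint32_t lfsr)
{
        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);

        /* Galois LFSR step: shift right, XOR in the taps when bit 0 was set. */
        return (lfsr >> 1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
}

int main(void)
{
        long words = 1024 * 1024;       /* pretend the working set is 1M u64 words */
        uint32_t lfsr = 12345;          /* must be seeded non-zero or the register sticks at 0 */
        int i;

        for (i = 0; i < 8; i++) {
                long start;

                lfsr = lfsr_32(lfsr);
                start = lfsr % words;   /* start of the next 1024-word window, as in do_work() */
                printf("window %d starts at word %ld\n", i, start);
        }
        return 0;
}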
+ */ +static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val) +{ + long words = bytes/sizeof(u64); + u64 *data = (void *)__data; + long chunk_0, chunk_1; + u64 *d0, *d, *d1; + long off; + long i; + + BUG_ON(!data && words); + BUG_ON(data && !words); + + if (!data) + return val; + + /* Very simple memset() work variant: */ + if (g->p.data_zero_memset && !g->p.data_rand_walk) { + bzero(data, bytes); + return val; + } + + /* Spread out by PID/TID nr and by loop nr: */ + chunk_0 = words/nr_max; + chunk_1 = words/g->p.nr_loops; + off = nr*chunk_0 + loop*chunk_1; + + while (off >= words) + off -= words; + + if (g->p.data_rand_walk) { + u32 lfsr = nr + loop + val; + int j; + + for (i = 0; i < words/1024; i++) { + long start, end; + + lfsr = lfsr_32(lfsr); + + start = lfsr % words; + end = min(start + 1024, words-1); + + if (g->p.data_zero_memset) { + bzero(data + start, (end-start) * sizeof(u64)); + } else { + for (j = start; j < end; j++) + val = access_data(data + j, val); + } + } + } else if (!g->p.data_backwards || (nr + loop) & 1) { + + d0 = data + off; + d = data + off + 1; + d1 = data + words; + + /* Process data forwards: */ + for (;;) { + if (unlikely(d >= d1)) + d = data; + if (unlikely(d == d0)) + break; + + val = access_data(d, val); + + d++; + } + } else { + /* Process data backwards: */ + + d0 = data + off; + d = data + off - 1; + d1 = data + words; + + /* Process data forwards: */ + for (;;) { + if (unlikely(d < data)) + d = data + words-1; + if (unlikely(d == d0)) + break; + + val = access_data(d, val); + + d--; + } + } + + return val; +} + +static void update_curr_cpu(int task_nr, unsigned long bytes_worked) +{ + unsigned int cpu; + + cpu = sched_getcpu(); + + g->threads[task_nr].curr_cpu = cpu; + prctl(0, bytes_worked); +} + +#define MAX_NR_NODES 64 + +/* + * Count the number of nodes a process's threads + * are spread out on. + * + * A count of 1 means that the process is compressed + * to a single node. A count of g->p.nr_nodes means it's + * spread out on the whole system. + */ +static int count_process_nodes(int process_nr) +{ + char node_present[MAX_NR_NODES] = { 0, }; + int nodes; + int n, t; + + for (t = 0; t < g->p.nr_threads; t++) { + struct thread_data *td; + int task_nr; + int node; + + task_nr = process_nr*g->p.nr_threads + t; + td = g->threads + task_nr; + + node = numa_node_of_cpu(td->curr_cpu); + node_present[node] = 1; + } + + nodes = 0; + + for (n = 0; n < MAX_NR_NODES; n++) + nodes += node_present[n]; + + return nodes; +} + +/* + * Count the number of distinct process-threads a node contains. + * + * A count of 1 means that the node contains only a single + * process. If all nodes on the system contain at most one + * process then we are well-converged. 
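count_process_nodes() above boils down to: map each thread's current CPU to its NUMA node and count how many distinct nodes are touched, where a result of 1 means the process has converged onto a single node. A stand-alone sketch of that counting (hypothetical helper; needs libnuma, link with -lnuma):

#include <stdio.h>
#include <numa.h>

#define MAX_NR_NODES 64

static int count_nodes(const int *cpus, int nr_cpus)
{
        char node_present[MAX_NR_NODES] = { 0, };
        int nodes = 0;
        int i, n;

        for (i = 0; i < nr_cpus; i++) {
                int node = numa_node_of_cpu(cpus[i]);   /* CPU -> NUMA node */

                if (node >= 0 && node < MAX_NR_NODES)
                        node_present[node] = 1;
        }

        for (n = 0; n < MAX_NR_NODES; n++)
                nodes += node_present[n];

        return nodes;
}

int main(void)
{
        int cpus[] = { 0, 1, 2, 3 };    /* e.g. the curr_cpu of four threads */

        printf("threads span %d node(s)\n", count_nodes(cpus, 4));
        return 0;
}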
+ */ +static int count_node_processes(int node) +{ + int processes = 0; + int t, p; + + for (p = 0; p < g->p.nr_proc; p++) { + for (t = 0; t < g->p.nr_threads; t++) { + struct thread_data *td; + int task_nr; + int n; + + task_nr = p*g->p.nr_threads + t; + td = g->threads + task_nr; + + n = numa_node_of_cpu(td->curr_cpu); + if (n == node) { + processes++; + break; + } + } + } + + return processes; +} + +static void calc_convergence_compression(int *strong) +{ + unsigned int nodes_min, nodes_max; + int p; + + nodes_min = -1; + nodes_max = 0; + + for (p = 0; p < g->p.nr_proc; p++) { + unsigned int nodes = count_process_nodes(p); + + nodes_min = min(nodes, nodes_min); + nodes_max = max(nodes, nodes_max); + } + + /* Strong convergence: all threads compress on a single node: */ + if (nodes_min == 1 && nodes_max == 1) { + *strong = 1; + } else { + *strong = 0; + tprintf(" {%d-%d}", nodes_min, nodes_max); + } +} + +static void calc_convergence(double runtime_ns_max, double *convergence) +{ + unsigned int loops_done_min, loops_done_max; + int process_groups; + int nodes[MAX_NR_NODES]; + int distance; + int nr_min; + int nr_max; + int strong; + int sum; + int nr; + int node; + int cpu; + int t; + + if (!g->p.show_convergence && !g->p.measure_convergence) + return; + + for (node = 0; node < g->p.nr_nodes; node++) + nodes[node] = 0; + + loops_done_min = -1; + loops_done_max = 0; + + for (t = 0; t < g->p.nr_tasks; t++) { + struct thread_data *td = g->threads + t; + unsigned int loops_done; + + cpu = td->curr_cpu; + + /* Not all threads have written it yet: */ + if (cpu < 0) + continue; + + node = numa_node_of_cpu(cpu); + + nodes[node]++; + + loops_done = td->loops_done; + loops_done_min = min(loops_done, loops_done_min); + loops_done_max = max(loops_done, loops_done_max); + } + + nr_max = 0; + nr_min = g->p.nr_tasks; + sum = 0; + + for (node = 0; node < g->p.nr_nodes; node++) { + nr = nodes[node]; + nr_min = min(nr, nr_min); + nr_max = max(nr, nr_max); + sum += nr; + } + BUG_ON(nr_min > nr_max); + + BUG_ON(sum > g->p.nr_tasks); + + if (0 && (sum < g->p.nr_tasks)) + return; + + /* + * Count the number of distinct process groups present + * on nodes - when we are converged this will decrease + * to g->p.nr_proc: + */ + process_groups = 0; + + for (node = 0; node < g->p.nr_nodes; node++) { + int processes = count_node_processes(node); + + nr = nodes[node]; + tprintf(" %2d/%-2d", nr, processes); + + process_groups += processes; + } + + distance = nr_max - nr_min; + + tprintf(" [%2d/%-2d]", distance, process_groups); + + tprintf(" l:%3d-%-3d (%3d)", + loops_done_min, loops_done_max, loops_done_max-loops_done_min); + + if (loops_done_min && loops_done_max) { + double skew = 1.0 - (double)loops_done_min/loops_done_max; + + tprintf(" [%4.1f%%]", skew * 100.0); + } + + calc_convergence_compression(&strong); + + if (strong && process_groups == g->p.nr_proc) { + if (!*convergence) { + *convergence = runtime_ns_max; + tprintf(" (%6.1fs converged)\n", *convergence/1e9); + if (g->p.measure_convergence) { + g->all_converged = true; + g->stop_work = true; + } + } + } else { + if (*convergence) { + tprintf(" (%6.1fs de-converged)", runtime_ns_max/1e9); + *convergence = 0; + } + tprintf("\n"); + } +} + +static void show_summary(double runtime_ns_max, int l, double *convergence) +{ + tprintf("\r # %5.1f%% [%.1f mins]", + (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max/1e9 / 60.0); + + calc_convergence(runtime_ns_max, convergence); + + if (g->p.show_details >= 0) + fflush(stdout); +} + +static void *worker_thread(void 
*__tdata) +{ + struct thread_data *td = __tdata; + struct timeval start0, start, stop, diff; + int process_nr = td->process_nr; + int thread_nr = td->thread_nr; + unsigned long last_perturbance; + int task_nr = td->task_nr; + int details = g->p.show_details; + int first_task, last_task; + double convergence = 0; + u64 val = td->val; + double runtime_ns_max; + u8 *global_data; + u8 *process_data; + u8 *thread_data; + u64 bytes_done; + long work_done; + u32 l; + + bind_to_cpumask(td->bind_cpumask); + bind_to_memnode(td->bind_node); + + set_taskname("thread %d/%d", process_nr, thread_nr); + + global_data = g->data; + process_data = td->process_data; + thread_data = setup_private_data(g->p.bytes_thread); + + bytes_done = 0; + + last_task = 0; + if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1) + last_task = 1; + + first_task = 0; + if (process_nr == 0 && thread_nr == 0) + first_task = 1; + + if (details >= 2) { + printf("# thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n", + process_nr, thread_nr, global_data, process_data, thread_data); + } + + if (g->p.serialize_startup) { + pthread_mutex_lock(&g->startup_mutex); + g->nr_tasks_started++; + pthread_mutex_unlock(&g->startup_mutex); + + /* Here we will wait for the main process to start us all at once: */ + pthread_mutex_lock(&g->start_work_mutex); + g->nr_tasks_working++; + + /* Last one wake the main process: */ + if (g->nr_tasks_working == g->p.nr_tasks) + pthread_mutex_unlock(&g->startup_done_mutex); + + pthread_mutex_unlock(&g->start_work_mutex); + } + + gettimeofday(&start0, NULL); + + start = stop = start0; + last_perturbance = start.tv_sec; + + for (l = 0; l < g->p.nr_loops; l++) { + start = stop; + + if (g->stop_work) + break; + + val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val); + val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val); + val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val); + + if (g->p.sleep_usecs) { + pthread_mutex_lock(td->process_lock); + usleep(g->p.sleep_usecs); + pthread_mutex_unlock(td->process_lock); + } + /* + * Amount of work to be done under a process-global lock: + */ + if (g->p.bytes_process_locked) { + pthread_mutex_lock(td->process_lock); + val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val); + pthread_mutex_unlock(td->process_lock); + } + + work_done = g->p.bytes_global + g->p.bytes_process + + g->p.bytes_process_locked + g->p.bytes_thread; + + update_curr_cpu(task_nr, work_done); + bytes_done += work_done; + + if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs) + continue; + + td->loops_done = l; + + gettimeofday(&stop, NULL); + + /* Check whether our max runtime timed out: */ + if (g->p.nr_secs) { + timersub(&stop, &start0, &diff); + if ((u32)diff.tv_sec >= g->p.nr_secs) { + g->stop_work = true; + break; + } + } + + /* Update the summary at most once per second: */ + if (start.tv_sec == stop.tv_sec) + continue; + + /* + * Perturb the first task's equilibrium every g->p.perturb_secs seconds, + * by migrating to CPU#0: + */ + if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) { + cpu_set_t orig_mask; + int target_cpu; + int this_cpu; + + last_perturbance = stop.tv_sec; + + /* + * Depending on where we are running, move into + * the other half of the system, to create some + * real disturbance: + */ + this_cpu = g->threads[task_nr].curr_cpu; + if (this_cpu < g->p.nr_cpus/2) + 
target_cpu = g->p.nr_cpus-1; + else + target_cpu = 0; + + orig_mask = bind_to_cpu(target_cpu); + + /* Here we are running on the target CPU already */ + if (details >= 1) + printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu); + + bind_to_cpumask(orig_mask); + } + + if (details >= 3) { + timersub(&stop, &start, &diff); + runtime_ns_max = diff.tv_sec * 1000000000; + runtime_ns_max += diff.tv_usec * 1000; + + if (details >= 0) { + printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n", + process_nr, thread_nr, runtime_ns_max / bytes_done, val); + } + fflush(stdout); + } + if (!last_task) + continue; + + timersub(&stop, &start0, &diff); + runtime_ns_max = diff.tv_sec * 1000000000ULL; + runtime_ns_max += diff.tv_usec * 1000ULL; + + show_summary(runtime_ns_max, l, &convergence); + } + + gettimeofday(&stop, NULL); + timersub(&stop, &start0, &diff); + td->runtime_ns = diff.tv_sec * 1000000000ULL; + td->runtime_ns += diff.tv_usec * 1000ULL; + + free_data(thread_data, g->p.bytes_thread); + + pthread_mutex_lock(&g->stop_work_mutex); + g->bytes_done += bytes_done; + pthread_mutex_unlock(&g->stop_work_mutex); + + return NULL; +} + +/* + * A worker process starts a couple of threads: + */ +static void worker_process(int process_nr) +{ + pthread_mutex_t process_lock; + struct thread_data *td; + pthread_t *pthreads; + u8 *process_data; + int task_nr; + int ret; + int t; + + pthread_mutex_init(&process_lock, NULL); + set_taskname("process %d", process_nr); + + /* + * Pick up the memory policy and the CPU binding of our first thread, + * so that we initialize memory accordingly: + */ + task_nr = process_nr*g->p.nr_threads; + td = g->threads + task_nr; + + bind_to_memnode(td->bind_node); + bind_to_cpumask(td->bind_cpumask); + + pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t)); + process_data = setup_private_data(g->p.bytes_process); + + if (g->p.show_details >= 3) { + printf(" # process %2d global mem: %p, process mem: %p\n", + process_nr, g->data, process_data); + } + + for (t = 0; t < g->p.nr_threads; t++) { + task_nr = process_nr*g->p.nr_threads + t; + td = g->threads + task_nr; + + td->process_data = process_data; + td->process_nr = process_nr; + td->thread_nr = t; + td->task_nr = task_nr; + td->val = rand(); + td->curr_cpu = -1; + td->process_lock = &process_lock; + + ret = pthread_create(pthreads + t, NULL, worker_thread, td); + BUG_ON(ret); + } + + for (t = 0; t < g->p.nr_threads; t++) { + ret = pthread_join(pthreads[t], NULL); + BUG_ON(ret); + } + + free_data(process_data, g->p.bytes_process); + free(pthreads); +} + +static void print_summary(void) +{ + if (g->p.show_details < 0) + return; + + printf("\n ###\n"); + printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", + g->p.nr_tasks, g->p.nr_tasks == 1 ? 
"task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); + printf(" # %5dx %5ldMB global shared mem operations\n", + g->p.nr_loops, g->p.bytes_global/1024/1024); + printf(" # %5dx %5ldMB process shared mem operations\n", + g->p.nr_loops, g->p.bytes_process/1024/1024); + printf(" # %5dx %5ldMB thread local mem operations\n", + g->p.nr_loops, g->p.bytes_thread/1024/1024); + + printf(" ###\n"); + + printf("\n ###\n"); fflush(stdout); +} + +static void init_thread_data(void) +{ + ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; + int t; + + g->threads = zalloc_shared_data(size); + + for (t = 0; t < g->p.nr_tasks; t++) { + struct thread_data *td = g->threads + t; + int cpu; + + /* Allow all nodes by default: */ + td->bind_node = -1; + + /* Allow all CPUs by default: */ + CPU_ZERO(&td->bind_cpumask); + for (cpu = 0; cpu < g->p.nr_cpus; cpu++) + CPU_SET(cpu, &td->bind_cpumask); + } +} + +static void deinit_thread_data(void) +{ + ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; + + free_data(g->threads, size); +} + +static int init(void) +{ + g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0); + + /* Copy over options: */ + g->p = p0; + + g->p.nr_cpus = numa_num_configured_cpus(); + + g->p.nr_nodes = numa_max_node() + 1; + + /* char array in count_process_nodes(): */ + BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0); + + if (g->p.show_quiet && !g->p.show_details) + g->p.show_details = -1; + + /* Some memory should be specified: */ + if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str) + return -1; + + if (g->p.mb_global_str) { + g->p.mb_global = atof(g->p.mb_global_str); + BUG_ON(g->p.mb_global < 0); + } + + if (g->p.mb_proc_str) { + g->p.mb_proc = atof(g->p.mb_proc_str); + BUG_ON(g->p.mb_proc < 0); + } + + if (g->p.mb_proc_locked_str) { + g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str); + BUG_ON(g->p.mb_proc_locked < 0); + BUG_ON(g->p.mb_proc_locked > g->p.mb_proc); + } + + if (g->p.mb_thread_str) { + g->p.mb_thread = atof(g->p.mb_thread_str); + BUG_ON(g->p.mb_thread < 0); + } + + BUG_ON(g->p.nr_threads <= 0); + BUG_ON(g->p.nr_proc <= 0); + + g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads; + + g->p.bytes_global = g->p.mb_global *1024L*1024L; + g->p.bytes_process = g->p.mb_proc *1024L*1024L; + g->p.bytes_process_locked = g->p.mb_proc_locked *1024L*1024L; + g->p.bytes_thread = g->p.mb_thread *1024L*1024L; + + g->data = setup_shared_data(g->p.bytes_global); + + /* Startup serialization: */ + init_global_mutex(&g->start_work_mutex); + init_global_mutex(&g->startup_mutex); + init_global_mutex(&g->startup_done_mutex); + init_global_mutex(&g->stop_work_mutex); + + init_thread_data(); + + tprintf("#\n"); + if (parse_setup_cpu_list() || parse_setup_node_list()) + return -1; + tprintf("#\n"); + + print_summary(); + + return 0; +} + +static void deinit(void) +{ + free_data(g->data, g->p.bytes_global); + g->data = NULL; + + deinit_thread_data(); + + free_data(g, sizeof(*g)); + g = NULL; +} + +/* + * Print a short or long result, depending on the verbosity setting: + */ +static void print_res(const char *name, double val, + const char *txt_unit, const char *txt_short, const char *txt_long) +{ + if (!name) + name = "main,"; + + if (g->p.show_quiet) + printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); + else + printf(" %14.3f %s\n", val, txt_long); +} + +static int __bench_numa(const char *name) +{ + struct timeval start, stop, diff; + u64 runtime_ns_min, runtime_ns_sum; + pid_t *pids, pid, wpid; + double delta_runtime; + double runtime_avg; + double 
runtime_sec_max; + double runtime_sec_min; + int wait_stat; + double bytes; + int i, t; + + if (init()) + return -1; + + pids = zalloc(g->p.nr_proc * sizeof(*pids)); + pid = -1; + + /* All threads try to acquire it, this way we can wait for them to start up: */ + pthread_mutex_lock(&g->start_work_mutex); + + if (g->p.serialize_startup) { + tprintf(" #\n"); + tprintf(" # Startup synchronization: ..."); fflush(stdout); + } + + gettimeofday(&start, NULL); + + for (i = 0; i < g->p.nr_proc; i++) { + pid = fork(); + dprintf(" # process %2d: PID %d\n", i, pid); + + BUG_ON(pid < 0); + if (!pid) { + /* Child process: */ + worker_process(i); + + exit(0); + } + pids[i] = pid; + + } + /* Wait for all the threads to start up: */ + while (g->nr_tasks_started != g->p.nr_tasks) + usleep(1000); + + BUG_ON(g->nr_tasks_started != g->p.nr_tasks); + + if (g->p.serialize_startup) { + double startup_sec; + + pthread_mutex_lock(&g->startup_done_mutex); + + /* This will start all threads: */ + pthread_mutex_unlock(&g->start_work_mutex); + + /* This mutex is locked - the last started thread will wake us: */ + pthread_mutex_lock(&g->startup_done_mutex); + + gettimeofday(&stop, NULL); + + timersub(&stop, &start, &diff); + + startup_sec = diff.tv_sec * 1000000000.0; + startup_sec += diff.tv_usec * 1000.0; + startup_sec /= 1e9; + + tprintf(" threads initialized in %.6f seconds.\n", startup_sec); + tprintf(" #\n"); + + start = stop; + pthread_mutex_unlock(&g->startup_done_mutex); + } else { + gettimeofday(&start, NULL); + } + + /* Parent process: */ + + + for (i = 0; i < g->p.nr_proc; i++) { + wpid = waitpid(pids[i], &wait_stat, 0); + BUG_ON(wpid < 0); + BUG_ON(!WIFEXITED(wait_stat)); + + } + + runtime_ns_sum = 0; + runtime_ns_min = -1LL; + + for (t = 0; t < g->p.nr_tasks; t++) { + u64 thread_runtime_ns = g->threads[t].runtime_ns; + + runtime_ns_sum += thread_runtime_ns; + runtime_ns_min = min(thread_runtime_ns, runtime_ns_min); + } + + gettimeofday(&stop, NULL); + timersub(&stop, &start, &diff); + + BUG_ON(bench_format != BENCH_FORMAT_DEFAULT); + + tprintf("\n ###\n"); + tprintf("\n"); + + runtime_sec_max = diff.tv_sec * 1000000000.0; + runtime_sec_max += diff.tv_usec * 1000.0; + runtime_sec_max /= 1e9; + + runtime_sec_min = runtime_ns_min/1e9; + + bytes = g->bytes_done; + runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / 1e9; + + if (g->p.measure_convergence) { + print_res(name, runtime_sec_max, + "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge"); + } + + print_res(name, runtime_sec_max, + "secs,", "runtime-max/thread", "secs slowest (max) thread-runtime"); + + print_res(name, runtime_sec_min, + "secs,", "runtime-min/thread", "secs fastest (min) thread-runtime"); + + print_res(name, runtime_avg, + "secs,", "runtime-avg/thread", "secs average thread-runtime"); + + delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0; + print_res(name, delta_runtime / runtime_sec_max * 100.0, + "%,", "spread-runtime/thread", "% difference between max/avg runtime"); + + print_res(name, bytes / g->p.nr_tasks / 1e9, + "GB,", "data/thread", "GB data processed, per thread"); + + print_res(name, bytes / 1e9, + "GB,", "data-total", "GB data processed, total"); + + print_res(name, runtime_sec_max * 1e9 / (bytes / g->p.nr_tasks), + "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime"); + + print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max, + "GB/sec,", "thread-speed", "GB/sec/thread speed"); + + print_res(name, bytes / runtime_sec_max / 1e9, + "GB/sec,", "total-speed", "GB/sec total speed"); + + 
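The startup gate above only works across fork() because every mutex lives in the MAP_SHARED region allocated by init() and is made PTHREAD_PROCESS_SHARED by init_global_mutex(). A stand-alone sketch of that cross-process gate (hypothetical demo with minimal error handling; build with -pthread):

#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>

int main(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t *gate;
        pid_t pid;

        /* The mutex must sit in memory visible to both processes. */
        gate = mmap(NULL, sizeof(*gate), PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (gate == MAP_FAILED)
                return 1;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(gate, &attr);

        pthread_mutex_lock(gate);       /* hold the gate closed before forking */

        pid = fork();
        if (!pid) {
                /* Child: blocks here until the parent opens the gate. */
                pthread_mutex_lock(gate);
                printf("child %d released\n", getpid());
                pthread_mutex_unlock(gate);
                _exit(0);
        }

        sleep(1);                       /* pretend to finish other setup */
        pthread_mutex_unlock(gate);     /* start the worker */

        waitpid(pid, NULL, 0);
        pthread_mutex_destroy(gate);
        munmap(gate, sizeof(*gate));
        return 0;
}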
free(pids); + + deinit(); + + return 0; +} + +#define MAX_ARGS 50 + +static int command_size(const char **argv) +{ + int size = 0; + + while (*argv) { + size++; + argv++; + } + + BUG_ON(size >= MAX_ARGS); + + return size; +} + +static void init_params(struct params *p, const char *name, int argc, const char **argv) +{ + int i; + + printf("\n # Running %s \"perf bench numa", name); + + for (i = 0; i < argc; i++) + printf(" %s", argv[i]); + + printf("\"\n"); + + memset(p, 0, sizeof(*p)); + + /* Initialize nonzero defaults: */ + + p->serialize_startup = 1; + p->data_reads = true; + p->data_writes = true; + p->data_backwards = true; + p->data_rand_walk = true; + p->nr_loops = -1; + p->init_random = true; + p->mb_global_str = "1"; + p->nr_proc = 1; + p->nr_threads = 1; + p->nr_secs = 5; + p->run_all = argc == 1; +} + +static int run_bench_numa(const char *name, const char **argv) +{ + int argc = command_size(argv); + + init_params(&p0, name, argc, argv); + argc = parse_options(argc, argv, options, bench_numa_usage, 0); + if (argc) + goto err; + + if (__bench_numa(name)) + goto err; + + return 0; + +err: + return -1; +} + +#define OPT_BW_RAM "-s", "20", "-zZq", "--thp", " 1", "--no-data_rand_walk" +#define OPT_BW_RAM_NOTHP OPT_BW_RAM, "--thp", "-1" + +#define OPT_CONV "-s", "100", "-zZ0qcm", "--thp", " 1" +#define OPT_CONV_NOTHP OPT_CONV, "--thp", "-1" + +#define OPT_BW "-s", "20", "-zZ0q", "--thp", " 1" +#define OPT_BW_NOTHP OPT_BW, "--thp", "-1" + +/* + * The built-in test-suite executed by "perf bench numa -a". + * + * (A minimum of 4 nodes and 16 GB of RAM is recommended.) + */ +static const char *tests[][MAX_ARGS] = { + /* Basic single-stream NUMA bandwidth measurements: */ + { "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024", + "-C" , "0", "-M", "0", OPT_BW_RAM }, + { "RAM-bw-local-NOTHP,", + "mem", "-p", "1", "-t", "1", "-P", "1024", + "-C" , "0", "-M", "0", OPT_BW_RAM_NOTHP }, + { "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024", + "-C" , "0", "-M", "1", OPT_BW_RAM }, + + /* 2-stream NUMA bandwidth measurements: */ + { "RAM-bw-local-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024", + "-C", "0,2", "-M", "0x2", OPT_BW_RAM }, + { "RAM-bw-remote-2x,", "mem", "-p", "2", "-t", "1", "-P", "1024", + "-C", "0,2", "-M", "1x2", OPT_BW_RAM }, + + /* Cross-stream NUMA bandwidth measurement: */ + { "RAM-bw-cross,", "mem", "-p", "2", "-t", "1", "-P", "1024", + "-C", "0,8", "-M", "1,0", OPT_BW_RAM }, + + /* Convergence latency measurements: */ + { " 1x3-convergence,", "mem", "-p", "1", "-t", "3", "-P", "512", OPT_CONV }, + { " 1x4-convergence,", "mem", "-p", "1", "-t", "4", "-P", "512", OPT_CONV }, + { " 1x6-convergence,", "mem", "-p", "1", "-t", "6", "-P", "1020", OPT_CONV }, + { " 2x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV }, + { " 3x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV }, + { " 4x4-convergence,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV }, + { " 4x4-convergence-NOTHP,", + "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV_NOTHP }, + { " 4x6-convergence,", "mem", "-p", "4", "-t", "6", "-P", "1020", OPT_CONV }, + { " 4x8-convergence,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_CONV }, + { " 8x4-convergence,", "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV }, + { " 8x4-convergence-NOTHP,", + "mem", "-p", "8", "-t", "4", "-P", "512", OPT_CONV_NOTHP }, + { " 3x1-convergence,", "mem", "-p", "3", "-t", "1", "-P", "512", OPT_CONV }, + { " 4x1-convergence,", "mem", "-p", "4", "-t", "1", "-P", "512", OPT_CONV }, + 
{ " 8x1-convergence,", "mem", "-p", "8", "-t", "1", "-P", "512", OPT_CONV }, + { "16x1-convergence,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_CONV }, + { "32x1-convergence,", "mem", "-p", "32", "-t", "1", "-P", "128", OPT_CONV }, + + /* Various NUMA process/thread layout bandwidth measurements: */ + { " 2x1-bw-process,", "mem", "-p", "2", "-t", "1", "-P", "1024", OPT_BW }, + { " 3x1-bw-process,", "mem", "-p", "3", "-t", "1", "-P", "1024", OPT_BW }, + { " 4x1-bw-process,", "mem", "-p", "4", "-t", "1", "-P", "1024", OPT_BW }, + { " 8x1-bw-process,", "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW }, + { " 8x1-bw-process-NOTHP,", + "mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW_NOTHP }, + { "16x1-bw-process,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_BW }, + + { " 4x1-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW }, + { " 8x1-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW }, + { "16x1-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW }, + { "32x1-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW }, + + { " 2x3-bw-thread,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW }, + { " 4x4-bw-thread,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW }, + { " 4x6-bw-thread,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW }, + { " 4x8-bw-thread,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW }, + { " 4x8-bw-thread-NOTHP,", + "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW_NOTHP }, + { " 3x3-bw-thread,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW }, + { " 5x5-bw-thread,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW }, + + { "2x16-bw-thread,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW }, + { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW }, + + { "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW }, + { "numa02-bw-NOTHP,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW_NOTHP }, + { "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW }, + { "numa01-bw-thread-NOTHP,", + "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW_NOTHP }, +}; + +static int bench_all(void) +{ + int nr = ARRAY_SIZE(tests); + int ret; + int i; + + ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'"); + BUG_ON(ret < 0); + + for (i = 0; i < nr; i++) { + run_bench_numa(tests[i][0], tests[i] + 1); + } + + printf("\n"); + + return 0; +} + +int bench_numa(int argc, const char **argv, const char *prefix __maybe_unused) +{ + init_params(&p0, "main,", argc, argv); + argc = parse_options(argc, argv, options, bench_numa_usage, 0); + if (argc) + goto err; + + if (p0.run_all) + return bench_all(); + + if (__bench_numa(NULL)) + goto err; + + return 0; + +err: + usage_with_options(numa_usage, options); + return -1; +} diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index 69cfba8d4c6..07a8d7646a1 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c @@ -7,9 +7,7 @@ * Based on pipe-test-1m.c by Ingo Molnar <mingo@redhat.com> * http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> - * */ - #include "../perf.h" #include "../util/util.h" #include "../util/parse-options.h" @@ -28,12 +26,24 @@ #include <sys/time.h> #include <sys/types.h> +#include <pthread.h> + +struct thread_data { + int nr; + int pipe_read; + int pipe_write; + pthread_t pthread; +}; + #define LOOPS_DEFAULT 1000000 -static int loops = LOOPS_DEFAULT; +static int loops = 
LOOPS_DEFAULT; + +/* Use processes by default: */ +static bool threaded; static const struct option options[] = { - OPT_INTEGER('l', "loop", &loops, - "Specify number of loops"), + OPT_INTEGER('l', "loop", &loops, "Specify number of loops"), + OPT_BOOLEAN('T', "threaded", &threaded, "Specify threads/process based task setup"), OPT_END() }; @@ -42,13 +52,37 @@ static const char * const bench_sched_pipe_usage[] = { NULL }; -int bench_sched_pipe(int argc, const char **argv, - const char *prefix __maybe_unused) +static void *worker_thread(void *__tdata) { - int pipe_1[2], pipe_2[2]; + struct thread_data *td = __tdata; int m = 0, i; + int ret; + + for (i = 0; i < loops; i++) { + if (!td->nr) { + ret = read(td->pipe_read, &m, sizeof(int)); + BUG_ON(ret != sizeof(int)); + ret = write(td->pipe_write, &m, sizeof(int)); + BUG_ON(ret != sizeof(int)); + } else { + ret = write(td->pipe_write, &m, sizeof(int)); + BUG_ON(ret != sizeof(int)); + ret = read(td->pipe_read, &m, sizeof(int)); + BUG_ON(ret != sizeof(int)); + } + } + + return NULL; +} + +int bench_sched_pipe(int argc, const char **argv, const char *prefix __maybe_unused) +{ + struct thread_data threads[2], *td; + int pipe_1[2], pipe_2[2]; struct timeval start, stop, diff; unsigned long long result_usec = 0; + int nr_threads = 2; + int t; /* * why does "ret" exist? @@ -58,43 +92,66 @@ int bench_sched_pipe(int argc, const char **argv, int __maybe_unused ret, wait_stat; pid_t pid, retpid __maybe_unused; - argc = parse_options(argc, argv, options, - bench_sched_pipe_usage, 0); + argc = parse_options(argc, argv, options, bench_sched_pipe_usage, 0); BUG_ON(pipe(pipe_1)); BUG_ON(pipe(pipe_2)); - pid = fork(); - assert(pid >= 0); - gettimeofday(&start, NULL); - if (!pid) { - for (i = 0; i < loops; i++) { - ret = read(pipe_1[0], &m, sizeof(int)); - ret = write(pipe_2[1], &m, sizeof(int)); - } - } else { - for (i = 0; i < loops; i++) { - ret = write(pipe_1[1], &m, sizeof(int)); - ret = read(pipe_2[0], &m, sizeof(int)); + for (t = 0; t < nr_threads; t++) { + td = threads + t; + + td->nr = t; + + if (t == 0) { + td->pipe_read = pipe_1[0]; + td->pipe_write = pipe_2[1]; + } else { + td->pipe_write = pipe_1[1]; + td->pipe_read = pipe_2[0]; } } - gettimeofday(&stop, NULL); - timersub(&stop, &start, &diff); - if (pid) { + if (threaded) { + + for (t = 0; t < nr_threads; t++) { + td = threads + t; + + ret = pthread_create(&td->pthread, NULL, worker_thread, td); + BUG_ON(ret); + } + + for (t = 0; t < nr_threads; t++) { + td = threads + t; + + ret = pthread_join(td->pthread, NULL); + BUG_ON(ret); + } + + } else { + pid = fork(); + assert(pid >= 0); + + if (!pid) { + worker_thread(threads + 0); + exit(0); + } else { + worker_thread(threads + 1); + } + retpid = waitpid(pid, &wait_stat, 0); assert((retpid == pid) && WIFEXITED(wait_stat)); - } else { - exit(0); } + gettimeofday(&stop, NULL); + timersub(&stop, &start, &diff); + switch (bench_format) { case BENCH_FORMAT_DEFAULT: - printf("# Executed %d pipe operations between two tasks\n\n", - loops); + printf("# Executed %d pipe operations between two %s\n\n", + loops, threaded ? 
"threads" : "processes"); result_usec = diff.tv_sec * 1000000; result_usec += diff.tv_usec; diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 9ea38540b87..1ec429fef2b 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -28,22 +28,25 @@ #include "util/hist.h" #include "util/session.h" #include "util/tool.h" +#include "util/data.h" +#include "arch/common.h" +#include <dlfcn.h> #include <linux/bitmap.h> struct perf_annotate { struct perf_tool tool; - char const *input_name; - bool force, use_tui, use_stdio; + bool force, use_tui, use_stdio, use_gtk; bool full_paths; bool print_line; + bool skip_missing; const char *sym_hist_filter; const char *cpu_list; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; static int perf_evsel__add_sample(struct perf_evsel *evsel, - struct perf_sample *sample, + struct perf_sample *sample __maybe_unused, struct addr_location *al, struct perf_annotate *ann) { @@ -62,21 +65,13 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel, return 0; } - he = __hists__add_entry(&evsel->hists, al, NULL, 1); + he = __hists__add_entry(&evsel->hists, al, NULL, NULL, NULL, 1, 1, 0, + true); if (he == NULL) return -ENOMEM; - ret = 0; - if (he->ms.sym != NULL) { - struct annotation *notes = symbol__annotation(he->ms.sym); - if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) - return -ENOMEM; - - ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); - } - - evsel->hists.stats.total_period += sample->period; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + hists__inc_nr_samples(&evsel->hists, true); return ret; } @@ -89,8 +84,7 @@ static int process_sample_event(struct perf_tool *tool, struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool); struct addr_location al; - if (perf_event__preprocess_sample(event, machine, &al, sample, - symbol__annotate_init) < 0) { + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; @@ -108,17 +102,19 @@ static int process_sample_event(struct perf_tool *tool, return 0; } -static int hist_entry__tty_annotate(struct hist_entry *he, int evidx, +static int hist_entry__tty_annotate(struct hist_entry *he, + struct perf_evsel *evsel, struct perf_annotate *ann) { - return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx, + return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel, ann->print_line, ann->full_paths, 0, 0); } -static void hists__find_annotations(struct hists *self, int evidx, +static void hists__find_annotations(struct hists *hists, + struct perf_evsel *evsel, struct perf_annotate *ann) { - struct rb_node *nd = rb_first(&self->entries), *next; + struct rb_node *nd = rb_first(&hists->entries), *next; int key = K_RIGHT; while (nd) { @@ -138,9 +134,32 @@ find_next: continue; } - if (use_browser > 0) { - key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0); + if (use_browser == 2) { + int ret; + int (*annotate)(struct hist_entry *he, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt); + + annotate = dlsym(perf_gtk_handle, + "hist_entry__gtk_annotate"); + if (annotate == NULL) { + ui__error("GTK browser not found!\n"); + return; + } + + ret = annotate(he, evsel, NULL); + if (!ret || !ann->skip_missing) + return; + + /* skip missing symbols */ + nd = rb_next(nd); + } else if (use_browser == 1) { + key = hist_entry__tui_annotate(he, evsel, NULL); switch 
(key) { + case -1: + if (!ann->skip_missing) + return; + /* fall through */ case K_RIGHT: next = rb_next(nd); break; @@ -154,15 +173,14 @@ find_next: if (next != NULL) nd = next; } else { - hist_entry__tty_annotate(he, evidx, ann); + hist_entry__tty_annotate(he, evsel, ann); nd = rb_next(nd); /* * Since we have a hist_entry per IP for the same * symbol, free he->ms.sym->src to signal we already * processed this symbol. */ - free(notes->src); - notes->src = NULL; + zfree(¬es->src); } } } @@ -173,12 +191,18 @@ static int __cmd_annotate(struct perf_annotate *ann) struct perf_session *session; struct perf_evsel *pos; u64 total_nr_samples; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + .force = ann->force, + }; - session = perf_session__new(ann->input_name, O_RDONLY, - ann->force, false, &ann->tool); + session = perf_session__new(&file, false, &ann->tool); if (session == NULL) return -ENOMEM; + machines__set_symbol_filter(&session->machines, symbol__annotate_init); + if (ann->cpu_list) { ret = perf_session__cpu_bitmap(session, ann->cpu_list, ann->cpu_bitmap); @@ -186,6 +210,12 @@ static int __cmd_annotate(struct perf_annotate *ann) goto out_delete; } + if (!objdump_path) { + ret = perf_session_env__lookup_objdump(&session->header.env); + if (ret) + goto out_delete; + } + ret = perf_session__process_events(session, &ann->tool); if (ret) goto out_delete; @@ -202,22 +232,40 @@ static int __cmd_annotate(struct perf_annotate *ann) perf_session__fprintf_dsos(session, stdout); total_nr_samples = 0; - list_for_each_entry(pos, &session->evlist->entries, node) { + evlist__for_each(session->evlist, pos) { struct hists *hists = &pos->hists; u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; if (nr_samples > 0) { total_nr_samples += nr_samples; - hists__collapse_resort(hists); + hists__collapse_resort(hists, NULL); hists__output_resort(hists); - hists__find_annotations(hists, pos->idx, ann); + + if (symbol_conf.event_group && + !perf_evsel__is_group_leader(pos)) + continue; + + hists__find_annotations(hists, pos, ann); } } if (total_nr_samples == 0) { - ui__error("The %s file has no samples!\n", session->filename); + ui__error("The %s file has no samples!\n", file.path); goto out_delete; } + + if (use_browser == 2) { + void (*show_annotations)(void); + + show_annotations = dlsym(perf_gtk_handle, + "perf_gtk__show_annotations"); + if (show_annotations == NULL) { + ui__error("GTK browser not found!\n"); + goto out_delete; + } + show_annotations(); + } + out_delete: /* * Speed up the exit process, for large files this can @@ -245,14 +293,16 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) .tool = { .sample = process_sample_event, .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, .comm = perf_event__process_comm, - .fork = perf_event__process_task, + .exit = perf_event__process_exit, + .fork = perf_event__process_fork, .ordered_samples = true, .ordering_requires_timestamps = true, }, }; const struct option options[] = { - OPT_STRING('i', "input", &annotate.input_name, "file", + OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), @@ -263,6 +313,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), + OPT_BOOLEAN(0, "gtk", 
&annotate.use_gtk, "Use the GTK interface"), OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"), OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, @@ -273,6 +324,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "print matching source lines (may be slow)"), OPT_BOOLEAN('P', "full-paths", &annotate.full_paths, "Don't shorten the displayed pathnames"), + OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, + "Skip symbols that cannot be annotated"), OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), @@ -284,6 +337,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) "Specify disassembler style (e.g. -M intel for intel syntax)"), OPT_STRING(0, "objdump", &objdump_path, "path", "objdump binary to use for disassembly and annotations"), + OPT_BOOLEAN(0, "group", &symbol_conf.event_group, + "Show event group information together"), OPT_END() }; @@ -293,6 +348,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) use_browser = 0; else if (annotate.use_tui) use_browser = 1; + else if (annotate.use_gtk) + use_browser = 2; setup_browser(true); @@ -302,11 +359,12 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) if (symbol__init() < 0) return -1; - setup_sorting(annotate_usage, options); + if (setup_sorting() < 0) + usage_with_options(annotate_usage, options); if (argc) { /* - * Special case: if there's an argument left then assume tha + * Special case: if there's an argument left then assume that * it's a symbol filter: */ if (argc > 1) diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index cae9a5fd2ec..1e6e7771054 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -1,21 +1,19 @@ /* - * * builtin-bench.c * - * General benchmarking subsystem provided by perf + * General benchmarking collections provided by perf * * Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> - * */ /* + * Available benchmark collection list: * - * Available subsystem list: - * sched ... scheduler and IPC mechanism + * sched ... scheduler and IPC performance * mem ... memory access performance - * + * numa ... NUMA scheduling and MM performance + * futex ... 
Futex performance */ - #include "perf.h" #include "util/util.h" #include "util/parse-options.h" @@ -25,95 +23,101 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> +#include <sys/prctl.h> + +typedef int (*bench_fn_t)(int argc, const char **argv, const char *prefix); + +struct bench { + const char *name; + const char *summary; + bench_fn_t fn; +}; + +#ifdef HAVE_LIBNUMA_SUPPORT +static struct bench numa_benchmarks[] = { + { "mem", "Benchmark for NUMA workloads", bench_numa }, + { "all", "Test all NUMA benchmarks", NULL }, + { NULL, NULL, NULL } +}; +#endif -struct bench_suite { - const char *name; - const char *summary; - int (*fn)(int, const char **, const char *); +static struct bench sched_benchmarks[] = { + { "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging }, + { "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe }, + { "all", "Test all scheduler benchmarks", NULL }, + { NULL, NULL, NULL } }; - \ -/* sentinel: easy for help */ -#define suite_all { "all", "Test all benchmark suites", NULL } - -static struct bench_suite sched_suites[] = { - { "messaging", - "Benchmark for scheduler and IPC mechanisms", - bench_sched_messaging }, - { "pipe", - "Flood of communication over pipe() between two processes", - bench_sched_pipe }, - suite_all, - { NULL, - NULL, - NULL } + +static struct bench mem_benchmarks[] = { + { "memcpy", "Benchmark for memcpy()", bench_mem_memcpy }, + { "memset", "Benchmark for memset() tests", bench_mem_memset }, + { "all", "Test all memory benchmarks", NULL }, + { NULL, NULL, NULL } }; -static struct bench_suite mem_suites[] = { - { "memcpy", - "Simple memory copy in various ways", - bench_mem_memcpy }, - { "memset", - "Simple memory set in various ways", - bench_mem_memset }, - suite_all, - { NULL, - NULL, - NULL } +static struct bench futex_benchmarks[] = { + { "hash", "Benchmark for futex hash table", bench_futex_hash }, + { "wake", "Benchmark for futex wake calls", bench_futex_wake }, + { "requeue", "Benchmark for futex requeue calls", bench_futex_requeue }, + { "all", "Test all futex benchmarks", NULL }, + { NULL, NULL, NULL } }; -struct bench_subsys { - const char *name; - const char *summary; - struct bench_suite *suites; +struct collection { + const char *name; + const char *summary; + struct bench *benchmarks; }; -static struct bench_subsys subsystems[] = { - { "sched", - "scheduler and IPC mechanism", - sched_suites }, - { "mem", - "memory access performance", - mem_suites }, - { "all", /* sentinel: easy for help */ - "all benchmark subsystem", - NULL }, - { NULL, - NULL, - NULL } +static struct collection collections[] = { + { "sched", "Scheduler and IPC benchmarks", sched_benchmarks }, + { "mem", "Memory access benchmarks", mem_benchmarks }, +#ifdef HAVE_LIBNUMA_SUPPORT + { "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks }, +#endif + {"futex", "Futex stressing benchmarks", futex_benchmarks }, + { "all", "All benchmarks", NULL }, + { NULL, NULL, NULL } }; -static void dump_suites(int subsys_index) +/* Iterate over all benchmark collections: */ +#define for_each_collection(coll) \ + for (coll = collections; coll->name; coll++) + +/* Iterate over all benchmarks within a collection: */ +#define for_each_bench(coll, bench) \ + for (bench = coll->benchmarks; bench && bench->name; bench++) + +static void dump_benchmarks(struct collection *coll) { - int i; + struct bench *bench; - printf("# List of available suites for %s...\n\n", - subsystems[subsys_index].name); + printf("\n # List of available 
benchmarks for collection '%s':\n\n", coll->name); - for (i = 0; subsystems[subsys_index].suites[i].name; i++) - printf("%14s: %s\n", - subsystems[subsys_index].suites[i].name, - subsystems[subsys_index].suites[i].summary); + for_each_bench(coll, bench) + printf("%14s: %s\n", bench->name, bench->summary); printf("\n"); - return; } static const char *bench_format_str; + +/* Output/formatting style, exported to benchmark modules: */ int bench_format = BENCH_FORMAT_DEFAULT; static const struct option bench_options[] = { - OPT_STRING('f', "format", &bench_format_str, "default", - "Specify format style"), + OPT_STRING('f', "format", &bench_format_str, "default", "Specify format style"), OPT_END() }; static const char * const bench_usage[] = { - "perf bench [<common options>] <subsystem> <suite> [<options>]", + "perf bench [<common options>] <collection> <benchmark> [<options>]", NULL }; static void print_usage(void) { + struct collection *coll; int i; printf("Usage: \n"); @@ -121,11 +125,10 @@ static void print_usage(void) printf("\t%s\n", bench_usage[i]); printf("\n"); - printf("# List of available subsystems...\n\n"); + printf(" # List of all available benchmark collections:\n\n"); - for (i = 0; subsystems[i].name; i++) - printf("%14s: %s\n", - subsystems[i].name, subsystems[i].summary); + for_each_collection(coll) + printf("%14s: %s\n", coll->name, coll->summary); printf("\n"); } @@ -142,43 +145,74 @@ static int bench_str2int(const char *str) return BENCH_FORMAT_UNKNOWN; } -static void all_suite(struct bench_subsys *subsys) /* FROM HERE */ +/* + * Run a specific benchmark but first rename the running task's ->comm[] + * to something meaningful: + */ +static int run_bench(const char *coll_name, const char *bench_name, bench_fn_t fn, + int argc, const char **argv, const char *prefix) { - int i; + int size; + char *name; + int ret; + + size = strlen(coll_name) + 1 + strlen(bench_name) + 1; + + name = zalloc(size); + BUG_ON(!name); + + scnprintf(name, size, "%s-%s", coll_name, bench_name); + + prctl(PR_SET_NAME, name); + argv[0] = name; + + ret = fn(argc, argv, prefix); + + free(name); + + return ret; +} + +static void run_collection(struct collection *coll) +{ + struct bench *bench; const char *argv[2]; - struct bench_suite *suites = subsys->suites; argv[1] = NULL; /* * TODO: - * preparing preset parameters for + * + * Preparing preset parameters for * embedded, ordinary PC, HPC, etc... - * will be helpful + * would be helpful. */ - for (i = 0; suites[i].fn; i++) { - printf("# Running %s/%s benchmark...\n", - subsys->name, - suites[i].name); - - argv[1] = suites[i].name; - suites[i].fn(1, argv, NULL); + for_each_bench(coll, bench) { + if (!bench->fn) + break; + printf("# Running %s/%s benchmark...\n", coll->name, bench->name); + fflush(stdout); + + argv[1] = bench->name; + run_bench(coll->name, bench->name, bench->fn, 1, argv, NULL); printf("\n"); } } -static void all_subsystem(void) +static void run_all_collections(void) { - int i; - for (i = 0; subsystems[i].suites; i++) - all_suite(&subsystems[i]); + struct collection *coll; + + for_each_collection(coll) + run_collection(coll); } int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) { - int i, j, status = 0; + struct collection *coll; + int ret = 0; if (argc < 2) { - /* No subsystem specified. */ + /* No collection specified. 
*/ print_usage(); goto end; } @@ -188,7 +222,7 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) bench_format = bench_str2int(bench_format_str); if (bench_format == BENCH_FORMAT_UNKNOWN) { - printf("Unknown format descriptor:%s\n", bench_format_str); + printf("Unknown format descriptor: '%s'\n", bench_format_str); goto end; } @@ -198,51 +232,51 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) } if (!strcmp(argv[0], "all")) { - all_subsystem(); + run_all_collections(); goto end; } - for (i = 0; subsystems[i].name; i++) { - if (strcmp(subsystems[i].name, argv[0])) + for_each_collection(coll) { + struct bench *bench; + + if (strcmp(coll->name, argv[0])) continue; if (argc < 2) { - /* No suite specified. */ - dump_suites(i); + /* No bench specified. */ + dump_benchmarks(coll); goto end; } if (!strcmp(argv[1], "all")) { - all_suite(&subsystems[i]); + run_collection(coll); goto end; } - for (j = 0; subsystems[i].suites[j].name; j++) { - if (strcmp(subsystems[i].suites[j].name, argv[1])) + for_each_bench(coll, bench) { + if (strcmp(bench->name, argv[1])) continue; if (bench_format == BENCH_FORMAT_DEFAULT) - printf("# Running %s/%s benchmark...\n", - subsystems[i].name, - subsystems[i].suites[j].name); - status = subsystems[i].suites[j].fn(argc - 1, - argv + 1, prefix); + printf("# Running '%s/%s' benchmark:\n", coll->name, bench->name); + fflush(stdout); + ret = run_bench(coll->name, bench->name, bench->fn, argc-1, argv+1, prefix); goto end; } if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) { - dump_suites(i); + dump_benchmarks(coll); goto end; } - printf("Unknown suite:%s for %s\n", argv[1], argv[0]); - status = 1; + printf("Unknown benchmark: '%s' for collection '%s'\n", argv[1], argv[0]); + ret = 1; goto end; } - printf("Unknown subsystem:%s\n", argv[0]); - status = 1; + printf("Unknown collection: '%s'\n", argv[0]); + ret = 1; end: - return status; + return ret; } diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index d37e077f4b1..b22dbb16f87 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -6,6 +6,11 @@ * Copyright (C) 2010, Red Hat Inc. 
* Copyright (C) 2010, Arnaldo Carvalho de Melo <acme@redhat.com> */ +#include <sys/types.h> +#include <sys/time.h> +#include <time.h> +#include <dirent.h> +#include <unistd.h> #include "builtin.h" #include "perf.h" #include "util/cache.h" @@ -13,8 +18,169 @@ #include "util/header.h" #include "util/parse-options.h" #include "util/strlist.h" +#include "util/build-id.h" +#include "util/session.h" #include "util/symbol.h" +static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid) +{ + char root_dir[PATH_MAX]; + char notes[PATH_MAX]; + u8 build_id[BUILD_ID_SIZE]; + char *p; + + strlcpy(root_dir, proc_dir, sizeof(root_dir)); + + p = strrchr(root_dir, '/'); + if (!p) + return -1; + *p = '\0'; + + scnprintf(notes, sizeof(notes), "%s/sys/kernel/notes", root_dir); + + if (sysfs__read_build_id(notes, build_id, sizeof(build_id))) + return -1; + + build_id__sprintf(build_id, sizeof(build_id), sbuildid); + + return 0; +} + +static int build_id_cache__kcore_dir(char *dir, size_t sz) +{ + struct timeval tv; + struct tm tm; + char dt[32]; + + if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm)) + return -1; + + if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm)) + return -1; + + scnprintf(dir, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000); + + return 0; +} + +static bool same_kallsyms_reloc(const char *from_dir, char *to_dir) +{ + char from[PATH_MAX]; + char to[PATH_MAX]; + const char *name; + u64 addr1 = 0, addr2 = 0; + int i; + + scnprintf(from, sizeof(from), "%s/kallsyms", from_dir); + scnprintf(to, sizeof(to), "%s/kallsyms", to_dir); + + for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { + addr1 = kallsyms__get_function_start(from, name); + if (addr1) + break; + } + + if (name) + addr2 = kallsyms__get_function_start(to, name); + + return addr1 == addr2; +} + +static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir, + size_t to_dir_sz) +{ + char from[PATH_MAX]; + char to[PATH_MAX]; + char to_subdir[PATH_MAX]; + struct dirent *dent; + int ret = -1; + DIR *d; + + d = opendir(to_dir); + if (!d) + return -1; + + scnprintf(from, sizeof(from), "%s/modules", from_dir); + + while (1) { + dent = readdir(d); + if (!dent) + break; + if (dent->d_type != DT_DIR) + continue; + scnprintf(to, sizeof(to), "%s/%s/modules", to_dir, + dent->d_name); + scnprintf(to_subdir, sizeof(to_subdir), "%s/%s", + to_dir, dent->d_name); + if (!compare_proc_modules(from, to) && + same_kallsyms_reloc(from_dir, to_subdir)) { + strlcpy(to_dir, to_subdir, to_dir_sz); + ret = 0; + break; + } + } + + closedir(d); + + return ret; +} + +static int build_id_cache__add_kcore(const char *filename, const char *debugdir) +{ + char dir[32], sbuildid[BUILD_ID_SIZE * 2 + 1]; + char from_dir[PATH_MAX], to_dir[PATH_MAX]; + char *p; + + strlcpy(from_dir, filename, sizeof(from_dir)); + + p = strrchr(from_dir, '/'); + if (!p || strcmp(p + 1, "kcore")) + return -1; + *p = '\0'; + + if (build_id_cache__kcore_buildid(from_dir, sbuildid)) + return -1; + + scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s", + debugdir, sbuildid); + + if (!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) { + pr_debug("same kcore found in %s\n", to_dir); + return 0; + } + + if (build_id_cache__kcore_dir(dir, sizeof(dir))) + return -1; + + scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s", + debugdir, sbuildid, dir); + + if (mkdir_p(to_dir, 0755)) + return -1; + + if (kcore_copy(from_dir, to_dir)) { + /* Remove YYYYmmddHHMMSShh directory */ + if (!rmdir(to_dir)) { + p = strrchr(to_dir, 
'/'); + if (p) + *p = '\0'; + /* Try to remove buildid directory */ + if (!rmdir(to_dir)) { + p = strrchr(to_dir, '/'); + if (p) + *p = '\0'; + /* Try to remove [kernel.kcore] directory */ + rmdir(to_dir); + } + } + return -1; + } + + pr_debug("kcore added to build-id cache directory %s\n", to_dir); + + return 0; +} + static int build_id_cache__add_file(const char *filename, const char *debugdir) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; @@ -57,19 +223,96 @@ static int build_id_cache__remove_file(const char *filename, return err; } +static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused) +{ + char filename[PATH_MAX]; + u8 build_id[BUILD_ID_SIZE]; + + if (dso__build_id_filename(dso, filename, sizeof(filename)) && + filename__read_build_id(filename, build_id, + sizeof(build_id)) != sizeof(build_id)) { + if (errno == ENOENT) + return false; + + pr_warning("Problems with %s file, consider removing it from the cache\n", + filename); + } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) { + pr_warning("Problems with %s file, consider removing it from the cache\n", + filename); + } + + return true; +} + +static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp) +{ + struct perf_data_file file = { + .path = filename, + .mode = PERF_DATA_MODE_READ, + .force = force, + }; + struct perf_session *session = perf_session__new(&file, false, NULL); + if (session == NULL) + return -1; + + perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0); + perf_session__delete(session); + + return 0; +} + +static int build_id_cache__update_file(const char *filename, + const char *debugdir) +{ + u8 build_id[BUILD_ID_SIZE]; + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + int err; + + if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) { + pr_debug("Couldn't read a build-id in %s\n", filename); + return -1; + } + + build_id__sprintf(build_id, sizeof(build_id), sbuild_id); + err = build_id_cache__remove_s(sbuild_id, debugdir); + if (!err) { + err = build_id_cache__add_s(sbuild_id, debugdir, filename, + false, false); + } + if (verbose) + pr_info("Updating %s %s: %s\n", sbuild_id, filename, + err ? 
"FAIL" : "Ok"); + + return err; +} + int cmd_buildid_cache(int argc, const char **argv, const char *prefix __maybe_unused) { struct strlist *list; struct str_node *pos; + int ret = 0; + bool force = false; char debugdir[PATH_MAX]; char const *add_name_list_str = NULL, - *remove_name_list_str = NULL; + *remove_name_list_str = NULL, + *missing_filename = NULL, + *update_name_list_str = NULL, + *kcore_filename; + const struct option buildid_cache_options[] = { OPT_STRING('a', "add", &add_name_list_str, "file list", "file(s) to add"), + OPT_STRING('k', "kcore", &kcore_filename, + "file", "kcore file to add"), OPT_STRING('r', "remove", &remove_name_list_str, "file list", "file(s) to remove"), + OPT_STRING('M', "missing", &missing_filename, "file", + "to find missing build ids in the cache"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), + OPT_STRING('u', "update", &update_name_list_str, "file list", + "file(s) to update"), OPT_INCR('v', "verbose", &verbose, "be more verbose"), OPT_END() }; @@ -124,5 +367,30 @@ int cmd_buildid_cache(int argc, const char **argv, } } - return 0; + if (missing_filename) + ret = build_id_cache__fprintf_missing(missing_filename, force, stdout); + + if (update_name_list_str) { + list = strlist__new(true, update_name_list_str); + if (list) { + strlist__for_each(pos, list) + if (build_id_cache__update_file(pos->s, debugdir)) { + if (errno == ENOENT) { + pr_debug("%s wasn't in the cache\n", + pos->s); + continue; + } + pr_warning("Couldn't update %s: %s\n", + pos->s, strerror(errno)); + } + + strlist__delete(list); + } + } + + if (kcore_filename && + build_id_cache__add_kcore(kcore_filename, debugdir)) + pr_warning("Couldn't add %s\n", kcore_filename); + + return ret; } diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index a0e94fffa03..ed3873b3e23 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -15,6 +15,7 @@ #include "util/parse-options.h" #include "util/session.h" #include "util/symbol.h" +#include "util/data.h" static int sysfs__fprintf_build_id(FILE *fp) { @@ -44,34 +45,40 @@ static int filename__fprintf_build_id(const char *name, FILE *fp) return fprintf(fp, "%s\n", sbuild_id); } -static int perf_session__list_build_ids(const char *input_name, - bool force, bool with_hits) +static bool dso__skip_buildid(struct dso *dso, int with_hits) +{ + return with_hits && !dso->hit; +} + +static int perf_session__list_build_ids(bool force, bool with_hits) { struct perf_session *session; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + .force = force, + }; symbol__elf_init(); - - session = perf_session__new(input_name, O_RDONLY, force, false, - &build_id__mark_dso_hit_ops); - if (session == NULL) - return -1; - /* * See if this is an ELF file first: */ - if (filename__fprintf_build_id(session->filename, stdout)) + if (filename__fprintf_build_id(input_name, stdout)) goto out; + session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops); + if (session == NULL) + return -1; /* * in pipe-mode, the only way to get the buildids is to parse * the record stream. 
Buildids are stored as RECORD_HEADER_BUILD_ID */ - if (with_hits || session->fd_pipe) + if (with_hits || perf_data_file__is_pipe(&file)) perf_session__process_events(session, &build_id__mark_dso_hit_ops); - perf_session__fprintf_dsos_buildid(session, stdout, with_hits); -out: + perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits); perf_session__delete(session); +out: return 0; } @@ -81,7 +88,6 @@ int cmd_buildid_list(int argc, const char **argv, bool show_kernel = false; bool with_hits = false; bool force = false; - const char *input_name = NULL; const struct option options[] = { OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"), OPT_STRING('i', "input", &input_name, "file", "input file name"), @@ -101,5 +107,5 @@ int cmd_buildid_list(int argc, const char **argv, if (show_kernel) return sysfs__fprintf_build_id(stdout); - return perf_session__list_build_ids(input_name, force, with_hits); + return perf_session__list_build_ids(force, with_hits); } diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index a0b531c14b9..9a5a035cb42 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -16,19 +16,306 @@ #include "util/sort.h" #include "util/symbol.h" #include "util/util.h" +#include "util/data.h" #include <stdlib.h> +#include <math.h> -static char const *input_old = "perf.data.old", - *input_new = "perf.data"; -static char diff__default_sort_order[] = "dso,symbol"; -static bool force; -static bool show_displacement; +/* Diff command specific HPP columns. */ +enum { + PERF_HPP_DIFF__BASELINE, + PERF_HPP_DIFF__PERIOD, + PERF_HPP_DIFF__PERIOD_BASELINE, + PERF_HPP_DIFF__DELTA, + PERF_HPP_DIFF__RATIO, + PERF_HPP_DIFF__WEIGHTED_DIFF, + PERF_HPP_DIFF__FORMULA, -static int hists__add_entry(struct hists *self, - struct addr_location *al, u64 period) + PERF_HPP_DIFF__MAX_INDEX +}; + +struct diff_hpp_fmt { + struct perf_hpp_fmt fmt; + int idx; + char *header; + int header_width; +}; + +struct data__file { + struct perf_session *session; + struct perf_data_file file; + int idx; + struct hists *hists; + struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX]; +}; + +static struct data__file *data__files; +static int data__files_cnt; + +#define data__for_each_file_start(i, d, s) \ + for (i = s, d = &data__files[s]; \ + i < data__files_cnt; \ + i++, d = &data__files[i]) + +#define data__for_each_file(i, d) data__for_each_file_start(i, d, 0) +#define data__for_each_file_new(i, d) data__for_each_file_start(i, d, 1) + +static bool force; +static bool show_period; +static bool show_formula; +static bool show_baseline_only; +static unsigned int sort_compute; + +static s64 compute_wdiff_w1; +static s64 compute_wdiff_w2; + +enum { + COMPUTE_DELTA, + COMPUTE_RATIO, + COMPUTE_WEIGHTED_DIFF, + COMPUTE_MAX, +}; + +const char *compute_names[COMPUTE_MAX] = { + [COMPUTE_DELTA] = "delta", + [COMPUTE_RATIO] = "ratio", + [COMPUTE_WEIGHTED_DIFF] = "wdiff", +}; + +static int compute; + +static int compute_2_hpp[COMPUTE_MAX] = { + [COMPUTE_DELTA] = PERF_HPP_DIFF__DELTA, + [COMPUTE_RATIO] = PERF_HPP_DIFF__RATIO, + [COMPUTE_WEIGHTED_DIFF] = PERF_HPP_DIFF__WEIGHTED_DIFF, +}; + +#define MAX_COL_WIDTH 70 + +static struct header_column { + const char *name; + int width; +} columns[PERF_HPP_DIFF__MAX_INDEX] = { + [PERF_HPP_DIFF__BASELINE] = { + .name = "Baseline", + }, + [PERF_HPP_DIFF__PERIOD] = { + .name = "Period", + .width = 14, + }, + [PERF_HPP_DIFF__PERIOD_BASELINE] = { + .name = "Base period", + .width = 14, + }, + [PERF_HPP_DIFF__DELTA] = { + .name = 
"Delta", + .width = 7, + }, + [PERF_HPP_DIFF__RATIO] = { + .name = "Ratio", + .width = 14, + }, + [PERF_HPP_DIFF__WEIGHTED_DIFF] = { + .name = "Weighted diff", + .width = 14, + }, + [PERF_HPP_DIFF__FORMULA] = { + .name = "Formula", + .width = MAX_COL_WIDTH, + } +}; + +static int setup_compute_opt_wdiff(char *opt) +{ + char *w1_str = opt; + char *w2_str; + + int ret = -EINVAL; + + if (!opt) + goto out; + + w2_str = strchr(opt, ','); + if (!w2_str) + goto out; + + *w2_str++ = 0x0; + if (!*w2_str) + goto out; + + compute_wdiff_w1 = strtol(w1_str, NULL, 10); + compute_wdiff_w2 = strtol(w2_str, NULL, 10); + + if (!compute_wdiff_w1 || !compute_wdiff_w2) + goto out; + + pr_debug("compute wdiff w1(%" PRId64 ") w2(%" PRId64 ")\n", + compute_wdiff_w1, compute_wdiff_w2); + + ret = 0; + + out: + if (ret) + pr_err("Failed: wrong weight data, use 'wdiff:w1,w2'\n"); + + return ret; +} + +static int setup_compute_opt(char *opt) +{ + if (compute == COMPUTE_WEIGHTED_DIFF) + return setup_compute_opt_wdiff(opt); + + if (opt) { + pr_err("Failed: extra option specified '%s'", opt); + return -EINVAL; + } + + return 0; +} + +static int setup_compute(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + int *cp = (int *) opt->value; + char *cstr = (char *) str; + char buf[50]; + unsigned i; + char *option; + + if (!str) { + *cp = COMPUTE_DELTA; + return 0; + } + + option = strchr(str, ':'); + if (option) { + unsigned len = option++ - str; + + /* + * The str data are not writeable, so we need + * to use another buffer. + */ + + /* No option value is longer. */ + if (len >= sizeof(buf)) + return -EINVAL; + + strncpy(buf, str, len); + buf[len] = 0x0; + cstr = buf; + } + + for (i = 0; i < COMPUTE_MAX; i++) + if (!strcmp(cstr, compute_names[i])) { + *cp = i; + return setup_compute_opt(option); + } + + pr_err("Failed: '%s' is not computation method " + "(use 'delta','ratio' or 'wdiff')\n", str); + return -EINVAL; +} + +static double period_percent(struct hist_entry *he, u64 period) +{ + u64 total = hists__total_period(he->hists); + + return (period * 100.0) / total; +} + +static double compute_delta(struct hist_entry *he, struct hist_entry *pair) +{ + double old_percent = period_percent(he, he->stat.period); + double new_percent = period_percent(pair, pair->stat.period); + + pair->diff.period_ratio_delta = new_percent - old_percent; + pair->diff.computed = true; + return pair->diff.period_ratio_delta; +} + +static double compute_ratio(struct hist_entry *he, struct hist_entry *pair) +{ + double old_period = he->stat.period ?: 1; + double new_period = pair->stat.period; + + pair->diff.computed = true; + pair->diff.period_ratio = new_period / old_period; + return pair->diff.period_ratio; +} + +static s64 compute_wdiff(struct hist_entry *he, struct hist_entry *pair) +{ + u64 old_period = he->stat.period; + u64 new_period = pair->stat.period; + + pair->diff.computed = true; + pair->diff.wdiff = new_period * compute_wdiff_w2 - + old_period * compute_wdiff_w1; + + return pair->diff.wdiff; +} + +static int formula_delta(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) +{ + u64 he_total = he->hists->stats.total_period; + u64 pair_total = pair->hists->stats.total_period; + + if (symbol_conf.filter_relative) { + he_total = he->hists->stats.total_non_filtered_period; + pair_total = pair->hists->stats.total_non_filtered_period; + } + return scnprintf(buf, size, + "(%" PRIu64 " * 100 / %" PRIu64 ") - " + "(%" PRIu64 " * 100 / %" PRIu64 ")", + pair->stat.period, pair_total, + 
he->stat.period, he_total); +} + +static int formula_ratio(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) +{ + double old_period = he->stat.period; + double new_period = pair->stat.period; + + return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period); +} + +static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) +{ + u64 old_period = he->stat.period; + u64 new_period = pair->stat.period; + + return scnprintf(buf, size, + "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")", + new_period, compute_wdiff_w2, old_period, compute_wdiff_w1); +} + +static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair, + char *buf, size_t size) +{ + switch (compute) { + case COMPUTE_DELTA: + return formula_delta(he, pair, buf, size); + case COMPUTE_RATIO: + return formula_ratio(he, pair, buf, size); + case COMPUTE_WEIGHTED_DIFF: + return formula_wdiff(he, pair, buf, size); + default: + BUG_ON(1); + } + + return -1; +} + +static int hists__add_entry(struct hists *hists, + struct addr_location *al, u64 period, + u64 weight, u64 transaction) { - if (__hists__add_entry(self, al, NULL, period) != NULL) + if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight, + transaction, true) != NULL) return 0; return -ENOMEM; } @@ -41,21 +328,28 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused, { struct addr_location al; - if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; } - if (al.filtered || al.sym == NULL) - return 0; - - if (hists__add_entry(&evsel->hists, &al, sample->period)) { + if (hists__add_entry(&evsel->hists, &al, sample->period, + sample->weight, sample->transaction)) { pr_warning("problem incrementing symbol period, skipping event\n"); return -1; } + /* + * The total_period is updated here before going to the output + * tree since normally only the baseline hists will call + * hists__output_resort() and precompute needs the total + * period in order to sort entries by percentage delta. 
+ */ evsel->hists.stats.total_period += sample->period; + if (!al.filtered) + evsel->hists.stats.total_non_filtered_period += sample->period; + return 0; } @@ -63,15 +357,195 @@ static struct perf_tool tool = { .sample = diff__process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, + .exit = perf_event__process_exit, + .fork = perf_event__process_fork, .lost = perf_event__process_lost, .ordered_samples = true, .ordering_requires_timestamps = true, }; -static void insert_hist_entry_by_name(struct rb_root *root, - struct hist_entry *he) +static struct perf_evsel *evsel_match(struct perf_evsel *evsel, + struct perf_evlist *evlist) +{ + struct perf_evsel *e; + + evlist__for_each(evlist, e) { + if (perf_evsel__match2(evsel, e)) + return e; + } + + return NULL; +} + +static void perf_evlist__collapse_resort(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + struct hists *hists = &evsel->hists; + + hists__collapse_resort(hists, NULL); + } +} + +static struct hist_entry* +get_pair_data(struct hist_entry *he, struct data__file *d) +{ + if (hist_entry__has_pairs(he)) { + struct hist_entry *pair; + + list_for_each_entry(pair, &he->pairs.head, pairs.node) + if (pair->hists == d->hists) + return pair; + } + + return NULL; +} + +static struct hist_entry* +get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt) +{ + void *ptr = dfmt - dfmt->idx; + struct data__file *d = container_of(ptr, struct data__file, fmt); + + return get_pair_data(he, d); +} + +static void hists__baseline_only(struct hists *hists) +{ + struct rb_root *root; + struct rb_node *next; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + next = rb_first(root); + while (next != NULL) { + struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); + + next = rb_next(&he->rb_node_in); + if (!hist_entry__next_pair(he)) { + rb_erase(&he->rb_node_in, root); + hist_entry__free(he); + } + } +} + +static void hists__precompute(struct hists *hists) +{ + struct rb_root *root; + struct rb_node *next; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + next = rb_first(root); + while (next != NULL) { + struct hist_entry *he, *pair; + + he = rb_entry(next, struct hist_entry, rb_node_in); + next = rb_next(&he->rb_node_in); + + pair = get_pair_data(he, &data__files[sort_compute]); + if (!pair) + continue; + + switch (compute) { + case COMPUTE_DELTA: + compute_delta(he, pair); + break; + case COMPUTE_RATIO: + compute_ratio(he, pair); + break; + case COMPUTE_WEIGHTED_DIFF: + compute_wdiff(he, pair); + break; + default: + BUG_ON(1); + } + } +} + +static int64_t cmp_doubles(double l, double r) +{ + if (l > r) + return -1; + else if (l < r) + return 1; + else + return 0; +} + +static int64_t +__hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, + int c) +{ + switch (c) { + case COMPUTE_DELTA: + { + double l = left->diff.period_ratio_delta; + double r = right->diff.period_ratio_delta; + + return cmp_doubles(l, r); + } + case COMPUTE_RATIO: + { + double l = left->diff.period_ratio; + double r = right->diff.period_ratio; + + return cmp_doubles(l, r); + } + case COMPUTE_WEIGHTED_DIFF: + { + s64 l = left->diff.wdiff; + s64 r = right->diff.wdiff; + + return r - l; + } + default: + BUG_ON(1); + } + + return 0; +} + +static int64_t +hist_entry__cmp_compute(struct 
hist_entry *left, struct hist_entry *right, + int c) +{ + bool pairs_left = hist_entry__has_pairs(left); + bool pairs_right = hist_entry__has_pairs(right); + struct hist_entry *p_right, *p_left; + + if (!pairs_left && !pairs_right) + return 0; + + if (!pairs_left || !pairs_right) + return pairs_left ? -1 : 1; + + p_left = get_pair_data(left, &data__files[sort_compute]); + p_right = get_pair_data(right, &data__files[sort_compute]); + + if (!p_left && !p_right) + return 0; + + if (!p_left || !p_right) + return p_left ? -1 : 1; + + /* + * We have 2 entries of same kind, let's + * make the data comparison. + */ + return __hist_entry__cmp_compute(p_left, p_right, c); +} + +static void insert_hist_entry_by_compute(struct rb_root *root, + struct hist_entry *he, + int c) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -80,7 +554,7 @@ static void insert_hist_entry_by_name(struct rb_root *root, while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); - if (hist_entry__cmp(he, iter) < 0) + if (hist_entry__cmp_compute(he, iter, c) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -90,139 +564,150 @@ static void insert_hist_entry_by_name(struct rb_root *root, rb_insert_color(&he->rb_node, root); } -static void hists__name_resort(struct hists *self, bool sort) +static void hists__compute_resort(struct hists *hists) { - unsigned long position = 1; - struct rb_root tmp = RB_ROOT; - struct rb_node *next = rb_first(&self->entries); + struct rb_root *root; + struct rb_node *next; - while (next != NULL) { - struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node); + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; - next = rb_next(&n->rb_node); - n->position = position++; + hists->entries = RB_ROOT; + next = rb_first(root); - if (sort) { - rb_erase(&n->rb_node, &self->entries); - insert_hist_entry_by_name(&tmp, n); - } - } + hists__reset_stats(hists); + hists__reset_col_len(hists); - if (sort) - self->entries = tmp; -} + while (next != NULL) { + struct hist_entry *he; -static struct hist_entry *hists__find_entry(struct hists *self, - struct hist_entry *he) -{ - struct rb_node *n = self->entries.rb_node; + he = rb_entry(next, struct hist_entry, rb_node_in); + next = rb_next(&he->rb_node_in); - while (n) { - struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node); - int64_t cmp = hist_entry__cmp(he, iter); + insert_hist_entry_by_compute(&hists->entries, he, compute); + hists__inc_stats(hists, he); - if (cmp < 0) - n = n->rb_left; - else if (cmp > 0) - n = n->rb_right; - else - return iter; + if (!he->filtered) + hists__calc_col_len(hists, he); } - - return NULL; } -static void hists__match(struct hists *older, struct hists *newer) +static void hists__process(struct hists *hists) { - struct rb_node *nd; + if (show_baseline_only) + hists__baseline_only(hists); - for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) { - struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node); - pos->pair = hists__find_entry(older, pos); + if (sort_compute) { + hists__precompute(hists); + hists__compute_resort(hists); + } else { + hists__output_resort(hists); } + + hists__fprintf(hists, true, 0, 0, 0, stdout); } -static struct perf_evsel *evsel_match(struct perf_evsel *evsel, - struct perf_evlist *evlist) +static void data__fprintf(void) { - struct perf_evsel *e; + struct data__file *d; + int i; - list_for_each_entry(e, &evlist->entries, node) - if (perf_evsel__match2(evsel, e)) - return e; + 
fprintf(stdout, "# Data files:\n"); - return NULL; + data__for_each_file(i, d) + fprintf(stdout, "# [%d] %s %s\n", + d->idx, d->file.path, + !d->idx ? "(Baseline)" : ""); + + fprintf(stdout, "#\n"); } -static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name) +static void data_process(void) { - struct perf_evsel *evsel; + struct perf_evlist *evlist_base = data__files[0].session->evlist; + struct perf_evsel *evsel_base; + bool first = true; - list_for_each_entry(evsel, &evlist->entries, node) { - struct hists *hists = &evsel->hists; + evlist__for_each(evlist_base, evsel_base) { + struct data__file *d; + int i; - hists__output_resort(hists); + data__for_each_file_new(i, d) { + struct perf_evlist *evlist = d->session->evlist; + struct perf_evsel *evsel; - /* - * The hists__name_resort only sets possition - * if name is false. - */ - if (name || ((!name) && show_displacement)) - hists__name_resort(hists, name); + evsel = evsel_match(evsel_base, evlist); + if (!evsel) + continue; + + d->hists = &evsel->hists; + + hists__match(&evsel_base->hists, &evsel->hists); + + if (!show_baseline_only) + hists__link(&evsel_base->hists, + &evsel->hists); + } + + fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n", + perf_evsel__name(evsel_base)); + + first = false; + + if (verbose || data__files_cnt > 2) + data__fprintf(); + + hists__process(&evsel_base->hists); } } -static int __cmd_diff(void) +static void data__free(struct data__file *d) { - int ret, i; -#define older (session[0]) -#define newer (session[1]) - struct perf_session *session[2]; - struct perf_evlist *evlist_new, *evlist_old; - struct perf_evsel *evsel; - bool first = true; + int col; - older = perf_session__new(input_old, O_RDONLY, force, false, - &tool); - newer = perf_session__new(input_new, O_RDONLY, force, false, - &tool); - if (session[0] == NULL || session[1] == NULL) - return -ENOMEM; + for (col = 0; col < PERF_HPP_DIFF__MAX_INDEX; col++) { + struct diff_hpp_fmt *fmt = &d->fmt[col]; - for (i = 0; i < 2; ++i) { - ret = perf_session__process_events(session[i], &tool); - if (ret) - goto out_delete; + zfree(&fmt->header); } +} - evlist_old = older->evlist; - evlist_new = newer->evlist; +static int __cmd_diff(void) +{ + struct data__file *d; + int ret = -EINVAL, i; - perf_evlist__resort_hists(evlist_old, true); - perf_evlist__resort_hists(evlist_new, false); + data__for_each_file(i, d) { + d->session = perf_session__new(&d->file, false, &tool); + if (!d->session) { + pr_err("Failed to open %s\n", d->file.path); + ret = -ENOMEM; + goto out_delete; + } - list_for_each_entry(evsel, &evlist_new->entries, node) { - struct perf_evsel *evsel_old; + ret = perf_session__process_events(d->session, &tool); + if (ret) { + pr_err("Failed to process %s\n", d->file.path); + goto out_delete; + } - evsel_old = evsel_match(evsel, evlist_old); - if (!evsel_old) - continue; + perf_evlist__collapse_resort(d->session->evlist); + } - fprintf(stdout, "%s# Event '%s'\n#\n", first ? 
"" : "\n", - perf_evsel__name(evsel)); + data_process(); - first = false; + out_delete: + data__for_each_file(i, d) { + if (d->session) + perf_session__delete(d->session); - hists__match(&evsel_old->hists, &evsel->hists); - hists__fprintf(&evsel->hists, true, 0, 0, stdout); + data__free(d); } -out_delete: - for (i = 0; i < 2; ++i) - perf_session__delete(session[i]); + free(data__files); return ret; -#undef older -#undef newer } static const char * const diff_usage[] = { @@ -233,8 +718,16 @@ static const char * const diff_usage[] = { static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('M', "displacement", &show_displacement, - "Show position displacement relative to baseline"), + OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, + "Show only items with match in baseline"), + OPT_CALLBACK('c', "compute", &compute, + "delta,ratio,wdiff:w1,w2 (default delta)", + "Entries differential computation selection", + setup_compute), + OPT_BOOLEAN('p', "period", &show_period, + "Show period values."), + OPT_BOOLEAN('F', "formula", &show_formula, + "Show formula."), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), @@ -247,60 +740,425 @@ static const struct option options[] = { OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol, parent"), + "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..." + " Please refer the man page for the complete list."), OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' is reserved."), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), + OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."), + OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", + "How to display percentage of filtered entries", parse_filter_percentage), OPT_END() }; +static double baseline_percent(struct hist_entry *he) +{ + u64 total = hists__total_period(he->hists); + + return 100.0 * he->stat.period / total; +} + +static int hpp__color_baseline(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, struct hist_entry *he) +{ + struct diff_hpp_fmt *dfmt = + container_of(fmt, struct diff_hpp_fmt, fmt); + double percent = baseline_percent(he); + char pfmt[20] = " "; + + if (!he->dummy) { + scnprintf(pfmt, 20, "%%%d.2f%%%%", dfmt->header_width - 1); + return percent_color_snprintf(hpp->buf, hpp->size, + pfmt, percent); + } else + return scnprintf(hpp->buf, hpp->size, "%*s", + dfmt->header_width, pfmt); +} + +static int hpp__entry_baseline(struct hist_entry *he, char *buf, size_t size) +{ + double percent = baseline_percent(he); + const char *fmt = symbol_conf.field_sep ? 
"%.2f" : "%6.2f%%"; + int ret = 0; + + if (!he->dummy) + ret = scnprintf(buf, size, fmt, percent); + + return ret; +} + +static int __hpp__color_compare(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, struct hist_entry *he, + int comparison_method) +{ + struct diff_hpp_fmt *dfmt = + container_of(fmt, struct diff_hpp_fmt, fmt); + struct hist_entry *pair = get_pair_fmt(he, dfmt); + double diff; + s64 wdiff; + char pfmt[20] = " "; + + if (!pair) + goto dummy_print; + + switch (comparison_method) { + case COMPUTE_DELTA: + if (pair->diff.computed) + diff = pair->diff.period_ratio_delta; + else + diff = compute_delta(he, pair); + + if (fabs(diff) < 0.01) + goto dummy_print; + scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1); + return percent_color_snprintf(hpp->buf, hpp->size, + pfmt, diff); + case COMPUTE_RATIO: + if (he->dummy) + goto dummy_print; + if (pair->diff.computed) + diff = pair->diff.period_ratio; + else + diff = compute_ratio(he, pair); + + scnprintf(pfmt, 20, "%%%d.6f", dfmt->header_width); + return value_color_snprintf(hpp->buf, hpp->size, + pfmt, diff); + case COMPUTE_WEIGHTED_DIFF: + if (he->dummy) + goto dummy_print; + if (pair->diff.computed) + wdiff = pair->diff.wdiff; + else + wdiff = compute_wdiff(he, pair); + + scnprintf(pfmt, 20, "%%14ld", dfmt->header_width); + return color_snprintf(hpp->buf, hpp->size, + get_percent_color(wdiff), + pfmt, wdiff); + default: + BUG_ON(1); + } +dummy_print: + return scnprintf(hpp->buf, hpp->size, "%*s", + dfmt->header_width, pfmt); +} + +static int hpp__color_delta(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, struct hist_entry *he) +{ + return __hpp__color_compare(fmt, hpp, he, COMPUTE_DELTA); +} + +static int hpp__color_ratio(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, struct hist_entry *he) +{ + return __hpp__color_compare(fmt, hpp, he, COMPUTE_RATIO); +} + +static int hpp__color_wdiff(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, struct hist_entry *he) +{ + return __hpp__color_compare(fmt, hpp, he, COMPUTE_WEIGHTED_DIFF); +} + +static void +hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size) +{ + switch (idx) { + case PERF_HPP_DIFF__PERIOD_BASELINE: + scnprintf(buf, size, "%" PRIu64, he->stat.period); + break; + + default: + break; + } +} + +static void +hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair, + int idx, char *buf, size_t size) +{ + double diff; + double ratio; + s64 wdiff; + + switch (idx) { + case PERF_HPP_DIFF__DELTA: + if (pair->diff.computed) + diff = pair->diff.period_ratio_delta; + else + diff = compute_delta(he, pair); + + if (fabs(diff) >= 0.01) + scnprintf(buf, size, "%+4.2F%%", diff); + break; + + case PERF_HPP_DIFF__RATIO: + /* No point for ratio number if we are dummy.. */ + if (he->dummy) + break; + + if (pair->diff.computed) + ratio = pair->diff.period_ratio; + else + ratio = compute_ratio(he, pair); + + if (ratio > 0.0) + scnprintf(buf, size, "%14.6F", ratio); + break; + + case PERF_HPP_DIFF__WEIGHTED_DIFF: + /* No point for wdiff number if we are dummy.. 
*/ + if (he->dummy) + break; + + if (pair->diff.computed) + wdiff = pair->diff.wdiff; + else + wdiff = compute_wdiff(he, pair); + + if (wdiff != 0) + scnprintf(buf, size, "%14ld", wdiff); + break; + + case PERF_HPP_DIFF__FORMULA: + formula_fprintf(he, pair, buf, size); + break; + + case PERF_HPP_DIFF__PERIOD: + scnprintf(buf, size, "%" PRIu64, pair->stat.period); + break; + + default: + BUG_ON(1); + }; +} + +static void +__hpp__entry_global(struct hist_entry *he, struct diff_hpp_fmt *dfmt, + char *buf, size_t size) +{ + struct hist_entry *pair = get_pair_fmt(he, dfmt); + int idx = dfmt->idx; + + /* baseline is special */ + if (idx == PERF_HPP_DIFF__BASELINE) + hpp__entry_baseline(he, buf, size); + else { + if (pair) + hpp__entry_pair(he, pair, idx, buf, size); + else + hpp__entry_unpair(he, idx, buf, size); + } +} + +static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct diff_hpp_fmt *dfmt = + container_of(_fmt, struct diff_hpp_fmt, fmt); + char buf[MAX_COL_WIDTH] = " "; + + __hpp__entry_global(he, dfmt, buf, MAX_COL_WIDTH); + + if (symbol_conf.field_sep) + return scnprintf(hpp->buf, hpp->size, "%s", buf); + else + return scnprintf(hpp->buf, hpp->size, "%*s", + dfmt->header_width, buf); +} + +static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel __maybe_unused) +{ + struct diff_hpp_fmt *dfmt = + container_of(fmt, struct diff_hpp_fmt, fmt); + + BUG_ON(!dfmt->header); + return scnprintf(hpp->buf, hpp->size, dfmt->header); +} + +static int hpp__width(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp __maybe_unused, + struct perf_evsel *evsel __maybe_unused) +{ + struct diff_hpp_fmt *dfmt = + container_of(fmt, struct diff_hpp_fmt, fmt); + + BUG_ON(dfmt->header_width <= 0); + return dfmt->header_width; +} + +static void init_header(struct data__file *d, struct diff_hpp_fmt *dfmt) +{ +#define MAX_HEADER_NAME 100 + char buf_indent[MAX_HEADER_NAME]; + char buf[MAX_HEADER_NAME]; + const char *header = NULL; + int width = 0; + + BUG_ON(dfmt->idx >= PERF_HPP_DIFF__MAX_INDEX); + header = columns[dfmt->idx].name; + width = columns[dfmt->idx].width; + + /* Only our defined HPP fmts should appear here. */ + BUG_ON(!header); + + if (data__files_cnt > 2) + scnprintf(buf, MAX_HEADER_NAME, "%s/%d", header, d->idx); + +#define NAME (data__files_cnt > 2 ? buf : header) + dfmt->header_width = width; + width = (int) strlen(NAME); + if (dfmt->header_width < width) + dfmt->header_width = width; + + scnprintf(buf_indent, MAX_HEADER_NAME, "%*s", + dfmt->header_width, NAME); + + dfmt->header = strdup(buf_indent); +#undef MAX_HEADER_NAME +#undef NAME +} + +static void data__hpp_register(struct data__file *d, int idx) +{ + struct diff_hpp_fmt *dfmt = &d->fmt[idx]; + struct perf_hpp_fmt *fmt = &dfmt->fmt; + + dfmt->idx = idx; + + fmt->header = hpp__header; + fmt->width = hpp__width; + fmt->entry = hpp__entry_global; + + /* TODO more colors */ + switch (idx) { + case PERF_HPP_DIFF__BASELINE: + fmt->color = hpp__color_baseline; + break; + case PERF_HPP_DIFF__DELTA: + fmt->color = hpp__color_delta; + break; + case PERF_HPP_DIFF__RATIO: + fmt->color = hpp__color_ratio; + break; + case PERF_HPP_DIFF__WEIGHTED_DIFF: + fmt->color = hpp__color_wdiff; + break; + default: + break; + } + + init_header(d, dfmt); + perf_hpp__column_register(fmt); +} + static void ui_init(void) { - perf_hpp__init(); + struct data__file *d; + int i; + + data__for_each_file(i, d) { - /* No overhead column. 
*/ - perf_hpp__column_enable(PERF_HPP__OVERHEAD, false); + /* + * Baseline or compute realted columns: + * + * PERF_HPP_DIFF__BASELINE + * PERF_HPP_DIFF__DELTA + * PERF_HPP_DIFF__RATIO + * PERF_HPP_DIFF__WEIGHTED_DIFF + */ + data__hpp_register(d, i ? compute_2_hpp[compute] : + PERF_HPP_DIFF__BASELINE); - /* Display baseline/delta/displacement columns. */ - perf_hpp__column_enable(PERF_HPP__BASELINE, true); - perf_hpp__column_enable(PERF_HPP__DELTA, true); + /* + * And the rest: + * + * PERF_HPP_DIFF__FORMULA + * PERF_HPP_DIFF__PERIOD + * PERF_HPP_DIFF__PERIOD_BASELINE + */ + if (show_formula && i) + data__hpp_register(d, PERF_HPP_DIFF__FORMULA); - if (show_displacement) - perf_hpp__column_enable(PERF_HPP__DISPL, true); + if (show_period) + data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD : + PERF_HPP_DIFF__PERIOD_BASELINE); + } +} + +static int data_init(int argc, const char **argv) +{ + struct data__file *d; + static const char *defaults[] = { + "perf.data.old", + "perf.data", + }; + bool use_default = true; + int i; + + data__files_cnt = 2; + + if (argc) { + if (argc == 1) + defaults[1] = argv[0]; + else { + data__files_cnt = argc; + use_default = false; + } + } else if (perf_guest) { + defaults[0] = "perf.data.host"; + defaults[1] = "perf.data.guest"; + } + + if (sort_compute >= (unsigned int) data__files_cnt) { + pr_err("Order option out of limit.\n"); + return -EINVAL; + } + + data__files = zalloc(sizeof(*data__files) * data__files_cnt); + if (!data__files) + return -ENOMEM; + + data__for_each_file(i, d) { + struct perf_data_file *file = &d->file; + + file->path = use_default ? defaults[i] : argv[i]; + file->mode = PERF_DATA_MODE_READ, + file->force = force, + + d->idx = i; + } + + return 0; } int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) { - sort_order = diff__default_sort_order; + perf_config(perf_default_config, NULL); + argc = parse_options(argc, argv, options, diff_usage, 0); - if (argc) { - if (argc > 2) - usage_with_options(diff_usage, options); - if (argc == 2) { - input_old = argv[0]; - input_new = argv[1]; - } else - input_new = argv[0]; - } else if (symbol_conf.default_guest_vmlinux_name || - symbol_conf.default_guest_kallsyms) { - input_old = "perf.data.host"; - input_new = "perf.data.guest"; - } - - symbol_conf.exclude_other = false; + if (symbol__init() < 0) return -1; + if (data_init(argc, argv) < 0) + return -1; + ui_init(); - setup_sorting(diff_usage, options); + sort__mode = SORT_MODE__DIFF; + + if (setup_sorting() < 0) + usage_with_options(diff_usage, options); + setup_pager(); - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL); - sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL); - sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL); + sort__setup_elide(NULL); return __cmd_diff(); } diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index 997afb82691..c99e0de7e54 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c @@ -14,95 +14,23 @@ #include "util/parse-events.h" #include "util/parse-options.h" #include "util/session.h" +#include "util/data.h" -struct perf_attr_details { - bool freq; - bool verbose; -}; - -static int comma_printf(bool *first, const char *fmt, ...) 
-{ - va_list args; - int ret = 0; - - if (!*first) { - ret += printf(","); - } else { - ret += printf(":"); - *first = false; - } - - va_start(args, fmt); - ret += vprintf(fmt, args); - va_end(args); - return ret; -} - -static int __if_print(bool *first, const char *field, u64 value) -{ - if (value == 0) - return 0; - - return comma_printf(first, " %s: %" PRIu64, field, value); -} - -#define if_print(field) __if_print(&first, #field, pos->attr.field) - -static int __cmd_evlist(const char *input_name, struct perf_attr_details *details) +static int __cmd_evlist(const char *file_name, struct perf_attr_details *details) { struct perf_session *session; struct perf_evsel *pos; + struct perf_data_file file = { + .path = file_name, + .mode = PERF_DATA_MODE_READ, + }; - session = perf_session__new(input_name, O_RDONLY, 0, false, NULL); + session = perf_session__new(&file, 0, NULL); if (session == NULL) return -ENOMEM; - list_for_each_entry(pos, &session->evlist->entries, node) { - bool first = true; - - printf("%s", perf_evsel__name(pos)); - - if (details->verbose || details->freq) { - comma_printf(&first, " sample_freq=%" PRIu64, - (u64)pos->attr.sample_freq); - } - - if (details->verbose) { - if_print(type); - if_print(config); - if_print(config1); - if_print(config2); - if_print(size); - if_print(sample_type); - if_print(read_format); - if_print(disabled); - if_print(inherit); - if_print(pinned); - if_print(exclusive); - if_print(exclude_user); - if_print(exclude_kernel); - if_print(exclude_hv); - if_print(exclude_idle); - if_print(mmap); - if_print(comm); - if_print(freq); - if_print(inherit_stat); - if_print(enable_on_exec); - if_print(task); - if_print(watermark); - if_print(precise_ip); - if_print(mmap_data); - if_print(sample_id_all); - if_print(exclude_host); - if_print(exclude_guest); - if_print(__reserved_1); - if_print(wakeup_events); - if_print(bp_type); - if_print(branch_sample_type); - } - - putchar('\n'); - } + evlist__for_each(session->evlist, pos) + perf_evsel__fprintf(pos, details, stdout); perf_session__delete(session); return 0; @@ -111,12 +39,13 @@ static int __cmd_evlist(const char *input_name, struct perf_attr_details *detail int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_attr_details details = { .verbose = false, }; - const char *input_name = NULL; const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "Input file name"), OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"), OPT_BOOLEAN('v', "verbose", &details.verbose, "Show all event attr details"), + OPT_BOOLEAN('g', "group", &details.event_group, + "Show event group information"), OPT_END() }; const char * const evlist_usage[] = { @@ -128,5 +57,10 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) if (argc) usage_with_options(evlist_usage, options); + if (details.event_group && (details.verbose || details.freq)) { + pr_err("--group option is not compatible with other options\n"); + usage_with_options(evlist_usage, options); + } + return __cmd_evlist(input_name, &details); } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 4688bea95c1..16c7c11ad06 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -8,35 +8,47 @@ #include "builtin.h" #include "perf.h" +#include "util/color.h" +#include "util/evlist.h" +#include "util/evsel.h" #include "util/session.h" #include "util/tool.h" #include "util/debug.h" +#include "util/build-id.h" +#include "util/data.h" 
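
A change repeated across every tool touched by this diff (builtin-diff.c and builtin-evlist.c above, builtin-inject.c, builtin-kmem.c and builtin-kvm.c below) is the switch from the old perf_session__new(path, O_RDONLY, force, repipe, tool) call to a struct perf_data_file descriptor declared in the newly included util/data.h. The fragment below is a condensed sketch of that idiom, not part of the patch: it uses only calls and fields that appear in these hunks, so it builds only inside tools/perf, and the wrapper function and its arguments are purely illustrative. Bundling path, mode and force into one descriptor is what lets later hunks add write support (PERF_DATA_MODE_WRITE, perf_data_file__open(), perf_data_file__write()) without growing the perf_session__new() argument list.

	#include <errno.h>
	#include <stdbool.h>
	#include "util/session.h"
	#include "util/tool.h"
	#include "util/data.h"

	/*
	 * Illustrative condensation of the perf_data_file idiom introduced by
	 * this patch; only calls that appear in the hunks are used.
	 */
	static int open_and_process(const char *path, bool force,
				    struct perf_tool *tool)
	{
		struct perf_data_file file = {
			.path  = path,
			.mode  = PERF_DATA_MODE_READ,	/* inject's output side uses PERF_DATA_MODE_WRITE */
			.force = force,
		};
		struct perf_session *session;
		int ret;

		/* the second argument is the repipe flag carried over from the old call */
		session = perf_session__new(&file, false, tool);
		if (session == NULL)
			return -ENOMEM;

		ret = perf_session__process_events(session, tool);
		perf_session__delete(session);
		return ret;
	}
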
#include "util/parse-options.h" +#include <linux/list.h> + struct perf_inject { - struct perf_tool tool; - bool build_ids; + struct perf_tool tool; + bool build_ids; + bool sched_stat; + const char *input_name; + struct perf_data_file output; + u64 bytes_written; + struct list_head samples; }; -static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused, - union perf_event *event, - struct machine *machine __maybe_unused) -{ - uint32_t size; - void *buf = event; - - size = event->header.size; +struct event_entry { + struct list_head node; + u32 tid; + union perf_event event[0]; +}; - while (size) { - int ret = write(STDOUT_FILENO, buf, size); - if (ret < 0) - return -errno; +static int perf_event__repipe_synth(struct perf_tool *tool, + union perf_event *event) +{ + struct perf_inject *inject = container_of(tool, struct perf_inject, tool); + ssize_t size; - size -= ret; - buf += ret; - } + size = perf_data_file__write(&inject->output, event, + event->header.size); + if (size < 0) + return -errno; + inject->bytes_written += size; return 0; } @@ -45,48 +57,55 @@ static int perf_event__repipe_op2_synth(struct perf_tool *tool, struct perf_session *session __maybe_unused) { - return perf_event__repipe_synth(tool, event, NULL); + return perf_event__repipe_synth(tool, event); } -static int perf_event__repipe_event_type_synth(struct perf_tool *tool, - union perf_event *event) -{ - return perf_event__repipe_synth(tool, event, NULL); -} - -static int perf_event__repipe_tracing_data_synth(union perf_event *event, - struct perf_session *session - __maybe_unused) -{ - return perf_event__repipe_synth(NULL, event, NULL); -} - -static int perf_event__repipe_attr(union perf_event *event, - struct perf_evlist **pevlist __maybe_unused) +static int perf_event__repipe_attr(struct perf_tool *tool, + union perf_event *event, + struct perf_evlist **pevlist) { + struct perf_inject *inject = container_of(tool, struct perf_inject, + tool); int ret; - ret = perf_event__process_attr(event, pevlist); + + ret = perf_event__process_attr(tool, event, pevlist); if (ret) return ret; - return perf_event__repipe_synth(NULL, event, NULL); + if (!inject->output.is_pipe) + return 0; + + return perf_event__repipe_synth(tool, event); } static int perf_event__repipe(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, - struct machine *machine) + struct machine *machine __maybe_unused) { - return perf_event__repipe_synth(tool, event, machine); + return perf_event__repipe_synth(tool, event); } +typedef int (*inject_handler)(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine); + static int perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __maybe_unused, - struct perf_evsel *evsel __maybe_unused, - struct machine *machine) + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) { - return perf_event__repipe_synth(tool, event, machine); + if (evsel->handler) { + inject_handler f = evsel->handler; + return f(tool, event, sample, evsel, machine); + } + + build_id__mark_dso_hit(tool, event, sample, evsel, machine); + + return perf_event__repipe_synth(tool, event); } static int perf_event__repipe_mmap(struct perf_tool *tool, @@ -102,62 +121,76 @@ static int perf_event__repipe_mmap(struct perf_tool *tool, return err; } -static int perf_event__repipe_task(struct perf_tool *tool, +static int perf_event__repipe_mmap2(struct 
perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + int err; + + err = perf_event__process_mmap2(tool, event, sample, machine); + perf_event__repipe(tool, event, sample, machine); + + return err; +} + +static int perf_event__repipe_fork(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) { int err; - err = perf_event__process_task(tool, event, sample, machine); + err = perf_event__process_fork(tool, event, sample, machine); perf_event__repipe(tool, event, sample, machine); return err; } -static int perf_event__repipe_tracing_data(union perf_event *event, +static int perf_event__repipe_tracing_data(struct perf_tool *tool, + union perf_event *event, struct perf_session *session) { int err; - perf_event__repipe_synth(NULL, event, NULL); - err = perf_event__process_tracing_data(event, session); + perf_event__repipe_synth(tool, event); + err = perf_event__process_tracing_data(tool, event, session); return err; } -static int dso__read_build_id(struct dso *self) +static int dso__read_build_id(struct dso *dso) { - if (self->has_build_id) + if (dso->has_build_id) return 0; - if (filename__read_build_id(self->long_name, self->build_id, - sizeof(self->build_id)) > 0) { - self->has_build_id = true; + if (filename__read_build_id(dso->long_name, dso->build_id, + sizeof(dso->build_id)) > 0) { + dso->has_build_id = true; return 0; } return -1; } -static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, +static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool, struct machine *machine) { u16 misc = PERF_RECORD_MISC_USER; int err; - if (dso__read_build_id(self) < 0) { - pr_debug("no build_id found for %s\n", self->long_name); + if (dso__read_build_id(dso) < 0) { + pr_debug("no build_id found for %s\n", dso->long_name); return -1; } - if (self->kernel) + if (dso->kernel) misc = PERF_RECORD_MISC_KERNEL; - err = perf_event__synthesize_build_id(tool, self, misc, perf_event__repipe, + err = perf_event__synthesize_build_id(tool, dso, misc, perf_event__repipe, machine); if (err) { - pr_err("Can't synthesize build_id event for %s\n", self->long_name); + pr_err("Can't synthesize build_id event for %s\n", dso->long_name); return -1; } @@ -176,7 +209,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool, cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - thread = machine__findnew_thread(machine, event->ip.pid); + thread = machine__findnew_thread(machine, sample->pid, sample->tid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", event->header.type); @@ -184,7 +217,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool, } thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, - event->ip.ip, &al); + sample->ip, &al); if (al.map != NULL) { if (!al.map->dso->hit) { @@ -196,7 +229,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool, * account this as unresolved. 
*/ } else { -#ifdef LIBELF_SUPPORT +#ifdef HAVE_LIBELF_SUPPORT pr_warning("no symbols found in %s, maybe " "install a debug package?\n", al.map->dso->long_name); @@ -210,33 +243,156 @@ repipe: return 0; } -extern volatile int session_done; +static int perf_inject__sched_process_exit(struct perf_tool *tool, + union perf_event *event __maybe_unused, + struct perf_sample *sample, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine __maybe_unused) +{ + struct perf_inject *inject = container_of(tool, struct perf_inject, tool); + struct event_entry *ent; + + list_for_each_entry(ent, &inject->samples, node) { + if (sample->tid == ent->tid) { + list_del_init(&ent->node); + free(ent); + break; + } + } + + return 0; +} + +static int perf_inject__sched_switch(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + struct perf_inject *inject = container_of(tool, struct perf_inject, tool); + struct event_entry *ent; + + perf_inject__sched_process_exit(tool, event, sample, evsel, machine); + + ent = malloc(event->header.size + sizeof(struct event_entry)); + if (ent == NULL) { + color_fprintf(stderr, PERF_COLOR_RED, + "Not enough memory to process sched switch event!"); + return -1; + } + + ent->tid = sample->tid; + memcpy(&ent->event, event, event->header.size); + list_add(&ent->node, &inject->samples); + return 0; +} + +static int perf_inject__sched_stat(struct perf_tool *tool, + union perf_event *event __maybe_unused, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + struct event_entry *ent; + union perf_event *event_sw; + struct perf_sample sample_sw; + struct perf_inject *inject = container_of(tool, struct perf_inject, tool); + u32 pid = perf_evsel__intval(evsel, sample, "pid"); + + list_for_each_entry(ent, &inject->samples, node) { + if (pid == ent->tid) + goto found; + } + + return 0; +found: + event_sw = &ent->event[0]; + perf_evsel__parse_sample(evsel, event_sw, &sample_sw); + + sample_sw.period = sample->period; + sample_sw.time = sample->time; + perf_event__synthesize_sample(event_sw, evsel->attr.sample_type, + evsel->attr.read_format, &sample_sw, + false); + build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine); + return perf_event__repipe(tool, event_sw, &sample_sw, machine); +} static void sig_handler(int sig __maybe_unused) { session_done = 1; } +static int perf_evsel__check_stype(struct perf_evsel *evsel, + u64 sample_type, const char *sample_msg) +{ + struct perf_event_attr *attr = &evsel->attr; + const char *name = perf_evsel__name(evsel); + + if (!(attr->sample_type & sample_type)) { + pr_err("Samples for %s event do not have %s attribute set.", + name, sample_msg); + return -EINVAL; + } + + return 0; +} + static int __cmd_inject(struct perf_inject *inject) { struct perf_session *session; int ret = -EINVAL; + struct perf_data_file file = { + .path = inject->input_name, + .mode = PERF_DATA_MODE_READ, + }; + struct perf_data_file *file_out = &inject->output; signal(SIGINT, sig_handler); - if (inject->build_ids) { - inject->tool.sample = perf_event__inject_buildid; + if (inject->build_ids || inject->sched_stat) { inject->tool.mmap = perf_event__repipe_mmap; - inject->tool.fork = perf_event__repipe_task; + inject->tool.mmap2 = perf_event__repipe_mmap2; + inject->tool.fork = perf_event__repipe_fork; inject->tool.tracing_data = perf_event__repipe_tracing_data; } - session = perf_session__new("-", O_RDONLY, false, true, &inject->tool); + 
session = perf_session__new(&file, true, &inject->tool); if (session == NULL) return -ENOMEM; + if (inject->build_ids) { + inject->tool.sample = perf_event__inject_buildid; + } else if (inject->sched_stat) { + struct perf_evsel *evsel; + + inject->tool.ordered_samples = true; + + evlist__for_each(session->evlist, evsel) { + const char *name = perf_evsel__name(evsel); + + if (!strcmp(name, "sched:sched_switch")) { + if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID")) + return -EINVAL; + + evsel->handler = perf_inject__sched_switch; + } else if (!strcmp(name, "sched:sched_process_exit")) + evsel->handler = perf_inject__sched_process_exit; + else if (!strncmp(name, "sched:sched_stat_", 17)) + evsel->handler = perf_inject__sched_stat; + } + } + + if (!file_out->is_pipe) + lseek(file_out->fd, session->header.data_offset, SEEK_SET); + ret = perf_session__process_events(session, &inject->tool); + if (!file_out->is_pipe) { + session->header.data_size = inject->bytes_written; + perf_session__write_header(session, session->evlist, file_out->fd, true); + } + perf_session__delete(session); return ret; @@ -248,6 +404,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) .tool = { .sample = perf_event__repipe_sample, .mmap = perf_event__repipe, + .mmap2 = perf_event__repipe, .comm = perf_event__repipe, .fork = perf_event__repipe, .exit = perf_event__repipe, @@ -256,14 +413,27 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) .throttle = perf_event__repipe, .unthrottle = perf_event__repipe, .attr = perf_event__repipe_attr, - .event_type = perf_event__repipe_event_type_synth, - .tracing_data = perf_event__repipe_tracing_data_synth, + .tracing_data = perf_event__repipe_op2_synth, + .finished_round = perf_event__repipe_op2_synth, .build_id = perf_event__repipe_op2_synth, }, + .input_name = "-", + .samples = LIST_HEAD_INIT(inject.samples), + .output = { + .path = "-", + .mode = PERF_DATA_MODE_WRITE, + }, }; const struct option options[] = { OPT_BOOLEAN('b', "build-ids", &inject.build_ids, "Inject build-ids into the output stream"), + OPT_STRING('i', "input", &inject.input_name, "file", + "input file name"), + OPT_STRING('o', "output", &inject.output.path, "file", + "output file name"), + OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat, + "Merge sched-stat and sched-switch for getting events " + "where and how long tasks slept"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show build ids, etc)"), OPT_END() @@ -281,6 +451,11 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) if (argc) usage_with_options(inject_usage, options); + if (perf_data_file__open(&inject.output)) { + perror("failed to create output file"); + return -1; + } + if (symbol__init() < 0) return -1; diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 14bf82f6365..bef3376bfaf 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -13,10 +13,13 @@ #include "util/parse-options.h" #include "util/trace-event.h" +#include "util/data.h" +#include "util/cpumap.h" #include "util/debug.h" #include <linux/rbtree.h> +#include <linux/string.h> struct alloc_stat; typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); @@ -29,9 +32,6 @@ static int caller_lines = -1; static bool raw_ip; -static int *cpunode_map; -static int max_cpu_num; - struct alloc_stat { u64 call_site; u64 ptr; @@ -53,76 +53,6 @@ static struct rb_root root_caller_sorted; static unsigned long total_requested, total_allocated; 
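
The builtin-inject.c hunks above install per-tracepoint callbacks through evsel->handler and dispatch them from the generic sample callback, and builtin-kmem.c below is converted from the older evsel->handler.func member to the same bare-pointer form. The stand-alone sketch that follows illustrates only that dispatch pattern; struct evsel, struct sample and handle_alloc are hypothetical stand-ins for the perf-internal perf_evsel, perf_sample and the handlers named in the patch. Storing the handler as an untyped pointer (as the patch does) keeps one generic sample callback while each tool supplies its own handler signature.

	#include <stdio.h>
	#include <string.h>

	/* hypothetical stand-ins for perf_evsel / perf_sample */
	struct sample { int cpu; };
	struct evsel  { const char *name; void *handler; };

	typedef int (*tracepoint_handler)(struct evsel *evsel,
					  struct sample *sample);

	static int handle_alloc(struct evsel *evsel, struct sample *sample)
	{
		printf("%s on cpu %d\n", evsel->name, sample->cpu);
		return 0;
	}

	/* generic sample callback: dispatch to the per-event handler if one is set */
	static int process_sample(struct evsel *evsel, struct sample *sample)
	{
		if (evsel->handler != NULL) {
			tracepoint_handler f = evsel->handler;
			return f(evsel, sample);
		}
		return 0;
	}

	int main(void)
	{
		struct evsel ev = { .name = "kmem:kmalloc" };
		struct sample s = { .cpu = 3 };

		/* mirrors the patch's plain-pointer assignment, e.g.
		 * evsel->handler = perf_inject__sched_switch; */
		if (!strcmp(ev.name, "kmem:kmalloc"))
			ev.handler = handle_alloc;

		return process_sample(&ev, &s);
	}
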
static unsigned long nr_allocs, nr_cross_allocs; -#define PATH_SYS_NODE "/sys/devices/system/node" - -static int init_cpunode_map(void) -{ - FILE *fp; - int i, err = -1; - - fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); - if (!fp) { - max_cpu_num = 4096; - return 0; - } - - if (fscanf(fp, "%d", &max_cpu_num) < 1) { - pr_err("Failed to read 'kernel_max' from sysfs"); - goto out_close; - } - - max_cpu_num++; - - cpunode_map = calloc(max_cpu_num, sizeof(int)); - if (!cpunode_map) { - pr_err("%s: calloc failed\n", __func__); - goto out_close; - } - - for (i = 0; i < max_cpu_num; i++) - cpunode_map[i] = -1; - - err = 0; -out_close: - fclose(fp); - return err; -} - -static int setup_cpunode_map(void) -{ - struct dirent *dent1, *dent2; - DIR *dir1, *dir2; - unsigned int cpu, mem; - char buf[PATH_MAX]; - - if (init_cpunode_map()) - return -1; - - dir1 = opendir(PATH_SYS_NODE); - if (!dir1) - return -1; - - while ((dent1 = readdir(dir1)) != NULL) { - if (dent1->d_type != DT_DIR || - sscanf(dent1->d_name, "node%u", &mem) < 1) - continue; - - snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name); - dir2 = opendir(buf); - if (!dir2) - continue; - while ((dent2 = readdir(dir2)) != NULL) { - if (dent2->d_type != DT_LNK || - sscanf(dent2->d_name, "cpu%u", &cpu) < 1) - continue; - cpunode_map[cpu] = mem; - } - closedir(dir2); - } - closedir(dir1); - return 0; -} - static int insert_alloc_stat(unsigned long call_site, unsigned long ptr, int bytes_req, int bytes_alloc, int cpu) { @@ -233,7 +163,7 @@ static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, int ret = perf_evsel__process_alloc_event(evsel, sample); if (!ret) { - int node1 = cpunode_map[sample->cpu], + int node1 = cpu__get_node(sample->cpu), node2 = perf_evsel__intval(evsel, sample, "node"); if (node1 != node2) @@ -304,7 +234,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, struct perf_evsel *evsel, struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, event->ip.pid); + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -312,10 +243,10 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, return -1; } - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); - if (evsel->handler.func != NULL) { - tracepoint_handler f = evsel->handler.func; + if (evsel->handler != NULL) { + tracepoint_handler f = evsel->handler; return f(evsel, sample); } @@ -340,7 +271,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session, int n_lines, int is_caller) { struct rb_node *next; - struct machine *machine; + struct machine *machine = &session->machines.host; printf("%.102s\n", graph_dotted_line); printf(" %-34s |", is_caller ? 
"Callsite": "Alloc Ptr"); @@ -349,11 +280,6 @@ static void __print_result(struct rb_root *root, struct perf_session *session, next = rb_first(root); - machine = perf_session__find_host_machine(session); - if (!machine) { - pr_err("__print_result: couldn't find kernel information\n"); - return; - } while (next && n_lines--) { struct alloc_stat *data = rb_entry(next, struct alloc_stat, node); @@ -477,7 +403,7 @@ static void sort_result(void) __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort); } -static int __cmd_kmem(const char *input_name) +static int __cmd_kmem(void) { int err = -EINVAL; struct perf_session *session; @@ -489,8 +415,12 @@ static int __cmd_kmem(const char *input_name) { "kmem:kfree", perf_evsel__process_free_event, }, { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, }; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; - session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem); + session = perf_session__new(&file, false, &perf_kmem); if (session == NULL) return -ENOMEM; @@ -614,8 +544,7 @@ static struct sort_dimension *avail_sorts[] = { &pingpong_sort_dimension, }; -#define NUM_AVAIL_SORTS \ - (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *)) +#define NUM_AVAIL_SORTS ((int)ARRAY_SIZE(avail_sorts)) static int sort_dimension__add(const char *tok, struct list_head *list) { @@ -624,12 +553,11 @@ static int sort_dimension__add(const char *tok, struct list_head *list) for (i = 0; i < NUM_AVAIL_SORTS; i++) { if (!strcmp(avail_sorts[i]->name, tok)) { - sort = malloc(sizeof(*sort)); + sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i])); if (!sort) { - pr_err("%s: malloc failed\n", __func__); + pr_err("%s: memdup failed\n", __func__); return -1; } - memcpy(sort, avail_sorts[i], sizeof(*sort)); list_add_tail(&sort->list, list); return 0; } @@ -714,7 +642,7 @@ static int parse_line_opt(const struct option *opt __maybe_unused, static int __cmd_record(int argc, const char **argv) { const char * const record_args[] = { - "record", "-a", "-R", "-f", "-c", "1", + "record", "-a", "-R", "-c", "1", "-e", "kmem:kmalloc", "-e", "kmem:kmalloc_node", "-e", "kmem:kfree", @@ -743,7 +671,6 @@ static int __cmd_record(int argc, const char **argv) int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) { const char * const default_sort_order = "frag,hit,bytes"; - const char *input_name = NULL; const struct option kmem_options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL, @@ -757,11 +684,13 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), OPT_END() }; - const char * const kmem_usage[] = { - "perf kmem [<options>] {record|stat}", + const char *const kmem_subcommands[] = { "record", "stat", NULL }; + const char *kmem_usage[] = { + NULL, NULL }; - argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); + argc = parse_options_subcommand(argc, argv, kmem_options, + kmem_subcommands, kmem_usage, 0); if (!argc) usage_with_options(kmem_usage, kmem_options); @@ -771,7 +700,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) if (!strncmp(argv[0], "rec", 3)) { return __cmd_record(argc, argv); } else if (!strcmp(argv[0], "stat")) { - if (setup_cpunode_map()) + if (cpu__setup_cpunode_map()) return -1; if (list_empty(&caller_sort)) @@ -779,7 +708,7 @@ int cmd_kmem(int argc, const char **argv, const 
char *prefix __maybe_unused) if (list_empty(&alloc_sort)) setup_sorting(&alloc_sort, default_sort_order); - return __cmd_kmem(input_name); + return __cmd_kmem(); } else usage_with_options(kmem_usage, kmem_options); diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 260abc535b5..0f1e5a2f6ad 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -2,29 +2,37 @@ #include "perf.h" #include "util/evsel.h" +#include "util/evlist.h" #include "util/util.h" #include "util/cache.h" #include "util/symbol.h" #include "util/thread.h" #include "util/header.h" #include "util/session.h" - +#include "util/intlist.h" #include "util/parse-options.h" #include "util/trace-event.h" #include "util/debug.h" -#include "util/debugfs.h" +#include <api/fs/debugfs.h> #include "util/tool.h" #include "util/stat.h" +#include "util/top.h" +#include "util/data.h" #include <sys/prctl.h> +#ifdef HAVE_TIMERFD_SUPPORT +#include <sys/timerfd.h> +#endif +#include <termios.h> #include <semaphore.h> #include <pthread.h> #include <math.h> -#include "../../arch/x86/include/asm/svm.h" -#include "../../arch/x86/include/asm/vmx.h" -#include "../../arch/x86/include/asm/kvm.h" +#if defined(__i386__) || defined(__x86_64__) +#include <asm/svm.h> +#include <asm/vmx.h> +#include <asm/kvm.h> struct event_key { #define INVALID_KEY (~0ULL) @@ -58,7 +66,7 @@ struct kvm_event_key { }; -struct perf_kvm; +struct perf_kvm_stat; struct kvm_events_ops { bool (*is_begin_event)(struct perf_evsel *evsel, @@ -66,7 +74,7 @@ struct kvm_events_ops { struct event_key *key); bool (*is_end_event)(struct perf_evsel *evsel, struct perf_sample *sample, struct event_key *key); - void (*decode_key)(struct perf_kvm *kvm, struct event_key *key, + void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key, char decode[20]); const char *name; }; @@ -79,8 +87,10 @@ struct exit_reasons_table { #define EVENTS_BITS 12 #define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) -struct perf_kvm { +struct perf_kvm_stat { struct perf_tool tool; + struct record_opts opts; + struct perf_evlist *evlist; struct perf_session *session; const char *file_name; @@ -95,10 +105,20 @@ struct perf_kvm { struct kvm_events_ops *events_ops; key_cmp_fun compare; struct list_head kvm_events_cache[EVENTS_CACHE_SIZE]; + u64 total_time; u64 total_count; + u64 lost_events; + u64 duration; + + const char *pid_str; + struct intlist *pid_list; struct rb_root result; + + int timerfd; + unsigned int display_time; + bool live; }; @@ -146,7 +166,7 @@ static struct exit_reasons_table svm_exit_reasons[] = { SVM_EXIT_REASONS }; -static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code) +static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code) { int i = kvm->exit_reasons_size; struct exit_reasons_table *tbl = kvm->exit_reasons; @@ -162,7 +182,7 @@ static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code) return "UNKNOWN"; } -static void exit_event_decode_key(struct perf_kvm *kvm, +static void exit_event_decode_key(struct perf_kvm_stat *kvm, struct event_key *key, char decode[20]) { @@ -228,7 +248,7 @@ static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, return false; } -static void mmio_event_decode_key(struct perf_kvm *kvm __maybe_unused, +static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, struct event_key *key, char decode[20]) { @@ -271,7 +291,7 @@ static bool ioport_event_end(struct perf_evsel *evsel, return kvm_entry_event(evsel); } -static void 
ioport_event_decode_key(struct perf_kvm *kvm __maybe_unused, +static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, struct event_key *key, char decode[20]) { @@ -286,7 +306,7 @@ static struct kvm_events_ops ioport_events = { .name = "IO Port Access" }; -static bool register_kvm_events_ops(struct perf_kvm *kvm) +static bool register_kvm_events_ops(struct perf_kvm_stat *kvm) { bool ret = true; @@ -311,14 +331,38 @@ struct vcpu_event_record { }; -static void init_kvm_event_record(struct perf_kvm *kvm) +static void init_kvm_event_record(struct perf_kvm_stat *kvm) { - int i; + unsigned int i; - for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++) + for (i = 0; i < EVENTS_CACHE_SIZE; i++) INIT_LIST_HEAD(&kvm->kvm_events_cache[i]); } +#ifdef HAVE_TIMERFD_SUPPORT +static void clear_events_cache_stats(struct list_head *kvm_events_cache) +{ + struct list_head *head; + struct kvm_event *event; + unsigned int i; + int j; + + for (i = 0; i < EVENTS_CACHE_SIZE; i++) { + head = &kvm_events_cache[i]; + list_for_each_entry(event, head, hash_entry) { + /* reset stats for event */ + event->total.time = 0; + init_stats(&event->total.stats); + + for (j = 0; j < event->max_vcpu; ++j) { + event->vcpu[j].time = 0; + init_stats(&event->vcpu[j].stats); + } + } + } +} +#endif + static int kvm_events_hash_fn(u64 key) { return key & (EVENTS_CACHE_SIZE - 1); @@ -327,6 +371,7 @@ static int kvm_events_hash_fn(u64 key) static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) { int old_max_vcpu = event->max_vcpu; + void *prev; if (vcpu_id < event->max_vcpu) return true; @@ -334,9 +379,11 @@ static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) while (event->max_vcpu <= vcpu_id) event->max_vcpu += DEFAULT_VCPU_NUM; + prev = event->vcpu; event->vcpu = realloc(event->vcpu, event->max_vcpu * sizeof(*event->vcpu)); if (!event->vcpu) { + free(prev); pr_err("Not enough memory\n"); return false; } @@ -357,10 +404,11 @@ static struct kvm_event *kvm_alloc_init_event(struct event_key *key) } event->key = *key; + init_stats(&event->total.stats); return event; } -static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm, +static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm, struct event_key *key) { struct kvm_event *event; @@ -369,9 +417,10 @@ static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm, BUG_ON(key->key == INVALID_KEY); head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)]; - list_for_each_entry(event, head, hash_entry) + list_for_each_entry(event, head, hash_entry) { if (event->key.key == key->key && event->key.info == key->info) return event; + } event = kvm_alloc_init_event(key); if (!event) @@ -381,7 +430,7 @@ static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm, return event; } -static bool handle_begin_event(struct perf_kvm *kvm, +static bool handle_begin_event(struct perf_kvm_stat *kvm, struct vcpu_event_record *vcpu_record, struct event_key *key, u64 timestamp) { @@ -416,7 +465,10 @@ static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) static bool update_kvm_event(struct kvm_event *event, int vcpu_id, u64 time_diff) { - kvm_update_event_stats(&event->total, time_diff); + if (vcpu_id == -1) { + kvm_update_event_stats(&event->total, time_diff); + return true; + } if (!kvm_event_expand(event, vcpu_id)) return false; @@ -425,13 +477,19 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id, return true; } -static bool handle_end_event(struct perf_kvm *kvm, +static bool 
handle_end_event(struct perf_kvm_stat *kvm, struct vcpu_event_record *vcpu_record, struct event_key *key, - u64 timestamp) + struct perf_sample *sample) { struct kvm_event *event; u64 time_begin, time_diff; + int vcpu; + + if (kvm->trace_vcpu == -1) + vcpu = -1; + else + vcpu = vcpu_record->vcpu_id; event = vcpu_record->last_event; time_begin = vcpu_record->start_time; @@ -458,10 +516,26 @@ static bool handle_end_event(struct perf_kvm *kvm, vcpu_record->last_event = NULL; vcpu_record->start_time = 0; - BUG_ON(timestamp < time_begin); + /* seems to happen once in a while during live mode */ + if (sample->time < time_begin) { + pr_debug("End time before begin time; skipping event.\n"); + return true; + } + + time_diff = sample->time - time_begin; + + if (kvm->duration && time_diff > kvm->duration) { + char decode[32]; + + kvm->events_ops->decode_key(kvm, &event->key, decode); + if (strcmp(decode, "HLT")) { + pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n", + sample->time, sample->pid, vcpu_record->vcpu_id, + decode, time_diff/1000); + } + } - time_diff = timestamp - time_begin; - return update_kvm_event(event, vcpu_record->vcpu_id, time_diff); + return update_kvm_event(event, vcpu, time_diff); } static @@ -486,7 +560,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread, return thread->priv; } -static bool handle_kvm_event(struct perf_kvm *kvm, +static bool handle_kvm_event(struct perf_kvm_stat *kvm, struct thread *thread, struct perf_evsel *evsel, struct perf_sample *sample) @@ -498,11 +572,16 @@ static bool handle_kvm_event(struct perf_kvm *kvm, if (!vcpu_record) return true; + /* only process events for vcpus user cares about */ + if ((kvm->trace_vcpu != -1) && + (kvm->trace_vcpu != vcpu_record->vcpu_id)) + return true; + if (kvm->events_ops->is_begin_event(evsel, sample, &key)) return handle_begin_event(kvm, vcpu_record, &key, sample->time); if (kvm->events_ops->is_end_event(evsel, sample, &key)) - return handle_end_event(kvm, vcpu_record, &key, sample->time); + return handle_end_event(kvm, vcpu_record, &key, sample); return true; } @@ -531,6 +610,8 @@ static int compare_kvm_event_ ## func(struct kvm_event *one, \ GET_EVENT_KEY(time, time); COMPARE_EVENT_KEY(count, stats.n); COMPARE_EVENT_KEY(mean, stats.mean); +GET_EVENT_KEY(max, stats.max); +GET_EVENT_KEY(min, stats.min); #define DEF_SORT_NAME_KEY(name, compare_key) \ { #name, compare_kvm_event_ ## compare_key } @@ -541,7 +622,7 @@ static struct kvm_event_key keys[] = { { NULL, NULL } }; -static bool select_key(struct perf_kvm *kvm) +static bool select_key(struct perf_kvm_stat *kvm) { int i; @@ -577,7 +658,8 @@ static void insert_to_result(struct rb_root *result, struct kvm_event *event, rb_insert_color(&event->rb, result); } -static void update_total_count(struct perf_kvm *kvm, struct kvm_event *event) +static void +update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event) { int vcpu = kvm->trace_vcpu; @@ -590,19 +672,21 @@ static bool event_is_valid(struct kvm_event *event, int vcpu) return !!get_event_count(event, vcpu); } -static void sort_result(struct perf_kvm *kvm) +static void sort_result(struct perf_kvm_stat *kvm) { unsigned int i; int vcpu = kvm->trace_vcpu; struct kvm_event *event; - for (i = 0; i < EVENTS_CACHE_SIZE; i++) - list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) + for (i = 0; i < EVENTS_CACHE_SIZE; i++) { + list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { if (event_is_valid(event, vcpu)) { update_total_count(kvm, event); 
insert_to_result(&kvm->result, event, kvm->compare, vcpu); } + } + } } /* returns left most element of result, and erase it */ @@ -617,50 +701,113 @@ static struct kvm_event *pop_from_result(struct rb_root *result) return container_of(node, struct kvm_event, rb); } -static void print_vcpu_info(int vcpu) +static void print_vcpu_info(struct perf_kvm_stat *kvm) { + int vcpu = kvm->trace_vcpu; + pr_info("Analyze events for "); + if (kvm->live) { + if (kvm->opts.target.system_wide) + pr_info("all VMs, "); + else if (kvm->opts.target.pid) + pr_info("pid(s) %s, ", kvm->opts.target.pid); + else + pr_info("dazed and confused on what is monitored, "); + } + if (vcpu == -1) pr_info("all VCPUs:\n\n"); else pr_info("VCPU %d:\n\n", vcpu); } -static void print_result(struct perf_kvm *kvm) +static void show_timeofday(void) +{ + char date[64]; + struct timeval tv; + struct tm ltime; + + gettimeofday(&tv, NULL); + if (localtime_r(&tv.tv_sec, <ime)) { + strftime(date, sizeof(date), "%H:%M:%S", <ime); + pr_info("%s.%06ld", date, tv.tv_usec); + } else + pr_info("00:00:00.000000"); + + return; +} + +static void print_result(struct perf_kvm_stat *kvm) { char decode[20]; struct kvm_event *event; int vcpu = kvm->trace_vcpu; + if (kvm->live) { + puts(CONSOLE_CLEAR); + show_timeofday(); + } + pr_info("\n\n"); - print_vcpu_info(vcpu); + print_vcpu_info(kvm); pr_info("%20s ", kvm->events_ops->name); pr_info("%10s ", "Samples"); pr_info("%9s ", "Samples%"); pr_info("%9s ", "Time%"); + pr_info("%10s ", "Min Time"); + pr_info("%10s ", "Max Time"); pr_info("%16s ", "Avg time"); pr_info("\n\n"); while ((event = pop_from_result(&kvm->result))) { - u64 ecount, etime; + u64 ecount, etime, max, min; ecount = get_event_count(event, vcpu); etime = get_event_time(event, vcpu); + max = get_event_max(event, vcpu); + min = get_event_min(event, vcpu); kvm->events_ops->decode_key(kvm, &event->key, decode); pr_info("%20s ", decode); pr_info("%10llu ", (unsigned long long)ecount); pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100); pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100); + pr_info("%8" PRIu64 "us ", min / 1000); + pr_info("%8" PRIu64 "us ", max / 1000); pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3, kvm_event_rel_stddev(vcpu, event)); pr_info("\n"); } - pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n", - (unsigned long long)kvm->total_count, kvm->total_time / 1e3); + pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n", + kvm->total_count, kvm->total_time / 1e3); + + if (kvm->lost_events) + pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events); +} + +#ifdef HAVE_TIMERFD_SUPPORT +static int process_lost_event(struct perf_tool *tool, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) +{ + struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool); + + kvm->lost_events++; + return 0; +} +#endif + +static bool skip_sample(struct perf_kvm_stat *kvm, + struct perf_sample *sample) +{ + if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL) + return true; + + return false; } static int process_sample_event(struct perf_tool *tool, @@ -669,9 +816,14 @@ static int process_sample_event(struct perf_tool *tool, struct perf_evsel *evsel, struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, sample->tid); - struct perf_kvm *kvm = container_of(tool, struct perf_kvm, tool); + struct thread *thread; + struct perf_kvm_stat 
*kvm = container_of(tool, struct perf_kvm_stat, + tool); + + if (skip_sample(kvm, sample)) + return 0; + thread = machine__findnew_thread(machine, sample->pid, sample->tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", event->header.type); @@ -684,10 +836,20 @@ static int process_sample_event(struct perf_tool *tool, return 0; } -static int get_cpu_isa(struct perf_session *session) +static int cpu_isa_config(struct perf_kvm_stat *kvm) { - char *cpuid = session->header.env.cpuid; - int isa; + char buf[64], *cpuid; + int err, isa; + + if (kvm->live) { + err = get_cpuid(buf, sizeof(buf)); + if (err != 0) { + pr_err("Failed to look up CPU type (Intel or AMD)\n"); + return err; + } + cpuid = buf; + } else + cpuid = kvm->session->header.env.cpuid; if (strstr(cpuid, "Intel")) isa = 1; @@ -695,13 +857,371 @@ static int get_cpu_isa(struct perf_session *session) isa = 0; else { pr_err("CPU %s is not supported.\n", cpuid); - isa = -ENOTSUP; + return -ENOTSUP; + } + + if (isa == 1) { + kvm->exit_reasons = vmx_exit_reasons; + kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons); + kvm->exit_reasons_isa = "VMX"; + } + + return 0; +} + +static bool verify_vcpu(int vcpu) +{ + if (vcpu != -1 && vcpu < 0) { + pr_err("Invalid vcpu:%d.\n", vcpu); + return false; + } + + return true; +} + +#ifdef HAVE_TIMERFD_SUPPORT +/* keeping the max events to a modest level to keep + * the processing of samples per mmap smooth. + */ +#define PERF_KVM__MAX_EVENTS_PER_MMAP 25 + +static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx, + u64 *mmap_time) +{ + union perf_event *event; + struct perf_sample sample; + s64 n = 0; + int err; + + *mmap_time = ULLONG_MAX; + while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) { + err = perf_evlist__parse_sample(kvm->evlist, event, &sample); + if (err) { + perf_evlist__mmap_consume(kvm->evlist, idx); + pr_err("Failed to parse sample\n"); + return -1; + } + + err = perf_session_queue_event(kvm->session, event, &sample, 0); + /* + * FIXME: Here we can't consume the event, as perf_session_queue_event will + * point to it, and it'll get possibly overwritten by the kernel. + */ + perf_evlist__mmap_consume(kvm->evlist, idx); + + if (err) { + pr_err("Failed to enqueue sample: %d\n", err); + return -1; + } + + /* save time stamp of our first sample for this mmap */ + if (n == 0) + *mmap_time = sample.time; + + /* limit events per mmap handled all at once */ + n++; + if (n == PERF_KVM__MAX_EVENTS_PER_MMAP) + break; + } + + return n; +} + +static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm) +{ + int i, err, throttled = 0; + s64 n, ntotal = 0; + u64 flush_time = ULLONG_MAX, mmap_time; + + for (i = 0; i < kvm->evlist->nr_mmaps; i++) { + n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time); + if (n < 0) + return -1; + + /* flush time is going to be the minimum of all the individual + * mmap times. Essentially, we flush all the samples queued up + * from the last pass under our minimal start time -- that leaves + * a very small race for samples to come in with a lower timestamp. + * The ioctl to return the perf_clock timestamp should close the + * race entirely. 
+ */ + if (mmap_time < flush_time) + flush_time = mmap_time; + + ntotal += n; + if (n == PERF_KVM__MAX_EVENTS_PER_MMAP) + throttled = 1; + } + + /* flush queue after each round in which we processed events */ + if (ntotal) { + kvm->session->ordered_samples.next_flush = flush_time; + err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session); + if (err) { + if (kvm->lost_events) + pr_info("\nLost events: %" PRIu64 "\n\n", + kvm->lost_events); + return err; + } + } + + return throttled; +} + +static volatile int done; + +static void sig_handler(int sig __maybe_unused) +{ + done = 1; +} + +static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm) +{ + struct itimerspec new_value; + int rc = -1; + + kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); + if (kvm->timerfd < 0) { + pr_err("timerfd_create failed\n"); + goto out; + } + + new_value.it_value.tv_sec = kvm->display_time; + new_value.it_value.tv_nsec = 0; + new_value.it_interval.tv_sec = kvm->display_time; + new_value.it_interval.tv_nsec = 0; + + if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) { + pr_err("timerfd_settime failed: %d\n", errno); + close(kvm->timerfd); + goto out; + } + + rc = 0; +out: + return rc; +} + +static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm) +{ + uint64_t c; + int rc; + + rc = read(kvm->timerfd, &c, sizeof(uint64_t)); + if (rc < 0) { + if (errno == EAGAIN) + return 0; + + pr_err("Failed to read timer fd: %d\n", errno); + return -1; + } + + if (rc != sizeof(uint64_t)) { + pr_err("Error reading timer fd - invalid size returned\n"); + return -1; + } + + if (c != 1) + pr_debug("Missed timer beats: %" PRIu64 "\n", c-1); + + /* update display */ + sort_result(kvm); + print_result(kvm); + + /* reset counts */ + clear_events_cache_stats(kvm->kvm_events_cache); + kvm->total_count = 0; + kvm->total_time = 0; + kvm->lost_events = 0; + + return 0; +} + +static int fd_set_nonblock(int fd) +{ + long arg = 0; + + arg = fcntl(fd, F_GETFL); + if (arg < 0) { + pr_err("Failed to get current flags for fd %d\n", fd); + return -1; + } + + if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) { + pr_err("Failed to set non-block option on fd %d\n", fd); + return -1; } - return isa; + return 0; } -static int read_events(struct perf_kvm *kvm) +static +int perf_kvm__handle_stdin(struct termios *tc_now, struct termios *tc_save) +{ + int c; + + tcsetattr(0, TCSANOW, tc_now); + c = getc(stdin); + tcsetattr(0, TCSAFLUSH, tc_save); + + if (c == 'q') + return 1; + + return 0; +} + +static int kvm_events_live_report(struct perf_kvm_stat *kvm) +{ + struct pollfd *pollfds = NULL; + int nr_fds, nr_stdin, ret, err = -EINVAL; + struct termios tc, save; + + /* live flag must be set first */ + kvm->live = true; + + ret = cpu_isa_config(kvm); + if (ret < 0) + return ret; + + if (!verify_vcpu(kvm->trace_vcpu) || + !select_key(kvm) || + !register_kvm_events_ops(kvm)) { + goto out; + } + + init_kvm_event_record(kvm); + + tcgetattr(0, &save); + tc = save; + tc.c_lflag &= ~(ICANON | ECHO); + tc.c_cc[VMIN] = 0; + tc.c_cc[VTIME] = 0; + + signal(SIGINT, sig_handler); + signal(SIGTERM, sig_handler); + + /* copy pollfds -- need to add timerfd and stdin */ + nr_fds = kvm->evlist->nr_fds; + pollfds = zalloc(sizeof(struct pollfd) * (nr_fds + 2)); + if (!pollfds) { + err = -ENOMEM; + goto out; + } + memcpy(pollfds, kvm->evlist->pollfd, + sizeof(struct pollfd) * kvm->evlist->nr_fds); + + /* add timer fd */ + if (perf_kvm__timerfd_create(kvm) < 0) { + err = -1; + goto out; + } + + pollfds[nr_fds].fd = kvm->timerfd; + 
pollfds[nr_fds].events = POLLIN; + nr_fds++; + + pollfds[nr_fds].fd = fileno(stdin); + pollfds[nr_fds].events = POLLIN; + nr_stdin = nr_fds; + nr_fds++; + if (fd_set_nonblock(fileno(stdin)) != 0) + goto out; + + /* everything is good - enable the events and process */ + perf_evlist__enable(kvm->evlist); + + while (!done) { + int rc; + + rc = perf_kvm__mmap_read(kvm); + if (rc < 0) + break; + + err = perf_kvm__handle_timerfd(kvm); + if (err) + goto out; + + if (pollfds[nr_stdin].revents & POLLIN) + done = perf_kvm__handle_stdin(&tc, &save); + + if (!rc && !done) + err = poll(pollfds, nr_fds, 100); + } + + perf_evlist__disable(kvm->evlist); + + if (err == 0) { + sort_result(kvm); + print_result(kvm); + } + +out: + if (kvm->timerfd >= 0) + close(kvm->timerfd); + + free(pollfds); + return err; +} + +static int kvm_live_open_events(struct perf_kvm_stat *kvm) +{ + int err, rc = -1; + struct perf_evsel *pos; + struct perf_evlist *evlist = kvm->evlist; + + perf_evlist__config(evlist, &kvm->opts); + + /* + * Note: exclude_{guest,host} do not apply here. + * This command processes KVM tracepoints from host only + */ + evlist__for_each(evlist, pos) { + struct perf_event_attr *attr = &pos->attr; + + /* make sure these *are* set */ + perf_evsel__set_sample_bit(pos, TID); + perf_evsel__set_sample_bit(pos, TIME); + perf_evsel__set_sample_bit(pos, CPU); + perf_evsel__set_sample_bit(pos, RAW); + /* make sure these are *not*; want as small a sample as possible */ + perf_evsel__reset_sample_bit(pos, PERIOD); + perf_evsel__reset_sample_bit(pos, IP); + perf_evsel__reset_sample_bit(pos, CALLCHAIN); + perf_evsel__reset_sample_bit(pos, ADDR); + perf_evsel__reset_sample_bit(pos, READ); + attr->mmap = 0; + attr->comm = 0; + attr->task = 0; + + attr->sample_period = 1; + + attr->watermark = 0; + attr->wakeup_events = 1000; + + /* will enable all once we are ready */ + attr->disabled = 1; + } + + err = perf_evlist__open(evlist); + if (err < 0) { + printf("Couldn't create the events: %s\n", strerror(errno)); + goto out; + } + + if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) { + ui__error("Failed to mmap the events: %s\n", strerror(errno)); + perf_evlist__close(evlist); + goto out; + } + + rc = 0; + +out: + return rc; +} +#endif + +static int read_events(struct perf_kvm_stat *kvm) { int ret; @@ -710,10 +1230,13 @@ static int read_events(struct perf_kvm *kvm) .comm = perf_event__process_comm, .ordered_samples = true, }; + struct perf_data_file file = { + .path = kvm->file_name, + .mode = PERF_DATA_MODE_READ, + }; kvm->tool = eops; - kvm->session = perf_session__new(kvm->file_name, O_RDONLY, 0, false, - &kvm->tool); + kvm->session = perf_session__new(&file, false, &kvm->tool); if (!kvm->session) { pr_err("Initializing perf session failed\n"); return -EINVAL; @@ -726,35 +1249,34 @@ static int read_events(struct perf_kvm *kvm) * Do not use 'isa' recorded in kvm_exit tracepoint since it is not * traced in the old kernel. 
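kvm_events_live_report() multiplexes the mmap'd event descriptors, the timerfd, and stdin through a single poll() array, after switching the terminal to non-canonical, no-echo mode and making stdin non-blocking so a single 'q' keypress ends the session. A stripped-down sketch of just the terminal/stdin handling under those assumptions:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios save, raw;
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	int done = 0;

	/* non-canonical, no-echo terminal, as set up before the live loop */
	tcgetattr(STDIN_FILENO, &save);
	raw = save;
	raw.c_lflag &= ~(ICANON | ECHO);
	raw.c_cc[VMIN] = 0;
	raw.c_cc[VTIME] = 0;
	tcsetattr(STDIN_FILENO, TCSANOW, &raw);
	fcntl(STDIN_FILENO, F_SETFL, fcntl(STDIN_FILENO, F_GETFL) | O_NONBLOCK);

	while (!done) {
		/* 100 ms timeout, like the call at the bottom of the live loop */
		if (poll(&pfd, 1, 100) > 0 && (pfd.revents & POLLIN))
			done = (getchar() == 'q');	/* 'q' quits, as in perf kvm stat live */
	}

	tcsetattr(STDIN_FILENO, TCSAFLUSH, &save);
	return 0;
}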
*/ - ret = get_cpu_isa(kvm->session); - + ret = cpu_isa_config(kvm); if (ret < 0) return ret; - if (ret == 1) { - kvm->exit_reasons = vmx_exit_reasons; - kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons); - kvm->exit_reasons_isa = "VMX"; - } - return perf_session__process_events(kvm->session, &kvm->tool); } -static bool verify_vcpu(int vcpu) +static int parse_target_str(struct perf_kvm_stat *kvm) { - if (vcpu != -1 && vcpu < 0) { - pr_err("Invalid vcpu:%d.\n", vcpu); - return false; + if (kvm->pid_str) { + kvm->pid_list = intlist__new(kvm->pid_str); + if (kvm->pid_list == NULL) { + pr_err("Error parsing process id string\n"); + return -EINVAL; + } } - return true; + return 0; } -static int kvm_events_report_vcpu(struct perf_kvm *kvm) +static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm) { int ret = -EINVAL; int vcpu = kvm->trace_vcpu; + if (parse_target_str(kvm) != 0) + goto exit; + if (!verify_vcpu(vcpu)) goto exit; @@ -778,16 +1300,11 @@ exit: return ret; } -static const char * const record_args[] = { - "record", - "-R", - "-f", - "-m", "1024", - "-c", "1", - "-e", "kvm:kvm_entry", - "-e", "kvm:kvm_exit", - "-e", "kvm:kvm_mmio", - "-e", "kvm:kvm_pio", +static const char * const kvm_events_tp[] = { + "kvm:kvm_entry", + "kvm:kvm_exit", + "kvm:kvm_mmio", + "kvm:kvm_pio", }; #define STRDUP_FAIL_EXIT(s) \ @@ -798,12 +1315,20 @@ static const char * const record_args[] = { _p; \ }) -static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv) +static int +kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv) { unsigned int rec_argc, i, j; const char **rec_argv; + const char * const record_args[] = { + "record", + "-R", + "-m", "1024", + "-c", "1", + }; - rec_argc = ARRAY_SIZE(record_args) + argc + 2; + rec_argc = ARRAY_SIZE(record_args) + argc + 2 + + 2 * ARRAY_SIZE(kvm_events_tp); rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (rec_argv == NULL) @@ -812,6 +1337,11 @@ static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv) for (i = 0; i < ARRAY_SIZE(record_args); i++) rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]); + for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) { + rec_argv[i++] = "-e"; + rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]); + } + rec_argv[i++] = STRDUP_FAIL_EXIT("-o"); rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name); @@ -821,7 +1351,8 @@ static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv) +static int +kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv) { const struct option kvm_events_report_options[] = { OPT_STRING(0, "event", &kvm->report_event, "report event", @@ -831,6 +1362,8 @@ static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv) OPT_STRING('k', "key", &kvm->sort_key, "sort-key", "key for sorting: sample(sort by samples number)" " time (sort by avg time)"), + OPT_STRING('p', "pid", &kvm->pid_str, "pid", + "analyze events only for given process id(s)"), OPT_END() }; @@ -853,6 +1386,184 @@ static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv) return kvm_events_report_vcpu(kvm); } +#ifdef HAVE_TIMERFD_SUPPORT +static struct perf_evlist *kvm_live_event_list(void) +{ + struct perf_evlist *evlist; + char *tp, *name, *sys; + unsigned int j; + int err = -1; + + evlist = perf_evlist__new(); + if (evlist == NULL) + return NULL; + + for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) { 
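kvm_events_record() now assembles the 'perf record' argument vector from a short fixed prefix plus one '-e <tracepoint>' pair per entry in kvm_events_tp, so record and live mode share the same event table. A sketch of that argv assembly (the output file name printed here is only illustrative):

#include <stdio.h>
#include <stdlib.h>

static const char * const kvm_tp[] = {
	"kvm:kvm_entry", "kvm:kvm_exit", "kvm:kvm_mmio", "kvm:kvm_pio",
};

int main(void)
{
	const char *base[] = { "record", "-R", "-m", "1024", "-c", "1" };
	unsigned int nr_base = sizeof(base) / sizeof(base[0]);
	unsigned int nr_tp = sizeof(kvm_tp) / sizeof(kvm_tp[0]);
	/* fixed prefix + ("-e", tracepoint) per event + ("-o", file) */
	unsigned int argc = nr_base + 2 * nr_tp + 2;
	const char **argv = calloc(argc + 1, sizeof(*argv));
	unsigned int i = 0, j;

	if (!argv)
		return 1;
	for (j = 0; j < nr_base; j++)
		argv[i++] = base[j];
	for (j = 0; j < nr_tp; j++) {
		argv[i++] = "-e";
		argv[i++] = kvm_tp[j];
	}
	argv[i++] = "-o";
	argv[i++] = "perf.data.kvm";	/* illustrative file name */

	for (j = 0; j < i; j++)
		printf("%s ", argv[j]);
	putchar('\n');
	free(argv);
	return 0;
}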
+ + tp = strdup(kvm_events_tp[j]); + if (tp == NULL) + goto out; + + /* split tracepoint into subsystem and name */ + sys = tp; + name = strchr(tp, ':'); + if (name == NULL) { + pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n", + kvm_events_tp[j]); + free(tp); + goto out; + } + *name = '\0'; + name++; + + if (perf_evlist__add_newtp(evlist, sys, name, NULL)) { + pr_err("Failed to add %s tracepoint to the list\n", kvm_events_tp[j]); + free(tp); + goto out; + } + + free(tp); + } + + err = 0; + +out: + if (err) { + perf_evlist__delete(evlist); + evlist = NULL; + } + + return evlist; +} + +static int kvm_events_live(struct perf_kvm_stat *kvm, + int argc, const char **argv) +{ + char errbuf[BUFSIZ]; + int err; + + const struct option live_options[] = { + OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid", + "record events on existing process id"), + OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages", + "number of mmap data pages", + perf_evlist__parse_mmap_pages), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), + OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide, + "system-wide collection from all CPUs"), + OPT_UINTEGER('d', "display", &kvm->display_time, + "time in seconds between display updates"), + OPT_STRING(0, "event", &kvm->report_event, "report event", + "event for reporting: vmexit, mmio, ioport"), + OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu, + "vcpu id to report"), + OPT_STRING('k', "key", &kvm->sort_key, "sort-key", + "key for sorting: sample(sort by samples number)" + " time (sort by avg time)"), + OPT_U64(0, "duration", &kvm->duration, + "show events other than HALT that take longer than duration usecs"), + OPT_END() + }; + const char * const live_usage[] = { + "perf kvm stat live [<options>]", + NULL + }; + struct perf_data_file file = { + .mode = PERF_DATA_MODE_WRITE, + }; + + + /* event handling */ + kvm->tool.sample = process_sample_event; + kvm->tool.comm = perf_event__process_comm; + kvm->tool.exit = perf_event__process_exit; + kvm->tool.fork = perf_event__process_fork; + kvm->tool.lost = process_lost_event; + kvm->tool.ordered_samples = true; + perf_tool__fill_defaults(&kvm->tool); + + /* set defaults */ + kvm->display_time = 1; + kvm->opts.user_interval = 1; + kvm->opts.mmap_pages = 512; + kvm->opts.target.uses_mmap = false; + kvm->opts.target.uid_str = NULL; + kvm->opts.target.uid = UINT_MAX; + + symbol__init(); + disable_buildid_cache(); + + use_browser = 0; + setup_browser(false); + + if (argc) { + argc = parse_options(argc, argv, live_options, + live_usage, 0); + if (argc) + usage_with_options(live_usage, live_options); + } + + kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */ + + /* + * target related setups + */ + err = target__validate(&kvm->opts.target); + if (err) { + target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ); + ui__warning("%s", errbuf); + } + + if (target__none(&kvm->opts.target)) + kvm->opts.target.system_wide = true; + + + /* + * generate the event list + */ + kvm->evlist = kvm_live_event_list(); + if (kvm->evlist == NULL) { + err = -1; + goto out; + } + + symbol_conf.nr_events = kvm->evlist->nr_entries; + + if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0) + usage_with_options(live_usage, live_options); + + /* + * perf session + */ + kvm->session = perf_session__new(&file, false, &kvm->tool); + if (kvm->session == NULL) { + err = -ENOMEM; + goto out; + } + kvm->session->evlist = kvm->evlist; + perf_session__set_id_hdr_size(kvm->session); + 
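kvm_live_event_list() turns each "subsystem:event" string from kvm_events_tp into a (sys, name) pair for perf_evlist__add_newtp() by splitting on the first ':'. A standalone version of that split (split_tracepoint() is a hypothetical helper name, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* split "subsystem:event" in place on a private copy */
static int split_tracepoint(const char *tp, char **sys, char **name)
{
	char *copy = strdup(tp), *sep;

	if (!copy)
		return -1;
	sep = strchr(copy, ':');
	if (!sep) {
		fprintf(stderr, "Error parsing %s: subsystem delimiter not found\n", tp);
		free(copy);
		return -1;
	}
	*sep = '\0';
	*sys = copy;
	*name = sep + 1;
	return 0;
}

int main(void)
{
	char *sys, *name;

	if (!split_tracepoint("kvm:kvm_exit", &sys, &name)) {
		printf("sys=%s name=%s\n", sys, name);
		free(sys);	/* one buffer: name points into it */
	}
	return 0;
}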
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target, + kvm->evlist->threads, false); + err = kvm_live_open_events(kvm); + if (err) + goto out; + + err = kvm_events_live_report(kvm); + +out: + exit_browser(0); + + if (kvm->session) + perf_session__delete(kvm->session); + kvm->session = NULL; + if (kvm->evlist) + perf_evlist__delete(kvm->evlist); + + return err; +} +#endif + static void print_kvm_stat_usage(void) { printf("Usage: perf kvm stat <command>\n\n"); @@ -860,28 +1571,47 @@ static void print_kvm_stat_usage(void) printf("# Available commands:\n"); printf("\trecord: record kvm events\n"); printf("\treport: report statistical data of kvm events\n"); + printf("\tlive: live reporting of statistical data of kvm events\n"); printf("\nOtherwise, it is the alias of 'perf stat':\n"); } -static int kvm_cmd_stat(struct perf_kvm *kvm, int argc, const char **argv) +static int kvm_cmd_stat(const char *file_name, int argc, const char **argv) { + struct perf_kvm_stat kvm = { + .file_name = file_name, + + .trace_vcpu = -1, + .report_event = "vmexit", + .sort_key = "sample", + + .exit_reasons = svm_exit_reasons, + .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons), + .exit_reasons_isa = "SVM", + }; + if (argc == 1) { print_kvm_stat_usage(); goto perf_stat; } if (!strncmp(argv[1], "rec", 3)) - return kvm_events_record(kvm, argc - 1, argv + 1); + return kvm_events_record(&kvm, argc - 1, argv + 1); if (!strncmp(argv[1], "rep", 3)) - return kvm_events_report(kvm, argc - 1 , argv + 1); + return kvm_events_report(&kvm, argc - 1 , argv + 1); + +#ifdef HAVE_TIMERFD_SUPPORT + if (!strncmp(argv[1], "live", 4)) + return kvm_events_live(&kvm, argc - 1 , argv + 1); +#endif perf_stat: return cmd_stat(argc, argv, NULL); } +#endif -static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv) +static int __cmd_record(const char *file_name, int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; @@ -890,7 +1620,7 @@ static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv) rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("record"); rec_argv[i++] = strdup("-o"); - rec_argv[i++] = strdup(kvm->file_name); + rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; @@ -899,7 +1629,7 @@ static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv) +static int __cmd_report(const char *file_name, int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; @@ -908,7 +1638,7 @@ static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv) rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("report"); rec_argv[i++] = strdup("-i"); - rec_argv[i++] = strdup(kvm->file_name); + rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; @@ -917,7 +1647,8 @@ static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv) return cmd_report(i, rec_argv, NULL); } -static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv) +static int +__cmd_buildid_list(const char *file_name, int argc, const char **argv) { int rec_argc, i = 0, j; const char **rec_argv; @@ -926,7 +1657,7 @@ static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv) rec_argv = calloc(rec_argc + 1, sizeof(char *)); rec_argv[i++] = strdup("buildid-list"); rec_argv[i++] = strdup("-i"); 
- rec_argv[i++] = strdup(kvm->file_name); + rec_argv[i++] = strdup(file_name); for (j = 1; j < argc; j++, i++) rec_argv[i] = argv[j]; @@ -937,20 +1668,11 @@ static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv) int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) { - struct perf_kvm kvm = { - .trace_vcpu = -1, - .report_event = "vmexit", - .sort_key = "sample", - - .exit_reasons = svm_exit_reasons, - .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons), - .exit_reasons_isa = "SVM", - }; - + const char *file_name = NULL; const struct option kvm_options[] = { - OPT_STRING('i', "input", &kvm.file_name, "file", + OPT_STRING('i', "input", &file_name, "file", "Input file name"), - OPT_STRING('o', "output", &kvm.file_name, "file", + OPT_STRING('o', "output", &file_name, "file", "Output file name"), OPT_BOOLEAN(0, "guest", &perf_guest, "Collect guest os data"), @@ -965,52 +1687,49 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) "file", "file saving guest os /proc/kallsyms"), OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules, "file", "file saving guest os /proc/modules"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), OPT_END() }; - - const char * const kvm_usage[] = { - "perf kvm [<options>] {top|record|report|diff|buildid-list|stat}", - NULL - }; + const char *const kvm_subcommands[] = { "top", "record", "report", "diff", + "buildid-list", "stat", NULL }; + const char *kvm_usage[] = { NULL, NULL }; perf_host = 0; perf_guest = 1; - argc = parse_options(argc, argv, kvm_options, kvm_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage, + PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) usage_with_options(kvm_usage, kvm_options); if (!perf_host) perf_guest = 1; - if (!kvm.file_name) { - if (perf_host && !perf_guest) - kvm.file_name = strdup("perf.data.host"); - else if (!perf_host && perf_guest) - kvm.file_name = strdup("perf.data.guest"); - else - kvm.file_name = strdup("perf.data.kvm"); + if (!file_name) { + file_name = get_filename_for_perf_kvm(); - if (!kvm.file_name) { + if (!file_name) { pr_err("Failed to allocate memory for filename\n"); return -ENOMEM; } } if (!strncmp(argv[0], "rec", 3)) - return __cmd_record(&kvm, argc, argv); + return __cmd_record(file_name, argc, argv); else if (!strncmp(argv[0], "rep", 3)) - return __cmd_report(&kvm, argc, argv); + return __cmd_report(file_name, argc, argv); else if (!strncmp(argv[0], "diff", 4)) return cmd_diff(argc, argv, NULL); else if (!strncmp(argv[0], "top", 3)) return cmd_top(argc, argv, NULL); else if (!strncmp(argv[0], "buildid-list", 12)) - return __cmd_buildid_list(&kvm, argc, argv); + return __cmd_buildid_list(file_name, argc, argv); +#if defined(__i386__) || defined(__x86_64__) else if (!strncmp(argv[0], "stat", 4)) - return kvm_cmd_stat(&kvm, argc, argv); + return kvm_cmd_stat(file_name, argc, argv); +#endif else usage_with_options(kvm_usage, kvm_options); diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index 1948eceb517..011195e38f2 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c @@ -13,49 +13,64 @@ #include "util/parse-events.h" #include "util/cache.h" +#include "util/pmu.h" +#include "util/parse-options.h" int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) { + int i; + const struct option list_options[] = { + OPT_END() + }; + const char * const list_usage[] = { + "perf 
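cmd_kvm() now carries only a file name instead of a full struct, defaults it via get_filename_for_perf_kvm(), and dispatches on a command prefix. A sketch of the same defaulting and prefix dispatch; default_kvm_filename() is assumed to mirror the open-coded logic the patch removes (perf.data.host / perf.data.guest / perf.data.kvm):

#include <stdio.h>
#include <string.h>

static int perf_host = 0, perf_guest = 1;

/* assumed stand-in for get_filename_for_perf_kvm() */
static const char *default_kvm_filename(void)
{
	if (perf_host && !perf_guest)
		return "perf.data.host";
	if (!perf_host && perf_guest)
		return "perf.data.guest";
	return "perf.data.kvm";
}

int main(int argc, char **argv)
{
	const char *cmd = argc > 1 ? argv[1] : "report";

	printf("data file: %s\n", default_kvm_filename());

	/* prefix match, so "rec", "reco", "record" all reach the record path */
	if (!strncmp(cmd, "rec", 3))
		puts("-> record");
	else if (!strncmp(cmd, "rep", 3))
		puts("-> report");
	else if (!strncmp(cmd, "stat", 4))
		puts("-> stat");
	else
		puts("-> usage");
	return 0;
}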
list [hw|sw|cache|tracepoint|pmu|event_glob]", + NULL + }; + + argc = parse_options(argc, argv, list_options, list_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + setup_pager(); - if (argc == 1) + if (argc == 0) { print_events(NULL, false); - else { - int i; - - for (i = 1; i < argc; ++i) { - if (i > 2) - putchar('\n'); - if (strncmp(argv[i], "tracepoint", 10) == 0) - print_tracepoint_events(NULL, NULL, false); - else if (strcmp(argv[i], "hw") == 0 || - strcmp(argv[i], "hardware") == 0) - print_events_type(PERF_TYPE_HARDWARE); - else if (strcmp(argv[i], "sw") == 0 || - strcmp(argv[i], "software") == 0) - print_events_type(PERF_TYPE_SOFTWARE); - else if (strcmp(argv[i], "cache") == 0 || - strcmp(argv[i], "hwcache") == 0) - print_hwcache_events(NULL, false); - else if (strcmp(argv[i], "--raw-dump") == 0) - print_events(NULL, true); - else { - char *sep = strchr(argv[i], ':'), *s; - int sep_idx; - - if (sep == NULL) { - print_events(argv[i], false); - continue; - } - sep_idx = sep - argv[i]; - s = strdup(argv[i]); - if (s == NULL) - return -1; - - s[sep_idx] = '\0'; - print_tracepoint_events(s, s + sep_idx + 1, false); - free(s); + return 0; + } + + for (i = 0; i < argc; ++i) { + if (i) + putchar('\n'); + if (strncmp(argv[i], "tracepoint", 10) == 0) + print_tracepoint_events(NULL, NULL, false); + else if (strcmp(argv[i], "hw") == 0 || + strcmp(argv[i], "hardware") == 0) + print_events_type(PERF_TYPE_HARDWARE); + else if (strcmp(argv[i], "sw") == 0 || + strcmp(argv[i], "software") == 0) + print_events_type(PERF_TYPE_SOFTWARE); + else if (strcmp(argv[i], "cache") == 0 || + strcmp(argv[i], "hwcache") == 0) + print_hwcache_events(NULL, false); + else if (strcmp(argv[i], "pmu") == 0) + print_pmu_events(NULL, false); + else if (strcmp(argv[i], "--raw-dump") == 0) + print_events(NULL, true); + else { + char *sep = strchr(argv[i], ':'), *s; + int sep_idx; + + if (sep == NULL) { + print_events(argv[i], false); + continue; } + sep_idx = sep - argv[i]; + s = strdup(argv[i]); + if (s == NULL) + return -1; + + s[sep_idx] = '\0'; + print_tracepoint_events(s, s + sep_idx + 1, false); + free(s); } } return 0; diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 6f5f328157a..6148afc995c 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -15,6 +15,7 @@ #include "util/debug.h" #include "util/session.h" #include "util/tool.h" +#include "util/data.h" #include <sys/types.h> #include <sys/prctl.h> @@ -56,7 +57,9 @@ struct lock_stat { unsigned int nr_readlock; unsigned int nr_trylock; + /* these times are in nano sec. 
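cmd_list() still dispatches on the keyword with a chain of strcmp() calls, now extended with "pmu". Purely as a design alternative, not what the patch does, the same dispatch could be table-driven, which keeps every keyword next to its handler; a hypothetical sketch with stub printers standing in for print_events_type() and friends:

#include <stdio.h>
#include <string.h>

struct list_cmd {
	const char *name;
	void (*print)(void);
};

static void print_hw(void)    { puts("hardware events ..."); }
static void print_sw(void)    { puts("software events ..."); }
static void print_cache(void) { puts("hw cache events ..."); }
static void print_pmu(void)   { puts("pmu events ..."); }

static const struct list_cmd list_cmds[] = {
	{ "hw", print_hw },       { "hardware", print_hw },
	{ "sw", print_sw },       { "software", print_sw },
	{ "cache", print_cache }, { "hwcache", print_cache },
	{ "pmu", print_pmu },
};

int main(int argc, char **argv)
{
	int i;
	unsigned int j;

	for (i = 1; i < argc; i++) {
		for (j = 0; j < sizeof(list_cmds) / sizeof(list_cmds[0]); j++) {
			if (!strcmp(argv[i], list_cmds[j].name)) {
				list_cmds[j].print();
				break;
			}
		}
	}
	return 0;
}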
*/ + u64 avg_wait_time; u64 wait_time_total; u64 wait_time_min; u64 wait_time_max; @@ -208,6 +211,7 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid) SINGLE_KEY(nr_acquired) SINGLE_KEY(nr_contended) +SINGLE_KEY(avg_wait_time) SINGLE_KEY(wait_time_total) SINGLE_KEY(wait_time_max) @@ -244,6 +248,7 @@ static struct rb_root result; /* place to store sorted data */ struct lock_key keys[] = { DEF_KEY_LOCK(acquired, nr_acquired), DEF_KEY_LOCK(contended, nr_contended), + DEF_KEY_LOCK(avg_wait, avg_wait_time), DEF_KEY_LOCK(wait_total, wait_time_total), DEF_KEY_LOCK(wait_min, wait_time_min), DEF_KEY_LOCK(wait_max, wait_time_max), @@ -321,10 +326,12 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name) new->addr = addr; new->name = zalloc(sizeof(char) * strlen(name) + 1); - if (!new->name) + if (!new->name) { + free(new); goto alloc_failed; - strcpy(new->name, name); + } + strcpy(new->name, name); new->wait_time_min = ULLONG_MAX; list_add(&new->hash_entry, entry); @@ -335,8 +342,6 @@ alloc_failed: return NULL; } -static const char *input_name; - struct trace_lock_handler { int (*acquire_event)(struct perf_evsel *evsel, struct perf_sample *sample); @@ -402,17 +407,17 @@ static int report_lock_acquire_event(struct perf_evsel *evsel, ls = lock_stat_findnew(addr, name); if (!ls) - return -1; + return -ENOMEM; if (ls->discard) return 0; ts = thread_stat_findnew(sample->tid); if (!ts) - return -1; + return -ENOMEM; seq = get_seq(ts, addr); if (!seq) - return -1; + return -ENOMEM; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: @@ -448,7 +453,6 @@ broken: list_del(&seq->list); free(seq); goto end; - break; default: BUG_ON("Unknown state of lock sequence found!\n"); break; @@ -475,17 +479,17 @@ static int report_lock_acquired_event(struct perf_evsel *evsel, ls = lock_stat_findnew(addr, name); if (!ls) - return -1; + return -ENOMEM; if (ls->discard) return 0; ts = thread_stat_findnew(sample->tid); if (!ts) - return -1; + return -ENOMEM; seq = get_seq(ts, addr); if (!seq) - return -1; + return -ENOMEM; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: @@ -510,8 +514,6 @@ static int report_lock_acquired_event(struct perf_evsel *evsel, list_del(&seq->list); free(seq); goto end; - break; - default: BUG_ON("Unknown state of lock sequence found!\n"); break; @@ -519,6 +521,7 @@ static int report_lock_acquired_event(struct perf_evsel *evsel, seq->state = SEQ_STATE_ACQUIRED; ls->nr_acquired++; + ls->avg_wait_time = ls->nr_contended ? 
ls->wait_time_total/ls->nr_contended : 0; seq->prev_event_time = sample->time; end: return 0; @@ -538,17 +541,17 @@ static int report_lock_contended_event(struct perf_evsel *evsel, ls = lock_stat_findnew(addr, name); if (!ls) - return -1; + return -ENOMEM; if (ls->discard) return 0; ts = thread_stat_findnew(sample->tid); if (!ts) - return -1; + return -ENOMEM; seq = get_seq(ts, addr); if (!seq) - return -1; + return -ENOMEM; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: @@ -566,7 +569,6 @@ static int report_lock_contended_event(struct perf_evsel *evsel, list_del(&seq->list); free(seq); goto end; - break; default: BUG_ON("Unknown state of lock sequence found!\n"); break; @@ -574,6 +576,7 @@ static int report_lock_contended_event(struct perf_evsel *evsel, seq->state = SEQ_STATE_CONTENDED; ls->nr_contended++; + ls->avg_wait_time = ls->wait_time_total/ls->nr_contended; seq->prev_event_time = sample->time; end: return 0; @@ -593,22 +596,21 @@ static int report_lock_release_event(struct perf_evsel *evsel, ls = lock_stat_findnew(addr, name); if (!ls) - return -1; + return -ENOMEM; if (ls->discard) return 0; ts = thread_stat_findnew(sample->tid); if (!ts) - return -1; + return -ENOMEM; seq = get_seq(ts, addr); if (!seq) - return -1; + return -ENOMEM; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: goto end; - break; case SEQ_STATE_ACQUIRED: break; case SEQ_STATE_READ_ACQUIRED: @@ -626,7 +628,6 @@ static int report_lock_release_event(struct perf_evsel *evsel, ls->discard = 1; bad_hist[BROKEN_RELEASE]++; goto free_seq; - break; default: BUG_ON("Unknown state of lock sequence found!\n"); break; @@ -692,7 +693,7 @@ static void print_bad_events(int bad, int total) pr_info("\n=== output for debug===\n\n"); pr_info("bad: %d, total: %d\n", bad, total); - pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100); + pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100); pr_info("histogram of events caused bad sequence\n"); for (i = 0; i < BROKEN_MAX; i++) pr_info(" %10s: %d\n", name[i], bad_hist[i]); @@ -709,6 +710,7 @@ static void print_result(void) pr_info("%10s ", "acquired"); pr_info("%10s ", "contended"); + pr_info("%15s ", "avg wait (ns)"); pr_info("%15s ", "total wait (ns)"); pr_info("%15s ", "max wait (ns)"); pr_info("%15s ", "min wait (ns)"); @@ -740,6 +742,7 @@ static void print_result(void) pr_info("%10u ", st->nr_acquired); pr_info("%10u ", st->nr_contended); + pr_info("%15" PRIu64 " ", st->avg_wait_time); pr_info("%15" PRIu64 " ", st->wait_time_total); pr_info("%15" PRIu64 " ", st->wait_time_max); pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ? 
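The new avg_wait_time field is recomputed on both the contended and the acquired paths as wait_time_total / nr_contended, with a guard for locks that were never contended. A tiny self-contained mirror of that update:

#include <inttypes.h>
#include <stdio.h>

struct lock_stat {
	unsigned int nr_contended;
	uint64_t wait_time_total;	/* nanoseconds */
	uint64_t avg_wait_time;
};

/* same guard as the patch: avoid dividing by zero for uncontended locks */
static void update_avg_wait(struct lock_stat *ls)
{
	ls->avg_wait_time = ls->nr_contended ?
			    ls->wait_time_total / ls->nr_contended : 0;
}

int main(void)
{
	struct lock_stat ls = { .nr_contended = 3, .wait_time_total = 4500 };

	update_avg_wait(&ls);
	printf("avg wait: %" PRIu64 " ns\n", ls.avg_wait_time);
	return 0;
}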
@@ -764,7 +767,7 @@ static void dump_threads(void) while (node) { st = container_of(node, struct thread_stat, rb); t = perf_session__findnew(session, st->tid); - pr_info("%10d: %s\n", st->tid, t->comm); + pr_info("%10d: %s\n", st->tid, thread__comm_str(t)); node = rb_next(node); }; } @@ -807,7 +810,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, struct perf_evsel *evsel, struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, sample->tid); + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -815,14 +819,26 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, return -1; } - if (evsel->handler.func != NULL) { - tracepoint_handler f = evsel->handler.func; + if (evsel->handler != NULL) { + tracepoint_handler f = evsel->handler; return f(evsel, sample); } return 0; } +static void sort_result(void) +{ + unsigned int i; + struct lock_stat *st; + + for (i = 0; i < LOCKHASH_SIZE; i++) { + list_for_each_entry(st, &lockhash_table[i], hash_entry) { + insert_to_result(st, compare); + } + } +} + static const struct perf_evsel_str_handler lock_tracepoints[] = { { "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */ { "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ @@ -830,59 +846,59 @@ static const struct perf_evsel_str_handler lock_tracepoints[] = { { "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */ }; -static int read_events(void) +static int __cmd_report(bool display_info) { + int err = -EINVAL; struct perf_tool eops = { .sample = process_sample_event, .comm = perf_event__process_comm, .ordered_samples = true, }; - session = perf_session__new(input_name, O_RDONLY, 0, false, &eops); + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; + + session = perf_session__new(&file, false, &eops); if (!session) { pr_err("Initializing perf session failed\n"); - return -1; + return -ENOMEM; } + if (!perf_session__has_traces(session, "lock record")) + goto out_delete; + if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) { pr_err("Initializing perf session tracepoint handlers failed\n"); - return -1; + goto out_delete; } - return perf_session__process_events(session, &eops); -} - -static void sort_result(void) -{ - unsigned int i; - struct lock_stat *st; + if (select_key()) + goto out_delete; - for (i = 0; i < LOCKHASH_SIZE; i++) { - list_for_each_entry(st, &lockhash_table[i], hash_entry) { - insert_to_result(st, compare); - } - } -} + err = perf_session__process_events(session, &eops); + if (err) + goto out_delete; -static int __cmd_report(void) -{ setup_pager(); + if (display_info) /* used for info subcommand */ + err = dump_info(); + else { + sort_result(); + print_result(); + } - if ((select_key() != 0) || - (read_events() != 0)) - return -1; - - sort_result(); - print_result(); - - return 0; +out_delete: + perf_session__delete(session); + return err; } static int __cmd_record(int argc, const char **argv) { const char *record_args[] = { - "record", "-R", "-f", "-m", "1024", "-c", "1", + "record", "-R", "-m", "1024", "-c", "1", }; - unsigned int rec_argc, i, j; + unsigned int rec_argc, i, j, ret; const char **rec_argv; for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) { @@ -899,7 +915,7 @@ static int __cmd_record(int argc, const char **argv) rec_argc += 2 * 
ARRAY_SIZE(lock_tracepoints); rec_argv = calloc(rec_argc + 1, sizeof(char *)); - if (rec_argv == NULL) + if (!rec_argv) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(record_args); i++) @@ -915,7 +931,9 @@ static int __cmd_record(int argc, const char **argv) BUG_ON(i != rec_argc); - return cmd_record(i, rec_argv, NULL); + ret = cmd_record(i, rec_argv, NULL); + free(rec_argv); + return ret; } int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) @@ -935,7 +953,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) }; const struct option report_options[] = { OPT_STRING('k', "key", &sort_key, "acquired", - "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"), + "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"), /* TODO: type */ OPT_END() }; @@ -943,8 +961,10 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) "perf lock info [<options>]", NULL }; - const char * const lock_usage[] = { - "perf lock [<options>] {record|report|script|info}", + const char *const lock_subcommands[] = { "record", "report", "script", + "info", NULL }; + const char *lock_usage[] = { + NULL, NULL }; const char * const report_usage[] = { @@ -958,8 +978,8 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) for (i = 0; i < LOCKHASH_SIZE; i++) INIT_LIST_HEAD(lockhash_table + i); - argc = parse_options(argc, argv, lock_options, lock_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands, + lock_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) usage_with_options(lock_usage, lock_options); @@ -973,7 +993,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) if (argc) usage_with_options(report_usage, report_options); } - __cmd_report(); + rc = __cmd_report(false); } else if (!strcmp(argv[0], "script")) { /* Aliased to 'perf script' */ return cmd_script(argc, argv, prefix); @@ -986,11 +1006,7 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) } /* recycling report_lock_ops */ trace_handler = &report_lock_ops; - setup_pager(); - if (read_events() != 0) - rc = -1; - else - rc = dump_info(); + rc = __cmd_report(true); } else { usage_with_options(lock_usage, lock_options); } diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c new file mode 100644 index 00000000000..4a1a6c94a5e --- /dev/null +++ b/tools/perf/builtin-mem.c @@ -0,0 +1,246 @@ +#include "builtin.h" +#include "perf.h" + +#include "util/parse-options.h" +#include "util/trace-event.h" +#include "util/tool.h" +#include "util/session.h" +#include "util/data.h" + +#define MEM_OPERATION_LOAD "load" +#define MEM_OPERATION_STORE "store" + +static const char *mem_operation = MEM_OPERATION_LOAD; + +struct perf_mem { + struct perf_tool tool; + char const *input_name; + bool hide_unresolved; + bool dump_raw; + const char *cpu_list; + DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); +}; + +static int __cmd_record(int argc, const char **argv) +{ + int rec_argc, i = 0, j; + const char **rec_argv; + char event[64]; + int ret; + + rec_argc = argc + 4; + rec_argv = calloc(rec_argc + 1, sizeof(char *)); + if (!rec_argv) + return -1; + + rec_argv[i++] = strdup("record"); + if (!strcmp(mem_operation, MEM_OPERATION_LOAD)) + rec_argv[i++] = strdup("-W"); + rec_argv[i++] = strdup("-d"); + rec_argv[i++] = strdup("-e"); + + if (strcmp(mem_operation, MEM_OPERATION_LOAD)) + sprintf(event, "cpu/mem-stores/pp"); + 
else + sprintf(event, "cpu/mem-loads/pp"); + + rec_argv[i++] = strdup(event); + for (j = 1; j < argc; j++, i++) + rec_argv[i] = argv[j]; + + ret = cmd_record(i, rec_argv, NULL); + free(rec_argv); + return ret; +} + +static int +dump_raw_samples(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct perf_mem *mem = container_of(tool, struct perf_mem, tool); + struct addr_location al; + const char *fmt; + + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { + fprintf(stderr, "problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + if (al.filtered || (mem->hide_unresolved && al.sym == NULL)) + return 0; + + if (al.map != NULL) + al.map->dso->hit = 1; + + if (symbol_conf.field_sep) { + fmt = "%d%s%d%s0x%"PRIx64"%s0x%"PRIx64"%s%"PRIu64 + "%s0x%"PRIx64"%s%s:%s\n"; + } else { + fmt = "%5d%s%5d%s0x%016"PRIx64"%s0x016%"PRIx64 + "%s%5"PRIu64"%s0x%06"PRIx64"%s%s:%s\n"; + symbol_conf.field_sep = " "; + } + + printf(fmt, + sample->pid, + symbol_conf.field_sep, + sample->tid, + symbol_conf.field_sep, + sample->ip, + symbol_conf.field_sep, + sample->addr, + symbol_conf.field_sep, + sample->weight, + symbol_conf.field_sep, + sample->data_src, + symbol_conf.field_sep, + al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???", + al.sym ? al.sym->name : "???"); + + return 0; +} + +static int process_sample_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine) +{ + return dump_raw_samples(tool, event, sample, machine); +} + +static int report_raw_events(struct perf_mem *mem) +{ + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; + int err = -EINVAL; + int ret; + struct perf_session *session = perf_session__new(&file, false, + &mem->tool); + + if (session == NULL) + return -ENOMEM; + + if (mem->cpu_list) { + ret = perf_session__cpu_bitmap(session, mem->cpu_list, + mem->cpu_bitmap); + if (ret) + goto out_delete; + } + + if (symbol__init() < 0) + return -1; + + printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n"); + + err = perf_session__process_events(session, &mem->tool); + if (err) + return err; + + return 0; + +out_delete: + perf_session__delete(session); + return err; +} + +static int report_events(int argc, const char **argv, struct perf_mem *mem) +{ + const char **rep_argv; + int ret, i = 0, j, rep_argc; + + if (mem->dump_raw) + return report_raw_events(mem); + + rep_argc = argc + 3; + rep_argv = calloc(rep_argc + 1, sizeof(char *)); + if (!rep_argv) + return -1; + + rep_argv[i++] = strdup("report"); + rep_argv[i++] = strdup("--mem-mode"); + rep_argv[i++] = strdup("-n"); /* display number of samples */ + + /* + * there is no weight (cost) associated with stores, so don't print + * the column + */ + if (strcmp(mem_operation, MEM_OPERATION_LOAD)) + rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr," + "dso_daddr,tlb,locked"); + + for (j = 1; j < argc; j++, i++) + rep_argv[i] = argv[j]; + + ret = cmd_report(i, rep_argv, NULL); + free(rep_argv); + return ret; +} + +int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused) +{ + struct stat st; + struct perf_mem mem = { + .tool = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + .comm = perf_event__process_comm, + .lost = perf_event__process_lost, + .fork = perf_event__process_fork, + .build_id = 
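dump_raw_samples() prints each sample either in a fixed-width, human-readable layout or, when -x/--field-separator is given, as separator-joined columns suitable for scripts. A reduced sketch of that dual-format idea (fewer columns than the real output, and the sample values in main() are made up):

#include <inttypes.h>
#include <stdio.h>

static void print_sample(const char *field_sep, int pid, int tid,
			 uint64_t ip, uint64_t addr, uint64_t weight)
{
	if (field_sep)
		/* compact, machine-friendly: join columns with the separator */
		printf("%d%s%d%s0x%" PRIx64 "%s0x%" PRIx64 "%s%" PRIu64 "\n",
		       pid, field_sep, tid, field_sep, ip, field_sep,
		       addr, field_sep, weight);
	else
		/* wide, fixed-width columns for reading on a terminal */
		printf("%5d %5d 0x%016" PRIx64 " 0x%016" PRIx64 " %5" PRIu64 "\n",
		       pid, tid, ip, addr, weight);
}

int main(void)
{
	print_sample(NULL, 1234, 1234, 0x400123ULL, 0x7fffdeadbeefULL, 42);
	print_sample(",", 1234, 1234, 0x400123ULL, 0x7fffdeadbeefULL, 42);
	return 0;
}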
perf_event__process_build_id, + .ordered_samples = true, + }, + .input_name = "perf.data", + }; + const struct option mem_options[] = { + OPT_STRING('t', "type", &mem_operation, + "type", "memory operations(load/store)"), + OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw, + "dump raw samples in ASCII"), + OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved, + "Only display entries resolved to a symbol"), + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_STRING('C', "cpu", &mem.cpu_list, "cpu", + "list of cpus to profile"), + OPT_STRING('x', "field-separator", &symbol_conf.field_sep, + "separator", + "separator for columns, no spaces will be added" + " between columns '.' is reserved."), + OPT_END() + }; + const char *const mem_subcommands[] = { "record", "report", NULL }; + const char *mem_usage[] = { + NULL, + NULL + }; + + + argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, + mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); + + if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation)) + usage_with_options(mem_usage, mem_options); + + if (!mem.input_name || !strlen(mem.input_name)) { + if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) + mem.input_name = "-"; + else + mem.input_name = "perf.data"; + } + + if (!strncmp(argv[0], "rec", 3)) + return __cmd_record(argc, argv); + else if (!strncmp(argv[0], "rep", 3)) + return report_events(argc, argv, &mem); + else + usage_with_options(mem_usage, mem_options); + + return 0; +} diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index de38a034b12..c63fa292507 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -37,7 +37,7 @@ #include "util/strfilter.h" #include "util/symbol.h" #include "util/debug.h" -#include "util/debugfs.h" +#include <api/fs/debugfs.h> #include "util/parse-options.h" #include "util/probe-finder.h" #include "util/probe-event.h" @@ -59,7 +59,7 @@ static struct { struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; struct line_range line_range; - const char *target; + char *target; int max_probe_points; struct strfilter *filter; } params; @@ -98,7 +98,10 @@ static int set_target(const char *ptr) * short module name. 
*/ if (!params.target && ptr && *ptr == '/') { - params.target = ptr; + params.target = strdup(ptr); + if (!params.target) + return -ENOMEM; + found = 1; buf = ptr + (strlen(ptr) - 3); @@ -116,6 +119,9 @@ static int parse_probe_event_argv(int argc, const char **argv) char *buf; found_target = set_target(argv[0]); + if (found_target < 0) + return found_target; + if (found_target && argc == 1) return 0; @@ -169,25 +175,38 @@ static int opt_set_target(const struct option *opt, const char *str, int unset __maybe_unused) { int ret = -ENOENT; + char *tmp; if (str && !params.target) { if (!strcmp(opt->long_name, "exec")) params.uprobes = true; -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT else if (!strcmp(opt->long_name, "module")) params.uprobes = false; #endif else return ret; - params.target = str; + /* Expand given path to absolute path, except for modulename */ + if (params.uprobes || strchr(str, '/')) { + tmp = realpath(str, NULL); + if (!tmp) { + pr_warning("Failed to get the absolute path of %s: %m\n", str); + return ret; + } + } else { + tmp = strdup(str); + if (!tmp) + return -ENOMEM; + } + params.target = tmp; ret = 0; } return ret; } -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT static int opt_show_lines(const struct option *opt __maybe_unused, const char *str, int unset __maybe_unused) { @@ -204,7 +223,6 @@ static int opt_show_lines(const struct option *opt __maybe_unused, params.show_lines = true; ret = parse_line_range_desc(str, ¶ms.line_range); - INIT_LIST_HEAD(¶ms.line_range.line_list); return ret; } @@ -250,14 +268,42 @@ static int opt_set_filter(const struct option *opt __maybe_unused, return 0; } -int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) +static int init_params(void) +{ + return line_range__init(¶ms.line_range); +} + +static void cleanup_params(void) +{ + int i; + + for (i = 0; i < params.nevents; i++) + clear_perf_probe_event(params.events + i); + if (params.dellist) + strlist__delete(params.dellist); + line_range__clear(¶ms.line_range); + free(params.target); + if (params.filter) + strfilter__delete(params.filter); + memset(¶ms, 0, sizeof(params)); +} + +static void pr_err_with_code(const char *msg, int err) +{ + pr_err("%s", msg); + pr_debug(" Reason: %s (Code: %d)", strerror(-err), err); + pr_err("\n"); +} + +static int +__cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) { const char * const probe_usage[] = { "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]", "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", "perf probe [<options>] --del '[GROUP:]EVENT' ...", "perf probe --list", -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT "perf probe [<options>] --line 'LINEDESC'", "perf probe [<options>] --vars 'PROBEPOINT'", #endif @@ -271,7 +317,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", opt_del_probe_event), OPT_CALLBACK('a', "add", NULL, -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT" " [[NAME=]ARG ...]", #else @@ -283,7 +329,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) "\t\tFUNC:\tFunction name\n" "\t\tOFF:\tOffset from function entry (in byte)\n" "\t\t%return:\tPut the probe at function return\n" -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT "\t\tSRC:\tSource code path\n" "\t\tRL:\tRelative line number from function entry.\n" "\t\tAL:\tAbsolute line number in file.\n" @@ -296,7 
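opt_set_target() now expands user-supplied paths with realpath() so the stored probe target stays valid, while bare module names are only strdup()'d. A sketch of that rule (expand_target() is a made-up wrapper, not a perf function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* expand anything that looks like a path; leave module names alone */
static char *expand_target(const char *str, int is_uprobe)
{
	if (is_uprobe || strchr(str, '/')) {
		char *abs = realpath(str, NULL);	/* malloc'd absolute path */

		if (!abs)
			fprintf(stderr, "Failed to get the absolute path of %s: %m\n", str);
		return abs;
	}
	return strdup(str);	/* e.g. a short module name such as "kvm" */
}

int main(void)
{
	char *t = expand_target("./a.out", 1);

	if (t) {
		printf("target: %s\n", t);
		free(t);
	}
	return 0;
}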
+342,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) opt_add_probe_event), OPT_BOOLEAN('f', "force", ¶ms.force_add, "forcibly add events" " with existing name"), -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT OPT_CALLBACK('L', "line", NULL, "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]", "Show source code lines.", opt_show_lines), @@ -325,6 +371,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) opt_set_filter), OPT_CALLBACK('x', "exec", NULL, "executable|path", "target executable name or path", opt_set_target), + OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, + "Disable symbol demangling"), OPT_END() }; int ret; @@ -338,7 +386,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) } ret = parse_probe_event_argv(argc, argv); if (ret < 0) { - pr_err(" Error: Parse Error. (%d)\n", ret); + pr_err_with_code(" Error: Command Parse Error.", ret); return ret; } } @@ -378,8 +426,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) } ret = show_perf_probe_events(); if (ret < 0) - pr_err(" Error: Failed to show event list. (%d)\n", - ret); + pr_err_with_code(" Error: Failed to show event list.", ret); return ret; } if (params.show_funcs) { @@ -402,14 +449,14 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) ret = show_available_funcs(params.target, params.filter, params.uprobes); strfilter__delete(params.filter); + params.filter = NULL; if (ret < 0) - pr_err(" Error: Failed to show functions." - " (%d)\n", ret); + pr_err_with_code(" Error: Failed to show functions.", ret); return ret; } -#ifdef DWARF_SUPPORT - if (params.show_lines && !params.uprobes) { +#ifdef HAVE_DWARF_SUPPORT + if (params.show_lines) { if (params.mod_events) { pr_err(" Error: Don't use --line with" " --add/--del.\n"); @@ -422,7 +469,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) ret = show_line_range(¶ms.line_range, params.target); if (ret < 0) - pr_err(" Error: Failed to show lines. (%d)\n", ret); + pr_err_with_code(" Error: Failed to show lines.", ret); return ret; } if (params.show_vars) { @@ -441,17 +488,17 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) params.filter, params.show_ext_vars); strfilter__delete(params.filter); + params.filter = NULL; if (ret < 0) - pr_err(" Error: Failed to show vars. (%d)\n", ret); + pr_err_with_code(" Error: Failed to show vars.", ret); return ret; } #endif if (params.dellist) { ret = del_perf_probe_events(params.dellist); - strlist__delete(params.dellist); if (ret < 0) { - pr_err(" Error: Failed to delete events. (%d)\n", ret); + pr_err_with_code(" Error: Failed to delete events.", ret); return ret; } } @@ -462,9 +509,22 @@ int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) params.target, params.force_add); if (ret < 0) { - pr_err(" Error: Failed to add events. (%d)\n", ret); + pr_err_with_code(" Error: Failed to add events.", ret); return ret; } } return 0; } + +int cmd_probe(int argc, const char **argv, const char *prefix) +{ + int ret; + + ret = init_params(); + if (!ret) { + ret = __cmd_probe(argc, argv, prefix); + cleanup_params(); + } + + return ret; +} diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index e9231659754..378b85b731a 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -5,8 +5,6 @@ * (or a CPU, or a PID) into the perf.data output file - for * later analysis via perf report. 
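Error reporting in 'perf probe' is funneled through the new pr_err_with_code(), which prints the message and, at debug verbosity, decodes the negative errno. A minimal stand-in that always prints the decoded reason (report_error() is a hypothetical name):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* simplified: the real helper only shows the reason under pr_debug() */
static void report_error(const char *msg, int err)
{
	fprintf(stderr, "%s Reason: %s (Code: %d)\n", msg, strerror(-err), err);
}

int main(void)
{
	report_error(" Error: Failed to add events.", -ENOENT);
	return 0;
}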
*/ -#define _FILE_OFFSET_BITS 64 - #include "builtin.h" #include "perf.h" @@ -26,58 +24,35 @@ #include "util/symbol.h" #include "util/cpumap.h" #include "util/thread_map.h" +#include "util/data.h" #include <unistd.h> #include <sched.h> #include <sys/mman.h> -enum write_mode_t { - WRITE_FORCE, - WRITE_APPEND -}; -struct perf_record { +struct record { struct perf_tool tool; - struct perf_record_opts opts; + struct record_opts opts; u64 bytes_written; - const char *output_name; + struct perf_data_file file; struct perf_evlist *evlist; struct perf_session *session; const char *progname; - int output; - unsigned int page_size; int realtime_prio; - enum write_mode_t write_mode; bool no_buildid; bool no_buildid_cache; - bool force; - bool file_new; - bool append_file; long samples; - off_t post_processing_offset; }; -static void advance_output(struct perf_record *rec, size_t size) +static int record__write(struct record *rec, void *bf, size_t size) { - rec->bytes_written += size; -} - -static int write_output(struct perf_record *rec, void *buf, size_t size) -{ - while (size) { - int ret = write(rec->output, buf, size); - - if (ret < 0) { - pr_err("failed to write\n"); - return -1; - } - - size -= ret; - buf += ret; - - rec->bytes_written += ret; + if (perf_data_file__write(rec->session->file, bf, size) < 0) { + pr_err("failed to write perf data, error: %m\n"); + return -1; } + rec->bytes_written += size; return 0; } @@ -86,19 +61,15 @@ static int process_synthesized_event(struct perf_tool *tool, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { - struct perf_record *rec = container_of(tool, struct perf_record, tool); - if (write_output(rec, event, event->header.size) < 0) - return -1; - - return 0; + struct record *rec = container_of(tool, struct record, tool); + return record__write(rec, event, event->header.size); } -static int perf_record__mmap_read(struct perf_record *rec, - struct perf_mmap *md) +static int record__mmap_read(struct record *rec, struct perf_mmap *md) { unsigned int head = perf_mmap__read_head(md); unsigned int old = md->prev; - unsigned char *data = md->base + rec->page_size; + unsigned char *data = md->base + page_size; unsigned long size; void *buf; int rc = 0; @@ -115,7 +86,7 @@ static int perf_record__mmap_read(struct perf_record *rec, size = md->mask + 1 - (old & md->mask); old += size; - if (write_output(rec, buf, size) < 0) { + if (record__write(rec, buf, size) < 0) { rc = -1; goto out; } @@ -125,7 +96,7 @@ static int perf_record__mmap_read(struct perf_record *rec, size = head - old; old += size; - if (write_output(rec, buf, size) < 0) { + if (record__write(rec, buf, size) < 0) { rc = -1; goto out; } @@ -145,168 +116,45 @@ static void sig_handler(int sig) { if (sig == SIGCHLD) child_finished = 1; + else + signr = sig; done = 1; - signr = sig; } -static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg) +static void record__sig_exit(void) { - struct perf_record *rec = arg; - int status; - - if (rec->evlist->workload.pid > 0) { - if (!child_finished) - kill(rec->evlist->workload.pid, SIGTERM); - - wait(&status); - if (WIFSIGNALED(status)) - psignal(WTERMSIG(status), rec->progname); - } - - if (signr == -1 || signr == SIGUSR1) + if (signr == -1) return; signal(signr, SIG_DFL); - kill(getpid(), signr); + raise(signr); } -static bool perf_evlist__equal(struct perf_evlist *evlist, - struct perf_evlist *other) -{ - struct perf_evsel *pos, *pair; - - if (evlist->nr_entries != other->nr_entries) - return false; - - pair = 
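record__mmap_read() copies the newly produced bytes out of the per-CPU ring buffer; when the data wraps past the end of the mask+1-byte window it is written as two chunks. A self-contained sketch of that wrap handling, with emit() standing in for record__write():

#include <stdio.h>

/* 'old' is how far we have consumed, 'head' how far the producer has written */
static void drain_ring(const unsigned char *data, unsigned long mask,
		       unsigned long old, unsigned long head,
		       void (*emit)(const void *, size_t))
{
	if ((old & mask) + (head - old) != (head & mask)) {
		/* wrapped: first the tail of the buffer ... */
		size_t size = mask + 1 - (old & mask);

		emit(&data[old & mask], size);
		old += size;
	}
	/* ... then the remainder from the start of the buffer */
	emit(&data[old & mask], head - old);
}

static void emit_stdout(const void *buf, size_t size)
{
	fwrite(buf, 1, size, stdout);
}

int main(void)
{
	unsigned char ring[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };

	/* producer wrote 6 bytes starting at offset 5: expect "FGH" then "ABC" */
	drain_ring(ring, sizeof(ring) - 1, 5, 11, emit_stdout);
	putchar('\n');
	return 0;
}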
perf_evlist__first(other); - - list_for_each_entry(pos, &evlist->entries, node) { - if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0)) - return false; - pair = perf_evsel__next(pair); - } - - return true; -} - -static int perf_record__open(struct perf_record *rec) +static int record__open(struct record *rec) { + char msg[512]; struct perf_evsel *pos; struct perf_evlist *evlist = rec->evlist; struct perf_session *session = rec->session; - struct perf_record_opts *opts = &rec->opts; + struct record_opts *opts = &rec->opts; int rc = 0; - perf_evlist__config_attrs(evlist, opts); - - if (opts->group) - perf_evlist__set_leader(evlist); - - list_for_each_entry(pos, &evlist->entries, node) { - struct perf_event_attr *attr = &pos->attr; - /* - * Check if parse_single_tracepoint_event has already asked for - * PERF_SAMPLE_TIME. - * - * XXX this is kludgy but short term fix for problems introduced by - * eac23d1c that broke 'perf script' by having different sample_types - * when using multiple tracepoint events when we use a perf binary - * that tries to use sample_id_all on an older kernel. - * - * We need to move counter creation to perf_session, support - * different sample_types, etc. - */ - bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; + perf_evlist__config(evlist, opts); -fallback_missing_features: - if (opts->exclude_guest_missing) - attr->exclude_guest = attr->exclude_host = 0; -retry_sample_id: - attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; + evlist__for_each(evlist, pos) { try_again: if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { - int err = errno; - - if (err == EPERM || err == EACCES) { - ui__error_paranoid(); - rc = -err; - goto out; - } else if (err == ENODEV && opts->target.cpu_list) { - pr_err("No such device - did you specify" - " an out-of-range profile CPU?\n"); - rc = -err; - goto out; - } else if (err == EINVAL) { - if (!opts->exclude_guest_missing && - (attr->exclude_guest || attr->exclude_host)) { - pr_debug("Old kernel, cannot exclude " - "guest or host samples.\n"); - opts->exclude_guest_missing = true; - goto fallback_missing_features; - } else if (!opts->sample_id_all_missing) { - /* - * Old kernel, no attr->sample_id_type_all field - */ - opts->sample_id_all_missing = true; - if (!opts->sample_time && !opts->raw_samples && !time_needed) - attr->sample_type &= ~PERF_SAMPLE_TIME; - - goto retry_sample_id; - } - } - - /* - * If it's cycles then fall back to hrtimer - * based cpu-clock-tick sw counter, which - * is always available even if no PMU support. - * - * PPC returns ENXIO until 2.6.37 (behavior changed - * with commit b0a873e). - */ - if ((err == ENOENT || err == ENXIO) - && attr->type == PERF_TYPE_HARDWARE - && attr->config == PERF_COUNT_HW_CPU_CYCLES) { - + if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) { if (verbose) - ui__warning("The cycles event is not supported, " - "trying to fall back to cpu-clock-ticks\n"); - attr->type = PERF_TYPE_SOFTWARE; - attr->config = PERF_COUNT_SW_CPU_CLOCK; - if (pos->name) { - free(pos->name); - pos->name = NULL; - } + ui__warning("%s\n", msg); goto try_again; } - if (err == ENOENT) { - ui__error("The %s event is not supported.\n", - perf_evsel__name(pos)); - rc = -err; - goto out; - } - - printf("\n"); - error("sys_perf_event_open() syscall returned with %d " - "(%s) for event %s. 
/bin/dmesg may provide " - "additional information.\n", - err, strerror(err), perf_evsel__name(pos)); - -#if defined(__i386__) || defined(__x86_64__) - if (attr->type == PERF_TYPE_HARDWARE && - err == EOPNOTSUPP) { - pr_err("No hardware sampling interrupt available." - " No APIC? If so then you can boot the kernel" - " with the \"lapic\" boot parameter to" - " force-enable it.\n"); - rc = -err; - goto out; - } -#endif - - pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); - rc = -err; + rc = -errno; + perf_evsel__open_strerror(pos, &opts->target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); goto out; } } @@ -324,11 +172,8 @@ try_again: "Consider increasing " "/proc/sys/kernel/perf_event_mlock_kb,\n" "or try again with a smaller value of -m/--mmap_pages.\n" - "(current value: %d)\n", opts->mmap_pages); + "(current value: %u)\n", opts->mmap_pages); rc = -errno; - } else if (!is_power_of_2(opts->mmap_pages)) { - pr_err("--mmap_pages/-m value must be a power of two."); - rc = -EINVAL; } else { pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno)); rc = -errno; @@ -336,62 +181,31 @@ try_again: goto out; } - if (rec->file_new) - session->evlist = evlist; - else { - if (!perf_evlist__equal(session->evlist, evlist)) { - fprintf(stderr, "incompatible append\n"); - rc = -1; - goto out; - } - } - + session->evlist = evlist; perf_session__set_id_hdr_size(session); out: return rc; } -static int process_buildids(struct perf_record *rec) +static int process_buildids(struct record *rec) { - u64 size = lseek(rec->output, 0, SEEK_CUR); + struct perf_data_file *file = &rec->file; + struct perf_session *session = rec->session; + u64 start = session->header.data_offset; + u64 size = lseek(file->fd, 0, SEEK_CUR); if (size == 0) return 0; - rec->session->fd = rec->output; - return __perf_session__process_events(rec->session, rec->post_processing_offset, - size - rec->post_processing_offset, + return __perf_session__process_events(session, start, + size - start, size, &build_id__mark_dso_hit_ops); } -static void perf_record__exit(int status, void *arg) -{ - struct perf_record *rec = arg; - - if (status != 0) - return; - - if (!rec->opts.pipe_output) { - rec->session->header.data_size += rec->bytes_written; - - if (!rec->no_buildid) - process_buildids(rec); - perf_session__write_header(rec->session, rec->evlist, - rec->output, true); - perf_session__delete(rec->session); - perf_evlist__delete(rec->evlist); - symbol__exit(); - } -} - static void perf_event__synthesize_guest_os(struct machine *machine, void *data) { int err; struct perf_tool *tool = data; - - if (machine__is_host(machine)) - return; - /* *As for guest kernel when processing subcommand record&report, *we arrange module mmap prior to guest kernel mmap and trigger @@ -411,10 +225,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data) * have no _text sometimes. 
*/ err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_text"); - if (err < 0) - err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_stext"); + machine); if (err < 0) pr_err("Couldn't record guest kernel [%d]'s reference" " relocation symbol.\n", machine->pid); @@ -425,14 +236,14 @@ static struct perf_event_header finished_round_event = { .type = PERF_RECORD_FINISHED_ROUND, }; -static int perf_record__mmap_read_all(struct perf_record *rec) +static int record__mmap_read_all(struct record *rec) { int i; int rc = 0; for (i = 0; i < rec->evlist->nr_mmaps; i++) { if (rec->evlist->mmap[i].base) { - if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) { + if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) { rc = -1; goto out; } @@ -440,129 +251,104 @@ static int perf_record__mmap_read_all(struct perf_record *rec) } if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA)) - rc = write_output(rec, &finished_round_event, - sizeof(finished_round_event)); + rc = record__write(rec, &finished_round_event, sizeof(finished_round_event)); out: return rc; } -static int __cmd_record(struct perf_record *rec, int argc, const char **argv) +static void record__init_features(struct record *rec) { - struct stat st; - int flags; - int err, output, feat; + struct perf_session *session = rec->session; + int feat; + + for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) + perf_header__set_feat(&session->header, feat); + + if (rec->no_buildid) + perf_header__clear_feat(&session->header, HEADER_BUILD_ID); + + if (!have_tracepoints(&rec->evlist->entries)) + perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); + + if (!rec->opts.branch_stack) + perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); +} + +static volatile int workload_exec_errno; + +/* + * perf_evlist__prepare_workload will send a SIGUSR1 + * if the fork fails, since we asked by setting its + * want_signal to true. 
+ */ +static void workload_exec_failed_signal(int signo __maybe_unused, + siginfo_t *info, + void *ucontext __maybe_unused) +{ + workload_exec_errno = info->si_value.sival_int; + done = 1; + child_finished = 1; +} + +static int __cmd_record(struct record *rec, int argc, const char **argv) +{ + int err; + int status = 0; unsigned long waking = 0; const bool forks = argc > 0; struct machine *machine; struct perf_tool *tool = &rec->tool; - struct perf_record_opts *opts = &rec->opts; - struct perf_evlist *evsel_list = rec->evlist; - const char *output_name = rec->output_name; + struct record_opts *opts = &rec->opts; + struct perf_data_file *file = &rec->file; struct perf_session *session; + bool disabled = false; rec->progname = argv[0]; - rec->page_size = sysconf(_SC_PAGE_SIZE); - - on_exit(perf_record__sig_exit, rec); + atexit(record__sig_exit); signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); - signal(SIGUSR1, sig_handler); + signal(SIGTERM, sig_handler); - if (!output_name) { - if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode)) - opts->pipe_output = true; - else - rec->output_name = output_name = "perf.data"; - } - if (output_name) { - if (!strcmp(output_name, "-")) - opts->pipe_output = true; - else if (!stat(output_name, &st) && st.st_size) { - if (rec->write_mode == WRITE_FORCE) { - char oldname[PATH_MAX]; - snprintf(oldname, sizeof(oldname), "%s.old", - output_name); - unlink(oldname); - rename(output_name, oldname); - } - } else if (rec->write_mode == WRITE_APPEND) { - rec->write_mode = WRITE_FORCE; - } - } - - flags = O_CREAT|O_RDWR; - if (rec->write_mode == WRITE_APPEND) - rec->file_new = 0; - else - flags |= O_TRUNC; - - if (opts->pipe_output) - output = STDOUT_FILENO; - else - output = open(output_name, flags, S_IRUSR | S_IWUSR); - if (output < 0) { - perror("failed to create output file"); - return -1; - } - - rec->output = output; - - session = perf_session__new(output_name, O_WRONLY, - rec->write_mode == WRITE_FORCE, false, NULL); + session = perf_session__new(file, false, NULL); if (session == NULL) { - pr_err("Not enough memory for reading perf file header\n"); + pr_err("Perf session creation failed.\n"); return -1; } rec->session = session; - for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++) - perf_header__set_feat(&session->header, feat); - - if (rec->no_buildid) - perf_header__clear_feat(&session->header, HEADER_BUILD_ID); - - if (!have_tracepoints(&evsel_list->entries)) - perf_header__clear_feat(&session->header, HEADER_TRACING_DATA); - - if (!rec->opts.branch_stack) - perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); - - if (!rec->file_new) { - err = perf_session__read_header(session, output); - if (err < 0) - goto out_delete_session; - } + record__init_features(rec); if (forks) { - err = perf_evlist__prepare_workload(evsel_list, opts, argv); + err = perf_evlist__prepare_workload(rec->evlist, &opts->target, + argv, file->is_pipe, + workload_exec_failed_signal); if (err < 0) { pr_err("Couldn't run the workload!\n"); + status = err; goto out_delete_session; } } - if (perf_record__open(rec) != 0) { + if (record__open(rec) != 0) { err = -1; - goto out_delete_session; + goto out_child; } - /* - * perf_session__delete(session) will be called at perf_record__exit() - */ - on_exit(perf_record__exit, rec); + if (!rec->evlist->nr_groups) + perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); - if (opts->pipe_output) { - err = perf_header__write_pipe(output); + if (file->is_pipe) { + err = perf_header__write_pipe(file->fd); 
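workload_exec_failed_signal() receives the child's exec errno as the SIGUSR1 payload, which perf_evlist__prepare_workload() is expected to queue from the forked child when the exec fails. A sketch of the same SA_SIGINFO / sigqueue() pattern, with the sender faked in-process for illustration:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile int workload_errno;
static volatile sig_atomic_t done;

/* handler mirrors workload_exec_failed_signal(): errno travels in the payload */
static void exec_failed(int sig, siginfo_t *info, void *uc)
{
	(void)sig; (void)uc;
	workload_errno = info->si_value.sival_int;
	done = 1;
}

int main(void)
{
	struct sigaction act;
	union sigval val;

	memset(&act, 0, sizeof(act));
	act.sa_sigaction = exec_failed;
	act.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &act, NULL);

	val.sival_int = ENOENT;
	sigqueue(getpid(), SIGUSR1, val);	/* stand-in for the child's failed exec */

	while (!done)
		usleep(1000);
	printf("workload failed: %s\n", strerror(workload_errno));
	return 0;
}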
if (err < 0) - goto out_delete_session; - } else if (rec->file_new) { - err = perf_session__write_header(session, evsel_list, - output, false); + goto out_child; + } else { + err = perf_session__write_header(session, rec->evlist, + file->fd, false); if (err < 0) - goto out_delete_session; + goto out_child; } if (!rec->no_buildid @@ -570,34 +356,20 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) pr_err("Couldn't generate buildids. " "Use --no-buildid to profile anyway.\n"); err = -1; - goto out_delete_session; + goto out_child; } - rec->post_processing_offset = lseek(output, 0, SEEK_CUR); + machine = &session->machines.host; - machine = perf_session__find_host_machine(session); - if (!machine) { - pr_err("Couldn't find native kernel information.\n"); - err = -1; - goto out_delete_session; - } - - if (opts->pipe_output) { + if (file->is_pipe) { err = perf_event__synthesize_attrs(tool, session, process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize attrs.\n"); - goto out_delete_session; + goto out_child; } - err = perf_event__synthesize_event_types(tool, process_synthesized_event, - machine); - if (err < 0) { - pr_err("Couldn't synthesize event_types.\n"); - goto out_delete_session; - } - - if (have_tracepoints(&evsel_list->entries)) { + if (have_tracepoints(&rec->evlist->entries)) { /* * FIXME err <= 0 here actually means that * there were no tracepoints so its not really @@ -606,21 +378,18 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) * return this more properly and also * propagate errors that now are calling die() */ - err = perf_event__synthesize_tracing_data(tool, output, evsel_list, + err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist, process_synthesized_event); if (err <= 0) { pr_err("Couldn't record tracing data.\n"); - goto out_delete_session; + goto out_child; } - advance_output(rec, err); + rec->bytes_written += err; } } err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_text"); - if (err < 0) - err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, - machine, "_stext"); + machine); if (err < 0) pr_err("Couldn't record kernel reference relocation symbol\n" "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" @@ -633,20 +402,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) "Symbol resolution may be skewed if relocation was used (e.g. 
kexec).\n" "Check /proc/modules permission or run as root.\n"); - if (perf_guest) - perf_session__process_machines(session, tool, - perf_event__synthesize_guest_os); - - if (!opts->target.system_wide) - err = perf_event__synthesize_thread_map(tool, evsel_list->threads, - process_synthesized_event, - machine); - else - err = perf_event__synthesize_threads(tool, process_synthesized_event, - machine); + if (perf_guest) { + machines__process_guests(&session->machines, + perf_event__synthesize_guest_os, tool); + } + err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads, + process_synthesized_event, opts->sample_address); if (err != 0) - goto out_delete_session; + goto out_child; if (rec->realtime_prio) { struct sched_param param; @@ -655,56 +419,112 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { pr_err("Could not set realtime priority.\n"); err = -1; - goto out_delete_session; + goto out_child; } } - perf_evlist__enable(evsel_list); + /* + * When perf is starting the traced process, all the events + * (apart from group members) have enable_on_exec=1 set, + * so don't spoil it by prematurely enabling them. + */ + if (!target__none(&opts->target) && !opts->initial_delay) + perf_evlist__enable(rec->evlist); /* * Let the child rip */ if (forks) - perf_evlist__start_workload(evsel_list); + perf_evlist__start_workload(rec->evlist); + + if (opts->initial_delay) { + usleep(opts->initial_delay * 1000); + perf_evlist__enable(rec->evlist); + } for (;;) { int hits = rec->samples; - if (perf_record__mmap_read_all(rec) < 0) { + if (record__mmap_read_all(rec) < 0) { err = -1; - goto out_delete_session; + goto out_child; } if (hits == rec->samples) { if (done) break; - err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1); + err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1); + /* + * Propagate error, only if there's any. Ignore positive + * number of returned events and interrupt error. + */ + if (err > 0 || (err < 0 && errno == EINTR)) + err = 0; waking++; } - if (done) - perf_evlist__disable(evsel_list); + /* + * When perf is starting the traced process, at the end events + * die with the process and we wait for that. Thus no need to + * disable events in this case. + */ + if (done && !disabled && !target__none(&opts->target)) { + perf_evlist__disable(rec->evlist); + disabled = true; + } } - if (quiet || signr == SIGUSR1) - return 0; + if (forks && workload_exec_errno) { + char msg[512]; + const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg)); + pr_err("Workload failed: %s\n", emsg); + err = -1; + goto out_child; + } - fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); + if (!quiet) { + fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking); - /* - * Approximate RIP event size: 24 bytes. - */ - fprintf(stderr, - "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", - (double)rec->bytes_written / 1024.0 / 1024.0, - output_name, - rec->bytes_written / 24); + /* + * Approximate RIP event size: 24 bytes. 
+ */ + fprintf(stderr, + "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n", + (double)rec->bytes_written / 1024.0 / 1024.0, + file->path, + rec->bytes_written / 24); + } - return 0; +out_child: + if (forks) { + int exit_status; + + if (!child_finished) + kill(rec->evlist->workload.pid, SIGTERM); + + wait(&exit_status); + + if (err < 0) + status = err; + else if (WIFEXITED(exit_status)) + status = WEXITSTATUS(exit_status); + else if (WIFSIGNALED(exit_status)) + signr = WTERMSIG(exit_status); + } else + status = err; + + if (!err && !file->is_pipe) { + rec->session->header.data_size += rec->bytes_written; + + if (!rec->no_buildid) + process_buildids(rec); + perf_session__write_header(rec->session, rec->evlist, + file->fd, true); + } out_delete_session: perf_session__delete(session); - return err; + return status; } #define BRANCH_OPT(n, m) \ @@ -725,6 +545,10 @@ static const struct branch_mode branch_modes[] = { BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL), BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN), BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL), + BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX), + BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX), + BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX), + BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND), BRANCH_END }; @@ -791,7 +615,7 @@ error: return ret; } -#ifdef LIBUNWIND_SUPPORT +#ifdef HAVE_DWARF_UNWIND_SUPPORT static int get_stack_size(char *str, unsigned long *_size) { char *endptr; @@ -817,24 +641,14 @@ static int get_stack_size(char *str, unsigned long *_size) max_size, str); return -1; } -#endif /* LIBUNWIND_SUPPORT */ +#endif /* HAVE_DWARF_UNWIND_SUPPORT */ -static int -parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, - int unset) +int record_parse_callchain(const char *arg, struct record_opts *opts) { - struct perf_record *rec = (struct perf_record *)opt->value; char *tok, *name, *saveptr = NULL; char *buf; int ret = -1; - /* --no-call-graph */ - if (unset) - return 0; - - /* We specified default option if none is provided. */ - BUG_ON(!arg); - /* We need buffer that we know we can write to. 
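The out_child block above reaps the forked workload and folds its exit status into perf's own return value with the standard wait macros. A tiny self-contained illustration of that status handling (not perf's code):

/*
 * Reap a child and translate its status the same way out_child does:
 * normal exit code via WEXITSTATUS, fatal signal via WTERMSIG.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int exit_status;
	pid_t child = fork();

	if (child == 0) {
		execlp("false", "false", (char *)NULL);	/* exits with 1 */
		_exit(127);
	}

	wait(&exit_status);

	if (WIFEXITED(exit_status))
		printf("workload exited, status %d\n", WEXITSTATUS(exit_status));
	else if (WIFSIGNALED(exit_status))
		printf("workload killed by signal %d\n", WTERMSIG(exit_status));
	return 0;
}
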
*/ buf = malloc(strlen(arg) + 1); if (!buf) @@ -849,36 +663,32 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, /* Framepointer style */ if (!strncmp(name, "fp", sizeof("fp"))) { if (!strtok_r(NULL, ",", &saveptr)) { - rec->opts.call_graph = CALLCHAIN_FP; + opts->call_graph = CALLCHAIN_FP; ret = 0; } else pr_err("callchain: No more arguments " "needed for -g fp\n"); break; -#ifdef LIBUNWIND_SUPPORT +#ifdef HAVE_DWARF_UNWIND_SUPPORT /* Dwarf style */ } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) { const unsigned long default_stack_dump_size = 8192; ret = 0; - rec->opts.call_graph = CALLCHAIN_DWARF; - rec->opts.stack_dump_size = default_stack_dump_size; + opts->call_graph = CALLCHAIN_DWARF; + opts->stack_dump_size = default_stack_dump_size; tok = strtok_r(NULL, ",", &saveptr); if (tok) { unsigned long size = 0; ret = get_stack_size(tok, &size); - rec->opts.stack_dump_size = size; + opts->stack_dump_size = size; } - - if (!ret) - pr_debug("callchain: stack dump size %d\n", - rec->opts.stack_dump_size); -#endif /* LIBUNWIND_SUPPORT */ +#endif /* HAVE_DWARF_UNWIND_SUPPORT */ } else { - pr_err("callchain: Unknown -g option " + pr_err("callchain: Unknown --call-graph option " "value: %s\n", arg); break; } @@ -886,13 +696,68 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, } while (0); free(buf); + return ret; +} + +static void callchain_debug(struct record_opts *opts) +{ + static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" }; + + pr_debug("callchain: type %s\n", str[opts->call_graph]); + + if (opts->call_graph == CALLCHAIN_DWARF) + pr_debug("callchain: stack dump size %d\n", + opts->stack_dump_size); +} + +int record_parse_callchain_opt(const struct option *opt, + const char *arg, + int unset) +{ + struct record_opts *opts = opt->value; + int ret; + + opts->call_graph_enabled = !unset; + /* --no-call-graph */ + if (unset) { + opts->call_graph = CALLCHAIN_NONE; + pr_debug("callchain: disabled\n"); + return 0; + } + + ret = record_parse_callchain(arg, opts); if (!ret) - pr_debug("callchain: type %d\n", rec->opts.call_graph); + callchain_debug(opts); return ret; } +int record_callchain_opt(const struct option *opt, + const char *arg __maybe_unused, + int unset __maybe_unused) +{ + struct record_opts *opts = opt->value; + + opts->call_graph_enabled = !unset; + + if (opts->call_graph == CALLCHAIN_NONE) + opts->call_graph = CALLCHAIN_FP; + + callchain_debug(opts); + return 0; +} + +static int perf_record_config(const char *var, const char *value, void *cb) +{ + struct record *rec = cb; + + if (!strcmp(var, "record.call-graph")) + return record_parse_callchain(value, &rec->opts); + + return perf_default_config(var, value, cb); +} + static const char * const record_usage[] = { "perf record [<options>] [<command>]", "perf record [<options>] -- <command> [<options>]", @@ -900,8 +765,8 @@ static const char * const record_usage[] = { }; /* - * XXX Ideally would be local to cmd_record() and passed to a perf_record__new - * because we need to have access to it in perf_record__exit, that is called + * XXX Ideally would be local to cmd_record() and passed to a record__new + * because we need to have access to it in record__exit, that is called * after cmd_record() exits, but since record_options need to be accessible to * builtin-script, leave it here. * @@ -909,7 +774,7 @@ static const char * const record_usage[] = { * * Just say no to tons of global variables, sigh. 
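record_parse_callchain() above walks the "--call-graph mode[,dump_size]" string with strtok_r(), defaulting the DWARF stack dump to 8192 bytes when no size follows. A simplified standalone rendition of that parsing, with plain strcmp() and strtoul() standing in for perf's helpers and bounds checks:

/*
 * Parse "fp" or "dwarf[,dump_size]" the way record_parse_callchain()
 * does, in simplified form. Hypothetical types and defaults noted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum callchain { CHAIN_NONE, CHAIN_FP, CHAIN_DWARF };

static int parse_callchain(const char *arg, enum callchain *mode,
			   unsigned long *dump_size)
{
	char *buf = strdup(arg);	/* strtok_r needs a writable copy */
	char *saveptr = NULL;
	char *name, *tok;
	int ret = -1;

	if (!buf)
		return -1;

	name = strtok_r(buf, ",", &saveptr);
	if (!name)
		goto out;

	if (!strcmp(name, "fp")) {
		*mode = CHAIN_FP;
		ret = 0;
	} else if (!strcmp(name, "dwarf")) {
		*mode = CHAIN_DWARF;
		*dump_size = 8192;	/* default size, as in the patch */
		tok = strtok_r(NULL, ",", &saveptr);
		if (tok)
			*dump_size = strtoul(tok, NULL, 0);
		ret = 0;
	}
out:
	free(buf);
	return ret;
}

int main(void)
{
	enum callchain mode = CHAIN_NONE;
	unsigned long size = 0;

	if (!parse_callchain("dwarf,4096", &mode, &size))
		printf("mode %d, dump size %lu\n", mode, size);
	return 0;
}
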
*/ -static struct perf_record record = { +static struct record record = { .opts = { .mmap_pages = UINT_MAX, .user_freq = UINT_MAX, @@ -917,24 +782,23 @@ static struct perf_record record = { .freq = 4000, .target = { .uses_mmap = true, + .default_per_cpu = true, }, }, - .write_mode = WRITE_FORCE, - .file_new = true, }; -#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " +#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: " -#ifdef LIBUNWIND_SUPPORT -static const char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; +#ifdef HAVE_DWARF_UNWIND_SUPPORT +const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf"; #else -static const char callchain_help[] = CALLCHAIN_HELP "[fp]"; +const char record_callchain_help[] = CALLCHAIN_HELP "fp"; #endif /* * XXX Will stay a global variable till we fix builtin-script.c to stop messing * with it and switch to use the library functions in perf_evlist that came - * from builtin-record.c, i.e. use perf_record_opts, + * from builtin-record.c, i.e. use record_opts, * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record', * using pipes, etc. */ @@ -950,31 +814,32 @@ const struct option record_options[] = { "record events on existing thread id"), OPT_INTEGER('r', "realtime", &record.realtime_prio, "collect data with this RT SCHED_FIFO priority"), - OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay, + OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering, "collect data without buffering"), OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples, "collect raw sample records from all opened counters"), OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide, "system-wide collection from all CPUs"), - OPT_BOOLEAN('A', "append", &record.append_file, - "append to the output file to do incremental profiling"), OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu", "list of cpus to monitor"), - OPT_BOOLEAN('f', "force", &record.force, - "overwrite existing data file (deprecated)"), OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"), - OPT_STRING('o', "output", &record.output_name, "file", + OPT_STRING('o', "output", &record.file.path, "file", "output file name"), - OPT_BOOLEAN('i', "no-inherit", &record.opts.no_inherit, - "child tasks do not inherit counters"), + OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit, + &record.opts.no_inherit_set, + "child tasks do not inherit counters"), OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"), - OPT_UINTEGER('m', "mmap-pages", &record.opts.mmap_pages, - "number of mmap data pages"), + OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages", + "number of mmap data pages", + perf_evlist__parse_mmap_pages), OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), - OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]", - callchain_help, &parse_callchain_opt, - "fp"), + OPT_CALLBACK_NOOPT('g', NULL, &record.opts, + NULL, "enables call-graph recording" , + &record_callchain_opt), + OPT_CALLBACK(0, "call-graph", &record.opts, + "mode[,dump_size]", record_callchain_help, + &record_parse_callchain_opt), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), @@ -993,6 +858,8 @@ const struct option record_options[] = { OPT_CALLBACK('G', "cgroup", &record.evlist, "name", "monitor event in cgroup name only", parse_cgroups), + 
OPT_UINTEGER('D', "delay", &record.opts.initial_delay, + "ms to wait before starting measurement after program start"), OPT_STRING('u', "uid", &record.opts.target.uid_str, "user", "user to profile"), @@ -1003,38 +870,32 @@ const struct option record_options[] = { OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack, "branch filter mask", "branch stack filter modes", parse_branch_stack), + OPT_BOOLEAN('W', "weight", &record.opts.sample_weight, + "sample by weight (on special events only)"), + OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction, + "sample transaction flags (special events only)"), + OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread, + "use per-thread mmaps"), OPT_END() }; int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) { int err = -ENOMEM; - struct perf_evsel *pos; - struct perf_evlist *evsel_list; - struct perf_record *rec = &record; + struct record *rec = &record; char errbuf[BUFSIZ]; - evsel_list = perf_evlist__new(NULL, NULL); - if (evsel_list == NULL) + rec->evlist = perf_evlist__new(); + if (rec->evlist == NULL) return -ENOMEM; - rec->evlist = evsel_list; + perf_config(perf_record_config, rec); argc = parse_options(argc, argv, record_options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); - if (!argc && perf_target__none(&rec->opts.target)) + if (!argc && target__none(&rec->opts.target)) usage_with_options(record_usage, record_options); - if (rec->force && rec->append_file) { - ui__error("Can't overwrite and append at the same time." - " You need to choose between -f and -A"); - usage_with_options(record_usage, record_options); - } else if (rec->append_file) { - rec->write_mode = WRITE_APPEND; - } else { - rec->write_mode = WRITE_FORCE; - } - if (nr_cgroups && !rec->opts.target.system_wide) { ui__error("cgroup monitoring only available in" " system-wide mode\n"); @@ -1056,60 +917,44 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) if (rec->no_buildid_cache || rec->no_buildid) disable_buildid_cache(); - if (evsel_list->nr_entries == 0 && - perf_evlist__add_default(evsel_list) < 0) { + if (rec->evlist->nr_entries == 0 && + perf_evlist__add_default(rec->evlist) < 0) { pr_err("Not enough memory for event selector list\n"); goto out_symbol_exit; } - err = perf_target__validate(&rec->opts.target); + if (rec->opts.target.tid && !rec->opts.no_inherit_set) + rec->opts.no_inherit = true; + + err = target__validate(&rec->opts.target); if (err) { - perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); + target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); ui__warning("%s", errbuf); } - err = perf_target__parse_uid(&rec->opts.target); + err = target__parse_uid(&rec->opts.target); if (err) { int saved_errno = errno; - perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); + target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); ui__error("%s", errbuf); err = -saved_errno; - goto out_free_fd; + goto out_symbol_exit; } err = -ENOMEM; - if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0) + if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0) usage_with_options(record_usage, record_options); - list_for_each_entry(pos, &evsel_list->entries, node) { - if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos))) - goto out_free_fd; - } - - if (rec->opts.user_interval != ULLONG_MAX) - rec->opts.default_interval = rec->opts.user_interval; - if (rec->opts.user_freq != UINT_MAX) - rec->opts.freq = rec->opts.user_freq; - - /* - * User specified 
count overrides default frequency. - */ - if (rec->opts.default_interval) - rec->opts.freq = 0; - else if (rec->opts.freq) { - rec->opts.default_interval = rec->opts.freq; - } else { - ui__error("frequency and count are zero, aborting\n"); + if (record_opts__config(&rec->opts)) { err = -EINVAL; - goto out_free_fd; + goto out_symbol_exit; } err = __cmd_record(&record, argc, argv); -out_free_fd: - perf_evlist__delete_maps(evsel_list); out_symbol_exit: + perf_evlist__delete(rec->evlist); symbol__exit(); return err; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index a61725d89d3..21d830bafff 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -8,11 +8,11 @@ #include "builtin.h" #include "util/util.h" +#include "util/cache.h" #include "util/annotate.h" #include "util/color.h" #include <linux/list.h> -#include "util/cache.h" #include <linux/rbtree.h> #include "util/symbol.h" #include "util/callchain.h" @@ -33,192 +33,152 @@ #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" +#include "util/data.h" +#include "arch/common.h" +#include <dlfcn.h> #include <linux/bitmap.h> -struct perf_report { +struct report { struct perf_tool tool; struct perf_session *session; - char const *input_name; bool force, use_tui, use_gtk, use_stdio; bool hide_unresolved; bool dont_use_callchains; bool show_full_info; bool show_threads; bool inverted_callchain; + bool mem_mode; + bool header; + bool header_only; + int max_stack; struct perf_read_values show_threads_values; const char *pretty_printing_style; - symbol_filter_t annotate_init; const char *cpu_list; const char *symbol_filter_str; + float min_percent; + u64 nr_entries; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; -static int perf_report__add_branch_hist_entry(struct perf_tool *tool, - struct addr_location *al, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine) +static int report__config(const char *var, const char *value, void *cb) { - struct perf_report *rep = container_of(tool, struct perf_report, tool); - struct symbol *parent = NULL; - int err = 0; - unsigned i; - struct hist_entry *he; - struct branch_info *bi, *bx; - - if ((sort__has_parent || symbol_conf.use_callchain) - && sample->callchain) { - err = machine__resolve_callchain(machine, evsel, al->thread, - sample, &parent); - if (err) - return err; + if (!strcmp(var, "report.group")) { + symbol_conf.event_group = perf_config_bool(var, value); + return 0; + } + if (!strcmp(var, "report.percent-limit")) { + struct report *rep = cb; + rep->min_percent = strtof(value, NULL); + return 0; + } + if (!strcmp(var, "report.children")) { + symbol_conf.cumulate_callchain = perf_config_bool(var, value); + return 0; } - bi = machine__resolve_bstack(machine, al->thread, - sample->branch_stack); - if (!bi) - return -ENOMEM; - - for (i = 0; i < sample->branch_stack->nr; i++) { - if (rep->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) - continue; - /* - * The report shows the percentage of total branches captured - * and not events sampled. Thus we use a pseudo period of 1. 
- */ - he = __hists__add_branch_entry(&evsel->hists, al, parent, - &bi[i], 1); - if (he) { - struct annotation *notes; - err = -ENOMEM; - bx = he->branch_info; - if (bx->from.sym && use_browser == 1 && sort__has_sym) { - notes = symbol__annotation(bx->from.sym); - if (!notes->src - && symbol__alloc_hist(bx->from.sym) < 0) - goto out; - - err = symbol__inc_addr_samples(bx->from.sym, - bx->from.map, - evsel->idx, - bx->from.al_addr); - if (err) - goto out; - } + return perf_default_config(var, value, cb); +} - if (bx->to.sym && use_browser == 1 && sort__has_sym) { - notes = symbol__annotation(bx->to.sym); - if (!notes->src - && symbol__alloc_hist(bx->to.sym) < 0) - goto out; - - err = symbol__inc_addr_samples(bx->to.sym, - bx->to.map, - evsel->idx, - bx->to.al_addr); - if (err) - goto out; - } - evsel->hists.stats.total_period += 1; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); - err = 0; - } else - return -ENOMEM; - } -out: - return err; +static void report__inc_stats(struct report *rep, struct hist_entry *he) +{ + /* + * The @he is either of a newly created one or an existing one + * merging current sample. We only want to count a new one so + * checking ->nr_events being 1. + */ + if (he->stat.nr_events == 1) + rep->nr_entries++; } -static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, - struct addr_location *al, - struct perf_sample *sample, - struct machine *machine) +static int hist_iter__report_callback(struct hist_entry_iter *iter, + struct addr_location *al, bool single, + void *arg) { - struct symbol *parent = NULL; int err = 0; - struct hist_entry *he; + struct report *rep = arg; + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + struct mem_info *mi; + struct branch_info *bi; - if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, evsel, al->thread, - sample, &parent); - if (err) - return err; - } + report__inc_stats(rep, he); - he = __hists__add_entry(&evsel->hists, al, parent, sample->period); - if (he == NULL) - return -ENOMEM; + if (!ui__has_annotation()) + return 0; - if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, - &callchain_cursor, - sample->period); + if (sort__mode == SORT_MODE__BRANCH) { + bi = he->branch_info; + err = addr_map_symbol__inc_samples(&bi->from, evsel->idx); if (err) - return err; - } - /* - * Only in the newt browser we are doing integrated annotation, - * so we don't allocated the extra space needed because the stdio - * code will not use it. 
- */ - if (he->ms.sym != NULL && use_browser == 1 && sort__has_sym) { - struct annotation *notes = symbol__annotation(he->ms.sym); + goto out; - assert(evsel != NULL); + err = addr_map_symbol__inc_samples(&bi->to, evsel->idx); - err = -ENOMEM; - if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0) + } else if (rep->mem_mode) { + mi = he->mem_info; + err = addr_map_symbol__inc_samples(&mi->daddr, evsel->idx); + if (err) goto out; err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); + + } else if (symbol_conf.cumulate_callchain) { + if (single) + err = hist_entry__inc_addr_samples(he, evsel->idx, + al->addr); + } else { + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); } - evsel->hists.stats.total_period += sample->period; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); out: return err; } - static int process_sample_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { - struct perf_report *rep = container_of(tool, struct perf_report, tool); + struct report *rep = container_of(tool, struct report, tool); struct addr_location al; + struct hist_entry_iter iter = { + .hide_unresolved = rep->hide_unresolved, + .add_entry_cb = hist_iter__report_callback, + }; + int ret; - if (perf_event__preprocess_sample(event, machine, &al, sample, - rep->annotate_init) < 0) { - fprintf(stderr, "problem processing %d event, skipping it.\n", - event->header.type); + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); return -1; } - if (al.filtered || (rep->hide_unresolved && al.sym == NULL)) + if (rep->hide_unresolved && al.sym == NULL) return 0; if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) return 0; - if (sort__branch_mode == 1) { - if (perf_report__add_branch_hist_entry(tool, &al, sample, - evsel, machine)) { - pr_debug("problem adding lbr entry, skipping event\n"); - return -1; - } - } else { - if (al.map != NULL) - al.map->dso->hit = 1; + if (sort__mode == SORT_MODE__BRANCH) + iter.ops = &hist_iter_branch; + else if (rep->mem_mode) + iter.ops = &hist_iter_mem; + else if (symbol_conf.cumulate_callchain) + iter.ops = &hist_iter_cumulative; + else + iter.ops = &hist_iter_normal; - if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { - pr_debug("problem incrementing symbol period, skipping event\n"); - return -1; - } - } - return 0; + if (al.map != NULL) + al.map->dso->hit = 1; + + ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack, + rep); + if (ret < 0) + pr_debug("problem adding hist entry, skipping event\n"); + + return ret; } static int process_read_event(struct perf_tool *tool, @@ -227,7 +187,7 @@ static int process_read_event(struct perf_tool *tool, struct perf_evsel *evsel, struct machine *machine __maybe_unused) { - struct perf_report *rep = container_of(tool, struct perf_report, tool); + struct report *rep = container_of(tool, struct report, tool); if (rep->show_threads) { const char *name = evsel ? 
perf_evsel__name(evsel) : "unknown"; @@ -246,12 +206,13 @@ static int process_read_event(struct perf_tool *tool, } /* For pipe mode, sample_type is not currently set */ -static int perf_report__setup_sample_type(struct perf_report *rep) +static int report__setup_sample_type(struct report *rep) { - struct perf_session *self = rep->session; - u64 sample_type = perf_evlist__sample_type(self->evlist); + struct perf_session *session = rep->session; + u64 sample_type = perf_evlist__combined_sample_type(session->evlist); + bool is_pipe = perf_data_file__is_pipe(session->file); - if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) { + if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__error("Selected --sort parent, but no " "callchain data. Did you call " @@ -273,8 +234,16 @@ static int perf_report__setup_sample_type(struct perf_report *rep) } } - if (sort__branch_mode == 1) { - if (!self->fd_pipe && + if (symbol_conf.cumulate_callchain) { + /* Silently ignore if callchain is missing */ + if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { + symbol_conf.cumulate_callchain = false; + perf_hpp__cancel_cumulate(); + } + } + + if (sort__mode == SORT_MODE__BRANCH) { + if (!is_pipe && !(sample_type & PERF_SAMPLE_BRANCH_STACK)) { ui__error("Selected -b but no branch data. " "Did you call perf record without -b?\n"); @@ -285,42 +254,73 @@ static int perf_report__setup_sample_type(struct perf_report *rep) return 0; } -extern volatile int session_done; - static void sig_handler(int sig __maybe_unused) { session_done = 1; } -static size_t hists__fprintf_nr_sample_events(struct hists *self, +static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep, const char *evname, FILE *fp) { size_t ret; char unit; - unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE]; - u64 nr_events = self->stats.total_period; + unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; + u64 nr_events = hists->stats.total_period; + struct perf_evsel *evsel = hists_to_evsel(hists); + char buf[512]; + size_t size = sizeof(buf); + + if (symbol_conf.filter_relative) { + nr_samples = hists->stats.nr_non_filtered_samples; + nr_events = hists->stats.total_non_filtered_period; + } + + if (perf_evsel__is_group_event(evsel)) { + struct perf_evsel *pos; + + perf_evsel__group_desc(evsel, buf, size); + evname = buf; + + for_each_group_member(pos, evsel) { + if (symbol_conf.filter_relative) { + nr_samples += pos->hists.stats.nr_non_filtered_samples; + nr_events += pos->hists.stats.total_non_filtered_period; + } else { + nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; + nr_events += pos->hists.stats.total_period; + } + } + } nr_samples = convert_unit(nr_samples, &unit); ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit); if (evname != NULL) ret += fprintf(fp, " of event '%s'", evname); - ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events); + if (rep->mem_mode) { + ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events); + ret += fprintf(fp, "\n# Sort order : %s", sort_order); + } else + ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events); return ret + fprintf(fp, "\n#\n"); } static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, - struct perf_report *rep, + struct report *rep, const char *help) { struct perf_evsel *pos; - list_for_each_entry(pos, &evlist->entries, node) { + evlist__for_each(evlist, pos) { struct hists *hists = &pos->hists; const char *evname = perf_evsel__name(pos); - 
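hists__fprintf_nr_sample_events() above scales the sample count with convert_unit() before printing the "# Samples: NNK of event ..." header line. A sketch of that helper's behavior (divide in steps of 1000 and report a K/M/G unit character), written from the call site rather than copied from perf's util code:

/*
 * Scale a raw sample count into a short human-readable figure plus
 * unit suffix, as the report header does. Simplified demo.
 */
#include <stdio.h>

static unsigned long convert_unit(unsigned long value, char *unit)
{
	*unit = ' ';

	if (value > 1000) {
		value /= 1000;
		*unit = 'K';
	}
	if (value > 1000) {
		value /= 1000;
		*unit = 'M';
	}
	if (value > 1000) {
		value /= 1000;
		*unit = 'G';
	}
	return value;
}

int main(void)
{
	char unit;
	unsigned long nr_samples = convert_unit(2345678UL, &unit);

	printf("# Samples: %lu%c\n", nr_samples, unit);	/* prints "# Samples: 2M" */
	return 0;
}
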
hists__fprintf_nr_sample_events(hists, evname, stdout); - hists__fprintf(hists, true, 0, 0, stdout); + if (symbol_conf.event_group && + !perf_evsel__is_group_leader(pos)) + continue; + + hists__fprintf_nr_sample_events(hists, rep, evname, stdout); + hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout); fprintf(stdout, "\n\n"); } @@ -339,41 +339,11 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, return 0; } -static int __cmd_report(struct perf_report *rep) +static void report__warn_kptr_restrict(const struct report *rep) { - int ret = -EINVAL; - u64 nr_samples; - struct perf_session *session = rep->session; - struct perf_evsel *pos; - struct map *kernel_map; - struct kmap *kernel_kmap; - const char *help = "For a higher level overview, try: perf report --sort comm,dso"; - - signal(SIGINT, sig_handler); - - if (rep->cpu_list) { - ret = perf_session__cpu_bitmap(session, rep->cpu_list, - rep->cpu_bitmap); - if (ret) - goto out_delete; - } - - if (use_browser <= 0) - perf_session__fprintf_info(session, stdout, rep->show_full_info); - - if (rep->show_threads) - perf_read_values_init(&rep->show_threads_values); - - ret = perf_report__setup_sample_type(rep); - if (ret) - goto out_delete; - - ret = perf_session__process_events(session, &rep->tool); - if (ret) - goto out_delete; + struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION]; + struct kmap *kernel_kmap = map__kmap(kernel_map); - kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION]; - kernel_kmap = map__kmap(kernel_map); if (kernel_map == NULL || (kernel_map->dso->hit && (kernel_kmap->ref_reloc_sym == NULL || @@ -396,139 +366,170 @@ static int __cmd_report(struct perf_report *rep) "Samples in kernel modules can't be resolved as well.\n\n", desc); } +} + +static int report__gtk_browse_hists(struct report *rep, const char *help) +{ + int (*hist_browser)(struct perf_evlist *evlist, const char *help, + struct hist_browser_timer *timer, float min_pcnt); + + hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists"); + + if (hist_browser == NULL) { + ui__error("GTK browser not found!\n"); + return -1; + } - if (verbose > 3) - perf_session__fprintf(session, stdout); + return hist_browser(rep->session->evlist, help, NULL, rep->min_percent); +} - if (verbose > 2) - perf_session__fprintf_dsos(session, stdout); +static int report__browse_hists(struct report *rep) +{ + int ret; + struct perf_session *session = rep->session; + struct perf_evlist *evlist = session->evlist; + const char *help = "For a higher level overview, try: perf report --sort comm,dso"; - if (dump_trace) { - perf_session__fprintf_nr_events(session, stdout); - goto out_delete; + switch (use_browser) { + case 1: + ret = perf_evlist__tui_browse_hists(evlist, help, NULL, + rep->min_percent, + &session->header.env); + /* + * Usually "ret" is the last pressed key, and we only + * care if the key notifies us to switch data file. 
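report__gtk_browse_hists() above no longer links the GTK code in; it resolves perf_evlist__gtk_browse_hists through the perf_gtk_handle at run time and fails gracefully when the browser is absent. The same dlopen()/dlsym() pattern in a self-contained demo, resolving cos() from libm purely for illustration (link with -ldl on older glibc):

/*
 * Resolve a function at run time and call it through a pointer,
 * mirroring the GTK browser lookup above. Illustrative demo only.
 */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	void *handle = dlopen("libm.so.6", RTLD_LAZY);
	double (*fn)(double);

	if (!handle) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return 1;
	}

	/* same shape as: dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists") */
	fn = (double (*)(double))dlsym(handle, "cos");
	if (!fn) {
		fprintf(stderr, "symbol not found: %s\n", dlerror());
		dlclose(handle);
		return 1;
	}

	printf("cos(0) = %f\n", fn(0.0));
	dlclose(handle);
	return 0;
}
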
+ */ + if (ret != K_SWITCH_INPUT_DATA) + ret = 0; + break; + case 2: + ret = report__gtk_browse_hists(rep, help); + break; + default: + ret = perf_evlist__tty_browse_hists(evlist, rep, help); + break; } - nr_samples = 0; - list_for_each_entry(pos, &session->evlist->entries, node) { + return ret; +} + +static void report__collapse_hists(struct report *rep) +{ + struct ui_progress prog; + struct perf_evsel *pos; + + ui_progress__init(&prog, rep->nr_entries, "Merging related events..."); + + evlist__for_each(rep->session->evlist, pos) { struct hists *hists = &pos->hists; if (pos->idx == 0) hists->symbol_filter_str = rep->symbol_filter_str; - hists__collapse_resort(hists); - hists__output_resort(hists); - nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE]; - } + hists__collapse_resort(hists, &prog); - if (nr_samples == 0) { - ui__error("The %s file has no samples!\n", session->filename); - goto out_delete; - } + /* Non-group events are considered as leader */ + if (symbol_conf.event_group && + !perf_evsel__is_group_leader(pos)) { + struct hists *leader_hists = &pos->leader->hists; - if (use_browser > 0) { - if (use_browser == 1) { - perf_evlist__tui_browse_hists(session->evlist, help, - NULL, NULL, 0); - } else if (use_browser == 2) { - perf_evlist__gtk_browse_hists(session->evlist, help, - NULL, NULL, 0); + hists__match(leader_hists, hists); + hists__link(leader_hists, hists); } - } else - perf_evlist__tty_browse_hists(session->evlist, rep, help); + } -out_delete: - /* - * Speed up the exit process, for large files this can - * take quite a while. - * - * XXX Enable this when using valgrind or if we ever - * librarize this command. - * - * Also experiment with obstacks to see how much speed - * up we'll get here. - * - * perf_session__delete(session); - */ - return ret; + ui_progress__finish(); } -static int -parse_callchain_opt(const struct option *opt, const char *arg, int unset) +static int __cmd_report(struct report *rep) { - struct perf_report *rep = (struct perf_report *)opt->value; - char *tok, *tok2; - char *endptr; + int ret; + struct perf_session *session = rep->session; + struct perf_evsel *pos; + struct perf_data_file *file = session->file; - /* - * --no-call-graph - */ - if (unset) { - rep->dont_use_callchains = true; - return 0; + signal(SIGINT, sig_handler); + + if (rep->cpu_list) { + ret = perf_session__cpu_bitmap(session, rep->cpu_list, + rep->cpu_bitmap); + if (ret) + return ret; } - symbol_conf.use_callchain = true; + if (rep->show_threads) + perf_read_values_init(&rep->show_threads_values); - if (!arg) - return 0; + ret = report__setup_sample_type(rep); + if (ret) + return ret; - tok = strtok((char *)arg, ","); - if (!tok) - return -1; + ret = perf_session__process_events(session, &rep->tool); + if (ret) + return ret; + + report__warn_kptr_restrict(rep); + + if (use_browser == 0) { + if (verbose > 3) + perf_session__fprintf(session, stdout); - /* get the output mode */ - if (!strncmp(tok, "graph", strlen(arg))) - callchain_param.mode = CHAIN_GRAPH_ABS; + if (verbose > 2) + perf_session__fprintf_dsos(session, stdout); - else if (!strncmp(tok, "flat", strlen(arg))) - callchain_param.mode = CHAIN_FLAT; + if (dump_trace) { + perf_session__fprintf_nr_events(session, stdout); + return 0; + } + } - else if (!strncmp(tok, "fractal", strlen(arg))) - callchain_param.mode = CHAIN_GRAPH_REL; + report__collapse_hists(rep); - else if (!strncmp(tok, "none", strlen(arg))) { - callchain_param.mode = CHAIN_NONE; - symbol_conf.use_callchain = false; + if (session_done()) + return 0; + 
if (rep->nr_entries == 0) { + ui__error("The %s file has no samples!\n", file->path); return 0; } - else - return -1; - - /* get the min percentage */ - tok = strtok(NULL, ","); - if (!tok) - goto setup; + evlist__for_each(session->evlist, pos) + hists__output_resort(&pos->hists); - callchain_param.min_percent = strtod(tok, &endptr); - if (tok == endptr) - return -1; + return report__browse_hists(rep); +} - /* get the print limit */ - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; +static int +report_parse_callchain_opt(const struct option *opt, const char *arg, int unset) +{ + struct report *rep = (struct report *)opt->value; - if (tok2[0] != 'c') { - callchain_param.print_limit = strtoul(tok2, &endptr, 0); - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; + /* + * --no-call-graph + */ + if (unset) { + rep->dont_use_callchains = true; + return 0; } - /* get the call chain order */ - if (!strcmp(tok2, "caller")) - callchain_param.order = ORDER_CALLER; - else if (!strcmp(tok2, "callee")) - callchain_param.order = ORDER_CALLEE; - else - return -1; -setup: - if (callchain_register_param(&callchain_param) < 0) { - fprintf(stderr, "Can't register callchain params\n"); - return -1; + return parse_callchain_report_opt(arg); +} + +int +report_parse_ignore_callees_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (arg) { + int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED); + if (err) { + char buf[BUFSIZ]; + regerror(err, &ignore_callees_regex, buf, sizeof(buf)); + pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf); + return -1; + } + have_ignore_callees = 1; } + return 0; } @@ -536,7 +537,19 @@ static int parse_branch_mode(const struct option *opt __maybe_unused, const char *str __maybe_unused, int unset) { - sort__branch_mode = !unset; + int *branch_mode = opt->value; + + *branch_mode = !unset; + return 0; +} + +static int +parse_percent_limit(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + struct report *rep = opt->value; + + rep->min_percent = strtof(str, NULL); return 0; } @@ -545,32 +558,34 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) struct perf_session *session; struct stat st; bool has_br_stack = false; + int branch_mode = -1; int ret = -1; char callchain_default_opt[] = "fractal,0.5,callee"; const char * const report_usage[] = { "perf report [<options>]", NULL }; - struct perf_report report = { + struct report report = { .tool = { .sample = process_sample_event, .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, + .exit = perf_event__process_exit, + .fork = perf_event__process_fork, .lost = perf_event__process_lost, .read = process_read_event, .attr = perf_event__process_attr, - .event_type = perf_event__process_event_type, .tracing_data = perf_event__process_tracing_data, .build_id = perf_event__process_build_id, .ordered_samples = true, .ordering_requires_timestamps = true, }, + .max_stack = PERF_MAX_STACK_DEPTH, .pretty_printing_style = "normal", }; const struct option options[] = { - OPT_STRING('i', "input", &report.input_name, "file", + OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), @@ -593,9 +608,14 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN(0, "gtk", &report.use_gtk, 
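report_parse_ignore_callees_opt() above compiles the user-supplied --ignore-callees pattern with REG_EXTENDED and turns a bad regex into a readable message via regerror(). A small standalone version of that compile-and-report step, using a made-up pattern:

/*
 * Compile an extended regex, report compile errors the way the
 * --ignore-callees handler does, then run one match. Demo only.
 */
#include <regex.h>
#include <stdio.h>

int main(void)
{
	regex_t ignore_callees_regex;
	const char *pattern = "^(malloc|free|operator new)";
	int err = regcomp(&ignore_callees_regex, pattern, REG_EXTENDED);

	if (err) {
		char buf[BUFSIZ];

		regerror(err, &ignore_callees_regex, buf, sizeof(buf));
		fprintf(stderr, "Invalid --ignore-callees regex: %s\n%s\n",
			pattern, buf);
		return 1;
	}

	printf("matches malloc: %s\n",
	       regexec(&ignore_callees_regex, "malloc", 0, NULL, 0) == 0 ?
	       "yes" : "no");
	regfree(&ignore_callees_regex);
	return 0;
}
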
"Use the GTK2 interface"), OPT_BOOLEAN(0, "stdio", &report.use_stdio, "Use the stdio interface"), + OPT_BOOLEAN(0, "header", &report.header, "Show data header."), + OPT_BOOLEAN(0, "header-only", &report.header_only, + "Show only data header."), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol, parent, dso_to," - " dso_from, symbol_to, symbol_from, mispredict"), + "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..." + " Please refer the man page for the complete list."), + OPT_STRING('F', "fields", &field_order, "key[,keys...]", + "output field(s): overhead, period, sample plus all of sort keys"), OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu modes"), OPT_STRING('p', "parent", &parent_pattern, "regex", @@ -603,10 +623,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other, "Only display entries with parent-match"), OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order", - "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. " - "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt), + "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address). " + "Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt), + OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, + "Accumulate callchains of children and show total overhead as well"), + OPT_INTEGER(0, "max-stack", &report.max_stack, + "Set the maximum stack depth when parsing the callchain, " + "anything beyond the specified depth will be ignored. " + "Default: " __stringify(PERF_MAX_STACK_DEPTH)), OPT_BOOLEAN('G', "inverted", &report.inverted_callchain, "alias for inverted call graph"), + OPT_CALLBACK(0, "ignore-callees", NULL, "regex", + "ignore callees of these functions in call graphs", + report_parse_ignore_callees_opt), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", @@ -637,12 +666,26 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) "Specify disassembler style (e.g. 
-M intel for intel syntax)"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), - OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", + OPT_BOOLEAN(0, "group", &symbol_conf.event_group, + "Show event group information together"), + OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "", "use branch records for histogram filling", parse_branch_mode), OPT_STRING(0, "objdump", &objdump_path, "path", "objdump binary to use for disassembly and annotations"), + OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, + "Disable symbol demangling"), + OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"), + OPT_CALLBACK(0, "percent-limit", &report, "percent", + "Don't show entries under that percent", parse_percent_limit), + OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", + "how to display percentage of filtered entries", parse_filter_percentage), OPT_END() }; + struct perf_data_file file = { + .mode = PERF_DATA_MODE_READ, + }; + + perf_config(report__config, &report); argc = parse_options(argc, argv, options, report_usage, 0); @@ -656,14 +699,18 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) if (report.inverted_callchain) callchain_param.order = ORDER_CALLER; - if (!report.input_name || !strlen(report.input_name)) { + if (!input_name || !strlen(input_name)) { if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) - report.input_name = "-"; + input_name = "-"; else - report.input_name = "perf.data"; + input_name = "perf.data"; } - session = perf_session__new(report.input_name, O_RDONLY, - report.force, false, &report.tool); + + file.path = input_name; + file.force = report.force; + +repeat: + session = perf_session__new(&file, false, &report.tool); if (session == NULL) return -ENOMEM; @@ -672,38 +719,57 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) has_br_stack = perf_header__has_feat(&session->header, HEADER_BRANCH_STACK); - if (sort__branch_mode == -1 && has_br_stack) - sort__branch_mode = 1; + if (branch_mode == -1 && has_br_stack) { + sort__mode = SORT_MODE__BRANCH; + symbol_conf.cumulate_callchain = false; + } - /* sort__branch_mode could be 0 if --no-branch-stack */ - if (sort__branch_mode == 1) { - /* - * if no sort_order is provided, then specify - * branch-mode specific order - */ - if (sort_order == default_sort_order) - sort_order = "comm,dso_from,symbol_from," - "dso_to,symbol_to"; + if (report.mem_mode) { + if (sort__mode == SORT_MODE__BRANCH) { + pr_err("branch and mem mode incompatible\n"); + goto error; + } + sort__mode = SORT_MODE__MEMORY; + symbol_conf.cumulate_callchain = false; + } + if (setup_sorting() < 0) { + if (sort_order) + parse_options_usage(report_usage, options, "s", 1); + if (field_order) + parse_options_usage(sort_order ? NULL : report_usage, + options, "F", 1); + goto error; } - if (strcmp(report.input_name, "-") != 0) + /* Force tty output for header output. 
*/ + if (report.header || report.header_only) + use_browser = 0; + + if (strcmp(input_name, "-") != 0) setup_browser(true); - else { + else use_browser = 0; - perf_hpp__init(); - } - setup_sorting(report_usage, options); + if (report.header || report.header_only) { + perf_session__fprintf_info(session, stdout, + report.show_full_info); + if (report.header_only) + return 0; + } else if (use_browser == 0) { + fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n", + stdout); + } /* - * Only in the newt browser we are doing integrated annotation, + * Only in the TUI browser we are doing integrated annotation, * so don't allocate extra space that won't be used in the stdio * implementation. */ - if (use_browser == 1 && sort__has_sym) { + if (ui__has_annotation()) { symbol_conf.priv_size = sizeof(struct annotation); - report.annotate_init = symbol__annotate_init; + machines__set_symbol_filter(&session->machines, + symbol__annotate_init); /* * For searching by name on the "Browse map details". * providing it only in verbose mode not to bloat too @@ -724,20 +790,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) if (symbol__init() < 0) goto error; - if (parent_pattern != default_parent_pattern) { - if (sort_dimension__add("parent") < 0) - goto error; - - /* - * Only show the parent fields if we explicitly - * sort that way. If we only use parent machinery - * for filtering, we don't want it. - */ - if (!strstr(sort_order, "parent")) - sort_parent.elide = 1; - } else - symbol_conf.exclude_other = false; - if (argc) { /* * Special case: if there's an argument left then assume that @@ -749,19 +801,15 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) report.symbol_filter_str = argv[0]; } - sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); - - if (sort__branch_mode == 1) { - sort_entry__setup_elide(&sort_dso_from, symbol_conf.dso_from_list, "dso_from", stdout); - sort_entry__setup_elide(&sort_dso_to, symbol_conf.dso_to_list, "dso_to", stdout); - sort_entry__setup_elide(&sort_sym_from, symbol_conf.sym_from_list, "sym_from", stdout); - sort_entry__setup_elide(&sort_sym_to, symbol_conf.sym_to_list, "sym_to", stdout); - } else { - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); - sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); - } + sort__setup_elide(stdout); ret = __cmd_report(&report); + if (ret == K_SWITCH_INPUT_DATA) { + perf_session__delete(session); + goto repeat; + } else + ret = 0; + error: perf_session__delete(session); return ret; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 3488ead3b60..c38d06c0477 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -66,7 +66,7 @@ struct sched_atom { struct task_desc *wakee; }; -#define TASK_STATE_TO_CHAR_STR "RSDTtZX" +#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" enum thread_state { THREAD_SLEEPING = 0, @@ -109,8 +109,9 @@ struct trace_sched_handler { int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine); - int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel, - struct perf_sample *sample); + /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */ + int (*fork_event)(struct perf_sched *sched, union perf_event *event, + struct machine *machine); int (*migrate_task_event)(struct perf_sched *sched, struct perf_evsel *evsel, @@ -120,7 
+121,6 @@ struct trace_sched_handler { struct perf_sched { struct perf_tool tool; - const char *input_name; const char *sort_order; unsigned long nr_tasks; struct task_desc *pid_to_task[MAX_PID]; @@ -149,7 +149,6 @@ struct perf_sched { unsigned long nr_runs; unsigned long nr_timestamps; unsigned long nr_unordered_timestamps; - unsigned long nr_state_machine_bugs; unsigned long nr_context_switch_bugs; unsigned long nr_events; unsigned long nr_lost_chunks; @@ -469,7 +468,7 @@ static void *thread_func(void *ctx) char comm2[22]; int fd; - free(parms); + zfree(&parms); sprintf(comm2, ":%s", this_task->comm); prctl(PR_SET_NAME, comm2); @@ -718,22 +717,31 @@ static int replay_switch_event(struct perf_sched *sched, return 0; } -static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel, - struct perf_sample *sample) +static int replay_fork_event(struct perf_sched *sched, + union perf_event *event, + struct machine *machine) { - const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), - *child_comm = perf_evsel__strval(evsel, sample, "child_comm"); - const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), - child_pid = perf_evsel__intval(evsel, sample, "child_pid"); + struct thread *child, *parent; + + child = machine__findnew_thread(machine, event->fork.pid, + event->fork.tid); + parent = machine__findnew_thread(machine, event->fork.ppid, + event->fork.ptid); + + if (child == NULL || parent == NULL) { + pr_debug("thread does not exist on fork event: child %p, parent %p\n", + child, parent); + return 0; + } if (verbose) { - printf("sched_fork event %p\n", evsel); - printf("... parent: %s/%d\n", parent_comm, parent_pid); - printf("... child: %s/%d\n", child_comm, child_pid); + printf("fork event\n"); + printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid); + printf("... 
child: %s/%d\n", thread__comm_str(child), child->tid); } - register_pid(sched, parent_pid, parent_comm); - register_pid(sched, child_pid, child_comm); + register_pid(sched, parent->tid, thread__comm_str(parent)); + register_pid(sched, child->tid, thread__comm_str(child)); return 0; } @@ -825,14 +833,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) return 0; } -static int latency_fork_event(struct perf_sched *sched __maybe_unused, - struct perf_evsel *evsel __maybe_unused, - struct perf_sample *sample __maybe_unused) -{ - /* should insert the newcomer */ - return 0; -} - static char sched_out_state(u64 prev_state) { const char *str = TASK_STATE_TO_CHAR_STR; @@ -935,8 +935,8 @@ static int latency_switch_event(struct perf_sched *sched, return -1; } - sched_out = machine__findnew_thread(machine, prev_pid); - sched_in = machine__findnew_thread(machine, next_pid); + sched_out = machine__findnew_thread(machine, 0, prev_pid); + sched_in = machine__findnew_thread(machine, 0, next_pid); out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); if (!out_events) { @@ -979,7 +979,7 @@ static int latency_runtime_event(struct perf_sched *sched, { const u32 pid = perf_evsel__intval(evsel, sample, "pid"); const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); - struct thread *thread = machine__findnew_thread(machine, pid); + struct thread *thread = machine__findnew_thread(machine, 0, pid); struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); u64 timestamp = sample->time; int cpu = sample->cpu; @@ -1006,18 +1006,13 @@ static int latency_wakeup_event(struct perf_sched *sched, struct perf_sample *sample, struct machine *machine) { - const u32 pid = perf_evsel__intval(evsel, sample, "pid"), - success = perf_evsel__intval(evsel, sample, "success"); + const u32 pid = perf_evsel__intval(evsel, sample, "pid"); struct work_atoms *atoms; struct work_atom *atom; struct thread *wakee; u64 timestamp = sample->time; - /* Note for later, it may be interesting to observe the failing cases */ - if (!success) - return 0; - - wakee = machine__findnew_thread(machine, pid); + wakee = machine__findnew_thread(machine, 0, pid); atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); if (!atoms) { if (thread_atoms_insert(sched, wakee)) @@ -1036,12 +1031,18 @@ static int latency_wakeup_event(struct perf_sched *sched, atom = list_entry(atoms->work_list.prev, struct work_atom, list); /* + * As we do not guarantee the wakeup event happens when + * task is out of run queue, also may happen when task is + * on run queue and wakeup only change ->state to TASK_RUNNING, + * then we should not set the ->wake_up_time when wake up a + * task which is on run queue. + * * You WILL be missing events if you've recorded only * one CPU, or are only looking at only one, so don't - * make useless noise. + * skip in this case. 
*/ if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) - sched->nr_state_machine_bugs++; + return 0; sched->nr_timestamps++; if (atom->sched_out_time > timestamp) { @@ -1071,12 +1072,12 @@ static int latency_migrate_task_event(struct perf_sched *sched, if (sched->profile_cpu == -1) return 0; - migrant = machine__findnew_thread(machine, pid); + migrant = machine__findnew_thread(machine, 0, pid); atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { if (thread_atoms_insert(sched, migrant)) return -1; - register_pid(sched, migrant->pid, migrant->comm); + register_pid(sched, migrant->tid, thread__comm_str(migrant)); atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { pr_err("migration-event: Internal tree error"); @@ -1110,20 +1111,20 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_ /* * Ignore idle threads: */ - if (!strcmp(work_list->thread->comm, "swapper")) + if (!strcmp(thread__comm_str(work_list->thread), "swapper")) return; sched->all_runtime += work_list->total_runtime; sched->all_count += work_list->nb_atoms; - ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); + ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid); for (i = 0; i < 24 - ret; i++) printf(" "); avg = work_list->total_lat / work_list->nb_atoms; - printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n", + printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n", (double)work_list->total_runtime / 1e6, work_list->nb_atoms, (double)avg / 1e6, (double)work_list->max_lat / 1e6, @@ -1132,9 +1133,9 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_ static int pid_cmp(struct work_atoms *l, struct work_atoms *r) { - if (l->thread->pid < r->thread->pid) + if (l->thread->tid < r->thread->tid) return -1; - if (l->thread->pid > r->thread->pid) + if (l->thread->tid > r->thread->tid) return 1; return 0; @@ -1265,9 +1266,8 @@ static int process_sched_wakeup_event(struct perf_tool *tool, static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) { - const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), - next_pid = perf_evsel__intval(evsel, sample, "next_pid"); - struct thread *sched_out __maybe_unused, *sched_in; + const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + struct thread *sched_in; int new_shortname; u64 timestamp0, timestamp = sample->time; s64 delta; @@ -1290,8 +1290,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, return -1; } - sched_out = machine__findnew_thread(machine, prev_pid); - sched_in = machine__findnew_thread(machine, next_pid); + sched_in = machine__findnew_thread(machine, 0, next_pid); sched->curr_thread[this_cpu] = sched_in; @@ -1299,17 +1298,25 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, new_shortname = 0; if (!sched_in->shortname[0]) { - sched_in->shortname[0] = sched->next_shortname1; - sched_in->shortname[1] = sched->next_shortname2; - - if (sched->next_shortname1 < 'Z') { - sched->next_shortname1++; + if (!strcmp(thread__comm_str(sched_in), "swapper")) { + /* + * Don't allocate a letter-number for swapper:0 + * as a shortname. Instead, we use '.' for it. 
+ */ + sched_in->shortname[0] = '.'; + sched_in->shortname[1] = ' '; } else { - sched->next_shortname1='A'; - if (sched->next_shortname2 < '9') { - sched->next_shortname2++; + sched_in->shortname[0] = sched->next_shortname1; + sched_in->shortname[1] = sched->next_shortname2; + + if (sched->next_shortname1 < 'Z') { + sched->next_shortname1++; } else { - sched->next_shortname2='0'; + sched->next_shortname1 = 'A'; + if (sched->next_shortname2 < '9') + sched->next_shortname2++; + else + sched->next_shortname2 = '0'; } } new_shortname = 1; @@ -1321,19 +1328,16 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, else printf("*"); - if (sched->curr_thread[cpu]) { - if (sched->curr_thread[cpu]->pid) - printf("%2s ", sched->curr_thread[cpu]->shortname); - else - printf(". "); - } else + if (sched->curr_thread[cpu]) + printf("%2s ", sched->curr_thread[cpu]->shortname); + else printf(" "); } printf(" %12.6f secs ", (double)timestamp/1e9); if (new_shortname) { printf("%s => %s:%d\n", - sched_in->shortname, sched_in->comm, sched_in->pid); + sched_in->shortname, thread__comm_str(sched_in), sched_in->tid); } else { printf("\n"); } @@ -1380,25 +1384,20 @@ static int process_sched_runtime_event(struct perf_tool *tool, return 0; } -static int process_sched_fork_event(struct perf_tool *tool, - struct perf_evsel *evsel, - struct perf_sample *sample, - struct machine *machine __maybe_unused) +static int perf_sched__process_fork_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - if (sched->tp_handler->fork_event) - return sched->tp_handler->fork_event(sched, evsel, sample); + /* run the fork event through the perf machineruy */ + perf_event__process_fork(tool, event, sample, machine); - return 0; -} + /* and then run additional processing needed for this command */ + if (sched->tp_handler->fork_event) + return sched->tp_handler->fork_event(sched, event, machine); -static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, - struct perf_evsel *evsel, - struct perf_sample *sample __maybe_unused, - struct machine *machine __maybe_unused) -{ - pr_debug("sched_exit event %p\n", evsel); return 0; } @@ -1426,27 +1425,20 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_ struct perf_evsel *evsel, struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, sample->tid); int err = 0; - if (thread == NULL) { - pr_debug("problem processing %s event, skipping it.\n", - perf_evsel__name(evsel)); - return -1; - } - evsel->hists.stats.total_period += sample->period; - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); + hists__inc_nr_samples(&evsel->hists, true); - if (evsel->handler.func != NULL) { - tracepoint_handler f = evsel->handler.func; + if (evsel->handler != NULL) { + tracepoint_handler f = evsel->handler; err = f(tool, evsel, sample, machine); } return err; } -static int perf_sched__read_events(struct perf_sched *sched, bool destroy, +static int perf_sched__read_events(struct perf_sched *sched, struct perf_session **psession) { const struct perf_evsel_str_handler handlers[] = { @@ -1454,13 +1446,15 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy, { "sched:sched_stat_runtime", process_sched_runtime_event, }, { "sched:sched_wakeup", process_sched_wakeup_event, }, { "sched:sched_wakeup_new", process_sched_wakeup_event, }, - { 
"sched:sched_process_fork", process_sched_fork_event, }, - { "sched:sched_process_exit", process_sched_exit_event, }, { "sched:sched_migrate_task", process_sched_migrate_task_event, }, }; struct perf_session *session; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; - session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool); + session = perf_session__new(&file, false, &sched->tool); if (session == NULL) { pr_debug("No Memory for session\n"); return -1; @@ -1476,16 +1470,15 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy, goto out_delete; } - sched->nr_events = session->hists.stats.nr_events[0]; - sched->nr_lost_events = session->hists.stats.total_lost; - sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; + sched->nr_events = session->stats.nr_events[0]; + sched->nr_lost_events = session->stats.total_lost; + sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST]; } - if (destroy) - perf_session__delete(session); - if (psession) *psession = session; + else + perf_session__delete(session); return 0; @@ -1506,14 +1499,6 @@ static void print_bad_events(struct perf_sched *sched) (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); } - if (sched->nr_state_machine_bugs && sched->nr_timestamps) { - printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", - (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0, - sched->nr_state_machine_bugs, sched->nr_timestamps); - if (sched->nr_lost_events) - printf(" (due to lost events?)"); - printf("\n"); - } if (sched->nr_context_switch_bugs && sched->nr_timestamps) { printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, @@ -1530,13 +1515,16 @@ static int perf_sched__lat(struct perf_sched *sched) struct perf_session *session; setup_pager(); - if (perf_sched__read_events(sched, false, &session)) + + /* save session -- references to threads are held in work_list */ + if (perf_sched__read_events(sched, &session)) return -1; + perf_sched__sort_lat(sched); - printf("\n ---------------------------------------------------------------------------------------------------------------\n"); - printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); - printf(" ---------------------------------------------------------------------------------------------------------------\n"); + printf("\n -----------------------------------------------------------------------------------------------------------------\n"); + printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); + printf(" -----------------------------------------------------------------------------------------------------------------\n"); next = rb_first(&sched->sorted_atom_root); @@ -1548,7 +1536,7 @@ static int perf_sched__lat(struct perf_sched *sched) next = rb_next(next); } - printf(" -----------------------------------------------------------------------------------------\n"); + printf(" -----------------------------------------------------------------------------------------------------------------\n"); printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", (double)sched->all_runtime / 1e6, sched->all_count); @@ -1566,7 +1554,7 @@ static int perf_sched__map(struct perf_sched *sched) sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); 
setup_pager(); - if (perf_sched__read_events(sched, true, NULL)) + if (perf_sched__read_events(sched, NULL)) return -1; print_bad_events(sched); return 0; @@ -1581,7 +1569,7 @@ static int perf_sched__replay(struct perf_sched *sched) test_calibrations(sched); - if (perf_sched__read_events(sched, true, NULL)) + if (perf_sched__read_events(sched, NULL)) return -1; printf("nr_run_events: %ld\n", sched->nr_run_events); @@ -1633,7 +1621,6 @@ static int __cmd_record(int argc, const char **argv) "record", "-a", "-R", - "-f", "-m", "1024", "-c", "1", "-e", "sched:sched_switch", @@ -1641,9 +1628,9 @@ static int __cmd_record(int argc, const char **argv) "-e", "sched:sched_stat_sleep", "-e", "sched:sched_stat_iowait", "-e", "sched:sched_stat_runtime", - "-e", "sched:sched_process_exit", "-e", "sched:sched_process_fork", "-e", "sched:sched_wakeup", + "-e", "sched:sched_wakeup_new", "-e", "sched:sched_migrate_task", }; @@ -1672,14 +1659,13 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) .sample = perf_sched__process_tracepoint_sample, .comm = perf_event__process_comm, .lost = perf_event__process_lost, - .fork = perf_event__process_task, + .fork = perf_sched__process_fork_event, .ordered_samples = true, }, .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), .sort_list = LIST_HEAD_INIT(sched.sort_list), .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, - .curr_pid = { [0 ... MAX_CPUS - 1] = -1 }, .sort_order = default_sort_order, .replay_repeat = 10, .profile_cpu = -1, @@ -1707,7 +1693,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) OPT_END() }; const struct option sched_options[] = { - OPT_STRING('i', "input", &sched.input_name, "file", + OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), @@ -1723,15 +1709,16 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) "perf sched replay [<options>]", NULL }; - const char * const sched_usage[] = { - "perf sched [<options>] {record|latency|map|replay|script}", + const char *const sched_subcommands[] = { "record", "latency", "map", + "replay", "script", NULL }; + const char *sched_usage[] = { + NULL, NULL }; struct trace_sched_handler lat_ops = { .wakeup_event = latency_wakeup_event, .switch_event = latency_switch_event, .runtime_event = latency_runtime_event, - .fork_event = latency_fork_event, .migrate_task_event = latency_migrate_task_event, }; struct trace_sched_handler map_ops = { @@ -1742,9 +1729,13 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) .switch_event = replay_switch_event, .fork_event = replay_fork_event, }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) + sched.curr_pid[i] = -1; - argc = parse_options(argc, argv, sched_options, sched_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, + sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) usage_with_options(sched_usage, sched_options); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index fb9625083a2..9e9c91f5b7f 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -15,6 +15,7 @@ #include "util/evlist.h" #include "util/evsel.h" #include "util/sort.h" +#include "util/data.h" #include <linux/bitmap.h> static char const *script_name; @@ -24,6 +25,7 @@ static u64 last_timestamp; static u64 nr_unordered; 
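
Editor's aside (not part of the patch above): the builtin-sched.c hunks convert perf_sched__read_events() from the old perf_session__new(input_name, O_RDONLY, 0, false, tool) form to the struct perf_data_file based API, and perf_sched__lat() now keeps the session alive because, as the added comment notes, its work_list entries reference threads owned by the session. A minimal sketch of the resulting open/process/delete pattern, using only the calls visible in this diff (perf_session__new, perf_session__process_events, perf_session__delete) and assuming the in-tree util/tool.h, util/data.h and util/session.h headers; illustrative only, not buildable outside the perf source tree:

#include "util/tool.h"
#include "util/data.h"
#include "util/session.h"

/* Sketch: open a perf.data file read-only, run it through a tool's
 * callbacks, then drop the session.  Callers that keep references to
 * session-owned threads, like perf_sched__lat(), must hand the session
 * back to the caller and delete it later instead.
 */
static int example__read_events(struct perf_tool *tool, const char *path)
{
	struct perf_data_file file = {
		.path = path,
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session;
	int err;

	session = perf_session__new(&file, false, tool);
	if (session == NULL)
		return -1;

	err = perf_session__process_events(session, tool);

	perf_session__delete(session);
	return err;
}
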
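
Editor's aside (not part of the patch): the cmd_sched() hunk above replaces the designated-range initializer for sched.curr_pid, { [0 ... MAX_CPUS - 1] = -1 }, with a runtime loop over ARRAY_SIZE(sched.curr_pid). The two forms fill the array identically; the range designator is a GCC/Clang extension. A small standalone illustration (the MAX_CPUS value here is arbitrary):

#include <stdio.h>

#define MAX_CPUS 8

/* GCC/Clang range-designated initializer, as removed from cmd_sched() */
static int curr_pid_static[MAX_CPUS] = { [0 ... MAX_CPUS - 1] = -1 };

int main(void)
{
	int curr_pid_loop[MAX_CPUS];
	unsigned int i;

	/* runtime equivalent, as now done in cmd_sched() */
	for (i = 0; i < sizeof(curr_pid_loop) / sizeof(curr_pid_loop[0]); i++)
		curr_pid_loop[i] = -1;

	printf("%d %d\n", curr_pid_static[MAX_CPUS - 1], curr_pid_loop[0]);	/* -1 -1 */
	return 0;
}
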
extern const struct option record_options[]; static bool no_callchain; +static bool latency_format; static bool system_wide; static const char *cpu_list; static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -41,6 +43,7 @@ enum perf_output_field { PERF_OUTPUT_DSO = 1U << 9, PERF_OUTPUT_ADDR = 1U << 10, PERF_OUTPUT_SYMOFFSET = 1U << 11, + PERF_OUTPUT_SRCLINE = 1U << 12, }; struct output_option { @@ -59,12 +62,14 @@ struct output_option { {.str = "dso", .field = PERF_OUTPUT_DSO}, {.str = "addr", .field = PERF_OUTPUT_ADDR}, {.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET}, + {.str = "srcline", .field = PERF_OUTPUT_SRCLINE}, }; /* default set to maintain compatibility with current format */ static struct { bool user_set; bool wildcard_set; + unsigned int print_ip_opts; u64 fields; u64 invalid_fields; } output[PERF_TYPE_MAX] = { @@ -207,6 +212,11 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, "to DSO.\n"); return -EINVAL; } + if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) { + pr_err("Display of source line number requested but sample IP is not\n" + "selected. Hence, no address to lookup the source line number.\n"); + return -EINVAL; + } if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", @@ -226,6 +236,27 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel, return 0; } +static void set_print_ip_opts(struct perf_event_attr *attr) +{ + unsigned int type = attr->type; + + output[type].print_ip_opts = 0; + if (PRINT_FIELD(IP)) + output[type].print_ip_opts |= PRINT_IP_OPT_IP; + + if (PRINT_FIELD(SYM)) + output[type].print_ip_opts |= PRINT_IP_OPT_SYM; + + if (PRINT_FIELD(DSO)) + output[type].print_ip_opts |= PRINT_IP_OPT_DSO; + + if (PRINT_FIELD(SYMOFFSET)) + output[type].print_ip_opts |= PRINT_IP_OPT_SYMOFFSET; + + if (PRINT_FIELD(SRCLINE)) + output[type].print_ip_opts |= PRINT_IP_OPT_SRCLINE; +} + /* * verify all user requested events exist and the samples * have the expected data @@ -252,8 +283,37 @@ static int perf_session__check_output_opt(struct perf_session *session) if (evsel && output[j].fields && perf_evsel__check_attr(evsel, session)) return -1; + + if (evsel == NULL) + continue; + + set_print_ip_opts(&evsel->attr); } + /* + * set default for tracepoints to print symbols only + * if callchains are present + */ + if (symbol_conf.use_callchain && + !output[PERF_TYPE_TRACEPOINT].user_set) { + struct perf_event_attr *attr; + + j = PERF_TYPE_TRACEPOINT; + evsel = perf_session__find_first_evtype(session, j); + if (evsel == NULL) + goto out; + + attr = &evsel->attr; + + if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) { + output[j].fields |= PERF_OUTPUT_IP; + output[j].fields |= PERF_OUTPUT_SYM; + output[j].fields |= PERF_OUTPUT_DSO; + set_print_ip_opts(attr); + } + } + +out: return 0; } @@ -262,18 +322,17 @@ static void print_sample_start(struct perf_sample *sample, struct perf_evsel *evsel) { struct perf_event_attr *attr = &evsel->attr; - const char *evname = NULL; unsigned long secs; unsigned long usecs; unsigned long long nsecs; if (PRINT_FIELD(COMM)) { if (latency_format) - printf("%8.8s ", thread->comm); + printf("%8.8s ", thread__comm_str(thread)); else if (PRINT_FIELD(IP) && symbol_conf.use_callchain) - printf("%s ", thread->comm); + printf("%s ", thread__comm_str(thread)); else - printf("%16s ", thread->comm); + printf("%16s ", thread__comm_str(thread)); } if (PRINT_FIELD(PID) && PRINT_FIELD(TID)) @@ -297,11 +356,6 @@ static void print_sample_start(struct perf_sample *sample, usecs = nsecs / NSECS_PER_USEC; 
printf("%5lu.%06lu: ", secs, usecs); } - - if (PRINT_FIELD(EVNAME)) { - evname = perf_evsel__name(evsel); - printf("%s: ", evname ? evname : "[unknown]"); - } } static bool is_bts_event(struct perf_event_attr *attr) @@ -369,8 +423,8 @@ static void print_sample_addr(union perf_event *event, static void print_sample_bts(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine, - struct thread *thread) + struct thread *thread, + struct addr_location *al) { struct perf_event_attr *attr = &evsel->attr; @@ -380,34 +434,40 @@ static void print_sample_bts(union perf_event *event, printf(" "); else printf("\n"); - perf_evsel__print_ip(evsel, event, sample, machine, - PRINT_FIELD(SYM), PRINT_FIELD(DSO), - PRINT_FIELD(SYMOFFSET)); + perf_evsel__print_ip(evsel, sample, al, + output[attr->type].print_ip_opts, + PERF_MAX_STACK_DEPTH); } printf(" => "); /* print branch_to information */ - if (PRINT_FIELD(ADDR)) - print_sample_addr(event, sample, machine, thread, attr); + if (PRINT_FIELD(ADDR) || + ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && + !output[attr->type].user_set)) + print_sample_addr(event, sample, al->machine, thread, attr); printf("\n"); } static void process_event(union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel, struct machine *machine, + struct perf_evsel *evsel, struct thread *thread, struct addr_location *al) { struct perf_event_attr *attr = &evsel->attr; - struct thread *thread = al->thread; if (output[attr->type].fields == 0) return; print_sample_start(sample, thread, evsel); + if (PRINT_FIELD(EVNAME)) { + const char *evname = perf_evsel__name(evsel); + printf("%s: ", evname ? evname : "[unknown]"); + } + if (is_bts_event(attr)) { - print_sample_bts(event, sample, evsel, machine, thread); + print_sample_bts(event, sample, evsel, thread, al); return; } @@ -415,16 +475,17 @@ static void process_event(union perf_event *event, struct perf_sample *sample, event_format__print(evsel->tp_format, sample->cpu, sample->raw_data, sample->raw_size); if (PRINT_FIELD(ADDR)) - print_sample_addr(event, sample, machine, thread, attr); + print_sample_addr(event, sample, al->machine, thread, attr); if (PRINT_FIELD(IP)) { if (!symbol_conf.use_callchain) printf(" "); else printf("\n"); - perf_evsel__print_ip(evsel, event, sample, machine, - PRINT_FIELD(SYM), PRINT_FIELD(DSO), - PRINT_FIELD(SYMOFFSET)); + + perf_evsel__print_ip(evsel, sample, al, + output[attr->type].print_ip_opts, + PERF_MAX_STACK_DEPTH); } printf("\n"); @@ -479,7 +540,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, struct machine *machine) { struct addr_location al; - struct thread *thread = machine__findnew_thread(machine, event->ip.tid); + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); if (thread == NULL) { pr_debug("problem processing %d event, skipping it.\n", @@ -498,7 +560,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, return 0; } - if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) { + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { pr_err("problem processing %d event, skipping it.\n", event->header.type); return -1; @@ -510,40 +572,227 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused, if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; - scripting_ops->process_event(event, sample, evsel, machine, &al); + scripting_ops->process_event(event, sample, evsel, thread, &al); 
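
Editor's aside (not part of the patch): print_sample_start() above formats sample->time by splitting the nanosecond timestamp into seconds and microseconds for the "%5lu.%06lu:" output. A standalone illustration of that arithmetic, with the tree's NSECS_PER_SEC and NSECS_PER_USEC constants written out as their assumed values of 1000000000 and 1000:

#include <stdio.h>

int main(void)
{
	unsigned long long nsecs = 1234567891234ULL;	/* e.g. sample->time */
	unsigned long secs, usecs;

	secs = nsecs / 1000000000ULL;			/* NSECS_PER_SEC */
	nsecs -= (unsigned long long)secs * 1000000000ULL;
	usecs = nsecs / 1000ULL;			/* NSECS_PER_USEC */

	printf("%5lu.%06lu:\n", secs, usecs);		/* prints " 1234.567891:" */
	return 0;
}
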
evsel->hists.stats.total_period += sample->period; return 0; } -static struct perf_tool perf_script = { - .sample = process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, - .attr = perf_event__process_attr, - .event_type = perf_event__process_event_type, - .tracing_data = perf_event__process_tracing_data, - .build_id = perf_event__process_build_id, - .ordered_samples = true, - .ordering_requires_timestamps = true, +struct perf_script { + struct perf_tool tool; + struct perf_session *session; + bool show_task_events; + bool show_mmap_events; }; -extern volatile int session_done; +static int process_attr(struct perf_tool *tool, union perf_event *event, + struct perf_evlist **pevlist) +{ + struct perf_script *scr = container_of(tool, struct perf_script, tool); + struct perf_evlist *evlist; + struct perf_evsel *evsel, *pos; + int err; + + err = perf_event__process_attr(tool, event, pevlist); + if (err) + return err; + + evlist = *pevlist; + evsel = perf_evlist__last(*pevlist); + + if (evsel->attr.type >= PERF_TYPE_MAX) + return 0; + + evlist__for_each(evlist, pos) { + if (pos->attr.type == evsel->attr.type && pos != evsel) + return 0; + } + + set_print_ip_opts(&evsel->attr); + + return perf_evsel__check_attr(evsel, scr->session); +} + +static int process_comm_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct thread *thread; + struct perf_script *script = container_of(tool, struct perf_script, tool); + struct perf_session *session = script->session; + struct perf_evsel *evsel = perf_evlist__first(session->evlist); + int ret = -1; + + thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid); + if (thread == NULL) { + pr_debug("problem processing COMM event, skipping it.\n"); + return -1; + } + + if (perf_event__process_comm(tool, event, sample, machine) < 0) + goto out; + + if (!evsel->attr.sample_id_all) { + sample->cpu = 0; + sample->time = 0; + sample->tid = event->comm.tid; + sample->pid = event->comm.pid; + } + print_sample_start(sample, thread, evsel); + perf_event__fprintf(event, stdout); + ret = 0; + +out: + return ret; +} + +static int process_fork_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct thread *thread; + struct perf_script *script = container_of(tool, struct perf_script, tool); + struct perf_session *session = script->session; + struct perf_evsel *evsel = perf_evlist__first(session->evlist); + + if (perf_event__process_fork(tool, event, sample, machine) < 0) + return -1; + + thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid); + if (thread == NULL) { + pr_debug("problem processing FORK event, skipping it.\n"); + return -1; + } + + if (!evsel->attr.sample_id_all) { + sample->cpu = 0; + sample->time = event->fork.time; + sample->tid = event->fork.tid; + sample->pid = event->fork.pid; + } + print_sample_start(sample, thread, evsel); + perf_event__fprintf(event, stdout); + + return 0; +} +static int process_exit_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct thread *thread; + struct perf_script *script = container_of(tool, struct perf_script, tool); + struct perf_session *session = script->session; + struct perf_evsel *evsel = perf_evlist__first(session->evlist); + + thread = 
machine__findnew_thread(machine, event->fork.pid, event->fork.tid); + if (thread == NULL) { + pr_debug("problem processing EXIT event, skipping it.\n"); + return -1; + } + + if (!evsel->attr.sample_id_all) { + sample->cpu = 0; + sample->time = 0; + sample->tid = event->comm.tid; + sample->pid = event->comm.pid; + } + print_sample_start(sample, thread, evsel); + perf_event__fprintf(event, stdout); + + if (perf_event__process_exit(tool, event, sample, machine) < 0) + return -1; + + return 0; +} + +static int process_mmap_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct thread *thread; + struct perf_script *script = container_of(tool, struct perf_script, tool); + struct perf_session *session = script->session; + struct perf_evsel *evsel = perf_evlist__first(session->evlist); + + if (perf_event__process_mmap(tool, event, sample, machine) < 0) + return -1; + + thread = machine__findnew_thread(machine, event->mmap.pid, event->mmap.tid); + if (thread == NULL) { + pr_debug("problem processing MMAP event, skipping it.\n"); + return -1; + } + + if (!evsel->attr.sample_id_all) { + sample->cpu = 0; + sample->time = 0; + sample->tid = event->mmap.tid; + sample->pid = event->mmap.pid; + } + print_sample_start(sample, thread, evsel); + perf_event__fprintf(event, stdout); + + return 0; +} + +static int process_mmap2_event(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct thread *thread; + struct perf_script *script = container_of(tool, struct perf_script, tool); + struct perf_session *session = script->session; + struct perf_evsel *evsel = perf_evlist__first(session->evlist); + + if (perf_event__process_mmap2(tool, event, sample, machine) < 0) + return -1; + + thread = machine__findnew_thread(machine, event->mmap2.pid, event->mmap2.tid); + if (thread == NULL) { + pr_debug("problem processing MMAP2 event, skipping it.\n"); + return -1; + } + + if (!evsel->attr.sample_id_all) { + sample->cpu = 0; + sample->time = 0; + sample->tid = event->mmap2.tid; + sample->pid = event->mmap2.pid; + } + print_sample_start(sample, thread, evsel); + perf_event__fprintf(event, stdout); + + return 0; +} static void sig_handler(int sig __maybe_unused) { session_done = 1; } -static int __cmd_script(struct perf_session *session) +static int __cmd_script(struct perf_script *script) { int ret; signal(SIGINT, sig_handler); - ret = perf_session__process_events(session, &perf_script); + /* override event processing functions */ + if (script->show_task_events) { + script->tool.comm = process_comm_event; + script->tool.fork = process_fork_event; + script->tool.exit = process_exit_event; + } + if (script->show_mmap_events) { + script->tool.mmap = process_mmap_event; + script->tool.mmap2 = process_mmap2_event; + } + + ret = perf_session__process_events(script->session, &script->tool); if (debug_mode) pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); @@ -692,7 +941,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused, const char *arg, int unset __maybe_unused) { char *tok; - int i, imax = sizeof(all_output_options) / sizeof(struct output_option); + int i, imax = ARRAY_SIZE(all_output_options); int j; int rc = 0; char *str = strdup(arg); @@ -853,9 +1102,9 @@ static struct script_desc *script_desc__new(const char *name) static void script_desc__delete(struct script_desc *s) { - free(s->name); - free(s->half_liner); - free(s->args); + zfree(&s->name); + 
zfree(&s->half_liner); + zfree(&s->args); free(s); } @@ -909,18 +1158,6 @@ static const char *ends_with(const char *str, const char *suffix) return NULL; } -static char *ltrim(char *str) -{ - int len = strlen(str); - - while (len && isspace(*str)) { - len--; - str++; - } - - return str; -} - static int read_script_info(struct script_desc *desc, const char *filename) { char line[BUFSIZ], *p; @@ -1030,6 +1267,67 @@ static int list_available_scripts(const struct option *opt __maybe_unused, } /* + * Some scripts specify the required events in their "xxx-record" file, + * this function will check if the events in perf.data match those + * mentioned in the "xxx-record". + * + * Fixme: All existing "xxx-record" are all in good formats "-e event ", + * which is covered well now. And new parsing code should be added to + * cover the future complexing formats like event groups etc. + */ +static int check_ev_match(char *dir_name, char *scriptname, + struct perf_session *session) +{ + char filename[MAXPATHLEN], evname[128]; + char line[BUFSIZ], *p; + struct perf_evsel *pos; + int match, len; + FILE *fp; + + sprintf(filename, "%s/bin/%s-record", dir_name, scriptname); + + fp = fopen(filename, "r"); + if (!fp) + return -1; + + while (fgets(line, sizeof(line), fp)) { + p = ltrim(line); + if (*p == '#') + continue; + + while (strlen(p)) { + p = strstr(p, "-e"); + if (!p) + break; + + p += 2; + p = ltrim(p); + len = strcspn(p, " \t"); + if (!len) + break; + + snprintf(evname, len + 1, "%s", p); + + match = 0; + evlist__for_each(session->evlist, pos) { + if (!strcmp(perf_evsel__name(pos), evname)) { + match = 1; + break; + } + } + + if (!match) { + fclose(fp); + return -1; + } + } + } + + fclose(fp); + return 0; +} + +/* * Return -1 if none is found, otherwise the actual scripts number. 
* * Currently the only user of this function is the script browser, which @@ -1039,17 +1337,27 @@ static int list_available_scripts(const struct option *opt __maybe_unused, int find_scripts(char **scripts_array, char **scripts_path_array) { struct dirent *script_next, *lang_next, script_dirent, lang_dirent; - char scripts_path[MAXPATHLEN]; + char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN]; DIR *scripts_dir, *lang_dir; - char lang_path[MAXPATHLEN]; + struct perf_session *session; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; char *temp; int i = 0; + session = perf_session__new(&file, false, NULL); + if (!session) + return -1; + snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); scripts_dir = opendir(scripts_path); - if (!scripts_dir) + if (!scripts_dir) { + perf_session__delete(session); return -1; + } for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, @@ -1077,10 +1385,18 @@ int find_scripts(char **scripts_array, char **scripts_path_array) snprintf(scripts_array[i], (temp - script_dirent.d_name) + 1, "%s", script_dirent.d_name); + + if (check_ev_match(lang_path, + scripts_array[i], session)) + continue; + i++; } + closedir(lang_dir); } + closedir(scripts_dir); + perf_session__delete(session); return i; } @@ -1175,13 +1491,29 @@ static int have_cmd(int argc, const char **argv) int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) { bool show_full_info = false; - const char *input_name = NULL; + bool header = false; + bool header_only = false; char *rec_script_path = NULL; char *rep_script_path = NULL; struct perf_session *session; char *script_path = NULL; const char **__argv; int i, j, err; + struct perf_script script = { + .tool = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .mmap2 = perf_event__process_mmap2, + .comm = perf_event__process_comm, + .exit = perf_event__process_exit, + .fork = perf_event__process_fork, + .attr = process_attr, + .tracing_data = perf_event__process_tracing_data, + .build_id = perf_event__process_build_id, + .ordered_samples = true, + .ordering_requires_timestamps = true, + }, + }; const struct option options[] = { OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), @@ -1199,6 +1531,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_BOOLEAN('d', "debug-mode", &debug_mode, "do various checks like samples ordering and lost events"), + OPT_BOOLEAN(0, "header", &header, "Show data header."), + OPT_BOOLEAN(0, "header-only", &header_only, "Show only data header."), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, @@ -1223,6 +1557,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) "display extended information from perf.data file"), OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path, "Show the path of [kernel.kallsyms]"), + OPT_BOOLEAN('\0', "show-task-events", &script.show_task_events, + "Show the fork/comm/exit events"), + OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events, + "Show the mmap events"), OPT_END() }; const char * const script_usage[] = { @@ -1233,12 +1571,17 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) "perf script [<options>] <top-script> [script-args]", 
NULL }; + struct perf_data_file file = { + .mode = PERF_DATA_MODE_READ, + }; setup_scripting(); argc = parse_options(argc, argv, options, script_usage, PARSE_OPT_STOP_AT_NON_OPTION); + file.path = input_name; + if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) { rec_script_path = get_script_path(argv[1], RECORD_SUFFIX); if (!rec_script_path) @@ -1402,18 +1745,23 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) if (!script_name) setup_pager(); - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_script); + session = perf_session__new(&file, false, &script.tool); if (session == NULL) return -ENOMEM; + if (header || header_only) { + perf_session__fprintf_info(session, stdout, show_full_info); + if (header_only) + return 0; + } + + script.session = session; + if (cpu_list) { if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) return -1; } - perf_session__fprintf_info(session, stdout, show_full_info); - if (!no_callchain) symbol_conf.use_callchain = true; else @@ -1429,7 +1777,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) return -1; } - input = open(session->filename, O_RDONLY); /* input_name */ + input = open(file.path, O_RDONLY); /* input_name */ if (input < 0) { perror("failed to open file"); return -1; @@ -1452,7 +1800,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) return -1; } - err = scripting_ops->generate_script(session->pevent, + err = scripting_ops->generate_script(session->tevent.pevent, "perf-script"); goto out; } @@ -1469,7 +1817,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) if (err < 0) goto out; - err = __cmd_script(session); + err = __cmd_script(&script); perf_session__delete(session); cleanup_scripting(); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 93b9011fa3e..65a151e3606 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -46,6 +46,7 @@ #include "util/util.h" #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/pmu.h" #include "util/event.h" #include "util/evlist.h" #include "util/evsel.h" @@ -57,6 +58,7 @@ #include "util/thread.h" #include "util/thread_map.h" +#include <stdlib.h> #include <sys/prctl.h> #include <locale.h> @@ -64,25 +66,83 @@ #define CNTR_NOT_SUPPORTED "<not supported>" #define CNTR_NOT_COUNTED "<not counted>" +static void print_stat(int argc, const char **argv); +static void print_counter_aggr(struct perf_evsel *counter, char *prefix); +static void print_counter(struct perf_evsel *counter, char *prefix); +static void print_aggr(char *prefix); + +/* Default events used for perf stat -T */ +static const char * const transaction_attrs[] = { + "task-clock", + "{" + "instructions," + "cycles," + "cpu/cycles-t/," + "cpu/tx-start/," + "cpu/el-start/," + "cpu/cycles-ct/" + "}" +}; + +/* More limited version when the CPU does not have all events. 
 */
+static const char * const transaction_limited_attrs[] = {
+	"task-clock",
+	"{"
+	"instructions,"
+	"cycles,"
+	"cpu/cycles-t/,"
+	"cpu/tx-start/"
+	"}"
+};
+
+/* must match transaction_attrs and the beginning limited_attrs */
+enum {
+	T_TASK_CLOCK,
+	T_INSTRUCTIONS,
+	T_CYCLES,
+	T_CYCLES_IN_TX,
+	T_TRANSACTION_START,
+	T_ELISION_START,
+	T_CYCLES_IN_TX_CP,
+};
+
 static struct perf_evlist *evsel_list;
 
-static struct perf_target target = {
+static struct target target = {
 	.uid = UINT_MAX,
 };
 
+enum aggr_mode {
+	AGGR_NONE,
+	AGGR_GLOBAL,
+	AGGR_SOCKET,
+	AGGR_CORE,
+};
+
 static int run_count = 1;
 static bool no_inherit = false;
 static bool scale = true;
-static bool no_aggr = false;
-static pid_t child_pid = -1;
+static enum aggr_mode aggr_mode = AGGR_GLOBAL;
+static volatile pid_t child_pid = -1;
 static bool null_run = false;
 static int detailed_run = 0;
+static bool transaction_run;
 static bool big_num = true;
 static int big_num_opt = -1;
 static const char *csv_sep = NULL;
 static bool csv_output = false;
 static bool group = false;
 static FILE *output = NULL;
+static const char *pre_cmd = NULL;
+static const char *post_cmd = NULL;
+static bool sync_run = false;
+static unsigned int interval = 0;
+static unsigned int initial_delay = 0;
+static unsigned int unit_width = 4; /* strlen("unit") */
+static bool forever = false;
+static struct timespec ref_time;
+static struct cpu_map *aggr_map;
+static int (*aggr_get_id)(struct cpu_map *m, int cpu);
 
 static volatile int done = 0;
 
@@ -90,26 +150,100 @@ struct perf_stat {
 	struct stats res_stats[3];
 };
 
+static inline void diff_timespec(struct timespec *r, struct timespec *a,
+				 struct timespec *b)
+{
+	r->tv_sec = a->tv_sec - b->tv_sec;
+	if (a->tv_nsec < b->tv_nsec) {
+		r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
+		r->tv_sec--;
+	} else {
+		r->tv_nsec = a->tv_nsec - b->tv_nsec;
+	}
+}
+
+static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+{
+	return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
+}
+
+static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
+{
+	return perf_evsel__cpus(evsel)->nr;
+}
+
+static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
+{
+	int i;
+	struct perf_stat *ps = evsel->priv;
+
+	for (i = 0; i < 3; i++)
+		init_stats(&ps->res_stats[i]);
+}
+
 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
 	evsel->priv = zalloc(sizeof(struct perf_stat));
-	return evsel->priv == NULL ? -ENOMEM : 0;
+	if (evsel->priv == NULL)
+		return -ENOMEM;
+	perf_evsel__reset_stat_priv(evsel);
+	return 0;
 }
 
 static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
 {
-	free(evsel->priv);
-	evsel->priv = NULL;
+	zfree(&evsel->priv);
 }
 
-static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
 {
-	return (evsel->cpus && !target.cpu_list) ?
evsel->cpus : evsel_list->cpus; + void *addr; + size_t sz; + + sz = sizeof(*evsel->counts) + + (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values)); + + addr = zalloc(sz); + if (!addr) + return -ENOMEM; + + evsel->prev_raw_counts = addr; + + return 0; } -static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) +static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) { - return perf_evsel__cpus(evsel)->nr; + zfree(&evsel->prev_raw_counts); +} + +static void perf_evlist__free_stats(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + perf_evsel__free_stat_priv(evsel); + perf_evsel__free_counts(evsel); + perf_evsel__free_prev_raw_counts(evsel); + } +} + +static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (perf_evsel__alloc_stat_priv(evsel) < 0 || + perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 || + (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0)) + goto out_free; + } + + return 0; + +out_free: + perf_evlist__free_stats(evlist); + return -1; } static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; @@ -123,14 +257,42 @@ static struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; static struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; +static struct stats runtime_cycles_in_tx_stats[MAX_NR_CPUS]; static struct stats walltime_nsecs_stats; +static struct stats runtime_transaction_stats[MAX_NR_CPUS]; +static struct stats runtime_elision_stats[MAX_NR_CPUS]; -static int create_perf_stat_counter(struct perf_evsel *evsel, - struct perf_evsel *first) +static void perf_stat__reset_stats(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + perf_evsel__reset_stat_priv(evsel); + perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel)); + } + + memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats)); + memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats)); + memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats)); + memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats)); + memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats)); + memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats)); + memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats)); + memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats)); + memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats)); + memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats)); + memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats)); + memset(runtime_cycles_in_tx_stats, 0, + sizeof(runtime_cycles_in_tx_stats)); + memset(runtime_transaction_stats, 0, + sizeof(runtime_transaction_stats)); + memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats)); + memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats)); +} + +static int create_perf_stat_counter(struct perf_evsel *evsel) { struct perf_event_attr *attr = &evsel->attr; - bool exclude_guest_missing = false; - int ret; if (scale) attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | @@ -138,37 +300,16 @@ static int create_perf_stat_counter(struct perf_evsel *evsel, attr->inherit = !no_inherit; -retry: - if (exclude_guest_missing) - evsel->attr.exclude_guest = 
evsel->attr.exclude_host = 0; + if (target__has_cpu(&target)) + return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); - if (perf_target__has_cpu(&target)) { - ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); - if (ret) - goto check_ret; - return 0; - } - - if (!perf_target__has_task(&target) && (!group || evsel == first)) { + if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) { attr->disabled = 1; - attr->enable_on_exec = 1; + if (!initial_delay) + attr->enable_on_exec = 1; } - ret = perf_evsel__open_per_thread(evsel, evsel_list->threads); - if (!ret) - return 0; - /* fall through */ -check_ret: - if (ret && errno == EINVAL) { - if (!exclude_guest_missing && - (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { - pr_debug("Old kernel, cannot exclude " - "guest or host samples.\n"); - exclude_guest_missing = true; - goto retry; - } - } - return ret; + return perf_evsel__open_per_thread(evsel, evsel_list->threads); } /* @@ -183,6 +324,29 @@ static inline int nsec_counter(struct perf_evsel *evsel) return 0; } +static struct perf_evsel *nth_evsel(int n) +{ + static struct perf_evsel **array; + static int array_len; + struct perf_evsel *ev; + int j; + + /* Assumes this only called when evsel_list does not change anymore. */ + if (!array) { + evlist__for_each(evsel_list, ev) + array_len++; + array = malloc(array_len * sizeof(void *)); + if (!array) + exit(ENOMEM); + j = 0; + evlist__for_each(evsel_list, ev) + array[j++] = ev; + } + if (n < array_len) + return array[n]; + return NULL; +} + /* * Update various tracking values we maintain to print * more semantic information such as miss/hit ratios, @@ -194,6 +358,15 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) update_stats(&runtime_nsecs_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_stats(&runtime_cycles_stats[0], count[0]); + else if (transaction_run && + perf_evsel__cmp(counter, nth_evsel(T_CYCLES_IN_TX))) + update_stats(&runtime_cycles_in_tx_stats[0], count[0]); + else if (transaction_run && + perf_evsel__cmp(counter, nth_evsel(T_TRANSACTION_START))) + update_stats(&runtime_transaction_stats[0], count[0]); + else if (transaction_run && + perf_evsel__cmp(counter, nth_evsel(T_ELISION_START))) + update_stats(&runtime_elision_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) @@ -225,7 +398,7 @@ static int read_counter_aggr(struct perf_evsel *counter) int i; if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter), - evsel_list->threads->nr, scale) < 0) + thread_map__nr(evsel_list->threads), scale) < 0) return -1; for (i = 0; i < 3; i++) @@ -265,73 +438,129 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static int run_perf_stat(int argc __maybe_unused, const char **argv) +static void print_interval(void) { - unsigned long long t0, t1; - struct perf_evsel *counter, *first; - int status = 0; - int child_ready_pipe[2], go_pipe[2]; - const bool forks = (argc > 0); - char buf; + static int num_print_interval; + struct perf_evsel *counter; + struct perf_stat *ps; + struct timespec ts, rs; + char prefix[64]; - if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { - perror("failed to create pipes"); - return -1; + if (aggr_mode == AGGR_GLOBAL) { + evlist__for_each(evsel_list, counter) { + ps = counter->priv; + 
memset(ps->res_stats, 0, sizeof(ps->res_stats)); + read_counter_aggr(counter); + } + } else { + evlist__for_each(evsel_list, counter) { + ps = counter->priv; + memset(ps->res_stats, 0, sizeof(ps->res_stats)); + read_counter(counter); + } } - if (forks) { - if ((child_pid = fork()) < 0) - perror("failed to fork"); + clock_gettime(CLOCK_MONOTONIC, &ts); + diff_timespec(&rs, &ts, &ref_time); + sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep); + + if (num_print_interval == 0 && !csv_output) { + switch (aggr_mode) { + case AGGR_SOCKET: + fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit"); + break; + case AGGR_CORE: + fprintf(output, "# time core cpus counts %*s events\n", unit_width, "unit"); + break; + case AGGR_NONE: + fprintf(output, "# time CPU counts %*s events\n", unit_width, "unit"); + break; + case AGGR_GLOBAL: + default: + fprintf(output, "# time counts %*s events\n", unit_width, "unit"); + } + } - if (!child_pid) { - close(child_ready_pipe[0]); - close(go_pipe[1]); - fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); + if (++num_print_interval == 25) + num_print_interval = 0; + + switch (aggr_mode) { + case AGGR_CORE: + case AGGR_SOCKET: + print_aggr(prefix); + break; + case AGGR_NONE: + evlist__for_each(evsel_list, counter) + print_counter(counter, prefix); + break; + case AGGR_GLOBAL: + default: + evlist__for_each(evsel_list, counter) + print_counter_aggr(counter, prefix); + } - /* - * Do a dummy execvp to get the PLT entry resolved, - * so we avoid the resolver overhead on the real - * execvp call. - */ - execvp("", (char **)argv); + fflush(output); +} - /* - * Tell the parent we're ready to go - */ - close(child_ready_pipe[1]); +static void handle_initial_delay(void) +{ + struct perf_evsel *counter; - /* - * Wait until the parent tells us to go. - */ - if (read(go_pipe[0], &buf, 1) == -1) - perror("unable to read pipe"); + if (initial_delay) { + const int ncpus = cpu_map__nr(evsel_list->cpus), + nthreads = thread_map__nr(evsel_list->threads); - execvp(argv[0], (char **)argv); + usleep(initial_delay * 1000); + evlist__for_each(evsel_list, counter) + perf_evsel__enable(counter, ncpus, nthreads); + } +} - perror(argv[0]); - exit(-1); - } +static volatile int workload_exec_errno; + +/* + * perf_evlist__prepare_workload will send a SIGUSR1 + * if the fork fails, since we asked by setting its + * want_signal to true. + */ +static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info, + void *ucontext __maybe_unused) +{ + workload_exec_errno = info->si_value.sival_int; +} + +static int __run_perf_stat(int argc, const char **argv) +{ + char msg[512]; + unsigned long long t0, t1; + struct perf_evsel *counter; + struct timespec ts; + size_t l; + int status = 0; + const bool forks = (argc > 0); - if (perf_target__none(&target)) - evsel_list->threads->map[0] = child_pid; - - /* - * Wait for the child to be ready to exec. 
- */ - close(child_ready_pipe[1]); - close(go_pipe[0]); - if (read(child_ready_pipe[0], &buf, 1) == -1) - perror("unable to read pipe"); - close(child_ready_pipe[0]); + if (interval) { + ts.tv_sec = interval / 1000; + ts.tv_nsec = (interval % 1000) * 1000000; + } else { + ts.tv_sec = 1; + ts.tv_nsec = 0; + } + + if (forks) { + if (perf_evlist__prepare_workload(evsel_list, &target, argv, false, + workload_exec_failed_signal) < 0) { + perror("failed to prepare workload"); + return -1; + } + child_pid = evsel_list->workload.pid; } if (group) perf_evlist__set_leader(evsel_list); - first = perf_evlist__first(evsel_list); - - list_for_each_entry(counter, &evsel_list->entries, node) { - if (create_perf_stat_counter(counter, first) < 0) { + evlist__for_each(evsel_list, counter) { + if (create_perf_stat_counter(counter) < 0) { /* * PPC returns ENXIO for HW counters until 2.6.37 * (behavior changed with commit b0a873e). @@ -346,23 +575,20 @@ static int run_perf_stat(int argc __maybe_unused, const char **argv) continue; } - if (errno == EPERM || errno == EACCES) { - error("You may not have permission to collect %sstats.\n" - "\t Consider tweaking" - " /proc/sys/kernel/perf_event_paranoid or running as root.", - target.system_wide ? "system-wide " : ""); - } else { - error("open_counter returned with %d (%s). " - "/bin/dmesg may provide additional information.\n", - errno, strerror(errno)); - } + perf_evsel__open_strerror(counter, &target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); + if (child_pid != -1) kill(child_pid, SIGTERM); - pr_err("Not all events could be opened.\n"); return -1; } counter->supported = true; + + l = strlen(counter->unit); + if (l > unit_width) + unit_width = l; } if (perf_evlist__apply_filters(evsel_list)) { @@ -375,36 +601,83 @@ static int run_perf_stat(int argc __maybe_unused, const char **argv) * Enable counters and exec the command: */ t0 = rdclock(); + clock_gettime(CLOCK_MONOTONIC, &ref_time); if (forks) { - close(go_pipe[1]); + perf_evlist__start_workload(evsel_list); + handle_initial_delay(); + + if (interval) { + while (!waitpid(child_pid, &status, WNOHANG)) { + nanosleep(&ts, NULL); + print_interval(); + } + } wait(&status); + + if (workload_exec_errno) { + const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg)); + pr_err("Workload failed: %s\n", emsg); + return -1; + } + if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { - while(!done) sleep(1); + handle_initial_delay(); + while (!done) { + nanosleep(&ts, NULL); + if (interval) + print_interval(); + } } t1 = rdclock(); update_stats(&walltime_nsecs_stats, t1 - t0); - if (no_aggr) { - list_for_each_entry(counter, &evsel_list->entries, node) { - read_counter(counter); - perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1); - } - } else { - list_for_each_entry(counter, &evsel_list->entries, node) { + if (aggr_mode == AGGR_GLOBAL) { + evlist__for_each(evsel_list, counter) { read_counter_aggr(counter); perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), - evsel_list->threads->nr); + thread_map__nr(evsel_list->threads)); + } + } else { + evlist__for_each(evsel_list, counter) { + read_counter(counter); + perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1); } } return WEXITSTATUS(status); } +static int run_perf_stat(int argc, const char **argv) +{ + int ret; + + if (pre_cmd) { + ret = system(pre_cmd); + if (ret) + return ret; + } + + if (sync_run) + sync(); + + ret = __run_perf_stat(argc, argv); + if (ret) + return ret; + + if (post_cmd) { + ret = 
system(post_cmd); + if (ret) + return ret; + } + + return ret; +} + static void print_noise_pct(double total, double avg) { double pct = rel_stddev_stats(total, avg); @@ -426,23 +699,66 @@ static void print_noise(struct perf_evsel *evsel, double avg) print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); } -static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) +static void aggr_printout(struct perf_evsel *evsel, int id, int nr) +{ + switch (aggr_mode) { + case AGGR_CORE: + fprintf(output, "S%d-C%*d%s%*d%s", + cpu_map__id_to_socket(id), + csv_output ? 0 : -8, + cpu_map__id_to_cpu(id), + csv_sep, + csv_output ? 0 : 4, + nr, + csv_sep); + break; + case AGGR_SOCKET: + fprintf(output, "S%*d%s%*d%s", + csv_output ? 0 : -5, + id, + csv_sep, + csv_output ? 0 : 4, + nr, + csv_sep); + break; + case AGGR_NONE: + fprintf(output, "CPU%*d%s", + csv_output ? 0 : -4, + perf_evsel__cpus(evsel)->map[id], csv_sep); + break; + case AGGR_GLOBAL: + default: + break; + } +} + +static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) { double msecs = avg / 1e6; - char cpustr[16] = { '\0', }; - const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s"; + const char *fmt_v, *fmt_n; + char name[25]; - if (no_aggr) - sprintf(cpustr, "CPU%*d%s", - csv_output ? 0 : -4, - perf_evsel__cpus(evsel)->map[cpu], csv_sep); + fmt_v = csv_output ? "%.6f%s" : "%18.6f%s"; + fmt_n = csv_output ? "%s" : "%-25s"; + + aggr_printout(evsel, cpu, nr); - fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel)); + scnprintf(name, sizeof(name), "%s%s", + perf_evsel__name(evsel), csv_output ? "" : " (msec)"); + + fprintf(output, fmt_v, msecs, csv_sep); + + if (csv_output) + fprintf(output, "%s%s", evsel->unit, csv_sep); + else + fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep); + + fprintf(output, fmt_n, name); if (evsel->cgrp) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); - if (csv_output) + if (csv_output || interval) return; if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) @@ -631,48 +947,56 @@ static void print_ll_cache_misses(int cpu, fprintf(output, " of all LL-cache hits "); } -static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) +static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg) { - double total, ratio = 0.0; - char cpustr[16] = { '\0', }; + double total, ratio = 0.0, total2; + double sc = evsel->scale; const char *fmt; - if (csv_output) - fmt = "%s%.0f%s%s"; - else if (big_num) - fmt = "%s%'18.0f%s%-25s"; - else - fmt = "%s%18.0f%s%-25s"; + if (csv_output) { + fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s"; + } else { + if (big_num) + fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s"; + else + fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s"; + } - if (no_aggr) - sprintf(cpustr, "CPU%*d%s", - csv_output ? 0 : -4, - perf_evsel__cpus(evsel)->map[cpu], csv_sep); - else + aggr_printout(evsel, cpu, nr); + + if (aggr_mode == AGGR_GLOBAL) cpu = 0; - fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel)); + fprintf(output, fmt, avg, csv_sep); + + if (evsel->unit) + fprintf(output, "%-*s%s", + csv_output ? 0 : unit_width, + evsel->unit, csv_sep); + + fprintf(output, "%-*s", csv_output ? 
0 : 25, perf_evsel__name(evsel)); if (evsel->cgrp) fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); - if (csv_output) + if (csv_output || interval) return; if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { total = avg_stats(&runtime_cycles_stats[cpu]); - - if (total) + if (total) { ratio = avg / total; - - fprintf(output, " # %5.2f insns per cycle ", ratio); - + fprintf(output, " # %5.2f insns per cycle ", ratio); + } total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); if (total && avg) { ratio = total / avg; - fprintf(output, "\n # %5.2f stalled cycles per insn", ratio); + fprintf(output, "\n"); + if (aggr_mode == AGGR_NONE) + fprintf(output, " "); + fprintf(output, " # %5.2f stalled cycles per insn", ratio); } } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && @@ -729,10 +1053,47 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { total = avg_stats(&runtime_nsecs_stats[cpu]); + if (total) { + ratio = avg / total; + fprintf(output, " # %8.3f GHz ", ratio); + } + } else if (transaction_run && + perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) { + total = avg_stats(&runtime_cycles_stats[cpu]); + if (total) + fprintf(output, + " # %5.2f%% transactional cycles ", + 100.0 * (avg / total)); + } else if (transaction_run && + perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX_CP))) { + total = avg_stats(&runtime_cycles_stats[cpu]); + total2 = avg_stats(&runtime_cycles_in_tx_stats[cpu]); + if (total2 < avg) + total2 = avg; + if (total) + fprintf(output, + " # %5.2f%% aborted cycles ", + 100.0 * ((total2-avg) / total)); + } else if (transaction_run && + perf_evsel__cmp(evsel, nth_evsel(T_TRANSACTION_START)) && + avg > 0 && + runtime_cycles_in_tx_stats[cpu].n != 0) { + total = avg_stats(&runtime_cycles_in_tx_stats[cpu]); + if (total) - ratio = 1.0 * avg / total; + ratio = total / avg; + + fprintf(output, " # %8.0f cycles / transaction ", ratio); + } else if (transaction_run && + perf_evsel__cmp(evsel, nth_evsel(T_ELISION_START)) && + avg > 0 && + runtime_cycles_in_tx_stats[cpu].n != 0) { + total = avg_stats(&runtime_cycles_in_tx_stats[cpu]); - fprintf(output, " # %8.3f GHz ", ratio); + if (total) + ratio = total / avg; + + fprintf(output, " # %8.0f cycles / elision ", ratio); } else if (runtime_nsecs_stats[cpu].n != 0) { char unit = 'M'; @@ -751,22 +1112,100 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) } } +static void print_aggr(char *prefix) +{ + struct perf_evsel *counter; + int cpu, cpu2, s, s2, id, nr; + double uval; + u64 ena, run, val; + + if (!(aggr_map || aggr_get_id)) + return; + + for (s = 0; s < aggr_map->nr; s++) { + id = aggr_map->map[s]; + evlist__for_each(evsel_list, counter) { + val = ena = run = 0; + nr = 0; + for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { + cpu2 = perf_evsel__cpus(counter)->map[cpu]; + s2 = aggr_get_id(evsel_list->cpus, cpu2); + if (s2 != id) + continue; + val += counter->counts->cpu[cpu].val; + ena += counter->counts->cpu[cpu].ena; + run += counter->counts->cpu[cpu].run; + nr++; + } + if (prefix) + fprintf(output, "%s", prefix); + + if (run == 0 || ena == 0) { + aggr_printout(counter, id, nr); + + fprintf(output, "%*s%s", + csv_output ? 0 : 18, + counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, + csv_sep); + + fprintf(output, "%-*s%s", + csv_output ? 
0 : unit_width, + counter->unit, csv_sep); + + fprintf(output, "%*s", + csv_output ? 0 : -25, + perf_evsel__name(counter)); + + if (counter->cgrp) + fprintf(output, "%s%s", + csv_sep, counter->cgrp->name); + + fputc('\n', output); + continue; + } + uval = val * counter->scale; + + if (nsec_counter(counter)) + nsec_printout(id, nr, counter, uval); + else + abs_printout(id, nr, counter, uval); + + if (!csv_output) { + print_noise(counter, 1.0); + + if (run != ena) + fprintf(output, " (%.2f%%)", + 100.0 * run / ena); + } + fputc('\n', output); + } + } +} + /* * Print out the results of a single counter: * aggregated counts in system-wide mode */ -static void print_counter_aggr(struct perf_evsel *counter) +static void print_counter_aggr(struct perf_evsel *counter, char *prefix) { struct perf_stat *ps = counter->priv; double avg = avg_stats(&ps->res_stats[0]); int scaled = counter->counts->scaled; + double uval; + + if (prefix) + fprintf(output, "%s", prefix); if (scaled == -1) { - fprintf(output, "%*s%s%*s", + fprintf(output, "%*s%s", csv_output ? 0 : 18, counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, - csv_sep, - csv_output ? 0 : -24, + csv_sep); + fprintf(output, "%-*s%s", + csv_output ? 0 : unit_width, + counter->unit, csv_sep); + fprintf(output, "%*s", + csv_output ? 0 : -25, perf_evsel__name(counter)); if (counter->cgrp) @@ -776,10 +1215,12 @@ static void print_counter_aggr(struct perf_evsel *counter) return; } + uval = avg * counter->scale; + if (nsec_counter(counter)) - nsec_printout(-1, counter, avg); + nsec_printout(-1, 0, counter, uval); else - abs_printout(-1, counter, avg); + abs_printout(-1, 0, counter, uval); print_noise(counter, avg); @@ -803,24 +1244,35 @@ static void print_counter_aggr(struct perf_evsel *counter) * Print out the results of a single counter: * does not use aggregated count in system-wide */ -static void print_counter(struct perf_evsel *counter) +static void print_counter(struct perf_evsel *counter, char *prefix) { u64 ena, run, val; + double uval; int cpu; for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { val = counter->counts->cpu[cpu].val; ena = counter->counts->cpu[cpu].ena; run = counter->counts->cpu[cpu].run; + + if (prefix) + fprintf(output, "%s", prefix); + if (run == 0 || ena == 0) { - fprintf(output, "CPU%*d%s%*s%s%*s", + fprintf(output, "CPU%*d%s%*s%s", csv_output ? 0 : -4, perf_evsel__cpus(counter)->map[cpu], csv_sep, csv_output ? 0 : 18, counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, - csv_sep, - csv_output ? 0 : -24, - perf_evsel__name(counter)); + csv_sep); + + fprintf(output, "%-*s%s", + csv_output ? 0 : unit_width, + counter->unit, csv_sep); + + fprintf(output, "%*s", + csv_output ? 
0 : -25, + perf_evsel__name(counter)); if (counter->cgrp) fprintf(output, "%s%s", @@ -830,10 +1282,12 @@ static void print_counter(struct perf_evsel *counter) continue; } + uval = val * counter->scale; + if (nsec_counter(counter)) - nsec_printout(cpu, counter, val); + nsec_printout(cpu, 0, counter, uval); else - abs_printout(cpu, counter, val); + abs_printout(cpu, 0, counter, uval); if (!csv_output) { print_noise(counter, 1.0); @@ -856,7 +1310,11 @@ static void print_stat(int argc, const char **argv) if (!csv_output) { fprintf(output, "\n"); fprintf(output, " Performance counter stats for "); - if (!perf_target__has_task(&target)) { + if (target.system_wide) + fprintf(output, "\'system wide"); + else if (target.cpu_list) + fprintf(output, "\'CPU(s) %s", target.cpu_list); + else if (!target__has_task(&target)) { fprintf(output, "\'%s", argv[0]); for (i = 1; i < argc; i++) fprintf(output, " %s", argv[i]); @@ -871,12 +1329,21 @@ static void print_stat(int argc, const char **argv) fprintf(output, ":\n\n"); } - if (no_aggr) { - list_for_each_entry(counter, &evsel_list->entries, node) - print_counter(counter); - } else { - list_for_each_entry(counter, &evsel_list->entries, node) - print_counter_aggr(counter); + switch (aggr_mode) { + case AGGR_CORE: + case AGGR_SOCKET: + print_aggr(NULL); + break; + case AGGR_GLOBAL: + evlist__for_each(evsel_list, counter) + print_counter_aggr(counter, NULL); + break; + case AGGR_NONE: + evlist__for_each(evsel_list, counter) + print_counter(counter, NULL); + break; + default: + break; } if (!csv_output) { @@ -897,17 +1364,38 @@ static volatile int signr = -1; static void skip_signal(int signo) { - if(child_pid == -1) + if ((child_pid == -1) || interval) done = 1; signr = signo; + /* + * render child_pid harmless + * won't send SIGTERM to a random + * process in case of race condition + * and fast PID recycling + */ + child_pid = -1; } static void sig_atexit(void) { + sigset_t set, oset; + + /* + * avoid race condition with SIGCHLD handler + * in skip_signal() which is modifying child_pid + * goal is to avoid send SIGTERM to a random + * process + */ + sigemptyset(&set); + sigaddset(&set, SIGCHLD); + sigprocmask(SIG_BLOCK, &set, &oset); + if (child_pid != -1) kill(child_pid, SIGTERM); + sigprocmask(SIG_SETMASK, &oset, NULL); + if (signr == -1) return; @@ -922,6 +1410,42 @@ static int stat__set_big_num(const struct option *opt __maybe_unused, return 0; } +static int perf_stat_init_aggr_mode(void) +{ + switch (aggr_mode) { + case AGGR_SOCKET: + if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) { + perror("cannot build socket map"); + return -1; + } + aggr_get_id = cpu_map__get_socket; + break; + case AGGR_CORE: + if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) { + perror("cannot build core map"); + return -1; + } + aggr_get_id = cpu_map__get_core; + break; + case AGGR_NONE: + case AGGR_GLOBAL: + default: + break; + } + return 0; +} + +static int setup_events(const char * const *attrs, unsigned len) +{ + unsigned i; + + for (i = 0; i < len; i++) { + if (parse_events(evsel_list, attrs[i])) + return -1; + } + return 0; +} + /* * Add default attributes, if there were no attributes specified or * if -d/--detailed, -d -d or -d -d -d is used: @@ -1039,6 +1563,22 @@ static int add_default_attributes(void) if (null_run) return 0; + if (transaction_run) { + int err; + if (pmu_have_event("cpu", "cycles-ct") && + pmu_have_event("cpu", "el-start")) + err = setup_events(transaction_attrs, + ARRAY_SIZE(transaction_attrs)); + else + err = 
setup_events(transaction_limited_attrs, + ARRAY_SIZE(transaction_limited_attrs)); + if (err < 0) { + fprintf(stderr, "Cannot set up transaction events\n"); + return -1; + } + return 0; + } + if (!evsel_list->nr_entries) { if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) return -1; @@ -1069,11 +1609,12 @@ static int add_default_attributes(void) int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) { - bool append_file = false, - sync_run = false; + bool append_file = false; int output_fd = 0; const char *output_name = NULL; const struct option options[] = { + OPT_BOOLEAN('T', "transaction", &transaction_run, + "hardware transaction statistics"), OPT_CALLBACK('e', "event", &evsel_list, "event", "event selector. use 'perf list' to list available events", parse_events_option), @@ -1093,7 +1634,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_INTEGER('r', "repeat", &run_count, - "repeat command and print average + stddev (max: 100)"), + "repeat command and print average + stddev (max: 100, forever: 0)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont start any counters"), OPT_INCR('d', "detailed", &detailed_run, @@ -1105,7 +1646,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) stat__set_big_num), OPT_STRING('C', "cpu", &target.cpu_list, "cpu", "list of cpus to monitor in system-wide"), - OPT_BOOLEAN('A', "no-aggr", &no_aggr, "disable CPU count aggregation"), + OPT_SET_UINT('A', "no-aggr", &aggr_mode, + "disable CPU count aggregation", AGGR_NONE), OPT_STRING('x', "field-separator", &csv_sep, "separator", "print counts with custom separator"), OPT_CALLBACK('G', "cgroup", &evsel_list, "name", @@ -1114,19 +1656,30 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), OPT_INTEGER(0, "log-fd", &output_fd, "log output to fd, instead of stderr"), + OPT_STRING(0, "pre", &pre_cmd, "command", + "command to run prior to the measured command"), + OPT_STRING(0, "post", &post_cmd, "command", + "command to run after to the measured command"), + OPT_UINTEGER('I', "interval-print", &interval, + "print counts at regular interval in ms (>= 100)"), + OPT_SET_UINT(0, "per-socket", &aggr_mode, + "aggregate counts per processor socket", AGGR_SOCKET), + OPT_SET_UINT(0, "per-core", &aggr_mode, + "aggregate counts per physical processor core", AGGR_CORE), + OPT_UINTEGER('D', "delay", &initial_delay, + "ms to wait before starting measurement after program start"), OPT_END() }; const char * const stat_usage[] = { "perf stat [<options>] [<command>]", NULL }; - struct perf_evsel *pos; - int status = -ENOMEM, run_idx; + int status = -EINVAL, run_idx; const char *mode; setlocale(LC_ALL, ""); - evsel_list = perf_evlist__new(NULL, NULL); + evsel_list = perf_evlist__new(); if (evsel_list == NULL) return -ENOMEM; @@ -1139,12 +1692,15 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) if (output_name && output_fd) { fprintf(stderr, "cannot use both --output and --log-fd\n"); - usage_with_options(stat_usage, options); + parse_options_usage(stat_usage, options, "o", 1); + parse_options_usage(NULL, options, "log-fd", 0); + goto out; } if (output_fd < 0) { fprintf(stderr, "argument to --log-fd must be a > 0\n"); - usage_with_options(stat_usage, options); + parse_options_usage(stat_usage, options, "log-fd", 0); + 
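For orientation, the hunks above add the --per-socket/--per-core aggregation modes and the print_aggr() path that folds per-CPU counts into one line per aggregate id. The following stand-alone sketch is illustrative only (it is not code from this patch; all names such as cpu_count and print_per_socket are made up) and models the summation idea under the assumption of a precomputed CPU-to-socket map:

#include <stdio.h>

struct cpu_count { int socket_id; unsigned long long val, ena, run; };

/* For each socket id, sum value, enabled time and running time of the
 * CPUs that map to it, then emit one aggregated line. */
static void print_per_socket(const struct cpu_count *c, int ncpus,
                             const int *socket_ids, int nsockets)
{
        for (int s = 0; s < nsockets; s++) {
                unsigned long long val = 0, ena = 0, run = 0;
                int nr = 0;

                for (int cpu = 0; cpu < ncpus; cpu++) {
                        if (c[cpu].socket_id != socket_ids[s])
                                continue;
                        val += c[cpu].val;   /* raw counter value */
                        ena += c[cpu].ena;   /* time enabled      */
                        run += c[cpu].run;   /* time running      */
                        nr++;
                }
                /* perf stat additionally scales val and reports run/ena as
                 * the multiplexing percentage when run != ena. */
                printf("S%d %d CPUs: %llu (%.2f%%)\n",
                       socket_ids[s], nr, val,
                       ena ? 100.0 * run / ena : 0.0);
        }
}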
goto out; } if (!output) { @@ -1181,46 +1737,67 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) /* User explicitly passed -B? */ if (big_num_opt == 1) { fprintf(stderr, "-B option not supported with -x\n"); - usage_with_options(stat_usage, options); + parse_options_usage(stat_usage, options, "B", 1); + parse_options_usage(NULL, options, "x", 1); + goto out; } else /* Nope, so disable big number formatting */ big_num = false; } else if (big_num_opt == 0) /* User passed --no-big-num */ big_num = false; - if (!argc && !perf_target__has_task(&target)) - usage_with_options(stat_usage, options); - if (run_count <= 0) + if (!argc && target__none(&target)) usage_with_options(stat_usage, options); + if (run_count < 0) { + pr_err("Run count must be a positive number\n"); + parse_options_usage(stat_usage, options, "r", 1); + goto out; + } else if (run_count == 0) { + forever = true; + run_count = 1; + } + /* no_aggr, cgroup are for system-wide only */ - if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) { + if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) && + !target__has_cpu(&target)) { fprintf(stderr, "both cgroup and no-aggregation " "modes only available in system-wide mode\n"); - usage_with_options(stat_usage, options); + parse_options_usage(stat_usage, options, "G", 1); + parse_options_usage(NULL, options, "A", 1); + parse_options_usage(NULL, options, "a", 1); + goto out; } if (add_default_attributes()) goto out; - perf_target__validate(&target); + target__validate(&target); if (perf_evlist__create_maps(evsel_list, &target) < 0) { - if (perf_target__has_task(&target)) + if (target__has_task(&target)) { pr_err("Problems finding threads of monitor\n"); - if (perf_target__has_cpu(&target)) + parse_options_usage(stat_usage, options, "p", 1); + parse_options_usage(NULL, options, "t", 1); + } else if (target__has_cpu(&target)) { perror("failed to parse CPUs map"); - - usage_with_options(stat_usage, options); - return -1; + parse_options_usage(stat_usage, options, "C", 1); + parse_options_usage(NULL, options, "a", 1); + } + goto out; } - - list_for_each_entry(pos, &evsel_list->entries, node) { - if (perf_evsel__alloc_stat_priv(pos) < 0 || - perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0) - goto out_free_fd; + if (interval && interval < 100) { + pr_err("print interval must be >= 100ms\n"); + parse_options_usage(stat_usage, options, "I", 1); + goto out; } + if (perf_evlist__alloc_stats(evsel_list, interval)) + goto out; + + if (perf_stat_init_aggr_mode()) + goto out; + /* * We dont want to block the signals - that would cause * child tasks to inherit that and Ctrl-C would not work. @@ -1228,28 +1805,29 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) * task, but being ignored by perf stat itself: */ atexit(sig_atexit); - signal(SIGINT, skip_signal); + if (!forever) + signal(SIGINT, skip_signal); + signal(SIGCHLD, skip_signal); signal(SIGALRM, skip_signal); signal(SIGABRT, skip_signal); status = 0; - for (run_idx = 0; run_idx < run_count; run_idx++) { + for (run_idx = 0; forever || run_idx < run_count; run_idx++) { if (run_count != 1 && verbose) fprintf(output, "[ perf stat: executing run #%d ... 
]\n", run_idx + 1); - if (sync_run) - sync(); - status = run_perf_stat(argc, argv); + if (forever && status != -1) { + print_stat(argc, argv); + perf_stat__reset_stats(evsel_list); + } } - if (status != -1) + if (!forever && status != -1 && !interval) print_stat(argc, argv); -out_free_fd: - list_for_each_entry(pos, &evsel_list->entries, node) - perf_evsel__free_stat_priv(pos); - perf_evlist__delete_maps(evsel_list); + + perf_evlist__free_stats(evsel_list); out: perf_evlist__delete(evsel_list); return status; diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c deleted file mode 100644 index 484f26cc0c0..00000000000 --- a/tools/perf/builtin-test.c +++ /dev/null @@ -1,1547 +0,0 @@ -/* - * builtin-test.c - * - * Builtin regression testing command: ever growing number of sanity tests - */ -#include "builtin.h" - -#include "util/cache.h" -#include "util/debug.h" -#include "util/debugfs.h" -#include "util/evlist.h" -#include "util/parse-options.h" -#include "util/parse-events.h" -#include "util/symbol.h" -#include "util/thread_map.h" -#include "util/pmu.h" -#include "event-parse.h" -#include "../../include/linux/hw_breakpoint.h" - -#include <sys/mman.h> - -static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, - struct symbol *sym) -{ - bool *visited = symbol__priv(sym); - *visited = true; - return 0; -} - -static int test__vmlinux_matches_kallsyms(void) -{ - int err = -1; - struct rb_node *nd; - struct symbol *sym; - struct map *kallsyms_map, *vmlinux_map; - struct machine kallsyms, vmlinux; - enum map_type type = MAP__FUNCTION; - long page_size = sysconf(_SC_PAGE_SIZE); - struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", }; - - /* - * Step 1: - * - * Init the machines that will hold kernel, modules obtained from - * both vmlinux + .ko files and from /proc/kallsyms split by modules. - */ - machine__init(&kallsyms, "", HOST_KERNEL_ID); - machine__init(&vmlinux, "", HOST_KERNEL_ID); - - /* - * Step 2: - * - * Create the kernel maps for kallsyms and the DSO where we will then - * load /proc/kallsyms. Also create the modules maps from /proc/modules - * and find the .ko files that match them in /lib/modules/`uname -r`/. - */ - if (machine__create_kernel_maps(&kallsyms) < 0) { - pr_debug("machine__create_kernel_maps "); - return -1; - } - - /* - * Step 3: - * - * Load and split /proc/kallsyms into multiple maps, one per module. - */ - if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) { - pr_debug("dso__load_kallsyms "); - goto out; - } - - /* - * Step 4: - * - * kallsyms will be internally on demand sorted by name so that we can - * find the reference relocation * symbol, i.e. the symbol we will use - * to see if the running kernel was relocated by checking if it has the - * same value in the vmlinux file we load. - */ - kallsyms_map = machine__kernel_map(&kallsyms, type); - - sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL); - if (sym == NULL) { - pr_debug("dso__find_symbol_by_name "); - goto out; - } - - ref_reloc_sym.addr = sym->start; - - /* - * Step 5: - * - * Now repeat step 2, this time for the vmlinux file we'll auto-locate. - */ - if (machine__create_kernel_maps(&vmlinux) < 0) { - pr_debug("machine__create_kernel_maps "); - goto out; - } - - vmlinux_map = machine__kernel_map(&vmlinux, type); - map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym; - - /* - * Step 6: - * - * Locate a vmlinux file in the vmlinux path that has a buildid that - * matches the one of the running kernel. 
- * - * While doing that look if we find the ref reloc symbol, if we find it - * we'll have its ref_reloc_symbol.unrelocated_addr and then - * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines - * to fixup the symbols. - */ - if (machine__load_vmlinux_path(&vmlinux, type, - vmlinux_matches_kallsyms_filter) <= 0) { - pr_debug("machine__load_vmlinux_path "); - goto out; - } - - err = 0; - /* - * Step 7: - * - * Now look at the symbols in the vmlinux DSO and check if we find all of them - * in the kallsyms dso. For the ones that are in both, check its names and - * end addresses too. - */ - for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { - struct symbol *pair, *first_pair; - bool backwards = true; - - sym = rb_entry(nd, struct symbol, rb_node); - - if (sym->start == sym->end) - continue; - - first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL); - pair = first_pair; - - if (pair && pair->start == sym->start) { -next_pair: - if (strcmp(sym->name, pair->name) == 0) { - /* - * kallsyms don't have the symbol end, so we - * set that by using the next symbol start - 1, - * in some cases we get this up to a page - * wrong, trace_kmalloc when I was developing - * this code was one such example, 2106 bytes - * off the real size. More than that and we - * _really_ have a problem. - */ - s64 skew = sym->end - pair->end; - if (llabs(skew) < page_size) - continue; - - pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", - sym->start, sym->name, sym->end, pair->end); - } else { - struct rb_node *nnd; -detour: - nnd = backwards ? rb_prev(&pair->rb_node) : - rb_next(&pair->rb_node); - if (nnd) { - struct symbol *next = rb_entry(nnd, struct symbol, rb_node); - - if (next->start == sym->start) { - pair = next; - goto next_pair; - } - } - - if (backwards) { - backwards = false; - pair = first_pair; - goto detour; - } - - pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", - sym->start, sym->name, pair->name); - } - } else - pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name); - - err = -1; - } - - if (!verbose) - goto out; - - pr_info("Maps only in vmlinux:\n"); - - for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node), *pair; - /* - * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while - * the kernel will have the path for the vmlinux file being used, - * so use the short name, less descriptive but the same ("[kernel]" in - * both cases. - */ - pair = map_groups__find_by_name(&kallsyms.kmaps, type, - (pos->dso->kernel ? 
- pos->dso->short_name : - pos->dso->name)); - if (pair) - pair->priv = 1; - else - map__fprintf(pos, stderr); - } - - pr_info("Maps in vmlinux with a different name in kallsyms:\n"); - - for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node), *pair; - - pair = map_groups__find(&kallsyms.kmaps, type, pos->start); - if (pair == NULL || pair->priv) - continue; - - if (pair->start == pos->start) { - pair->priv = 1; - pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", - pos->start, pos->end, pos->pgoff, pos->dso->name); - if (pos->pgoff != pair->pgoff || pos->end != pair->end) - pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "", - pair->start, pair->end, pair->pgoff); - pr_info(" %s\n", pair->dso->name); - pair->priv = 1; - } - } - - pr_info("Maps only in kallsyms:\n"); - - for (nd = rb_first(&kallsyms.kmaps.maps[type]); - nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node); - - if (!pos->priv) - map__fprintf(pos, stderr); - } -out: - return err; -} - -#include "util/cpumap.h" -#include "util/evsel.h" -#include <sys/types.h> - -static int trace_event__id(const char *evname) -{ - char *filename; - int err = -1, fd; - - if (asprintf(&filename, - "%s/syscalls/%s/id", - tracing_events_path, evname) < 0) - return -1; - - fd = open(filename, O_RDONLY); - if (fd >= 0) { - char id[16]; - if (read(fd, id, sizeof(id)) > 0) - err = atoi(id); - close(fd); - } - - free(filename); - return err; -} - -static int test__open_syscall_event(void) -{ - int err = -1, fd; - struct thread_map *threads; - struct perf_evsel *evsel; - struct perf_event_attr attr; - unsigned int nr_open_calls = 111, i; - int id = trace_event__id("sys_enter_open"); - - if (id < 0) { - pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); - return -1; - } - - threads = thread_map__new(-1, getpid(), UINT_MAX); - if (threads == NULL) { - pr_debug("thread_map__new\n"); - return -1; - } - - memset(&attr, 0, sizeof(attr)); - attr.type = PERF_TYPE_TRACEPOINT; - attr.config = id; - evsel = perf_evsel__new(&attr, 0); - if (evsel == NULL) { - pr_debug("perf_evsel__new\n"); - goto out_thread_map_delete; - } - - if (perf_evsel__open_per_thread(evsel, threads) < 0) { - pr_debug("failed to open counter: %s, " - "tweak /proc/sys/kernel/perf_event_paranoid?\n", - strerror(errno)); - goto out_evsel_delete; - } - - for (i = 0; i < nr_open_calls; ++i) { - fd = open("/etc/passwd", O_RDONLY); - close(fd); - } - - if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { - pr_debug("perf_evsel__read_on_cpu\n"); - goto out_close_fd; - } - - if (evsel->counts->cpu[0].val != nr_open_calls) { - pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", - nr_open_calls, evsel->counts->cpu[0].val); - goto out_close_fd; - } - - err = 0; -out_close_fd: - perf_evsel__close_fd(evsel, 1, threads->nr); -out_evsel_delete: - perf_evsel__delete(evsel); -out_thread_map_delete: - thread_map__delete(threads); - return err; -} - -#include <sched.h> - -static int test__open_syscall_event_on_all_cpus(void) -{ - int err = -1, fd, cpu; - struct thread_map *threads; - struct cpu_map *cpus; - struct perf_evsel *evsel; - struct perf_event_attr attr; - unsigned int nr_open_calls = 111, i; - cpu_set_t cpu_set; - int id = trace_event__id("sys_enter_open"); - - if (id < 0) { - pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); - return -1; - } - - threads = thread_map__new(-1, getpid(), UINT_MAX); - if (threads == NULL) { - pr_debug("thread_map__new\n"); 
- return -1; - } - - cpus = cpu_map__new(NULL); - if (cpus == NULL) { - pr_debug("cpu_map__new\n"); - goto out_thread_map_delete; - } - - - CPU_ZERO(&cpu_set); - - memset(&attr, 0, sizeof(attr)); - attr.type = PERF_TYPE_TRACEPOINT; - attr.config = id; - evsel = perf_evsel__new(&attr, 0); - if (evsel == NULL) { - pr_debug("perf_evsel__new\n"); - goto out_thread_map_delete; - } - - if (perf_evsel__open(evsel, cpus, threads) < 0) { - pr_debug("failed to open counter: %s, " - "tweak /proc/sys/kernel/perf_event_paranoid?\n", - strerror(errno)); - goto out_evsel_delete; - } - - for (cpu = 0; cpu < cpus->nr; ++cpu) { - unsigned int ncalls = nr_open_calls + cpu; - /* - * XXX eventually lift this restriction in a way that - * keeps perf building on older glibc installations - * without CPU_ALLOC. 1024 cpus in 2010 still seems - * a reasonable upper limit tho :-) - */ - if (cpus->map[cpu] >= CPU_SETSIZE) { - pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); - continue; - } - - CPU_SET(cpus->map[cpu], &cpu_set); - if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { - pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpus->map[cpu], - strerror(errno)); - goto out_close_fd; - } - for (i = 0; i < ncalls; ++i) { - fd = open("/etc/passwd", O_RDONLY); - close(fd); - } - CPU_CLR(cpus->map[cpu], &cpu_set); - } - - /* - * Here we need to explicitely preallocate the counts, as if - * we use the auto allocation it will allocate just for 1 cpu, - * as we start by cpu 0. - */ - if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { - pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); - goto out_close_fd; - } - - err = 0; - - for (cpu = 0; cpu < cpus->nr; ++cpu) { - unsigned int expected; - - if (cpus->map[cpu] >= CPU_SETSIZE) - continue; - - if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { - pr_debug("perf_evsel__read_on_cpu\n"); - err = -1; - break; - } - - expected = nr_open_calls + cpu; - if (evsel->counts->cpu[cpu].val != expected) { - pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", - expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); - err = -1; - } - } - -out_close_fd: - perf_evsel__close_fd(evsel, 1, threads->nr); -out_evsel_delete: - perf_evsel__delete(evsel); -out_thread_map_delete: - thread_map__delete(threads); - return err; -} - -/* - * This test will generate random numbers of calls to some getpid syscalls, - * then establish an mmap for a group of events that are created to monitor - * the syscalls. - * - * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated - * sample.id field to map back to its respective perf_evsel instance. - * - * Then it checks if the number of syscalls reported as perf events by - * the kernel corresponds to the number of syscalls made. 
- */ -static int test__basic_mmap(void) -{ - int err = -1; - union perf_event *event; - struct thread_map *threads; - struct cpu_map *cpus; - struct perf_evlist *evlist; - struct perf_event_attr attr = { - .type = PERF_TYPE_TRACEPOINT, - .read_format = PERF_FORMAT_ID, - .sample_type = PERF_SAMPLE_ID, - .watermark = 0, - }; - cpu_set_t cpu_set; - const char *syscall_names[] = { "getsid", "getppid", "getpgrp", - "getpgid", }; - pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, - (void*)getpgid }; -#define nsyscalls ARRAY_SIZE(syscall_names) - int ids[nsyscalls]; - unsigned int nr_events[nsyscalls], - expected_nr_events[nsyscalls], i, j; - struct perf_evsel *evsels[nsyscalls], *evsel; - - for (i = 0; i < nsyscalls; ++i) { - char name[64]; - - snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); - ids[i] = trace_event__id(name); - if (ids[i] < 0) { - pr_debug("Is debugfs mounted on /sys/kernel/debug?\n"); - return -1; - } - nr_events[i] = 0; - expected_nr_events[i] = random() % 257; - } - - threads = thread_map__new(-1, getpid(), UINT_MAX); - if (threads == NULL) { - pr_debug("thread_map__new\n"); - return -1; - } - - cpus = cpu_map__new(NULL); - if (cpus == NULL) { - pr_debug("cpu_map__new\n"); - goto out_free_threads; - } - - CPU_ZERO(&cpu_set); - CPU_SET(cpus->map[0], &cpu_set); - sched_setaffinity(0, sizeof(cpu_set), &cpu_set); - if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { - pr_debug("sched_setaffinity() failed on CPU %d: %s ", - cpus->map[0], strerror(errno)); - goto out_free_cpus; - } - - evlist = perf_evlist__new(cpus, threads); - if (evlist == NULL) { - pr_debug("perf_evlist__new\n"); - goto out_free_cpus; - } - - /* anonymous union fields, can't be initialized above */ - attr.wakeup_events = 1; - attr.sample_period = 1; - - for (i = 0; i < nsyscalls; ++i) { - attr.config = ids[i]; - evsels[i] = perf_evsel__new(&attr, i); - if (evsels[i] == NULL) { - pr_debug("perf_evsel__new\n"); - goto out_free_evlist; - } - - perf_evlist__add(evlist, evsels[i]); - - if (perf_evsel__open(evsels[i], cpus, threads) < 0) { - pr_debug("failed to open counter: %s, " - "tweak /proc/sys/kernel/perf_event_paranoid?\n", - strerror(errno)); - goto out_close_fd; - } - } - - if (perf_evlist__mmap(evlist, 128, true) < 0) { - pr_debug("failed to mmap events: %d (%s)\n", errno, - strerror(errno)); - goto out_close_fd; - } - - for (i = 0; i < nsyscalls; ++i) - for (j = 0; j < expected_nr_events[i]; ++j) { - int foo = syscalls[i](); - ++foo; - } - - while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { - struct perf_sample sample; - - if (event->header.type != PERF_RECORD_SAMPLE) { - pr_debug("unexpected %s event\n", - perf_event__name(event->header.type)); - goto out_munmap; - } - - err = perf_evlist__parse_sample(evlist, event, &sample); - if (err) { - pr_err("Can't parse sample, err = %d\n", err); - goto out_munmap; - } - - evsel = perf_evlist__id2evsel(evlist, sample.id); - if (evsel == NULL) { - pr_debug("event with id %" PRIu64 - " doesn't map to an evsel\n", sample.id); - goto out_munmap; - } - nr_events[evsel->idx]++; - } - - list_for_each_entry(evsel, &evlist->entries, node) { - if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { - pr_debug("expected %d %s events, got %d\n", - expected_nr_events[evsel->idx], - perf_evsel__name(evsel), nr_events[evsel->idx]); - goto out_munmap; - } - } - - err = 0; -out_munmap: - perf_evlist__munmap(evlist); -out_close_fd: - for (i = 0; i < nsyscalls; ++i) - perf_evsel__close_fd(evsels[i], 1, threads->nr); 
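The deleted test__basic_mmap() above works by issuing a known number of syscalls per event and then mapping every PERF_RECORD_SAMPLE read from the ring buffer back to its event via the PERF_SAMPLE_ID field. A minimal, self-contained sketch of that bookkeeping idea (illustrative only; check_counts and the plain arrays are stand-ins for the perf_evlist__id2evsel() machinery):

struct sample { unsigned long long id; };

static int check_counts(const struct sample *samples, int nsamples,
                        const unsigned long long *ids,   /* id assigned per event */
                        const unsigned *expected,        /* syscalls issued per event */
                        unsigned *got, int nevents)
{
        /* Attribute each sample to the event whose id matches. */
        for (int i = 0; i < nsamples; i++)
                for (int e = 0; e < nevents; e++)
                        if (samples[i].id == ids[e])
                                got[e]++;

        /* The test fails if any observed count differs from the expected one. */
        for (int e = 0; e < nevents; e++)
                if (got[e] != expected[e])
                        return -1;
        return 0;
}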
-out_free_evlist: - perf_evlist__delete(evlist); -out_free_cpus: - cpu_map__delete(cpus); -out_free_threads: - thread_map__delete(threads); - return err; -#undef nsyscalls -} - -static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, - size_t *sizep) -{ - cpu_set_t *mask; - size_t size; - int i, cpu = -1, nrcpus = 1024; -realloc: - mask = CPU_ALLOC(nrcpus); - size = CPU_ALLOC_SIZE(nrcpus); - CPU_ZERO_S(size, mask); - - if (sched_getaffinity(pid, size, mask) == -1) { - CPU_FREE(mask); - if (errno == EINVAL && nrcpus < (1024 << 8)) { - nrcpus = nrcpus << 2; - goto realloc; - } - perror("sched_getaffinity"); - return -1; - } - - for (i = 0; i < nrcpus; i++) { - if (CPU_ISSET_S(i, size, mask)) { - if (cpu == -1) { - cpu = i; - *maskp = mask; - *sizep = size; - } else - CPU_CLR_S(i, size, mask); - } - } - - if (cpu == -1) - CPU_FREE(mask); - - return cpu; -} - -static int test__PERF_RECORD(void) -{ - struct perf_record_opts opts = { - .target = { - .uid = UINT_MAX, - .uses_mmap = true, - }, - .no_delay = true, - .freq = 10, - .mmap_pages = 256, - }; - cpu_set_t *cpu_mask = NULL; - size_t cpu_mask_size = 0; - struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); - struct perf_evsel *evsel; - struct perf_sample sample; - const char *cmd = "sleep"; - const char *argv[] = { cmd, "1", NULL, }; - char *bname; - u64 prev_time = 0; - bool found_cmd_mmap = false, - found_libc_mmap = false, - found_vdso_mmap = false, - found_ld_mmap = false; - int err = -1, errs = 0, i, wakeups = 0; - u32 cpu; - int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; - - if (evlist == NULL || argv == NULL) { - pr_debug("Not enough memory to create evlist\n"); - goto out; - } - - /* - * We need at least one evsel in the evlist, use the default - * one: "cycles". - */ - err = perf_evlist__add_default(evlist); - if (err < 0) { - pr_debug("Not enough memory to create evsel\n"); - goto out_delete_evlist; - } - - /* - * Create maps of threads and cpus to monitor. In this case - * we start with all threads and cpus (-1, -1) but then in - * perf_evlist__prepare_workload we'll fill in the only thread - * we're monitoring, the one forked there. - */ - err = perf_evlist__create_maps(evlist, &opts.target); - if (err < 0) { - pr_debug("Not enough memory to create thread/cpu maps\n"); - goto out_delete_evlist; - } - - /* - * Prepare the workload in argv[] to run, it'll fork it, and then wait - * for perf_evlist__start_workload() to exec it. This is done this way - * so that we have time to open the evlist (calling sys_perf_event_open - * on all the fds) and then mmap them. - */ - err = perf_evlist__prepare_workload(evlist, &opts, argv); - if (err < 0) { - pr_debug("Couldn't run the workload!\n"); - goto out_delete_evlist; - } - - /* - * Config the evsels, setting attr->comm on the first one, etc. - */ - evsel = perf_evlist__first(evlist); - evsel->attr.sample_type |= PERF_SAMPLE_CPU; - evsel->attr.sample_type |= PERF_SAMPLE_TID; - evsel->attr.sample_type |= PERF_SAMPLE_TIME; - perf_evlist__config_attrs(evlist, &opts); - - err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, - &cpu_mask_size); - if (err < 0) { - pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - cpu = err; - - /* - * So that we can check perf_sample.cpu on all the samples. 
- */ - if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { - pr_debug("sched_setaffinity: %s\n", strerror(errno)); - goto out_free_cpu_mask; - } - - /* - * Call sys_perf_event_open on all the fds on all the evsels, - * grouping them if asked to. - */ - err = perf_evlist__open(evlist); - if (err < 0) { - pr_debug("perf_evlist__open: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - /* - * mmap the first fd on a given CPU and ask for events for the other - * fds in the same CPU to be injected in the same mmap ring buffer - * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). - */ - err = perf_evlist__mmap(evlist, opts.mmap_pages, false); - if (err < 0) { - pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - /* - * Now that all is properly set up, enable the events, they will - * count just on workload.pid, which will start... - */ - perf_evlist__enable(evlist); - - /* - * Now! - */ - perf_evlist__start_workload(evlist); - - while (1) { - int before = total_events; - - for (i = 0; i < evlist->nr_mmaps; i++) { - union perf_event *event; - - while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { - const u32 type = event->header.type; - const char *name = perf_event__name(type); - - ++total_events; - if (type < PERF_RECORD_MAX) - nr_events[type]++; - - err = perf_evlist__parse_sample(evlist, event, &sample); - if (err < 0) { - if (verbose) - perf_event__fprintf(event, stderr); - pr_debug("Couldn't parse sample\n"); - goto out_err; - } - - if (verbose) { - pr_info("%" PRIu64" %d ", sample.time, sample.cpu); - perf_event__fprintf(event, stderr); - } - - if (prev_time > sample.time) { - pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", - name, prev_time, sample.time); - ++errs; - } - - prev_time = sample.time; - - if (sample.cpu != cpu) { - pr_debug("%s with unexpected cpu, expected %d, got %d\n", - name, cpu, sample.cpu); - ++errs; - } - - if ((pid_t)sample.pid != evlist->workload.pid) { - pr_debug("%s with unexpected pid, expected %d, got %d\n", - name, evlist->workload.pid, sample.pid); - ++errs; - } - - if ((pid_t)sample.tid != evlist->workload.pid) { - pr_debug("%s with unexpected tid, expected %d, got %d\n", - name, evlist->workload.pid, sample.tid); - ++errs; - } - - if ((type == PERF_RECORD_COMM || - type == PERF_RECORD_MMAP || - type == PERF_RECORD_FORK || - type == PERF_RECORD_EXIT) && - (pid_t)event->comm.pid != evlist->workload.pid) { - pr_debug("%s with unexpected pid/tid\n", name); - ++errs; - } - - if ((type == PERF_RECORD_COMM || - type == PERF_RECORD_MMAP) && - event->comm.pid != event->comm.tid) { - pr_debug("%s with different pid/tid!\n", name); - ++errs; - } - - switch (type) { - case PERF_RECORD_COMM: - if (strcmp(event->comm.comm, cmd)) { - pr_debug("%s with unexpected comm!\n", name); - ++errs; - } - break; - case PERF_RECORD_EXIT: - goto found_exit; - case PERF_RECORD_MMAP: - bname = strrchr(event->mmap.filename, '/'); - if (bname != NULL) { - if (!found_cmd_mmap) - found_cmd_mmap = !strcmp(bname + 1, cmd); - if (!found_libc_mmap) - found_libc_mmap = !strncmp(bname + 1, "libc", 4); - if (!found_ld_mmap) - found_ld_mmap = !strncmp(bname + 1, "ld", 2); - } else if (!found_vdso_mmap) - found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]"); - break; - - case PERF_RECORD_SAMPLE: - /* Just ignore samples for now */ - break; - default: - pr_debug("Unexpected perf_event->header.type %d!\n", - type); - ++errs; - } - } - } - - /* - * We don't use poll here because at least at 3.1 times 
the - * PERF_RECORD_{!SAMPLE} events don't honour - * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. - */ - if (total_events == before && false) - poll(evlist->pollfd, evlist->nr_fds, -1); - - sleep(1); - if (++wakeups > 5) { - pr_debug("No PERF_RECORD_EXIT event!\n"); - break; - } - } - -found_exit: - if (nr_events[PERF_RECORD_COMM] > 1) { - pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); - ++errs; - } - - if (nr_events[PERF_RECORD_COMM] == 0) { - pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); - ++errs; - } - - if (!found_cmd_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); - ++errs; - } - - if (!found_libc_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); - ++errs; - } - - if (!found_ld_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); - ++errs; - } - - if (!found_vdso_mmap) { - pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); - ++errs; - } -out_err: - perf_evlist__munmap(evlist); -out_free_cpu_mask: - CPU_FREE(cpu_mask); -out_delete_evlist: - perf_evlist__delete(evlist); -out: - return (err < 0 || errs > 0) ? -1 : 0; -} - - -#if defined(__x86_64__) || defined(__i386__) - -#define barrier() asm volatile("" ::: "memory") - -static u64 rdpmc(unsigned int counter) -{ - unsigned int low, high; - - asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); - - return low | ((u64)high) << 32; -} - -static u64 rdtsc(void) -{ - unsigned int low, high; - - asm volatile("rdtsc" : "=a" (low), "=d" (high)); - - return low | ((u64)high) << 32; -} - -static u64 mmap_read_self(void *addr) -{ - struct perf_event_mmap_page *pc = addr; - u32 seq, idx, time_mult = 0, time_shift = 0; - u64 count, cyc = 0, time_offset = 0, enabled, running, delta; - - do { - seq = pc->lock; - barrier(); - - enabled = pc->time_enabled; - running = pc->time_running; - - if (enabled != running) { - cyc = rdtsc(); - time_mult = pc->time_mult; - time_shift = pc->time_shift; - time_offset = pc->time_offset; - } - - idx = pc->index; - count = pc->offset; - if (idx) - count += rdpmc(idx - 1); - - barrier(); - } while (pc->lock != seq); - - if (enabled != running) { - u64 quot, rem; - - quot = (cyc >> time_shift); - rem = cyc & ((1 << time_shift) - 1); - delta = time_offset + quot * time_mult + - ((rem * time_mult) >> time_shift); - - enabled += delta; - if (idx) - running += delta; - - quot = count / running; - rem = count % running; - count = quot * enabled + (rem * enabled) / running; - } - - return count; -} - -/* - * If the RDPMC instruction faults then signal this back to the test parent task: - */ -static void segfault_handler(int sig __maybe_unused, - siginfo_t *info __maybe_unused, - void *uc __maybe_unused) -{ - exit(-1); -} - -static int __test__rdpmc(void) -{ - long page_size = sysconf(_SC_PAGE_SIZE); - volatile int tmp = 0; - u64 i, loops = 1000; - int n; - int fd; - void *addr; - struct perf_event_attr attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_INSTRUCTIONS, - .exclude_kernel = 1, - }; - u64 delta_sum = 0; - struct sigaction sa; - - sigfillset(&sa.sa_mask); - sa.sa_sigaction = segfault_handler; - sigaction(SIGSEGV, &sa, NULL); - - fd = sys_perf_event_open(&attr, 0, -1, -1, 0); - if (fd < 0) { - pr_err("Error: sys_perf_event_open() syscall returned " - "with %d (%s)\n", fd, strerror(errno)); - return -1; - } - - addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); - if (addr == (void *)(-1)) { - pr_err("Error: mmap() syscall returned with (%s)\n", - strerror(errno)); - goto out_close; - } - - for (n = 0; 
n < 6; n++) { - u64 stamp, now, delta; - - stamp = mmap_read_self(addr); - - for (i = 0; i < loops; i++) - tmp++; - - now = mmap_read_self(addr); - loops *= 10; - - delta = now - stamp; - pr_debug("%14d: %14Lu\n", n, (long long)delta); - - delta_sum += delta; - } - - munmap(addr, page_size); - pr_debug(" "); -out_close: - close(fd); - - if (!delta_sum) - return -1; - - return 0; -} - -static int test__rdpmc(void) -{ - int status = 0; - int wret = 0; - int ret; - int pid; - - pid = fork(); - if (pid < 0) - return -1; - - if (!pid) { - ret = __test__rdpmc(); - - exit(ret); - } - - wret = waitpid(pid, &status, 0); - if (wret < 0 || status) - return -1; - - return 0; -} - -#endif - -static int test__perf_pmu(void) -{ - return perf_pmu__test(); -} - -static int perf_evsel__roundtrip_cache_name_test(void) -{ - char name[128]; - int type, op, err = 0, ret = 0, i, idx; - struct perf_evsel *evsel; - struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); - - if (evlist == NULL) - return -ENOMEM; - - for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { - for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { - /* skip invalid cache type */ - if (!perf_evsel__is_cache_op_valid(type, op)) - continue; - - for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { - __perf_evsel__hw_cache_type_op_res_name(type, op, i, - name, sizeof(name)); - err = parse_events(evlist, name, 0); - if (err) - ret = err; - } - } - } - - idx = 0; - evsel = perf_evlist__first(evlist); - - for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { - for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { - /* skip invalid cache type */ - if (!perf_evsel__is_cache_op_valid(type, op)) - continue; - - for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { - __perf_evsel__hw_cache_type_op_res_name(type, op, i, - name, sizeof(name)); - if (evsel->idx != idx) - continue; - - ++idx; - - if (strcmp(perf_evsel__name(evsel), name)) { - pr_debug("%s != %s\n", perf_evsel__name(evsel), name); - ret = -1; - } - - evsel = perf_evsel__next(evsel); - } - } - } - - perf_evlist__delete(evlist); - return ret; -} - -static int __perf_evsel__name_array_test(const char *names[], int nr_names) -{ - int i, err; - struct perf_evsel *evsel; - struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); - - if (evlist == NULL) - return -ENOMEM; - - for (i = 0; i < nr_names; ++i) { - err = parse_events(evlist, names[i], 0); - if (err) { - pr_debug("failed to parse event '%s', err %d\n", - names[i], err); - goto out_delete_evlist; - } - } - - err = 0; - list_for_each_entry(evsel, &evlist->entries, node) { - if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { - --err; - pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); - } - } - -out_delete_evlist: - perf_evlist__delete(evlist); - return err; -} - -#define perf_evsel__name_array_test(names) \ - __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) - -static int perf_evsel__roundtrip_name_test(void) -{ - int err = 0, ret = 0; - - err = perf_evsel__name_array_test(perf_evsel__hw_names); - if (err) - ret = err; - - err = perf_evsel__name_array_test(perf_evsel__sw_names); - if (err) - ret = err; - - err = perf_evsel__roundtrip_cache_name_test(); - if (err) - ret = err; - - return ret; -} - -static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, - int size, bool should_be_signed) -{ - struct format_field *field = perf_evsel__field(evsel, name); - int is_signed; - int ret = 0; - - if (field == NULL) { - pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); - 
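The removed __test__rdpmc()/mmap_read_self() code above depends on a seqlock-style retry loop over the mmap'd counter page so that a user-space rdpmc read stays consistent with the kernel-updated offset. A simplified illustration of that pattern (not the real perf_event_mmap_page layout; counter_page and read_hw are hypothetical):

struct counter_page {
        volatile unsigned lock;              /* bumped by the kernel on update */
        volatile unsigned long long offset;  /* base count maintained by the kernel */
};

#define barrier() asm volatile("" ::: "memory")

static unsigned long long read_counter(const struct counter_page *pc,
                                       unsigned long long (*read_hw)(void))
{
        unsigned seq;
        unsigned long long count;

        do {
                seq = pc->lock;                  /* snapshot the sequence number */
                barrier();
                count = pc->offset + read_hw();  /* e.g. rdpmc(idx - 1) in the test */
                barrier();
        } while (pc->lock != seq);               /* retry if the page changed underneath */

        return count;
}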
return -1; - } - - is_signed = !!(field->flags | FIELD_IS_SIGNED); - if (should_be_signed && !is_signed) { - pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", - evsel->name, name, is_signed, should_be_signed); - ret = -1; - } - - if (field->size != size) { - pr_debug("%s: \"%s\" size (%d) should be %d!\n", - evsel->name, name, field->size, size); - ret = -1; - } - - return ret; -} - -static int perf_evsel__tp_sched_test(void) -{ - struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); - int ret = 0; - - if (evsel == NULL) { - pr_debug("perf_evsel__new\n"); - return -1; - } - - if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "prev_state", 8, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "next_comm", 16, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "next_pid", 4, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "next_prio", 4, true)) - ret = -1; - - perf_evsel__delete(evsel); - - evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); - - if (perf_evsel__test_field(evsel, "comm", 16, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "pid", 4, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "prio", 4, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "success", 4, true)) - ret = -1; - - if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) - ret = -1; - - return ret; -} - -static int test__syscall_open_tp_fields(void) -{ - struct perf_record_opts opts = { - .target = { - .uid = UINT_MAX, - .uses_mmap = true, - }, - .no_delay = true, - .freq = 1, - .mmap_pages = 256, - .raw_samples = true, - }; - const char *filename = "/etc/passwd"; - int flags = O_RDONLY | O_DIRECTORY; - struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); - struct perf_evsel *evsel; - int err = -1, i, nr_events = 0, nr_polls = 0; - - if (evlist == NULL) { - pr_debug("%s: perf_evlist__new\n", __func__); - goto out; - } - - evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); - if (evsel == NULL) { - pr_debug("%s: perf_evsel__newtp\n", __func__); - goto out_delete_evlist; - } - - perf_evlist__add(evlist, evsel); - - err = perf_evlist__create_maps(evlist, &opts.target); - if (err < 0) { - pr_debug("%s: perf_evlist__create_maps\n", __func__); - goto out_delete_evlist; - } - - perf_evsel__config(evsel, &opts, evsel); - - evlist->threads->map[0] = getpid(); - - err = perf_evlist__open(evlist); - if (err < 0) { - pr_debug("perf_evlist__open: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - err = perf_evlist__mmap(evlist, UINT_MAX, false); - if (err < 0) { - pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); - goto out_delete_evlist; - } - - perf_evlist__enable(evlist); - - /* - * Generate the event: - */ - open(filename, flags); - - while (1) { - int before = nr_events; - - for (i = 0; i < evlist->nr_mmaps; i++) { - union perf_event *event; - - while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { - const u32 type = event->header.type; - int tp_flags; - struct perf_sample sample; - - ++nr_events; - - if (type != PERF_RECORD_SAMPLE) - continue; - - err = perf_evsel__parse_sample(evsel, event, &sample); - if (err) { - pr_err("Can't parse sample, err = %d\n", err); - goto out_munmap; - } - - tp_flags = perf_evsel__intval(evsel, &sample, "flags"); - - if (flags != tp_flags) { - pr_debug("%s: Expected flags=%#x, 
got %#x\n", - __func__, flags, tp_flags); - goto out_munmap; - } - - goto out_ok; - } - } - - if (nr_events == before) - poll(evlist->pollfd, evlist->nr_fds, 10); - - if (++nr_polls > 5) { - pr_debug("%s: no events!\n", __func__); - goto out_munmap; - } - } -out_ok: - err = 0; -out_munmap: - perf_evlist__munmap(evlist); -out_delete_evlist: - perf_evlist__delete(evlist); -out: - return err; -} - -static struct test { - const char *desc; - int (*func)(void); -} tests[] = { - { - .desc = "vmlinux symtab matches kallsyms", - .func = test__vmlinux_matches_kallsyms, - }, - { - .desc = "detect open syscall event", - .func = test__open_syscall_event, - }, - { - .desc = "detect open syscall event on all cpus", - .func = test__open_syscall_event_on_all_cpus, - }, - { - .desc = "read samples using the mmap interface", - .func = test__basic_mmap, - }, - { - .desc = "parse events tests", - .func = parse_events__test, - }, -#if defined(__x86_64__) || defined(__i386__) - { - .desc = "x86 rdpmc test", - .func = test__rdpmc, - }, -#endif - { - .desc = "Validate PERF_RECORD_* events & perf_sample fields", - .func = test__PERF_RECORD, - }, - { - .desc = "Test perf pmu format parsing", - .func = test__perf_pmu, - }, - { - .desc = "Test dso data interface", - .func = dso__test_data, - }, - { - .desc = "roundtrip evsel->name check", - .func = perf_evsel__roundtrip_name_test, - }, - { - .desc = "Check parsing of sched tracepoints fields", - .func = perf_evsel__tp_sched_test, - }, - { - .desc = "Generate and check syscalls:sys_enter_open event fields", - .func = test__syscall_open_tp_fields, - }, - { - .func = NULL, - }, -}; - -static bool perf_test__matches(int curr, int argc, const char *argv[]) -{ - int i; - - if (argc == 0) - return true; - - for (i = 0; i < argc; ++i) { - char *end; - long nr = strtoul(argv[i], &end, 10); - - if (*end == '\0') { - if (nr == curr + 1) - return true; - continue; - } - - if (strstr(tests[curr].desc, argv[i])) - return true; - } - - return false; -} - -static int __cmd_test(int argc, const char *argv[]) -{ - int i = 0; - - while (tests[i].func) { - int curr = i++, err; - - if (!perf_test__matches(curr, argc, argv)) - continue; - - pr_info("%2d: %s:", i, tests[curr].desc); - pr_debug("\n--- start ---\n"); - err = tests[curr].func(); - pr_debug("---- end ----\n%s:", tests[curr].desc); - pr_info(" %s\n", err ? 
"FAILED!\n" : "Ok"); - } - - return 0; -} - -static int perf_test__list(int argc, const char **argv) -{ - int i = 0; - - while (tests[i].func) { - int curr = i++; - - if (argc > 1 && !strstr(tests[curr].desc, argv[1])) - continue; - - pr_info("%2d: %s\n", i, tests[curr].desc); - } - - return 0; -} - -int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) -{ - const char * const test_usage[] = { - "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", - NULL, - }; - const struct option test_options[] = { - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_END() - }; - - argc = parse_options(argc, argv, test_options, test_usage, 0); - if (argc >= 1 && !strcmp(argv[0], "list")) - return perf_test__list(argc, argv); - - symbol_conf.priv_size = sizeof(int); - symbol_conf.sort_by_name = true; - symbol_conf.try_vmlinux_path = true; - - if (symbol__init() < 0) - return -1; - - return __cmd_test(argc, argv); -} diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index f251b613b2f..74db2568b86 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -12,6 +12,8 @@ * of the License. */ +#include <traceevent/event-parse.h> + #include "builtin.h" #include "util/util.h" @@ -19,6 +21,7 @@ #include "util/color.h" #include <linux/list.h> #include "util/cache.h" +#include "util/evlist.h" #include "util/evsel.h" #include <linux/rbtree.h> #include "util/symbol.h" @@ -33,29 +36,34 @@ #include "util/session.h" #include "util/svghelper.h" #include "util/tool.h" +#include "util/data.h" #define SUPPORT_OLD_POWER_EVENTS 1 #define PWR_EVENT_EXIT -1 - -static unsigned int numcpus; -static u64 min_freq; /* Lowest CPU frequency seen */ -static u64 max_freq; /* Highest CPU frequency seen */ -static u64 turbo_frequency; - -static u64 first_time, last_time; - -static bool power_only; - - struct per_pid; -struct per_pidcomm; - -struct cpu_sample; struct power_event; struct wake_event; -struct sample_wrapper; +struct timechart { + struct perf_tool tool; + struct per_pid *all_data; + struct power_event *power_events; + struct wake_event *wake_events; + int proc_num; + unsigned int numcpus; + u64 min_freq, /* Lowest CPU frequency seen */ + max_freq, /* Highest CPU frequency seen */ + turbo_frequency, + first_time, last_time; + bool power_only, + tasks_only, + with_backtrace, + topology; +}; + +struct per_pidcomm; +struct cpu_sample; /* * Datastructure layout: @@ -120,10 +128,9 @@ struct cpu_sample { u64 end_time; int type; int cpu; + const char *backtrace; }; -static struct per_pid *all_data; - #define CSTATE 1 #define PSTATE 2 @@ -141,12 +148,9 @@ struct wake_event { int waker; int wakee; u64 time; + const char *backtrace; }; -static struct power_event *power_events; -static struct wake_event *wake_events; - -struct process_filter; struct process_filter { char *name; int pid; @@ -156,9 +160,9 @@ struct process_filter { static struct process_filter *process_filter; -static struct per_pid *find_create_pid(int pid) +static struct per_pid *find_create_pid(struct timechart *tchart, int pid) { - struct per_pid *cursor = all_data; + struct per_pid *cursor = tchart->all_data; while (cursor) { if (cursor->pid == pid) @@ -168,16 +172,16 @@ static struct per_pid *find_create_pid(int pid) cursor = zalloc(sizeof(*cursor)); assert(cursor != NULL); cursor->pid = pid; - cursor->next = all_data; - all_data = cursor; + cursor->next = tchart->all_data; + tchart->all_data = cursor; return 
cursor; } -static void pid_set_comm(int pid, char *comm) +static void pid_set_comm(struct timechart *tchart, int pid, char *comm) { struct per_pid *p; struct per_pidcomm *c; - p = find_create_pid(pid); + p = find_create_pid(tchart, pid); c = p->all; while (c) { if (c->comm && strcmp(c->comm, comm) == 0) { @@ -199,14 +203,14 @@ static void pid_set_comm(int pid, char *comm) p->all = c; } -static void pid_fork(int pid, int ppid, u64 timestamp) +static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp) { struct per_pid *p, *pp; - p = find_create_pid(pid); - pp = find_create_pid(ppid); + p = find_create_pid(tchart, pid); + pp = find_create_pid(tchart, ppid); p->ppid = ppid; if (pp->current && pp->current->comm && !p->current) - pid_set_comm(pid, pp->current->comm); + pid_set_comm(tchart, pid, pp->current->comm); p->start_time = timestamp; if (p->current) { @@ -215,23 +219,24 @@ static void pid_fork(int pid, int ppid, u64 timestamp) } } -static void pid_exit(int pid, u64 timestamp) +static void pid_exit(struct timechart *tchart, int pid, u64 timestamp) { struct per_pid *p; - p = find_create_pid(pid); + p = find_create_pid(tchart, pid); p->end_time = timestamp; if (p->current) p->current->end_time = timestamp; } -static void -pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) +static void pid_put_sample(struct timechart *tchart, int pid, int type, + unsigned int cpu, u64 start, u64 end, + const char *backtrace) { struct per_pid *p; struct per_pidcomm *c; struct cpu_sample *sample; - p = find_create_pid(pid); + p = find_create_pid(tchart, pid); c = p->current; if (!c) { c = zalloc(sizeof(*c)); @@ -248,6 +253,7 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) sample->type = type; sample->next = c->samples; sample->cpu = cpu; + sample->backtrace = backtrace; c->samples = sample; if (sample->type == TYPE_RUNNING && end > start && start > 0) { @@ -268,103 +274,47 @@ static int cpus_cstate_state[MAX_CPUS]; static u64 cpus_pstate_start_times[MAX_CPUS]; static u64 cpus_pstate_state[MAX_CPUS]; -static int process_comm_event(struct perf_tool *tool __maybe_unused, +static int process_comm_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { - pid_set_comm(event->comm.tid, event->comm.comm); + struct timechart *tchart = container_of(tool, struct timechart, tool); + pid_set_comm(tchart, event->comm.tid, event->comm.comm); return 0; } -static int process_fork_event(struct perf_tool *tool __maybe_unused, +static int process_fork_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { - pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); + struct timechart *tchart = container_of(tool, struct timechart, tool); + pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time); return 0; } -static int process_exit_event(struct perf_tool *tool __maybe_unused, +static int process_exit_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, struct machine *machine __maybe_unused) { - pid_exit(event->fork.pid, event->fork.time); + struct timechart *tchart = container_of(tool, struct timechart, tool); + pid_exit(tchart, event->fork.pid, event->fork.time); return 0; } -struct trace_entry { - unsigned short type; - unsigned char flags; - unsigned char preempt_count; - int pid; - int lock_depth; -}; - #ifdef 
SUPPORT_OLD_POWER_EVENTS static int use_old_power_events; -struct power_entry_old { - struct trace_entry te; - u64 type; - u64 value; - u64 cpu_id; -}; #endif -struct power_processor_entry { - struct trace_entry te; - u32 state; - u32 cpu_id; -}; - -#define TASK_COMM_LEN 16 -struct wakeup_entry { - struct trace_entry te; - char comm[TASK_COMM_LEN]; - int pid; - int prio; - int success; -}; - -/* - * trace_flag_type is an enumeration that holds different - * states when a trace occurs. These are: - * IRQS_OFF - interrupts were disabled - * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags - * NEED_RESCED - reschedule is requested - * HARDIRQ - inside an interrupt handler - * SOFTIRQ - inside a softirq handler - */ -enum trace_flag_type { - TRACE_FLAG_IRQS_OFF = 0x01, - TRACE_FLAG_IRQS_NOSUPPORT = 0x02, - TRACE_FLAG_NEED_RESCHED = 0x04, - TRACE_FLAG_HARDIRQ = 0x08, - TRACE_FLAG_SOFTIRQ = 0x10, -}; - - - -struct sched_switch { - struct trace_entry te; - char prev_comm[TASK_COMM_LEN]; - int prev_pid; - int prev_prio; - long prev_state; /* Arjan weeps. */ - char next_comm[TASK_COMM_LEN]; - int next_pid; - int next_prio; -}; - static void c_state_start(int cpu, u64 timestamp, int state) { cpus_cstate_start_times[cpu] = timestamp; cpus_cstate_state[cpu] = state; } -static void c_state_end(int cpu, u64 timestamp) +static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp) { struct power_event *pwr = zalloc(sizeof(*pwr)); @@ -376,12 +326,12 @@ static void c_state_end(int cpu, u64 timestamp) pwr->end_time = timestamp; pwr->cpu = cpu; pwr->type = CSTATE; - pwr->next = power_events; + pwr->next = tchart->power_events; - power_events = pwr; + tchart->power_events = pwr; } -static void p_state_change(int cpu, u64 timestamp, u64 new_freq) +static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq) { struct power_event *pwr; @@ -397,73 +347,78 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq) pwr->end_time = timestamp; pwr->cpu = cpu; pwr->type = PSTATE; - pwr->next = power_events; + pwr->next = tchart->power_events; if (!pwr->start_time) - pwr->start_time = first_time; + pwr->start_time = tchart->first_time; - power_events = pwr; + tchart->power_events = pwr; cpus_pstate_state[cpu] = new_freq; cpus_pstate_start_times[cpu] = timestamp; - if ((u64)new_freq > max_freq) - max_freq = new_freq; + if ((u64)new_freq > tchart->max_freq) + tchart->max_freq = new_freq; - if (new_freq < min_freq || min_freq == 0) - min_freq = new_freq; + if (new_freq < tchart->min_freq || tchart->min_freq == 0) + tchart->min_freq = new_freq; - if (new_freq == max_freq - 1000) - turbo_frequency = max_freq; + if (new_freq == tchart->max_freq - 1000) + tchart->turbo_frequency = tchart->max_freq; } -static void -sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) +static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp, + int waker, int wakee, u8 flags, const char *backtrace) { struct per_pid *p; - struct wakeup_entry *wake = (void *)te; struct wake_event *we = zalloc(sizeof(*we)); if (!we) return; we->time = timestamp; - we->waker = pid; + we->waker = waker; + we->backtrace = backtrace; - if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ)) + if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ)) we->waker = -1; - we->wakee = wake->pid; - we->next = wake_events; - wake_events = we; - p = find_create_pid(we->wakee); + we->wakee = wakee; + we->next = tchart->wake_events; + tchart->wake_events = we; 
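The timechart rework above drops the file-scope globals and recovers per-command state in each callback with container_of(tool, struct timechart, tool). A compact, self-contained sketch of that embedding pattern (types are simplified stand-ins, not the perf_tool API):

#include <stddef.h>

struct tool { int (*cb)(struct tool *t); };

struct timechart_like {
        struct tool tool;   /* embedded callback interface, passed to callbacks */
        int numcpus;        /* ...plus command-private state, no globals needed */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static int my_callback(struct tool *t)
{
        struct timechart_like *tc = container_of(t, struct timechart_like, tool);
        return tc->numcpus; /* private state reachable from the generic pointer */
}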
+ p = find_create_pid(tchart, we->wakee); if (p && p->current && p->current->state == TYPE_NONE) { p->current->state_since = timestamp; p->current->state = TYPE_WAITING; } if (p && p->current && p->current->state == TYPE_BLOCKED) { - pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp); + pid_put_sample(tchart, p->pid, p->current->state, cpu, + p->current->state_since, timestamp, NULL); p->current->state_since = timestamp; p->current->state = TYPE_WAITING; } } -static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) +static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp, + int prev_pid, int next_pid, u64 prev_state, + const char *backtrace) { struct per_pid *p = NULL, *prev_p; - struct sched_switch *sw = (void *)te; + prev_p = find_create_pid(tchart, prev_pid); - prev_p = find_create_pid(sw->prev_pid); - - p = find_create_pid(sw->next_pid); + p = find_create_pid(tchart, next_pid); if (prev_p->current && prev_p->current->state != TYPE_NONE) - pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp); + pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu, + prev_p->current->state_since, timestamp, + backtrace); if (p && p->current) { if (p->current->state != TYPE_NONE) - pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp); + pid_put_sample(tchart, next_pid, p->current->state, cpu, + p->current->state_since, timestamp, + backtrace); p->current->state_since = timestamp; p->current->state = TYPE_RUNNING; @@ -472,102 +427,225 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) if (prev_p->current) { prev_p->current->state = TYPE_NONE; prev_p->current->state_since = timestamp; - if (sw->prev_state & 2) + if (prev_state & 2) prev_p->current->state = TYPE_BLOCKED; - if (sw->prev_state == 0) + if (prev_state == 0) prev_p->current->state = TYPE_WAITING; } } +static const char *cat_backtrace(union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct addr_location al; + unsigned int i; + char *p = NULL; + size_t p_len; + u8 cpumode = PERF_RECORD_MISC_USER; + struct addr_location tal; + struct ip_callchain *chain = sample->callchain; + FILE *f = open_memstream(&p, &p_len); + + if (!f) { + perror("open_memstream error"); + return NULL; + } + + if (!chain) + goto exit; + + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { + fprintf(stderr, "problem processing %d event, skipping it.\n", + event->header.type); + goto exit; + } + + for (i = 0; i < chain->nr; i++) { + u64 ip; -static int process_sample_event(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused, + if (callchain_param.order == ORDER_CALLEE) + ip = chain->ips[i]; + else + ip = chain->ips[chain->nr - i - 1]; + + if (ip >= PERF_CONTEXT_MAX) { + switch (ip) { + case PERF_CONTEXT_HV: + cpumode = PERF_RECORD_MISC_HYPERVISOR; + break; + case PERF_CONTEXT_KERNEL: + cpumode = PERF_RECORD_MISC_KERNEL; + break; + case PERF_CONTEXT_USER: + cpumode = PERF_RECORD_MISC_USER; + break; + default: + pr_debug("invalid callchain context: " + "%"PRId64"\n", (s64) ip); + + /* + * It seems the callchain is corrupted. + * Discard all. + */ + zfree(&p); + goto exit; + } + continue; + } + + tal.filtered = 0; + thread__find_addr_location(al.thread, machine, cpumode, + MAP__FUNCTION, ip, &tal); + + if (tal.sym) + fprintf(f, "..... %016" PRIx64 " %s\n", ip, + tal.sym->name); + else + fprintf(f, "..... 
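/*
 * Editor's note: a minimal, self-contained sketch of the open_memstream()
 * pattern that the new cat_backtrace() above relies on -- formatting an
 * arbitrary number of callchain lines into a heap-allocated string through a
 * FILE stream.  This is illustrative only and not part of the patch; error
 * handling is reduced to the essentials.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

static char *format_frames(const uint64_t *ips, unsigned int nr)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *f = open_memstream(&buf, &len);	/* writes grow 'buf' automatically */
	unsigned int i;

	if (!f)
		return NULL;

	for (i = 0; i < nr; i++)
		fprintf(f, "..... %016" PRIx64 "\n", ips[i]);

	fclose(f);				/* flushes and finalizes 'buf'/'len' */
	return buf;				/* caller frees */
}

int main(void)
{
	uint64_t ips[] = { 0xffffffff81000000ull, 0x400123ull };
	char *bt = format_frames(ips, 2);

	if (bt) {
		fputs(bt, stdout);
		free(bt);
	}
	return 0;
}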
%016" PRIx64 "\n", ip); + } + +exit: + fclose(f); + + return p; +} + +typedef int (*tracepoint_handler)(struct timechart *tchart, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace); + +static int process_sample_event(struct perf_tool *tool, + union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __maybe_unused) + struct machine *machine) { - struct trace_entry *te; + struct timechart *tchart = container_of(tool, struct timechart, tool); if (evsel->attr.sample_type & PERF_SAMPLE_TIME) { - if (!first_time || first_time > sample->time) - first_time = sample->time; - if (last_time < sample->time) - last_time = sample->time; + if (!tchart->first_time || tchart->first_time > sample->time) + tchart->first_time = sample->time; + if (tchart->last_time < sample->time) + tchart->last_time = sample->time; } - te = (void *)sample->raw_data; - if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) { - char *event_str; -#ifdef SUPPORT_OLD_POWER_EVENTS - struct power_entry_old *peo; - peo = (void *)te; -#endif - /* - * FIXME: use evsel, its already mapped from id to perf_evsel, - * remove perf_header__find_event infrastructure bits. - * Mapping all these "power:cpu_idle" strings to the tracepoint - * ID and then just comparing against evsel->attr.config. - * - * e.g.: - * - * if (evsel->attr.config == power_cpu_idle_id) - */ - event_str = perf_header__find_event(te->type); - - if (!event_str) - return 0; - - if (sample->cpu > numcpus) - numcpus = sample->cpu; - - if (strcmp(event_str, "power:cpu_idle") == 0) { - struct power_processor_entry *ppe = (void *)te; - if (ppe->state == (u32)PWR_EVENT_EXIT) - c_state_end(ppe->cpu_id, sample->time); - else - c_state_start(ppe->cpu_id, sample->time, - ppe->state); - } - else if (strcmp(event_str, "power:cpu_frequency") == 0) { - struct power_processor_entry *ppe = (void *)te; - p_state_change(ppe->cpu_id, sample->time, ppe->state); - } + if (evsel->handler != NULL) { + tracepoint_handler f = evsel->handler; + return f(tchart, evsel, sample, + cat_backtrace(event, sample, machine)); + } + + return 0; +} + +static int +process_sample_cpu_idle(struct timechart *tchart __maybe_unused, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace __maybe_unused) +{ + u32 state = perf_evsel__intval(evsel, sample, "state"); + u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); + + if (state == (u32)PWR_EVENT_EXIT) + c_state_end(tchart, cpu_id, sample->time); + else + c_state_start(cpu_id, sample->time, state); + return 0; +} + +static int +process_sample_cpu_frequency(struct timechart *tchart, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace __maybe_unused) +{ + u32 state = perf_evsel__intval(evsel, sample, "state"); + u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); + + p_state_change(tchart, cpu_id, sample->time, state); + return 0; +} - else if (strcmp(event_str, "sched:sched_wakeup") == 0) - sched_wakeup(sample->cpu, sample->time, sample->pid, te); +static int +process_sample_sched_wakeup(struct timechart *tchart, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace) +{ + u8 flags = perf_evsel__intval(evsel, sample, "common_flags"); + int waker = perf_evsel__intval(evsel, sample, "common_pid"); + int wakee = perf_evsel__intval(evsel, sample, "pid"); + + sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace); + return 0; +} - else if 
(strcmp(event_str, "sched:sched_switch") == 0) - sched_switch(sample->cpu, sample->time, te); +static int +process_sample_sched_switch(struct timechart *tchart, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace) +{ + int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"); + int next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); + + sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid, + prev_state, backtrace); + return 0; +} #ifdef SUPPORT_OLD_POWER_EVENTS - if (use_old_power_events) { - if (strcmp(event_str, "power:power_start") == 0) - c_state_start(peo->cpu_id, sample->time, - peo->value); - - else if (strcmp(event_str, "power:power_end") == 0) - c_state_end(sample->cpu, sample->time); - - else if (strcmp(event_str, - "power:power_frequency") == 0) - p_state_change(peo->cpu_id, sample->time, - peo->value); - } -#endif - } +static int +process_sample_power_start(struct timechart *tchart __maybe_unused, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace __maybe_unused) +{ + u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); + u64 value = perf_evsel__intval(evsel, sample, "value"); + + c_state_start(cpu_id, sample->time, value); + return 0; +} + +static int +process_sample_power_end(struct timechart *tchart, + struct perf_evsel *evsel __maybe_unused, + struct perf_sample *sample, + const char *backtrace __maybe_unused) +{ + c_state_end(tchart, sample->cpu, sample->time); return 0; } +static int +process_sample_power_frequency(struct timechart *tchart, + struct perf_evsel *evsel, + struct perf_sample *sample, + const char *backtrace __maybe_unused) +{ + u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id"); + u64 value = perf_evsel__intval(evsel, sample, "value"); + + p_state_change(tchart, cpu_id, sample->time, value); + return 0; +} +#endif /* SUPPORT_OLD_POWER_EVENTS */ + /* * After the last sample we need to wrap up the current C/P state * and close out each CPU for these. 
*/ -static void end_sample_processing(void) +static void end_sample_processing(struct timechart *tchart) { u64 cpu; struct power_event *pwr; - for (cpu = 0; cpu <= numcpus; cpu++) { + for (cpu = 0; cpu <= tchart->numcpus; cpu++) { /* C state */ #if 0 pwr = zalloc(sizeof(*pwr)); @@ -576,12 +654,12 @@ static void end_sample_processing(void) pwr->state = cpus_cstate_state[cpu]; pwr->start_time = cpus_cstate_start_times[cpu]; - pwr->end_time = last_time; + pwr->end_time = tchart->last_time; pwr->cpu = cpu; pwr->type = CSTATE; - pwr->next = power_events; + pwr->next = tchart->power_events; - power_events = pwr; + tchart->power_events = pwr; #endif /* P state */ @@ -591,32 +669,32 @@ static void end_sample_processing(void) pwr->state = cpus_pstate_state[cpu]; pwr->start_time = cpus_pstate_start_times[cpu]; - pwr->end_time = last_time; + pwr->end_time = tchart->last_time; pwr->cpu = cpu; pwr->type = PSTATE; - pwr->next = power_events; + pwr->next = tchart->power_events; if (!pwr->start_time) - pwr->start_time = first_time; + pwr->start_time = tchart->first_time; if (!pwr->state) - pwr->state = min_freq; - power_events = pwr; + pwr->state = tchart->min_freq; + tchart->power_events = pwr; } } /* * Sort the pid datastructure */ -static void sort_pids(void) +static void sort_pids(struct timechart *tchart) { struct per_pid *new_list, *p, *cursor, *prev; /* sort by ppid first, then by pid, lowest to highest */ new_list = NULL; - while (all_data) { - p = all_data; - all_data = p->next; + while (tchart->all_data) { + p = tchart->all_data; + tchart->all_data = p->next; p->next = NULL; if (new_list == NULL) { @@ -649,14 +727,14 @@ static void sort_pids(void) prev->next = p; } } - all_data = new_list; + tchart->all_data = new_list; } -static void draw_c_p_states(void) +static void draw_c_p_states(struct timechart *tchart) { struct power_event *pwr; - pwr = power_events; + pwr = tchart->power_events; /* * two pass drawing so that the P state bars are on top of the C state blocks @@ -667,30 +745,30 @@ static void draw_c_p_states(void) pwr = pwr->next; } - pwr = power_events; + pwr = tchart->power_events; while (pwr) { if (pwr->type == PSTATE) { if (!pwr->state) - pwr->state = min_freq; + pwr->state = tchart->min_freq; svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); } pwr = pwr->next; } } -static void draw_wakeups(void) +static void draw_wakeups(struct timechart *tchart) { struct wake_event *we; struct per_pid *p; struct per_pidcomm *c; - we = wake_events; + we = tchart->wake_events; while (we) { int from = 0, to = 0; char *task_from = NULL, *task_to = NULL; /* locate the column of the waker and wakee */ - p = all_data; + p = tchart->all_data; while (p) { if (p->pid == we->waker || p->pid == we->wakee) { c = p->all; @@ -733,11 +811,12 @@ static void draw_wakeups(void) } if (we->waker == -1) - svg_interrupt(we->time, to); + svg_interrupt(we->time, to, we->backtrace); else if (from && to && abs(from - to) == 1) - svg_wakeline(we->time, from, to); + svg_wakeline(we->time, from, to, we->backtrace); else - svg_partial_wakeline(we->time, from, task_from, to, task_to); + svg_partial_wakeline(we->time, from, task_from, to, + task_to, we->backtrace); we = we->next; free(task_from); @@ -745,19 +824,25 @@ static void draw_wakeups(void) } } -static void draw_cpu_usage(void) +static void draw_cpu_usage(struct timechart *tchart) { struct per_pid *p; struct per_pidcomm *c; struct cpu_sample *sample; - p = all_data; + p = tchart->all_data; while (p) { c = p->all; while (c) { sample = c->samples; while 
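/*
 * Editor's note: standalone sketch (not from the patch) of the list insertion
 * sort that sort_pids() above performs -- re-linking a singly linked list so
 * it is ordered by a primary key and then a secondary key.  Field names are
 * invented for the illustration.
 */
#include <stdio.h>

struct task {
	int ppid, pid;
	struct task *next;
};

/* nonzero if a should come after b: order by (ppid, pid) ascending */
static int comes_after(const struct task *a, const struct task *b)
{
	if (a->ppid != b->ppid)
		return a->ppid > b->ppid;
	return a->pid > b->pid;
}

static struct task *sort_tasks(struct task *list)
{
	struct task *sorted = NULL;

	while (list) {
		struct task *t = list, *cur = sorted, *prev = NULL;

		list = list->next;
		t->next = NULL;

		/* walk the sorted list to find the insertion point */
		while (cur && comes_after(t, cur)) {
			prev = cur;
			cur = cur->next;
		}
		t->next = cur;
		if (prev)
			prev->next = t;
		else
			sorted = t;
	}
	return sorted;
}

int main(void)
{
	struct task c = { 1, 30, NULL }, b = { 2, 10, &c }, a = { 1, 20, &b };
	struct task *t = sort_tasks(&a);

	for (; t; t = t->next)
		printf("ppid=%d pid=%d\n", t->ppid, t->pid);
	return 0;
}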
(sample) { - if (sample->type == TYPE_RUNNING) - svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm); + if (sample->type == TYPE_RUNNING) { + svg_process(sample->cpu, + sample->start_time, + sample->end_time, + p->pid, + c->comm, + sample->backtrace); + } sample = sample->next; } @@ -767,16 +852,16 @@ static void draw_cpu_usage(void) } } -static void draw_process_bars(void) +static void draw_process_bars(struct timechart *tchart) { struct per_pid *p; struct per_pidcomm *c; struct cpu_sample *sample; int Y = 0; - Y = 2 * numcpus + 2; + Y = 2 * tchart->numcpus + 2; - p = all_data; + p = tchart->all_data; while (p) { c = p->all; while (c) { @@ -790,11 +875,20 @@ static void draw_process_bars(void) sample = c->samples; while (sample) { if (sample->type == TYPE_RUNNING) - svg_sample(Y, sample->cpu, sample->start_time, sample->end_time); + svg_running(Y, sample->cpu, + sample->start_time, + sample->end_time, + sample->backtrace); if (sample->type == TYPE_BLOCKED) - svg_box(Y, sample->start_time, sample->end_time, "blocked"); + svg_blocked(Y, sample->cpu, + sample->start_time, + sample->end_time, + sample->backtrace); if (sample->type == TYPE_WAITING) - svg_waiting(Y, sample->start_time, sample->end_time); + svg_waiting(Y, sample->cpu, + sample->start_time, + sample->end_time, + sample->backtrace); sample = sample->next; } @@ -847,21 +941,21 @@ static int passes_filter(struct per_pid *p, struct per_pidcomm *c) return 0; } -static int determine_display_tasks_filtered(void) +static int determine_display_tasks_filtered(struct timechart *tchart) { struct per_pid *p; struct per_pidcomm *c; int count = 0; - p = all_data; + p = tchart->all_data; while (p) { p->display = 0; if (p->start_time == 1) - p->start_time = first_time; + p->start_time = tchart->first_time; /* no exit marker, task kept running to the end */ if (p->end_time == 0) - p->end_time = last_time; + p->end_time = tchart->last_time; c = p->all; @@ -869,7 +963,7 @@ static int determine_display_tasks_filtered(void) c->display = 0; if (c->start_time == 1) - c->start_time = first_time; + c->start_time = tchart->first_time; if (passes_filter(p, c)) { c->display = 1; @@ -878,7 +972,7 @@ static int determine_display_tasks_filtered(void) } if (c->end_time == 0) - c->end_time = last_time; + c->end_time = tchart->last_time; c = c->next; } @@ -887,25 +981,25 @@ static int determine_display_tasks_filtered(void) return count; } -static int determine_display_tasks(u64 threshold) +static int determine_display_tasks(struct timechart *tchart, u64 threshold) { struct per_pid *p; struct per_pidcomm *c; int count = 0; if (process_filter) - return determine_display_tasks_filtered(); + return determine_display_tasks_filtered(tchart); - p = all_data; + p = tchart->all_data; while (p) { p->display = 0; if (p->start_time == 1) - p->start_time = first_time; + p->start_time = tchart->first_time; /* no exit marker, task kept running to the end */ if (p->end_time == 0) - p->end_time = last_time; - if (p->total_time >= threshold && !power_only) + p->end_time = tchart->last_time; + if (p->total_time >= threshold) p->display = 1; c = p->all; @@ -914,15 +1008,15 @@ static int determine_display_tasks(u64 threshold) c->display = 0; if (c->start_time == 1) - c->start_time = first_time; + c->start_time = tchart->first_time; - if (c->total_time >= threshold && !power_only) { + if (c->total_time >= threshold) { c->display = 1; count++; } if (c->end_time == 0) - c->end_time = last_time; + c->end_time = tchart->last_time; c = c->next; } @@ -935,118 
+1029,219 @@ static int determine_display_tasks(u64 threshold) #define TIME_THRESH 10000000 -static void write_svg_file(const char *filename) +static void write_svg_file(struct timechart *tchart, const char *filename) { u64 i; int count; + int thresh = TIME_THRESH; - numcpus++; + if (tchart->power_only) + tchart->proc_num = 0; + /* We'd like to show at least proc_num tasks; + * be less picky if we have fewer */ + do { + count = determine_display_tasks(tchart, thresh); + thresh /= 10; + } while (!process_filter && thresh && count < tchart->proc_num); - count = determine_display_tasks(TIME_THRESH); + if (!tchart->proc_num) + count = 0; - /* We'd like to show at least 15 tasks; be less picky if we have fewer */ - if (count < 15) - count = determine_display_tasks(TIME_THRESH / 10); - - open_svg(filename, numcpus, count, first_time, last_time); + open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time); svg_time_grid(); svg_legenda(); - for (i = 0; i < numcpus; i++) - svg_cpu_box(i, max_freq, turbo_frequency); + for (i = 0; i < tchart->numcpus; i++) + svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency); - draw_cpu_usage(); - draw_process_bars(); - draw_c_p_states(); - draw_wakeups(); + draw_cpu_usage(tchart); + if (tchart->proc_num) + draw_process_bars(tchart); + if (!tchart->tasks_only) + draw_c_p_states(tchart); + if (tchart->proc_num) + draw_wakeups(tchart); svg_close(); } -static int __cmd_timechart(const char *input_name, const char *output_name) +static int process_header(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, + int feat, + int fd __maybe_unused, + void *data) { - struct perf_tool perf_timechart = { - .comm = process_comm_event, - .fork = process_fork_event, - .exit = process_exit_event, - .sample = process_sample_event, - .ordered_samples = true, + struct timechart *tchart = data; + + switch (feat) { + case HEADER_NRCPUS: + tchart->numcpus = ph->env.nr_cpus_avail; + break; + + case HEADER_CPU_TOPOLOGY: + if (!tchart->topology) + break; + + if (svg_build_topology_map(ph->env.sibling_cores, + ph->env.nr_sibling_cores, + ph->env.sibling_threads, + ph->env.nr_sibling_threads)) + fprintf(stderr, "problem building topology\n"); + break; + + default: + break; + } + + return 0; +} + +static int __cmd_timechart(struct timechart *tchart, const char *output_name) +{ + const struct perf_evsel_str_handler power_tracepoints[] = { + { "power:cpu_idle", process_sample_cpu_idle }, + { "power:cpu_frequency", process_sample_cpu_frequency }, + { "sched:sched_wakeup", process_sample_sched_wakeup }, + { "sched:sched_switch", process_sample_sched_switch }, +#ifdef SUPPORT_OLD_POWER_EVENTS + { "power:power_start", process_sample_power_start }, + { "power:power_end", process_sample_power_end }, + { "power:power_frequency", process_sample_power_frequency }, +#endif + }; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, }; - struct perf_session *session = perf_session__new(input_name, O_RDONLY, - 0, false, &perf_timechart); + + struct perf_session *session = perf_session__new(&file, false, + &tchart->tool); int ret = -EINVAL; if (session == NULL) return -ENOMEM; + (void)perf_header__process_sections(&session->header, + perf_data_file__fd(session->file), + tchart, + process_header); + if (!perf_session__has_traces(session, "timechart record")) goto out_delete; - ret = perf_session__process_events(session, &perf_timechart); + if (perf_session__set_tracepoints_handlers(session, + power_tracepoints)) { + 
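/*
 * Editor's note: a tiny standalone sketch (not from the patch) of the
 * threshold-relaxation loop the reworked write_svg_file() uses above: keep
 * dividing the cutoff by 10 until at least 'want' items qualify or the cutoff
 * reaches zero.  The sample data and constants are invented.
 */
#include <stdio.h>

static int count_above(const long *vals, int nr, long thresh)
{
	int i, n = 0;

	for (i = 0; i < nr; i++)
		if (vals[i] >= thresh)
			n++;
	return n;
}

int main(void)
{
	long runtimes[] = { 120, 900, 45000, 3, 70000 };	/* arbitrary sample data */
	long thresh = 10000000;					/* plays the role of TIME_THRESH */
	int want = 3, count;

	do {
		count = count_above(runtimes, 5, thresh);
		thresh /= 10;
	} while (thresh && count < want);

	printf("showing %d tasks (final threshold was %ld)\n", count, thresh * 10);
	return 0;
}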
pr_err("Initializing session tracepoint handlers failed\n"); + goto out_delete; + } + + ret = perf_session__process_events(session, &tchart->tool); if (ret) goto out_delete; - end_sample_processing(); + end_sample_processing(tchart); - sort_pids(); + sort_pids(tchart); - write_svg_file(output_name); + write_svg_file(tchart, output_name); pr_info("Written %2.1f seconds of trace to %s.\n", - (last_time - first_time) / 1000000000.0, output_name); + (tchart->last_time - tchart->first_time) / 1000000000.0, output_name); out_delete: perf_session__delete(session); return ret; } -static int __cmd_record(int argc, const char **argv) +static int timechart__record(struct timechart *tchart, int argc, const char **argv) { + unsigned int rec_argc, i, j; + const char **rec_argv; + const char **p; + unsigned int record_elems; + + const char * const common_args[] = { + "record", "-a", "-R", "-c", "1", + }; + unsigned int common_args_nr = ARRAY_SIZE(common_args); + + const char * const backtrace_args[] = { + "-g", + }; + unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args); + + const char * const power_args[] = { + "-e", "power:cpu_frequency", + "-e", "power:cpu_idle", + }; + unsigned int power_args_nr = ARRAY_SIZE(power_args); + + const char * const old_power_args[] = { #ifdef SUPPORT_OLD_POWER_EVENTS - const char * const record_old_args[] = { - "record", "-a", "-R", "-f", "-c", "1", "-e", "power:power_start", "-e", "power:power_end", "-e", "power:power_frequency", - "-e", "sched:sched_wakeup", - "-e", "sched:sched_switch", - }; #endif - const char * const record_new_args[] = { - "record", "-a", "-R", "-f", "-c", "1", - "-e", "power:cpu_frequency", - "-e", "power:cpu_idle", + }; + unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args); + + const char * const tasks_args[] = { "-e", "sched:sched_wakeup", "-e", "sched:sched_switch", }; - unsigned int rec_argc, i, j; - const char **rec_argv; - const char * const *record_args = record_new_args; - unsigned int record_elems = ARRAY_SIZE(record_new_args); + unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args); #ifdef SUPPORT_OLD_POWER_EVENTS if (!is_valid_tracepoint("power:cpu_idle") && is_valid_tracepoint("power:power_start")) { use_old_power_events = 1; - record_args = record_old_args; - record_elems = ARRAY_SIZE(record_old_args); + power_args_nr = 0; + } else { + old_power_args_nr = 0; } #endif - rec_argc = record_elems + argc - 1; + if (tchart->power_only) + tasks_args_nr = 0; + + if (tchart->tasks_only) { + power_args_nr = 0; + old_power_args_nr = 0; + } + + if (!tchart->with_backtrace) + backtrace_args_no = 0; + + record_elems = common_args_nr + tasks_args_nr + + power_args_nr + old_power_args_nr + backtrace_args_no; + + rec_argc = record_elems + argc; rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (rec_argv == NULL) return -ENOMEM; - for (i = 0; i < record_elems; i++) - rec_argv[i] = strdup(record_args[i]); + p = rec_argv; + for (i = 0; i < common_args_nr; i++) + *p++ = strdup(common_args[i]); + + for (i = 0; i < backtrace_args_no; i++) + *p++ = strdup(backtrace_args[i]); + + for (i = 0; i < tasks_args_nr; i++) + *p++ = strdup(tasks_args[i]); - for (j = 1; j < (unsigned int)argc; j++, i++) - rec_argv[i] = argv[j]; + for (i = 0; i < power_args_nr; i++) + *p++ = strdup(power_args[i]); - return cmd_record(i, rec_argv, NULL); + for (i = 0; i < old_power_args_nr; i++) + *p++ = strdup(old_power_args[i]); + + for (j = 0; j < (unsigned int)argc; j++) + *p++ = argv[j]; + + return cmd_record(rec_argc, rec_argv, NULL); } static int @@ -1058,21 +1253,56 
@@ parse_process(const struct option *opt __maybe_unused, const char *arg, return 0; } +static int +parse_highlight(const struct option *opt __maybe_unused, const char *arg, + int __maybe_unused unset) +{ + unsigned long duration = strtoul(arg, NULL, 0); + + if (svg_highlight || svg_highlight_name) + return -1; + + if (duration) + svg_highlight = duration; + else + svg_highlight_name = strdup(arg); + + return 0; +} + int cmd_timechart(int argc, const char **argv, const char *prefix __maybe_unused) { - const char *input_name; + struct timechart tchart = { + .tool = { + .comm = process_comm_event, + .fork = process_fork_event, + .exit = process_exit_event, + .sample = process_sample_event, + .ordered_samples = true, + }, + .proc_num = 15, + }; const char *output_name = "output.svg"; - const struct option options[] = { + const struct option timechart_options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), OPT_STRING('o', "output", &output_name, "file", "output file name"), OPT_INTEGER('w', "width", &svg_page_width, "page width"), - OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"), + OPT_CALLBACK(0, "highlight", NULL, "duration or task name", + "highlight tasks. Pass duration in ns or process name.", + parse_highlight), + OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"), + OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, + "output processes data only"), OPT_CALLBACK('p', "process", NULL, "process", "process selector. Pass a pid or process name.", parse_process), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), + OPT_INTEGER('n', "proc-num", &tchart.proc_num, + "min. number of tasks to print"), + OPT_BOOLEAN('t', "topology", &tchart.topology, + "sort CPUs according to topology"), OPT_END() }; const char * const timechart_usage[] = { @@ -1080,17 +1310,41 @@ int cmd_timechart(int argc, const char **argv, NULL }; - argc = parse_options(argc, argv, options, timechart_usage, + const struct option record_options[] = { + OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"), + OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, + "output processes data only"), + OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"), + OPT_END() + }; + const char * const record_usage[] = { + "perf timechart record [<options>]", + NULL + }; + argc = parse_options(argc, argv, timechart_options, timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION); + if (tchart.power_only && tchart.tasks_only) { + pr_err("-P and -T options cannot be used at the same time.\n"); + return -1; + } + symbol__init(); - if (argc && !strncmp(argv[0], "rec", 3)) - return __cmd_record(argc, argv); - else if (argc) - usage_with_options(timechart_usage, options); + if (argc && !strncmp(argv[0], "rec", 3)) { + argc = parse_options(argc, argv, record_options, record_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + + if (tchart.power_only && tchart.tasks_only) { + pr_err("-P and -T options cannot be used at the same time.\n"); + return -1; + } + + return timechart__record(&tchart, argc, argv); + } else if (argc) + usage_with_options(timechart_usage, timechart_options); setup_pager(); - return __cmd_timechart(input_name, output_name); + return __cmd_timechart(&tchart, output_name); } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ff6db808680..377971dc89a 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -26,6 +26,7 @@ #include "util/color.h" 
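/*
 * Editor's note: standalone sketch (not part of the patch) of the technique
 * timechart__record() uses above -- composing a 'perf record' argument vector
 * from independent groups of options, where a group is disabled simply by
 * zeroing its element count.  Array contents here are invented examples.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	const char * const common_args[] = { "record", "-a", "-R", "-c", "1" };
	const char * const power_args[]  = { "-e", "power:cpu_idle" };
	const char * const tasks_args[]  = { "-e", "sched:sched_switch" };
	unsigned int common_nr = ARRAY_SIZE(common_args);
	unsigned int power_nr  = ARRAY_SIZE(power_args);
	unsigned int tasks_nr  = ARRAY_SIZE(tasks_args);
	int power_only = 1;		/* pretend -P was given */
	const char **argv, **p;
	unsigned int i, argc;

	if (power_only)
		tasks_nr = 0;		/* drop the whole group, keep the copy loop */

	argc = common_nr + power_nr + tasks_nr;
	argv = calloc(argc + 1, sizeof(*argv));	/* NULL-terminated like an execv argv */
	if (!argv)
		return 1;

	p = argv;
	for (i = 0; i < common_nr; i++)
		*p++ = strdup(common_args[i]);
	for (i = 0; i < power_nr; i++)
		*p++ = strdup(power_args[i]);
	for (i = 0; i < tasks_nr; i++)
		*p++ = strdup(tasks_args[i]);

	for (i = 0; i < argc; i++)
		printf("%s%s", argv[i], i + 1 < argc ? " " : "\n");

	for (i = 0; i < argc; i++)
		free((void *)argv[i]);
	free(argv);
	return 0;
}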
#include "util/evlist.h" #include "util/evsel.h" +#include "util/machine.h" #include "util/session.h" #include "util/symbol.h" #include "util/thread.h" @@ -39,6 +40,7 @@ #include "util/xyarray.h" #include "util/sort.h" #include "util/intlist.h" +#include "arch/common.h" #include "util/debug.h" @@ -67,32 +69,13 @@ #include <linux/unistd.h> #include <linux/types.h> -void get_term_dimensions(struct winsize *ws) -{ - char *s = getenv("LINES"); - - if (s != NULL) { - ws->ws_row = atoi(s); - s = getenv("COLUMNS"); - if (s != NULL) { - ws->ws_col = atoi(s); - if (ws->ws_row && ws->ws_col) - return; - } - } -#ifdef TIOCGWINSZ - if (ioctl(1, TIOCGWINSZ, ws) == 0 && - ws->ws_row && ws->ws_col) - return; -#endif - ws->ws_row = 25; - ws->ws_col = 80; -} +static volatile int done; + +#define HEADER_LINE_NR 5 static void perf_top__update_print_entries(struct perf_top *top) { - if (top->print_entries > 9) - top->print_entries -= 9; + top->print_entries = top->winsize.ws_row - HEADER_LINE_NR; } static void perf_top__sig_winch(int sig __maybe_unused, @@ -101,13 +84,6 @@ static void perf_top__sig_winch(int sig __maybe_unused, struct perf_top *top = arg; get_term_dimensions(&top->winsize); - if (!top->print_entries - || (top->print_entries+4) > top->winsize.ws_row) { - top->print_entries = top->winsize.ws_row; - } else { - top->print_entries += 4; - top->winsize.ws_row = top->print_entries; - } perf_top__update_print_entries(top); } @@ -127,7 +103,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) /* * We can't annotate with just /proc/kallsyms */ - if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) { + if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && + !dso__is_kcore(map->dso)) { pr_err("Can't annotate %s: No vmlinux file was found in the " "path\n", sym->name); sleep(1); @@ -199,7 +176,7 @@ static void perf_top__record_precise_ip(struct perf_top *top, { struct annotation *notes; struct symbol *sym; - int err; + int err = 0; if (he == NULL || he->ms.sym == NULL || ((top->sym_filter_entry == NULL || @@ -212,21 +189,28 @@ static void perf_top__record_precise_ip(struct perf_top *top, if (pthread_mutex_trylock(¬es->lock)) return; - if (notes->src == NULL && symbol__alloc_hist(sym) < 0) { - pthread_mutex_unlock(¬es->lock); - pr_err("Not enough memory for annotating '%s' symbol!\n", - sym->name); - sleep(1); - return; - } - ip = he->ms.map->map_ip(he->ms.map, ip); - err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip); + + if (ui__has_annotation()) + err = hist_entry__inc_addr_samples(he, counter, ip); pthread_mutex_unlock(¬es->lock); + /* + * This function is now called with he->hists->lock held. + * Release it before going to sleep. 
+ */ + pthread_mutex_unlock(&he->hists->lock); + if (err == -ERANGE && !he->ms.map->erange_warned) ui__warn_map_erange(he->ms.map, sym, ip); + else if (err == -ENOMEM) { + pr_err("Not enough memory for annotating '%s' symbol!\n", + sym->name); + sleep(1); + } + + pthread_mutex_lock(&he->hists->lock); } static void perf_top__show_details(struct perf_top *top) @@ -250,7 +234,7 @@ static void perf_top__show_details(struct perf_top *top) printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name); printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter); - more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx, + more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, 0, top->sym_pcnt_filter, top->print_entries, 4); if (top->zero) symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx); @@ -262,22 +246,6 @@ out_unlock: pthread_mutex_unlock(¬es->lock); } -static const char CONSOLE_CLEAR[] = "[H[2J"; - -static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel, - struct addr_location *al, - struct perf_sample *sample) -{ - struct hist_entry *he; - - he = __hists__add_entry(&evsel->hists, al, NULL, sample->period); - if (he == NULL) - return NULL; - - hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); - return he; -} - static void perf_top__print_sym_table(struct perf_top *top) { char bf[160]; @@ -308,16 +276,17 @@ static void perf_top__print_sym_table(struct perf_top *top) return; } - hists__collapse_resort_threaded(&top->sym_evsel->hists); - hists__output_resort_threaded(&top->sym_evsel->hists); - hists__decay_entries_threaded(&top->sym_evsel->hists, - top->hide_user_symbols, - top->hide_kernel_symbols); + hists__collapse_resort(&top->sym_evsel->hists, NULL); + hists__output_resort(&top->sym_evsel->hists); + hists__decay_entries(&top->sym_evsel->hists, + top->hide_user_symbols, + top->hide_kernel_symbols); hists__output_recalc_col_len(&top->sym_evsel->hists, - top->winsize.ws_row - 3); + top->print_entries - printed); putchar('\n'); hists__fprintf(&top->sym_evsel->hists, false, - top->winsize.ws_row - 4 - printed, win_width, stdout); + top->print_entries - printed, win_width, + top->min_percent, stdout); } static void prompt_integer(int *target, const char *msg) @@ -452,8 +421,10 @@ static int perf_top__key_mapped(struct perf_top *top, int c) return 0; } -static void perf_top__handle_keypress(struct perf_top *top, int c) +static bool perf_top__handle_keypress(struct perf_top *top, int c) { + bool ret = true; + if (!perf_top__key_mapped(top, c)) { struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; struct termios tc, save; @@ -474,7 +445,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) tcsetattr(0, TCSAFLUSH, &save); if (!perf_top__key_mapped(top, c)) - return; + return ret; } switch (c) { @@ -493,7 +464,6 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) perf_top__sig_winch(SIGWINCH, NULL, top); sigaction(SIGWINCH, &act, NULL); } else { - perf_top__sig_winch(SIGWINCH, NULL, top); signal(SIGWINCH, SIG_DFL); } break; @@ -504,7 +474,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) fprintf(stderr, "\nAvailable events:"); - list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) + evlist__for_each(top->evlist, top->sym_evsel) fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel)); prompt_integer(&counter, "Enter details event counter"); @@ -515,7 +485,7 @@ static void perf_top__handle_keypress(struct perf_top *top, 
int c) sleep(1); break; } - list_for_each_entry(top->sym_evsel, &top->evlist->entries, node) + evlist__for_each(top->evlist, top->sym_evsel) if (top->sym_evsel->idx == counter) break; } else @@ -536,7 +506,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) printf("exiting.\n"); if (top->dump_symtab) perf_session__fprintf_dsos(top->session, stderr); - exit(0); + ret = false; + break; case 's': perf_top__prompt_symbol(top, "Enter details symbol"); break; @@ -559,6 +530,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) default: break; } + + return ret; } static void perf_top__sort_new_samples(void *arg) @@ -569,11 +542,11 @@ static void perf_top__sort_new_samples(void *arg) if (t->evlist->selected != NULL) t->sym_evsel = t->evlist->selected; - hists__collapse_resort_threaded(&t->sym_evsel->hists); - hists__output_resort_threaded(&t->sym_evsel->hists); - hists__decay_entries_threaded(&t->sym_evsel->hists, - t->hide_user_symbols, - t->hide_kernel_symbols); + hists__collapse_resort(&t->sym_evsel->hists, NULL); + hists__output_resort(&t->sym_evsel->hists); + hists__decay_entries(&t->sym_evsel->hists, + t->hide_user_symbols, + t->hide_kernel_symbols); } static void *display_thread_tui(void *arg) @@ -581,6 +554,11 @@ static void *display_thread_tui(void *arg) struct perf_evsel *pos; struct perf_top *top = arg; const char *help = "For a higher level overview, try: perf top --sort comm,dso"; + struct hist_browser_timer hbt = { + .timer = perf_top__sort_new_samples, + .arg = top, + .refresh = top->delay_secs, + }; perf_top__sort_new_samples(top); @@ -589,15 +567,13 @@ static void *display_thread_tui(void *arg) * Zooming in/out UIDs. For now juse use whatever the user passed * via --uid. */ - list_for_each_entry(pos, &top->evlist->entries, node) - pos->hists.uid_filter_str = top->target.uid_str; + evlist__for_each(top->evlist, pos) + pos->hists.uid_filter_str = top->record_opts.target.uid_str; - perf_evlist__tui_browse_hists(top->evlist, help, - perf_top__sort_new_samples, - top, top->delay_secs); + perf_evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent, + &top->session->header.env); - exit_browser(0); - exit(0); + done = 1; return NULL; } @@ -621,7 +597,7 @@ repeat: /* trash return*/ getc(stdin); - while (1) { + while (!done) { perf_top__print_sym_table(top); /* * Either timeout expired or we got an EINTR due to SIGWINCH, @@ -635,39 +611,21 @@ repeat: continue; /* Fall trhu */ default: - goto process_hotkey; + c = getc(stdin); + tcsetattr(0, TCSAFLUSH, &save); + + if (perf_top__handle_keypress(top, c)) + goto repeat; + done = 1; } } -process_hotkey: - c = getc(stdin); - tcsetattr(0, TCSAFLUSH, &save); - - perf_top__handle_keypress(top, c); - goto repeat; return NULL; } -/* Tag samples to be skipped. */ -static const char *skip_symbols[] = { - "intel_idle", - "default_idle", - "native_safe_halt", - "cpu_idle", - "enter_idle", - "exit_idle", - "mwait_idle", - "mwait_idle_with_hints", - "poll_idle", - "ppc64_runlatch_off", - "pseries_dedicated_idle_sleep", - NULL -}; - static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) { const char *name = sym->name; - int i; /* * ppc64 uses function descriptors and appends a '.' 
to the @@ -685,11 +643,27 @@ static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) strstr(name, "_text_end")) return 1; - for (i = 0; skip_symbols[i]; i++) { - if (!strcmp(skip_symbols[i], name)) { - sym->ignore = true; - break; - } + if (symbol__is_idle(sym)) + sym->ignore = true; + + return 0; +} + +static int hist_iter__top_callback(struct hist_entry_iter *iter, + struct addr_location *al, bool single, + void *arg) +{ + struct perf_top *top = arg; + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + + if (sort__has_sym && single) { + u64 ip = al->addr; + + if (al->map) + ip = al->map->unmap_ip(al->map, ip); + + perf_top__record_precise_ip(top, he, evsel->idx, ip); } return 0; @@ -702,8 +676,6 @@ static void perf_event__process_sample(struct perf_tool *tool, struct machine *machine) { struct perf_top *top = container_of(tool, struct perf_top, tool); - struct symbol *parent = NULL; - u64 ip = event->ip.ip; struct addr_location al; int err; @@ -711,28 +683,26 @@ static void perf_event__process_sample(struct perf_tool *tool, static struct intlist *seen; if (!seen) - seen = intlist__new(); + seen = intlist__new(NULL); - if (!intlist__has_entry(seen, event->ip.pid)) { + if (!intlist__has_entry(seen, sample->pid)) { pr_err("Can't find guest [%d]'s kernel information\n", - event->ip.pid); - intlist__add(seen, event->ip.pid); + sample->pid); + intlist__add(seen, sample->pid); } return; } if (!machine) { - pr_err("%u unprocessable samples recorded.", - top->session->hists.stats.nr_unprocessable_samples++); + pr_err("%u unprocessable samples recorded.\r", + top->session->stats.nr_unprocessable_samples++); return; } if (event->header.misc & PERF_RECORD_MISC_EXACT_IP) top->exact_samples++; - if (perf_event__preprocess_sample(event, machine, &al, sample, - symbol_filter) < 0 || - al.filtered) + if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) return; if (!top->kptr_restrict_warned && @@ -780,33 +750,23 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (al.sym == NULL || !al.sym->ignore) { - struct hist_entry *he; + struct hist_entry_iter iter = { + .add_entry_cb = hist_iter__top_callback, + }; - if ((sort__has_parent || symbol_conf.use_callchain) && - sample->callchain) { - err = machine__resolve_callchain(machine, evsel, - al.thread, sample, - &parent); + if (symbol_conf.cumulate_callchain) + iter.ops = &hist_iter_cumulative; + else + iter.ops = &hist_iter_normal; - if (err) - return; - } + pthread_mutex_lock(&evsel->hists.lock); - he = perf_evsel__add_hist_entry(evsel, &al, sample); - if (he == NULL) { + err = hist_entry_iter__add(&iter, &al, evsel, sample, + top->max_stack, top); + if (err < 0) pr_err("Problem incrementing symbol period, skipping event\n"); - return; - } - - if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, &callchain_cursor, - sample->period); - if (err) - return; - } - if (top->sort_has_symbols) - perf_top__record_precise_ip(top, he, evsel->idx, ip); + pthread_mutex_unlock(&evsel->hists.lock); } return; @@ -826,7 +786,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) ret = perf_evlist__parse_sample(top->evlist, event, &sample); if (ret) { pr_err("Can't parse sample, err = %d\n", ret); - continue; + goto next_event; } evsel = perf_evlist__id2evsel(session->evlist, sample.id); @@ -841,18 +801,19 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) case PERF_RECORD_MISC_USER: ++top->us_samples; if (top->hide_user_symbols) - 
continue; - machine = perf_session__find_host_machine(session); + goto next_event; + machine = &session->machines.host; break; case PERF_RECORD_MISC_KERNEL: ++top->kernel_samples; if (top->hide_kernel_symbols) - continue; - machine = perf_session__find_host_machine(session); + goto next_event; + machine = &session->machines.host; break; case PERF_RECORD_MISC_GUEST_KERNEL: ++top->guest_kernel_samples; - machine = perf_session__find_machine(session, event->ip.pid); + machine = perf_session__find_machine(session, + sample.pid); break; case PERF_RECORD_MISC_GUEST_USER: ++top->guest_us_samples; @@ -862,7 +823,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) */ /* Fall thru */ default: - continue; + goto next_event; } @@ -871,9 +832,11 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) &sample, machine); } else if (event->header.type < PERF_RECORD_MAX) { hists__inc_nr_events(&evsel->hists, event->header.type); - perf_event__process(&top->tool, event, &sample, machine); + machine__process_event(machine, event, &sample); } else - ++session->hists.stats.nr_unknown_events; + ++session->stats.nr_unknown_events; +next_event: + perf_evlist__mmap_consume(top->evlist, idx); } } @@ -885,129 +848,52 @@ static void perf_top__mmap_read(struct perf_top *top) perf_top__mmap_read_idx(top, i); } -static void perf_top__start_counters(struct perf_top *top) +static int perf_top__start_counters(struct perf_top *top) { + char msg[512]; struct perf_evsel *counter; struct perf_evlist *evlist = top->evlist; + struct record_opts *opts = &top->record_opts; - if (top->group) - perf_evlist__set_leader(evlist); - - list_for_each_entry(counter, &evlist->entries, node) { - struct perf_event_attr *attr = &counter->attr; - - attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - - if (top->freq) { - attr->sample_type |= PERF_SAMPLE_PERIOD; - attr->freq = 1; - attr->sample_freq = top->freq; - } - - if (evlist->nr_entries > 1) { - attr->sample_type |= PERF_SAMPLE_ID; - attr->read_format |= PERF_FORMAT_ID; - } - - if (perf_target__has_cpu(&top->target)) - attr->sample_type |= PERF_SAMPLE_CPU; + perf_evlist__config(evlist, opts); - if (symbol_conf.use_callchain) - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; - - attr->mmap = 1; - attr->comm = 1; - attr->inherit = top->inherit; -fallback_missing_features: - if (top->exclude_guest_missing) - attr->exclude_guest = attr->exclude_host = 0; -retry_sample_id: - attr->sample_id_all = top->sample_id_all_missing ? 
0 : 1; + evlist__for_each(evlist, counter) { try_again: if (perf_evsel__open(counter, top->evlist->cpus, top->evlist->threads) < 0) { - int err = errno; - - if (err == EPERM || err == EACCES) { - ui__error_paranoid(); - goto out_err; - } else if (err == EINVAL) { - if (!top->exclude_guest_missing && - (attr->exclude_guest || attr->exclude_host)) { - pr_debug("Old kernel, cannot exclude " - "guest or host samples.\n"); - top->exclude_guest_missing = true; - goto fallback_missing_features; - } else if (!top->sample_id_all_missing) { - /* - * Old kernel, no attr->sample_id_type_all field - */ - top->sample_id_all_missing = true; - goto retry_sample_id; - } - } - /* - * If it's cycles then fall back to hrtimer - * based cpu-clock-tick sw counter, which - * is always available even if no PMU support: - */ - if ((err == ENOENT || err == ENXIO) && - (attr->type == PERF_TYPE_HARDWARE) && - (attr->config == PERF_COUNT_HW_CPU_CYCLES)) { - + if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { if (verbose) - ui__warning("Cycles event not supported,\n" - "trying to fall back to cpu-clock-ticks\n"); - - attr->type = PERF_TYPE_SOFTWARE; - attr->config = PERF_COUNT_SW_CPU_CLOCK; - if (counter->name) { - free(counter->name); - counter->name = NULL; - } + ui__warning("%s\n", msg); goto try_again; } - if (err == ENOENT) { - ui__error("The %s event is not supported.\n", - perf_evsel__name(counter)); - goto out_err; - } else if (err == EMFILE) { - ui__error("Too many events are opened.\n" - "Try again after reducing the number of events\n"); - goto out_err; - } - - ui__error("The sys_perf_event_open() syscall " - "returned with %d (%s). /bin/dmesg " - "may provide additional information.\n" - "No CONFIG_PERF_EVENTS=y kernel support " - "configured?\n", err, strerror(err)); + perf_evsel__open_strerror(counter, &opts->target, + errno, msg, sizeof(msg)); + ui__error("%s\n", msg); goto out_err; } } - if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) { + if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { ui__error("Failed to mmap with %d (%s)\n", errno, strerror(errno)); goto out_err; } - return; + return 0; out_err: - exit_browser(0); - exit(0); + return -1; } -static int perf_top__setup_sample_type(struct perf_top *top) +static int perf_top__setup_sample_type(struct perf_top *top __maybe_unused) { - if (!top->sort_has_symbols) { + if (!sort__has_sym) { if (symbol_conf.use_callchain) { ui__error("Selected -g but \"sym\" not present in --sort/-s."); return -EINVAL; } - } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) { + } else if (callchain_param.mode != CHAIN_NONE) { if (callchain_register_param(&callchain_param) < 0) { ui__error("Can't register callchain params.\n"); return -EINVAL; @@ -1019,40 +905,56 @@ static int perf_top__setup_sample_type(struct perf_top *top) static int __cmd_top(struct perf_top *top) { + struct record_opts *opts = &top->record_opts; pthread_t thread; int ret; - /* - * FIXME: perf_session__new should allow passing a O_MMAP, so that all this - * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. 
- */ - top->session = perf_session__new(NULL, O_WRONLY, false, false, NULL); + + top->session = perf_session__new(NULL, false, NULL); if (top->session == NULL) return -ENOMEM; + machines__set_symbol_filter(&top->session->machines, symbol_filter); + + if (!objdump_path) { + ret = perf_session_env__lookup_objdump(&top->session->header.env); + if (ret) + goto out_delete; + } + ret = perf_top__setup_sample_type(top); if (ret) goto out_delete; - if (perf_target__has_task(&top->target)) - perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, - perf_event__process, - &top->session->host_machine); - else - perf_event__synthesize_threads(&top->tool, perf_event__process, - &top->session->host_machine); - perf_top__start_counters(top); + machine__synthesize_threads(&top->session->machines.host, &opts->target, + top->evlist->threads, false); + ret = perf_top__start_counters(top); + if (ret) + goto out_delete; + top->session->evlist = top->evlist; perf_session__set_id_hdr_size(top->session); + /* + * When perf is starting the traced process, all the events (apart from + * group members) have enable_on_exec=1 set, so don't spoil it by + * prematurely enabling them. + * + * XXX 'top' still doesn't start workloads like record, trace, but should, + * so leave the check here. + */ + if (!target__none(&opts->target)) + perf_evlist__enable(top->evlist); + /* Wait for a minimal set of events before starting the snapshot */ poll(top->evlist->pollfd, top->evlist->nr_fds, 100); perf_top__mmap_read(top); + ret = -1; if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : display_thread), top)) { ui__error("Could not create display thread.\n"); - exit(-1); + goto out_delete; } if (top->realtime_prio) { @@ -1061,11 +963,11 @@ static int __cmd_top(struct perf_top *top) param.sched_priority = top->realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { ui__error("Could not set realtime priority.\n"); - exit(-1); + goto out_delete; } } - while (1) { + while (!done) { u64 hits = top->samples; perf_top__mmap_read(top); @@ -1074,126 +976,95 @@ static int __cmd_top(struct perf_top *top) ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100); } + ret = 0; out_delete: perf_session__delete(top->session); top->session = NULL; - return 0; + return ret; } static int -parse_callchain_opt(const struct option *opt, const char *arg, int unset) +callchain_opt(const struct option *opt, const char *arg, int unset) { - struct perf_top *top = (struct perf_top *)opt->value; - char *tok, *tok2; - char *endptr; - - /* - * --no-call-graph - */ - if (unset) { - top->dont_use_callchains = true; - return 0; - } - symbol_conf.use_callchain = true; + return record_callchain_opt(opt, arg, unset); +} - if (!arg) - return 0; - - tok = strtok((char *)arg, ","); - if (!tok) - return -1; - - /* get the output mode */ - if (!strncmp(tok, "graph", strlen(arg))) - callchain_param.mode = CHAIN_GRAPH_ABS; - - else if (!strncmp(tok, "flat", strlen(arg))) - callchain_param.mode = CHAIN_FLAT; - - else if (!strncmp(tok, "fractal", strlen(arg))) - callchain_param.mode = CHAIN_GRAPH_REL; +static int +parse_callchain_opt(const struct option *opt, const char *arg, int unset) +{ + symbol_conf.use_callchain = true; + return record_parse_callchain_opt(opt, arg, unset); +} - else if (!strncmp(tok, "none", strlen(arg))) { - callchain_param.mode = CHAIN_NONE; - symbol_conf.use_callchain = false; +static int perf_top_config(const char *var, const char *value, void *cb) +{ + struct perf_top *top = cb; + if (!strcmp(var, 
"top.call-graph")) + return record_parse_callchain(value, &top->record_opts); + if (!strcmp(var, "top.children")) { + symbol_conf.cumulate_callchain = perf_config_bool(var, value); return 0; - } else - return -1; - - /* get the min percentage */ - tok = strtok(NULL, ","); - if (!tok) - goto setup; - - callchain_param.min_percent = strtod(tok, &endptr); - if (tok == endptr) - return -1; + } - /* get the print limit */ - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; + return perf_default_config(var, value, cb); +} - if (tok2[0] != 'c') { - callchain_param.print_limit = strtod(tok2, &endptr); - tok2 = strtok(NULL, ","); - if (!tok2) - goto setup; - } +static int +parse_percent_limit(const struct option *opt, const char *arg, + int unset __maybe_unused) +{ + struct perf_top *top = opt->value; - /* get the call chain order */ - if (!strcmp(tok2, "caller")) - callchain_param.order = ORDER_CALLER; - else if (!strcmp(tok2, "callee")) - callchain_param.order = ORDER_CALLEE; - else - return -1; -setup: - if (callchain_register_param(&callchain_param) < 0) { - fprintf(stderr, "Can't register callchain params\n"); - return -1; - } + top->min_percent = strtof(arg, NULL); return 0; } int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) { - struct perf_evsel *pos; - int status; + int status = -1; char errbuf[BUFSIZ]; struct perf_top top = { .count_filter = 5, .delay_secs = 2, - .freq = 4000, /* 4 KHz */ - .mmap_pages = 128, - .sym_pcnt_filter = 5, - .target = { - .uses_mmap = true, + .record_opts = { + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 4000, /* 4 KHz */ + .target = { + .uses_mmap = true, + }, }, + .max_stack = PERF_MAX_STACK_DEPTH, + .sym_pcnt_filter = 5, }; - char callchain_default_opt[] = "fractal,0.5,callee"; + struct record_opts *opts = &top.record_opts; + struct target *target = &opts->target; const struct option options[] = { OPT_CALLBACK('e', "event", &top.evlist, "event", "event selector. 
use 'perf list' to list available events", parse_events_option), - OPT_INTEGER('c', "count", &top.default_interval, - "event period to sample"), - OPT_STRING('p', "pid", &top.target.pid, "pid", + OPT_U64('c', "count", &opts->user_interval, "event period to sample"), + OPT_STRING('p', "pid", &target->pid, "pid", "profile events on existing process id"), - OPT_STRING('t', "tid", &top.target.tid, "tid", + OPT_STRING('t', "tid", &target->tid, "tid", "profile events on existing thread id"), - OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide, + OPT_BOOLEAN('a', "all-cpus", &target->system_wide, "system-wide collection from all CPUs"), - OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu", + OPT_STRING('C', "cpu", &target->cpu_list, "cpu", "list of cpus to monitor"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), + OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux, + "don't load vmlinux even if found"), OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols, "hide kernel symbols"), - OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"), + OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages", + "number of mmap data pages", + perf_evlist__parse_mmap_pages), OPT_INTEGER('r', "realtime", &top.realtime_prio, "collect data with this RT SCHED_FIFO priority"), OPT_INTEGER('d', "delay", &top.delay_secs, @@ -1202,16 +1073,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "dump the symbol table used for profiling"), OPT_INTEGER('f', "count-filter", &top.count_filter, "only display functions with more events than this"), - OPT_BOOLEAN('g', "group", &top.group, + OPT_BOOLEAN(0, "group", &opts->group, "put the counters into a counter group"), - OPT_BOOLEAN('i', "inherit", &top.inherit, - "child tasks inherit counters"), + OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit, + "child tasks do not inherit counters"), OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name", "symbol to annotate"), - OPT_BOOLEAN('z', "zero", &top.zero, - "zero history across updates"), - OPT_INTEGER('F', "freq", &top.freq, - "profile at this frequency"), + OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"), + OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"), OPT_INTEGER('E', "entries", &top.print_entries, "display this many functions"), OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols, @@ -1221,13 +1090,26 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): pid, comm, dso, symbol, parent"), + "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..." + " Please refer the man page for the complete list."), + OPT_STRING(0, "fields", &field_order, "key[,keys...]", + "output field(s): overhead, period, sample plus all of sort keys"), OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, "Show a column with the number of samples"), - OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order", - "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. 
" - "Default: fractal,0.5,callee", &parse_callchain_opt, - callchain_default_opt), + OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts, + NULL, "enables call-graph recording", + &callchain_opt), + OPT_CALLBACK(0, "call-graph", &top.record_opts, + "mode[,dump_size]", record_callchain_help, + &parse_callchain_opt), + OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain, + "Accumulate callchains of children and show total overhead as well"), + OPT_INTEGER(0, "max-stack", &top.max_stack, + "Set the maximum stack depth when parsing the callchain. " + "Default: " __stringify(PERF_MAX_STACK_DEPTH)), + OPT_CALLBACK(0, "ignore-callees", NULL, "regex", + "ignore callees of these functions in call graphs", + report_parse_ignore_callees_opt), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", @@ -1240,9 +1122,15 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) "Interleave source code with assembly code (default)"), OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw, "Display raw encoding of assembly instructions (default)"), + OPT_STRING(0, "objdump", &objdump_path, "path", + "objdump binary to use for disassembly and annotations"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. -M intel for intel syntax)"), - OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"), + OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"), + OPT_CALLBACK(0, "percent-limit", &top, "percent", + "Don't show entries under that percent", parse_percent_limit), + OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", + "How to display percentage of filtered entries", parse_filter_percentage), OPT_END() }; const char * const top_usage[] = { @@ -1250,20 +1138,28 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) NULL }; - top.evlist = perf_evlist__new(NULL, NULL); + top.evlist = perf_evlist__new(); if (top.evlist == NULL) return -ENOMEM; - symbol_conf.exclude_other = false; + perf_config(perf_top_config, &top); argc = parse_options(argc, argv, options, top_usage, 0); if (argc) usage_with_options(top_usage, options); - if (sort_order == default_sort_order) - sort_order = "dso,symbol"; + sort__mode = SORT_MODE__TOP; + /* display thread wants entries to be collapsed in a different tree */ + sort__need_collapse = 1; - setup_sorting(top_usage, options); + if (setup_sorting() < 0) { + if (sort_order) + parse_options_usage(top_usage, options, "s", 1); + if (field_order) + parse_options_usage(sort_order ? 
NULL : top_usage, + options, "fields", 0); + goto out_delete_evlist; + } if (top.use_stdio) use_browser = 0; @@ -1272,33 +1168,33 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) setup_browser(false); - status = perf_target__validate(&top.target); + status = target__validate(target); if (status) { - perf_target__strerror(&top.target, status, errbuf, BUFSIZ); - ui__warning("%s", errbuf); + target__strerror(target, status, errbuf, BUFSIZ); + ui__warning("%s\n", errbuf); } - status = perf_target__parse_uid(&top.target); + status = target__parse_uid(target); if (status) { int saved_errno = errno; - perf_target__strerror(&top.target, status, errbuf, BUFSIZ); - ui__error("%s", errbuf); + target__strerror(target, status, errbuf, BUFSIZ); + ui__error("%s\n", errbuf); status = -saved_errno; goto out_delete_evlist; } - if (perf_target__none(&top.target)) - top.target.system_wide = true; + if (target__none(target)) + target->system_wide = true; - if (perf_evlist__create_maps(top.evlist, &top.target) < 0) + if (perf_evlist__create_maps(top.evlist, target) < 0) usage_with_options(top_usage, options); if (!top.evlist->nr_entries && perf_evlist__add_default(top.evlist) < 0) { ui__error("Not enough memory for event selector list\n"); - return -ENOMEM; + goto out_delete_evlist; } symbol_conf.nr_events = top.evlist->nr_entries; @@ -1306,43 +1202,25 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) if (top.delay_secs < 1) top.delay_secs = 1; - /* - * User specified count overrides default frequency. - */ - if (top.default_interval) - top.freq = 0; - else if (top.freq) { - top.default_interval = top.freq; - } else { - ui__error("frequency and count are zero, aborting\n"); - exit(EXIT_FAILURE); - } - - list_for_each_entry(pos, &top.evlist->entries, node) { - /* - * Fill in the ones not specifically initialized via -c: - */ - if (!pos->attr.sample_period) - pos->attr.sample_period = top.default_interval; + if (record_opts__config(opts)) { + status = -EINVAL; + goto out_delete_evlist; } top.sym_evsel = perf_evlist__first(top.evlist); + if (!symbol_conf.use_callchain) { + symbol_conf.cumulate_callchain = false; + perf_hpp__cancel_cumulate(); + } + symbol_conf.priv_size = sizeof(struct annotation); symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); if (symbol__init() < 0) return -1; - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); - sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); - sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); - - /* - * Avoid annotation data structures overhead when symbols aren't on the - * sort list. 
- */ - top.sort_has_symbols = sort_sym.list.next != NULL; + sort__setup_elide(stdout); get_term_dimensions(&top.winsize); if (top.print_entries == 0) { diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 7aaee39f677..f954c26de23 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1,28 +1,1095 @@ +#include <traceevent/event-parse.h> #include "builtin.h" +#include "util/color.h" +#include "util/debug.h" #include "util/evlist.h" +#include "util/machine.h" +#include "util/session.h" +#include "util/thread.h" #include "util/parse-options.h" +#include "util/strlist.h" +#include "util/intlist.h" #include "util/thread_map.h" -#include "event-parse.h" +#include "util/stat.h" +#include "trace-event.h" +#include "util/parse-events.h" #include <libaudit.h> #include <stdlib.h> +#include <sys/eventfd.h> +#include <sys/mman.h> +#include <linux/futex.h> + +/* For older distros: */ +#ifndef MAP_STACK +# define MAP_STACK 0x20000 +#endif + +#ifndef MADV_HWPOISON +# define MADV_HWPOISON 100 +#endif + +#ifndef MADV_MERGEABLE +# define MADV_MERGEABLE 12 +#endif + +#ifndef MADV_UNMERGEABLE +# define MADV_UNMERGEABLE 13 +#endif + +#ifndef EFD_SEMAPHORE +# define EFD_SEMAPHORE 1 +#endif + +struct tp_field { + int offset; + union { + u64 (*integer)(struct tp_field *field, struct perf_sample *sample); + void *(*pointer)(struct tp_field *field, struct perf_sample *sample); + }; +}; + +#define TP_UINT_FIELD(bits) \ +static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \ +{ \ + return *(u##bits *)(sample->raw_data + field->offset); \ +} + +TP_UINT_FIELD(8); +TP_UINT_FIELD(16); +TP_UINT_FIELD(32); +TP_UINT_FIELD(64); + +#define TP_UINT_FIELD__SWAPPED(bits) \ +static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \ +{ \ + u##bits value = *(u##bits *)(sample->raw_data + field->offset); \ + return bswap_##bits(value);\ +} + +TP_UINT_FIELD__SWAPPED(16); +TP_UINT_FIELD__SWAPPED(32); +TP_UINT_FIELD__SWAPPED(64); + +static int tp_field__init_uint(struct tp_field *field, + struct format_field *format_field, + bool needs_swap) +{ + field->offset = format_field->offset; + + switch (format_field->size) { + case 1: + field->integer = tp_field__u8; + break; + case 2: + field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; + break; + case 4: + field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; + break; + case 8: + field->integer = needs_swap ? 
tp_field__swapped_u64 : tp_field__u64; + break; + default: + return -1; + } + + return 0; +} + +static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) +{ + return sample->raw_data + field->offset; +} + +static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field) +{ + field->offset = format_field->offset; + field->pointer = tp_field__ptr; + return 0; +} + +struct syscall_tp { + struct tp_field id; + union { + struct tp_field args, ret; + }; +}; + +static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel, + struct tp_field *field, + const char *name) +{ + struct format_field *format_field = perf_evsel__field(evsel, name); + + if (format_field == NULL) + return -1; + + return tp_field__init_uint(field, format_field, evsel->needs_swap); +} + +#define perf_evsel__init_sc_tp_uint_field(evsel, name) \ + ({ struct syscall_tp *sc = evsel->priv;\ + perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); }) + +static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel, + struct tp_field *field, + const char *name) +{ + struct format_field *format_field = perf_evsel__field(evsel, name); + + if (format_field == NULL) + return -1; + + return tp_field__init_ptr(field, format_field); +} + +#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \ + ({ struct syscall_tp *sc = evsel->priv;\ + perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); }) + +static void perf_evsel__delete_priv(struct perf_evsel *evsel) +{ + zfree(&evsel->priv); + perf_evsel__delete(evsel); +} + +static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler) +{ + evsel->priv = malloc(sizeof(struct syscall_tp)); + if (evsel->priv != NULL) { + if (perf_evsel__init_sc_tp_uint_field(evsel, id)) + goto out_delete; + + evsel->handler = handler; + return 0; + } + + return -ENOMEM; + +out_delete: + zfree(&evsel->priv); + return -ENOENT; +} + +static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler) +{ + struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction); + + /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */ + if (evsel == NULL) + evsel = perf_evsel__newtp("syscalls", direction); + + if (evsel) { + if (perf_evsel__init_syscall_tp(evsel, handler)) + goto out_delete; + } + + return evsel; + +out_delete: + perf_evsel__delete_priv(evsel); + return NULL; +} + +#define perf_evsel__sc_tp_uint(evsel, name, sample) \ + ({ struct syscall_tp *fields = evsel->priv; \ + fields->name.integer(&fields->name, sample); }) + +#define perf_evsel__sc_tp_ptr(evsel, name, sample) \ + ({ struct syscall_tp *fields = evsel->priv; \ + fields->name.pointer(&fields->name, sample); }) + +static int perf_evlist__add_syscall_newtp(struct perf_evlist *evlist, + void *sys_enter_handler, + void *sys_exit_handler) +{ + int ret = -1; + struct perf_evsel *sys_enter, *sys_exit; + + sys_enter = perf_evsel__syscall_newtp("sys_enter", sys_enter_handler); + if (sys_enter == NULL) + goto out; + + if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) + goto out_delete_sys_enter; + + sys_exit = perf_evsel__syscall_newtp("sys_exit", sys_exit_handler); + if (sys_exit == NULL) + goto out_delete_sys_enter; + + if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) + goto out_delete_sys_exit; + + perf_evlist__add(evlist, sys_enter); + perf_evlist__add(evlist, sys_exit); + + ret = 0; +out: + return ret; + +out_delete_sys_exit: + perf_evsel__delete_priv(sys_exit); +out_delete_sys_enter: + perf_evsel__delete_priv(sys_enter); + goto out; +} + + 
+struct syscall_arg { + unsigned long val; + struct thread *thread; + struct trace *trace; + void *parm; + u8 idx; + u8 mask; +}; + +struct strarray { + int offset; + int nr_entries; + const char **entries; +}; + +#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \ + .nr_entries = ARRAY_SIZE(array), \ + .entries = array, \ +} + +#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \ + .offset = off, \ + .nr_entries = ARRAY_SIZE(array), \ + .entries = array, \ +} + +static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size, + const char *intfmt, + struct syscall_arg *arg) +{ + struct strarray *sa = arg->parm; + int idx = arg->val - sa->offset; + + if (idx < 0 || idx >= sa->nr_entries) + return scnprintf(bf, size, intfmt, arg->val); + + return scnprintf(bf, size, "%s", sa->entries[idx]); +} + +static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size, + struct syscall_arg *arg) +{ + return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg); +} + +#define SCA_STRARRAY syscall_arg__scnprintf_strarray + +#if defined(__i386__) || defined(__x86_64__) +/* + * FIXME: Make this available to all arches as soon as the ioctl beautifier + * gets rewritten to support all arches. + */ +static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size, + struct syscall_arg *arg) +{ + return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg); +} + +#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray +#endif /* defined(__i386__) || defined(__x86_64__) */ + +static size_t syscall_arg__scnprintf_fd(char *bf, size_t size, + struct syscall_arg *arg); + +#define SCA_FD syscall_arg__scnprintf_fd + +static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, + struct syscall_arg *arg) +{ + int fd = arg->val; + + if (fd == AT_FDCWD) + return scnprintf(bf, size, "CWD"); + + return syscall_arg__scnprintf_fd(bf, size, arg); +} + +#define SCA_FDAT syscall_arg__scnprintf_fd_at + +static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, + struct syscall_arg *arg); + +#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd + +static size_t syscall_arg__scnprintf_hex(char *bf, size_t size, + struct syscall_arg *arg) +{ + return scnprintf(bf, size, "%#lx", arg->val); +} + +#define SCA_HEX syscall_arg__scnprintf_hex + +static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, prot = arg->val; + + if (prot == PROT_NONE) + return scnprintf(bf, size, "NONE"); +#define P_MMAP_PROT(n) \ + if (prot & PROT_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ + prot &= ~PROT_##n; \ + } + + P_MMAP_PROT(EXEC); + P_MMAP_PROT(READ); + P_MMAP_PROT(WRITE); +#ifdef PROT_SEM + P_MMAP_PROT(SEM); +#endif + P_MMAP_PROT(GROWSDOWN); + P_MMAP_PROT(GROWSUP); +#undef P_MMAP_PROT + + if (prot) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot); + + return printed; +} + +#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot + +static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, flags = arg->val; + +#define P_MMAP_FLAG(n) \ + if (flags & MAP_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? 
"|" : "", #n); \ + flags &= ~MAP_##n; \ + } + + P_MMAP_FLAG(SHARED); + P_MMAP_FLAG(PRIVATE); +#ifdef MAP_32BIT + P_MMAP_FLAG(32BIT); +#endif + P_MMAP_FLAG(ANONYMOUS); + P_MMAP_FLAG(DENYWRITE); + P_MMAP_FLAG(EXECUTABLE); + P_MMAP_FLAG(FILE); + P_MMAP_FLAG(FIXED); + P_MMAP_FLAG(GROWSDOWN); +#ifdef MAP_HUGETLB + P_MMAP_FLAG(HUGETLB); +#endif + P_MMAP_FLAG(LOCKED); + P_MMAP_FLAG(NONBLOCK); + P_MMAP_FLAG(NORESERVE); + P_MMAP_FLAG(POPULATE); + P_MMAP_FLAG(STACK); +#ifdef MAP_UNINITIALIZED + P_MMAP_FLAG(UNINITIALIZED); +#endif +#undef P_MMAP_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); + + return printed; +} + +#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags + +static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size, + struct syscall_arg *arg) +{ + int behavior = arg->val; + + switch (behavior) { +#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n) + P_MADV_BHV(NORMAL); + P_MADV_BHV(RANDOM); + P_MADV_BHV(SEQUENTIAL); + P_MADV_BHV(WILLNEED); + P_MADV_BHV(DONTNEED); + P_MADV_BHV(REMOVE); + P_MADV_BHV(DONTFORK); + P_MADV_BHV(DOFORK); + P_MADV_BHV(HWPOISON); +#ifdef MADV_SOFT_OFFLINE + P_MADV_BHV(SOFT_OFFLINE); +#endif + P_MADV_BHV(MERGEABLE); + P_MADV_BHV(UNMERGEABLE); +#ifdef MADV_HUGEPAGE + P_MADV_BHV(HUGEPAGE); +#endif +#ifdef MADV_NOHUGEPAGE + P_MADV_BHV(NOHUGEPAGE); +#endif +#ifdef MADV_DONTDUMP + P_MADV_BHV(DONTDUMP); +#endif +#ifdef MADV_DODUMP + P_MADV_BHV(DODUMP); +#endif +#undef P_MADV_PHV + default: break; + } + + return scnprintf(bf, size, "%#x", behavior); +} + +#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior + +static size_t syscall_arg__scnprintf_flock(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, op = arg->val; + + if (op == 0) + return scnprintf(bf, size, "NONE"); +#define P_CMD(cmd) \ + if ((op & LOCK_##cmd) == LOCK_##cmd) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \ + op &= ~LOCK_##cmd; \ + } + + P_CMD(SH); + P_CMD(EX); + P_CMD(NB); + P_CMD(UN); + P_CMD(MAND); + P_CMD(RW); + P_CMD(READ); + P_CMD(WRITE); +#undef P_OP + + if (op) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", op); + + return printed; +} + +#define SCA_FLOCK syscall_arg__scnprintf_flock + +static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg) +{ + enum syscall_futex_args { + SCF_UADDR = (1 << 0), + SCF_OP = (1 << 1), + SCF_VAL = (1 << 2), + SCF_TIMEOUT = (1 << 3), + SCF_UADDR2 = (1 << 4), + SCF_VAL3 = (1 << 5), + }; + int op = arg->val; + int cmd = op & FUTEX_CMD_MASK; + size_t printed = 0; + + switch (cmd) { +#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n); + P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break; + P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; + P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; + P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break; + P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break; + P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break; + P_FUTEX_OP(WAKE_OP); break; + P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; + P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; + P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break; + P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break; + P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break; + P_FUTEX_OP(WAIT_REQUEUE_PI); break; + default: printed = scnprintf(bf, size, "%#x", cmd); break; + } + + if (op & FUTEX_PRIVATE_FLAG) + printed += scnprintf(bf + printed, size - printed, "|PRIV"); + + if (op & FUTEX_CLOCK_REALTIME) + printed += scnprintf(bf + printed, size - printed, "|CLKRT"); + + return printed; +} + +#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op + +static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", }; +static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1); + +static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", }; +static DEFINE_STRARRAY(itimers); + +static const char *whences[] = { "SET", "CUR", "END", +#ifdef SEEK_DATA +"DATA", +#endif +#ifdef SEEK_HOLE +"HOLE", +#endif +}; +static DEFINE_STRARRAY(whences); + +static const char *fcntl_cmds[] = { + "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK", + "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64", + "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX", + "F_GETOWNER_UIDS", +}; +static DEFINE_STRARRAY(fcntl_cmds); + +static const char *rlimit_resources[] = { + "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE", + "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO", + "RTTIME", +}; +static DEFINE_STRARRAY(rlimit_resources); + +static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", }; +static DEFINE_STRARRAY(sighow); + +static const char *clockid[] = { + "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID", + "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", +}; +static DEFINE_STRARRAY(clockid); + +static const char *socket_families[] = { + "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM", + "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI", + "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC", + "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC", + "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF", + "ALG", "NFC", "VSOCK", +}; +static DEFINE_STRARRAY(socket_families); + +#ifndef SOCK_TYPE_MASK +#define SOCK_TYPE_MASK 0xf +#endif + +static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size, + struct syscall_arg *arg) +{ + size_t printed; + int type = 
arg->val, + flags = type & ~SOCK_TYPE_MASK; + + type &= SOCK_TYPE_MASK; + /* + * Can't use a strarray, MIPS may override for ABI reasons. + */ + switch (type) { +#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break; + P_SK_TYPE(STREAM); + P_SK_TYPE(DGRAM); + P_SK_TYPE(RAW); + P_SK_TYPE(RDM); + P_SK_TYPE(SEQPACKET); + P_SK_TYPE(DCCP); + P_SK_TYPE(PACKET); +#undef P_SK_TYPE + default: + printed = scnprintf(bf, size, "%#x", type); + } + +#define P_SK_FLAG(n) \ + if (flags & SOCK_##n) { \ + printed += scnprintf(bf + printed, size - printed, "|%s", #n); \ + flags &= ~SOCK_##n; \ + } + + P_SK_FLAG(CLOEXEC); + P_SK_FLAG(NONBLOCK); +#undef P_SK_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "|%#x", flags); + + return printed; +} + +#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type + +#ifndef MSG_PROBE +#define MSG_PROBE 0x10 +#endif +#ifndef MSG_WAITFORONE +#define MSG_WAITFORONE 0x10000 +#endif +#ifndef MSG_SENDPAGE_NOTLAST +#define MSG_SENDPAGE_NOTLAST 0x20000 +#endif +#ifndef MSG_FASTOPEN +#define MSG_FASTOPEN 0x20000000 +#endif + +static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, flags = arg->val; + + if (flags == 0) + return scnprintf(bf, size, "NONE"); +#define P_MSG_FLAG(n) \ + if (flags & MSG_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ + flags &= ~MSG_##n; \ + } + + P_MSG_FLAG(OOB); + P_MSG_FLAG(PEEK); + P_MSG_FLAG(DONTROUTE); + P_MSG_FLAG(TRYHARD); + P_MSG_FLAG(CTRUNC); + P_MSG_FLAG(PROBE); + P_MSG_FLAG(TRUNC); + P_MSG_FLAG(DONTWAIT); + P_MSG_FLAG(EOR); + P_MSG_FLAG(WAITALL); + P_MSG_FLAG(FIN); + P_MSG_FLAG(SYN); + P_MSG_FLAG(CONFIRM); + P_MSG_FLAG(RST); + P_MSG_FLAG(ERRQUEUE); + P_MSG_FLAG(NOSIGNAL); + P_MSG_FLAG(MORE); + P_MSG_FLAG(WAITFORONE); + P_MSG_FLAG(SENDPAGE_NOTLAST); + P_MSG_FLAG(FASTOPEN); + P_MSG_FLAG(CMSG_CLOEXEC); +#undef P_MSG_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); + + return printed; +} + +#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags + +static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, + struct syscall_arg *arg) +{ + size_t printed = 0; + int mode = arg->val; + + if (mode == F_OK) /* 0 */ + return scnprintf(bf, size, "F"); +#define P_MODE(n) \ + if (mode & n##_OK) { \ + printed += scnprintf(bf + printed, size - printed, "%s", #n); \ + mode &= ~n##_OK; \ + } + + P_MODE(R); + P_MODE(W); + P_MODE(X); +#undef P_MODE + + if (mode) + printed += scnprintf(bf + printed, size - printed, "|%#x", mode); + + return printed; +} + +#define SCA_ACCMODE syscall_arg__scnprintf_access_mode + +static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, flags = arg->val; + + if (!(flags & O_CREAT)) + arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */ + + if (flags == 0) + return scnprintf(bf, size, "RDONLY"); +#define P_FLAG(n) \ + if (flags & O_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? 
"|" : "", #n); \ + flags &= ~O_##n; \ + } + + P_FLAG(APPEND); + P_FLAG(ASYNC); + P_FLAG(CLOEXEC); + P_FLAG(CREAT); + P_FLAG(DIRECT); + P_FLAG(DIRECTORY); + P_FLAG(EXCL); + P_FLAG(LARGEFILE); + P_FLAG(NOATIME); + P_FLAG(NOCTTY); +#ifdef O_NONBLOCK + P_FLAG(NONBLOCK); +#elif O_NDELAY + P_FLAG(NDELAY); +#endif +#ifdef O_PATH + P_FLAG(PATH); +#endif + P_FLAG(RDWR); +#ifdef O_DSYNC + if ((flags & O_SYNC) == O_SYNC) + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC"); + else { + P_FLAG(DSYNC); + } +#else + P_FLAG(SYNC); +#endif + P_FLAG(TRUNC); + P_FLAG(WRONLY); +#undef P_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); + + return printed; +} + +#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags + +static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, flags = arg->val; + + if (flags == 0) + return scnprintf(bf, size, "NONE"); +#define P_FLAG(n) \ + if (flags & EFD_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ + flags &= ~EFD_##n; \ + } + + P_FLAG(SEMAPHORE); + P_FLAG(CLOEXEC); + P_FLAG(NONBLOCK); +#undef P_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); + + return printed; +} + +#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags + +static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, + struct syscall_arg *arg) +{ + int printed = 0, flags = arg->val; + +#define P_FLAG(n) \ + if (flags & O_##n) { \ + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ + flags &= ~O_##n; \ + } + + P_FLAG(CLOEXEC); + P_FLAG(NONBLOCK); +#undef P_FLAG + + if (flags) + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); + + return printed; +} + +#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags + +static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg) +{ + int sig = arg->val; + + switch (sig) { +#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n) + P_SIGNUM(HUP); + P_SIGNUM(INT); + P_SIGNUM(QUIT); + P_SIGNUM(ILL); + P_SIGNUM(TRAP); + P_SIGNUM(ABRT); + P_SIGNUM(BUS); + P_SIGNUM(FPE); + P_SIGNUM(KILL); + P_SIGNUM(USR1); + P_SIGNUM(SEGV); + P_SIGNUM(USR2); + P_SIGNUM(PIPE); + P_SIGNUM(ALRM); + P_SIGNUM(TERM); + P_SIGNUM(CHLD); + P_SIGNUM(CONT); + P_SIGNUM(STOP); + P_SIGNUM(TSTP); + P_SIGNUM(TTIN); + P_SIGNUM(TTOU); + P_SIGNUM(URG); + P_SIGNUM(XCPU); + P_SIGNUM(XFSZ); + P_SIGNUM(VTALRM); + P_SIGNUM(PROF); + P_SIGNUM(WINCH); + P_SIGNUM(IO); + P_SIGNUM(PWR); + P_SIGNUM(SYS); +#ifdef SIGEMT + P_SIGNUM(EMT); +#endif +#ifdef SIGSTKFLT + P_SIGNUM(STKFLT); +#endif +#ifdef SIGSWI + P_SIGNUM(SWI); +#endif + default: break; + } + + return scnprintf(bf, size, "%#x", sig); +} + +#define SCA_SIGNUM syscall_arg__scnprintf_signum + +#if defined(__i386__) || defined(__x86_64__) +/* + * FIXME: Make this available to all arches. 
+ */ +#define TCGETS 0x5401 + +static const char *tioctls[] = { + "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW", + "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", + "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", + "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC", + "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX", + "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO", + "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK", + "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2", + "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK", + "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG", + "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL", + [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG", + "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS", + "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI", + "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE", +}; + +static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401); +#endif /* defined(__i386__) || defined(__x86_64__) */ + +#define STRARRAY(arg, name, array) \ + .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \ + .arg_parm = { [arg] = &strarray__##array, } static struct syscall_fmt { const char *name; const char *alias; + size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg); + void *arg_parm[6]; bool errmsg; bool timeout; + bool hexret; } syscall_fmts[] = { + { .name = "access", .errmsg = true, + .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, }, { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, - { .name = "fstat", .errmsg = true, .alias = "newfstat", }, - { .name = "fstatat", .errmsg = true, .alias = "newfstatat", }, - { .name = "futex", .errmsg = true, }, + { .name = "brk", .hexret = true, + .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, + { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), }, + { .name = "close", .errmsg = true, + .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, + { .name = "connect", .errmsg = true, }, + { .name = "dup", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "dup2", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "dup3", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), }, + { .name = "eventfd2", .errmsg = true, + .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, }, + { .name = "faccessat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "fadvise64", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fallocate", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fchdir", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fchmod", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fchmodat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + { .name = "fchown", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fchownat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + { .name = "fcntl", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ + [1] = SCA_STRARRAY, /* cmd */ }, + .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, }, + { .name = "fdatasync", .errmsg = true, + .arg_scnprintf = { [0] = 
SCA_FD, /* fd */ }, }, + { .name = "flock", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ + [1] = SCA_FLOCK, /* cmd */ }, }, + { .name = "fsetxattr", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fstat", .errmsg = true, .alias = "newfstat", + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fstatat", .errmsg = true, .alias = "newfstatat", + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "fstatfs", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "fsync", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "ftruncate", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "futex", .errmsg = true, + .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, }, + { .name = "futimesat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + { .name = "getdents", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "getdents64", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), }, + { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, + { .name = "ioctl", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ +#if defined(__i386__) || defined(__x86_64__) +/* + * FIXME: Make this available to all arches. + */ + [1] = SCA_STRHEXARRAY, /* cmd */ + [2] = SCA_HEX, /* arg */ }, + .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, }, +#else + [2] = SCA_HEX, /* arg */ }, }, +#endif + { .name = "kill", .errmsg = true, + .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, + { .name = "linkat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + { .name = "lseek", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ + [2] = SCA_STRARRAY, /* whence */ }, + .arg_parm = { [2] = &strarray__whences, /* whence */ }, }, + { .name = "lstat", .errmsg = true, .alias = "newlstat", }, + { .name = "madvise", .errmsg = true, + .arg_scnprintf = { [0] = SCA_HEX, /* start */ + [2] = SCA_MADV_BHV, /* behavior */ }, }, + { .name = "mkdirat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + { .name = "mknodat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, + { .name = "mlock", .errmsg = true, + .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, + { .name = "mlockall", .errmsg = true, + .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, + { .name = "mmap", .hexret = true, + .arg_scnprintf = { [0] = SCA_HEX, /* addr */ + [2] = SCA_MMAP_PROT, /* prot */ + [3] = SCA_MMAP_FLAGS, /* flags */ + [4] = SCA_FD, /* fd */ }, }, + { .name = "mprotect", .errmsg = true, + .arg_scnprintf = { [0] = SCA_HEX, /* start */ + [2] = SCA_MMAP_PROT, /* prot */ }, }, + { .name = "mremap", .hexret = true, + .arg_scnprintf = { [0] = SCA_HEX, /* addr */ + [4] = SCA_HEX, /* new_addr */ }, }, + { .name = "munlock", .errmsg = true, + .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, + { .name = "munmap", .errmsg = true, + .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, + { .name = "name_to_handle_at", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "newfstatat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "open", .errmsg = true, + .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, }, + { .name = "open_by_handle_at", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ + [2] = SCA_OPEN_FLAGS, /* 
flags */ }, }, + { .name = "openat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ + [2] = SCA_OPEN_FLAGS, /* flags */ }, }, + { .name = "pipe2", .errmsg = true, + .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, }, { .name = "poll", .errmsg = true, .timeout = true, }, { .name = "ppoll", .errmsg = true, .timeout = true, }, - { .name = "read", .errmsg = true, }, - { .name = "recvfrom", .errmsg = true, }, + { .name = "pread", .errmsg = true, .alias = "pread64", + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "preadv", .errmsg = true, .alias = "pread", + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), }, + { .name = "pwrite", .errmsg = true, .alias = "pwrite64", + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "pwritev", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "read", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "readlinkat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "readv", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "recvfrom", .errmsg = true, + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, + { .name = "recvmmsg", .errmsg = true, + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, + { .name = "recvmsg", .errmsg = true, + .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, }, + { .name = "renameat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "rt_sigaction", .errmsg = true, + .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, }, + { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), }, + { .name = "rt_sigqueueinfo", .errmsg = true, + .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, + { .name = "rt_tgsigqueueinfo", .errmsg = true, + .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, }, { .name = "select", .errmsg = true, .timeout = true, }, + { .name = "sendmmsg", .errmsg = true, + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, + { .name = "sendmsg", .errmsg = true, + .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, }, + { .name = "sendto", .errmsg = true, + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, + { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), }, + { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, + { .name = "shutdown", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "socket", .errmsg = true, + .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ + [1] = SCA_SK_TYPE, /* type */ }, + .arg_parm = { [0] = &strarray__socket_families, /* family */ }, }, + { .name = "socketpair", .errmsg = true, + .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ + [1] = SCA_SK_TYPE, /* type */ }, + .arg_parm = { [0] = &strarray__socket_families, /* family */ }, }, { .name = "stat", .errmsg = true, .alias = "newstat", }, + { .name = "symlinkat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "tgkill", .errmsg = true, + .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, }, + { .name = "tkill", .errmsg = true, + .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, + { .name = "uname", .errmsg = true, .alias = "newuname", }, + { .name = "unlinkat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, + { .name = "utimensat", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, }, + { 
.name = "write", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, + { .name = "writev", .errmsg = true, + .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, }; static int syscall_fmt__cmp(const void *name, const void *fmtp) @@ -40,23 +1107,322 @@ static struct syscall_fmt *syscall_fmt__find(const char *name) struct syscall { struct event_format *tp_format; const char *name; + bool filtered; struct syscall_fmt *fmt; + size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg); + void **arg_parm; }; +static size_t fprintf_duration(unsigned long t, FILE *fp) +{ + double duration = (double)t / NSEC_PER_MSEC; + size_t printed = fprintf(fp, "("); + + if (duration >= 1.0) + printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration); + else if (duration >= 0.01) + printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration); + else + printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration); + return printed + fprintf(fp, "): "); +} + +struct thread_trace { + u64 entry_time; + u64 exit_time; + bool entry_pending; + unsigned long nr_events; + char *entry_str; + double runtime_ms; + struct { + int max; + char **table; + } paths; + + struct intlist *syscall_stats; +}; + +static struct thread_trace *thread_trace__new(void) +{ + struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace)); + + if (ttrace) + ttrace->paths.max = -1; + + ttrace->syscall_stats = intlist__new(NULL); + + return ttrace; +} + +static struct thread_trace *thread__trace(struct thread *thread, FILE *fp) +{ + struct thread_trace *ttrace; + + if (thread == NULL) + goto fail; + + if (thread->priv == NULL) + thread->priv = thread_trace__new(); + + if (thread->priv == NULL) + goto fail; + + ttrace = thread->priv; + ++ttrace->nr_events; + + return ttrace; +fail: + color_fprintf(fp, PERF_COLOR_RED, + "WARNING: not enough memory, dropping samples!\n"); + return NULL; +} + struct trace { - int audit_machine; + struct perf_tool tool; + struct { + int machine; + int open_id; + } audit; struct { int max; struct syscall *table; } syscalls; - struct perf_record_opts opts; + struct record_opts opts; + struct machine *host; + u64 base_time; + FILE *output; + unsigned long nr_events; + struct strlist *ev_qualifier; + const char *last_vfs_getname; + struct intlist *tid_list; + struct intlist *pid_list; + double duration_filter; + double runtime_ms; + struct { + u64 vfs_getname, + proc_getname; + } stats; + bool not_ev_qualifier; + bool live; + bool full_time; + bool sched; + bool multiple_threads; + bool summary; + bool summary_only; + bool show_comm; + bool show_tool_stats; }; +static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname) +{ + struct thread_trace *ttrace = thread->priv; + + if (fd > ttrace->paths.max) { + char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *)); + + if (npath == NULL) + return -1; + + if (ttrace->paths.max != -1) { + memset(npath + ttrace->paths.max + 1, 0, + (fd - ttrace->paths.max) * sizeof(char *)); + } else { + memset(npath, 0, (fd + 1) * sizeof(char *)); + } + + ttrace->paths.table = npath; + ttrace->paths.max = fd; + } + + ttrace->paths.table[fd] = strdup(pathname); + + return ttrace->paths.table[fd] != NULL ? 
0 : -1; +} + +static int thread__read_fd_path(struct thread *thread, int fd) +{ + char linkname[PATH_MAX], pathname[PATH_MAX]; + struct stat st; + int ret; + + if (thread->pid_ == thread->tid) { + scnprintf(linkname, sizeof(linkname), + "/proc/%d/fd/%d", thread->pid_, fd); + } else { + scnprintf(linkname, sizeof(linkname), + "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd); + } + + if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname)) + return -1; + + ret = readlink(linkname, pathname, sizeof(pathname)); + + if (ret < 0 || ret > st.st_size) + return -1; + + pathname[ret] = '\0'; + return trace__set_fd_pathname(thread, fd, pathname); +} + +static const char *thread__fd_path(struct thread *thread, int fd, + struct trace *trace) +{ + struct thread_trace *ttrace = thread->priv; + + if (ttrace == NULL) + return NULL; + + if (fd < 0) + return NULL; + + if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) + if (!trace->live) + return NULL; + ++trace->stats.proc_getname; + if (thread__read_fd_path(thread, fd)) { + return NULL; + } + + return ttrace->paths.table[fd]; +} + +static size_t syscall_arg__scnprintf_fd(char *bf, size_t size, + struct syscall_arg *arg) +{ + int fd = arg->val; + size_t printed = scnprintf(bf, size, "%d", fd); + const char *path = thread__fd_path(arg->thread, fd, arg->trace); + + if (path) + printed += scnprintf(bf + printed, size - printed, "<%s>", path); + + return printed; +} + +static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, + struct syscall_arg *arg) +{ + int fd = arg->val; + size_t printed = syscall_arg__scnprintf_fd(bf, size, arg); + struct thread_trace *ttrace = arg->thread->priv; + + if (ttrace && fd >= 0 && fd <= ttrace->paths.max) + zfree(&ttrace->paths.table[fd]); + + return printed; +} + +static bool trace__filter_duration(struct trace *trace, double t) +{ + return t < (trace->duration_filter * NSEC_PER_MSEC); +} + +static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) +{ + double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; + + return fprintf(fp, "%10.3f ", ts); +} + +static bool done = false; +static bool interrupted = false; + +static void sig_handler(int sig) +{ + done = true; + interrupted = sig == SIGINT; +} + +static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, + u64 duration, u64 tstamp, FILE *fp) +{ + size_t printed = trace__fprintf_tstamp(trace, tstamp, fp); + printed += fprintf_duration(duration, fp); + + if (trace->multiple_threads) { + if (trace->show_comm) + printed += fprintf(fp, "%.14s/", thread__comm_str(thread)); + printed += fprintf(fp, "%d ", thread->tid); + } + + return printed; +} + +static int trace__process_event(struct trace *trace, struct machine *machine, + union perf_event *event, struct perf_sample *sample) +{ + int ret = 0; + + switch (event->header.type) { + case PERF_RECORD_LOST: + color_fprintf(trace->output, PERF_COLOR_RED, + "LOST %" PRIu64 " events!\n", event->lost.lost); + ret = machine__process_lost_event(machine, event, sample); + default: + ret = machine__process_event(machine, event, sample); + break; + } + + return ret; +} + +static int trace__tool_process(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + struct trace *trace = container_of(tool, struct trace, tool); + return trace__process_event(trace, machine, event, sample); +} + +static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist) +{ + int err = 
symbol__init(); + + if (err) + return err; + + trace->host = machine__new_host(); + if (trace->host == NULL) + return -ENOMEM; + + err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, + evlist->threads, trace__tool_process, false); + if (err) + symbol__exit(); + + return err; +} + +static int syscall__set_arg_fmts(struct syscall *sc) +{ + struct format_field *field; + int idx = 0; + + sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *)); + if (sc->arg_scnprintf == NULL) + return -1; + + if (sc->fmt) + sc->arg_parm = sc->fmt->arg_parm; + + for (field = sc->tp_format->format.fields->next; field; field = field->next) { + if (sc->fmt && sc->fmt->arg_scnprintf[idx]) + sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx]; + else if (field->flags & FIELD_IS_POINTER) + sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex; + ++idx; + } + + return 0; +} + static int trace__read_syscall_info(struct trace *trace, int id) { char tp_name[128]; struct syscall *sc; - const char *name = audit_syscall_to_name(id, trace->audit_machine); + const char *name = audit_syscall_to_name(id, trace->audit.machine); if (name == NULL) return -1; @@ -80,34 +1446,87 @@ static int trace__read_syscall_info(struct trace *trace, int id) sc = trace->syscalls.table + id; sc->name = name; + + if (trace->ev_qualifier) { + bool in = strlist__find(trace->ev_qualifier, name) != NULL; + + if (!(in ^ trace->not_ev_qualifier)) { + sc->filtered = true; + /* + * No need to do read tracepoint information since this will be + * filtered out. + */ + return 0; + } + } + sc->fmt = syscall_fmt__find(sc->name); snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); - sc->tp_format = event_format__new("syscalls", tp_name); + sc->tp_format = trace_event__tp_format("syscalls", tp_name); if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) { snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); - sc->tp_format = event_format__new("syscalls", tp_name); + sc->tp_format = trace_event__tp_format("syscalls", tp_name); } - return sc->tp_format != NULL ? 0 : -1; + if (sc->tp_format == NULL) + return -1; + + return syscall__set_arg_fmts(sc); } -static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp) +static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, + unsigned long *args, struct trace *trace, + struct thread *thread) { - int i = 0; size_t printed = 0; if (sc->tp_format != NULL) { struct format_field *field; + u8 bit = 1; + struct syscall_arg arg = { + .idx = 0, + .mask = 0, + .trace = trace, + .thread = thread, + }; - for (field = sc->tp_format->format.fields->next; field; field = field->next) { - printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "", - field->name, args[i++]); + for (field = sc->tp_format->format.fields->next; field; + field = field->next, ++arg.idx, bit <<= 1) { + if (arg.mask & bit) + continue; + /* + * Suppress this argument if its value is zero and + * and we don't have a string associated in an + * strarray for it. + */ + if (args[arg.idx] == 0 && + !(sc->arg_scnprintf && + sc->arg_scnprintf[arg.idx] == SCA_STRARRAY && + sc->arg_parm[arg.idx])) + continue; + + printed += scnprintf(bf + printed, size - printed, + "%s%s: ", printed ? 
", " : "", field->name); + if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) { + arg.val = args[arg.idx]; + if (sc->arg_parm) + arg.parm = sc->arg_parm[arg.idx]; + printed += sc->arg_scnprintf[arg.idx](bf + printed, + size - printed, &arg); + } else { + printed += scnprintf(bf + printed, size - printed, + "%ld", args[arg.idx]); + } } } else { + int i = 0; + while (i < 6) { - printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]); + printed += scnprintf(bf + printed, size - printed, + "%sarg%d: %ld", + printed ? ", " : "", i, args[i]); ++i; } } @@ -119,13 +1538,26 @@ typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel, struct perf_sample *sample); static struct syscall *trace__syscall_info(struct trace *trace, - struct perf_evsel *evsel, - struct perf_sample *sample) + struct perf_evsel *evsel, int id) { - int id = perf_evsel__intval(evsel, sample, "id"); if (id < 0) { - printf("Invalid syscall %d id, skipping...\n", id); + + /* + * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried + * before that, leaving at a higher verbosity level till that is + * explained. Reproduced with plain ftrace with: + * + * echo 1 > /t/events/raw_syscalls/sys_exit/enable + * grep "NR -1 " /t/trace_pipe + * + * After generating some load on the machine. + */ + if (verbose > 1) { + static u64 n; + fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", + id, perf_evsel__name(evsel), ++n); + } return NULL; } @@ -139,27 +1571,86 @@ static struct syscall *trace__syscall_info(struct trace *trace, return &trace->syscalls.table[id]; out_cant_read: - printf("Problems reading syscall %d information\n", id); + if (verbose) { + fprintf(trace->output, "Problems reading syscall %d", id); + if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL) + fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); + fputs(" information\n", trace->output); + } return NULL; } +static void thread__update_stats(struct thread_trace *ttrace, + int id, struct perf_sample *sample) +{ + struct int_node *inode; + struct stats *stats; + u64 duration = 0; + + inode = intlist__findnew(ttrace->syscall_stats, id); + if (inode == NULL) + return; + + stats = inode->priv; + if (stats == NULL) { + stats = malloc(sizeof(struct stats)); + if (stats == NULL) + return; + init_stats(stats); + inode->priv = stats; + } + + if (ttrace->entry_time && sample->time > ttrace->entry_time) + duration = sample->time - ttrace->entry_time; + + update_stats(stats, duration); +} + static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel, struct perf_sample *sample) { + char *msg; void *args; - struct syscall *sc = trace__syscall_info(trace, evsel, sample); + size_t printed = 0; + struct thread *thread; + int id = perf_evsel__sc_tp_uint(evsel, id, sample); + struct syscall *sc = trace__syscall_info(trace, evsel, id); + struct thread_trace *ttrace; if (sc == NULL) return -1; - args = perf_evsel__rawptr(evsel, sample, "args"); - if (args == NULL) { - printf("Problems reading syscall arguments\n"); + if (sc->filtered) + return 0; + + thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); + ttrace = thread__trace(thread, trace->output); + if (ttrace == NULL) return -1; + + args = perf_evsel__sc_tp_ptr(evsel, args, sample); + ttrace = thread->priv; + + if (ttrace->entry_str == NULL) { + ttrace->entry_str = malloc(1024); + if (!ttrace->entry_str) + return -1; } - printf("%s(", sc->name); - syscall__fprintf_args(sc, args, 
stdout); + ttrace->entry_time = sample->time; + msg = ttrace->entry_str; + printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name); + + printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, + args, trace, thread); + + if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) { + if (!trace->duration_filter && !trace->summary_only) { + trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); + fprintf(trace->output, "%-70s\n", ttrace->entry_str); + } + } else + ttrace->entry_pending = true; return 0; } @@ -168,68 +1659,298 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel, struct perf_sample *sample) { int ret; - struct syscall *sc = trace__syscall_info(trace, evsel, sample); + u64 duration = 0; + struct thread *thread; + int id = perf_evsel__sc_tp_uint(evsel, id, sample); + struct syscall *sc = trace__syscall_info(trace, evsel, id); + struct thread_trace *ttrace; if (sc == NULL) return -1; - ret = perf_evsel__intval(evsel, sample, "ret"); + if (sc->filtered) + return 0; + + thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); + ttrace = thread__trace(thread, trace->output); + if (ttrace == NULL) + return -1; + + if (trace->summary) + thread__update_stats(ttrace, id, sample); + + ret = perf_evsel__sc_tp_uint(evsel, ret, sample); + + if (id == trace->audit.open_id && ret >= 0 && trace->last_vfs_getname) { + trace__set_fd_pathname(thread, ret, trace->last_vfs_getname); + trace->last_vfs_getname = NULL; + ++trace->stats.vfs_getname; + } + + ttrace = thread->priv; - if (ret < 0 && sc->fmt && sc->fmt->errmsg) { + ttrace->exit_time = sample->time; + + if (ttrace->entry_time) { + duration = sample->time - ttrace->entry_time; + if (trace__filter_duration(trace, duration)) + goto out; + } else if (trace->duration_filter) + goto out; + + if (trace->summary_only) + goto out; + + trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output); + + if (ttrace->entry_pending) { + fprintf(trace->output, "%-70s", ttrace->entry_str); + } else { + fprintf(trace->output, " ... 
["); + color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); + fprintf(trace->output, "]: %s()", sc->name); + } + + if (sc->fmt == NULL) { +signed_print: + fprintf(trace->output, ") = %d", ret); + } else if (ret < 0 && sc->fmt->errmsg) { char bf[256]; const char *emsg = strerror_r(-ret, bf, sizeof(bf)), *e = audit_errno_to_name(-ret); - printf(") = -1 %s %s", e, emsg); - } else if (ret == 0 && sc->fmt && sc->fmt->timeout) - printf(") = 0 Timeout"); + fprintf(trace->output, ") = -1 %s %s", e, emsg); + } else if (ret == 0 && sc->fmt->timeout) + fprintf(trace->output, ") = 0 Timeout"); + else if (sc->fmt->hexret) + fprintf(trace->output, ") = %#x", ret); else - printf(") = %d", ret); + goto signed_print; + + fputc('\n', trace->output); +out: + ttrace->entry_pending = false; + + return 0; +} + +static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel, + struct perf_sample *sample) +{ + trace->last_vfs_getname = perf_evsel__rawptr(evsel, sample, "pathname"); + return 0; +} + +static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel, + struct perf_sample *sample) +{ + u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); + double runtime_ms = (double)runtime / NSEC_PER_MSEC; + struct thread *thread = machine__findnew_thread(trace->host, + sample->pid, + sample->tid); + struct thread_trace *ttrace = thread__trace(thread, trace->output); + + if (ttrace == NULL) + goto out_dump; - putchar('\n'); + ttrace->runtime_ms += runtime_ms; + trace->runtime_ms += runtime_ms; return 0; + +out_dump: + fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", + evsel->name, + perf_evsel__strval(evsel, sample, "comm"), + (pid_t)perf_evsel__intval(evsel, sample, "pid"), + runtime, + perf_evsel__intval(evsel, sample, "vruntime")); + return 0; +} + +static bool skip_sample(struct trace *trace, struct perf_sample *sample) +{ + if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) || + (trace->tid_list && intlist__find(trace->tid_list, sample->tid))) + return false; + + if (trace->pid_list || trace->tid_list) + return true; + + return false; +} + +static int trace__process_sample(struct perf_tool *tool, + union perf_event *event __maybe_unused, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine __maybe_unused) +{ + struct trace *trace = container_of(tool, struct trace, tool); + int err = 0; + + tracepoint_handler handler = evsel->handler; + + if (skip_sample(trace, sample)) + return 0; + + if (!trace->full_time && trace->base_time == 0) + trace->base_time = sample->time; + + if (handler) { + ++trace->nr_events; + handler(trace, evsel, sample); + } + + return err; } -static int trace__run(struct trace *trace) +static int parse_target_str(struct trace *trace) { - struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + if (trace->opts.target.pid) { + trace->pid_list = intlist__new(trace->opts.target.pid); + if (trace->pid_list == NULL) { + pr_err("Error parsing process id string\n"); + return -EINVAL; + } + } + + if (trace->opts.target.tid) { + trace->tid_list = intlist__new(trace->opts.target.tid); + if (trace->tid_list == NULL) { + pr_err("Error parsing thread id string\n"); + return -EINVAL; + } + } + + return 0; +} + +static int trace__record(int argc, const char **argv) +{ + unsigned int rec_argc, i, j; + const char **rec_argv; + const char * const record_args[] = { + "record", + "-R", + "-m", "1024", + "-c", "1", + "-e", + }; + + /* +1 is for the event string below */ + 
rec_argc = ARRAY_SIZE(record_args) + 1 + argc; + rec_argv = calloc(rec_argc + 1, sizeof(char *)); + + if (rec_argv == NULL) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(record_args); i++) + rec_argv[i] = record_args[i]; + + /* event string may be different for older kernels - e.g., RHEL6 */ + if (is_valid_tracepoint("raw_syscalls:sys_enter")) + rec_argv[i] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; + else if (is_valid_tracepoint("syscalls:sys_enter")) + rec_argv[i] = "syscalls:sys_enter,syscalls:sys_exit"; + else { + pr_err("Neither raw_syscalls nor syscalls events exist.\n"); + return -1; + } + i++; + + for (j = 0; j < (unsigned int)argc; j++, i++) + rec_argv[i] = argv[j]; + + return cmd_record(i, rec_argv, NULL); +} + +static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); + +static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); + if (evsel == NULL) + return; + + if (perf_evsel__field(evsel, "pathname") == NULL) { + perf_evsel__delete(evsel); + return; + } + + evsel->handler = trace__vfs_getname; + perf_evlist__add(evlist, evsel); +} + +static int trace__run(struct trace *trace, int argc, const char **argv) +{ + struct perf_evlist *evlist = perf_evlist__new(); struct perf_evsel *evsel; - int err = -1, i, nr_events = 0, before; + int err = -1, i; + unsigned long before; + const bool forks = argc > 0; + + trace->live = true; if (evlist == NULL) { - printf("Not enough memory to run!\n"); + fprintf(trace->output, "Not enough memory to run!\n"); goto out; } - if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) || - perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) { - printf("Couldn't read the raw_syscalls tracepoints information!\n"); - goto out_delete_evlist; - } + if (perf_evlist__add_syscall_newtp(evlist, trace__sys_enter, trace__sys_exit)) + goto out_error_tp; + + perf_evlist__add_vfs_getname(evlist); + + if (trace->sched && + perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime", + trace__sched_stat_runtime)) + goto out_error_tp; err = perf_evlist__create_maps(evlist, &trace->opts.target); if (err < 0) { - printf("Problems parsing the target to trace, check your options!\n"); + fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); goto out_delete_evlist; } - perf_evlist__config_attrs(evlist, &trace->opts); - - err = perf_evlist__open(evlist); + err = trace__symbols_init(trace, evlist); if (err < 0) { - printf("Couldn't create the events: %s\n", strerror(errno)); + fprintf(trace->output, "Problems initializing symbol libraries!\n"); goto out_delete_evlist; } - err = perf_evlist__mmap(evlist, UINT_MAX, false); + perf_evlist__config(evlist, &trace->opts); + + signal(SIGCHLD, sig_handler); + signal(SIGINT, sig_handler); + + if (forks) { + err = perf_evlist__prepare_workload(evlist, &trace->opts.target, + argv, false, NULL); + if (err < 0) { + fprintf(trace->output, "Couldn't run the workload!\n"); + goto out_delete_evlist; + } + } + + err = perf_evlist__open(evlist); + if (err < 0) + goto out_error_open; + + err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false); if (err < 0) { - printf("Couldn't mmap the events: %s\n", strerror(errno)); + fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno)); goto out_delete_evlist; } perf_evlist__enable(evlist); + + if (forks) + perf_evlist__start_workload(evlist); + + trace->multiple_threads = 
evlist->threads->map[0] == -1 || evlist->threads->nr > 1; again: - before = nr_events; + before = trace->nr_events; for (i = 0; i < evlist->nr_mmaps; i++) { union perf_event *event; @@ -239,66 +1960,324 @@ again: tracepoint_handler handler; struct perf_sample sample; - ++nr_events; - - switch (type) { - case PERF_RECORD_SAMPLE: - break; - case PERF_RECORD_LOST: - printf("LOST %" PRIu64 " events!\n", event->lost.lost); - continue; - default: - printf("Unexpected %s event, skipping...\n", - perf_event__name(type)); - continue; - } + ++trace->nr_events; err = perf_evlist__parse_sample(evlist, event, &sample); if (err) { - printf("Can't parse sample, err = %d, skipping...\n", err); + fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); + goto next_event; + } + + if (!trace->full_time && trace->base_time == 0) + trace->base_time = sample.time; + + if (type != PERF_RECORD_SAMPLE) { + trace__process_event(trace, trace->host, event, &sample); continue; } evsel = perf_evlist__id2evsel(evlist, sample.id); if (evsel == NULL) { - printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id); - continue; + fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id); + goto next_event; } - if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1) - printf("%d ", sample.tid); - if (sample.raw_data == NULL) { - printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", + fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", perf_evsel__name(evsel), sample.tid, sample.cpu, sample.raw_size); - continue; + goto next_event; } - handler = evsel->handler.func; + handler = evsel->handler; handler(trace, evsel, &sample); +next_event: + perf_evlist__mmap_consume(evlist, i); + + if (interrupted) + goto out_disable; } } - if (nr_events == before) - poll(evlist->pollfd, evlist->nr_fds, -1); + if (trace->nr_events == before) { + int timeout = done ? 
100 : -1; + + if (poll(evlist->pollfd, evlist->nr_fds, timeout) > 0) + goto again; + } else { + goto again; + } + +out_disable: + perf_evlist__disable(evlist); - goto again; + if (!err) { + if (trace->summary) + trace__fprintf_thread_summary(trace, trace->output); + + if (trace->show_tool_stats) { + fprintf(trace->output, "Stats:\n " + " vfs_getname : %" PRIu64 "\n" + " proc_getname: %" PRIu64 "\n", + trace->stats.vfs_getname, + trace->stats.proc_getname); + } + } out_delete_evlist: perf_evlist__delete(evlist); out: + trace->live = false; + return err; +{ + char errbuf[BUFSIZ]; + +out_error_tp: + perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf)); + goto out_error; + +out_error_open: + perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); + +out_error: + fprintf(trace->output, "%s\n", errbuf); + goto out_delete_evlist; +} +} + +static int trace__replay(struct trace *trace) +{ + const struct perf_evsel_str_handler handlers[] = { + { "probe:vfs_getname", trace__vfs_getname, }, + }; + struct perf_data_file file = { + .path = input_name, + .mode = PERF_DATA_MODE_READ, + }; + struct perf_session *session; + struct perf_evsel *evsel; + int err = -1; + + trace->tool.sample = trace__process_sample; + trace->tool.mmap = perf_event__process_mmap; + trace->tool.mmap2 = perf_event__process_mmap2; + trace->tool.comm = perf_event__process_comm; + trace->tool.exit = perf_event__process_exit; + trace->tool.fork = perf_event__process_fork; + trace->tool.attr = perf_event__process_attr; + trace->tool.tracing_data = perf_event__process_tracing_data; + trace->tool.build_id = perf_event__process_build_id; + + trace->tool.ordered_samples = true; + trace->tool.ordering_requires_timestamps = true; + + /* add tid to output */ + trace->multiple_threads = true; + + if (symbol__init() < 0) + return -1; + + session = perf_session__new(&file, false, &trace->tool); + if (session == NULL) + return -ENOMEM; + + trace->host = &session->machines.host; + + err = perf_session__set_tracepoints_handlers(session, handlers); + if (err) + goto out; + + evsel = perf_evlist__find_tracepoint_by_name(session->evlist, + "raw_syscalls:sys_enter"); + /* older kernels have syscalls tp versus raw_syscalls */ + if (evsel == NULL) + evsel = perf_evlist__find_tracepoint_by_name(session->evlist, + "syscalls:sys_enter"); + if (evsel == NULL) { + pr_err("Data file does not have raw_syscalls:sys_enter event\n"); + goto out; + } + + if (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 || + perf_evsel__init_sc_tp_ptr_field(evsel, args)) { + pr_err("Error during initialize raw_syscalls:sys_enter event\n"); + goto out; + } + + evsel = perf_evlist__find_tracepoint_by_name(session->evlist, + "raw_syscalls:sys_exit"); + if (evsel == NULL) + evsel = perf_evlist__find_tracepoint_by_name(session->evlist, + "syscalls:sys_exit"); + if (evsel == NULL) { + pr_err("Data file does not have raw_syscalls:sys_exit event\n"); + goto out; + } + + if (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 || + perf_evsel__init_sc_tp_uint_field(evsel, ret)) { + pr_err("Error during initialize raw_syscalls:sys_exit event\n"); + goto out; + } + + err = parse_target_str(trace); + if (err != 0) + goto out; + + setup_pager(); + + err = perf_session__process_events(session, &trace->tool); + if (err) + pr_err("Failed to process events, error %d", err); + + else if (trace->summary) + trace__fprintf_thread_summary(trace, trace->output); + +out: + perf_session__delete(session); + return err; } +static size_t trace__fprintf_threads_header(FILE 
*fp) +{ + size_t printed; + + printed = fprintf(fp, "\n Summary of events:\n\n"); + + return printed; +} + +static size_t thread__dump_stats(struct thread_trace *ttrace, + struct trace *trace, FILE *fp) +{ + struct stats *stats; + size_t printed = 0; + struct syscall *sc; + struct int_node *inode = intlist__first(ttrace->syscall_stats); + + if (inode == NULL) + return 0; + + printed += fprintf(fp, "\n"); + + printed += fprintf(fp, " syscall calls min avg max stddev\n"); + printed += fprintf(fp, " (msec) (msec) (msec) (%%)\n"); + printed += fprintf(fp, " --------------- -------- --------- --------- --------- ------\n"); + + /* each int_node is a syscall */ + while (inode) { + stats = inode->priv; + if (stats) { + double min = (double)(stats->min) / NSEC_PER_MSEC; + double max = (double)(stats->max) / NSEC_PER_MSEC; + double avg = avg_stats(stats); + double pct; + u64 n = (u64) stats->n; + + pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0; + avg /= NSEC_PER_MSEC; + + sc = &trace->syscalls.table[inode->i]; + printed += fprintf(fp, " %-15s", sc->name); + printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f", + n, min, avg); + printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); + } + + inode = intlist__next(inode); + } + + printed += fprintf(fp, "\n\n"); + + return printed; +} + +/* struct used to pass data to per-thread function */ +struct summary_data { + FILE *fp; + struct trace *trace; + size_t printed; +}; + +static int trace__fprintf_one_thread(struct thread *thread, void *priv) +{ + struct summary_data *data = priv; + FILE *fp = data->fp; + size_t printed = data->printed; + struct trace *trace = data->trace; + struct thread_trace *ttrace = thread->priv; + double ratio; + + if (ttrace == NULL) + return 0; + + ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; + + printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid); + printed += fprintf(fp, "%lu events, ", ttrace->nr_events); + printed += fprintf(fp, "%.1f%%", ratio); + printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); + printed += thread__dump_stats(ttrace, trace, fp); + + data->printed += printed; + + return 0; +} + +static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) +{ + struct summary_data data = { + .fp = fp, + .trace = trace + }; + data.printed = trace__fprintf_threads_header(fp); + + machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data); + + return data.printed; +} + +static int trace__set_duration(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + struct trace *trace = opt->value; + + trace->duration_filter = atof(str); + return 0; +} + +static int trace__open_output(struct trace *trace, const char *filename) +{ + struct stat st; + + if (!stat(filename, &st) && st.st_size) { + char oldname[PATH_MAX]; + + scnprintf(oldname, sizeof(oldname), "%s.old", filename); + unlink(oldname); + rename(filename, oldname); + } + + trace->output = fopen(filename, "w"); + + return trace->output == NULL ? 
-errno : 0; +} + int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) { const char * const trace_usage[] = { - "perf trace [<options>]", + "perf trace [<options>] [<command>]", + "perf trace [<options>] -- <command> [<options>]", + "perf trace record [<options>] [<command>]", + "perf trace record [<options>] -- <command> [<options>]", NULL }; struct trace trace = { - .audit_machine = audit_detect_machine(), + .audit = { + .machine = audit_detect_machine(), + .open_id = audit_name_to_syscall("open", trace.audit.machine), + }, .syscalls = { . max = -1, }, @@ -309,40 +2288,110 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) }, .user_freq = UINT_MAX, .user_interval = ULLONG_MAX, - .no_delay = true, + .no_buffering = true, .mmap_pages = 1024, }, + .output = stdout, + .show_comm = true, }; + const char *output_name = NULL; + const char *ev_qualifier_str = NULL; const struct option trace_options[] = { + OPT_BOOLEAN(0, "comm", &trace.show_comm, + "show the thread COMM next to its id"), + OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), + OPT_STRING('e', "expr", &ev_qualifier_str, "expr", + "list of events to trace"), + OPT_STRING('o', "output", &output_name, "file", "output file name"), + OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", "trace events on existing process id"), - OPT_STRING(0, "tid", &trace.opts.target.tid, "tid", + OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", "trace events on existing thread id"), - OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide, + OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, "system-wide collection from all CPUs"), - OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu", + OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", "list of cpus to monitor"), OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, "child tasks do not inherit counters"), - OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages, - "number of mmap data pages"), - OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user", + OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", + "number of mmap data pages", + perf_evlist__parse_mmap_pages), + OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", "user to profile"), + OPT_CALLBACK(0, "duration", &trace, "float", + "show only events with duration > N.M ms", + trace__set_duration), + OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), + OPT_INCR('v', "verbose", &verbose, "be more verbose"), + OPT_BOOLEAN('T', "time", &trace.full_time, + "Show full timestamp, not time relative to first start"), + OPT_BOOLEAN('s', "summary", &trace.summary_only, + "Show only syscall summary with statistics"), + OPT_BOOLEAN('S', "with-summary", &trace.summary, + "Show all syscalls and summary with statistics"), OPT_END() }; int err; + char bf[BUFSIZ]; + + if ((argc > 1) && (strcmp(argv[1], "record") == 0)) + return trace__record(argc-2, &argv[2]); argc = parse_options(argc, argv, trace_options, trace_usage, 0); - if (argc) - usage_with_options(trace_usage, trace_options); - err = perf_target__parse_uid(&trace.opts.target); + /* summary_only implies summary option, but don't overwrite summary if set */ + if (trace.summary_only) + trace.summary = trace.summary_only; + + if (output_name != NULL) { + err = trace__open_output(&trace, output_name); + if (err < 0) { + perror("failed to create output file"); + goto out; + } + } + + if 
(ev_qualifier_str != NULL) { + const char *s = ev_qualifier_str; + + trace.not_ev_qualifier = *s == '!'; + if (trace.not_ev_qualifier) + ++s; + trace.ev_qualifier = strlist__new(true, s); + if (trace.ev_qualifier == NULL) { + fputs("Not enough memory to parse event qualifier", + trace.output); + err = -ENOMEM; + goto out_close; + } + } + + err = target__validate(&trace.opts.target); if (err) { - char bf[BUFSIZ]; - perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); - printf("%s", bf); - return err; + target__strerror(&trace.opts.target, err, bf, sizeof(bf)); + fprintf(trace.output, "%s", bf); + goto out_close; } - return trace__run(&trace); + err = target__parse_uid(&trace.opts.target); + if (err) { + target__strerror(&trace.opts.target, err, bf, sizeof(bf)); + fprintf(trace.output, "%s", bf); + goto out_close; + } + + if (!argc && target__none(&trace.opts.target)) + trace.opts.target.system_wide = true; + + if (input_name) + err = trace__replay(&trace); + else + err = trace__run(&trace, argc, argv); + +out_close: + if (output_name != NULL) + fclose(trace.output); +out: + return err; } diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index 08143bd854c..b210d62907e 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -36,6 +36,7 @@ extern int cmd_kvm(int argc, const char **argv, const char *prefix); extern int cmd_test(int argc, const char **argv, const char *prefix); extern int cmd_trace(int argc, const char **argv, const char *prefix); extern int cmd_inject(int argc, const char **argv, const char *prefix); +extern int cmd_mem(int argc, const char **argv, const char *prefix); extern int find_scripts(char **scripts_array, char **scripts_path_array); #endif diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index 3e86bbd8c2d..0906fc401c5 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -10,17 +10,18 @@ perf-buildid-list mainporcelain common perf-diff mainporcelain common perf-evlist mainporcelain common perf-inject mainporcelain common +perf-kmem mainporcelain common +perf-kvm mainporcelain common perf-list mainporcelain common -perf-sched mainporcelain common +perf-lock mainporcelain common +perf-mem mainporcelain common +perf-probe mainporcelain full perf-record mainporcelain common perf-report mainporcelain common +perf-sched mainporcelain common +perf-script mainporcelain common perf-stat mainporcelain common +perf-test mainporcelain common perf-timechart mainporcelain common perf-top mainporcelain common perf-trace mainporcelain common -perf-script mainporcelain common -perf-probe mainporcelain full -perf-kmem mainporcelain common -perf-lock mainporcelain common -perf-kvm mainporcelain common -perf-test mainporcelain common diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile new file mode 100644 index 00000000000..f30ac5e5d27 --- /dev/null +++ b/tools/perf/config/Makefile @@ -0,0 +1,738 @@ + +ifeq ($(src-perf),) +src-perf := $(srctree)/tools/perf +endif + +ifeq ($(obj-perf),) +obj-perf := $(OUTPUT) +endif + +ifneq ($(obj-perf),) +obj-perf := $(abspath $(obj-perf))/ +endif + +LIB_INCLUDE := $(srctree)/tools/lib/ +CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS) + +include $(src-perf)/config/Makefile.arch + +NO_PERF_REGS := 1 + +# Additional ARCH settings for x86 +ifeq ($(ARCH),x86) + ifeq (${IS_X86_64}, 1) + CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT + ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S + LIBUNWIND_LIBS = -lunwind -lunwind-x86_64 + else + LIBUNWIND_LIBS = 
-lunwind -lunwind-x86 + endif + NO_PERF_REGS := 0 +endif + +ifeq ($(ARCH),arm) + NO_PERF_REGS := 0 + LIBUNWIND_LIBS = -lunwind -lunwind-arm +endif + +ifeq ($(ARCH),arm64) + NO_PERF_REGS := 0 + LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 +endif + +# So far there's only x86 and arm libdw unwind support merged in perf. +# Disable it on all other architectures in case libdw unwind +# support is detected in system. Add supported architectures +# to the check. +ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) + NO_LIBDW_DWARF_UNWIND := 1 +endif + +ifeq ($(LIBUNWIND_LIBS),) + NO_LIBUNWIND := 1 +else + # + # For linking with debug library, run like: + # + # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/ + # + ifdef LIBUNWIND_DIR + LIBUNWIND_CFLAGS = -I$(LIBUNWIND_DIR)/include + LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib + endif + LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS) + + # Set per-feature check compilation flags + FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS) + FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) + FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS) + FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) +endif + +ifeq ($(NO_PERF_REGS),0) + CFLAGS += -DHAVE_PERF_REGS_SUPPORT +endif + +ifndef NO_LIBELF + # for linking with debug library, run like: + # make DEBUG=1 LIBDW_DIR=/opt/libdw/ + ifdef LIBDW_DIR + LIBDW_CFLAGS := -I$(LIBDW_DIR)/include + LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib + endif + FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS) + FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw +endif + +# include ARCH specific config +-include $(src-perf)/arch/$(ARCH)/Makefile + +include $(src-perf)/config/utilities.mak + +ifeq ($(call get-executable,$(FLEX)),) + dummy := $(error Error: $(FLEX) is missing on this system, please install it) +endif + +ifeq ($(call get-executable,$(BISON)),) + dummy := $(error Error: $(BISON) is missing on this system, please install it) +endif + +# Treat warnings as errors unless directed not to +ifneq ($(WERROR),0) + CFLAGS += -Werror +endif + +ifndef DEBUG + DEBUG := 0 +endif + +ifeq ($(DEBUG),0) + CFLAGS += -O6 +endif + +ifdef PARSER_DEBUG + PARSER_DEBUG_BISON := -t + PARSER_DEBUG_FLEX := -d + CFLAGS += -DPARSER_DEBUG +endif + +CFLAGS += -fno-omit-frame-pointer +CFLAGS += -ggdb3 +CFLAGS += -funwind-tables +CFLAGS += -Wall +CFLAGS += -Wextra +CFLAGS += -std=gnu99 + +# Enforce a non-executable stack, as we may regress (again) in the future by +# adding assembler files missing the .GNU-stack linker note. +LDFLAGS += -Wl,-z,noexecstack + +EXTLIBS = -lelf -lpthread -lrt -lm -ldl + +ifneq ($(OUTPUT),) + OUTPUT_FEATURES = $(OUTPUT)config/feature-checks/ + $(shell mkdir -p $(OUTPUT_FEATURES)) +endif + +feature_check = $(eval $(feature_check_code)) +define feature_check_code + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C config/feature-checks test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) +endef + +feature_set = $(eval $(feature_set_code)) +define feature_set_code + feature-$(1) := 1 +endef + +# +# Build the feature check binaries in parallel, ignore errors, ignore return value and suppress output: +# + +# +# Note that this is not a complete list of all feature tests, just +# those that are typically built on a fully configured system. 
+# +# [ Feature tests not mentioned here have to be built explicitly in +# the rule that uses them - an example for that is the 'bionic' +# feature check. ] +# +CORE_FEATURE_TESTS = \ + backtrace \ + dwarf \ + fortify-source \ + glibc \ + gtk2 \ + gtk2-infobar \ + libaudit \ + libbfd \ + libelf \ + libelf-getphdrnum \ + libelf-mmap \ + libnuma \ + libperl \ + libpython \ + libpython-version \ + libslang \ + libunwind \ + stackprotector-all \ + timerfd \ + libdw-dwarf-unwind + +LIB_FEATURE_TESTS = \ + dwarf \ + glibc \ + gtk2 \ + libaudit \ + libbfd \ + libelf \ + libnuma \ + libperl \ + libpython \ + libslang \ + libunwind \ + libdw-dwarf-unwind + +VF_FEATURE_TESTS = \ + backtrace \ + fortify-source \ + gtk2-infobar \ + libelf-getphdrnum \ + libelf-mmap \ + libpython-version \ + stackprotector-all \ + timerfd \ + libunwind-debug-frame \ + bionic \ + liberty \ + liberty-z \ + cplus-demangle + +# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features. +# If in the future we need per-feature checks/flags for features not +# mentioned in this list we need to refactor this ;-). +set_test_all_flags = $(eval $(set_test_all_flags_code)) +define set_test_all_flags_code + FEATURE_CHECK_CFLAGS-all += $(FEATURE_CHECK_CFLAGS-$(1)) + FEATURE_CHECK_LDFLAGS-all += $(FEATURE_CHECK_LDFLAGS-$(1)) +endef + +$(foreach feat,$(CORE_FEATURE_TESTS),$(call set_test_all_flags,$(feat))) + +# +# Special fast-path for the 'all features are available' case: +# +$(call feature_check,all,$(MSG)) + +# +# Just in case the build freshly failed, make sure we print the +# feature matrix: +# +ifeq ($(feature-all), 1) + # + # test-all.c passed - just set all the core feature flags to 1: + # + $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_set,$(feat))) +else + $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C config/feature-checks $(addsuffix .bin,$(CORE_FEATURE_TESTS)) >/dev/null 2>&1) + $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat))) +endif + +ifeq ($(feature-stackprotector-all), 1) + CFLAGS += -fstack-protector-all +endif + +ifeq ($(DEBUG),0) + ifeq ($(feature-fortify-source), 1) + CFLAGS += -D_FORTIFY_SOURCE=2 + endif +endif + +CFLAGS += -I$(src-perf)/util/include +CFLAGS += -I$(src-perf)/arch/$(ARCH)/include +CFLAGS += -I$(srctree)/tools/include/ +CFLAGS += -I$(srctree)/arch/$(ARCH)/include/uapi +CFLAGS += -I$(srctree)/arch/$(ARCH)/include +CFLAGS += -I$(srctree)/include/uapi +CFLAGS += -I$(srctree)/include + +# $(obj-perf) for generated common-cmds.h +# $(obj-perf)/util for generated bison/flex headers +ifneq ($(OUTPUT),) +CFLAGS += -I$(obj-perf)/util +CFLAGS += -I$(obj-perf) +endif + +CFLAGS += -I$(src-perf)/util +CFLAGS += -I$(src-perf) +CFLAGS += -I$(LIB_INCLUDE) + +CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE + +ifndef NO_BIONIC + $(call feature_check,bionic) + ifeq ($(feature-bionic), 1) + BIONIC := 1 + EXTLIBS := $(filter-out -lrt,$(EXTLIBS)) + EXTLIBS := $(filter-out -lpthread,$(EXTLIBS)) + endif +endif + +ifdef NO_LIBELF + NO_DWARF := 1 + NO_DEMANGLE := 1 + NO_LIBUNWIND := 1 + NO_LIBDW_DWARF_UNWIND := 1 +else + ifeq ($(feature-libelf), 0) + ifeq ($(feature-glibc), 1) + LIBC_SUPPORT := 1 + endif + ifeq ($(BIONIC),1) + LIBC_SUPPORT := 1 + endif + ifeq ($(LIBC_SUPPORT),1) + msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev); + + NO_LIBELF := 1 + NO_DWARF := 1 + NO_DEMANGLE := 1 + NO_LIBUNWIND := 1 + NO_LIBDW_DWARF_UNWIND := 1 + else + ifneq ($(filter s% 
-static%,$(LDFLAGS),),) + msg := $(error No static glibc found, please install glibc-static); + else + msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); + endif + endif + else + ifndef NO_LIBDW_DWARF_UNWIND + ifneq ($(feature-libdw-dwarf-unwind),1) + NO_LIBDW_DWARF_UNWIND := 1 + msg := $(warning No libdw DWARF unwind found, Please install elfutils-devel/libdw-dev >= 0.158 and/or set LIBDW_DIR); + endif + endif + ifneq ($(feature-dwarf), 1) + msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev); + NO_DWARF := 1 + endif # Dwarf support + endif # libelf support +endif # NO_LIBELF + +ifndef NO_LIBELF + CFLAGS += -DHAVE_LIBELF_SUPPORT + + ifeq ($(feature-libelf-mmap), 1) + CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT + endif + + ifeq ($(feature-libelf-getphdrnum), 1) + CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT + endif + + # include ARCH specific config + -include $(src-perf)/arch/$(ARCH)/Makefile + + ifndef NO_DWARF + ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) + msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); + NO_DWARF := 1 + else + CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) + LDFLAGS += $(LIBDW_LDFLAGS) + EXTLIBS += -lelf -ldw + endif # PERF_HAVE_DWARF_REGS + endif # NO_DWARF +endif # NO_LIBELF + +ifndef NO_LIBUNWIND + ifneq ($(feature-libunwind), 1) + msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR); + NO_LIBUNWIND := 1 + endif +endif + +dwarf-post-unwind := 1 +dwarf-post-unwind-text := BUG + +# setup DWARF post unwinder +ifdef NO_LIBUNWIND + ifdef NO_LIBDW_DWARF_UNWIND + msg := $(warning Disabling post unwind, no support found.); + dwarf-post-unwind := 0 + else + dwarf-post-unwind-text := libdw + endif +else + dwarf-post-unwind-text := libunwind + # Enable libunwind support by default. + ifndef NO_LIBDW_DWARF_UNWIND + NO_LIBDW_DWARF_UNWIND := 1 + endif +endif + +ifeq ($(dwarf-post-unwind),1) + CFLAGS += -DHAVE_DWARF_UNWIND_SUPPORT +else + NO_DWARF_UNWIND := 1 +endif + +ifndef NO_LIBUNWIND + ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) + $(call feature_check,libunwind-debug-frame) + ifneq ($(feature-libunwind-debug-frame), 1) + msg := $(warning No debug_frame support found in libunwind); + CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME + endif + else + # non-ARM has no dwarf_find_debug_frame() function: + CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME + endif + CFLAGS += -DHAVE_LIBUNWIND_SUPPORT + EXTLIBS += $(LIBUNWIND_LIBS) + CFLAGS += $(LIBUNWIND_CFLAGS) + LDFLAGS += $(LIBUNWIND_LDFLAGS) +endif + +ifndef NO_LIBAUDIT + ifneq ($(feature-libaudit), 1) + msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev); + NO_LIBAUDIT := 1 + else + CFLAGS += -DHAVE_LIBAUDIT_SUPPORT + EXTLIBS += -laudit + endif +endif + +ifdef NO_NEWT + NO_SLANG=1 +endif + +ifndef NO_SLANG + ifneq ($(feature-libslang), 1) + msg := $(warning slang not found, disables TUI support. Please install slang-devel or libslang-dev); + NO_SLANG := 1 + else + # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h + CFLAGS += -I/usr/include/slang + CFLAGS += -DHAVE_SLANG_SUPPORT + EXTLIBS += -lslang + endif +endif + +ifndef NO_GTK2 + FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) + ifneq ($(feature-gtk2), 1) + msg := $(warning GTK2 not found, disables GTK2 support. 
Please install gtk2-devel or libgtk2.0-dev); + NO_GTK2 := 1 + else + ifeq ($(feature-gtk2-infobar), 1) + GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT + endif + CFLAGS += -DHAVE_GTK2_SUPPORT + GTK_CFLAGS += $(shell $(PKG_CONFIG) --cflags gtk+-2.0 2>/dev/null) + GTK_LIBS := $(shell $(PKG_CONFIG) --libs gtk+-2.0 2>/dev/null) + EXTLIBS += -ldl + endif +endif + +grep-libs = $(filter -l%,$(1)) +strip-libs = $(filter-out -l%,$(1)) + +ifdef NO_LIBPERL + CFLAGS += -DNO_LIBPERL +else + PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) + PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) + PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) + PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` + FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) + + ifneq ($(feature-libperl), 1) + CFLAGS += -DNO_LIBPERL + NO_LIBPERL := 1 + msg := $(warning Missing perl devel files. Disabling perl scripting support, consider installing perl-ExtUtils-Embed); + else + LDFLAGS += $(PERL_EMBED_LDFLAGS) + EXTLIBS += $(PERL_EMBED_LIBADD) + endif +endif + +ifeq ($(feature-timerfd), 1) + CFLAGS += -DHAVE_TIMERFD_SUPPORT +else + msg := $(warning No timerfd support. Disables 'perf kvm stat live'); +endif + +disable-python = $(eval $(disable-python_code)) +define disable-python_code + CFLAGS += -DNO_LIBPYTHON + $(if $(1),$(warning No $(1) was found)) + $(warning Python support will not be built) + NO_LIBPYTHON := 1 +endef + +override PYTHON := \ + $(call get-executable-or-default,PYTHON,python) + +ifndef PYTHON + $(call disable-python,python interpreter) +else + + PYTHON_WORD := $(call shell-wordify,$(PYTHON)) + + ifdef NO_LIBPYTHON + $(call disable-python) + else + + override PYTHON_CONFIG := \ + $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) + + ifndef PYTHON_CONFIG + $(call disable-python,python-config tool) + else + + PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) + + PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) + PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) + FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + + ifneq ($(feature-libpython), 1) + $(call disable-python,Python.h (for Python 2.x)) + else + + ifneq ($(feature-libpython-version), 1) + $(warning Python 3 is not yet supported; please set) + $(warning PYTHON and/or PYTHON_CONFIG appropriately.) 
+ $(warning If you also have Python 2 installed, then) + $(warning try something like:) + $(warning $(and ,)) + $(warning $(and ,) make PYTHON=python2) + $(warning $(and ,)) + $(warning Otherwise, disable Python support entirely:) + $(warning $(and ,)) + $(warning $(and ,) make NO_LIBPYTHON=1) + $(warning $(and ,)) + $(error $(and ,)) + else + LDFLAGS += $(PYTHON_EMBED_LDFLAGS) + EXTLIBS += $(PYTHON_EMBED_LIBADD) + LANG_BINDINGS += $(obj-perf)python/perf.so + endif + endif + endif + endif +endif + +ifeq ($(feature-libbfd), 1) + EXTLIBS += -lbfd + + # call all detections now so we get correct + # status in VF output + $(call feature_check,liberty) + $(call feature_check,liberty-z) + $(call feature_check,cplus-demangle) + + ifeq ($(feature-liberty), 1) + EXTLIBS += -liberty + else + ifeq ($(feature-liberty-z), 1) + EXTLIBS += -liberty -lz + endif + endif +endif + +ifdef NO_DEMANGLE + CFLAGS += -DNO_DEMANGLE +else + ifdef HAVE_CPLUS_DEMANGLE_SUPPORT + EXTLIBS += -liberty + CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT + else + ifneq ($(feature-libbfd), 1) + ifneq ($(feature-liberty), 1) + ifneq ($(feature-liberty-z), 1) + # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT + # or any of 'bfd iberty z' trinity + ifeq ($(feature-cplus-demangle), 1) + EXTLIBS += -liberty + CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT + else + msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling) + CFLAGS += -DNO_DEMANGLE + endif + endif + endif + endif + endif +endif + +ifneq ($(filter -lbfd,$(EXTLIBS)),) + CFLAGS += -DHAVE_LIBBFD_SUPPORT +endif + +ifndef NO_BACKTRACE + ifeq ($(feature-backtrace), 1) + CFLAGS += -DHAVE_BACKTRACE_SUPPORT + endif +endif + +ifndef NO_LIBNUMA + ifeq ($(feature-libnuma), 0) + msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); + NO_LIBNUMA := 1 + else + CFLAGS += -DHAVE_LIBNUMA_SUPPORT + EXTLIBS += -lnuma + endif +endif + +# Among the variables below, these: +# perfexecdir +# template_dir +# mandir +# infodir +# htmldir +# ETC_PERFCONFIG (but not sysconfdir) +# can be specified as a relative path some/where/else; +# this is interpreted as relative to $(prefix) and "perf" at +# runtime figures out where they are based on the path to the executable. +# This can help installing the suite in a relocatable way. 
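[Editorial aside, not part of the patch: the comment above says that paths such as perfexecdir may be given relative to $(prefix) and that perf locates them at run time from the path of the executable. The following is a minimal sketch of that general idea only, assuming Linux and /proc/self/exe; it is not taken from the perf sources, and the helper name is hypothetical.]

	/*
	 * Illustrative only: resolve a relative directory such as
	 * "libexec/perf-core" against the directory that contains the
	 * running executable. Assumes Linux (/proc/self/exe); this is
	 * a sketch of the relocation idea, not perf's implementation.
	 */
	#include <libgen.h>
	#include <limits.h>
	#include <stdio.h>
	#include <unistd.h>

	static int resolve_exec_relative(const char *relpath, char *out, size_t len)
	{
		char exe[PATH_MAX];
		ssize_t n = readlink("/proc/self/exe", exe, sizeof(exe) - 1);

		if (n < 0)
			return -1;
		exe[n] = '\0';

		/* dirname() may modify its argument, so pass it a private copy */
		snprintf(out, len, "%s/%s", dirname(exe), relpath);
		return 0;
	}

	int main(void)
	{
		char path[PATH_MAX];

		if (!resolve_exec_relative("libexec/perf-core", path, sizeof(path)))
			printf("%s\n", path);
		return 0;
	}
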
+ +# Make the path relative to DESTDIR, not to prefix +ifndef DESTDIR +prefix ?= $(HOME) +endif +bindir_relative = bin +bindir = $(prefix)/$(bindir_relative) +mandir = share/man +infodir = share/info +perfexecdir = libexec/perf-core +sharedir = $(prefix)/share +template_dir = share/perf-core/templates +htmldir = share/doc/perf-doc +ifeq ($(prefix),/usr) +sysconfdir = /etc +ETC_PERFCONFIG = $(sysconfdir)/perfconfig +else +sysconfdir = $(prefix)/etc +ETC_PERFCONFIG = etc/perfconfig +endif +ifeq ($(IS_X86_64),1) +lib = lib64 +else +lib = lib +endif +libdir = $(prefix)/$(lib) + +# Shell quote (do not use $(call) to accommodate ancient setups); +ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG)) +DESTDIR_SQ = $(subst ','\'',$(DESTDIR)) +bindir_SQ = $(subst ','\'',$(bindir)) +mandir_SQ = $(subst ','\'',$(mandir)) +infodir_SQ = $(subst ','\'',$(infodir)) +perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) +template_dir_SQ = $(subst ','\'',$(template_dir)) +htmldir_SQ = $(subst ','\'',$(htmldir)) +prefix_SQ = $(subst ','\'',$(prefix)) +sysconfdir_SQ = $(subst ','\'',$(sysconfdir)) +libdir_SQ = $(subst ','\'',$(libdir)) + +ifneq ($(filter /%,$(firstword $(perfexecdir))),) +perfexec_instdir = $(perfexecdir) +else +perfexec_instdir = $(prefix)/$(perfexecdir) +endif +perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir)) + +# If we install to $(HOME) we keep the traceevent default: +# $(HOME)/.traceevent/plugins +# Otherwise we install plugins into the global $(libdir). +ifdef DESTDIR +plugindir=$(libdir)/traceevent/plugins +plugindir_SQ= $(subst ','\'',$(plugindir)) +endif + +# +# Print the result of the feature test: +# +feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG)) + +define feature_print_status_code + ifeq ($(feature-$(1)), 1) + MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1)) + else + MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1)) + endif +endef + +feature_print_var = $(eval $(feature_print_var_code)) $(info $(MSG)) +define feature_print_var_code + MSG = $(shell printf '...%30s: %s' $(1) $($(1))) +endef + +feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG)) +define feature_print_text_code + MSG = $(shell printf '...%30s: %s' $(1) $(2)) +endef + +PERF_FEATURES := $(foreach feat,$(LIB_FEATURE_TESTS),feature-$(feat)($(feature-$(feat)))) +PERF_FEATURES_FILE := $(shell touch $(OUTPUT)PERF-FEATURES; cat $(OUTPUT)PERF-FEATURES) + +ifeq ($(dwarf-post-unwind),1) + PERF_FEATURES += dwarf-post-unwind($(dwarf-post-unwind-text)) +endif + +# The $(display_lib) controls the default detection message +# output. 
It's set if: +# - detected features differes from stored features from +# last build (in PERF-FEATURES file) +# - one of the $(LIB_FEATURE_TESTS) is not detected +# - VF is enabled + +ifneq ("$(PERF_FEATURES)","$(PERF_FEATURES_FILE)") + $(shell echo "$(PERF_FEATURES)" > $(OUTPUT)PERF-FEATURES) + display_lib := 1 +endif + +feature_check = $(eval $(feature_check_code)) +define feature_check_code + ifneq ($(feature-$(1)), 1) + display_lib := 1 + endif +endef + +$(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_check,$(feat))) + +ifeq ($(VF),1) + display_lib := 1 + display_vf := 1 +endif + +ifeq ($(display_lib),1) + $(info ) + $(info Auto-detecting system features:) + $(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_print_status,$(feat),)) + + ifeq ($(dwarf-post-unwind),1) + $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text)) + endif +endif + +ifeq ($(display_vf),1) + $(foreach feat,$(VF_FEATURE_TESTS),$(call feature_print_status,$(feat),)) + $(info ) + $(call feature_print_var,prefix) + $(call feature_print_var,bindir) + $(call feature_print_var,libdir) + $(call feature_print_var,sysconfdir) + $(call feature_print_var,LIBUNWIND_DIR) + $(call feature_print_var,LIBDW_DIR) +endif + +ifeq ($(display_lib),1) + $(info ) +endif diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch new file mode 100644 index 00000000000..4b06719ee98 --- /dev/null +++ b/tools/perf/config/Makefile.arch @@ -0,0 +1,23 @@ + +uname_M := $(shell uname -m 2>/dev/null || echo not) + +ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ + -e s/arm.*/arm/ -e s/sa110/arm/ \ + -e s/s390x/s390/ -e s/parisc64/parisc/ \ + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ + -e s/tile.*/tile/ ) + +# Additional ARCH settings for x86 +ifeq ($(ARCH),i386) + override ARCH := x86 +endif + +ifeq ($(ARCH),x86_64) + override ARCH := x86 + IS_X86_64 := 0 + ifeq (, $(findstring m32,$(CFLAGS))) + IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1) + RAW_ARCH := x86_64 + endif +endif diff --git a/tools/perf/config/feature-checks/.gitignore b/tools/perf/config/feature-checks/.gitignore new file mode 100644 index 00000000000..80f3da0c351 --- /dev/null +++ b/tools/perf/config/feature-checks/.gitignore @@ -0,0 +1,2 @@ +*.d +*.bin diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile new file mode 100644 index 00000000000..64c84e5f051 --- /dev/null +++ b/tools/perf/config/feature-checks/Makefile @@ -0,0 +1,149 @@ + +FILES= \ + test-all.bin \ + test-backtrace.bin \ + test-bionic.bin \ + test-dwarf.bin \ + test-fortify-source.bin \ + test-glibc.bin \ + test-gtk2.bin \ + test-gtk2-infobar.bin \ + test-hello.bin \ + test-libaudit.bin \ + test-libbfd.bin \ + test-liberty.bin \ + test-liberty-z.bin \ + test-cplus-demangle.bin \ + test-libelf.bin \ + test-libelf-getphdrnum.bin \ + test-libelf-mmap.bin \ + test-libnuma.bin \ + test-libperl.bin \ + test-libpython.bin \ + test-libpython-version.bin \ + test-libslang.bin \ + test-libunwind.bin \ + test-libunwind-debug-frame.bin \ + test-stackprotector-all.bin \ + test-timerfd.bin \ + test-libdw-dwarf-unwind.bin + +CC := $(CROSS_COMPILE)gcc -MD +PKG_CONFIG := $(CROSS_COMPILE)pkg-config + +all: $(FILES) + +BUILD = $(CC) $(CFLAGS) -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS) + +############################### + +test-all.bin: + $(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit 
-I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl + +test-hello.bin: + $(BUILD) + +test-stackprotector-all.bin: + $(BUILD) -Werror -fstack-protector-all + +test-fortify-source.bin: + $(BUILD) -O2 -Werror -D_FORTIFY_SOURCE=2 + +test-bionic.bin: + $(BUILD) + +test-libelf.bin: + $(BUILD) -lelf + +test-glibc.bin: + $(BUILD) + +test-dwarf.bin: + $(BUILD) -ldw + +test-libelf-mmap.bin: + $(BUILD) -lelf + +test-libelf-getphdrnum.bin: + $(BUILD) -lelf + +test-libnuma.bin: + $(BUILD) -lnuma + +test-libunwind.bin: + $(BUILD) -lelf + +test-libunwind-debug-frame.bin: + $(BUILD) -lelf + +test-libaudit.bin: + $(BUILD) -laudit + +test-libslang.bin: + $(BUILD) -I/usr/include/slang -lslang + +test-gtk2.bin: + $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) + +test-gtk2-infobar.bin: + $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) + +grep-libs = $(filter -l%,$(1)) +strip-libs = $(filter-out -l%,$(1)) + +PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) +PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) +PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) +PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` +FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) + +test-libperl.bin: + $(BUILD) $(FLAGS_PERL_EMBED) + +override PYTHON := python +override PYTHON_CONFIG := python-config + +escape-for-shell-sq = $(subst ','\'',$(1)) +shell-sq = '$(escape-for-shell-sq)' + +PYTHON_CONFIG_SQ = $(call shell-sq,$(PYTHON_CONFIG)) + +PYTHON_EMBED_LDOPTS = $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) +PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) +PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) +PYTHON_EMBED_CCOPTS = $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) +FLAGS_PYTHON_EMBED = $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + +test-libpython.bin: + $(BUILD) $(FLAGS_PYTHON_EMBED) + +test-libpython-version.bin: + $(BUILD) $(FLAGS_PYTHON_EMBED) + +test-libbfd.bin: + $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl + +test-liberty.bin: + $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty + +test-liberty-z.bin: + $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz + +test-cplus-demangle.bin: + $(BUILD) -liberty + +test-backtrace.bin: + $(BUILD) + +test-timerfd.bin: + $(BUILD) + +test-libdw-dwarf-unwind.bin: + $(BUILD) + +-include *.d + +############################### + +clean: + rm -f $(FILES) *.d diff --git a/tools/perf/config/feature-checks/test-all.c b/tools/perf/config/feature-checks/test-all.c new file mode 100644 index 00000000000..fe5c1e5c952 --- /dev/null +++ b/tools/perf/config/feature-checks/test-all.c @@ -0,0 +1,116 @@ +/* + * test-all.c: Try to build all the main testcases at once. + * + * A well-configured system will have all the prereqs installed, so we can speed + * up auto-detection on such systems. 
+ */ + +/* + * Quirk: Python and Perl headers cannot be in arbitrary places, so keep + * these 3 testcases at the top: + */ +#define main main_test_libpython +# include "test-libpython.c" +#undef main + +#define main main_test_libpython_version +# include "test-libpython-version.c" +#undef main + +#define main main_test_libperl +# include "test-libperl.c" +#undef main + +#define main main_test_hello +# include "test-hello.c" +#undef main + +#define main main_test_libelf +# include "test-libelf.c" +#undef main + +#define main main_test_libelf_mmap +# include "test-libelf-mmap.c" +#undef main + +#define main main_test_glibc +# include "test-glibc.c" +#undef main + +#define main main_test_dwarf +# include "test-dwarf.c" +#undef main + +#define main main_test_libelf_getphdrnum +# include "test-libelf-getphdrnum.c" +#undef main + +#define main main_test_libunwind +# include "test-libunwind.c" +#undef main + +#define main main_test_libaudit +# include "test-libaudit.c" +#undef main + +#define main main_test_libslang +# include "test-libslang.c" +#undef main + +#define main main_test_gtk2 +# include "test-gtk2.c" +#undef main + +#define main main_test_gtk2_infobar +# include "test-gtk2-infobar.c" +#undef main + +#define main main_test_libbfd +# include "test-libbfd.c" +#undef main + +#define main main_test_backtrace +# include "test-backtrace.c" +#undef main + +#define main main_test_libnuma +# include "test-libnuma.c" +#undef main + +#define main main_test_timerfd +# include "test-timerfd.c" +#undef main + +#define main main_test_stackprotector_all +# include "test-stackprotector-all.c" +#undef main + +#define main main_test_libdw_dwarf_unwind +# include "test-libdw-dwarf-unwind.c" +#undef main + +int main(int argc, char *argv[]) +{ + main_test_libpython(); + main_test_libpython_version(); + main_test_libperl(); + main_test_hello(); + main_test_libelf(); + main_test_libelf_mmap(); + main_test_glibc(); + main_test_dwarf(); + main_test_libelf_getphdrnum(); + main_test_libunwind(); + main_test_libaudit(); + main_test_libslang(); + main_test_gtk2(argc, argv); + main_test_gtk2_infobar(argc, argv); + main_test_libbfd(); + main_test_backtrace(); + main_test_libnuma(); + main_test_timerfd(); + main_test_stackprotector_all(); + main_test_libdw_dwarf_unwind(); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-backtrace.c b/tools/perf/config/feature-checks/test-backtrace.c new file mode 100644 index 00000000000..7124aa1dc8f --- /dev/null +++ b/tools/perf/config/feature-checks/test-backtrace.c @@ -0,0 +1,13 @@ +#include <execinfo.h> +#include <stdio.h> + +int main(void) +{ + void *backtrace_fns[10]; + size_t entries; + + entries = backtrace(backtrace_fns, 10); + backtrace_symbols_fd(backtrace_fns, entries, 1); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-bionic.c b/tools/perf/config/feature-checks/test-bionic.c new file mode 100644 index 00000000000..eac24e9513e --- /dev/null +++ b/tools/perf/config/feature-checks/test-bionic.c @@ -0,0 +1,6 @@ +#include <android/api-level.h> + +int main(void) +{ + return __ANDROID_API__; +} diff --git a/tools/perf/config/feature-checks/test-cplus-demangle.c b/tools/perf/config/feature-checks/test-cplus-demangle.c new file mode 100644 index 00000000000..610c686e000 --- /dev/null +++ b/tools/perf/config/feature-checks/test-cplus-demangle.c @@ -0,0 +1,14 @@ +extern int printf(const char *format, ...); +extern char *cplus_demangle(const char *, int); + +int main(void) +{ + char symbol[4096] = "FieldName__9ClassNameFd"; + char *tmp; + + 
tmp = cplus_demangle(symbol, 0); + + printf("demangled symbol: {%s}\n", tmp); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-dwarf.c b/tools/perf/config/feature-checks/test-dwarf.c new file mode 100644 index 00000000000..3fc1801ce4a --- /dev/null +++ b/tools/perf/config/feature-checks/test-dwarf.c @@ -0,0 +1,10 @@ +#include <dwarf.h> +#include <elfutils/libdw.h> +#include <elfutils/version.h> + +int main(void) +{ + Dwarf *dbg = dwarf_begin(0, DWARF_C_READ); + + return (long)dbg; +} diff --git a/tools/perf/config/feature-checks/test-fortify-source.c b/tools/perf/config/feature-checks/test-fortify-source.c new file mode 100644 index 00000000000..c9f398d8786 --- /dev/null +++ b/tools/perf/config/feature-checks/test-fortify-source.c @@ -0,0 +1,6 @@ +#include <stdio.h> + +int main(void) +{ + return puts("hi"); +} diff --git a/tools/perf/config/feature-checks/test-glibc.c b/tools/perf/config/feature-checks/test-glibc.c new file mode 100644 index 00000000000..b0820345cd9 --- /dev/null +++ b/tools/perf/config/feature-checks/test-glibc.c @@ -0,0 +1,8 @@ +#include <gnu/libc-version.h> + +int main(void) +{ + const char *version = gnu_get_libc_version(); + + return (long)version; +} diff --git a/tools/perf/config/feature-checks/test-gtk2-infobar.c b/tools/perf/config/feature-checks/test-gtk2-infobar.c new file mode 100644 index 00000000000..397b4646d06 --- /dev/null +++ b/tools/perf/config/feature-checks/test-gtk2-infobar.c @@ -0,0 +1,11 @@ +#pragma GCC diagnostic ignored "-Wstrict-prototypes" +#include <gtk/gtk.h> +#pragma GCC diagnostic error "-Wstrict-prototypes" + +int main(int argc, char *argv[]) +{ + gtk_init(&argc, &argv); + gtk_info_bar_new(); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-gtk2.c b/tools/perf/config/feature-checks/test-gtk2.c new file mode 100644 index 00000000000..6bd80e50943 --- /dev/null +++ b/tools/perf/config/feature-checks/test-gtk2.c @@ -0,0 +1,10 @@ +#pragma GCC diagnostic ignored "-Wstrict-prototypes" +#include <gtk/gtk.h> +#pragma GCC diagnostic error "-Wstrict-prototypes" + +int main(int argc, char *argv[]) +{ + gtk_init(&argc, &argv); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-hello.c b/tools/perf/config/feature-checks/test-hello.c new file mode 100644 index 00000000000..c9f398d8786 --- /dev/null +++ b/tools/perf/config/feature-checks/test-hello.c @@ -0,0 +1,6 @@ +#include <stdio.h> + +int main(void) +{ + return puts("hi"); +} diff --git a/tools/perf/config/feature-checks/test-libaudit.c b/tools/perf/config/feature-checks/test-libaudit.c new file mode 100644 index 00000000000..afc019f0864 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libaudit.c @@ -0,0 +1,10 @@ +#include <libaudit.h> + +extern int printf(const char *format, ...); + +int main(void) +{ + printf("error message: %s\n", audit_errno_to_name(0)); + + return audit_open(); +} diff --git a/tools/perf/config/feature-checks/test-libbfd.c b/tools/perf/config/feature-checks/test-libbfd.c new file mode 100644 index 00000000000..24059907e99 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libbfd.c @@ -0,0 +1,15 @@ +#include <bfd.h> + +extern int printf(const char *format, ...); + +int main(void) +{ + char symbol[4096] = "FieldName__9ClassNameFd"; + char *tmp; + + tmp = bfd_demangle(0, symbol, 0); + + printf("demangled symbol: {%s}\n", tmp); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c b/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c new file mode 100644 index 
00000000000..f676a3ff442 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c @@ -0,0 +1,13 @@ + +#include <elfutils/libdwfl.h> + +int main(void) +{ + /* + * This function is guarded via: __nonnull_attribute__ (1, 2). + * Passing '1' as arguments value. This code is never executed, + * only compiled. + */ + dwfl_thread_getframes((void *) 1, (void *) 1, NULL); + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libelf-getphdrnum.c b/tools/perf/config/feature-checks/test-libelf-getphdrnum.c new file mode 100644 index 00000000000..d710459306c --- /dev/null +++ b/tools/perf/config/feature-checks/test-libelf-getphdrnum.c @@ -0,0 +1,8 @@ +#include <libelf.h> + +int main(void) +{ + size_t dst; + + return elf_getphdrnum(0, &dst); +} diff --git a/tools/perf/config/feature-checks/test-libelf-mmap.c b/tools/perf/config/feature-checks/test-libelf-mmap.c new file mode 100644 index 00000000000..564427d7ef1 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libelf-mmap.c @@ -0,0 +1,8 @@ +#include <libelf.h> + +int main(void) +{ + Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0); + + return (long)elf; +} diff --git a/tools/perf/config/feature-checks/test-libelf.c b/tools/perf/config/feature-checks/test-libelf.c new file mode 100644 index 00000000000..08db322d895 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libelf.c @@ -0,0 +1,8 @@ +#include <libelf.h> + +int main(void) +{ + Elf *elf = elf_begin(0, ELF_C_READ, 0); + + return (long)elf; +} diff --git a/tools/perf/config/feature-checks/test-libnuma.c b/tools/perf/config/feature-checks/test-libnuma.c new file mode 100644 index 00000000000..4763d9cd587 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libnuma.c @@ -0,0 +1,9 @@ +#include <numa.h> +#include <numaif.h> + +int main(void) +{ + numa_available(); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libperl.c b/tools/perf/config/feature-checks/test-libperl.c new file mode 100644 index 00000000000..8871f6a0fdb --- /dev/null +++ b/tools/perf/config/feature-checks/test-libperl.c @@ -0,0 +1,9 @@ +#include <EXTERN.h> +#include <perl.h> + +int main(void) +{ + perl_alloc(); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libpython-version.c b/tools/perf/config/feature-checks/test-libpython-version.c new file mode 100644 index 00000000000..facea122d81 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libpython-version.c @@ -0,0 +1,10 @@ +#include <Python.h> + +#if PY_VERSION_HEX >= 0x03000000 + #error +#endif + +int main(void) +{ + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libpython.c b/tools/perf/config/feature-checks/test-libpython.c new file mode 100644 index 00000000000..b24b28ad632 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libpython.c @@ -0,0 +1,8 @@ +#include <Python.h> + +int main(void) +{ + Py_Initialize(); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libslang.c b/tools/perf/config/feature-checks/test-libslang.c new file mode 100644 index 00000000000..22ff22ed94d --- /dev/null +++ b/tools/perf/config/feature-checks/test-libslang.c @@ -0,0 +1,6 @@ +#include <slang.h> + +int main(void) +{ + return SLsmg_init_smg(); +} diff --git a/tools/perf/config/feature-checks/test-libunwind-debug-frame.c b/tools/perf/config/feature-checks/test-libunwind-debug-frame.c new file mode 100644 index 00000000000..0ef8087a104 --- /dev/null +++ b/tools/perf/config/feature-checks/test-libunwind-debug-frame.c @@ -0,0 +1,16 @@ +#include <libunwind.h> +#include 
<stdlib.h> + +extern int +UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug, + unw_word_t ip, unw_word_t segbase, + const char *obj_name, unw_word_t start, + unw_word_t end); + +#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame) + +int main(void) +{ + dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0); + return 0; +} diff --git a/tools/perf/config/feature-checks/test-libunwind.c b/tools/perf/config/feature-checks/test-libunwind.c new file mode 100644 index 00000000000..43b9369bcab --- /dev/null +++ b/tools/perf/config/feature-checks/test-libunwind.c @@ -0,0 +1,27 @@ +#include <libunwind.h> +#include <stdlib.h> + +extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +static unw_accessors_t accessors; + +int main(void) +{ + unw_addr_space_t addr_space; + + addr_space = unw_create_addr_space(&accessors, 0); + if (addr_space) + return 0; + + unw_init_remote(NULL, addr_space, NULL); + dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); + + return 0; +} diff --git a/tools/perf/config/feature-checks/test-stackprotector-all.c b/tools/perf/config/feature-checks/test-stackprotector-all.c new file mode 100644 index 00000000000..c9f398d8786 --- /dev/null +++ b/tools/perf/config/feature-checks/test-stackprotector-all.c @@ -0,0 +1,6 @@ +#include <stdio.h> + +int main(void) +{ + return puts("hi"); +} diff --git a/tools/perf/config/feature-checks/test-timerfd.c b/tools/perf/config/feature-checks/test-timerfd.c new file mode 100644 index 00000000000..8c5c083b4d3 --- /dev/null +++ b/tools/perf/config/feature-checks/test-timerfd.c @@ -0,0 +1,18 @@ +/* + * test for timerfd functions used by perf-kvm-stat-live + */ +#include <sys/timerfd.h> + +int main(void) +{ + struct itimerspec new_value; + + int fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); + if (fd < 0) + return 1; + + if (timerfd_settime(fd, 0, &new_value, NULL) != 0) + return 1; + + return 0; +} diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak deleted file mode 100644 index 4add41bb0c7..00000000000 --- a/tools/perf/config/feature-tests.mak +++ /dev/null @@ -1,206 +0,0 @@ -define SOURCE_HELLO -#include <stdio.h> -int main(void) -{ - return puts(\"hi\"); -} -endef - -ifndef NO_DWARF -define SOURCE_DWARF -#include <dwarf.h> -#include <elfutils/libdw.h> -#include <elfutils/version.h> -#ifndef _ELFUTILS_PREREQ -#error -#endif - -int main(void) -{ - Dwarf *dbg = dwarf_begin(0, DWARF_C_READ); - return (long)dbg; -} -endef -endif - -define SOURCE_LIBELF -#include <libelf.h> - -int main(void) -{ - Elf *elf = elf_begin(0, ELF_C_READ, 0); - return (long)elf; -} -endef - -define SOURCE_GLIBC -#include <gnu/libc-version.h> - -int main(void) -{ - const char *version = gnu_get_libc_version(); - return (long)version; -} -endef - -define SOURCE_ELF_MMAP -#include <libelf.h> -int main(void) -{ - Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0); - return (long)elf; -} -endef - -ifndef NO_NEWT -define SOURCE_NEWT -#include <newt.h> - -int main(void) -{ - newtInit(); - newtCls(); - return newtFinished(); -} -endef -endif - -ifndef NO_GTK2 -define SOURCE_GTK2 -#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" -#include <gtk/gtk.h> -#pragma GCC diagnostic error \"-Wstrict-prototypes\" - -int main(int argc, char *argv[]) -{ - gtk_init(&argc, &argv); - - return 0; -} -endef - -define SOURCE_GTK2_INFOBAR 
-#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" -#include <gtk/gtk.h> -#pragma GCC diagnostic error \"-Wstrict-prototypes\" - -int main(void) -{ - gtk_info_bar_new(); - - return 0; -} -endef -endif - -ifndef NO_LIBPERL -define SOURCE_PERL_EMBED -#include <EXTERN.h> -#include <perl.h> - -int main(void) -{ -perl_alloc(); -return 0; -} -endef -endif - -ifndef NO_LIBPYTHON -define SOURCE_PYTHON_VERSION -#include <Python.h> -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif -int main(void){} -endef -define SOURCE_PYTHON_EMBED -#include <Python.h> -int main(void) -{ - Py_Initialize(); - return 0; -} -endef -endif - -define SOURCE_BFD -#include <bfd.h> - -int main(void) -{ - bfd_demangle(0, 0, 0); - return 0; -} -endef - -define SOURCE_CPLUS_DEMANGLE -extern char *cplus_demangle(const char *, int); - -int main(void) -{ - cplus_demangle(0, 0); - return 0; -} -endef - -define SOURCE_STRLCPY -#include <stdlib.h> -extern size_t strlcpy(char *dest, const char *src, size_t size); - -int main(void) -{ - strlcpy(NULL, NULL, 0); - return 0; -} -endef - -ifndef NO_LIBUNWIND -define SOURCE_LIBUNWIND -#include <libunwind.h> -#include <stdlib.h> - -extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, - unw_word_t ip, - unw_dyn_info_t *di, - unw_proc_info_t *pi, - int need_unwind_info, void *arg); - - -#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) - -int main(void) -{ - unw_addr_space_t addr_space; - addr_space = unw_create_addr_space(NULL, 0); - unw_init_remote(NULL, addr_space, NULL); - dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); - return 0; -} -endef -endif - -ifndef NO_BACKTRACE -define SOURCE_BACKTRACE -#include <execinfo.h> -#include <stdio.h> - -int main(void) -{ - backtrace(NULL, 0); - backtrace_symbols(NULL, 0); - return 0; -} -endef -endif - -ifndef NO_LIBAUDIT -define SOURCE_LIBAUDIT -#include <libaudit.h> - -int main(void) -{ - return audit_open(); -} -endef -endif
\ No newline at end of file diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak index 8046182a19e..4d985e0f03f 100644 --- a/tools/perf/config/utilities.mak +++ b/tools/perf/config/utilities.mak @@ -13,7 +13,7 @@ newline := $(newline) # what should replace a newline when escaping # newlines; the default is a bizarre string. # -nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) +nl-escape = $(if $(1),$(1),m822df3020w6a44id34bt574ctac44eb9f4n) # escape-nl # @@ -175,14 +175,6 @@ _ge-abspath = $(if $(is-executable),$(1)) define get-executable-or-default $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) endef -_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) +_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2))) _gea_warn = $(warning The path '$(1)' is not executable.) _gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) - -# try-cc -# Usage: option = $(call try-cc, source-to-build, cc-options) -try-cc = $(shell sh -c \ - 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ - echo "$(1)" | \ - $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ - rm -f "$$TMP"') diff --git a/tools/perf/design.txt b/tools/perf/design.txt index 67e5d0cace8..a28dca2582a 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt @@ -18,7 +18,7 @@ underlying hardware counters. Performance counters are accessed via special file descriptors. There's one file descriptor per virtual counter used. -The special file descriptor is opened via the perf_event_open() +The special file descriptor is opened via the sys_perf_event_open() system call: int sys_perf_event_open(struct perf_event_attr *hw_event_uptr, @@ -82,7 +82,7 @@ machine-specific. If 'raw_type' is 0, then the 'type' field says what kind of counter this is, with the following encoding: -enum perf_event_types { +enum perf_type_id { PERF_TYPE_HARDWARE = 0, PERF_TYPE_SOFTWARE = 1, PERF_TYPE_TRACEPOINT = 2, @@ -95,7 +95,7 @@ specified by 'event_id': * Generalized performance counter event types, used by the hw_event.event_id * parameter of the sys_perf_event_open() syscall: */ -enum hw_event_ids { +enum perf_hw_id { /* * Common hardware events, generalized by the kernel: */ @@ -129,7 +129,7 @@ software events, selected by 'event_id': * physical and sw events of the kernel (and allow the profiling of them as * well): */ -enum sw_event_ids { +enum perf_sw_ids { PERF_COUNT_SW_CPU_CLOCK = 0, PERF_COUNT_SW_TASK_CLOCK = 1, PERF_COUNT_SW_PAGE_FAULTS = 2, @@ -230,7 +230,7 @@ these events are recorded in the ring-buffer (see below). The 'comm' bit allows tracking of process comm data on process creation. This too is recorded in the ring-buffer (see below). -The 'pid' parameter to the perf_event_open() system call allows the +The 'pid' parameter to the sys_perf_event_open() system call allows the counter to be specific to a task: pid == 0: if the pid parameter is zero, the counter is attached to the @@ -260,7 +260,7 @@ The 'flags' parameter is currently unused and must be zero. The 'group_fd' parameter allows counter "groups" to be set up. A counter group has one counter which is the group "leader". The leader -is created first, with group_fd = -1 in the perf_event_open call +is created first, with group_fd = -1 in the sys_perf_event_open call that creates it. The rest of the group members are created subsequently, with group_fd giving the fd of the group leader. 
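[Editorial aside, not part of the patch: to make the group_fd convention described above concrete, here is a minimal, hedged sketch of opening a two-counter group on the current task via the raw syscall (pid == 0, cpu == -1), assuming a Linux system with <linux/perf_event.h>. It only illustrates the leader/member ordering; error handling is omitted.]

	/*
	 * Illustrative sketch of the group_fd convention: the leader is
	 * opened with group_fd == -1, each member passes the leader's fd.
	 * Not part of this patch.
	 */
	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				       int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		int leader, member;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* group leader: created first, with group_fd == -1 */
		leader = sys_perf_event_open(&attr, 0, -1, -1, 0);

		/* group member: pass the leader's fd as group_fd */
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		member = sys_perf_event_open(&attr, 0, -1, leader, 0);

		return (leader < 0 || member < 0) ? 1 : 0;
	}
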
(A single counter on its own is created with group_fd = -1 and is @@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you will need at least this: - asm/perf_event.h - a basic stub will suffice at first - support for atomic64 types (and associated helper functions) - - set_perf_event_pending() implemented If your architecture does have hardware capabilities, you can override the weak stub hw_perf_event_init() to register hardware counters. diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh new file mode 100644 index 00000000000..33569847fdc --- /dev/null +++ b/tools/perf/perf-completion.sh @@ -0,0 +1,206 @@ +# perf bash and zsh completion + +# Taken from git.git's completion script. +__my_reassemble_comp_words_by_ref() +{ + local exclude i j first + # Which word separators to exclude? + exclude="${1//[^$COMP_WORDBREAKS]}" + cword_=$COMP_CWORD + if [ -z "$exclude" ]; then + words_=("${COMP_WORDS[@]}") + return + fi + # List of word completion separators has shrunk; + # re-assemble words to complete. + for ((i=0, j=0; i < ${#COMP_WORDS[@]}; i++, j++)); do + # Append each nonempty word consisting of just + # word separator characters to the current word. + first=t + while + [ $i -gt 0 ] && + [ -n "${COMP_WORDS[$i]}" ] && + # word consists of excluded word separators + [ "${COMP_WORDS[$i]//[^$exclude]}" = "${COMP_WORDS[$i]}" ] + do + # Attach to the previous token, + # unless the previous token is the command name. + if [ $j -ge 2 ] && [ -n "$first" ]; then + ((j--)) + fi + first= + words_[$j]=${words_[j]}${COMP_WORDS[i]} + if [ $i = $COMP_CWORD ]; then + cword_=$j + fi + if (($i < ${#COMP_WORDS[@]} - 1)); then + ((i++)) + else + # Done. + return + fi + done + words_[$j]=${words_[j]}${COMP_WORDS[i]} + if [ $i = $COMP_CWORD ]; then + cword_=$j + fi + done +} + +type _get_comp_words_by_ref &>/dev/null || +_get_comp_words_by_ref() +{ + local exclude cur_ words_ cword_ + if [ "$1" = "-n" ]; then + exclude=$2 + shift 2 + fi + __my_reassemble_comp_words_by_ref "$exclude" + cur_=${words_[cword_]} + while [ $# -gt 0 ]; do + case "$1" in + cur) + cur=$cur_ + ;; + prev) + prev=${words_[$cword_-1]} + ;; + words) + words=("${words_[@]}") + ;; + cword) + cword=$cword_ + ;; + esac + shift + done +} + +type __ltrim_colon_completions &>/dev/null || +__ltrim_colon_completions() +{ + if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then + # Remove colon-word prefix from COMPREPLY items + local colon_word=${1%"${1##*:}"} + local i=${#COMPREPLY[*]} + while [[ $((--i)) -ge 0 ]]; do + COMPREPLY[$i]=${COMPREPLY[$i]#"$colon_word"} + done + fi +} + +__perfcomp () +{ + COMPREPLY=( $( compgen -W "$1" -- "$2" ) ) +} + +__perfcomp_colon () +{ + __perfcomp "$1" "$2" + __ltrim_colon_completions $cur +} + +__perf_main () +{ + local cmd + + cmd=${words[0]} + COMPREPLY=() + + # List perf subcommands or long options + if [ $cword -eq 1 ]; then + if [[ $cur == --* ]]; then + __perfcomp '--help --version \ + --exec-path --html-path --paginate --no-pager \ + --perf-dir --work-tree --debugfs-dir' -- "$cur" + else + cmds=$($cmd --list-cmds) + __perfcomp "$cmds" "$cur" + fi + # List possible events for -e option + elif [[ $prev == "-e" && "${words[1]}" == @(record|stat|top) ]]; then + evts=$($cmd list --raw-dump) + __perfcomp_colon "$evts" "$cur" + # List subcommands for perf commands + elif [[ $prev == @(kvm|kmem|mem|lock|sched) ]]; then + subcmds=$($cmd $prev --list-cmds) + __perfcomp_colon "$subcmds" "$cur" + # List long option names + elif [[ $cur == --* ]]; then + 
subcmd=${words[1]} + opts=$($cmd $subcmd --list-opts) + __perfcomp "$opts" "$cur" + fi +} + +if [[ -n ${ZSH_VERSION-} ]]; then + autoload -U +X compinit && compinit + + __perfcomp () + { + emulate -L zsh + + local c IFS=$' \t\n' + local -a array + + for c in ${=1}; do + case $c in + --*=*|*.) ;; + *) c="$c " ;; + esac + array[${#array[@]}+1]="$c" + done + + compset -P '*[=:]' + compadd -Q -S '' -a -- array && _ret=0 + } + + __perfcomp_colon () + { + emulate -L zsh + + local cur_="${2-$cur}" + local c IFS=$' \t\n' + local -a array + + if [[ "$cur_" == *:* ]]; then + local colon_word=${cur_%"${cur_##*:}"} + fi + + for c in ${=1}; do + case $c in + --*=*|*.) ;; + *) c="$c " ;; + esac + array[$#array+1]=${c#"$colon_word"} + done + + compset -P '*[=:]' + compadd -Q -S '' -a -- array && _ret=0 + } + + _perf () + { + local _ret=1 cur cword prev + cur=${words[CURRENT]} + prev=${words[CURRENT-1]} + let cword=CURRENT-1 + emulate ksh -c __perf_main + let _ret && _default && _ret=0 + return _ret + } + + compdef _perf perf + return +fi + +type perf &>/dev/null && +_perf() +{ + local cur words cword prev + _get_comp_words_by_ref -n =: cur words cword prev + __perf_main +} && + +complete -o bashdefault -o default -o nospace -F _perf perf 2>/dev/null \ + || complete -o default -o nospace -F _perf perf diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h new file mode 100644 index 00000000000..5268a1481d2 --- /dev/null +++ b/tools/perf/perf-sys.h @@ -0,0 +1,190 @@ +#ifndef _PERF_SYS_H +#define _PERF_SYS_H + +#include <unistd.h> +#include <sys/types.h> +#include <sys/syscall.h> +#include <linux/types.h> +#include <linux/perf_event.h> +#include <asm/unistd.h> + +#if defined(__i386__) +#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") +#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") +#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#define CPUINFO_PROC "model name" +#ifndef __NR_perf_event_open +# define __NR_perf_event_open 336 +#endif +#ifndef __NR_futex +# define __NR_futex 240 +#endif +#ifndef __NR_gettid +# define __NR_gettid 224 +#endif +#endif + +#if defined(__x86_64__) +#define mb() asm volatile("mfence" ::: "memory") +#define wmb() asm volatile("sfence" ::: "memory") +#define rmb() asm volatile("lfence" ::: "memory") +#define cpu_relax() asm volatile("rep; nop" ::: "memory"); +#define CPUINFO_PROC "model name" +#ifndef __NR_perf_event_open +# define __NR_perf_event_open 298 +#endif +#ifndef __NR_futex +# define __NR_futex 202 +#endif +#ifndef __NR_gettid +# define __NR_gettid 186 +#endif +#endif + +#ifdef __powerpc__ +#include "../../arch/powerpc/include/uapi/asm/unistd.h" +#define mb() asm volatile ("sync" ::: "memory") +#define wmb() asm volatile ("sync" ::: "memory") +#define rmb() asm volatile ("sync" ::: "memory") +#define CPUINFO_PROC "cpu" +#endif + +#ifdef __s390__ +#define mb() asm volatile("bcr 15,0" ::: "memory") +#define wmb() asm volatile("bcr 15,0" ::: "memory") +#define rmb() asm volatile("bcr 15,0" ::: "memory") +#endif + +#ifdef __sh__ +#if defined(__SH4A__) || defined(__SH5__) +# define mb() asm volatile("synco" ::: "memory") +# define wmb() asm volatile("synco" ::: "memory") +# define rmb() asm volatile("synco" ::: "memory") +#else +# define mb() asm volatile("" ::: "memory") +# define wmb() asm volatile("" ::: "memory") +# define rmb() asm volatile("" ::: "memory") +#endif +#define CPUINFO_PROC "cpu type" +#endif + +#ifdef __hppa__ +#define mb() asm volatile("" ::: 
"memory") +#define wmb() asm volatile("" ::: "memory") +#define rmb() asm volatile("" ::: "memory") +#define CPUINFO_PROC "cpu" +#endif + +#ifdef __sparc__ +#ifdef __LP64__ +#define mb() asm volatile("ba,pt %%xcc, 1f\n" \ + "membar #StoreLoad\n" \ + "1:\n":::"memory") +#else +#define mb() asm volatile("":::"memory") +#endif +#define wmb() asm volatile("":::"memory") +#define rmb() asm volatile("":::"memory") +#define CPUINFO_PROC "cpu" +#endif + +#ifdef __alpha__ +#define mb() asm volatile("mb" ::: "memory") +#define wmb() asm volatile("wmb" ::: "memory") +#define rmb() asm volatile("mb" ::: "memory") +#define CPUINFO_PROC "cpu model" +#endif + +#ifdef __ia64__ +#define mb() asm volatile ("mf" ::: "memory") +#define wmb() asm volatile ("mf" ::: "memory") +#define rmb() asm volatile ("mf" ::: "memory") +#define cpu_relax() asm volatile ("hint @pause" ::: "memory") +#define CPUINFO_PROC "model name" +#endif + +#ifdef __arm__ +/* + * Use the __kuser_memory_barrier helper in the CPU helper page. See + * arch/arm/kernel/entry-armv.S in the kernel source for details. + */ +#define mb() ((void(*)(void))0xffff0fa0)() +#define wmb() ((void(*)(void))0xffff0fa0)() +#define rmb() ((void(*)(void))0xffff0fa0)() +#define CPUINFO_PROC "Processor" +#endif + +#ifdef __aarch64__ +#define mb() asm volatile("dmb ish" ::: "memory") +#define wmb() asm volatile("dmb ishst" ::: "memory") +#define rmb() asm volatile("dmb ishld" ::: "memory") +#define cpu_relax() asm volatile("yield" ::: "memory") +#endif + +#ifdef __mips__ +#define mb() asm volatile( \ + ".set mips2\n\t" \ + "sync\n\t" \ + ".set mips0" \ + : /* no output */ \ + : /* no input */ \ + : "memory") +#define wmb() mb() +#define rmb() mb() +#define CPUINFO_PROC "cpu model" +#endif + +#ifdef __arc__ +#define mb() asm volatile("" ::: "memory") +#define wmb() asm volatile("" ::: "memory") +#define rmb() asm volatile("" ::: "memory") +#define CPUINFO_PROC "Processor" +#endif + +#ifdef __metag__ +#define mb() asm volatile("" ::: "memory") +#define wmb() asm volatile("" ::: "memory") +#define rmb() asm volatile("" ::: "memory") +#define CPUINFO_PROC "CPU" +#endif + +#ifdef __xtensa__ +#define mb() asm volatile("memw" ::: "memory") +#define wmb() asm volatile("memw" ::: "memory") +#define rmb() asm volatile("" ::: "memory") +#define CPUINFO_PROC "core ID" +#endif + +#ifdef __tile__ +#define mb() asm volatile ("mf" ::: "memory") +#define wmb() asm volatile ("mf" ::: "memory") +#define rmb() asm volatile ("mf" ::: "memory") +#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory") +#define CPUINFO_PROC "model name" +#endif + +#define barrier() asm volatile ("" ::: "memory") + +#ifndef cpu_relax +#define cpu_relax() barrier() +#endif + +static inline int +sys_perf_event_open(struct perf_event_attr *attr, + pid_t pid, int cpu, int group_fd, + unsigned long flags) +{ + int fd; + + fd = syscall(__NR_perf_event_open, attr, pid, cpu, + group_fd, flags); + +#ifdef HAVE_ATTR_TEST + if (unlikely(test_attr__enabled)) + test_attr__open(attr, pid, cpu, fd, group_fd, flags); +#endif + return fd; +} + +#endif /* _PERF_SYS_H */ diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 6d50eb0b425..95c58fc1528 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -13,7 +13,7 @@ #include "util/quote.h" #include "util/run-command.h" #include "util/parse-events.h" -#include "util/debugfs.h" +#include <api/fs/debugfs.h> #include <pthread.h> const char perf_usage_string[] = @@ -24,6 +24,7 @@ const char perf_more_info_string[] = int use_browser = -1; static int use_pager = 
-1; +const char *input_name; struct cmd_struct { const char *cmd; @@ -48,17 +49,18 @@ static struct cmd_struct commands[] = { { "version", cmd_version, 0 }, { "script", cmd_script, 0 }, { "sched", cmd_sched, 0 }, -#ifdef LIBELF_SUPPORT +#ifdef HAVE_LIBELF_SUPPORT { "probe", cmd_probe, 0 }, #endif { "kmem", cmd_kmem, 0 }, { "lock", cmd_lock, 0 }, { "kvm", cmd_kvm, 0 }, { "test", cmd_test, 0 }, -#ifdef LIBAUDIT_SUPPORT +#ifdef HAVE_LIBAUDIT_SUPPORT { "trace", cmd_trace, 0 }, #endif { "inject", cmd_inject, 0 }, + { "mem", cmd_mem, 0 }, }; struct pager_config { @@ -84,21 +86,26 @@ int check_pager_config(const char *cmd) return c.val; } -static int tui_command_config(const char *var, const char *value, void *data) +static int browser_command_config(const char *var, const char *value, void *data) { struct pager_config *c = data; if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd)) c->val = perf_config_bool(var, value); + if (!prefixcmp(var, "gtk.") && !strcmp(var + 4, c->cmd)) + c->val = perf_config_bool(var, value) ? 2 : 0; return 0; } -/* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */ -static int check_tui_config(const char *cmd) +/* + * returns 0 for "no tui", 1 for "use tui", 2 for "use gtk", + * and -1 for "not specified" + */ +static int check_browser_config(const char *cmd) { struct pager_config c; c.cmd = cmd; c.val = -1; - perf_config(tui_command_config, &c); + perf_config(browser_command_config, &c); return c.val; } @@ -187,13 +194,13 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) fprintf(stderr, "No directory given for --debugfs-dir.\n"); usage(perf_usage_string); } - debugfs_set_path((*argv)[1]); + perf_debugfs_set_path((*argv)[1]); if (envchanged) *envchanged = 1; (*argv)++; (*argc)--; } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { - debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR)); + perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR)); fprintf(stderr, "dir: %s\n", debugfs_mountpoint); if (envchanged) *envchanged = 1; @@ -301,7 +308,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) prefix = NULL; /* setup_perf_directory(); */ if (use_browser == -1) - use_browser = check_tui_config(p->cmd); + use_browser = check_browser_config(p->cmd); if (use_pager == -1 && p->option & RUN_SETUP) use_pager = check_pager_config(p->cmd); @@ -322,14 +329,23 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) return 0; + status = 1; /* Check for ENOSPC and EIO errors.. */ - if (fflush(stdout)) - die("write failure on standard output: %s", strerror(errno)); - if (ferror(stdout)) - die("unknown write failure on standard output"); - if (fclose(stdout)) - die("close failed on standard output: %s", strerror(errno)); - return 0; + if (fflush(stdout)) { + fprintf(stderr, "write failure on standard output: %s", strerror(errno)); + goto out; + } + if (ferror(stdout)) { + fprintf(stderr, "unknown write failure on standard output"); + goto out; + } + if (fclose(stdout)) { + fprintf(stderr, "close failed on standard output: %s", strerror(errno)); + goto out; + } + status = 0; +out: + return status; } static void handle_internal_command(int argc, const char **argv) @@ -440,11 +456,15 @@ int main(int argc, const char **argv) { const char *cmd; + /* The page_size is placed in util object. 
*/ + page_size = sysconf(_SC_PAGE_SIZE); + cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE); + cmd = perf_extract_argv0_path(argv[0]); if (!cmd) cmd = "perf-help"; /* get debugfs mount point from /proc/mounts */ - debugfs_mount(NULL); + perf_debugfs_mount(NULL); /* * "perf-xxxx" is the same as "perf xxxx", but we obviously: * @@ -459,9 +479,21 @@ int main(int argc, const char **argv) cmd += 5; argv[0] = cmd; handle_internal_command(argc, argv); - die("cannot handle %s internally", cmd); + fprintf(stderr, "cannot handle %s internally", cmd); + goto out; + } + if (!prefixcmp(cmd, "trace")) { +#ifdef HAVE_LIBAUDIT_SUPPORT + set_buildid_dir(); + setup_path(); + argv[0] = "trace"; + return cmd_trace(argc, argv, NULL); +#else + fprintf(stderr, + "trace command not available: missing audit-libs devel package at build time.\n"); + goto out; +#endif } - /* Look for flags.. */ argv++; argc--; @@ -477,10 +509,12 @@ int main(int argc, const char **argv) printf("\n usage: %s\n\n", perf_usage_string); list_common_cmds_help(); printf("\n %s\n\n", perf_more_info_string); - exit(1); + goto out; } cmd = argv[0]; + test_attr__init(); + /* * We use PATH to find perf commands, but we prepend some higher * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH @@ -497,9 +531,8 @@ int main(int argc, const char **argv) while (1) { static int done_help; - static int was_alias; + int was_alias = run_argv(&argc, &argv); - was_alias = run_argv(&argc, &argv); if (errno != ENOENT) break; @@ -507,7 +540,7 @@ int main(int argc, const char **argv) fprintf(stderr, "Expansion of alias '%s' failed; " "'%s' is not a perf-command\n", cmd, argv[0]); - exit(1); + goto out; } if (!done_help) { cmd = argv[0] = help_unknown_cmd(cmd); @@ -518,6 +551,6 @@ int main(int argc, const char **argv) fprintf(stderr, "Failed to run command '%s': %s\n", cmd, strerror(errno)); - +out: return 1; } diff --git a/tools/perf/perf.h b/tools/perf/perf.h index c50985eaec4..510c65f7285 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -1,157 +1,25 @@ #ifndef _PERF_PERF_H #define _PERF_PERF_H -struct winsize; - -void get_term_dimensions(struct winsize *ws); - -#if defined(__i386__) -#include "../../arch/x86/include/asm/unistd.h" -#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#define CPUINFO_PROC "model name" -#ifndef __NR_perf_event_open -# define __NR_perf_event_open 336 -#endif -#endif - -#if defined(__x86_64__) -#include "../../arch/x86/include/asm/unistd.h" -#define rmb() asm volatile("lfence" ::: "memory") -#define cpu_relax() asm volatile("rep; nop" ::: "memory"); -#define CPUINFO_PROC "model name" -#ifndef __NR_perf_event_open -# define __NR_perf_event_open 298 -#endif -#endif - -#ifdef __powerpc__ -#include "../../arch/powerpc/include/asm/unistd.h" -#define rmb() asm volatile ("sync" ::: "memory") -#define cpu_relax() asm volatile ("" ::: "memory"); -#define CPUINFO_PROC "cpu" -#endif - -#ifdef __s390__ -#include "../../arch/s390/include/asm/unistd.h" -#define rmb() asm volatile("bcr 15,0" ::: "memory") -#define cpu_relax() asm volatile("" ::: "memory"); -#endif - -#ifdef __sh__ -#include "../../arch/sh/include/asm/unistd.h" -#if defined(__SH4A__) || defined(__SH5__) -# define rmb() asm volatile("synco" ::: "memory") -#else -# define rmb() asm volatile("" ::: "memory") -#endif -#define cpu_relax() asm volatile("" ::: "memory") -#define CPUINFO_PROC "cpu type" -#endif - -#ifdef __hppa__ -#include "../../arch/parisc/include/asm/unistd.h" -#define 
rmb() asm volatile("" ::: "memory") -#define cpu_relax() asm volatile("" ::: "memory"); -#define CPUINFO_PROC "cpu" -#endif - -#ifdef __sparc__ -#include "../../arch/sparc/include/uapi/asm/unistd.h" -#define rmb() asm volatile("":::"memory") -#define cpu_relax() asm volatile("":::"memory") -#define CPUINFO_PROC "cpu" -#endif - -#ifdef __alpha__ -#include "../../arch/alpha/include/asm/unistd.h" -#define rmb() asm volatile("mb" ::: "memory") -#define cpu_relax() asm volatile("" ::: "memory") -#define CPUINFO_PROC "cpu model" -#endif - -#ifdef __ia64__ -#include "../../arch/ia64/include/asm/unistd.h" -#define rmb() asm volatile ("mf" ::: "memory") -#define cpu_relax() asm volatile ("hint @pause" ::: "memory") -#define CPUINFO_PROC "model name" -#endif - -#ifdef __arm__ -#include "../../arch/arm/include/asm/unistd.h" -/* - * Use the __kuser_memory_barrier helper in the CPU helper page. See - * arch/arm/kernel/entry-armv.S in the kernel source for details. - */ -#define rmb() ((void(*)(void))0xffff0fa0)() -#define cpu_relax() asm volatile("":::"memory") -#define CPUINFO_PROC "Processor" -#endif - -#ifdef __aarch64__ -#include "../../arch/arm64/include/asm/unistd.h" -#define rmb() asm volatile("dmb ld" ::: "memory") -#define cpu_relax() asm volatile("yield" ::: "memory") -#endif - -#ifdef __mips__ -#include "../../arch/mips/include/asm/unistd.h" -#define rmb() asm volatile( \ - ".set mips2\n\t" \ - "sync\n\t" \ - ".set mips0" \ - : /* no output */ \ - : /* no input */ \ - : "memory") -#define cpu_relax() asm volatile("" ::: "memory") -#define CPUINFO_PROC "cpu model" -#endif - #include <time.h> -#include <unistd.h> -#include <sys/types.h> -#include <sys/syscall.h> - -#include "../../include/uapi/linux/perf_event.h" -#include "util/types.h" #include <stdbool.h> +#include <linux/types.h> +#include <linux/perf_event.h> -struct perf_mmap { - void *base; - int mask; - unsigned int prev; -}; - -static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm) -{ - struct perf_event_mmap_page *pc = mm->base; - int head = pc->data_head; - rmb(); - return head; -} - -static inline void perf_mmap__write_tail(struct perf_mmap *md, - unsigned long tail) -{ - struct perf_event_mmap_page *pc = md->base; - - /* - * ensure all reads are done before we write the tail out. - */ - /* mb(); */ - pc->data_tail = tail; -} +extern bool test_attr__enabled; +void test_attr__init(void); +void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, + int fd, int group_fd, unsigned long flags); -/* - * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all - * counters in the current task. - */ -#define PR_TASK_PERF_EVENTS_DISABLE 31 -#define PR_TASK_PERF_EVENTS_ENABLE 32 +#define HAVE_ATTR_TEST +#include "perf-sys.h" #ifndef NSEC_PER_SEC # define NSEC_PER_SEC 1000000000ULL #endif +#ifndef NSEC_PER_USEC +# define NSEC_PER_USEC 1000ULL +#endif static inline unsigned long long rdclock(void) { @@ -161,53 +29,9 @@ static inline unsigned long long rdclock(void) return ts.tv_sec * 1000000000ULL + ts.tv_nsec; } -/* - * Pick up some kernel type conventions: - */ -#define __user -#define asmlinkage - -#define unlikely(x) __builtin_expect(!!(x), 0) -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? 
_min1 : _min2; }) - -static inline int -sys_perf_event_open(struct perf_event_attr *attr, - pid_t pid, int cpu, int group_fd, - unsigned long flags) -{ - return syscall(__NR_perf_event_open, attr, pid, cpu, - group_fd, flags); -} - -#define MAX_COUNTERS 256 #define MAX_NR_CPUS 256 -struct ip_callchain { - u64 nr; - u64 ips[0]; -}; - -struct branch_flags { - u64 mispred:1; - u64 predicted:1; - u64 reserved:62; -}; - -struct branch_entry { - u64 from; - u64 to; - struct branch_flags flags; -}; - -struct branch_stack { - u64 nr; - struct branch_entry entries[0]; -}; - +extern const char *input_name; extern bool perf_host, perf_guest; extern const char perf_version_string[]; @@ -215,26 +39,20 @@ void pthread__unblock_sigwinch(void); #include "util/target.h" -enum perf_call_graph_mode { - CALLCHAIN_NONE, - CALLCHAIN_FP, - CALLCHAIN_DWARF -}; - -struct perf_record_opts { - struct perf_target target; +struct record_opts { + struct target target; int call_graph; + bool call_graph_enabled; bool group; bool inherit_stat; - bool no_delay; + bool no_buffering; bool no_inherit; + bool no_inherit_set; bool no_samples; - bool pipe_output; bool raw_samples; bool sample_address; + bool sample_weight; bool sample_time; - bool sample_id_all_missing; - bool exclude_guest_missing; bool period; unsigned int freq; unsigned int mmap_pages; @@ -243,6 +61,8 @@ struct perf_record_opts { u64 default_interval; u64 user_interval; u16 stack_dump_size; + bool sample_transaction; + unsigned initial_delay; }; #endif diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py index b11cca58423..2225162ee1f 100755 --- a/tools/perf/python/twatch.py +++ b/tools/perf/python/twatch.py @@ -21,7 +21,7 @@ def main(): evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, - sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) + sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs index c1e2ed1ed34..8c7ea42444d 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs @@ -23,7 +23,7 @@ #include "perl.h" #include "XSUB.h" #include "../../../perf.h" -#include "../../../util/script-event.h" +#include "../../../util/trace-event.h" MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context PROTOTYPES: ENABLE diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record deleted file mode 100644 index 8edda9078d5..00000000000 --- a/tools/perf/scripts/perl/bin/workqueue-stats-record +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@ diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report deleted file mode 100644 index 6d91411d248..00000000000 --- a/tools/perf/scripts/perl/bin/workqueue-stats-report +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -# description: workqueue stats (ins/exe/create/destroy) -perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl index 4bb3ecd3347..8b20787021c 100644 --- 
a/tools/perf/scripts/perl/rwtop.pl +++ b/tools/perf/scripts/perl/rwtop.pl @@ -17,6 +17,7 @@ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; use lib "./Perf-Trace-Util/lib"; use Perf::Trace::Core; use Perf::Trace::Util; +use POSIX qw/SIGALRM SA_RESTART/; my $default_interval = 3; my $nlines = 20; @@ -90,7 +91,10 @@ sub syscalls::sys_enter_write sub trace_begin { - $SIG{ALRM} = \&set_print_pending; + my $sa = POSIX::SigAction->new(\&set_print_pending); + $sa->flags(SA_RESTART); + $sa->safe(1); + POSIX::sigaction(SIGALRM, $sa) or die "Can't set SIGALRM handler: $!\n"; alarm 1; } diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl deleted file mode 100644 index a8eaff5119e..00000000000 --- a/tools/perf/scripts/perl/workqueue-stats.pl +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/perl -w -# (c) 2009, Tom Zanussi <tzanussi@gmail.com> -# Licensed under the terms of the GNU GPL License version 2 - -# Displays workqueue stats -# -# Usage: -# -# perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e -# workqueue:workqueue_destruction -e workqueue:workqueue_execution -# -e workqueue:workqueue_insertion -# -# perf script -p -s tools/perf/scripts/perl/workqueue-stats.pl - -use 5.010000; -use strict; -use warnings; - -use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; -use lib "./Perf-Trace-Util/lib"; -use Perf::Trace::Core; -use Perf::Trace::Util; - -my @cpus; - -sub workqueue::workqueue_destruction -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid) = @_; - - $cpus[$common_cpu]{$thread_pid}{destroyed}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub workqueue::workqueue_creation -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid, $cpu) = @_; - - $cpus[$common_cpu]{$thread_pid}{created}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub workqueue::workqueue_execution -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid, $func) = @_; - - $cpus[$common_cpu]{$thread_pid}{executed}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub workqueue::workqueue_insertion -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm, - $thread_comm, $thread_pid, $func) = @_; - - $cpus[$common_cpu]{$thread_pid}{inserted}++; - $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; -} - -sub trace_end -{ - print "workqueue work stats:\n\n"; - my $cpu = 0; - printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name"); - printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----"); - foreach my $pidhash (@cpus) { - while ((my $pid, my $wqhash) = each %$pidhash) { - my $ins = $$wqhash{'inserted'} || 0; - my $exe = $$wqhash{'executed'} || 0; - my $comm = $$wqhash{'comm'} || ""; - if ($ins || $exe) { - printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm); - } - } - $cpu++; - } - - $cpu = 0; - print "\nworkqueue lifecycle stats:\n\n"; - printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name"); - printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----"); - foreach my $pidhash (@cpus) { - while ((my $pid, my $wqhash) = each %$pidhash) { - my $created = $$wqhash{'created'} || 0; - my $destroyed = $$wqhash{'destroyed'} || 0; - my $comm = $$wqhash{'comm'} || ""; - if ($created || 
$destroyed) { - printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed, - $comm); - } - } - $cpu++; - } - - print_unhandled(); -} - -my %unhandled; - -sub print_unhandled -{ - if ((scalar keys %unhandled) == 0) { - return; - } - - print "\nunhandled events:\n\n"; - - printf("%-40s %10s\n", "event", "count"); - printf("%-40s %10s\n", "----------------------------------------", - "-----------"); - - foreach my $event_name (keys %unhandled) { - printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); - } -} - -sub trace_unhandled -{ - my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, - $common_pid, $common_comm) = @_; - - $unhandled{$event_name}++; -} diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c index 315067b8f55..fcd1dd66790 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c @@ -25,7 +25,7 @@ PyMODINIT_FUNC initperf_trace_context(void); -static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args) +static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args) { static struct scripting_context *scripting_context; PyObject *context; @@ -40,7 +40,7 @@ static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args) return Py_BuildValue("i", retval); } -static PyObject *perf_trace_context_common_flags(PyObject *self, +static PyObject *perf_trace_context_common_flags(PyObject *obj, PyObject *args) { static struct scripting_context *scripting_context; @@ -56,7 +56,7 @@ static PyObject *perf_trace_context_common_flags(PyObject *self, return Py_BuildValue("i", retval); } -static PyObject *perf_trace_context_common_lock_depth(PyObject *self, +static PyObject *perf_trace_context_common_lock_depth(PyObject *obj, PyObject *args) { static struct scripting_context *scripting_context; diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py index a4ffc950002..b5740599aab 100755 --- a/tools/perf/scripts/python/net_dropmonitor.py +++ b/tools/perf/scripts/python/net_dropmonitor.py @@ -15,35 +15,38 @@ kallsyms = [] def get_kallsyms_table(): global kallsyms + try: f = open("/proc/kallsyms", "r") - linecount = 0 - for line in f: - linecount = linecount+1 - f.seek(0) except: return - - j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] - j = j +1 - if ((j % 100) == 0): - print "\r" + str(j) + "/" + str(linecount), - kallsyms.append({ 'loc': loc, 'name' : name}) - - print "\r" + str(j) + "/" + str(linecount) + kallsyms.append((loc, name)) kallsyms.sort() - return def get_sym(sloc): loc = int(sloc) - for i in kallsyms: - if (i['loc'] >= loc): - return (i['name'], i['loc']-loc) - return (None, 0) + + # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start + # kallsyms[i][0] > loc for all end <= i < len(kallsyms) + start, end = -1, len(kallsyms) + while end != start + 1: + pivot = (start + end) // 2 + if loc < kallsyms[pivot][0]: + end = pivot + else: + start = pivot + + # Now (start == -1 or kallsyms[start][0] <= loc) + # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0]) + if start >= 0: + symloc, name = kallsyms[start] + return (name, loc - symloc) + else: + return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") @@ -64,7 +67,7 @@ def trace_end(): # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, - 
skbaddr, protocol, location): + skbaddr, location, protocol): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c new file mode 100644 index 00000000000..2dfc9ad0e6f --- /dev/null +++ b/tools/perf/tests/attr.c @@ -0,0 +1,176 @@ +/* + * The struct perf_event_attr test support. + * + * This test is embedded inside into perf directly and is governed + * by the PERF_TEST_ATTR environment variable and hook inside + * sys_perf_event_open function. + * + * The general idea is to store 'struct perf_event_attr' details for + * each event created within single perf command. Each event details + * are stored into separate text file. Once perf command is finished + * these files can be checked for values we expect for command. + * + * Besides 'struct perf_event_attr' values we also store 'fd' and + * 'group_fd' values to allow checking for groups created. + * + * This all is triggered by setting PERF_TEST_ATTR environment variable. + * It must contain name of existing directory with access and write + * permissions. All the event text files are stored there. + */ + +#include <stdlib.h> +#include <stdio.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include "../perf.h" +#include "util.h" +#include "exec_cmd.h" +#include "tests.h" + +#define ENV "PERF_TEST_ATTR" + +extern int verbose; + +static char *dir; + +void test_attr__init(void) +{ + dir = getenv(ENV); + test_attr__enabled = (dir != NULL); +} + +#define BUFSIZE 1024 + +#define __WRITE_ASS(str, fmt, data) \ +do { \ + char buf[BUFSIZE]; \ + size_t size; \ + \ + size = snprintf(buf, BUFSIZE, #str "=%"fmt "\n", data); \ + if (1 != fwrite(buf, size, 1, file)) { \ + perror("test attr - failed to write event file"); \ + fclose(file); \ + return -1; \ + } \ + \ +} while (0) + +#define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field) + +static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu, + int fd, int group_fd, unsigned long flags) +{ + FILE *file; + char path[PATH_MAX]; + + snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir, + attr->type, attr->config, fd); + + file = fopen(path, "w+"); + if (!file) { + perror("test attr - failed to open event file"); + return -1; + } + + if (fprintf(file, "[event-%d-%llu-%d]\n", + attr->type, attr->config, fd) < 0) { + perror("test attr - failed to write event file"); + fclose(file); + return -1; + } + + /* syscall arguments */ + __WRITE_ASS(fd, "d", fd); + __WRITE_ASS(group_fd, "d", group_fd); + __WRITE_ASS(cpu, "d", cpu); + __WRITE_ASS(pid, "d", pid); + __WRITE_ASS(flags, "lu", flags); + + /* struct perf_event_attr */ + WRITE_ASS(type, PRIu32); + WRITE_ASS(size, PRIu32); + WRITE_ASS(config, "llu"); + WRITE_ASS(sample_period, "llu"); + WRITE_ASS(sample_type, "llu"); + WRITE_ASS(read_format, "llu"); + WRITE_ASS(disabled, "d"); + WRITE_ASS(inherit, "d"); + WRITE_ASS(pinned, "d"); + WRITE_ASS(exclusive, "d"); + WRITE_ASS(exclude_user, "d"); + WRITE_ASS(exclude_kernel, "d"); + WRITE_ASS(exclude_hv, "d"); + WRITE_ASS(exclude_idle, "d"); + WRITE_ASS(mmap, "d"); + WRITE_ASS(comm, "d"); + WRITE_ASS(freq, "d"); + WRITE_ASS(inherit_stat, "d"); + WRITE_ASS(enable_on_exec, "d"); + WRITE_ASS(task, "d"); + WRITE_ASS(watermark, "d"); + WRITE_ASS(precise_ip, "d"); + WRITE_ASS(mmap_data, "d"); + WRITE_ASS(sample_id_all, "d"); + WRITE_ASS(exclude_host, "d"); + WRITE_ASS(exclude_guest, "d"); + WRITE_ASS(exclude_callchain_kernel, "d"); + WRITE_ASS(exclude_callchain_user, "d"); + WRITE_ASS(wakeup_events, 
PRIu32); + WRITE_ASS(bp_type, PRIu32); + WRITE_ASS(config1, "llu"); + WRITE_ASS(config2, "llu"); + WRITE_ASS(branch_sample_type, "llu"); + WRITE_ASS(sample_regs_user, "llu"); + WRITE_ASS(sample_stack_user, PRIu32); + + fclose(file); + return 0; +} + +void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, + int fd, int group_fd, unsigned long flags) +{ + int errno_saved = errno; + + if (store_event(attr, pid, cpu, fd, group_fd, flags)) + die("test attr FAILED"); + + errno = errno_saved; +} + +static int run_dir(const char *d, const char *perf) +{ + char v[] = "-vvvvv"; + int vcnt = min(verbose, (int) sizeof(v) - 1); + char cmd[3*PATH_MAX]; + + if (verbose) + vcnt++; + + snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", + d, d, perf, vcnt, v); + + return system(cmd); +} + +int test__attr(void) +{ + struct stat st; + char path_perf[PATH_MAX]; + char path_dir[PATH_MAX]; + + /* First try developement tree tests. */ + if (!lstat("./tests", &st)) + return run_dir("./tests", "./perf"); + + /* Then installed path. */ + snprintf(path_dir, PATH_MAX, "%s/tests", perf_exec_path()); + snprintf(path_perf, PATH_MAX, "%s/perf", BINDIR); + + if (!lstat(path_dir, &st) && + !lstat(path_perf, &st)) + return run_dir(path_dir, path_perf); + + fprintf(stderr, " (omitted)"); + return 0; +} diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py new file mode 100644 index 00000000000..c9b4b6269b5 --- /dev/null +++ b/tools/perf/tests/attr.py @@ -0,0 +1,332 @@ +#! /usr/bin/python + +import os +import sys +import glob +import optparse +import tempfile +import logging +import shutil +import ConfigParser + +class Fail(Exception): + def __init__(self, test, msg): + self.msg = msg + self.test = test + def getMsg(self): + return '\'%s\' - %s' % (self.test.path, self.msg) + +class Unsup(Exception): + def __init__(self, test): + self.test = test + def getMsg(self): + return '\'%s\'' % self.test.path + +class Event(dict): + terms = [ + 'cpu', + 'flags', + 'type', + 'size', + 'config', + 'sample_period', + 'sample_type', + 'read_format', + 'disabled', + 'inherit', + 'pinned', + 'exclusive', + 'exclude_user', + 'exclude_kernel', + 'exclude_hv', + 'exclude_idle', + 'mmap', + 'comm', + 'freq', + 'inherit_stat', + 'enable_on_exec', + 'task', + 'watermark', + 'precise_ip', + 'mmap_data', + 'sample_id_all', + 'exclude_host', + 'exclude_guest', + 'exclude_callchain_kernel', + 'exclude_callchain_user', + 'wakeup_events', + 'bp_type', + 'config1', + 'config2', + 'branch_sample_type', + 'sample_regs_user', + 'sample_stack_user', + ] + + def add(self, data): + for key, val in data: + log.debug(" %s = %s" % (key, val)) + self[key] = val + + def __init__(self, name, data, base): + log.debug(" Event %s" % name); + self.name = name; + self.group = '' + self.add(base) + self.add(data) + + def compare_data(self, a, b): + # Allow multiple values in assignment separated by '|' + a_list = a.split('|') + b_list = b.split('|') + + for a_item in a_list: + for b_item in b_list: + if (a_item == b_item): + return True + elif (a_item == '*') or (b_item == '*'): + return True + + return False + + def equal(self, other): + for t in Event.terms: + log.debug(" [%s] %s %s" % (t, self[t], other[t])); + if not self.has_key(t) or not other.has_key(t): + return False + if not self.compare_data(self[t], other[t]): + return False + return True + + def diff(self, other): + for t in Event.terms: + if not self.has_key(t) or not other.has_key(t): + continue + if not self.compare_data(self[t], other[t]): + 
log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) + + +# Test file description needs to have following sections: +# [config] +# - just single instance in file +# - needs to specify: +# 'command' - perf command name +# 'args' - special command arguments +# 'ret' - expected command return value (0 by default) +# +# [eventX:base] +# - one or multiple instances in file +# - expected values assignments +class Test(object): + def __init__(self, path, options): + parser = ConfigParser.SafeConfigParser() + parser.read(path) + + log.warning("running '%s'" % path) + + self.path = path + self.test_dir = options.test_dir + self.perf = options.perf + self.command = parser.get('config', 'command') + self.args = parser.get('config', 'args') + + try: + self.ret = parser.get('config', 'ret') + except: + self.ret = 0 + + self.expect = {} + self.result = {} + log.debug(" loading expected events"); + self.load_events(path, self.expect) + + def is_event(self, name): + if name.find("event") == -1: + return False + else: + return True + + def load_events(self, path, events): + parser_event = ConfigParser.SafeConfigParser() + parser_event.read(path) + + # The event record section header contains 'event' word, + # optionaly followed by ':' allowing to load 'parent + # event' first as a base + for section in filter(self.is_event, parser_event.sections()): + + parser_items = parser_event.items(section); + base_items = {} + + # Read parent event if there's any + if (':' in section): + base = section[section.index(':') + 1:] + parser_base = ConfigParser.SafeConfigParser() + parser_base.read(self.test_dir + '/' + base) + base_items = parser_base.items('event') + + e = Event(section, parser_items, base_items) + events[section] = e + + def run_cmd(self, tempdir): + cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir, + self.perf, self.command, tempdir, self.args) + ret = os.WEXITSTATUS(os.system(cmd)) + + log.info(" '%s' ret %d " % (cmd, ret)) + + if ret != int(self.ret): + raise Unsup(self) + + def compare(self, expect, result): + match = {} + + log.debug(" compare"); + + # For each expected event find all matching + # events in result. Fail if there's not any. + for exp_name, exp_event in expect.items(): + exp_list = [] + log.debug(" matching [%s]" % exp_name) + for res_name, res_event in result.items(): + log.debug(" to [%s]" % res_name) + if (exp_event.equal(res_event)): + exp_list.append(res_name) + log.debug(" ->OK") + else: + log.debug(" ->FAIL"); + + log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) + + # we did not any matching event - fail + if (not exp_list): + exp_event.diff(res_event) + raise Fail(self, 'match failure'); + + match[exp_name] = exp_list + + # For each defined group in the expected events + # check we match the same group in the result. 
+ for exp_name, exp_event in expect.items(): + group = exp_event.group + + if (group == ''): + continue + + for res_name in match[exp_name]: + res_group = result[res_name].group + if res_group not in match[group]: + raise Fail(self, 'group failure') + + log.debug(" group: [%s] matches group leader %s" % + (exp_name, str(match[group]))) + + log.debug(" matched") + + def resolve_groups(self, events): + for name, event in events.items(): + group_fd = event['group_fd']; + if group_fd == '-1': + continue; + + for iname, ievent in events.items(): + if (ievent['fd'] == group_fd): + event.group = iname + log.debug('[%s] has group leader [%s]' % (name, iname)) + break; + + def run(self): + tempdir = tempfile.mkdtemp(); + + try: + # run the test script + self.run_cmd(tempdir); + + # load events expectation for the test + log.debug(" loading result events"); + for f in glob.glob(tempdir + '/event*'): + self.load_events(f, self.result); + + # resolve group_fd to event names + self.resolve_groups(self.expect); + self.resolve_groups(self.result); + + # do the expectation - results matching - both ways + self.compare(self.expect, self.result) + self.compare(self.result, self.expect) + + finally: + # cleanup + shutil.rmtree(tempdir) + + +def run_tests(options): + for f in glob.glob(options.test_dir + '/' + options.test): + try: + Test(f, options).run() + except Unsup, obj: + log.warning("unsupp %s" % obj.getMsg()) + +def setup_log(verbose): + global log + level = logging.CRITICAL + + if verbose == 1: + level = logging.WARNING + if verbose == 2: + level = logging.INFO + if verbose >= 3: + level = logging.DEBUG + + log = logging.getLogger('test') + log.setLevel(level) + ch = logging.StreamHandler() + ch.setLevel(level) + formatter = logging.Formatter('%(message)s') + ch.setFormatter(formatter) + log.addHandler(ch) + +USAGE = '''%s [OPTIONS] + -d dir # tests dir + -p path # perf binary + -t test # single test + -v # verbose level +''' % sys.argv[0] + +def main(): + parser = optparse.OptionParser(usage=USAGE) + + parser.add_option("-t", "--test", + action="store", type="string", dest="test") + parser.add_option("-d", "--test-dir", + action="store", type="string", dest="test_dir") + parser.add_option("-p", "--perf", + action="store", type="string", dest="perf") + parser.add_option("-v", "--verbose", + action="count", dest="verbose") + + options, args = parser.parse_args() + if args: + parser.error('FAILED wrong arguments %s' % ' '.join(args)) + return -1 + + setup_log(options.verbose) + + if not options.test_dir: + print 'FAILED no -d option specified' + sys.exit(-1) + + if not options.test: + options.test = 'test*' + + try: + run_tests(options) + + except Fail, obj: + print "FAILED %s" % obj.getMsg(); + sys.exit(-1) + + sys.exit(0) + +if __name__ == '__main__': + main() diff --git a/tools/perf/tests/attr/README b/tools/perf/tests/attr/README new file mode 100644 index 00000000000..430024f618f --- /dev/null +++ b/tools/perf/tests/attr/README @@ -0,0 +1,64 @@ +The struct perf_event_attr test (attr tests) support +==================================================== +This testing support is embedded into perf directly and is governed +by the PERF_TEST_ATTR environment variable and hook inside the +sys_perf_event_open function. + +The general idea is to store 'struct perf_event_attr' details for +each event created within single perf command. Each event details +are stored into separate text file. Once perf command is finished +these files are checked for values we expect for command. 
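The hook itself lives in perf-sys.h and tests/attr.c (both added in this series); the shape of the mechanism can be summarised by a toy, self-contained sketch in which every name (MY_TEST_ATTR, hook_record(), the file layout) is hypothetical rather than the real perf code: an environment variable names a writable directory, and whenever it is set each counter open is also dumped into a small per-event text file that a checker can read back afterwards.

#include <stdio.h>
#include <stdlib.h>

static const char *dump_dir;	/* directory named by the environment variable */

static void hook_init(void)
{
	dump_dir = getenv("MY_TEST_ATTR");	/* hypothetical variable name */
}

static void hook_record(int fd, int group_fd, unsigned long long config)
{
	char path[256];
	FILE *f;

	if (!dump_dir)
		return;		/* hook disabled: nothing to do */

	snprintf(path, sizeof(path), "%s/event-%llu-%d", dump_dir, config, fd);
	f = fopen(path, "w");
	if (!f)
		return;
	/* one "field=value" line per detail the checker wants to compare */
	fprintf(f, "[event-%llu-%d]\nfd=%d\ngroup_fd=%d\nconfig=%llu\n",
		config, fd, fd, group_fd, config);
	fclose(f);
}

int main(void)
{
	hook_init();
	hook_record(3, -1, 0);	/* pretend a counter was just opened on fd 3 */
	return 0;
}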
+ +The attr tests consist of following parts: + +tests/attr.c +------------ +This is the sys_perf_event_open hook implementation. The hook +is triggered when the PERF_TEST_ATTR environment variable is +defined. It must contain name of existing directory with access +and write permissions. + +For each sys_perf_event_open call event details are stored in +separate file. Besides 'struct perf_event_attr' values we also +store 'fd' and 'group_fd' values to allow checking for groups. + +tests/attr.py +------------- +This is the python script that does all the hard work. It reads +the test definition, executes it and checks results. + +tests/attr/ +----------- +Directory containing all attr test definitions. +Following tests are defined (with perf commands): + + perf record kill (test-record-basic) + perf record -b kill (test-record-branch-any) + perf record -j any kill (test-record-branch-filter-any) + perf record -j any_call kill (test-record-branch-filter-any_call) + perf record -j any_ret kill (test-record-branch-filter-any_ret) + perf record -j hv kill (test-record-branch-filter-hv) + perf record -j ind_call kill (test-record-branch-filter-ind_call) + perf record -j k kill (test-record-branch-filter-k) + perf record -j u kill (test-record-branch-filter-u) + perf record -c 123 kill (test-record-count) + perf record -d kill (test-record-data) + perf record -F 100 kill (test-record-freq) + perf record -g kill (test-record-graph-default) + perf record --call-graph dwarf kill (test-record-graph-dwarf) + perf record --call-graph fp kill (test-record-graph-fp) + perf record --group -e cycles,instructions kill (test-record-group) + perf record -e '{cycles,instructions}' kill (test-record-group1) + perf record -D kill (test-record-no-delay) + perf record -i kill (test-record-no-inherit) + perf record -n kill (test-record-no-samples) + perf record -c 100 -P kill (test-record-period) + perf record -R kill (test-record-raw) + perf stat -e cycles kill (test-stat-basic) + perf stat kill (test-stat-default) + perf stat -d kill (test-stat-detailed-1) + perf stat -dd kill (test-stat-detailed-2) + perf stat -ddd kill (test-stat-detailed-3) + perf stat --group -e cycles,instructions kill (test-stat-group) + perf stat -e '{cycles,instructions}' kill (test-stat-group1) + perf stat -i -e cycles kill (test-stat-no-inherit) diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record new file mode 100644 index 00000000000..e9bd6391f2a --- /dev/null +++ b/tools/perf/tests/attr/base-record @@ -0,0 +1,40 @@ +[event] +fd=1 +group_fd=-1 +flags=0 +cpu=* +type=0|1 +size=96 +config=0 +sample_period=4000 +sample_type=263 +read_format=0 +disabled=1 +inherit=1 +pinned=0 +exclusive=0 +exclude_user=0 +exclude_kernel=0 +exclude_hv=0 +exclude_idle=0 +mmap=1 +comm=1 +freq=1 +inherit_stat=0 +enable_on_exec=1 +task=0 +watermark=0 +precise_ip=0 +mmap_data=0 +sample_id_all=1 +exclude_host=0|1 +exclude_guest=0|1 +exclude_callchain_kernel=0 +exclude_callchain_user=0 +wakeup_events=0 +bp_type=0 +config1=0 +config2=0 +branch_sample_type=0 +sample_regs_user=0 +sample_stack_user=0 diff --git a/tools/perf/tests/attr/base-stat b/tools/perf/tests/attr/base-stat new file mode 100644 index 00000000000..91cd48b399f --- /dev/null +++ b/tools/perf/tests/attr/base-stat @@ -0,0 +1,40 @@ +[event] +fd=1 +group_fd=-1 +flags=0 +cpu=* +type=0 +size=96 +config=0 +sample_period=0 +sample_type=0 +read_format=3 +disabled=1 +inherit=1 +pinned=0 +exclusive=0 +exclude_user=0 +exclude_kernel=0 +exclude_hv=0 +exclude_idle=0 +mmap=0 +comm=0 
+freq=0 +inherit_stat=0 +enable_on_exec=1 +task=0 +watermark=0 +precise_ip=0 +mmap_data=0 +sample_id_all=0 +exclude_host=0|1 +exclude_guest=0|1 +exclude_callchain_kernel=0 +exclude_callchain_user=0 +wakeup_events=0 +bp_type=0 +config1=0 +config2=0 +branch_sample_type=0 +sample_regs_user=0 +sample_stack_user=0 diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0 new file mode 100644 index 00000000000..d6a7e43f61b --- /dev/null +++ b/tools/perf/tests/attr/test-record-C0 @@ -0,0 +1,13 @@ +[config] +command = record +args = -C 0 kill >/dev/null 2>&1 + +[event:base-record] +cpu=0 + +# no enable on exec for CPU attached +enable_on_exec=0 + +# PERF_SAMPLE_IP | PERF_SAMPLE_TID PERF_SAMPLE_TIME | # PERF_SAMPLE_PERIOD +# + PERF_SAMPLE_CPU added by -C 0 +sample_type=391 diff --git a/tools/perf/tests/attr/test-record-basic b/tools/perf/tests/attr/test-record-basic new file mode 100644 index 00000000000..55c0428370c --- /dev/null +++ b/tools/perf/tests/attr/test-record-basic @@ -0,0 +1,5 @@ +[config] +command = record +args = kill >/dev/null 2>&1 + +[event:base-record] diff --git a/tools/perf/tests/attr/test-record-branch-any b/tools/perf/tests/attr/test-record-branch-any new file mode 100644 index 00000000000..1421960ed4e --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-any @@ -0,0 +1,8 @@ +[config] +command = record +args = -b kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=8 diff --git a/tools/perf/tests/attr/test-record-branch-filter-any b/tools/perf/tests/attr/test-record-branch-filter-any new file mode 100644 index 00000000000..915c4df0e0c --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-any @@ -0,0 +1,8 @@ +[config] +command = record +args = -j any kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=8 diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_call b/tools/perf/tests/attr/test-record-branch-filter-any_call new file mode 100644 index 00000000000..8708dbd4f37 --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-any_call @@ -0,0 +1,8 @@ +[config] +command = record +args = -j any_call kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=16 diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_ret b/tools/perf/tests/attr/test-record-branch-filter-any_ret new file mode 100644 index 00000000000..0d3607a6dcb --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-any_ret @@ -0,0 +1,8 @@ +[config] +command = record +args = -j any_ret kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=32 diff --git a/tools/perf/tests/attr/test-record-branch-filter-hv b/tools/perf/tests/attr/test-record-branch-filter-hv new file mode 100644 index 00000000000..f25526740ce --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-hv @@ -0,0 +1,8 @@ +[config] +command = record +args = -j hv kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=8 diff --git a/tools/perf/tests/attr/test-record-branch-filter-ind_call b/tools/perf/tests/attr/test-record-branch-filter-ind_call new file mode 100644 index 00000000000..e862dd17912 --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-ind_call @@ -0,0 +1,8 @@ +[config] +command = record +args = -j ind_call kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 
+branch_sample_type=64 diff --git a/tools/perf/tests/attr/test-record-branch-filter-k b/tools/perf/tests/attr/test-record-branch-filter-k new file mode 100644 index 00000000000..182971e898f --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-k @@ -0,0 +1,8 @@ +[config] +command = record +args = -j k kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=8 diff --git a/tools/perf/tests/attr/test-record-branch-filter-u b/tools/perf/tests/attr/test-record-branch-filter-u new file mode 100644 index 00000000000..83449ef9e68 --- /dev/null +++ b/tools/perf/tests/attr/test-record-branch-filter-u @@ -0,0 +1,8 @@ +[config] +command = record +args = -j u kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=2311 +branch_sample_type=8 diff --git a/tools/perf/tests/attr/test-record-count b/tools/perf/tests/attr/test-record-count new file mode 100644 index 00000000000..2f841de56f6 --- /dev/null +++ b/tools/perf/tests/attr/test-record-count @@ -0,0 +1,8 @@ +[config] +command = record +args = -c 123 kill >/dev/null 2>&1 + +[event:base-record] +sample_period=123 +sample_type=7 +freq=0 diff --git a/tools/perf/tests/attr/test-record-data b/tools/perf/tests/attr/test-record-data new file mode 100644 index 00000000000..716e143b529 --- /dev/null +++ b/tools/perf/tests/attr/test-record-data @@ -0,0 +1,11 @@ +[config] +command = record +args = -d kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 + +# sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | +# PERF_SAMPLE_ADDR | PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC +sample_type=33039 +mmap_data=1 diff --git a/tools/perf/tests/attr/test-record-freq b/tools/perf/tests/attr/test-record-freq new file mode 100644 index 00000000000..600d0f8f258 --- /dev/null +++ b/tools/perf/tests/attr/test-record-freq @@ -0,0 +1,6 @@ +[config] +command = record +args = -F 100 kill >/dev/null 2>&1 + +[event:base-record] +sample_period=100 diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default new file mode 100644 index 00000000000..853597a9a8f --- /dev/null +++ b/tools/perf/tests/attr/test-record-graph-default @@ -0,0 +1,6 @@ +[config] +command = record +args = -g kill >/dev/null 2>&1 + +[event:base-record] +sample_type=295 diff --git a/tools/perf/tests/attr/test-record-graph-dwarf b/tools/perf/tests/attr/test-record-graph-dwarf new file mode 100644 index 00000000000..d6f324ea578 --- /dev/null +++ b/tools/perf/tests/attr/test-record-graph-dwarf @@ -0,0 +1,10 @@ +[config] +command = record +args = --call-graph dwarf -- kill >/dev/null 2>&1 + +[event:base-record] +sample_type=12583 +exclude_callchain_user=1 +sample_stack_user=8192 +# TODO different for each arch, no support for that now +sample_regs_user=* diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp new file mode 100644 index 00000000000..055e3bee799 --- /dev/null +++ b/tools/perf/tests/attr/test-record-graph-fp @@ -0,0 +1,6 @@ +[config] +command = record +args = --call-graph fp kill >/dev/null 2>&1 + +[event:base-record] +sample_type=295 diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group new file mode 100644 index 00000000000..57739cacdb2 --- /dev/null +++ b/tools/perf/tests/attr/test-record-group @@ -0,0 +1,20 @@ +[config] +command = record +args = --group -e cycles,instructions kill >/dev/null 2>&1 + +[event-1:base-record] +fd=1 +group_fd=-1 +sample_type=327 
+read_format=4 + +[event-2:base-record] +fd=2 +group_fd=1 +config=1 +sample_type=327 +read_format=4 +mmap=0 +comm=0 +enable_on_exec=0 +disabled=0 diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling new file mode 100644 index 00000000000..658f5d60c87 --- /dev/null +++ b/tools/perf/tests/attr/test-record-group-sampling @@ -0,0 +1,36 @@ +[config] +command = record +args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1 + +[event-1:base-record] +fd=1 +group_fd=-1 +sample_type=343 +read_format=12 +inherit=0 + +[event-2:base-record] +fd=2 +group_fd=1 + +# cache-misses +type=0 +config=3 + +# default | PERF_SAMPLE_READ +sample_type=343 + +# PERF_FORMAT_ID | PERF_FORMAT_GROUP +read_format=12 + +mmap=0 +comm=0 +enable_on_exec=0 +disabled=0 + +# inherit is disabled for group sampling +inherit=0 + +# sampling disabled +sample_freq=0 +sample_period=0 diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1 new file mode 100644 index 00000000000..c5548d054af --- /dev/null +++ b/tools/perf/tests/attr/test-record-group1 @@ -0,0 +1,21 @@ +[config] +command = record +args = -e '{cycles,instructions}' kill >/dev/null 2>&1 + +[event-1:base-record] +fd=1 +group_fd=-1 +sample_type=327 +read_format=4 + +[event-2:base-record] +fd=2 +group_fd=1 +type=0 +config=1 +sample_type=327 +read_format=4 +mmap=0 +comm=0 +enable_on_exec=0 +disabled=0 diff --git a/tools/perf/tests/attr/test-record-no-delay b/tools/perf/tests/attr/test-record-no-delay new file mode 100644 index 00000000000..f253b78cdbf --- /dev/null +++ b/tools/perf/tests/attr/test-record-no-delay @@ -0,0 +1,9 @@ +[config] +command = record +args = -D kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=263 +watermark=0 +wakeup_events=1 diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit new file mode 100644 index 00000000000..44edcb2edcd --- /dev/null +++ b/tools/perf/tests/attr/test-record-no-inherit @@ -0,0 +1,7 @@ +[config] +command = record +args = -i kill >/dev/null 2>&1 + +[event:base-record] +sample_type=263 +inherit=0 diff --git a/tools/perf/tests/attr/test-record-no-samples b/tools/perf/tests/attr/test-record-no-samples new file mode 100644 index 00000000000..d0141b2418b --- /dev/null +++ b/tools/perf/tests/attr/test-record-no-samples @@ -0,0 +1,6 @@ +[config] +command = record +args = -n kill >/dev/null 2>&1 + +[event:base-record] +sample_period=0 diff --git a/tools/perf/tests/attr/test-record-period b/tools/perf/tests/attr/test-record-period new file mode 100644 index 00000000000..8abc5314fc5 --- /dev/null +++ b/tools/perf/tests/attr/test-record-period @@ -0,0 +1,7 @@ +[config] +command = record +args = -c 100 -P kill >/dev/null 2>&1 + +[event:base-record] +sample_period=100 +freq=0 diff --git a/tools/perf/tests/attr/test-record-raw b/tools/perf/tests/attr/test-record-raw new file mode 100644 index 00000000000..4a8ef25b5f4 --- /dev/null +++ b/tools/perf/tests/attr/test-record-raw @@ -0,0 +1,7 @@ +[config] +command = record +args = -R kill >/dev/null 2>&1 + +[event:base-record] +sample_period=4000 +sample_type=1415 diff --git a/tools/perf/tests/attr/test-stat-C0 b/tools/perf/tests/attr/test-stat-C0 new file mode 100644 index 00000000000..aa835950751 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-C0 @@ -0,0 +1,9 @@ +[config] +command = stat +args = -e cycles -C 0 kill >/dev/null 2>&1 +ret = 1 + +[event:base-stat] +# events are enabled by default when attached to cpu 
+disabled=0 +enable_on_exec=0 diff --git a/tools/perf/tests/attr/test-stat-basic b/tools/perf/tests/attr/test-stat-basic new file mode 100644 index 00000000000..74e17881f2b --- /dev/null +++ b/tools/perf/tests/attr/test-stat-basic @@ -0,0 +1,6 @@ +[config] +command = stat +args = -e cycles kill >/dev/null 2>&1 +ret = 1 + +[event:base-stat] diff --git a/tools/perf/tests/attr/test-stat-default b/tools/perf/tests/attr/test-stat-default new file mode 100644 index 00000000000..19270f54c96 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-default @@ -0,0 +1,64 @@ +[config] +command = stat +args = kill >/dev/null 2>&1 +ret = 1 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK +[event1:base-stat] +fd=1 +type=1 +config=1 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES +[event2:base-stat] +fd=2 +type=1 +config=3 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS +[event3:base-stat] +fd=3 +type=1 +config=4 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS +[event4:base-stat] +fd=4 +type=1 +config=2 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES +[event5:base-stat] +fd=5 +type=0 +config=0 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND +[event6:base-stat] +fd=6 +type=0 +config=7 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND +[event7:base-stat] +fd=7 +type=0 +config=8 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS +[event8:base-stat] +fd=8 +type=0 +config=1 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS +[event9:base-stat] +fd=9 +type=0 +config=4 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES +[event10:base-stat] +fd=10 +type=0 +config=5 diff --git a/tools/perf/tests/attr/test-stat-detailed-1 b/tools/perf/tests/attr/test-stat-detailed-1 new file mode 100644 index 00000000000..51426b87153 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-detailed-1 @@ -0,0 +1,101 @@ +[config] +command = stat +args = -d kill >/dev/null 2>&1 +ret = 1 + + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK +[event1:base-stat] +fd=1 +type=1 +config=1 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES +[event2:base-stat] +fd=2 +type=1 +config=3 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS +[event3:base-stat] +fd=3 +type=1 +config=4 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS +[event4:base-stat] +fd=4 +type=1 +config=2 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES +[event5:base-stat] +fd=5 +type=0 +config=0 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND +[event6:base-stat] +fd=6 +type=0 +config=7 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND +[event7:base-stat] +fd=7 +type=0 +config=8 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS +[event8:base-stat] +fd=8 +type=0 +config=1 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS +[event9:base-stat] +fd=9 +type=0 +config=4 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES +[event10:base-stat] +fd=10 +type=0 +config=5 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event11:base-stat] +fd=11 +type=3 +config=0 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event12:base-stat] +fd=12 +type=3 +config=65536 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_LL << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event13:base-stat] +fd=13 +type=3 +config=2 + +# PERF_TYPE_HW_CACHE, +# 
PERF_COUNT_HW_CACHE_LL << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event14:base-stat] +fd=14 +type=3 +config=65538 diff --git a/tools/perf/tests/attr/test-stat-detailed-2 b/tools/perf/tests/attr/test-stat-detailed-2 new file mode 100644 index 00000000000..8de5acc31c2 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-detailed-2 @@ -0,0 +1,155 @@ +[config] +command = stat +args = -dd kill >/dev/null 2>&1 +ret = 1 + + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK +[event1:base-stat] +fd=1 +type=1 +config=1 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES +[event2:base-stat] +fd=2 +type=1 +config=3 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS +[event3:base-stat] +fd=3 +type=1 +config=4 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS +[event4:base-stat] +fd=4 +type=1 +config=2 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES +[event5:base-stat] +fd=5 +type=0 +config=0 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND +[event6:base-stat] +fd=6 +type=0 +config=7 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND +[event7:base-stat] +fd=7 +type=0 +config=8 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS +[event8:base-stat] +fd=8 +type=0 +config=1 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS +[event9:base-stat] +fd=9 +type=0 +config=4 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES +[event10:base-stat] +fd=10 +type=0 +config=5 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event11:base-stat] +fd=11 +type=3 +config=0 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event12:base-stat] +fd=12 +type=3 +config=65536 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_LL << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event13:base-stat] +fd=13 +type=3 +config=2 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_LL << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event14:base-stat] +fd=14 +type=3 +config=65538 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_L1I << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event15:base-stat] +fd=15 +type=3 +config=1 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_L1I << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event16:base-stat] +fd=16 +type=3 +config=65537 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_DTLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event17:base-stat] +fd=17 +type=3 +config=3 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_DTLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event18:base-stat] +fd=18 +type=3 +config=65539 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_ITLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event19:base-stat] +fd=19 +type=3 +config=4 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_ITLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event20:base-stat] +fd=20 +type=3 +config=65540 diff --git a/tools/perf/tests/attr/test-stat-detailed-3 b/tools/perf/tests/attr/test-stat-detailed-3 new file mode 100644 index 00000000000..0a1f45bf7d7 --- /dev/null +++ 
b/tools/perf/tests/attr/test-stat-detailed-3 @@ -0,0 +1,173 @@ +[config] +command = stat +args = -ddd kill >/dev/null 2>&1 +ret = 1 + + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK +[event1:base-stat] +fd=1 +type=1 +config=1 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES +[event2:base-stat] +fd=2 +type=1 +config=3 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS +[event3:base-stat] +fd=3 +type=1 +config=4 + +# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS +[event4:base-stat] +fd=4 +type=1 +config=2 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES +[event5:base-stat] +fd=5 +type=0 +config=0 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND +[event6:base-stat] +fd=6 +type=0 +config=7 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND +[event7:base-stat] +fd=7 +type=0 +config=8 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS +[event8:base-stat] +fd=8 +type=0 +config=1 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS +[event9:base-stat] +fd=9 +type=0 +config=4 + +# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES +[event10:base-stat] +fd=10 +type=0 +config=5 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event11:base-stat] +fd=11 +type=3 +config=0 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event12:base-stat] +fd=12 +type=3 +config=65536 + +# PERF_TYPE_HW_CACHE / +# PERF_COUNT_HW_CACHE_LL << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event13:base-stat] +fd=13 +type=3 +config=2 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_LL << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event14:base-stat] +fd=14 +type=3 +config=65538 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_L1I << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event15:base-stat] +fd=15 +type=3 +config=1 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_L1I << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event16:base-stat] +fd=16 +type=3 +config=65537 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_DTLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event17:base-stat] +fd=17 +type=3 +config=3 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_DTLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event18:base-stat] +fd=18 +type=3 +config=65539 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_ITLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event19:base-stat] +fd=19 +type=3 +config=4 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_ITLB << 0 | +# (PERF_COUNT_HW_CACHE_OP_READ << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event20:base-stat] +fd=20 +type=3 +config=65540 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) +[event21:base-stat] +fd=21 +type=3 +config=512 + +# PERF_TYPE_HW_CACHE, +# PERF_COUNT_HW_CACHE_L1D << 0 | +# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | +# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) +[event22:base-stat] +fd=22 +type=3 +config=66048 diff --git a/tools/perf/tests/attr/test-stat-group b/tools/perf/tests/attr/test-stat-group new file mode 100644 
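In the -d/-dd/-ddd expectations above, the PERF_TYPE_HW_CACHE events (type=3) pack their config as cache-id | (operation << 8) | (result << 16), which is where values such as 65538 (LL read miss) and 66048 (L1D prefetch miss) come from. A small helper that reproduces them, assuming the PERF_COUNT_HW_CACHE_* enums from linux/perf_event.h:

    #include <stdio.h>
    #include <linux/perf_event.h>

    /* PERF_TYPE_HW_CACHE config layout: id | (op << 8) | (result << 16) */
    static unsigned long long hw_cache_config(unsigned id, unsigned op, unsigned result)
    {
        return id | (op << 8) | (result << 16);
    }

    int main(void)
    {
        /* event14: LL read miss */
        printf("%llu\n", hw_cache_config(PERF_COUNT_HW_CACHE_LL,
                                         PERF_COUNT_HW_CACHE_OP_READ,
                                         PERF_COUNT_HW_CACHE_RESULT_MISS));     /* 65538 */
        /* event22 (-ddd only): L1D prefetch miss */
        printf("%llu\n", hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
                                         PERF_COUNT_HW_CACHE_OP_PREFETCH,
                                         PERF_COUNT_HW_CACHE_RESULT_MISS));     /* 66048 */
        return 0;
    }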
index 00000000000..fdc1596a886 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-group @@ -0,0 +1,15 @@ +[config] +command = stat +args = --group -e cycles,instructions kill >/dev/null 2>&1 +ret = 1 + +[event-1:base-stat] +fd=1 +group_fd=-1 + +[event-2:base-stat] +fd=2 +group_fd=1 +config=1 +disabled=0 +enable_on_exec=0 diff --git a/tools/perf/tests/attr/test-stat-group1 b/tools/perf/tests/attr/test-stat-group1 new file mode 100644 index 00000000000..2a1f86e4a90 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-group1 @@ -0,0 +1,15 @@ +[config] +command = stat +args = -e '{cycles,instructions}' kill >/dev/null 2>&1 +ret = 1 + +[event-1:base-stat] +fd=1 +group_fd=-1 + +[event-2:base-stat] +fd=2 +group_fd=1 +config=1 +disabled=0 +enable_on_exec=0 diff --git a/tools/perf/tests/attr/test-stat-no-inherit b/tools/perf/tests/attr/test-stat-no-inherit new file mode 100644 index 00000000000..d54b2a1e3e2 --- /dev/null +++ b/tools/perf/tests/attr/test-stat-no-inherit @@ -0,0 +1,7 @@ +[config] +command = stat +args = -i -e cycles kill >/dev/null 2>&1 +ret = 1 + +[event:base-stat] +inherit=0 diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c new file mode 100644 index 00000000000..aba09548919 --- /dev/null +++ b/tools/perf/tests/bp_signal.c @@ -0,0 +1,192 @@ +/* + * Inspired by breakpoint overflow test done by + * Vince Weaver <vincent.weaver@maine.edu> for perf_event_tests + * (git://github.com/deater/perf_event_tests) + */ + +/* + * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select + * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu. + */ +#define __SANE_USERSPACE_TYPES__ + +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <string.h> +#include <sys/ioctl.h> +#include <time.h> +#include <fcntl.h> +#include <signal.h> +#include <sys/mman.h> +#include <linux/compiler.h> +#include <linux/hw_breakpoint.h> + +#include "tests.h" +#include "debug.h" +#include "perf.h" + +static int fd1; +static int fd2; +static int overflows; + +__attribute__ ((noinline)) +static int test_function(void) +{ + return time(NULL); +} + +static void sig_handler(int signum __maybe_unused, + siginfo_t *oh __maybe_unused, + void *uc __maybe_unused) +{ + overflows++; + + if (overflows > 10) { + /* + * This should be executed only once during + * this test, if we are here for the 10th + * time, consider this the recursive issue. + * + * We can get out of here by disable events, + * so no new SIGIO is delivered. 
+ */ + ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0); + ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0); + } +} + +static int bp_event(void *fn, int setup_signal) +{ + struct perf_event_attr pe; + int fd; + + memset(&pe, 0, sizeof(struct perf_event_attr)); + pe.type = PERF_TYPE_BREAKPOINT; + pe.size = sizeof(struct perf_event_attr); + + pe.config = 0; + pe.bp_type = HW_BREAKPOINT_X; + pe.bp_addr = (unsigned long) fn; + pe.bp_len = sizeof(long); + + pe.sample_period = 1; + pe.sample_type = PERF_SAMPLE_IP; + pe.wakeup_events = 1; + + pe.disabled = 1; + pe.exclude_kernel = 1; + pe.exclude_hv = 1; + + fd = sys_perf_event_open(&pe, 0, -1, -1, 0); + if (fd < 0) { + pr_debug("failed opening event %llx\n", pe.config); + return TEST_FAIL; + } + + if (setup_signal) { + fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); + fcntl(fd, F_SETSIG, SIGIO); + fcntl(fd, F_SETOWN, getpid()); + } + + ioctl(fd, PERF_EVENT_IOC_RESET, 0); + + return fd; +} + +static long long bp_count(int fd) +{ + long long count; + int ret; + + ret = read(fd, &count, sizeof(long long)); + if (ret != sizeof(long long)) { + pr_debug("failed to read: %d\n", ret); + return TEST_FAIL; + } + + return count; +} + +int test__bp_signal(void) +{ + struct sigaction sa; + long long count1, count2; + + /* setup SIGIO signal handler */ + memset(&sa, 0, sizeof(struct sigaction)); + sa.sa_sigaction = (void *) sig_handler; + sa.sa_flags = SA_SIGINFO; + + if (sigaction(SIGIO, &sa, NULL) < 0) { + pr_debug("failed setting up signal handler\n"); + return TEST_FAIL; + } + + /* + * We create the following events: + * + * fd1 - breakpoint event on test_function with SIGIO + * signal configured. We should get signal + * notification each time the breakpoint is hit + * + * fd2 - breakpoint event on sig_handler without SIGIO + * configured. + * + * The following processing should happen: + * - execute test_function + * - fd1 event breakpoint hit -> count1 == 1 + * - SIGIO is delivered -> overflows == 1 + * - fd2 event breakpoint hit -> count2 == 1 + * + * The test case checks the following error conditions: + * - we get stuck in signal handler because of debug + * exception being triggered recursively due to + * the wrong RF EFLAG management + * + * - we never trigger the sig_handler breakpoint due + * to the wrong RF EFLAG management + * + */ + + fd1 = bp_event(test_function, 1); + fd2 = bp_event(sig_handler, 0); + + ioctl(fd1, PERF_EVENT_IOC_ENABLE, 0); + ioctl(fd2, PERF_EVENT_IOC_ENABLE, 0); + + /* + * Kick off the test by triggering 'fd1' + * breakpoint. + */ + test_function(); + + ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0); + ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0); + + count1 = bp_count(fd1); + count2 = bp_count(fd2); + + close(fd1); + close(fd2); + + pr_debug("count1 %lld, count2 %lld, overflow %d\n", + count1, count2, overflows); + + if (count1 != 1) { + if (count1 == 11) + pr_debug("failed: RF EFLAG recursion issue detected\n"); + else + pr_debug("failed: wrong count for bp1 %lld\n", count1); + } + + if (overflows != 1) + pr_debug("failed: wrong overflow hit\n"); + + if (count2 != 1) + pr_debug("failed: wrong count for bp2\n"); + + return count1 == 1 && overflows == 1 && count2 == 1 ?
+ TEST_OK : TEST_FAIL; +} diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c new file mode 100644 index 00000000000..44ac8217970 --- /dev/null +++ b/tools/perf/tests/bp_signal_overflow.c @@ -0,0 +1,132 @@ +/* + * Originally done by Vince Weaver <vincent.weaver@maine.edu> for + * perf_event_tests (git://github.com/deater/perf_event_tests) + */ + +/* + * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select + * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu. + */ +#define __SANE_USERSPACE_TYPES__ + +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <string.h> +#include <sys/ioctl.h> +#include <time.h> +#include <fcntl.h> +#include <signal.h> +#include <sys/mman.h> +#include <linux/compiler.h> +#include <linux/hw_breakpoint.h> + +#include "tests.h" +#include "debug.h" +#include "perf.h" + +static int overflows; + +__attribute__ ((noinline)) +static int test_function(void) +{ + return time(NULL); +} + +static void sig_handler(int signum __maybe_unused, + siginfo_t *oh __maybe_unused, + void *uc __maybe_unused) +{ + overflows++; +} + +static long long bp_count(int fd) +{ + long long count; + int ret; + + ret = read(fd, &count, sizeof(long long)); + if (ret != sizeof(long long)) { + pr_debug("failed to read: %d\n", ret); + return TEST_FAIL; + } + + return count; +} + +#define EXECUTIONS 10000 +#define THRESHOLD 100 + +int test__bp_signal_overflow(void) +{ + struct perf_event_attr pe; + struct sigaction sa; + long long count; + int fd, i, fails = 0; + + /* setup SIGIO signal handler */ + memset(&sa, 0, sizeof(struct sigaction)); + sa.sa_sigaction = (void *) sig_handler; + sa.sa_flags = SA_SIGINFO; + + if (sigaction(SIGIO, &sa, NULL) < 0) { + pr_debug("failed setting up signal handler\n"); + return TEST_FAIL; + } + + memset(&pe, 0, sizeof(struct perf_event_attr)); + pe.type = PERF_TYPE_BREAKPOINT; + pe.size = sizeof(struct perf_event_attr); + + pe.config = 0; + pe.bp_type = HW_BREAKPOINT_X; + pe.bp_addr = (unsigned long) test_function; + pe.bp_len = sizeof(long); + + pe.sample_period = THRESHOLD; + pe.sample_type = PERF_SAMPLE_IP; + pe.wakeup_events = 1; + + pe.disabled = 1; + pe.exclude_kernel = 1; + pe.exclude_hv = 1; + + fd = sys_perf_event_open(&pe, 0, -1, -1, 0); + if (fd < 0) { + pr_debug("failed opening event %llx\n", pe.config); + return TEST_FAIL; + } + + fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); + fcntl(fd, F_SETSIG, SIGIO); + fcntl(fd, F_SETOWN, getpid()); + + ioctl(fd, PERF_EVENT_IOC_RESET, 0); + ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); + + for (i = 0; i < EXECUTIONS; i++) + test_function(); + + ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); + + count = bp_count(fd); + + close(fd); + + pr_debug("count %lld, overflow %d\n", + count, overflows); + + if (count != EXECUTIONS) { + pr_debug("\tWrong number of executions %lld != %d\n", + count, EXECUTIONS); + fails++; + } + + if (overflows != EXECUTIONS / THRESHOLD) { + pr_debug("\tWrong number of overflows %d != %d\n", + overflows, EXECUTIONS / THRESHOLD); + fails++; + } + + return fails ? 
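bp_signal.c and bp_signal_overflow.c rely on the same mechanism: an execution breakpoint on test_function() whose counter overflows every sample_period hits, with the fd switched to asynchronous mode so each overflow delivers SIGIO; with EXECUTIONS=10000 and THRESHOLD=100 the handler is expected to run 10000 / 100 = 100 times. A minimal standalone sketch of that wiring, using the raw perf_event_open syscall so it builds outside the perf tree (it assumes hardware execution breakpoints, e.g. x86, and is only an illustration):

    #define _GNU_SOURCE                     /* for F_SETSIG */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <signal.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>
    #include <linux/hw_breakpoint.h>

    static volatile int overflows;

    static void on_sigio(int sig __attribute__((unused))) { overflows++; }

    __attribute__((noinline)) static void probed(void) { asm volatile(""); }

    int main(void)
    {
        struct perf_event_attr pe;
        int i, fd;

        signal(SIGIO, on_sigio);

        memset(&pe, 0, sizeof(pe));
        pe.type = PERF_TYPE_BREAKPOINT;
        pe.size = sizeof(pe);
        pe.bp_type = HW_BREAKPOINT_X;           /* execution breakpoint */
        pe.bp_addr = (unsigned long) probed;
        pe.bp_len = sizeof(long);
        pe.sample_period = 100;                 /* overflow every 100 hits */
        pe.sample_type = PERF_SAMPLE_IP;
        pe.wakeup_events = 1;
        pe.disabled = 1;
        pe.exclude_kernel = 1;
        pe.exclude_hv = 1;

        fd = syscall(__NR_perf_event_open, &pe, 0, -1, -1, 0);
        if (fd < 0)
            return 1;

        /* Same arming sequence bp_event() uses: SIGIO on every overflow. */
        fcntl(fd, F_SETFL, O_RDWR | O_NONBLOCK | O_ASYNC);
        fcntl(fd, F_SETSIG, SIGIO);
        fcntl(fd, F_SETOWN, getpid());
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        for (i = 0; i < 10000; i++)
            probed();

        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        printf("overflows: %d (expect 10000 / 100 = 100)\n", overflows);
        close(fd);
        return 0;
    }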
TEST_FAIL : TEST_OK; +} diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c new file mode 100644 index 00000000000..6f8b01bc603 --- /dev/null +++ b/tools/perf/tests/builtin-test.c @@ -0,0 +1,307 @@ +/* + * builtin-test.c + * + * Builtin regression testing command: ever growing number of sanity tests + */ +#include <unistd.h> +#include <string.h> +#include "builtin.h" +#include "intlist.h" +#include "tests.h" +#include "debug.h" +#include "color.h" +#include "parse-options.h" +#include "symbol.h" + +static struct test { + const char *desc; + int (*func)(void); +} tests[] = { + { + .desc = "vmlinux symtab matches kallsyms", + .func = test__vmlinux_matches_kallsyms, + }, + { + .desc = "detect open syscall event", + .func = test__open_syscall_event, + }, + { + .desc = "detect open syscall event on all cpus", + .func = test__open_syscall_event_on_all_cpus, + }, + { + .desc = "read samples using the mmap interface", + .func = test__basic_mmap, + }, + { + .desc = "parse events tests", + .func = test__parse_events, + }, +#if defined(__x86_64__) || defined(__i386__) + { + .desc = "x86 rdpmc test", + .func = test__rdpmc, + }, +#endif + { + .desc = "Validate PERF_RECORD_* events & perf_sample fields", + .func = test__PERF_RECORD, + }, + { + .desc = "Test perf pmu format parsing", + .func = test__pmu, + }, + { + .desc = "Test dso data read", + .func = test__dso_data, + }, + { + .desc = "Test dso data cache", + .func = test__dso_data_cache, + }, + { + .desc = "Test dso data reopen", + .func = test__dso_data_reopen, + }, + { + .desc = "roundtrip evsel->name check", + .func = test__perf_evsel__roundtrip_name_test, + }, + { + .desc = "Check parsing of sched tracepoints fields", + .func = test__perf_evsel__tp_sched_test, + }, + { + .desc = "Generate and check syscalls:sys_enter_open event fields", + .func = test__syscall_open_tp_fields, + }, + { + .desc = "struct perf_event_attr setup", + .func = test__attr, + }, + { + .desc = "Test matching and linking multiple hists", + .func = test__hists_link, + }, + { + .desc = "Try 'use perf' in python, checking link problems", + .func = test__python_use, + }, + { + .desc = "Test breakpoint overflow signal handler", + .func = test__bp_signal, + }, + { + .desc = "Test breakpoint overflow sampling", + .func = test__bp_signal_overflow, + }, + { + .desc = "Test number of exit event of a simple workload", + .func = test__task_exit, + }, + { + .desc = "Test software clock events have valid period values", + .func = test__sw_clock_freq, + }, +#if defined(__x86_64__) || defined(__i386__) + { + .desc = "Test converting perf time to TSC", + .func = test__perf_time_to_tsc, + }, +#endif + { + .desc = "Test object code reading", + .func = test__code_reading, + }, + { + .desc = "Test sample parsing", + .func = test__sample_parsing, + }, + { + .desc = "Test using a dummy software event to keep tracking", + .func = test__keep_tracking, + }, + { + .desc = "Test parsing with no sample_id_all bit set", + .func = test__parse_no_sample_id_all, + }, +#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "Test dwarf unwind", + .func = test__dwarf_unwind, + }, +#endif +#endif + { + .desc = "Test filtering hist entries", + .func = test__hists_filter, + }, + { + .desc = "Test mmap thread lookup", + .func = test__mmap_thread_lookup, + }, + { + .desc = "Test thread mg sharing", + .func = test__thread_mg_share, + }, + { + .desc = "Test output sorting of hist entries", + .func = test__hists_output, + }, + { + 
.desc = "Test cumulation of child hist entries", + .func = test__hists_cumulate, + }, + { + .func = NULL, + }, +}; + +static bool perf_test__matches(int curr, int argc, const char *argv[]) +{ + int i; + + if (argc == 0) + return true; + + for (i = 0; i < argc; ++i) { + char *end; + long nr = strtoul(argv[i], &end, 10); + + if (*end == '\0') { + if (nr == curr + 1) + return true; + continue; + } + + if (strstr(tests[curr].desc, argv[i])) + return true; + } + + return false; +} + +static int run_test(struct test *test) +{ + int status, err = -1, child = fork(); + + if (child < 0) { + pr_err("failed to fork test: %s\n", strerror(errno)); + return -1; + } + + if (!child) { + pr_debug("test child forked, pid %d\n", getpid()); + err = test->func(); + exit(err); + } + + wait(&status); + + if (WIFEXITED(status)) { + err = WEXITSTATUS(status); + pr_debug("test child finished with %d\n", err); + } else if (WIFSIGNALED(status)) { + err = -1; + pr_debug("test child interrupted\n"); + } + + return err; +} + +static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) +{ + int i = 0; + int width = 0; + + while (tests[i].func) { + int len = strlen(tests[i].desc); + + if (width < len) + width = len; + ++i; + } + + i = 0; + while (tests[i].func) { + int curr = i++, err; + + if (!perf_test__matches(curr, argc, argv)) + continue; + + pr_info("%2d: %-*s:", i, width, tests[curr].desc); + + if (intlist__find(skiplist, i)) { + color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); + continue; + } + + pr_debug("\n--- start ---\n"); + err = run_test(&tests[curr]); + pr_debug("---- end ----\n%s:", tests[curr].desc); + + switch (err) { + case TEST_OK: + pr_info(" Ok\n"); + break; + case TEST_SKIP: + color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n"); + break; + case TEST_FAIL: + default: + color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); + break; + } + } + + return 0; +} + +static int perf_test__list(int argc, const char **argv) +{ + int i = 0; + + while (tests[i].func) { + int curr = i++; + + if (argc > 1 && !strstr(tests[curr].desc, argv[1])) + continue; + + pr_info("%2d: %s\n", i, tests[curr].desc); + } + + return 0; +} + +int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) +{ + const char * const test_usage[] = { + "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", + NULL, + }; + const char *skip = NULL; + const struct option test_options[] = { + OPT_STRING('s', "skip", &skip, "tests", "tests to skip"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_END() + }; + struct intlist *skiplist = NULL; + + argc = parse_options(argc, argv, test_options, test_usage, 0); + if (argc >= 1 && !strcmp(argv[0], "list")) + return perf_test__list(argc, argv); + + symbol_conf.priv_size = sizeof(int); + symbol_conf.sort_by_name = true; + symbol_conf.try_vmlinux_path = true; + + if (symbol__init() < 0) + return -1; + + if (skip != NULL) + skiplist = intlist__new(skip); + + return __cmd_test(argc, argv, skiplist); +} diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c new file mode 100644 index 00000000000..67f2d632355 --- /dev/null +++ b/tools/perf/tests/code-reading.c @@ -0,0 +1,581 @@ +#include <linux/types.h> +#include <stdlib.h> +#include <unistd.h> +#include <stdio.h> +#include <ctype.h> +#include <string.h> + +#include "parse-events.h" +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "cpumap.h" +#include "machine.h" +#include 
"event.h" +#include "thread.h" + +#include "tests.h" + +#define BUFSZ 1024 +#define READLEN 128 + +struct state { + u64 done[1024]; + size_t done_cnt; +}; + +static unsigned int hex(char c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + return c - 'A' + 10; +} + +static void read_objdump_line(const char *line, size_t line_len, void **buf, + size_t *len) +{ + const char *p; + size_t i; + + /* Skip to a colon */ + p = strchr(line, ':'); + if (!p) + return; + i = p + 1 - line; + + /* Read bytes */ + while (*len) { + char c1, c2; + + /* Skip spaces */ + for (; i < line_len; i++) { + if (!isspace(line[i])) + break; + } + /* Get 2 hex digits */ + if (i >= line_len || !isxdigit(line[i])) + break; + c1 = line[i++]; + if (i >= line_len || !isxdigit(line[i])) + break; + c2 = line[i++]; + /* Followed by a space */ + if (i < line_len && line[i] && !isspace(line[i])) + break; + /* Store byte */ + *(unsigned char *)*buf = (hex(c1) << 4) | hex(c2); + *buf += 1; + *len -= 1; + } +} + +static int read_objdump_output(FILE *f, void **buf, size_t *len) +{ + char *line = NULL; + size_t line_len; + ssize_t ret; + int err = 0; + + while (1) { + ret = getline(&line, &line_len, f); + if (feof(f)) + break; + if (ret < 0) { + pr_debug("getline failed\n"); + err = -1; + break; + } + read_objdump_line(line, ret, buf, len); + } + + free(line); + + return err; +} + +static int read_via_objdump(const char *filename, u64 addr, void *buf, + size_t len) +{ + char cmd[PATH_MAX * 2]; + const char *fmt; + FILE *f; + int ret; + + fmt = "%s -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s"; + ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len, + filename); + if (ret <= 0 || (size_t)ret >= sizeof(cmd)) + return -1; + + pr_debug("Objdump command is: %s\n", cmd); + + /* Ignore objdump errors */ + strcat(cmd, " 2>/dev/null"); + + f = popen(cmd, "r"); + if (!f) { + pr_debug("popen failed\n"); + return -1; + } + + ret = read_objdump_output(f, &buf, &len); + if (len) { + pr_debug("objdump read too few bytes\n"); + if (!ret) + ret = len; + } + + pclose(f); + + return ret; +} + +static int read_object_code(u64 addr, size_t len, u8 cpumode, + struct thread *thread, struct machine *machine, + struct state *state) +{ + struct addr_location al; + unsigned char buf1[BUFSZ]; + unsigned char buf2[BUFSZ]; + size_t ret_len; + u64 objdump_addr; + int ret; + + pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); + + thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, addr, + &al); + if (!al.map || !al.map->dso) { + pr_debug("thread__find_addr_map failed\n"); + return -1; + } + + pr_debug("File is: %s\n", al.map->dso->long_name); + + if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && + !dso__is_kcore(al.map->dso)) { + pr_debug("Unexpected kernel address - skipping\n"); + return 0; + } + + pr_debug("On file address is: %#"PRIx64"\n", al.addr); + + if (len > BUFSZ) + len = BUFSZ; + + /* Do not go off the map */ + if (addr + len > al.map->end) + len = al.map->end - addr; + + /* Read the object code using perf */ + ret_len = dso__data_read_offset(al.map->dso, machine, al.addr, buf1, + len); + if (ret_len != len) { + pr_debug("dso__data_read_offset failed\n"); + return -1; + } + + /* + * Converting addresses for use by objdump requires more information. + * map__load() does that. See map__rip_2objdump() for details. 
+ */ + if (map__load(al.map, NULL)) + return -1; + + /* objdump struggles with kcore - try each map only once */ + if (dso__is_kcore(al.map->dso)) { + size_t d; + + for (d = 0; d < state->done_cnt; d++) { + if (state->done[d] == al.map->start) { + pr_debug("kcore map tested already"); + pr_debug(" - skipping\n"); + return 0; + } + } + if (state->done_cnt >= ARRAY_SIZE(state->done)) { + pr_debug("Too many kcore maps - skipping\n"); + return 0; + } + state->done[state->done_cnt++] = al.map->start; + } + + /* Read the object code using objdump */ + objdump_addr = map__rip_2objdump(al.map, al.addr); + ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); + if (ret > 0) { + /* + * The kernel maps are inaccurate - assume objdump is right in + * that case. + */ + if (cpumode == PERF_RECORD_MISC_KERNEL || + cpumode == PERF_RECORD_MISC_GUEST_KERNEL) { + len -= ret; + if (len) { + pr_debug("Reducing len to %zu\n", len); + } else if (dso__is_kcore(al.map->dso)) { + /* + * objdump cannot handle very large segments + * that may be found in kcore. + */ + pr_debug("objdump failed for kcore"); + pr_debug(" - skipping\n"); + return 0; + } else { + return -1; + } + } + } + if (ret < 0) { + pr_debug("read_via_objdump failed\n"); + return -1; + } + + /* The results should be identical */ + if (memcmp(buf1, buf2, len)) { + pr_debug("Bytes read differ from those read by objdump\n"); + return -1; + } + pr_debug("Bytes read match those read by objdump\n"); + + return 0; +} + +static int process_sample_event(struct machine *machine, + struct perf_evlist *evlist, + union perf_event *event, struct state *state) +{ + struct perf_sample sample; + struct thread *thread; + u8 cpumode; + + if (perf_evlist__parse_sample(evlist, event, &sample)) { + pr_debug("perf_evlist__parse_sample failed\n"); + return -1; + } + + thread = machine__findnew_thread(machine, sample.pid, sample.tid); + if (!thread) { + pr_debug("machine__findnew_thread failed\n"); + return -1; + } + + cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + + return read_object_code(sample.ip, READLEN, cpumode, thread, machine, + state); +} + +static int process_event(struct machine *machine, struct perf_evlist *evlist, + union perf_event *event, struct state *state) +{ + if (event->header.type == PERF_RECORD_SAMPLE) + return process_sample_event(machine, evlist, event, state); + + if (event->header.type == PERF_RECORD_THROTTLE || + event->header.type == PERF_RECORD_UNTHROTTLE) + return 0; + + if (event->header.type < PERF_RECORD_MAX) { + int ret; + + ret = machine__process_event(machine, event, NULL); + if (ret < 0) + pr_debug("machine__process_event failed, event type %u\n", + event->header.type); + return ret; + } + + return 0; +} + +static int process_events(struct machine *machine, struct perf_evlist *evlist, + struct state *state) +{ + union perf_event *event; + int i, ret; + + for (i = 0; i < evlist->nr_mmaps; i++) { + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + ret = process_event(machine, evlist, event, state); + perf_evlist__mmap_consume(evlist, i); + if (ret < 0) + return ret; + } + } + return 0; +} + +static int comp(const void *a, const void *b) +{ + return *(int *)a - *(int *)b; +} + +static void do_sort_something(void) +{ + int buf[40960], i; + + for (i = 0; i < (int)ARRAY_SIZE(buf); i++) + buf[i] = ARRAY_SIZE(buf) - i - 1; + + qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp); + + for (i = 0; i < (int)ARRAY_SIZE(buf); i++) { + if (buf[i] != i) { + pr_debug("qsort failed\n"); + break; + } + } +} + 
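read_object_code() above cross-checks perf's view of the binary against the toolchain's: the bytes at each sampled address are fetched once through dso__data_read_offset() and once by running the command built in read_via_objdump() (objdump -d --start-address=0x... --stop-address=0x... <object>), and the two buffers are then memcmp()'d, with special-casing for inaccurate kernel maps and kcore. read_objdump_line() recovers the bytes from the disassembly text by skipping to the first ':' and collecting the two-digit hex groups that follow, so for a line like the one below (column widths vary between binutils versions) it stores the single byte 0x55:

    4004d6:       55                      push   %rbp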
+static void sort_something(void) +{ + int i; + + for (i = 0; i < 10; i++) + do_sort_something(); +} + +static void syscall_something(void) +{ + int pipefd[2]; + int i; + + for (i = 0; i < 1000; i++) { + if (pipe(pipefd) < 0) { + pr_debug("pipe failed\n"); + break; + } + close(pipefd[1]); + close(pipefd[0]); + } +} + +static void fs_something(void) +{ + const char *test_file_name = "temp-perf-code-reading-test-file--"; + FILE *f; + int i; + + for (i = 0; i < 1000; i++) { + f = fopen(test_file_name, "w+"); + if (f) { + fclose(f); + unlink(test_file_name); + } + } +} + +static void do_something(void) +{ + fs_something(); + + sort_something(); + + syscall_something(); +} + +enum { + TEST_CODE_READING_OK, + TEST_CODE_READING_NO_VMLINUX, + TEST_CODE_READING_NO_KCORE, + TEST_CODE_READING_NO_ACCESS, + TEST_CODE_READING_NO_KERNEL_OBJ, +}; + +static int do_test_code_reading(bool try_kcore) +{ + struct machines machines; + struct machine *machine; + struct thread *thread; + struct record_opts opts = { + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 4000, + .target = { + .uses_mmap = true, + }, + }; + struct state state = { + .done_cnt = 0, + }; + struct thread_map *threads = NULL; + struct cpu_map *cpus = NULL; + struct perf_evlist *evlist = NULL; + struct perf_evsel *evsel = NULL; + int err = -1, ret; + pid_t pid; + struct map *map; + bool have_vmlinux, have_kcore, excl_kernel = false; + + pid = getpid(); + + machines__init(&machines); + machine = &machines.host; + + ret = machine__create_kernel_maps(machine); + if (ret < 0) { + pr_debug("machine__create_kernel_maps failed\n"); + goto out_err; + } + + /* Force the use of kallsyms instead of vmlinux to try kcore */ + if (try_kcore) + symbol_conf.kallsyms_name = "/proc/kallsyms"; + + /* Load kernel map */ + map = machine->vmlinux_maps[MAP__FUNCTION]; + ret = map__load(map, NULL); + if (ret < 0) { + pr_debug("map__load failed\n"); + goto out_err; + } + have_vmlinux = dso__is_vmlinux(map->dso); + have_kcore = dso__is_kcore(map->dso); + + /* 2nd time through we just try kcore */ + if (try_kcore && !have_kcore) + return TEST_CODE_READING_NO_KCORE; + + /* No point getting kernel events if there is no kernel object */ + if (!have_vmlinux && !have_kcore) + excl_kernel = true; + + threads = thread_map__new_by_tid(pid); + if (!threads) { + pr_debug("thread_map__new_by_tid failed\n"); + goto out_err; + } + + ret = perf_event__synthesize_thread_map(NULL, threads, + perf_event__process, machine, false); + if (ret < 0) { + pr_debug("perf_event__synthesize_thread_map failed\n"); + goto out_err; + } + + thread = machine__findnew_thread(machine, pid, pid); + if (!thread) { + pr_debug("machine__findnew_thread failed\n"); + goto out_err; + } + + cpus = cpu_map__new(NULL); + if (!cpus) { + pr_debug("cpu_map__new failed\n"); + goto out_err; + } + + while (1) { + const char *str; + + evlist = perf_evlist__new(); + if (!evlist) { + pr_debug("perf_evlist__new failed\n"); + goto out_err; + } + + perf_evlist__set_maps(evlist, cpus, threads); + + if (excl_kernel) + str = "cycles:u"; + else + str = "cycles"; + pr_debug("Parsing event '%s'\n", str); + ret = parse_events(evlist, str); + if (ret < 0) { + pr_debug("parse_events failed\n"); + goto out_err; + } + + perf_evlist__config(evlist, &opts); + + evsel = perf_evlist__first(evlist); + + evsel->attr.comm = 1; + evsel->attr.disabled = 1; + evsel->attr.enable_on_exec = 0; + + ret = perf_evlist__open(evlist); + if (ret < 0) { + if (!excl_kernel) { + excl_kernel = true; + 
perf_evlist__set_maps(evlist, NULL, NULL); + perf_evlist__delete(evlist); + evlist = NULL; + continue; + } + pr_debug("perf_evlist__open failed\n"); + goto out_err; + } + break; + } + + ret = perf_evlist__mmap(evlist, UINT_MAX, false); + if (ret < 0) { + pr_debug("perf_evlist__mmap failed\n"); + goto out_err; + } + + perf_evlist__enable(evlist); + + do_something(); + + perf_evlist__disable(evlist); + + ret = process_events(machine, evlist, &state); + if (ret < 0) + goto out_err; + + if (!have_vmlinux && !have_kcore && !try_kcore) + err = TEST_CODE_READING_NO_KERNEL_OBJ; + else if (!have_vmlinux && !try_kcore) + err = TEST_CODE_READING_NO_VMLINUX; + else if (excl_kernel) + err = TEST_CODE_READING_NO_ACCESS; + else + err = TEST_CODE_READING_OK; +out_err: + if (evlist) { + perf_evlist__delete(evlist); + } else { + cpu_map__delete(cpus); + thread_map__delete(threads); + } + machines__destroy_kernel_maps(&machines); + machine__delete_threads(machine); + machines__exit(&machines); + + return err; +} + +int test__code_reading(void) +{ + int ret; + + ret = do_test_code_reading(false); + if (!ret) + ret = do_test_code_reading(true); + + switch (ret) { + case TEST_CODE_READING_OK: + return 0; + case TEST_CODE_READING_NO_VMLINUX: + fprintf(stderr, " (no vmlinux)"); + return 0; + case TEST_CODE_READING_NO_KCORE: + fprintf(stderr, " (no kcore)"); + return 0; + case TEST_CODE_READING_NO_ACCESS: + fprintf(stderr, " (no access)"); + return 0; + case TEST_CODE_READING_NO_KERNEL_OBJ: + fprintf(stderr, " (no kernel obj)"); + return 0; + default: + return -1; + }; +} diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c new file mode 100644 index 00000000000..630808cd7cc --- /dev/null +++ b/tools/perf/tests/dso-data.c @@ -0,0 +1,358 @@ +#include <stdlib.h> +#include <linux/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <string.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <api/fs/fs.h> +#include "util.h" +#include "machine.h" +#include "symbol.h" +#include "tests.h" + +static char *test_file(int size) +{ +#define TEMPL "/tmp/perf-test-XXXXXX" + static char buf_templ[sizeof(TEMPL)]; + char *templ = buf_templ; + int fd, i; + unsigned char *buf; + + strcpy(buf_templ, TEMPL); +#undef TEMPL + + fd = mkstemp(templ); + if (fd < 0) { + perror("mkstemp failed"); + return NULL; + } + + buf = malloc(size); + if (!buf) { + close(fd); + return NULL; + } + + for (i = 0; i < size; i++) + buf[i] = (unsigned char) ((int) i % 10); + + if (size != write(fd, buf, size)) + templ = NULL; + + free(buf); + close(fd); + return templ; +} + +#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20) + +struct test_data_offset { + off_t offset; + u8 data[10]; + int size; +}; + +struct test_data_offset offsets[] = { + /* Fill first cache page. */ + { + .offset = 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read first cache page. */ + { + .offset = 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Fill cache boundary pages. */ + { + .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read cache boundary pages. */ + { + .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Fill final cache page. */ + { + .offset = TEST_FILE_SIZE - 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read final cache page. 
*/ + { + .offset = TEST_FILE_SIZE - 10, + .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + .size = 10, + }, + /* Read final cache page. */ + { + .offset = TEST_FILE_SIZE - 3, + .data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 }, + .size = 3, + }, +}; + +int test__dso_data(void) +{ + struct machine machine; + struct dso *dso; + char *file = test_file(TEST_FILE_SIZE); + size_t i; + + TEST_ASSERT_VAL("No test file", file); + + memset(&machine, 0, sizeof(machine)); + + dso = dso__new((const char *)file); + + /* Basic 10 bytes tests. */ + for (i = 0; i < ARRAY_SIZE(offsets); i++) { + struct test_data_offset *data = &offsets[i]; + ssize_t size; + u8 buf[10]; + + memset(buf, 0, 10); + size = dso__data_read_offset(dso, &machine, data->offset, + buf, 10); + + TEST_ASSERT_VAL("Wrong size", size == data->size); + TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10)); + } + + /* Read cross multiple cache pages. */ + { + ssize_t size; + int c; + u8 *buf; + + buf = malloc(TEST_FILE_SIZE); + TEST_ASSERT_VAL("ENOMEM\n", buf); + + /* First iteration to fill caches, second one to read them. */ + for (c = 0; c < 2; c++) { + memset(buf, 0, TEST_FILE_SIZE); + size = dso__data_read_offset(dso, &machine, 10, + buf, TEST_FILE_SIZE); + + TEST_ASSERT_VAL("Wrong size", + size == (TEST_FILE_SIZE - 10)); + + for (i = 0; i < (size_t)size; i++) + TEST_ASSERT_VAL("Wrong data", + buf[i] == (i % 10)); + } + + free(buf); + } + + dso__delete(dso); + unlink(file); + return 0; +} + +static long open_files_cnt(void) +{ + char path[PATH_MAX]; + struct dirent *dent; + DIR *dir; + long nr = 0; + + scnprintf(path, PATH_MAX, "%s/self/fd", procfs__mountpoint()); + pr_debug("fd path: %s\n", path); + + dir = opendir(path); + TEST_ASSERT_VAL("failed to open fd directory", dir); + + while ((dent = readdir(dir)) != NULL) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + nr++; + } + + closedir(dir); + return nr - 1; +} + +static struct dso **dsos; + +static int dsos__create(int cnt, int size) +{ + int i; + + dsos = malloc(sizeof(dsos) * cnt); + TEST_ASSERT_VAL("failed to alloc dsos array", dsos); + + for (i = 0; i < cnt; i++) { + char *file; + + file = test_file(size); + TEST_ASSERT_VAL("failed to get dso file", file); + + dsos[i] = dso__new(file); + TEST_ASSERT_VAL("failed to get dso", dsos[i]); + } + + return 0; +} + +static void dsos__delete(int cnt) +{ + int i; + + for (i = 0; i < cnt; i++) { + struct dso *dso = dsos[i]; + + unlink(dso->name); + dso__delete(dso); + } + + free(dsos); +} + +static int set_fd_limit(int n) +{ + struct rlimit rlim; + + if (getrlimit(RLIMIT_NOFILE, &rlim)) + return -1; + + pr_debug("file limit %ld, new %d\n", (long) rlim.rlim_cur, n); + + rlim.rlim_cur = n; + return setrlimit(RLIMIT_NOFILE, &rlim); +} + +int test__dso_data_cache(void) +{ + struct machine machine; + long nr_end, nr = open_files_cnt(); + int dso_cnt, limit, i, fd; + + memset(&machine, 0, sizeof(machine)); + + /* set as system limit */ + limit = nr * 4; + TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit(limit)); + + /* and this is now our dso open FDs limit + 1 extra */ + dso_cnt = limit / 2 + 1; + TEST_ASSERT_VAL("failed to create dsos\n", + !dsos__create(dso_cnt, TEST_FILE_SIZE)); + + for (i = 0; i < (dso_cnt - 1); i++) { + struct dso *dso = dsos[i]; + + /* + * Open dsos via dso__data_fd or dso__data_read_offset. + * Both opens the data file and keep it open. 
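The expected bytes in offsets[] follow directly from test_file(), which fills the temporary file with the pattern byte[i] = i % 10, and from TEST_FILE_SIZE being DSO__DATA_CACHE_SIZE * 20 and therefore a multiple of 10: every tested offset that is a multiple of 10 reads back 0..9, and the read at TEST_FILE_SIZE - 3 only returns the 3 bytes before EOF, 7 8 9. The checks that follow then shrink RLIMIT_NOFILE so that opening one dso too many must force the oldest cached fd to be closed (dsos[0]->data.fd == -1). A tiny standalone check of the byte arithmetic (DSO__DATA_CACHE_SIZE is assumed to be 4096 here; the multiple-of-10 argument holds for any value times 20):

    #include <stdio.h>

    #define CACHE_SZ 4096                   /* assumed DSO__DATA_CACHE_SIZE */
    #define FILE_SZ  (CACHE_SZ * 20)        /* TEST_FILE_SIZE */

    int main(void)
    {
        /* test_file() writes the byte (i % 10) at offset i */
        long offs[] = { 10, CACHE_SZ - CACHE_SZ % 10, FILE_SZ - 10, FILE_SZ - 3 };
        int i, j;

        for (i = 0; i < 4; i++) {
            printf("offset %ld:", offs[i]);
            for (j = 0; j < 10 && offs[i] + j < FILE_SZ; j++)
                printf(" %ld", (offs[i] + j) % 10);
            printf("\n");                   /* last line: 7 8 9 (EOF after 3 bytes) */
        }
        return 0;
    }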
+ */ + if (i % 2) { + fd = dso__data_fd(dso, &machine); + TEST_ASSERT_VAL("failed to get fd", fd > 0); + } else { + #define BUFSIZE 10 + u8 buf[BUFSIZE]; + ssize_t n; + + n = dso__data_read_offset(dso, &machine, 0, buf, BUFSIZE); + TEST_ASSERT_VAL("failed to read dso", n == BUFSIZE); + } + } + + /* open +1 dso over the allowed limit */ + fd = dso__data_fd(dsos[i], &machine); + TEST_ASSERT_VAL("failed to get fd", fd > 0); + + /* should force the first one to be closed */ + TEST_ASSERT_VAL("failed to close dsos[0]", dsos[0]->data.fd == -1); + + /* cleanup everything */ + dsos__delete(dso_cnt); + + /* Make sure we did not leak any file descriptor. */ + nr_end = open_files_cnt(); + pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end); + TEST_ASSERT_VAL("failed leadking files", nr == nr_end); + return 0; +} + +int test__dso_data_reopen(void) +{ + struct machine machine; + long nr_end, nr = open_files_cnt(); + int fd, fd_extra; + +#define dso_0 (dsos[0]) +#define dso_1 (dsos[1]) +#define dso_2 (dsos[2]) + + memset(&machine, 0, sizeof(machine)); + + /* + * Test scenario: + * - create 3 dso objects + * - set process file descriptor limit to current + * files count + 3 + * - test that the first dso gets closed when we + * reach the files count limit + */ + + /* Make sure we are able to open 3 fds anyway */ + TEST_ASSERT_VAL("failed to set file limit", + !set_fd_limit((nr + 3))); + + TEST_ASSERT_VAL("failed to create dsos\n", !dsos__create(3, TEST_FILE_SIZE)); + + /* open dso_0 */ + fd = dso__data_fd(dso_0, &machine); + TEST_ASSERT_VAL("failed to get fd", fd > 0); + + /* open dso_1 */ + fd = dso__data_fd(dso_1, &machine); + TEST_ASSERT_VAL("failed to get fd", fd > 0); + + /* + * open extra file descriptor and we just + * reached the files count limit + */ + fd_extra = open("/dev/null", O_RDONLY); + TEST_ASSERT_VAL("failed to open extra fd", fd_extra > 0); + + /* open dso_2 */ + fd = dso__data_fd(dso_2, &machine); + TEST_ASSERT_VAL("failed to get fd", fd > 0); + + /* + * dso_0 should get closed, because we reached + * the file descriptor limit + */ + TEST_ASSERT_VAL("failed to close dso_0", dso_0->data.fd == -1); + + /* open dso_0 */ + fd = dso__data_fd(dso_0, &machine); + TEST_ASSERT_VAL("failed to get fd", fd > 0); + + /* + * dso_1 should get closed, because we reached + * the file descriptor limit + */ + TEST_ASSERT_VAL("failed to close dso_1", dso_1->data.fd == -1); + + /* cleanup everything */ + close(fd_extra); + dsos__delete(3); + + /* Make sure we did not leak any file descriptor. 
*/ + nr_end = open_files_cnt(); + pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end); + TEST_ASSERT_VAL("failed leadking files", nr == nr_end); + return 0; +} diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c new file mode 100644 index 00000000000..96adb730b74 --- /dev/null +++ b/tools/perf/tests/dwarf-unwind.c @@ -0,0 +1,144 @@ +#include <linux/compiler.h> +#include <linux/types.h> +#include <unistd.h> +#include "tests.h" +#include "debug.h" +#include "machine.h" +#include "event.h" +#include "unwind.h" +#include "perf_regs.h" +#include "map.h" +#include "thread.h" + +static int mmap_handler(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample __maybe_unused, + struct machine *machine) +{ + return machine__process_mmap2_event(machine, event, NULL); +} + +static int init_live_machine(struct machine *machine) +{ + union perf_event event; + pid_t pid = getpid(); + + return perf_event__synthesize_mmap_events(NULL, &event, pid, pid, + mmap_handler, machine, true); +} + +#define MAX_STACK 6 + +static int unwind_entry(struct unwind_entry *entry, void *arg) +{ + unsigned long *cnt = (unsigned long *) arg; + char *symbol = entry->sym ? entry->sym->name : NULL; + static const char *funcs[MAX_STACK] = { + "test__arch_unwind_sample", + "unwind_thread", + "krava_3", + "krava_2", + "krava_1", + "test__dwarf_unwind" + }; + + if (*cnt >= MAX_STACK) { + pr_debug("failed: crossed the max stack value %d\n", MAX_STACK); + return -1; + } + + if (!symbol) { + pr_debug("failed: got unresolved address 0x%" PRIx64 "\n", + entry->ip); + return -1; + } + + pr_debug("got: %s 0x%" PRIx64 "\n", symbol, entry->ip); + return strcmp((const char *) symbol, funcs[(*cnt)++]); +} + +__attribute__ ((noinline)) +static int unwind_thread(struct thread *thread, struct machine *machine) +{ + struct perf_sample sample; + unsigned long cnt = 0; + int err = -1; + + memset(&sample, 0, sizeof(sample)); + + if (test__arch_unwind_sample(&sample, thread)) { + pr_debug("failed to get unwind sample\n"); + goto out; + } + + err = unwind__get_entries(unwind_entry, &cnt, machine, thread, + &sample, MAX_STACK); + if (err) + pr_debug("unwind failed\n"); + else if (cnt != MAX_STACK) { + pr_debug("got wrong number of stack entries %lu != %d\n", + cnt, MAX_STACK); + err = -1; + } + + out: + free(sample.user_stack.data); + free(sample.user_regs.regs); + return err; +} + +__attribute__ ((noinline)) +static int krava_3(struct thread *thread, struct machine *machine) +{ + return unwind_thread(thread, machine); +} + +__attribute__ ((noinline)) +static int krava_2(struct thread *thread, struct machine *machine) +{ + return krava_3(thread, machine); +} + +__attribute__ ((noinline)) +static int krava_1(struct thread *thread, struct machine *machine) +{ + return krava_2(thread, machine); +} + +int test__dwarf_unwind(void) +{ + struct machines machines; + struct machine *machine; + struct thread *thread; + int err = -1; + + machines__init(&machines); + + machine = machines__find(&machines, HOST_KERNEL_ID); + if (!machine) { + pr_err("Could not get machine\n"); + return -1; + } + + if (init_live_machine(machine)) { + pr_err("Could not init machine\n"); + goto out; + } + + if (verbose > 1) + machine__fprintf(machine, stderr); + + thread = machine__find_thread(machine, getpid(), getpid()); + if (!thread) { + pr_err("Could not get thread\n"); + goto out; + } + + err = krava_1(thread, machine); + + out: + machine__delete_threads(machine); + machine__exit(machine); + 
machines__exit(&machines); + return err; +} diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c new file mode 100644 index 00000000000..465cdbc345c --- /dev/null +++ b/tools/perf/tests/evsel-roundtrip-name.c @@ -0,0 +1,114 @@ +#include "evlist.h" +#include "evsel.h" +#include "parse-events.h" +#include "tests.h" + +static int perf_evsel__roundtrip_cache_name_test(void) +{ + char name[128]; + int type, op, err = 0, ret = 0, i, idx; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(); + + if (evlist == NULL) + return -ENOMEM; + + for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { + for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { + /* skip invalid cache type */ + if (!perf_evsel__is_cache_op_valid(type, op)) + continue; + + for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { + __perf_evsel__hw_cache_type_op_res_name(type, op, i, + name, sizeof(name)); + err = parse_events(evlist, name); + if (err) + ret = err; + } + } + } + + idx = 0; + evsel = perf_evlist__first(evlist); + + for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { + for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { + /* skip invalid cache type */ + if (!perf_evsel__is_cache_op_valid(type, op)) + continue; + + for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { + __perf_evsel__hw_cache_type_op_res_name(type, op, i, + name, sizeof(name)); + if (evsel->idx != idx) + continue; + + ++idx; + + if (strcmp(perf_evsel__name(evsel), name)) { + pr_debug("%s != %s\n", perf_evsel__name(evsel), name); + ret = -1; + } + + evsel = perf_evsel__next(evsel); + } + } + } + + perf_evlist__delete(evlist); + return ret; +} + +static int __perf_evsel__name_array_test(const char *names[], int nr_names) +{ + int i, err; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(); + + if (evlist == NULL) + return -ENOMEM; + + for (i = 0; i < nr_names; ++i) { + err = parse_events(evlist, names[i]); + if (err) { + pr_debug("failed to parse event '%s', err %d\n", + names[i], err); + goto out_delete_evlist; + } + } + + err = 0; + evlist__for_each(evlist, evsel) { + if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { + --err; + pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); + } + } + +out_delete_evlist: + perf_evlist__delete(evlist); + return err; +} + +#define perf_evsel__name_array_test(names) \ + __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) + +int test__perf_evsel__roundtrip_name_test(void) +{ + int err = 0, ret = 0; + + err = perf_evsel__name_array_test(perf_evsel__hw_names); + if (err) + ret = err; + + err = perf_evsel__name_array_test(perf_evsel__sw_names); + if (err) + ret = err; + + err = perf_evsel__roundtrip_cache_name_test(); + if (err) + ret = err; + + return ret; +} diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c new file mode 100644 index 00000000000..35d7fdb2328 --- /dev/null +++ b/tools/perf/tests/evsel-tp-sched.c @@ -0,0 +1,81 @@ +#include <traceevent/event-parse.h> +#include "evsel.h" +#include "tests.h" + +static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, + int size, bool should_be_signed) +{ + struct format_field *field = perf_evsel__field(evsel, name); + int is_signed; + int ret = 0; + + if (field == NULL) { + pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); + return -1; + } + + is_signed = !!(field->flags | FIELD_IS_SIGNED); + if (should_be_signed && !is_signed) { + pr_debug("%s: \"%s\" signedness(%d) is wrong, 
should be %d\n", + evsel->name, name, is_signed, should_be_signed); + ret = -1; + } + + if (field->size != size) { + pr_debug("%s: \"%s\" size (%d) should be %d!\n", + evsel->name, name, field->size, size); + ret = -1; + } + + return ret; +} + +int test__perf_evsel__tp_sched_test(void) +{ + struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch"); + int ret = 0; + + if (evsel == NULL) { + pr_debug("perf_evsel__new\n"); + return -1; + } + + if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "next_comm", 16, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "next_pid", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "next_prio", 4, true)) + ret = -1; + + perf_evsel__delete(evsel); + + evsel = perf_evsel__newtp("sched", "sched_wakeup"); + + if (perf_evsel__test_field(evsel, "comm", 16, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "pid", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prio", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) + ret = -1; + + return ret; +} diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c new file mode 100644 index 00000000000..a62c0913451 --- /dev/null +++ b/tools/perf/tests/hists_common.c @@ -0,0 +1,209 @@ +#include "perf.h" +#include "util/debug.h" +#include "util/symbol.h" +#include "util/sort.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/machine.h" +#include "util/thread.h" +#include "tests/hists_common.h" + +static struct { + u32 pid; + const char *comm; +} fake_threads[] = { + { FAKE_PID_PERF1, "perf" }, + { FAKE_PID_PERF2, "perf" }, + { FAKE_PID_BASH, "bash" }, +}; + +static struct { + u32 pid; + u64 start; + const char *filename; +} fake_mmap_info[] = { + { FAKE_PID_PERF1, FAKE_MAP_PERF, "perf" }, + { FAKE_PID_PERF1, FAKE_MAP_LIBC, "libc" }, + { FAKE_PID_PERF1, FAKE_MAP_KERNEL, "[kernel]" }, + { FAKE_PID_PERF2, FAKE_MAP_PERF, "perf" }, + { FAKE_PID_PERF2, FAKE_MAP_LIBC, "libc" }, + { FAKE_PID_PERF2, FAKE_MAP_KERNEL, "[kernel]" }, + { FAKE_PID_BASH, FAKE_MAP_BASH, "bash" }, + { FAKE_PID_BASH, FAKE_MAP_LIBC, "libc" }, + { FAKE_PID_BASH, FAKE_MAP_KERNEL, "[kernel]" }, +}; + +struct fake_sym { + u64 start; + u64 length; + const char *name; +}; + +static struct fake_sym perf_syms[] = { + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "run_command" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "cmd_record" }, +}; + +static struct fake_sym bash_syms[] = { + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "xmalloc" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "xfree" }, +}; + +static struct fake_sym libc_syms[] = { + { 700, 100, "malloc" }, + { 800, 100, "free" }, + { 900, 100, "realloc" }, + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "malloc" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "free" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "realloc" }, +}; + +static struct fake_sym kernel_syms[] = { + { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "schedule" }, + { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "page_fault" }, + { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "sys_perf_event_open" }, +}; + +static struct { + const char *dso_name; + struct fake_sym *syms; + size_t nr_syms; +} fake_symbols[] = { + { 
"perf", perf_syms, ARRAY_SIZE(perf_syms) }, + { "bash", bash_syms, ARRAY_SIZE(bash_syms) }, + { "libc", libc_syms, ARRAY_SIZE(libc_syms) }, + { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) }, +}; + +struct machine *setup_fake_machine(struct machines *machines) +{ + struct machine *machine = machines__find(machines, HOST_KERNEL_ID); + size_t i; + + if (machine == NULL) { + pr_debug("Not enough memory for machine setup\n"); + return NULL; + } + + for (i = 0; i < ARRAY_SIZE(fake_threads); i++) { + struct thread *thread; + + thread = machine__findnew_thread(machine, fake_threads[i].pid, + fake_threads[i].pid); + if (thread == NULL) + goto out; + + thread__set_comm(thread, fake_threads[i].comm, 0); + } + + for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) { + union perf_event fake_mmap_event = { + .mmap = { + .header = { .misc = PERF_RECORD_MISC_USER, }, + .pid = fake_mmap_info[i].pid, + .tid = fake_mmap_info[i].pid, + .start = fake_mmap_info[i].start, + .len = FAKE_MAP_LENGTH, + .pgoff = 0ULL, + }, + }; + + strcpy(fake_mmap_event.mmap.filename, + fake_mmap_info[i].filename); + + machine__process_mmap_event(machine, &fake_mmap_event, NULL); + } + + for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) { + size_t k; + struct dso *dso; + + dso = __dsos__findnew(&machine->user_dsos, + fake_symbols[i].dso_name); + if (dso == NULL) + goto out; + + /* emulate dso__load() */ + dso__set_loaded(dso, MAP__FUNCTION); + + for (k = 0; k < fake_symbols[i].nr_syms; k++) { + struct symbol *sym; + struct fake_sym *fsym = &fake_symbols[i].syms[k]; + + sym = symbol__new(fsym->start, fsym->length, + STB_GLOBAL, fsym->name); + if (sym == NULL) + goto out; + + symbols__insert(&dso->symbols[MAP__FUNCTION], sym); + } + } + + return machine; + +out: + pr_debug("Not enough memory for machine setup\n"); + machine__delete_threads(machine); + machine__delete(machine); + return NULL; +} + +void print_hists_in(struct hists *hists) +{ + int i = 0; + struct rb_root *root; + struct rb_node *node; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + pr_info("----- %s --------\n", __func__); + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node_in); + + if (!he->filtered) { + pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n", + i, thread__comm_str(he->thread), + he->ms.map->dso->short_name, + he->ms.sym->name, he->stat.period); + } + + i++; + node = rb_next(node); + } +} + +void print_hists_out(struct hists *hists) +{ + int i = 0; + struct rb_root *root; + struct rb_node *node; + + root = &hists->entries; + + pr_info("----- %s --------\n", __func__); + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node); + + if (!he->filtered) { + pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n", + i, thread__comm_str(he->thread), he->thread->tid, + he->ms.map->dso->short_name, + he->ms.sym->name, he->stat.period, + he->stat_acc ? 
he->stat_acc->period : 0); + } + + i++; + node = rb_next(node); + } +} diff --git a/tools/perf/tests/hists_common.h b/tools/perf/tests/hists_common.h new file mode 100644 index 00000000000..888254e8665 --- /dev/null +++ b/tools/perf/tests/hists_common.h @@ -0,0 +1,75 @@ +#ifndef __PERF_TESTS__HISTS_COMMON_H__ +#define __PERF_TESTS__HISTS_COMMON_H__ + +struct machine; +struct machines; + +#define FAKE_PID_PERF1 100 +#define FAKE_PID_PERF2 200 +#define FAKE_PID_BASH 300 + +#define FAKE_MAP_PERF 0x400000 +#define FAKE_MAP_BASH 0x400000 +#define FAKE_MAP_LIBC 0x500000 +#define FAKE_MAP_KERNEL 0xf00000 +#define FAKE_MAP_LENGTH 0x100000 + +#define FAKE_SYM_OFFSET1 700 +#define FAKE_SYM_OFFSET2 800 +#define FAKE_SYM_OFFSET3 900 +#define FAKE_SYM_LENGTH 100 + +#define FAKE_IP_PERF_MAIN FAKE_MAP_PERF + FAKE_SYM_OFFSET1 +#define FAKE_IP_PERF_RUN_COMMAND FAKE_MAP_PERF + FAKE_SYM_OFFSET2 +#define FAKE_IP_PERF_CMD_RECORD FAKE_MAP_PERF + FAKE_SYM_OFFSET3 +#define FAKE_IP_BASH_MAIN FAKE_MAP_BASH + FAKE_SYM_OFFSET1 +#define FAKE_IP_BASH_XMALLOC FAKE_MAP_BASH + FAKE_SYM_OFFSET2 +#define FAKE_IP_BASH_XFREE FAKE_MAP_BASH + FAKE_SYM_OFFSET3 +#define FAKE_IP_LIBC_MALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET1 +#define FAKE_IP_LIBC_FREE FAKE_MAP_LIBC + FAKE_SYM_OFFSET2 +#define FAKE_IP_LIBC_REALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET3 +#define FAKE_IP_KERNEL_SCHEDULE FAKE_MAP_KERNEL + FAKE_SYM_OFFSET1 +#define FAKE_IP_KERNEL_PAGE_FAULT FAKE_MAP_KERNEL + FAKE_SYM_OFFSET2 +#define FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN FAKE_MAP_KERNEL + FAKE_SYM_OFFSET3 + +/* + * The setup_fake_machine() provides a test environment which consists + * of 3 processes that have 3 mappings and in turn, have 3 symbols + * respectively. See below table: + * + * Command: Pid Shared Object Symbol + * ............. ............. ................... 
+ * perf: 100 perf main + * perf: 100 perf run_command + * perf: 100 perf cmd_record + * perf: 100 libc malloc + * perf: 100 libc free + * perf: 100 libc realloc + * perf: 100 [kernel] schedule + * perf: 100 [kernel] page_fault + * perf: 100 [kernel] sys_perf_event_open + * perf: 200 perf main + * perf: 200 perf run_command + * perf: 200 perf cmd_record + * perf: 200 libc malloc + * perf: 200 libc free + * perf: 200 libc realloc + * perf: 200 [kernel] schedule + * perf: 200 [kernel] page_fault + * perf: 200 [kernel] sys_perf_event_open + * bash: 300 bash main + * bash: 300 bash xmalloc + * bash: 300 bash xfree + * bash: 300 libc malloc + * bash: 300 libc free + * bash: 300 libc realloc + * bash: 300 [kernel] schedule + * bash: 300 [kernel] page_fault + * bash: 300 [kernel] sys_perf_event_open + */ +struct machine *setup_fake_machine(struct machines *machines); + +void print_hists_in(struct hists *hists); +void print_hists_out(struct hists *hists); + +#endif /* __PERF_TESTS__HISTS_COMMON_H__ */ diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c new file mode 100644 index 00000000000..0ac240db2e2 --- /dev/null +++ b/tools/perf/tests/hists_cumulate.c @@ -0,0 +1,726 @@ +#include "perf.h" +#include "util/debug.h" +#include "util/symbol.h" +#include "util/sort.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/machine.h" +#include "util/thread.h" +#include "util/parse-events.h" +#include "tests/tests.h" +#include "tests/hists_common.h" + +struct sample { + u32 pid; + u64 ip; + struct thread *thread; + struct map *map; + struct symbol *sym; +}; + +/* For the numbers, see hists_common.c */ +static struct sample fake_samples[] = { + /* perf [kernel] schedule() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [perf] cmd_record() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, }, + /* perf [libc] malloc() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, + /* perf [libc] free() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [kernel] page_fault() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, + /* bash [bash] main() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, + /* bash [bash] xmalloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, + /* bash [kernel] page_fault() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, +}; + +/* + * Will be casted to struct ip_callchain which has all 64 bit entries + * of nr and ips[]. 
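 *
 * (Editor's sketch, not part of the original patch: the cast works because
 *  ip_callchain is laid out as a length-prefixed array of 64-bit words,
 *  roughly
 *
 *      struct ip_callchain { u64 nr; u64 ips[0]; };
 *
 *  so fake_callchains[i][0] is read as nr and the remaining slots as ips[].)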
+ */ +static u64 fake_callchains[][10] = { + /* schedule => run_command => main */ + { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, + /* main */ + { 1, FAKE_IP_PERF_MAIN, }, + /* cmd_record => run_command => main */ + { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, + /* malloc => cmd_record => run_command => main */ + { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, + FAKE_IP_PERF_MAIN, }, + /* free => cmd_record => run_command => main */ + { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, + FAKE_IP_PERF_MAIN, }, + /* main */ + { 1, FAKE_IP_PERF_MAIN, }, + /* page_fault => sys_perf_event_open => run_command => main */ + { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, + FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, + /* main */ + { 1, FAKE_IP_BASH_MAIN, }, + /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */ + { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, + FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, }, + /* page_fault => malloc => main */ + { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, }, +}; + +static int add_hist_entries(struct hists *hists, struct machine *machine) +{ + struct addr_location al; + struct perf_evsel *evsel = hists_to_evsel(hists); + struct perf_sample sample = { .period = 1000, }; + size_t i; + + for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { + const union perf_event event = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + }; + struct hist_entry_iter iter = { + .hide_unresolved = false, + }; + + if (symbol_conf.cumulate_callchain) + iter.ops = &hist_iter_cumulative; + else + iter.ops = &hist_iter_normal; + + sample.pid = fake_samples[i].pid; + sample.tid = fake_samples[i].pid; + sample.ip = fake_samples[i].ip; + sample.callchain = (struct ip_callchain *)fake_callchains[i]; + + if (perf_event__preprocess_sample(&event, machine, &al, + &sample) < 0) + goto out; + + if (hist_entry_iter__add(&iter, &al, evsel, &sample, + PERF_MAX_STACK_DEPTH, NULL) < 0) + goto out; + + fake_samples[i].thread = al.thread; + fake_samples[i].map = al.map; + fake_samples[i].sym = al.sym; + } + + return TEST_OK; + +out: + pr_debug("Not enough memory for adding a hist entry\n"); + return TEST_FAIL; +} + +static void del_hist_entries(struct hists *hists) +{ + struct hist_entry *he; + struct rb_root *root_in; + struct rb_root *root_out; + struct rb_node *node; + + if (sort__need_collapse) + root_in = &hists->entries_collapsed; + else + root_in = hists->entries_in; + + root_out = &hists->entries; + + while (!RB_EMPTY_ROOT(root_out)) { + node = rb_first(root_out); + + he = rb_entry(node, struct hist_entry, rb_node); + rb_erase(node, root_out); + rb_erase(&he->rb_node_in, root_in); + hist_entry__free(he); + } +} + +typedef int (*test_fn_t)(struct perf_evsel *, struct machine *); + +#define COMM(he) (thread__comm_str(he->thread)) +#define DSO(he) (he->ms.map->dso->short_name) +#define SYM(he) (he->ms.sym->name) +#define CPU(he) (he->cpu) +#define PID(he) (he->thread->tid) +#define DEPTH(he) (he->callchain->max_depth) +#define CDSO(cl) (cl->ms.map->dso->short_name) +#define CSYM(cl) (cl->ms.sym->name) + +struct result { + u64 children; + u64 self; + const char *comm; + const char *dso; + const char *sym; +}; + +struct callchain_result { + u64 nr; + struct { + const char *dso; + const char *sym; + } node[10]; +}; + +static int do_test(struct hists *hists, struct result *expected, 
size_t nr_expected, + struct callchain_result *expected_callchain, size_t nr_callchain) +{ + char buf[32]; + size_t i, c; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + struct callchain_node *cnode; + struct callchain_list *clist; + + /* + * adding and deleting hist entries must be done outside of this + * function since TEST_ASSERT_VAL() returns in case of failure. + */ + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("use callchain: %d, cumulate callchain: %d\n", + symbol_conf.use_callchain, + symbol_conf.cumulate_callchain); + print_hists_out(hists); + } + + root = &hists->entries; + for (node = rb_first(root), i = 0; + node && (he = rb_entry(node, struct hist_entry, rb_node)); + node = rb_next(node), i++) { + scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i); + + TEST_ASSERT_VAL("Incorrect number of hist entry", + i < nr_expected); + TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self && + !strcmp(COMM(he), expected[i].comm) && + !strcmp(DSO(he), expected[i].dso) && + !strcmp(SYM(he), expected[i].sym)); + + if (symbol_conf.cumulate_callchain) + TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children); + + if (!symbol_conf.use_callchain) + continue; + + /* check callchain entries */ + root = &he->callchain->node.rb_root; + cnode = rb_entry(rb_first(root), struct callchain_node, rb_node); + + c = 0; + list_for_each_entry(clist, &cnode->val, list) { + scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c); + + TEST_ASSERT_VAL("Incorrect number of callchain entry", + c < expected_callchain[i].nr); + TEST_ASSERT_VAL(buf, + !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) && + !strcmp(CSYM(clist), expected_callchain[i].node[c].sym)); + c++; + } + /* TODO: handle multiple child nodes properly */ + TEST_ASSERT_VAL("Incorrect number of callchain entry", + c <= expected_callchain[i].nr); + } + TEST_ASSERT_VAL("Incorrect number of hist entry", + i == nr_expected); + TEST_ASSERT_VAL("Incorrect number of callchain entry", + !symbol_conf.use_callchain || nr_expected == nr_callchain); + return 0; +} + +/* NO callchain + NO children */ +static int test1(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Overhead Command Shared Object Symbol + * ======== ======= ============= ============== + * 20.00% perf perf [.] main + * 10.00% bash [kernel] [k] page_fault + * 10.00% bash bash [.] main + * 10.00% bash bash [.] xmalloc + * 10.00% perf [kernel] [k] page_fault + * 10.00% perf [kernel] [k] schedule + * 10.00% perf libc [.] free + * 10.00% perf libc [.] malloc + * 10.00% perf perf [.] 
cmd_record + */ + struct result expected[] = { + { 0, 2000, "perf", "perf", "main" }, + { 0, 1000, "bash", "[kernel]", "page_fault" }, + { 0, 1000, "bash", "bash", "main" }, + { 0, 1000, "bash", "bash", "xmalloc" }, + { 0, 1000, "perf", "[kernel]", "page_fault" }, + { 0, 1000, "perf", "[kernel]", "schedule" }, + { 0, 1000, "perf", "libc", "free" }, + { 0, 1000, "perf", "libc", "malloc" }, + { 0, 1000, "perf", "perf", "cmd_record" }, + }; + + symbol_conf.use_callchain = false; + symbol_conf.cumulate_callchain = false; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* callcain + NO children */ +static int test2(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Overhead Command Shared Object Symbol + * ======== ======= ============= ============== + * 20.00% perf perf [.] main + * | + * --- main + * + * 10.00% bash [kernel] [k] page_fault + * | + * --- page_fault + * malloc + * main + * + * 10.00% bash bash [.] main + * | + * --- main + * + * 10.00% bash bash [.] xmalloc + * | + * --- xmalloc + * malloc + * xmalloc <--- NOTE: there's a cycle + * malloc + * xmalloc + * main + * + * 10.00% perf [kernel] [k] page_fault + * | + * --- page_fault + * sys_perf_event_open + * run_command + * main + * + * 10.00% perf [kernel] [k] schedule + * | + * --- schedule + * run_command + * main + * + * 10.00% perf libc [.] free + * | + * --- free + * cmd_record + * run_command + * main + * + * 10.00% perf libc [.] malloc + * | + * --- malloc + * cmd_record + * run_command + * main + * + * 10.00% perf perf [.] 
cmd_record + * | + * --- cmd_record + * run_command + * main + * + */ + struct result expected[] = { + { 0, 2000, "perf", "perf", "main" }, + { 0, 1000, "bash", "[kernel]", "page_fault" }, + { 0, 1000, "bash", "bash", "main" }, + { 0, 1000, "bash", "bash", "xmalloc" }, + { 0, 1000, "perf", "[kernel]", "page_fault" }, + { 0, 1000, "perf", "[kernel]", "schedule" }, + { 0, 1000, "perf", "libc", "free" }, + { 0, 1000, "perf", "libc", "malloc" }, + { 0, 1000, "perf", "perf", "cmd_record" }, + }; + struct callchain_result expected_callchain[] = { + { + 1, { { "perf", "main" }, }, + }, + { + 3, { { "[kernel]", "page_fault" }, + { "libc", "malloc" }, + { "bash", "main" }, }, + }, + { + 1, { { "bash", "main" }, }, + }, + { + 6, { { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, }, + }, + { + 4, { { "[kernel]", "page_fault" }, + { "[kernel]", "sys_perf_event_open" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 3, { { "[kernel]", "schedule" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "free" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "malloc" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 3, { { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + }; + + symbol_conf.use_callchain = true; + symbol_conf.cumulate_callchain = false; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), + expected_callchain, ARRAY_SIZE(expected_callchain)); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* NO callchain + children */ +static int test3(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Children Self Command Shared Object Symbol + * ======== ======== ======= ============= ======================= + * 70.00% 20.00% perf perf [.] main + * 50.00% 0.00% perf perf [.] run_command + * 30.00% 10.00% bash bash [.] main + * 30.00% 10.00% perf perf [.] cmd_record + * 20.00% 0.00% bash libc [.] malloc + * 10.00% 10.00% bash [kernel] [k] page_fault + * 10.00% 10.00% perf [kernel] [k] schedule + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open + * 10.00% 10.00% perf [kernel] [k] page_fault + * 10.00% 10.00% perf libc [.] free + * 10.00% 10.00% perf libc [.] malloc + * 10.00% 10.00% bash bash [.] 
xmalloc + */ + struct result expected[] = { + { 7000, 2000, "perf", "perf", "main" }, + { 5000, 0, "perf", "perf", "run_command" }, + { 3000, 1000, "bash", "bash", "main" }, + { 3000, 1000, "perf", "perf", "cmd_record" }, + { 2000, 0, "bash", "libc", "malloc" }, + { 1000, 1000, "bash", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "[kernel]", "schedule" }, + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, + { 1000, 1000, "perf", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "libc", "free" }, + { 1000, 1000, "perf", "libc", "malloc" }, + { 1000, 1000, "bash", "bash", "xmalloc" }, + }; + + symbol_conf.use_callchain = false; + symbol_conf.cumulate_callchain = true; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* callchain + children */ +static int test4(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + /* + * expected output: + * + * Children Self Command Shared Object Symbol + * ======== ======== ======= ============= ======================= + * 70.00% 20.00% perf perf [.] main + * | + * --- main + * + * 50.00% 0.00% perf perf [.] run_command + * | + * --- run_command + * main + * + * 30.00% 10.00% bash bash [.] main + * | + * --- main + * + * 30.00% 10.00% perf perf [.] cmd_record + * | + * --- cmd_record + * run_command + * main + * + * 20.00% 0.00% bash libc [.] malloc + * | + * --- malloc + * | + * |--50.00%-- xmalloc + * | main + * --50.00%-- main + * + * 10.00% 10.00% bash [kernel] [k] page_fault + * | + * --- page_fault + * malloc + * main + * + * 10.00% 10.00% perf [kernel] [k] schedule + * | + * --- schedule + * run_command + * main + * + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open + * | + * --- sys_perf_event_open + * run_command + * main + * + * 10.00% 10.00% perf [kernel] [k] page_fault + * | + * --- page_fault + * sys_perf_event_open + * run_command + * main + * + * 10.00% 10.00% perf libc [.] free + * | + * --- free + * cmd_record + * run_command + * main + * + * 10.00% 10.00% perf libc [.] malloc + * | + * --- malloc + * cmd_record + * run_command + * main + * + * 10.00% 10.00% bash bash [.] 
xmalloc + * | + * --- xmalloc + * malloc + * xmalloc <--- NOTE: there's a cycle + * malloc + * xmalloc + * main + * + */ + struct result expected[] = { + { 7000, 2000, "perf", "perf", "main" }, + { 5000, 0, "perf", "perf", "run_command" }, + { 3000, 1000, "bash", "bash", "main" }, + { 3000, 1000, "perf", "perf", "cmd_record" }, + { 2000, 0, "bash", "libc", "malloc" }, + { 1000, 1000, "bash", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "[kernel]", "schedule" }, + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, + { 1000, 1000, "perf", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "libc", "free" }, + { 1000, 1000, "perf", "libc", "malloc" }, + { 1000, 1000, "bash", "bash", "xmalloc" }, + }; + struct callchain_result expected_callchain[] = { + { + 1, { { "perf", "main" }, }, + }, + { + 2, { { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 1, { { "bash", "main" }, }, + }, + { + 3, { { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, + { "bash", "main" }, }, + }, + { + 3, { { "[kernel]", "page_fault" }, + { "libc", "malloc" }, + { "bash", "main" }, }, + }, + { + 3, { { "[kernel]", "schedule" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 3, { { "[kernel]", "sys_perf_event_open" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "[kernel]", "page_fault" }, + { "[kernel]", "sys_perf_event_open" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "free" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 4, { { "libc", "malloc" }, + { "perf", "cmd_record" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { + 6, { { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, }, + }, + }; + + symbol_conf.use_callchain = true; + symbol_conf.cumulate_callchain = true; + + setup_sorting(); + callchain_register_param(&callchain_param); + + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + err = do_test(hists, expected, ARRAY_SIZE(expected), + expected_callchain, ARRAY_SIZE(expected_callchain)); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +int test__hists_cumulate(void) +{ + int err = TEST_FAIL; + struct machines machines; + struct machine *machine; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(); + size_t i; + test_fn_t testcases[] = { + test1, + test2, + test3, + test4, + }; + + TEST_ASSERT_VAL("No memory", evlist); + + err = parse_events(evlist, "cpu-clock"); + if (err) + goto out; + + machines__init(&machines); + + /* setup threads/dso/map/symbols also */ + machine = setup_fake_machine(&machines); + if (!machine) + goto out; + + if (verbose > 1) + machine__fprintf(machine, stderr); + + evsel = perf_evlist__first(evlist); + + for (i = 0; i < ARRAY_SIZE(testcases); i++) { + err = testcases[i](evsel, machine); + if (err < 0) + break; + } + +out: + /* tear down everything */ + perf_evlist__delete(evlist); + machines__exit(&machines); + + return err; +} diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c new file mode 100644 index 00000000000..821f581fd93 --- /dev/null +++ b/tools/perf/tests/hists_filter.c @@ -0,0 +1,289 @@ +#include "perf.h" +#include "util/debug.h" +#include "util/symbol.h" +#include "util/sort.h" +#include 
"util/evsel.h" +#include "util/evlist.h" +#include "util/machine.h" +#include "util/thread.h" +#include "util/parse-events.h" +#include "tests/tests.h" +#include "tests/hists_common.h" + +struct sample { + u32 pid; + u64 ip; + struct thread *thread; + struct map *map; + struct symbol *sym; +}; + +/* For the numbers, see hists_common.c */ +static struct sample fake_samples[] = { + /* perf [kernel] schedule() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [libc] malloc() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, /* will be merged */ + /* perf [perf] cmd_record() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, }, + /* perf [kernel] page_fault() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, + /* bash [bash] main() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, + /* bash [bash] xmalloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, + /* bash [libc] malloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, + /* bash [kernel] page_fault() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, +}; + +static int add_hist_entries(struct perf_evlist *evlist, + struct machine *machine __maybe_unused) +{ + struct perf_evsel *evsel; + struct addr_location al; + struct perf_sample sample = { .period = 100, }; + size_t i; + + /* + * each evsel will have 10 samples but the 4th sample + * (perf [perf] main) will be collapsed to an existing entry + * so total 9 entries will be in the tree. + */ + evlist__for_each(evlist, evsel) { + for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { + const union perf_event event = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + }; + struct hist_entry_iter iter = { + .ops = &hist_iter_normal, + .hide_unresolved = false, + }; + + /* make sure it has no filter at first */ + evsel->hists.thread_filter = NULL; + evsel->hists.dso_filter = NULL; + evsel->hists.symbol_filter_str = NULL; + + sample.pid = fake_samples[i].pid; + sample.tid = fake_samples[i].pid; + sample.ip = fake_samples[i].ip; + + if (perf_event__preprocess_sample(&event, machine, &al, + &sample) < 0) + goto out; + + if (hist_entry_iter__add(&iter, &al, evsel, &sample, + PERF_MAX_STACK_DEPTH, NULL) < 0) + goto out; + + fake_samples[i].thread = al.thread; + fake_samples[i].map = al.map; + fake_samples[i].sym = al.sym; + } + } + + return 0; + +out: + pr_debug("Not enough memory for adding a hist entry\n"); + return TEST_FAIL; +} + +int test__hists_filter(void) +{ + int err = TEST_FAIL; + struct machines machines; + struct machine *machine; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(); + + TEST_ASSERT_VAL("No memory", evlist); + + err = parse_events(evlist, "cpu-clock"); + if (err) + goto out; + err = parse_events(evlist, "task-clock"); + if (err) + goto out; + + /* default sort order (comm,dso,sym) will be used */ + if (setup_sorting() < 0) + goto out; + + machines__init(&machines); + + /* setup threads/dso/map/symbols also */ + machine = setup_fake_machine(&machines); + if (!machine) + goto out; + + if (verbose > 1) + machine__fprintf(machine, stderr); + + /* process sample events */ + err = add_hist_entries(evlist, machine); + if (err < 0) + goto out; + + evlist__for_each(evlist, evsel) { + struct hists *hists = &evsel->hists; + + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); 
+ + if (verbose > 2) { + pr_info("Normal histogram\n"); + print_hists_out(hists); + } + + TEST_ASSERT_VAL("Invalid nr samples", + hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); + TEST_ASSERT_VAL("Invalid nr hist entries", + hists->nr_entries == 9); + TEST_ASSERT_VAL("Invalid total period", + hists->stats.total_period == 1000); + TEST_ASSERT_VAL("Unmatched nr samples", + hists->stats.nr_events[PERF_RECORD_SAMPLE] == + hists->stats.nr_non_filtered_samples); + TEST_ASSERT_VAL("Unmatched nr hist entries", + hists->nr_entries == hists->nr_non_filtered_entries); + TEST_ASSERT_VAL("Unmatched total period", + hists->stats.total_period == + hists->stats.total_non_filtered_period); + + /* now applying thread filter for 'bash' */ + evsel->hists.thread_filter = fake_samples[9].thread; + hists__filter_by_thread(hists); + + if (verbose > 2) { + pr_info("Histogram for thread filter\n"); + print_hists_out(hists); + } + + /* normal stats should be invariant */ + TEST_ASSERT_VAL("Invalid nr samples", + hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); + TEST_ASSERT_VAL("Invalid nr hist entries", + hists->nr_entries == 9); + TEST_ASSERT_VAL("Invalid total period", + hists->stats.total_period == 1000); + + /* but filter stats are changed */ + TEST_ASSERT_VAL("Unmatched nr samples for thread filter", + hists->stats.nr_non_filtered_samples == 4); + TEST_ASSERT_VAL("Unmatched nr hist entries for thread filter", + hists->nr_non_filtered_entries == 4); + TEST_ASSERT_VAL("Unmatched total period for thread filter", + hists->stats.total_non_filtered_period == 400); + + /* remove thread filter first */ + evsel->hists.thread_filter = NULL; + hists__filter_by_thread(hists); + + /* now applying dso filter for 'kernel' */ + evsel->hists.dso_filter = fake_samples[0].map->dso; + hists__filter_by_dso(hists); + + if (verbose > 2) { + pr_info("Histogram for dso filter\n"); + print_hists_out(hists); + } + + /* normal stats should be invariant */ + TEST_ASSERT_VAL("Invalid nr samples", + hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); + TEST_ASSERT_VAL("Invalid nr hist entries", + hists->nr_entries == 9); + TEST_ASSERT_VAL("Invalid total period", + hists->stats.total_period == 1000); + + /* but filter stats are changed */ + TEST_ASSERT_VAL("Unmatched nr samples for dso filter", + hists->stats.nr_non_filtered_samples == 3); + TEST_ASSERT_VAL("Unmatched nr hist entries for dso filter", + hists->nr_non_filtered_entries == 3); + TEST_ASSERT_VAL("Unmatched total period for dso filter", + hists->stats.total_non_filtered_period == 300); + + /* remove dso filter first */ + evsel->hists.dso_filter = NULL; + hists__filter_by_dso(hists); + + /* + * now applying symbol filter for 'main'. Also note that + * there's 3 samples that have 'main' symbol but the 4th + * entry of fake_samples was collapsed already so it won't + * be counted as a separate entry but the sample count and + * total period will be remained. 
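		 *
		 * (Editor's note, not part of the original patch: concretely, the
		 *  two surviving entries are "perf [perf] main" with two samples
		 *  and "bash [bash] main" with one, which is why the checks below
		 *  expect 3 samples, 2 entries and a non-filtered period of 300.)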
+ */ + evsel->hists.symbol_filter_str = "main"; + hists__filter_by_symbol(hists); + + if (verbose > 2) { + pr_info("Histogram for symbol filter\n"); + print_hists_out(hists); + } + + /* normal stats should be invariant */ + TEST_ASSERT_VAL("Invalid nr samples", + hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); + TEST_ASSERT_VAL("Invalid nr hist entries", + hists->nr_entries == 9); + TEST_ASSERT_VAL("Invalid total period", + hists->stats.total_period == 1000); + + /* but filter stats are changed */ + TEST_ASSERT_VAL("Unmatched nr samples for symbol filter", + hists->stats.nr_non_filtered_samples == 3); + TEST_ASSERT_VAL("Unmatched nr hist entries for symbol filter", + hists->nr_non_filtered_entries == 2); + TEST_ASSERT_VAL("Unmatched total period for symbol filter", + hists->stats.total_non_filtered_period == 300); + + /* now applying all filters at once. */ + evsel->hists.thread_filter = fake_samples[1].thread; + evsel->hists.dso_filter = fake_samples[1].map->dso; + hists__filter_by_thread(hists); + hists__filter_by_dso(hists); + + if (verbose > 2) { + pr_info("Histogram for all filters\n"); + print_hists_out(hists); + } + + /* normal stats should be invariant */ + TEST_ASSERT_VAL("Invalid nr samples", + hists->stats.nr_events[PERF_RECORD_SAMPLE] == 10); + TEST_ASSERT_VAL("Invalid nr hist entries", + hists->nr_entries == 9); + TEST_ASSERT_VAL("Invalid total period", + hists->stats.total_period == 1000); + + /* but filter stats are changed */ + TEST_ASSERT_VAL("Unmatched nr samples for all filter", + hists->stats.nr_non_filtered_samples == 2); + TEST_ASSERT_VAL("Unmatched nr hist entries for all filter", + hists->nr_non_filtered_entries == 1); + TEST_ASSERT_VAL("Unmatched total period for all filter", + hists->stats.total_non_filtered_period == 200); + } + + + err = TEST_OK; + +out: + /* tear down everything */ + perf_evlist__delete(evlist); + reset_output_field(); + machines__exit(&machines); + + return err; +} diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c new file mode 100644 index 00000000000..d4b34b0f50a --- /dev/null +++ b/tools/perf/tests/hists_link.c @@ -0,0 +1,339 @@ +#include "perf.h" +#include "tests.h" +#include "debug.h" +#include "symbol.h" +#include "sort.h" +#include "evsel.h" +#include "evlist.h" +#include "machine.h" +#include "thread.h" +#include "parse-events.h" +#include "hists_common.h" + +struct sample { + u32 pid; + u64 ip; + struct thread *thread; + struct map *map; + struct symbol *sym; +}; + +/* For the numbers, see hists_common.c */ +static struct sample fake_common_samples[] = { + /* perf [kernel] schedule() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, + /* perf [perf] main() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [perf] cmd_record() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, }, + /* bash [bash] xmalloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, + /* bash [libc] malloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, +}; + +static struct sample fake_samples[][5] = { + { + /* perf [perf] run_command() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, }, + /* perf [libc] malloc() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, + /* perf [kernel] page_fault() */ + { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, + /* perf [kernel] sys_perf_event_open() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, }, + /* bash [libc] free() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, 
}, + }, + { + /* perf [libc] free() */ + { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, }, + /* bash [libc] malloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */ + /* bash [bash] xfee() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, }, + /* bash [libc] realloc() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, }, + /* bash [kernel] page_fault() */ + { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, + }, +}; + +static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine) +{ + struct perf_evsel *evsel; + struct addr_location al; + struct hist_entry *he; + struct perf_sample sample = { .period = 1, }; + size_t i = 0, k; + + /* + * each evsel will have 10 samples - 5 common and 5 distinct. + * However the second evsel also has a collapsed entry for + * "bash [libc] malloc" so total 9 entries will be in the tree. + */ + evlist__for_each(evlist, evsel) { + for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) { + const union perf_event event = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + }; + + sample.pid = fake_common_samples[k].pid; + sample.tid = fake_common_samples[k].pid; + sample.ip = fake_common_samples[k].ip; + if (perf_event__preprocess_sample(&event, machine, &al, + &sample) < 0) + goto out; + + he = __hists__add_entry(&evsel->hists, &al, NULL, + NULL, NULL, 1, 1, 0, true); + if (he == NULL) + goto out; + + fake_common_samples[k].thread = al.thread; + fake_common_samples[k].map = al.map; + fake_common_samples[k].sym = al.sym; + } + + for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) { + const union perf_event event = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + }; + + sample.pid = fake_samples[i][k].pid; + sample.tid = fake_samples[i][k].pid; + sample.ip = fake_samples[i][k].ip; + if (perf_event__preprocess_sample(&event, machine, &al, + &sample) < 0) + goto out; + + he = __hists__add_entry(&evsel->hists, &al, NULL, + NULL, NULL, 1, 1, 0, true); + if (he == NULL) + goto out; + + fake_samples[i][k].thread = al.thread; + fake_samples[i][k].map = al.map; + fake_samples[i][k].sym = al.sym; + } + i++; + } + + return 0; + +out: + pr_debug("Not enough memory for adding a hist entry\n"); + return -1; +} + +static int find_sample(struct sample *samples, size_t nr_samples, + struct thread *t, struct map *m, struct symbol *s) +{ + while (nr_samples--) { + if (samples->thread == t && samples->map == m && + samples->sym == s) + return 1; + samples++; + } + return 0; +} + +static int __validate_match(struct hists *hists) +{ + size_t count = 0; + struct rb_root *root; + struct rb_node *node; + + /* + * Only entries from fake_common_samples should have a pair. 
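	 *
	 * (Editor's note, not part of the original patch: hists__match(),
	 *  called from test__hists_link() below, pairs up entries that compare
	 *  equal across the two hists, so exactly
	 *  ARRAY_SIZE(fake_common_samples) paired entries are expected here.)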
+ */ + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node_in); + + if (hist_entry__has_pairs(he)) { + if (find_sample(fake_common_samples, + ARRAY_SIZE(fake_common_samples), + he->thread, he->ms.map, he->ms.sym)) { + count++; + } else { + pr_debug("Can't find the matched entry\n"); + return -1; + } + } + + node = rb_next(node); + } + + if (count != ARRAY_SIZE(fake_common_samples)) { + pr_debug("Invalid count for matched entries: %zd of %zd\n", + count, ARRAY_SIZE(fake_common_samples)); + return -1; + } + + return 0; +} + +static int validate_match(struct hists *leader, struct hists *other) +{ + return __validate_match(leader) || __validate_match(other); +} + +static int __validate_link(struct hists *hists, int idx) +{ + size_t count = 0; + size_t count_pair = 0; + size_t count_dummy = 0; + struct rb_root *root; + struct rb_node *node; + + /* + * Leader hists (idx = 0) will have dummy entries from other, + * and some entries will have no pair. However every entry + * in other hists should have (dummy) pair. + */ + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + node = rb_first(root); + while (node) { + struct hist_entry *he; + + he = rb_entry(node, struct hist_entry, rb_node_in); + + if (hist_entry__has_pairs(he)) { + if (!find_sample(fake_common_samples, + ARRAY_SIZE(fake_common_samples), + he->thread, he->ms.map, he->ms.sym) && + !find_sample(fake_samples[idx], + ARRAY_SIZE(fake_samples[idx]), + he->thread, he->ms.map, he->ms.sym)) { + count_dummy++; + } + count_pair++; + } else if (idx) { + pr_debug("A entry from the other hists should have pair\n"); + return -1; + } + + count++; + node = rb_next(node); + } + + /* + * Note that we have a entry collapsed in the other (idx = 1) hists. 
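	 *
	 * (Editor's note, not part of the original patch: because of that
	 *  collapsed entry, the leader hists is expected to pick up
	 *  ARRAY_SIZE(fake_samples[1]) - 1 dummy entries, which is exactly
	 *  what the idx == 0 branch below checks.)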
+ */ + if (idx == 0) { + if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) { + pr_debug("Invalid count of dummy entries: %zd of %zd\n", + count_dummy, ARRAY_SIZE(fake_samples[1]) - 1); + return -1; + } + if (count != count_pair + ARRAY_SIZE(fake_samples[0])) { + pr_debug("Invalid count of total leader entries: %zd of %zd\n", + count, count_pair + ARRAY_SIZE(fake_samples[0])); + return -1; + } + } else { + if (count != count_pair) { + pr_debug("Invalid count of total other entries: %zd of %zd\n", + count, count_pair); + return -1; + } + if (count_dummy > 0) { + pr_debug("Other hists should not have dummy entries: %zd\n", + count_dummy); + return -1; + } + } + + return 0; +} + +static int validate_link(struct hists *leader, struct hists *other) +{ + return __validate_link(leader, 0) || __validate_link(other, 1); +} + +int test__hists_link(void) +{ + int err = -1; + struct machines machines; + struct machine *machine = NULL; + struct perf_evsel *evsel, *first; + struct perf_evlist *evlist = perf_evlist__new(); + + if (evlist == NULL) + return -ENOMEM; + + err = parse_events(evlist, "cpu-clock"); + if (err) + goto out; + err = parse_events(evlist, "task-clock"); + if (err) + goto out; + + /* default sort order (comm,dso,sym) will be used */ + if (setup_sorting() < 0) + goto out; + + machines__init(&machines); + + /* setup threads/dso/map/symbols also */ + machine = setup_fake_machine(&machines); + if (!machine) + goto out; + + if (verbose > 1) + machine__fprintf(machine, stderr); + + /* process sample events */ + err = add_hist_entries(evlist, machine); + if (err < 0) + goto out; + + evlist__for_each(evlist, evsel) { + hists__collapse_resort(&evsel->hists, NULL); + + if (verbose > 2) + print_hists_in(&evsel->hists); + } + + first = perf_evlist__first(evlist); + evsel = perf_evlist__last(evlist); + + /* match common entries */ + hists__match(&first->hists, &evsel->hists); + err = validate_match(&first->hists, &evsel->hists); + if (err) + goto out; + + /* link common and/or dummy entries */ + hists__link(&first->hists, &evsel->hists); + err = validate_link(&first->hists, &evsel->hists); + if (err) + goto out; + + err = 0; + +out: + /* tear down everything */ + perf_evlist__delete(evlist); + reset_output_field(); + machines__exit(&machines); + + return err; +} diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c new file mode 100644 index 00000000000..e3bbd6c54c1 --- /dev/null +++ b/tools/perf/tests/hists_output.c @@ -0,0 +1,621 @@ +#include "perf.h" +#include "util/debug.h" +#include "util/symbol.h" +#include "util/sort.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/machine.h" +#include "util/thread.h" +#include "util/parse-events.h" +#include "tests/tests.h" +#include "tests/hists_common.h" + +struct sample { + u32 cpu; + u32 pid; + u64 ip; + struct thread *thread; + struct map *map; + struct symbol *sym; +}; + +/* For the numbers, see hists_common.c */ +static struct sample fake_samples[] = { + /* perf [kernel] schedule() */ + { .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, + /* perf [perf] main() */ + { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, + /* perf [perf] cmd_record() */ + { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, }, + /* perf [libc] malloc() */ + { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, + /* perf [libc] free() */ + { .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, }, + /* perf [perf] main() */ + { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = 
FAKE_IP_PERF_MAIN, }, + /* perf [kernel] page_fault() */ + { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, + /* bash [bash] main() */ + { .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, + /* bash [bash] xmalloc() */ + { .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, + /* bash [kernel] page_fault() */ + { .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, +}; + +static int add_hist_entries(struct hists *hists, struct machine *machine) +{ + struct addr_location al; + struct perf_evsel *evsel = hists_to_evsel(hists); + struct perf_sample sample = { .period = 100, }; + size_t i; + + for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { + const union perf_event event = { + .header = { + .misc = PERF_RECORD_MISC_USER, + }, + }; + struct hist_entry_iter iter = { + .ops = &hist_iter_normal, + .hide_unresolved = false, + }; + + sample.cpu = fake_samples[i].cpu; + sample.pid = fake_samples[i].pid; + sample.tid = fake_samples[i].pid; + sample.ip = fake_samples[i].ip; + + if (perf_event__preprocess_sample(&event, machine, &al, + &sample) < 0) + goto out; + + if (hist_entry_iter__add(&iter, &al, evsel, &sample, + PERF_MAX_STACK_DEPTH, NULL) < 0) + goto out; + + fake_samples[i].thread = al.thread; + fake_samples[i].map = al.map; + fake_samples[i].sym = al.sym; + } + + return TEST_OK; + +out: + pr_debug("Not enough memory for adding a hist entry\n"); + return TEST_FAIL; +} + +static void del_hist_entries(struct hists *hists) +{ + struct hist_entry *he; + struct rb_root *root_in; + struct rb_root *root_out; + struct rb_node *node; + + if (sort__need_collapse) + root_in = &hists->entries_collapsed; + else + root_in = hists->entries_in; + + root_out = &hists->entries; + + while (!RB_EMPTY_ROOT(root_out)) { + node = rb_first(root_out); + + he = rb_entry(node, struct hist_entry, rb_node); + rb_erase(node, root_out); + rb_erase(&he->rb_node_in, root_in); + hist_entry__free(he); + } +} + +typedef int (*test_fn_t)(struct perf_evsel *, struct machine *); + +#define COMM(he) (thread__comm_str(he->thread)) +#define DSO(he) (he->ms.map->dso->short_name) +#define SYM(he) (he->ms.sym->name) +#define CPU(he) (he->cpu) +#define PID(he) (he->thread->tid) + +/* default sort keys (no field) */ +static int test1(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + + field_order = NULL; + sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */ + + setup_sorting(); + + /* + * expected output: + * + * Overhead Command Shared Object Symbol + * ======== ======= ============= ============== + * 20.00% perf perf [.] main + * 10.00% bash [kernel] [k] page_fault + * 10.00% bash bash [.] main + * 10.00% bash bash [.] xmalloc + * 10.00% perf [kernel] [k] page_fault + * 10.00% perf [kernel] [k] schedule + * 10.00% perf libc [.] free + * 10.00% perf libc [.] malloc + * 10.00% perf perf [.] 
cmd_record + */ + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); + print_hists_out(hists); + } + + root = &evsel->hists.entries; + node = rb_first(root); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && + !strcmp(SYM(he), "main") && he->stat.period == 200); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") && + !strcmp(SYM(he), "page_fault") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") && + !strcmp(SYM(he), "main") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") && + !strcmp(SYM(he), "xmalloc") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") && + !strcmp(SYM(he), "page_fault") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") && + !strcmp(SYM(he), "schedule") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") && + !strcmp(SYM(he), "free") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") && + !strcmp(SYM(he), "malloc") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && + !strcmp(SYM(he), "cmd_record") && he->stat.period == 100); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* mixed fields and sort keys */ +static int test2(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + + field_order = "overhead,cpu"; + sort_order = "pid"; + + setup_sorting(); + + /* + * expected output: + * + * Overhead CPU Command: Pid + * ======== === ============= + * 30.00% 1 perf : 100 + * 10.00% 0 perf : 100 + * 10.00% 2 perf : 100 + * 20.00% 2 perf : 200 + * 10.00% 0 bash : 300 + * 10.00% 1 bash : 300 + * 10.00% 3 bash : 300 + */ + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); + print_hists_out(hists); + } + + root = &evsel->hists.entries; + node = rb_first(root); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300); + + 
node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* fields only (no sort key) */ +static int test3(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + + field_order = "comm,overhead,dso"; + sort_order = NULL; + + setup_sorting(); + + /* + * expected output: + * + * Command Overhead Shared Object + * ======= ======== ============= + * bash 20.00% bash + * bash 10.00% [kernel] + * perf 30.00% perf + * perf 20.00% [kernel] + * perf 20.00% libc + */ + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); + print_hists_out(hists); + } + + root = &evsel->hists.entries; + node = rb_first(root); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") && + he->stat.period == 200); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") && + he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && + he->stat.period == 300); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") && + he->stat.period == 200); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") && + he->stat.period == 200); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* handle duplicate 'dso' field */ +static int test4(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + + field_order = "dso,sym,comm,overhead,dso"; + sort_order = "sym"; + + setup_sorting(); + + /* + * expected output: + * + * Shared Object Symbol Command Overhead + * ============= ============== ======= ======== + * perf [.] cmd_record perf 10.00% + * libc [.] free perf 10.00% + * bash [.] main bash 10.00% + * perf [.] main perf 20.00% + * libc [.] malloc perf 10.00% + * [kernel] [k] page_fault bash 10.00% + * [kernel] [k] page_fault perf 10.00% + * [kernel] [k] schedule perf 10.00% + * bash [.] 
xmalloc bash 10.00% + */ + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); + print_hists_out(hists); + } + + root = &evsel->hists.entries; + node = rb_first(root); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") && + !strcmp(COMM(he), "perf") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") && + !strcmp(COMM(he), "perf") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") && + !strcmp(COMM(he), "bash") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") && + !strcmp(COMM(he), "perf") && he->stat.period == 200); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") && + !strcmp(COMM(he), "perf") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") && + !strcmp(COMM(he), "bash") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") && + !strcmp(COMM(he), "perf") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") && + !strcmp(COMM(he), "perf") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + !strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") && + !strcmp(COMM(he), "bash") && he->stat.period == 100); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +/* full sort keys w/o overhead field */ +static int test5(struct perf_evsel *evsel, struct machine *machine) +{ + int err; + struct hists *hists = &evsel->hists; + struct hist_entry *he; + struct rb_root *root; + struct rb_node *node; + + field_order = "cpu,pid,comm,dso,sym"; + sort_order = "dso,pid"; + + setup_sorting(); + + /* + * expected output: + * + * CPU Command: Pid Command Shared Object Symbol + * === ============= ======= ============= ============== + * 0 perf: 100 perf [kernel] [k] schedule + * 2 perf: 200 perf [kernel] [k] page_fault + * 1 bash: 300 bash [kernel] [k] page_fault + * 0 bash: 300 bash bash [.] xmalloc + * 3 bash: 300 bash bash [.] main + * 1 perf: 100 perf libc [.] malloc + * 2 perf: 100 perf libc [.] free + * 1 perf: 100 perf perf [.] cmd_record + * 1 perf: 100 perf perf [.] main + * 2 perf: 200 perf perf [.] 
main + */ + err = add_hist_entries(hists, machine); + if (err < 0) + goto out; + + hists__collapse_resort(hists, NULL); + hists__output_resort(hists); + + if (verbose > 2) { + pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); + print_hists_out(hists); + } + + root = &evsel->hists.entries; + node = rb_first(root); + he = rb_entry(node, struct hist_entry, rb_node); + + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 0 && PID(he) == 100 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") && + !strcmp(SYM(he), "schedule") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 2 && PID(he) == 200 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") && + !strcmp(SYM(he), "page_fault") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 1 && PID(he) == 300 && + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") && + !strcmp(SYM(he), "page_fault") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 0 && PID(he) == 300 && + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") && + !strcmp(SYM(he), "xmalloc") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 3 && PID(he) == 300 && + !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") && + !strcmp(SYM(he), "main") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 1 && PID(he) == 100 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") && + !strcmp(SYM(he), "malloc") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 2 && PID(he) == 100 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") && + !strcmp(SYM(he), "free") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 1 && PID(he) == 100 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && + !strcmp(SYM(he), "cmd_record") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 1 && PID(he) == 100 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && + !strcmp(SYM(he), "main") && he->stat.period == 100); + + node = rb_next(node); + he = rb_entry(node, struct hist_entry, rb_node); + TEST_ASSERT_VAL("Invalid hist entry", + CPU(he) == 2 && PID(he) == 200 && + !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && + !strcmp(SYM(he), "main") && he->stat.period == 100); + +out: + del_hist_entries(hists); + reset_output_field(); + return err; +} + +int test__hists_output(void) +{ + int err = TEST_FAIL; + struct machines machines; + struct machine *machine; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(); + size_t i; + test_fn_t testcases[] = { + test1, + test2, + test3, + test4, + test5, + }; + + TEST_ASSERT_VAL("No memory", evlist); + + err = parse_events(evlist, "cpu-clock"); + if (err) + goto out; + + machines__init(&machines); + + /* setup threads/dso/map/symbols also 
*/ + machine = setup_fake_machine(&machines); + if (!machine) + goto out; + + if (verbose > 1) + machine__fprintf(machine, stderr); + + evsel = perf_evlist__first(evlist); + + for (i = 0; i < ARRAY_SIZE(testcases); i++) { + err = testcases[i](evsel, machine); + if (err < 0) + break; + } + +out: + /* tear down everything */ + perf_evlist__delete(evlist); + machines__exit(&machines); + + return err; +} diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c new file mode 100644 index 00000000000..7a5ab7b0b8f --- /dev/null +++ b/tools/perf/tests/keep-tracking.c @@ -0,0 +1,152 @@ +#include <linux/types.h> +#include <unistd.h> +#include <sys/prctl.h> + +#include "parse-events.h" +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "cpumap.h" +#include "tests.h" + +#define CHECK__(x) { \ + while ((x) < 0) { \ + pr_debug(#x " failed!\n"); \ + goto out_err; \ + } \ +} + +#define CHECK_NOT_NULL__(x) { \ + while ((x) == NULL) { \ + pr_debug(#x " failed!\n"); \ + goto out_err; \ + } \ +} + +static int find_comm(struct perf_evlist *evlist, const char *comm) +{ + union perf_event *event; + int i, found; + + found = 0; + for (i = 0; i < evlist->nr_mmaps; i++) { + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + if (event->header.type == PERF_RECORD_COMM && + (pid_t)event->comm.pid == getpid() && + (pid_t)event->comm.tid == getpid() && + strcmp(event->comm.comm, comm) == 0) + found += 1; + perf_evlist__mmap_consume(evlist, i); + } + } + return found; +} + +/** + * test__keep_tracking - test using a dummy software event to keep tracking. + * + * This function implements a test that checks that tracking events continue + * when an event is disabled but a dummy software event is not disabled. If the + * test passes %0 is returned, otherwise %-1 is returned. + */ +int test__keep_tracking(void) +{ + struct record_opts opts = { + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 4000, + .target = { + .uses_mmap = true, + }, + }; + struct thread_map *threads = NULL; + struct cpu_map *cpus = NULL; + struct perf_evlist *evlist = NULL; + struct perf_evsel *evsel = NULL; + int found, err = -1; + const char *comm; + + threads = thread_map__new(-1, getpid(), UINT_MAX); + CHECK_NOT_NULL__(threads); + + cpus = cpu_map__new(NULL); + CHECK_NOT_NULL__(cpus); + + evlist = perf_evlist__new(); + CHECK_NOT_NULL__(evlist); + + perf_evlist__set_maps(evlist, cpus, threads); + + CHECK__(parse_events(evlist, "dummy:u")); + CHECK__(parse_events(evlist, "cycles:u")); + + perf_evlist__config(evlist, &opts); + + evsel = perf_evlist__first(evlist); + + evsel->attr.comm = 1; + evsel->attr.disabled = 1; + evsel->attr.enable_on_exec = 0; + + if (perf_evlist__open(evlist) < 0) { + fprintf(stderr, " (not supported)"); + err = 0; + goto out_err; + } + + CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false)); + + /* + * First, test that a 'comm' event can be found when the event is + * enabled. + */ + + perf_evlist__enable(evlist); + + comm = "Test COMM 1"; + CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0)); + + perf_evlist__disable(evlist); + + found = find_comm(evlist, comm); + if (found != 1) { + pr_debug("First time, failed to find tracking event.\n"); + goto out_err; + } + + /* + * Secondly, test that a 'comm' event can be found when the event is + * disabled with the dummy event still enabled. 
+ */ + + perf_evlist__enable(evlist); + + evsel = perf_evlist__last(evlist); + + CHECK__(perf_evlist__disable_event(evlist, evsel)); + + comm = "Test COMM 2"; + CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0)); + + perf_evlist__disable(evlist); + + found = find_comm(evlist, comm); + if (found != 1) { + pr_debug("Seconf time, failed to find tracking event.\n"); + goto out_err; + } + + err = 0; + +out_err: + if (evlist) { + perf_evlist__disable(evlist); + perf_evlist__delete(evlist); + } else { + cpu_map__delete(cpus); + thread_map__delete(threads); + } + + return err; +} diff --git a/tools/perf/tests/make b/tools/perf/tests/make new file mode 100644 index 00000000000..69a71ff84e0 --- /dev/null +++ b/tools/perf/tests/make @@ -0,0 +1,233 @@ +PERF := . +MK := Makefile + +include config/Makefile.arch + +# FIXME looks like x86 is the only arch running tests ;-) +# we need some IS_(32/64) flag to make this generic +ifeq ($(IS_X86_64),1) +lib = lib64 +else +lib = lib +endif + +has = $(shell which $1 2>/dev/null) + +# standard single make variable specified +make_clean_all := clean all +make_python_perf_so := python/perf.so +make_debug := DEBUG=1 +make_no_libperl := NO_LIBPERL=1 +make_no_libpython := NO_LIBPYTHON=1 +make_no_scripts := NO_LIBPYTHON=1 NO_LIBPERL=1 +make_no_newt := NO_NEWT=1 +make_no_slang := NO_SLANG=1 +make_no_gtk2 := NO_GTK2=1 +make_no_ui := NO_NEWT=1 NO_SLANG=1 NO_GTK2=1 +make_no_demangle := NO_DEMANGLE=1 +make_no_libelf := NO_LIBELF=1 +make_no_libunwind := NO_LIBUNWIND=1 +make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1 +make_no_backtrace := NO_BACKTRACE=1 +make_no_libnuma := NO_LIBNUMA=1 +make_no_libaudit := NO_LIBAUDIT=1 +make_no_libbionic := NO_LIBBIONIC=1 +make_tags := tags +make_cscope := cscope +make_help := help +make_doc := doc +make_perf_o := perf.o +make_util_map_o := util/map.o +make_util_pmu_bison_o := util/pmu-bison.o +make_install := install +make_install_bin := install-bin +make_install_doc := install-doc +make_install_man := install-man +make_install_html := install-html +make_install_info := install-info +make_install_pdf := install-pdf +make_static := LDFLAGS=-static + +# all the NO_* variable combined +make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1 +make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1 +make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1 +make_minimal += NO_LIBDW_DWARF_UNWIND=1 + +# $(run) contains all available tests +run := make_pure +run += make_clean_all +run += make_python_perf_so +run += make_debug +run += make_no_libperl +run += make_no_libpython +run += make_no_scripts +run += make_no_newt +run += make_no_slang +run += make_no_gtk2 +run += make_no_ui +run += make_no_demangle +run += make_no_libelf +run += make_no_libunwind +run += make_no_libdw_dwarf_unwind +run += make_no_backtrace +run += make_no_libnuma +run += make_no_libaudit +run += make_no_libbionic +run += make_help +run += make_doc +run += make_perf_o +run += make_util_map_o +run += make_util_pmu_bison_o +run += make_install +run += make_install_bin +# FIXME 'install-*' commented out till they're fixed +# run += make_install_doc +# run += make_install_man +# run += make_install_html +# run += make_install_info +# run += make_install_pdf +run += make_minimal +run += make_static + +ifneq ($(call has,ctags),) +run += make_tags +endif +ifneq ($(call has,cscope),) +run += make_cscope +endif + +# $(run_O) contains same portion of $(run) tests with '_O' attached +# to distinguish O=... 
tests +run_O := $(addsuffix _O,$(run)) + +# disable some tests for O=... +run_O := $(filter-out make_python_perf_so_O,$(run_O)) + +# define test for each compile as 'test_NAME' variable +# with the test itself as a value +test_make_tags = test -f tags +test_make_cscope = test -f cscope.out + +test_make_tags_O := $(test_make_tags) +test_make_cscope_O := $(test_make_cscope) + +test_ok := true +test_make_help := $(test_ok) +test_make_doc := $(test_ok) +test_make_help_O := $(test_ok) +test_make_doc_O := $(test_ok) + +test_make_python_perf_so := test -f $(PERF)/python/perf.so + +test_make_perf_o := test -f $(PERF)/perf.o +test_make_util_map_o := test -f $(PERF)/util/map.o +test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o + +define test_dest_files + for file in $(1); do \ + if [ ! -x $$TMP_DEST/$$file ]; then \ + echo " failed to find: $$file"; \ + fi \ + done +endef + +installed_files_bin := bin/perf +installed_files_bin += etc/bash_completion.d/perf +installed_files_bin += libexec/perf-core/perf-archive + +installed_files_plugins := $(lib)/traceevent/plugins/plugin_cfg80211.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_scsi.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_xen.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_function.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_sched_switch.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_mac80211.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_kvm.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_kmem.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_hrtimer.so +installed_files_plugins += $(lib)/traceevent/plugins/plugin_jbd2.so + +installed_files_all := $(installed_files_bin) +installed_files_all += $(installed_files_plugins) + +test_make_install := $(call test_dest_files,$(installed_files_all)) +test_make_install_O := $(call test_dest_files,$(installed_files_all)) +test_make_install_bin := $(call test_dest_files,$(installed_files_bin)) +test_make_install_bin_O := $(call test_dest_files,$(installed_files_bin)) + +# FIXME nothing gets installed +test_make_install_man := test -f $$TMP_DEST/share/man/man1/perf.1 +test_make_install_man_O := $(test_make_install_man) + +# FIXME nothing gets installed +test_make_install_doc := $(test_ok) +test_make_install_doc_O := $(test_ok) + +# FIXME nothing gets installed +test_make_install_html := $(test_ok) +test_make_install_html_O := $(test_ok) + +# FIXME nothing gets installed +test_make_install_info := $(test_ok) +test_make_install_info_O := $(test_ok) + +# FIXME nothing gets installed +test_make_install_pdf := $(test_ok) +test_make_install_pdf_O := $(test_ok) + +test_make_python_perf_so_O := test -f $$TMP_O/python/perf.so +test_make_perf_o_O := test -f $$TMP_O/perf.o +test_make_util_map_o_O := test -f $$TMP_O/util/map.o +test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o + +test_default = test -x $(PERF)/perf +test = $(if $(test_$1),$(test_$1),$(test_default)) + +test_default_O = test -x $$TMP_O/perf +test_O = $(if $(test_$1),$(test_$1),$(test_default_O)) + +all: + +ifdef DEBUG +d := $(info run $(run)) +d := $(info run_O $(run_O)) +endif + +MAKEFLAGS := --no-print-directory + +clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null) + +$(run): + $(call clean) + @TMP_DEST=$$(mktemp -d); \ + cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \ + echo "- $@: $$cmd" && echo $$cmd > $@ && \ + ( eval $$cmd ) >> $@ 2>&1; \ + echo " test: $(call 
test,$@)" >> $@ 2>&1; \ + $(call test,$@) && \ + rm -rf $@ $$TMP_DEST || (cat $@ ; false) + +$(run_O): + $(call clean) + @TMP_O=$$(mktemp -d); \ + TMP_DEST=$$(mktemp -d); \ + cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \ + echo "- $@: $$cmd" && echo $$cmd > $@ && \ + ( eval $$cmd ) >> $@ 2>&1 && \ + echo " test: $(call test_O,$@)" >> $@ 2>&1; \ + $(call test_O,$@) && \ + rm -rf $@ $$TMP_O $$TMP_DEST || (cat $@ ; false) + +tarpkg: + @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \ + echo "- $@: $$cmd" && echo $$cmd > $@ && \ + ( eval $$cmd ) >> $@ 2>&1 + + +all: $(run) $(run_O) tarpkg + @echo OK + +out: $(run_O) + @echo OK + +.PHONY: all $(run) $(run_O) tarpkg clean diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c new file mode 100644 index 00000000000..142263492f6 --- /dev/null +++ b/tools/perf/tests/mmap-basic.c @@ -0,0 +1,148 @@ +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "cpumap.h" +#include "tests.h" + +/* + * This test will generate random numbers of calls to some getpid syscalls, + * then establish an mmap for a group of events that are created to monitor + * the syscalls. + * + * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated + * sample.id field to map back to its respective perf_evsel instance. + * + * Then it checks if the number of syscalls reported as perf events by + * the kernel corresponds to the number of syscalls made. + */ +int test__basic_mmap(void) +{ + int err = -1; + union perf_event *event; + struct thread_map *threads; + struct cpu_map *cpus; + struct perf_evlist *evlist; + cpu_set_t cpu_set; + const char *syscall_names[] = { "getsid", "getppid", "getpgrp", + "getpgid", }; + pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp, + (void*)getpgid }; +#define nsyscalls ARRAY_SIZE(syscall_names) + unsigned int nr_events[nsyscalls], + expected_nr_events[nsyscalls], i, j; + struct perf_evsel *evsels[nsyscalls], *evsel; + + threads = thread_map__new(-1, getpid(), UINT_MAX); + if (threads == NULL) { + pr_debug("thread_map__new\n"); + return -1; + } + + cpus = cpu_map__new(NULL); + if (cpus == NULL) { + pr_debug("cpu_map__new\n"); + goto out_free_threads; + } + + CPU_ZERO(&cpu_set); + CPU_SET(cpus->map[0], &cpu_set); + sched_setaffinity(0, sizeof(cpu_set), &cpu_set); + if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { + pr_debug("sched_setaffinity() failed on CPU %d: %s ", + cpus->map[0], strerror(errno)); + goto out_free_cpus; + } + + evlist = perf_evlist__new(); + if (evlist == NULL) { + pr_debug("perf_evlist__new\n"); + goto out_free_cpus; + } + + perf_evlist__set_maps(evlist, cpus, threads); + + for (i = 0; i < nsyscalls; ++i) { + char name[64]; + + snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); + evsels[i] = perf_evsel__newtp("syscalls", name); + if (evsels[i] == NULL) { + pr_debug("perf_evsel__new\n"); + goto out_delete_evlist; + } + + evsels[i]->attr.wakeup_events = 1; + perf_evsel__set_sample_id(evsels[i], false); + + perf_evlist__add(evlist, evsels[i]); + + if (perf_evsel__open(evsels[i], cpus, threads) < 0) { + pr_debug("failed to open counter: %s, " + "tweak /proc/sys/kernel/perf_event_paranoid?\n", + strerror(errno)); + goto out_delete_evlist; + } + + nr_events[i] = 0; + expected_nr_events[i] = 1 + rand() % 127; + } + + if (perf_evlist__mmap(evlist, 128, true) < 0) { + pr_debug("failed to mmap events: %d (%s)\n", errno, + strerror(errno)); + goto out_delete_evlist; + } + + for (i = 0; i < nsyscalls; 
++i) + for (j = 0; j < expected_nr_events[i]; ++j) { + int foo = syscalls[i](); + ++foo; + } + + while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { + struct perf_sample sample; + + if (event->header.type != PERF_RECORD_SAMPLE) { + pr_debug("unexpected %s event\n", + perf_event__name(event->header.type)); + goto out_delete_evlist; + } + + err = perf_evlist__parse_sample(evlist, event, &sample); + if (err) { + pr_err("Can't parse sample, err = %d\n", err); + goto out_delete_evlist; + } + + err = -1; + evsel = perf_evlist__id2evsel(evlist, sample.id); + if (evsel == NULL) { + pr_debug("event with id %" PRIu64 + " doesn't map to an evsel\n", sample.id); + goto out_delete_evlist; + } + nr_events[evsel->idx]++; + perf_evlist__mmap_consume(evlist, 0); + } + + err = 0; + evlist__for_each(evlist, evsel) { + if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { + pr_debug("expected %d %s events, got %d\n", + expected_nr_events[evsel->idx], + perf_evsel__name(evsel), nr_events[evsel->idx]); + err = -1; + goto out_delete_evlist; + } + } + +out_delete_evlist: + perf_evlist__delete(evlist); + cpus = NULL; + threads = NULL; +out_free_cpus: + cpu_map__delete(cpus); +out_free_threads: + thread_map__delete(threads); + return err; +} diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c new file mode 100644 index 00000000000..4a456fef66c --- /dev/null +++ b/tools/perf/tests/mmap-thread-lookup.c @@ -0,0 +1,233 @@ +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/types.h> +#include <sys/mman.h> +#include <pthread.h> +#include <stdlib.h> +#include <stdio.h> +#include "debug.h" +#include "tests.h" +#include "machine.h" +#include "thread_map.h" +#include "symbol.h" +#include "thread.h" + +#define THREADS 4 + +static int go_away; + +struct thread_data { + pthread_t pt; + pid_t tid; + void *map; + int ready[2]; +}; + +static struct thread_data threads[THREADS]; + +static int thread_init(struct thread_data *td) +{ + void *map; + + map = mmap(NULL, page_size, + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_SHARED|MAP_ANONYMOUS, -1, 0); + + if (map == MAP_FAILED) { + perror("mmap failed"); + return -1; + } + + td->map = map; + td->tid = syscall(SYS_gettid); + + pr_debug("tid = %d, map = %p\n", td->tid, map); + return 0; +} + +static void *thread_fn(void *arg) +{ + struct thread_data *td = arg; + ssize_t ret; + int go; + + if (thread_init(td)) + return NULL; + + /* Signal thread_create thread is initialized. */ + ret = write(td->ready[1], &go, sizeof(int)); + if (ret != sizeof(int)) { + pr_err("failed to notify\n"); + return NULL; + } + + while (!go_away) { + /* Waiting for main thread to kill us. */ + usleep(100); + } + + munmap(td->map, page_size); + return NULL; +} + +static int thread_create(int i) +{ + struct thread_data *td = &threads[i]; + int err, go; + + if (pipe(td->ready)) + return -1; + + err = pthread_create(&td->pt, NULL, thread_fn, td); + if (!err) { + /* Wait for thread initialization. 
*/ + ssize_t ret = read(td->ready[0], &go, sizeof(int)); + err = ret != sizeof(int); + } + + close(td->ready[0]); + close(td->ready[1]); + return err; +} + +static int threads_create(void) +{ + struct thread_data *td0 = &threads[0]; + int i, err = 0; + + go_away = 0; + + /* 0 is main thread */ + if (thread_init(td0)) + return -1; + + for (i = 1; !err && i < THREADS; i++) + err = thread_create(i); + + return err; +} + +static int threads_destroy(void) +{ + struct thread_data *td0 = &threads[0]; + int i, err = 0; + + /* cleanup the main thread */ + munmap(td0->map, page_size); + + go_away = 1; + + for (i = 1; !err && i < THREADS; i++) + err = pthread_join(threads[i].pt, NULL); + + return err; +} + +typedef int (*synth_cb)(struct machine *machine); + +static int synth_all(struct machine *machine) +{ + return perf_event__synthesize_threads(NULL, + perf_event__process, + machine, 0); +} + +static int synth_process(struct machine *machine) +{ + struct thread_map *map; + int err; + + map = thread_map__new_by_pid(getpid()); + + err = perf_event__synthesize_thread_map(NULL, map, + perf_event__process, + machine, 0); + + thread_map__delete(map); + return err; +} + +static int mmap_events(synth_cb synth) +{ + struct machines machines; + struct machine *machine; + int err, i; + + /* + * The threads_create will not return before all threads + * are spawned and all created memory map. + * + * They will loop until threads_destroy is called, so we + * can safely run synthesizing function. + */ + TEST_ASSERT_VAL("failed to create threads", !threads_create()); + + machines__init(&machines); + machine = &machines.host; + + dump_trace = verbose > 1 ? 1 : 0; + + err = synth(machine); + + dump_trace = 0; + + TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy()); + TEST_ASSERT_VAL("failed to synthesize maps", !err); + + /* + * All data is synthesized, try to find map for each + * thread object. + */ + for (i = 0; i < THREADS; i++) { + struct thread_data *td = &threads[i]; + struct addr_location al; + struct thread *thread; + + thread = machine__findnew_thread(machine, getpid(), td->tid); + + pr_debug("looking for map %p\n", td->map); + + thread__find_addr_map(thread, machine, + PERF_RECORD_MISC_USER, MAP__FUNCTION, + (unsigned long) (td->map + 1), &al); + + if (!al.map) { + pr_debug("failed, couldn't find map\n"); + err = -1; + break; + } + + pr_debug("map %p, addr %" PRIx64 "\n", al.map, al.map->start); + } + + machine__delete_threads(machine); + machines__exit(&machines); + return err; +} + +/* + * This test creates 'THREADS' number of threads (including + * main thread) and each thread creates memory map. + * + * When threads are created, we synthesize them with both + * (separate tests): + * perf_event__synthesize_thread_map (process based) + * perf_event__synthesize_threads (global) + * + * We test we can find all memory maps via: + * thread__find_addr_map + * + * by using all thread objects. 
+ */ +int test__mmap_thread_lookup(void) +{ + /* perf_event__synthesize_threads synthesize */ + TEST_ASSERT_VAL("failed with synthesizing all", + !mmap_events(synth_all)); + + /* perf_event__synthesize_thread_map synthesize */ + TEST_ASSERT_VAL("failed with synthesizing process", + !mmap_events(synth_process)); + + return 0; +} diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c new file mode 100644 index 00000000000..5fecdbd2f5f --- /dev/null +++ b/tools/perf/tests/open-syscall-all-cpus.c @@ -0,0 +1,109 @@ +#include "evsel.h" +#include "tests.h" +#include "thread_map.h" +#include "cpumap.h" +#include "debug.h" + +int test__open_syscall_event_on_all_cpus(void) +{ + int err = -1, fd, cpu; + struct cpu_map *cpus; + struct perf_evsel *evsel; + unsigned int nr_open_calls = 111, i; + cpu_set_t cpu_set; + struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); + + if (threads == NULL) { + pr_debug("thread_map__new\n"); + return -1; + } + + cpus = cpu_map__new(NULL); + if (cpus == NULL) { + pr_debug("cpu_map__new\n"); + goto out_thread_map_delete; + } + + CPU_ZERO(&cpu_set); + + evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); + if (evsel == NULL) { + pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); + goto out_thread_map_delete; + } + + if (perf_evsel__open(evsel, cpus, threads) < 0) { + pr_debug("failed to open counter: %s, " + "tweak /proc/sys/kernel/perf_event_paranoid?\n", + strerror(errno)); + goto out_evsel_delete; + } + + for (cpu = 0; cpu < cpus->nr; ++cpu) { + unsigned int ncalls = nr_open_calls + cpu; + /* + * XXX eventually lift this restriction in a way that + * keeps perf building on older glibc installations + * without CPU_ALLOC. 1024 cpus in 2010 still seems + * a reasonable upper limit tho :-) + */ + if (cpus->map[cpu] >= CPU_SETSIZE) { + pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); + continue; + } + + CPU_SET(cpus->map[cpu], &cpu_set); + if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { + pr_debug("sched_setaffinity() failed on CPU %d: %s ", + cpus->map[cpu], + strerror(errno)); + goto out_close_fd; + } + for (i = 0; i < ncalls; ++i) { + fd = open("/etc/passwd", O_RDONLY); + close(fd); + } + CPU_CLR(cpus->map[cpu], &cpu_set); + } + + /* + * Here we need to explicitly preallocate the counts, as if + * we use the auto allocation it will allocate just for 1 cpu, + * as we start with cpu 0.
+ */ + if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { + pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); + goto out_close_fd; + } + + err = 0; + + for (cpu = 0; cpu < cpus->nr; ++cpu) { + unsigned int expected; + + if (cpus->map[cpu] >= CPU_SETSIZE) + continue; + + if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) { + pr_debug("perf_evsel__read_on_cpu\n"); + err = -1; + break; + } + + expected = nr_open_calls + cpu; + if (evsel->counts->cpu[cpu].val != expected) { + pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", + expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); + err = -1; + } + } + + perf_evsel__free_counts(evsel); +out_close_fd: + perf_evsel__close_fd(evsel, 1, threads->nr); +out_evsel_delete: + perf_evsel__delete(evsel); +out_thread_map_delete: + thread_map__delete(threads); + return err; +} diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c new file mode 100644 index 00000000000..c505ef2af24 --- /dev/null +++ b/tools/perf/tests/open-syscall-tp-fields.c @@ -0,0 +1,117 @@ +#include "perf.h" +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "tests.h" + +int test__syscall_open_tp_fields(void) +{ + struct record_opts opts = { + .target = { + .uid = UINT_MAX, + .uses_mmap = true, + }, + .no_buffering = true, + .freq = 1, + .mmap_pages = 256, + .raw_samples = true, + }; + const char *filename = "/etc/passwd"; + int flags = O_RDONLY | O_DIRECTORY; + struct perf_evlist *evlist = perf_evlist__new(); + struct perf_evsel *evsel; + int err = -1, i, nr_events = 0, nr_polls = 0; + + if (evlist == NULL) { + pr_debug("%s: perf_evlist__new\n", __func__); + goto out; + } + + evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); + if (evsel == NULL) { + pr_debug("%s: perf_evsel__newtp\n", __func__); + goto out_delete_evlist; + } + + perf_evlist__add(evlist, evsel); + + err = perf_evlist__create_maps(evlist, &opts.target); + if (err < 0) { + pr_debug("%s: perf_evlist__create_maps\n", __func__); + goto out_delete_evlist; + } + + perf_evsel__config(evsel, &opts); + + evlist->threads->map[0] = getpid(); + + err = perf_evlist__open(evlist); + if (err < 0) { + pr_debug("perf_evlist__open: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + err = perf_evlist__mmap(evlist, UINT_MAX, false); + if (err < 0) { + pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + perf_evlist__enable(evlist); + + /* + * Generate the event: + */ + open(filename, flags); + + while (1) { + int before = nr_events; + + for (i = 0; i < evlist->nr_mmaps; i++) { + union perf_event *event; + + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + const u32 type = event->header.type; + int tp_flags; + struct perf_sample sample; + + ++nr_events; + + if (type != PERF_RECORD_SAMPLE) { + perf_evlist__mmap_consume(evlist, i); + continue; + } + + err = perf_evsel__parse_sample(evsel, event, &sample); + if (err) { + pr_err("Can't parse sample, err = %d\n", err); + goto out_delete_evlist; + } + + tp_flags = perf_evsel__intval(evsel, &sample, "flags"); + + if (flags != tp_flags) { + pr_debug("%s: Expected flags=%#x, got %#x\n", + __func__, flags, tp_flags); + goto out_delete_evlist; + } + + goto out_ok; + } + } + + if (nr_events == before) + poll(evlist->pollfd, evlist->nr_fds, 10); + + if (++nr_polls > 5) { + pr_debug("%s: no events!\n", __func__); + goto out_delete_evlist; + } + } +out_ok: + err = 0; +out_delete_evlist: + perf_evlist__delete(evlist); 
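The per-CPU loop in test__open_syscall_event_on_all_cpus() above pins the generating thread with sched_setaffinity() before issuing its open() calls, so each batch of syscalls is attributed to the intended CPU's counter. A minimal standalone sketch of that affinity pattern, using only glibc and an arbitrary workload (the file path and call count are illustrative, not taken from the test):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t set;

	CPU_ZERO(&set);

	for (long cpu = 0; cpu < ncpus && cpu < CPU_SETSIZE; cpu++) {
		CPU_SET(cpu, &set);
		/* pin the current thread to 'cpu' */
		if (sched_setaffinity(0, sizeof(set), &set) < 0) {
			perror("sched_setaffinity");
			return 1;
		}

		/* any syscalls issued here now run on 'cpu' */
		for (int i = 0; i < 100; i++) {
			int fd = open("/etc/passwd", O_RDONLY);

			if (fd >= 0)
				close(fd);
		}

		/* drop this CPU from the mask before pinning to the next one */
		CPU_CLR(cpu, &set);
	}

	return 0;
}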
+out: + return err; +} diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c new file mode 100644 index 00000000000..c1dc7d25f38 --- /dev/null +++ b/tools/perf/tests/open-syscall.c @@ -0,0 +1,55 @@ +#include "thread_map.h" +#include "evsel.h" +#include "debug.h" +#include "tests.h" + +int test__open_syscall_event(void) +{ + int err = -1, fd; + struct perf_evsel *evsel; + unsigned int nr_open_calls = 111, i; + struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); + + if (threads == NULL) { + pr_debug("thread_map__new\n"); + return -1; + } + + evsel = perf_evsel__newtp("syscalls", "sys_enter_open"); + if (evsel == NULL) { + pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); + goto out_thread_map_delete; + } + + if (perf_evsel__open_per_thread(evsel, threads) < 0) { + pr_debug("failed to open counter: %s, " + "tweak /proc/sys/kernel/perf_event_paranoid?\n", + strerror(errno)); + goto out_evsel_delete; + } + + for (i = 0; i < nr_open_calls; ++i) { + fd = open("/etc/passwd", O_RDONLY); + close(fd); + } + + if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) { + pr_debug("perf_evsel__read_on_cpu\n"); + goto out_close_fd; + } + + if (evsel->counts->cpu[0].val != nr_open_calls) { + pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", + nr_open_calls, evsel->counts->cpu[0].val); + goto out_close_fd; + } + + err = 0; +out_close_fd: + perf_evsel__close_fd(evsel, 1, threads->nr); +out_evsel_delete: + perf_evsel__delete(evsel); +out_thread_map_delete: + thread_map__delete(threads); + return err; +} diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/tests/parse-events.c index 516ecd9ddd6..deba66955f8 100644 --- a/tools/perf/util/parse-events-test.c +++ b/tools/perf/tests/parse-events.c @@ -2,16 +2,10 @@ #include "parse-events.h" #include "evsel.h" #include "evlist.h" -#include "sysfs.h" -#include "../../../include/linux/hw_breakpoint.h" - -#define TEST_ASSERT_VAL(text, cond) \ -do { \ - if (!(cond)) { \ - pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ - return -1; \ - } \ -} while (0) +#include <api/fs/fs.h> +#include <api/fs/debugfs.h> +#include "tests.h" +#include <linux/hw_breakpoint.h> #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) @@ -21,6 +15,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist) struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); @@ -33,8 +28,9 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) struct perf_evsel *evsel; TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); + TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", @@ -205,7 +201,7 @@ test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1); - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong 
exclude_kernel", @@ -443,12 +439,29 @@ static int test__checkevent_pmu_name(struct perf_evlist *evlist) return 0; } +static int test__checkevent_pmu_events(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); + TEST_ASSERT_VAL("wrong exclude_user", + !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", + evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong pinned", !evsel->attr.pinned); + + return 0; +} + static int test__checkterms_simple(struct list_head *terms) { - struct parse_events__term *term; + struct parse_events_term *term; /* config=10 */ - term = list_entry(terms->next, struct parse_events__term, list); + term = list_entry(terms->next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG); TEST_ASSERT_VAL("wrong type val", @@ -457,7 +470,7 @@ static int test__checkterms_simple(struct list_head *terms) TEST_ASSERT_VAL("wrong config", !term->config); /* config1 */ - term = list_entry(term->list.next, struct parse_events__term, list); + term = list_entry(term->list.next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1); TEST_ASSERT_VAL("wrong type val", @@ -466,7 +479,7 @@ static int test__checkterms_simple(struct list_head *terms) TEST_ASSERT_VAL("wrong config", !term->config); /* config2=3 */ - term = list_entry(term->list.next, struct parse_events__term, list); + term = list_entry(term->list.next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2); TEST_ASSERT_VAL("wrong type val", @@ -475,7 +488,7 @@ static int test__checkterms_simple(struct list_head *terms) TEST_ASSERT_VAL("wrong config", !term->config); /* umask=1*/ - term = list_entry(term->list.next, struct parse_events__term, list); + term = list_entry(term->list.next, struct parse_events_term, list); TEST_ASSERT_VAL("wrong type term", term->type_term == PARSE_EVENTS__TERM_TYPE_USER); TEST_ASSERT_VAL("wrong type val", @@ -491,6 +504,7 @@ static int test__group1(struct perf_evlist *evlist) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); /* instructions:k */ evsel = leader = perf_evlist__first(evlist); @@ -503,7 +517,10 @@ static int test__group1(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* cycles:upp */ evsel = perf_evsel__next(evsel); @@ -518,6 +535,8 @@ static int test__group1(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); TEST_ASSERT_VAL("wrong leader", evsel->leader == 
leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); return 0; } @@ -527,6 +546,7 @@ static int test__group2(struct perf_evlist *evlist) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); /* faults + :ku modifier */ evsel = leader = perf_evlist__first(evlist); @@ -539,7 +559,10 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* cache-references + :u modifier */ evsel = perf_evsel__next(evsel); @@ -549,10 +572,12 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); - TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* cycles:k */ evsel = perf_evsel__next(evsel); @@ -565,7 +590,8 @@ static int test__group2(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); return 0; } @@ -575,6 +601,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); /* group1 syscalls:sys_enter_open:H */ evsel = leader = perf_evlist__first(evlist); @@ -588,9 +615,12 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); TEST_ASSERT_VAL("wrong group name", !strcmp(leader->group_name, "group1")); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* group1 cycles:kppp */ evsel = perf_evsel__next(evsel); @@ -606,6 +636,8 @@ static int test__group3(struct perf_evlist *evlist 
__maybe_unused) TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* group2 cycles + G modifier */ evsel = leader = perf_evsel__next(evsel); @@ -618,9 +650,12 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); TEST_ASSERT_VAL("wrong group name", !strcmp(leader->group_name, "group2")); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* group2 1:3 + G modifier */ evsel = perf_evsel__next(evsel); @@ -633,6 +668,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* instructions:u */ evsel = perf_evsel__next(evsel); @@ -645,7 +682,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); return 0; } @@ -655,6 +693,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); /* cycles:u + p */ evsel = leader = perf_evlist__first(evlist); @@ -669,7 +708,10 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* instructions:kp + p */ evsel = perf_evsel__next(evsel); @@ -684,6 +726,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); return 0; } @@ -693,6 +737,7 @@ static int 
test__group5(struct perf_evlist *evlist __maybe_unused) struct perf_evsel *evsel, *leader; TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); /* cycles + G */ evsel = leader = perf_evlist__first(evlist); @@ -706,7 +751,10 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* instructions + G */ evsel = perf_evsel__next(evsel); @@ -720,6 +768,8 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* cycles:G */ evsel = leader = perf_evsel__next(evsel); @@ -733,7 +783,10 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong group name", !evsel->group_name); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read); /* instructions:G */ evsel = perf_evsel__next(evsel); @@ -747,6 +800,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); /* cycles */ evsel = perf_evsel__next(evsel); @@ -759,207 +813,648 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); - TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + + return 0; +} + +static int test__group_gh1(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles + :H group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", 
!evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:G + :H group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); return 0; } -struct test__event_st { +static int test__group_gh2(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles + :G group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:H + :G group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + + return 0; +} + +static int test__group_gh3(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles:G + :u group modifier */ + evsel = leader = 
perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:H + :u group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1); + + return 0; +} + +static int test__group_gh4(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); + + /* cycles:G + :uG group modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); + TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2); + TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0); + + /* cache-misses:H + :uG group modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group_idx", 
perf_evsel__group_idx(evsel) == 1); + + return 0; +} + +static int test__leader_sample1(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); + + /* cycles - sampling group leader */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); + + /* cache-misses - not sampling */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); + + /* branch-misses - not sampling */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_BRANCH_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); + + return 0; +} + +static int test__leader_sample2(struct perf_evlist *evlist __maybe_unused) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + + /* instructions - sampling group leader */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", 
!evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); + + /* branch-misses - not sampling */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_BRANCH_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read); + + return 0; +} + +static int test__checkevent_pinned_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong pinned", evsel->attr.pinned); + + return test__checkevent_symbolic_name(evlist); +} + +static int test__pinned_group(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); + + /* cycles - group leader */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong pinned", evsel->attr.pinned); + + /* cache-misses - can not be pinned, but will go on with the leader */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong pinned", !evsel->attr.pinned); + + /* branch-misses - ditto */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_BRANCH_MISSES == evsel->attr.config); + TEST_ASSERT_VAL("wrong pinned", !evsel->attr.pinned); + + return 0; +} + +static int count_tracepoints(void) +{ + char events_path[PATH_MAX]; + struct dirent *events_ent; + DIR *events_dir; + int cnt = 0; + + scnprintf(events_path, PATH_MAX, "%s/tracing/events", + debugfs_find_mountpoint()); + + events_dir = opendir(events_path); + + TEST_ASSERT_VAL("Can't open events dir", events_dir); + + while ((events_ent = readdir(events_dir))) { + char sys_path[PATH_MAX]; + struct dirent *sys_ent; + DIR *sys_dir; + + if (!strcmp(events_ent->d_name, ".") + || !strcmp(events_ent->d_name, "..") + || !strcmp(events_ent->d_name, "enable") + || !strcmp(events_ent->d_name, "header_event") + || !strcmp(events_ent->d_name, "header_page")) + continue; + + scnprintf(sys_path, PATH_MAX, "%s/%s", + events_path, events_ent->d_name); + + sys_dir = opendir(sys_path); + TEST_ASSERT_VAL("Can't open sys dir", sys_dir); + + while ((sys_ent = readdir(sys_dir))) { + if (!strcmp(sys_ent->d_name, ".") + || 
!strcmp(sys_ent->d_name, "..") + || !strcmp(sys_ent->d_name, "enable") + || !strcmp(sys_ent->d_name, "filter")) + continue; + + cnt++; + } + + closedir(sys_dir); + } + + closedir(events_dir); + return cnt; +} + +static int test__all_tracepoints(struct perf_evlist *evlist) +{ + TEST_ASSERT_VAL("wrong events count", + count_tracepoints() == evlist->nr_entries); + + return test__checkevent_tracepoint_multi(evlist); +} + +struct evlist_test { const char *name; __u32 type; + const int id; int (*check)(struct perf_evlist *evlist); }; -static struct test__event_st test__events[] = { - [0] = { +static struct evlist_test test__events[] = { + { .name = "syscalls:sys_enter_open", .check = test__checkevent_tracepoint, + .id = 0, }, - [1] = { + { .name = "syscalls:*", .check = test__checkevent_tracepoint_multi, + .id = 1, }, - [2] = { + { .name = "r1a", .check = test__checkevent_raw, + .id = 2, }, - [3] = { + { .name = "1:1", .check = test__checkevent_numeric, + .id = 3, }, - [4] = { + { .name = "instructions", .check = test__checkevent_symbolic_name, + .id = 4, }, - [5] = { + { .name = "cycles/period=100000,config2/", .check = test__checkevent_symbolic_name_config, + .id = 5, }, - [6] = { + { .name = "faults", .check = test__checkevent_symbolic_alias, + .id = 6, }, - [7] = { + { .name = "L1-dcache-load-miss", .check = test__checkevent_genhw, + .id = 7, }, - [8] = { + { .name = "mem:0", .check = test__checkevent_breakpoint, + .id = 8, }, - [9] = { + { .name = "mem:0:x", .check = test__checkevent_breakpoint_x, + .id = 9, }, - [10] = { + { .name = "mem:0:r", .check = test__checkevent_breakpoint_r, + .id = 10, }, - [11] = { + { .name = "mem:0:w", .check = test__checkevent_breakpoint_w, + .id = 11, }, - [12] = { + { .name = "syscalls:sys_enter_open:k", .check = test__checkevent_tracepoint_modifier, + .id = 12, }, - [13] = { + { .name = "syscalls:*:u", .check = test__checkevent_tracepoint_multi_modifier, + .id = 13, }, - [14] = { + { .name = "r1a:kp", .check = test__checkevent_raw_modifier, + .id = 14, }, - [15] = { + { .name = "1:1:hp", .check = test__checkevent_numeric_modifier, + .id = 15, }, - [16] = { + { .name = "instructions:h", .check = test__checkevent_symbolic_name_modifier, + .id = 16, }, - [17] = { + { .name = "faults:u", .check = test__checkevent_symbolic_alias_modifier, + .id = 17, }, - [18] = { + { .name = "L1-dcache-load-miss:kp", .check = test__checkevent_genhw_modifier, + .id = 18, }, - [19] = { + { .name = "mem:0:u", .check = test__checkevent_breakpoint_modifier, + .id = 19, }, - [20] = { + { .name = "mem:0:x:k", .check = test__checkevent_breakpoint_x_modifier, + .id = 20, }, - [21] = { + { .name = "mem:0:r:hp", .check = test__checkevent_breakpoint_r_modifier, + .id = 21, }, - [22] = { + { .name = "mem:0:w:up", .check = test__checkevent_breakpoint_w_modifier, + .id = 22, }, - [23] = { + { .name = "r1,syscalls:sys_enter_open:k,1:1:hp", .check = test__checkevent_list, + .id = 23, }, - [24] = { + { .name = "instructions:G", .check = test__checkevent_exclude_host_modifier, + .id = 24, }, - [25] = { + { .name = "instructions:H", .check = test__checkevent_exclude_guest_modifier, + .id = 25, }, - [26] = { + { .name = "mem:0:rw", .check = test__checkevent_breakpoint_rw, + .id = 26, }, - [27] = { + { .name = "mem:0:rw:kp", .check = test__checkevent_breakpoint_rw_modifier, + .id = 27, }, - [28] = { + { .name = "{instructions:k,cycles:upp}", .check = test__group1, + .id = 28, }, - [29] = { + { .name = "{faults:k,cache-references}:u,cycles:k", .check = test__group2, + .id = 29, }, - [30] = { + { 
.name = "group1{syscalls:sys_enter_open:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u", .check = test__group3, + .id = 30, }, - [31] = { + { .name = "{cycles:u,instructions:kp}:p", .check = test__group4, + .id = 31, }, - [32] = { + { .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", .check = test__group5, + .id = 32, + }, + { + .name = "*:*", + .check = test__all_tracepoints, + .id = 33, + }, + { + .name = "{cycles,cache-misses:G}:H", + .check = test__group_gh1, + .id = 34, }, + { + .name = "{cycles,cache-misses:H}:G", + .check = test__group_gh2, + .id = 35, + }, + { + .name = "{cycles:G,cache-misses:H}:u", + .check = test__group_gh3, + .id = 36, + }, + { + .name = "{cycles:G,cache-misses:H}:uG", + .check = test__group_gh4, + .id = 37, + }, + { + .name = "{cycles,cache-misses,branch-misses}:S", + .check = test__leader_sample1, + .id = 38, + }, + { + .name = "{instructions,branch-misses}:Su", + .check = test__leader_sample2, + .id = 39, + }, + { + .name = "instructions:uDp", + .check = test__checkevent_pinned_modifier, + .id = 40, + }, + { + .name = "{cycles,cache-misses,branch-misses}:D", + .check = test__pinned_group, + .id = 41, + }, +#if defined(__s390x__) + { + .name = "kvm-s390:kvm_s390_create_vm", + .check = test__checkevent_tracepoint, + .id = 100, + }, +#endif }; -static struct test__event_st test__events_pmu[] = { - [0] = { +static struct evlist_test test__events_pmu[] = { + { .name = "cpu/config=10,config1,config2=3,period=1000/u", .check = test__checkevent_pmu, + .id = 0, }, - [1] = { + { .name = "cpu/config=1,name=krava/u,cpu/config=2/u", .check = test__checkevent_pmu_name, + .id = 1, }, }; -struct test__term { +struct terms_test { const char *str; __u32 type; int (*check)(struct list_head *terms); }; -static struct test__term test__terms[] = { +static struct terms_test test__terms[] = { [0] = { .str = "config=10,config1,config2=3,umask=1", .check = test__checkterms_simple, }, }; -static int test_event(struct test__event_st *e) +static int test_event(struct evlist_test *e) { struct perf_evlist *evlist; int ret; - evlist = perf_evlist__new(NULL, NULL); + evlist = perf_evlist__new(); if (evlist == NULL) return -ENOMEM; - ret = parse_events(evlist, e->name, 0); + ret = parse_events(evlist, e->name); if (ret) { pr_debug("failed to parse event '%s', err %d\n", e->name, ret); - return ret; + } else { + ret = e->check(evlist); } - - ret = e->check(evlist); + perf_evlist__delete(evlist); return ret; } -static int test_events(struct test__event_st *events, unsigned cnt) +static int test_events(struct evlist_test *events, unsigned cnt) { int ret1, ret2 = 0; unsigned i; for (i = 0; i < cnt; i++) { - struct test__event_st *e = &events[i]; + struct evlist_test *e = &events[i]; - pr_debug("running test %d '%s'\n", i, e->name); + pr_debug("running test %d '%s'\n", e->id, e->name); ret1 = test_event(e); if (ret1) ret2 = ret1; @@ -968,37 +1463,33 @@ static int test_events(struct test__event_st *events, unsigned cnt) return ret2; } -static int test_term(struct test__term *t) +static int test_term(struct terms_test *t) { - struct list_head *terms; + struct list_head terms; int ret; - terms = malloc(sizeof(*terms)); - if (!terms) - return -ENOMEM; + INIT_LIST_HEAD(&terms); - INIT_LIST_HEAD(terms); - - ret = parse_events_terms(terms, t->str); + ret = parse_events_terms(&terms, t->str); if (ret) { pr_debug("failed to parse terms '%s', err %d\n", t->str , ret); return ret; } - ret = t->check(terms); - parse_events__free_terms(terms); + ret = t->check(&terms); + 
parse_events__free_terms(&terms); return ret; } -static int test_terms(struct test__term *terms, unsigned cnt) +static int test_terms(struct terms_test *terms, unsigned cnt) { int ret = 0; unsigned i; for (i = 0; i < cnt; i++) { - struct test__term *t = &terms[i]; + struct terms_test *t = &terms[i]; pr_debug("running test %d '%s'\n", i, t->str); ret = test_term(t); @@ -1016,7 +1507,7 @@ static int test_pmu(void) int ret; snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/format/", - sysfs_find_mountpoint()); + sysfs__mountpoint()); ret = stat(path, &st); if (ret) @@ -1024,7 +1515,52 @@ static int test_pmu(void) return !ret; } -int parse_events__test(void) +static int test_pmu_events(void) +{ + struct stat st; + char path[PATH_MAX]; + struct dirent *ent; + DIR *dir; + int ret; + + snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/events/", + sysfs__mountpoint()); + + ret = stat(path, &st); + if (ret) { + pr_debug("omitting PMU cpu events tests\n"); + return 0; + } + + dir = opendir(path); + if (!dir) { + pr_debug("can't open pmu event dir"); + return -1; + } + + while (!ret && (ent = readdir(dir))) { +#define MAX_NAME 100 + struct evlist_test e; + char name[MAX_NAME]; + + if (!strcmp(ent->d_name, ".") || + !strcmp(ent->d_name, "..")) + continue; + + snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name); + + e.name = name; + e.check = test__checkevent_pmu_events; + + ret = test_event(&e); +#undef MAX_NAME + } + + closedir(dir); + return ret; +} + +int test__parse_events(void) { int ret1, ret2 = 0; @@ -1040,6 +1576,12 @@ do { \ if (test_pmu()) TEST_EVENTS(test__events_pmu); + if (test_pmu()) { + int ret = test_pmu_events(); + if (ret) + return ret; + } + ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms)); if (!ret2) ret2 = ret1; diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c new file mode 100644 index 00000000000..905019f9b74 --- /dev/null +++ b/tools/perf/tests/parse-no-sample-id-all.c @@ -0,0 +1,108 @@ +#include <linux/types.h> +#include <stddef.h> + +#include "tests.h" + +#include "event.h" +#include "evlist.h" +#include "header.h" +#include "util.h" + +static int process_event(struct perf_evlist **pevlist, union perf_event *event) +{ + struct perf_sample sample; + + if (event->header.type == PERF_RECORD_HEADER_ATTR) { + if (perf_event__process_attr(NULL, event, pevlist)) { + pr_debug("perf_event__process_attr failed\n"); + return -1; + } + return 0; + } + + if (event->header.type >= PERF_RECORD_USER_TYPE_START) + return -1; + + if (!*pevlist) + return -1; + + if (perf_evlist__parse_sample(*pevlist, event, &sample)) { + pr_debug("perf_evlist__parse_sample failed\n"); + return -1; + } + + return 0; +} + +static int process_events(union perf_event **events, size_t count) +{ + struct perf_evlist *evlist = NULL; + int err = 0; + size_t i; + + for (i = 0; i < count && !err; i++) + err = process_event(&evlist, events[i]); + + if (evlist) + perf_evlist__delete(evlist); + + return err; +} + +struct test_attr_event { + struct attr_event attr; + u64 id; +}; + +/** + * test__parse_no_sample_id_all - test parsing with no sample_id_all bit set. + * + * This function tests parsing data produced on kernel's that do not support the + * sample_id_all bit. Without the sample_id_all bit, non-sample events (such as + * mmap events) do not have an id sample appended, and consequently logic + * designed to determine the id will not work. 
That case happens when there is + * more than one selected event, so this test processes three events: 2 + * attributes representing the selected events and one mmap event. + * + * Return: %0 on success, %-1 if the test fails. + */ +int test__parse_no_sample_id_all(void) +{ + int err; + + struct test_attr_event event1 = { + .attr = { + .header = { + .type = PERF_RECORD_HEADER_ATTR, + .size = sizeof(struct test_attr_event), + }, + }, + .id = 1, + }; + struct test_attr_event event2 = { + .attr = { + .header = { + .type = PERF_RECORD_HEADER_ATTR, + .size = sizeof(struct test_attr_event), + }, + }, + .id = 2, + }; + struct mmap_event event3 = { + .header = { + .type = PERF_RECORD_MMAP, + .size = sizeof(struct mmap_event), + }, + }; + union perf_event *events[] = { + (union perf_event *)&event1, + (union perf_event *)&event2, + (union perf_event *)&event3, + }; + + err = process_events(events, ARRAY_SIZE(events)); + if (err) + return -1; + + return 0; +} diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c new file mode 100644 index 00000000000..aca1a83dd13 --- /dev/null +++ b/tools/perf/tests/perf-record.c @@ -0,0 +1,309 @@ +#include <sched.h> +#include "evlist.h" +#include "evsel.h" +#include "perf.h" +#include "debug.h" +#include "tests.h" + +static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) +{ + int i, cpu = -1, nrcpus = 1024; +realloc: + CPU_ZERO(maskp); + + if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { + if (errno == EINVAL && nrcpus < (1024 << 8)) { + nrcpus = nrcpus << 2; + goto realloc; + } + perror("sched_getaffinity"); + return -1; + } + + for (i = 0; i < nrcpus; i++) { + if (CPU_ISSET(i, maskp)) { + if (cpu == -1) + cpu = i; + else + CPU_CLR(i, maskp); + } + } + + return cpu; +} + +int test__PERF_RECORD(void) +{ + struct record_opts opts = { + .target = { + .uid = UINT_MAX, + .uses_mmap = true, + }, + .no_buffering = true, + .freq = 10, + .mmap_pages = 256, + }; + cpu_set_t cpu_mask; + size_t cpu_mask_size = sizeof(cpu_mask); + struct perf_evlist *evlist = perf_evlist__new_default(); + struct perf_evsel *evsel; + struct perf_sample sample; + const char *cmd = "sleep"; + const char *argv[] = { cmd, "1", NULL, }; + char *bname, *mmap_filename; + u64 prev_time = 0; + bool found_cmd_mmap = false, + found_libc_mmap = false, + found_vdso_mmap = false, + found_ld_mmap = false; + int err = -1, errs = 0, i, wakeups = 0; + u32 cpu; + int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; + + if (evlist == NULL || argv == NULL) { + pr_debug("Not enough memory to create evlist\n"); + goto out; + } + + /* + * Create maps of threads and cpus to monitor. In this case + * we start with all threads and cpus (-1, -1) but then in + * perf_evlist__prepare_workload we'll fill in the only thread + * we're monitoring, the one forked there. + */ + err = perf_evlist__create_maps(evlist, &opts.target); + if (err < 0) { + pr_debug("Not enough memory to create thread/cpu maps\n"); + goto out_delete_evlist; + } + + /* + * Prepare the workload in argv[] to run, it'll fork it, and then wait + * for perf_evlist__start_workload() to exec it. This is done this way + * so that we have time to open the evlist (calling sys_perf_event_open + * on all the fds) and then mmap them. + */ + err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL); + if (err < 0) { + pr_debug("Couldn't run the workload!\n"); + goto out_delete_evlist; + } + + /* + * Config the evsels, setting attr->comm on the first one, etc. 
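+	 * The CPU, TID and TIME sample bits set just below are what the checks
+	 * on sample.cpu, sample.pid/tid and sample.time further down rely on.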
+ */ + evsel = perf_evlist__first(evlist); + perf_evsel__set_sample_bit(evsel, CPU); + perf_evsel__set_sample_bit(evsel, TID); + perf_evsel__set_sample_bit(evsel, TIME); + perf_evlist__config(evlist, &opts); + + err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); + if (err < 0) { + pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + cpu = err; + + /* + * So that we can check perf_sample.cpu on all the samples. + */ + if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { + pr_debug("sched_setaffinity: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + /* + * Call sys_perf_event_open on all the fds on all the evsels, + * grouping them if asked to. + */ + err = perf_evlist__open(evlist); + if (err < 0) { + pr_debug("perf_evlist__open: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + /* + * mmap the first fd on a given CPU and ask for events for the other + * fds in the same CPU to be injected in the same mmap ring buffer + * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). + */ + err = perf_evlist__mmap(evlist, opts.mmap_pages, false); + if (err < 0) { + pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + /* + * Now that all is properly set up, enable the events, they will + * count just on workload.pid, which will start... + */ + perf_evlist__enable(evlist); + + /* + * Now! + */ + perf_evlist__start_workload(evlist); + + while (1) { + int before = total_events; + + for (i = 0; i < evlist->nr_mmaps; i++) { + union perf_event *event; + + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + const u32 type = event->header.type; + const char *name = perf_event__name(type); + + ++total_events; + if (type < PERF_RECORD_MAX) + nr_events[type]++; + + err = perf_evlist__parse_sample(evlist, event, &sample); + if (err < 0) { + if (verbose) + perf_event__fprintf(event, stderr); + pr_debug("Couldn't parse sample\n"); + goto out_delete_evlist; + } + + if (verbose) { + pr_info("%" PRIu64" %d ", sample.time, sample.cpu); + perf_event__fprintf(event, stderr); + } + + if (prev_time > sample.time) { + pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", + name, prev_time, sample.time); + ++errs; + } + + prev_time = sample.time; + + if (sample.cpu != cpu) { + pr_debug("%s with unexpected cpu, expected %d, got %d\n", + name, cpu, sample.cpu); + ++errs; + } + + if ((pid_t)sample.pid != evlist->workload.pid) { + pr_debug("%s with unexpected pid, expected %d, got %d\n", + name, evlist->workload.pid, sample.pid); + ++errs; + } + + if ((pid_t)sample.tid != evlist->workload.pid) { + pr_debug("%s with unexpected tid, expected %d, got %d\n", + name, evlist->workload.pid, sample.tid); + ++errs; + } + + if ((type == PERF_RECORD_COMM || + type == PERF_RECORD_MMAP || + type == PERF_RECORD_MMAP2 || + type == PERF_RECORD_FORK || + type == PERF_RECORD_EXIT) && + (pid_t)event->comm.pid != evlist->workload.pid) { + pr_debug("%s with unexpected pid/tid\n", name); + ++errs; + } + + if ((type == PERF_RECORD_COMM || + type == PERF_RECORD_MMAP || + type == PERF_RECORD_MMAP2) && + event->comm.pid != event->comm.tid) { + pr_debug("%s with different pid/tid!\n", name); + ++errs; + } + + switch (type) { + case PERF_RECORD_COMM: + if (strcmp(event->comm.comm, cmd)) { + pr_debug("%s with unexpected comm!\n", name); + ++errs; + } + break; + case PERF_RECORD_EXIT: + goto found_exit; + case PERF_RECORD_MMAP: + mmap_filename = event->mmap.filename; + goto check_bname; + case 
PERF_RECORD_MMAP2: + mmap_filename = event->mmap2.filename; + check_bname: + bname = strrchr(mmap_filename, '/'); + if (bname != NULL) { + if (!found_cmd_mmap) + found_cmd_mmap = !strcmp(bname + 1, cmd); + if (!found_libc_mmap) + found_libc_mmap = !strncmp(bname + 1, "libc", 4); + if (!found_ld_mmap) + found_ld_mmap = !strncmp(bname + 1, "ld", 2); + } else if (!found_vdso_mmap) + found_vdso_mmap = !strcmp(mmap_filename, "[vdso]"); + break; + + case PERF_RECORD_SAMPLE: + /* Just ignore samples for now */ + break; + default: + pr_debug("Unexpected perf_event->header.type %d!\n", + type); + ++errs; + } + + perf_evlist__mmap_consume(evlist, i); + } + } + + /* + * We don't use poll here because at least at 3.1 times the + * PERF_RECORD_{!SAMPLE} events don't honour + * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. + */ + if (total_events == before && false) + poll(evlist->pollfd, evlist->nr_fds, -1); + + sleep(1); + if (++wakeups > 5) { + pr_debug("No PERF_RECORD_EXIT event!\n"); + break; + } + } + +found_exit: + if (nr_events[PERF_RECORD_COMM] > 1) { + pr_debug("Excessive number of PERF_RECORD_COMM events!\n"); + ++errs; + } + + if (nr_events[PERF_RECORD_COMM] == 0) { + pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd); + ++errs; + } + + if (!found_cmd_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd); + ++errs; + } + + if (!found_libc_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc"); + ++errs; + } + + if (!found_ld_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld"); + ++errs; + } + + if (!found_vdso_mmap) { + pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]"); + ++errs; + } +out_delete_evlist: + perf_evlist__delete(evlist); +out: + return (err < 0 || errs > 0) ? -1 : 0; +} diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg new file mode 100755 index 00000000000..238aa3927c7 --- /dev/null +++ b/tools/perf/tests/perf-targz-src-pkg @@ -0,0 +1,21 @@ +#!/bin/sh +# Test one of the main kernel Makefile targets to generate a perf sources tarball +# suitable for build outside the full kernel sources. +# +# This is to test that the tools/perf/MANIFEST file lists all the files needed to +# be in such tarball, which sometimes gets broken when we move files around, +# like when we made some files that were in tools/perf/ available to other tools/ +# codebases by moving it to tools/include/, etc. + +PERF=$1 +cd ${PERF}/../.. +make perf-targz-src-pkg > /dev/null +TARBALL=$(ls -rt perf-*.tar.gz) +TMP_DEST=$(mktemp -d) +tar xf ${TARBALL} -C $TMP_DEST +rm -f ${TARBALL} +cd - > /dev/null +make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1 +RC=$? 
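+# Keep the build result before removing the scratch directory, then report it through the exit status.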
+rm -rf ${TMP_DEST} +exit $RC diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c new file mode 100644 index 00000000000..3b7cd4d32dc --- /dev/null +++ b/tools/perf/tests/perf-time-to-tsc.c @@ -0,0 +1,172 @@ +#include <stdio.h> +#include <unistd.h> +#include <linux/types.h> +#include <sys/prctl.h> + +#include "parse-events.h" +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "cpumap.h" +#include "tests.h" + +#include "../arch/x86/util/tsc.h" + +#define CHECK__(x) { \ + while ((x) < 0) { \ + pr_debug(#x " failed!\n"); \ + goto out_err; \ + } \ +} + +#define CHECK_NOT_NULL__(x) { \ + while ((x) == NULL) { \ + pr_debug(#x " failed!\n"); \ + goto out_err; \ + } \ +} + +static u64 rdtsc(void) +{ + unsigned int low, high; + + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + + return low | ((u64)high) << 32; +} + +/** + * test__perf_time_to_tsc - test converting perf time to TSC. + * + * This function implements a test that checks that the conversion of perf time + * to and from TSC is consistent with the order of events. If the test passes + * %0 is returned, otherwise %-1 is returned. If TSC conversion is not + * supported then then the test passes but " (not supported)" is printed. + */ +int test__perf_time_to_tsc(void) +{ + struct record_opts opts = { + .mmap_pages = UINT_MAX, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .freq = 4000, + .target = { + .uses_mmap = true, + }, + .sample_time = true, + }; + struct thread_map *threads = NULL; + struct cpu_map *cpus = NULL; + struct perf_evlist *evlist = NULL; + struct perf_evsel *evsel = NULL; + int err = -1, ret, i; + const char *comm1, *comm2; + struct perf_tsc_conversion tc; + struct perf_event_mmap_page *pc; + union perf_event *event; + u64 test_tsc, comm1_tsc, comm2_tsc; + u64 test_time, comm1_time = 0, comm2_time = 0; + + threads = thread_map__new(-1, getpid(), UINT_MAX); + CHECK_NOT_NULL__(threads); + + cpus = cpu_map__new(NULL); + CHECK_NOT_NULL__(cpus); + + evlist = perf_evlist__new(); + CHECK_NOT_NULL__(evlist); + + perf_evlist__set_maps(evlist, cpus, threads); + + CHECK__(parse_events(evlist, "cycles:u")); + + perf_evlist__config(evlist, &opts); + + evsel = perf_evlist__first(evlist); + + evsel->attr.comm = 1; + evsel->attr.disabled = 1; + evsel->attr.enable_on_exec = 0; + + CHECK__(perf_evlist__open(evlist)); + + CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false)); + + pc = evlist->mmap[0].base; + ret = perf_read_tsc_conversion(pc, &tc); + if (ret) { + if (ret == -EOPNOTSUPP) { + fprintf(stderr, " (not supported)"); + return 0; + } + goto out_err; + } + + perf_evlist__enable(evlist); + + comm1 = "Test COMM 1"; + CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0)); + + test_tsc = rdtsc(); + + comm2 = "Test COMM 2"; + CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0)); + + perf_evlist__disable(evlist); + + for (i = 0; i < evlist->nr_mmaps; i++) { + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + struct perf_sample sample; + + if (event->header.type != PERF_RECORD_COMM || + (pid_t)event->comm.pid != getpid() || + (pid_t)event->comm.tid != getpid()) + goto next_event; + + if (strcmp(event->comm.comm, comm1) == 0) { + CHECK__(perf_evsel__parse_sample(evsel, event, + &sample)); + comm1_time = sample.time; + } + if (strcmp(event->comm.comm, comm2) == 0) { + CHECK__(perf_evsel__parse_sample(evsel, event, + &sample)); + comm2_time = sample.time; + } +next_event: + perf_evlist__mmap_consume(evlist, i); + } + } + + if (!comm1_time || 
!comm2_time) + goto out_err; + + test_time = tsc_to_perf_time(test_tsc, &tc); + comm1_tsc = perf_time_to_tsc(comm1_time, &tc); + comm2_tsc = perf_time_to_tsc(comm2_time, &tc); + + pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n", + comm1_time, comm1_tsc); + pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n", + test_time, test_tsc); + pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n", + comm2_time, comm2_tsc); + + if (test_time <= comm1_time || + test_time >= comm2_time) + goto out_err; + + if (test_tsc <= comm1_tsc || + test_tsc >= comm2_tsc) + goto out_err; + + err = 0; + +out_err: + if (evlist) { + perf_evlist__disable(evlist); + perf_evlist__delete(evlist); + } + + return err; +} diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c new file mode 100644 index 00000000000..12b322fa347 --- /dev/null +++ b/tools/perf/tests/pmu.c @@ -0,0 +1,173 @@ +#include "parse-events.h" +#include "pmu.h" +#include "util.h" +#include "tests.h" + +/* Simulated format definitions. */ +static struct test_format { + const char *name; + const char *value; +} test_formats[] = { + { "krava01", "config:0-1,62-63\n", }, + { "krava02", "config:10-17\n", }, + { "krava03", "config:5\n", }, + { "krava11", "config1:0,2,4,6,8,20-28\n", }, + { "krava12", "config1:63\n", }, + { "krava13", "config1:45-47\n", }, + { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", }, + { "krava22", "config2:8,18,48,58\n", }, + { "krava23", "config2:28-29,38\n", }, +}; + +/* Simulated users input. */ +static struct parse_events_term test_terms[] = { + { + .config = (char *) "krava01", + .val.num = 15, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava02", + .val.num = 170, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava03", + .val.num = 1, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava11", + .val.num = 27, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava12", + .val.num = 1, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava13", + .val.num = 2, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava21", + .val.num = 119, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava22", + .val.num = 11, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, + { + .config = (char *) "krava23", + .val.num = 2, + .type_val = PARSE_EVENTS__TERM_TYPE_NUM, + .type_term = PARSE_EVENTS__TERM_TYPE_USER, + }, +}; + +/* + * Prepare format directory data, exported by kernel + * at /sys/bus/event_source/devices/<dev>/format. 
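+ * Each file in that directory maps a format field to config bit ranges,
+ * e.g. the simulated "krava01" above stands for "config:0-1,62-63".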
+ */ +static char *test_format_dir_get(void) +{ + static char dir[PATH_MAX]; + unsigned int i; + + snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX"); + if (!mkdtemp(dir)) + return NULL; + + for (i = 0; i < ARRAY_SIZE(test_formats); i++) { + static char name[PATH_MAX]; + struct test_format *format = &test_formats[i]; + FILE *file; + + snprintf(name, PATH_MAX, "%s/%s", dir, format->name); + + file = fopen(name, "w"); + if (!file) + return NULL; + + if (1 != fwrite(format->value, strlen(format->value), 1, file)) + break; + + fclose(file); + } + + return dir; +} + +/* Cleanup format directory. */ +static int test_format_dir_put(char *dir) +{ + char buf[PATH_MAX]; + snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir); + if (system(buf)) + return -1; + + snprintf(buf, PATH_MAX, "rmdir %s\n", dir); + return system(buf); +} + +static struct list_head *test_terms_list(void) +{ + static LIST_HEAD(terms); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(test_terms); i++) + list_add_tail(&test_terms[i].list, &terms); + + return &terms; +} + +int test__pmu(void) +{ + char *format = test_format_dir_get(); + LIST_HEAD(formats); + struct list_head *terms = test_terms_list(); + int ret; + + if (!format) + return -EINVAL; + + do { + struct perf_event_attr attr; + + memset(&attr, 0, sizeof(attr)); + + ret = perf_pmu__format_parse(format, &formats); + if (ret) + break; + + ret = perf_pmu__config_terms(&formats, &attr, terms); + if (ret) + break; + + ret = -EINVAL; + + if (attr.config != 0xc00000000002a823) + break; + if (attr.config1 != 0x8000400000000145) + break; + if (attr.config2 != 0x0400000020041d07) + break; + + ret = 0; + } while (0); + + test_format_dir_put(format); + return ret; +} diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c new file mode 100644 index 00000000000..7760277c6de --- /dev/null +++ b/tools/perf/tests/python-use.c @@ -0,0 +1,23 @@ +/* + * Just test if we can load the python binding. + */ + +#include <stdio.h> +#include <stdlib.h> +#include "tests.h" + +extern int verbose; + +int test__python_use(void) +{ + char *cmd; + int ret; + + if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s", + PYTHONPATH, PYTHON, verbose ? "" : "2> /dev/null") < 0) + return -1; + + ret = system(cmd) ? 
-1 : 0; + free(cmd); + return ret; +} diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/tests/rdpmc.c new file mode 100644 index 00000000000..e59143fd9e7 --- /dev/null +++ b/tools/perf/tests/rdpmc.c @@ -0,0 +1,173 @@ +#include <unistd.h> +#include <stdlib.h> +#include <signal.h> +#include <sys/mman.h> +#include <linux/types.h> +#include "perf.h" +#include "debug.h" +#include "tests.h" + +#if defined(__x86_64__) || defined(__i386__) + +static u64 rdpmc(unsigned int counter) +{ + unsigned int low, high; + + asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); + + return low | ((u64)high) << 32; +} + +static u64 rdtsc(void) +{ + unsigned int low, high; + + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + + return low | ((u64)high) << 32; +} + +static u64 mmap_read_self(void *addr) +{ + struct perf_event_mmap_page *pc = addr; + u32 seq, idx, time_mult = 0, time_shift = 0; + u64 count, cyc = 0, time_offset = 0, enabled, running, delta; + + do { + seq = pc->lock; + barrier(); + + enabled = pc->time_enabled; + running = pc->time_running; + + if (enabled != running) { + cyc = rdtsc(); + time_mult = pc->time_mult; + time_shift = pc->time_shift; + time_offset = pc->time_offset; + } + + idx = pc->index; + count = pc->offset; + if (idx) + count += rdpmc(idx - 1); + + barrier(); + } while (pc->lock != seq); + + if (enabled != running) { + u64 quot, rem; + + quot = (cyc >> time_shift); + rem = cyc & ((1 << time_shift) - 1); + delta = time_offset + quot * time_mult + + ((rem * time_mult) >> time_shift); + + enabled += delta; + if (idx) + running += delta; + + quot = count / running; + rem = count % running; + count = quot * enabled + (rem * enabled) / running; + } + + return count; +} + +/* + * If the RDPMC instruction faults then signal this back to the test parent task: + */ +static void segfault_handler(int sig __maybe_unused, + siginfo_t *info __maybe_unused, + void *uc __maybe_unused) +{ + exit(-1); +} + +static int __test__rdpmc(void) +{ + volatile int tmp = 0; + u64 i, loops = 1000; + int n; + int fd; + void *addr; + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_INSTRUCTIONS, + .exclude_kernel = 1, + }; + u64 delta_sum = 0; + struct sigaction sa; + + sigfillset(&sa.sa_mask); + sa.sa_sigaction = segfault_handler; + sigaction(SIGSEGV, &sa, NULL); + + fd = sys_perf_event_open(&attr, 0, -1, -1, 0); + if (fd < 0) { + pr_err("Error: sys_perf_event_open() syscall returned " + "with %d (%s)\n", fd, strerror(errno)); + return -1; + } + + addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); + if (addr == (void *)(-1)) { + pr_err("Error: mmap() syscall returned with (%s)\n", + strerror(errno)); + goto out_close; + } + + for (n = 0; n < 6; n++) { + u64 stamp, now, delta; + + stamp = mmap_read_self(addr); + + for (i = 0; i < loops; i++) + tmp++; + + now = mmap_read_self(addr); + loops *= 10; + + delta = now - stamp; + pr_debug("%14d: %14Lu\n", n, (long long)delta); + + delta_sum += delta; + } + + munmap(addr, page_size); + pr_debug(" "); +out_close: + close(fd); + + if (!delta_sum) + return -1; + + return 0; +} + +int test__rdpmc(void) +{ + int status = 0; + int wret = 0; + int ret; + int pid; + + pid = fork(); + if (pid < 0) + return -1; + + if (!pid) { + ret = __test__rdpmc(); + + exit(ret); + } + + wret = waitpid(pid, &status, 0); + if (wret < 0 || status) + return -1; + + return 0; +} + +#endif diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c new file mode 100644 index 00000000000..7ae8d17db3d --- /dev/null 
+++ b/tools/perf/tests/sample-parsing.c @@ -0,0 +1,320 @@ +#include <stdbool.h> +#include <linux/types.h> + +#include "util.h" +#include "event.h" +#include "evsel.h" + +#include "tests.h" + +#define COMP(m) do { \ + if (s1->m != s2->m) { \ + pr_debug("Samples differ at '"#m"'\n"); \ + return false; \ + } \ +} while (0) + +#define MCOMP(m) do { \ + if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) { \ + pr_debug("Samples differ at '"#m"'\n"); \ + return false; \ + } \ +} while (0) + +static bool samples_same(const struct perf_sample *s1, + const struct perf_sample *s2, + u64 type, u64 read_format) +{ + size_t i; + + if (type & PERF_SAMPLE_IDENTIFIER) + COMP(id); + + if (type & PERF_SAMPLE_IP) + COMP(ip); + + if (type & PERF_SAMPLE_TID) { + COMP(pid); + COMP(tid); + } + + if (type & PERF_SAMPLE_TIME) + COMP(time); + + if (type & PERF_SAMPLE_ADDR) + COMP(addr); + + if (type & PERF_SAMPLE_ID) + COMP(id); + + if (type & PERF_SAMPLE_STREAM_ID) + COMP(stream_id); + + if (type & PERF_SAMPLE_CPU) + COMP(cpu); + + if (type & PERF_SAMPLE_PERIOD) + COMP(period); + + if (type & PERF_SAMPLE_READ) { + if (read_format & PERF_FORMAT_GROUP) + COMP(read.group.nr); + else + COMP(read.one.value); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + COMP(read.time_enabled); + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + COMP(read.time_running); + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + for (i = 0; i < s1->read.group.nr; i++) + MCOMP(read.group.values[i]); + } else { + COMP(read.one.id); + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + COMP(callchain->nr); + for (i = 0; i < s1->callchain->nr; i++) + COMP(callchain->ips[i]); + } + + if (type & PERF_SAMPLE_RAW) { + COMP(raw_size); + if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) { + pr_debug("Samples differ at 'raw_data'\n"); + return false; + } + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + COMP(branch_stack->nr); + for (i = 0; i < s1->branch_stack->nr; i++) + MCOMP(branch_stack->entries[i]); + } + + if (type & PERF_SAMPLE_REGS_USER) { + size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64); + + COMP(user_regs.mask); + COMP(user_regs.abi); + if (s1->user_regs.abi && + (!s1->user_regs.regs || !s2->user_regs.regs || + memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) { + pr_debug("Samples differ at 'user_regs'\n"); + return false; + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + COMP(user_stack.size); + if (memcmp(s1->user_stack.data, s1->user_stack.data, + s1->user_stack.size)) { + pr_debug("Samples differ at 'user_stack'\n"); + return false; + } + } + + if (type & PERF_SAMPLE_WEIGHT) + COMP(weight); + + if (type & PERF_SAMPLE_DATA_SRC) + COMP(data_src); + + if (type & PERF_SAMPLE_TRANSACTION) + COMP(transaction); + + return true; +} + +static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format) +{ + struct perf_evsel evsel = { + .needs_swap = false, + .attr = { + .sample_type = sample_type, + .sample_regs_user = sample_regs_user, + .read_format = read_format, + }, + }; + union perf_event *event; + union { + struct ip_callchain callchain; + u64 data[64]; + } callchain = { + /* 3 ips */ + .data = {3, 201, 202, 203}, + }; + union { + struct branch_stack branch_stack; + u64 data[64]; + } branch_stack = { + /* 1 branch_entry */ + .data = {1, 211, 212, 213}, + }; + u64 user_regs[64]; + const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL}; + const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL}; + struct perf_sample sample = { + .ip = 101, + .pid = 
102, + .tid = 103, + .time = 104, + .addr = 105, + .id = 106, + .stream_id = 107, + .period = 108, + .weight = 109, + .cpu = 110, + .raw_size = sizeof(raw_data), + .data_src = 111, + .transaction = 112, + .raw_data = (void *)raw_data, + .callchain = &callchain.callchain, + .branch_stack = &branch_stack.branch_stack, + .user_regs = { + .abi = PERF_SAMPLE_REGS_ABI_64, + .mask = sample_regs_user, + .regs = user_regs, + }, + .user_stack = { + .size = sizeof(data), + .data = (void *)data, + }, + .read = { + .time_enabled = 0x030a59d664fca7deULL, + .time_running = 0x011b6ae553eb98edULL, + }, + }; + struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},}; + struct perf_sample sample_out; + size_t i, sz, bufsz; + int err, ret = -1; + + for (i = 0; i < sizeof(user_regs); i++) + *(i + (u8 *)user_regs) = i & 0xfe; + + if (read_format & PERF_FORMAT_GROUP) { + sample.read.group.nr = 4; + sample.read.group.values = values; + } else { + sample.read.one.value = 0x08789faeb786aa87ULL; + sample.read.one.id = 99; + } + + sz = perf_event__sample_event_size(&sample, sample_type, read_format); + bufsz = sz + 4096; /* Add a bit for overrun checking */ + event = malloc(bufsz); + if (!event) { + pr_debug("malloc failed\n"); + return -1; + } + + memset(event, 0xff, bufsz); + event->header.type = PERF_RECORD_SAMPLE; + event->header.misc = 0; + event->header.size = sz; + + err = perf_event__synthesize_sample(event, sample_type, read_format, + &sample, false); + if (err) { + pr_debug("%s failed for sample_type %#"PRIx64", error %d\n", + "perf_event__synthesize_sample", sample_type, err); + goto out_free; + } + + /* The data does not contain 0xff so we use that to check the size */ + for (i = bufsz; i > 0; i--) { + if (*(i - 1 + (u8 *)event) != 0xff) + break; + } + if (i != sz) { + pr_debug("Event size mismatch: actual %zu vs expected %zu\n", + i, sz); + goto out_free; + } + + evsel.sample_size = __perf_evsel__sample_size(sample_type); + + err = perf_evsel__parse_sample(&evsel, event, &sample_out); + if (err) { + pr_debug("%s failed for sample_type %#"PRIx64", error %d\n", + "perf_evsel__parse_sample", sample_type, err); + goto out_free; + } + + if (!samples_same(&sample, &sample_out, sample_type, read_format)) { + pr_debug("parsing failed for sample_type %#"PRIx64"\n", + sample_type); + goto out_free; + } + + ret = 0; +out_free: + free(event); + if (ret && read_format) + pr_debug("read_format %#"PRIx64"\n", read_format); + return ret; +} + +/** + * test__sample_parsing - test sample parsing. + * + * This function implements a test that synthesizes a sample event, parses it + * and then checks that the parsed sample matches the original sample. The test + * checks sample format bits separately and together. If the test passes %0 is + * returned, otherwise %-1 is returned. + */ +int test__sample_parsing(void) +{ + const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15}; + u64 sample_type; + u64 sample_regs_user; + size_t i; + int err; + + /* + * Fail the test if it has not been updated when new sample format bits + * were added. Please actually update the test rather than just change + * the condition below. 
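+	 * PERF_SAMPLE_TRANSACTION is the last bit this test knows how to compare,
+	 * so anything above it in PERF_SAMPLE_MAX means a new bit was added.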
+ */ + if (PERF_SAMPLE_MAX > PERF_SAMPLE_TRANSACTION << 1) { + pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n"); + return -1; + } + + /* Test each sample format bit separately */ + for (sample_type = 1; sample_type != PERF_SAMPLE_MAX; + sample_type <<= 1) { + /* Test read_format variations */ + if (sample_type == PERF_SAMPLE_READ) { + for (i = 0; i < ARRAY_SIZE(rf); i++) { + err = do_test(sample_type, 0, rf[i]); + if (err) + return err; + } + continue; + } + + if (sample_type == PERF_SAMPLE_REGS_USER) + sample_regs_user = 0x3fff; + else + sample_regs_user = 0; + + err = do_test(sample_type, sample_regs_user, 0); + if (err) + return err; + } + + /* Test all sample format bits together */ + sample_type = PERF_SAMPLE_MAX - 1; + sample_regs_user = 0x3fff; + for (i = 0; i < ARRAY_SIZE(rf); i++) { + err = do_test(sample_type, sample_regs_user, rf[i]); + if (err) + return err; + } + + return 0; +} diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c new file mode 100644 index 00000000000..983d6b8562a --- /dev/null +++ b/tools/perf/tests/sw-clock.c @@ -0,0 +1,122 @@ +#include <unistd.h> +#include <stdlib.h> +#include <signal.h> +#include <sys/mman.h> + +#include "tests.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/cpumap.h" +#include "util/thread_map.h" + +#define NR_LOOPS 10000000 + +/* + * This test will open software clock events (cpu-clock, task-clock) + * then check their frequency -> period conversion has no artifact of + * setting period to 1 forcefully. + */ +static int __test__sw_clock_freq(enum perf_sw_ids clock_id) +{ + int i, err = -1; + volatile int tmp = 0; + u64 total_periods = 0; + int nr_samples = 0; + union perf_event *event; + struct perf_evsel *evsel; + struct perf_evlist *evlist; + struct perf_event_attr attr = { + .type = PERF_TYPE_SOFTWARE, + .config = clock_id, + .sample_type = PERF_SAMPLE_PERIOD, + .exclude_kernel = 1, + .disabled = 1, + .freq = 1, + }; + + attr.sample_freq = 500; + + evlist = perf_evlist__new(); + if (evlist == NULL) { + pr_debug("perf_evlist__new\n"); + return -1; + } + + evsel = perf_evsel__new(&attr); + if (evsel == NULL) { + pr_debug("perf_evsel__new\n"); + goto out_delete_evlist; + } + perf_evlist__add(evlist, evsel); + + evlist->cpus = cpu_map__dummy_new(); + evlist->threads = thread_map__new_by_tid(getpid()); + if (!evlist->cpus || !evlist->threads) { + err = -ENOMEM; + pr_debug("Not enough memory to create thread/cpu maps\n"); + goto out_delete_evlist; + } + + if (perf_evlist__open(evlist)) { + const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate"; + + err = -errno; + pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n", + strerror(errno), knob, (u64)attr.sample_freq); + goto out_delete_evlist; + } + + err = perf_evlist__mmap(evlist, 128, true); + if (err < 0) { + pr_debug("failed to mmap event: %d (%s)\n", errno, + strerror(errno)); + goto out_delete_evlist; + } + + perf_evlist__enable(evlist); + + /* collect samples */ + for (i = 0; i < NR_LOOPS; i++) + tmp++; + + perf_evlist__disable(evlist); + + while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { + struct perf_sample sample; + + if (event->header.type != PERF_RECORD_SAMPLE) + goto next_event; + + err = perf_evlist__parse_sample(evlist, event, &sample); + if (err < 0) { + pr_debug("Error during parse sample\n"); + goto out_delete_evlist; + } + + total_periods += sample.period; + nr_samples++; +next_event: + perf_evlist__mmap_consume(evlist, 0); 
+ } + + if ((u64) nr_samples == total_periods) { + pr_debug("All (%d) samples have period value of 1!\n", + nr_samples); + err = -1; + } + +out_delete_evlist: + perf_evlist__delete(evlist); + return err; +} + +int test__sw_clock_freq(void) +{ + int ret; + + ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK); + if (!ret) + ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK); + + return ret; +} diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c new file mode 100644 index 00000000000..5ff3db318f1 --- /dev/null +++ b/tools/perf/tests/task-exit.c @@ -0,0 +1,118 @@ +#include "evlist.h" +#include "evsel.h" +#include "thread_map.h" +#include "cpumap.h" +#include "tests.h" + +#include <signal.h> + +static int exited; +static int nr_exit; + +static void sig_handler(int sig __maybe_unused) +{ + exited = 1; +} + +/* + * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since + * we asked by setting its exec_error to this handler. + */ +static void workload_exec_failed_signal(int signo __maybe_unused, + siginfo_t *info __maybe_unused, + void *ucontext __maybe_unused) +{ + exited = 1; + nr_exit = -1; +} + +/* + * This test will start a workload that does nothing then it checks + * if the number of exit event reported by the kernel is 1 or not + * in order to check the kernel returns correct number of event. + */ +int test__task_exit(void) +{ + int err = -1; + union perf_event *event; + struct perf_evsel *evsel; + struct perf_evlist *evlist; + struct target target = { + .uid = UINT_MAX, + .uses_mmap = true, + }; + const char *argv[] = { "true", NULL }; + + signal(SIGCHLD, sig_handler); + + evlist = perf_evlist__new_default(); + if (evlist == NULL) { + pr_debug("perf_evlist__new_default\n"); + return -1; + } + + /* + * Create maps of threads and cpus to monitor. In this case + * we start with all threads and cpus (-1, -1) but then in + * perf_evlist__prepare_workload we'll fill in the only thread + * we're monitoring, the one forked there. 
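+	 * Here that means a dummy cpu map and a thread map created with tid -1,
+	 * which perf_evlist__prepare_workload later points at the forked child.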
+ */ + evlist->cpus = cpu_map__dummy_new(); + evlist->threads = thread_map__new_by_tid(-1); + if (!evlist->cpus || !evlist->threads) { + err = -ENOMEM; + pr_debug("Not enough memory to create thread/cpu maps\n"); + goto out_delete_evlist; + } + + err = perf_evlist__prepare_workload(evlist, &target, argv, false, + workload_exec_failed_signal); + if (err < 0) { + pr_debug("Couldn't run the workload!\n"); + goto out_delete_evlist; + } + + evsel = perf_evlist__first(evlist); + evsel->attr.task = 1; + evsel->attr.sample_freq = 0; + evsel->attr.inherit = 0; + evsel->attr.watermark = 0; + evsel->attr.wakeup_events = 1; + evsel->attr.exclude_kernel = 1; + + err = perf_evlist__open(evlist); + if (err < 0) { + pr_debug("Couldn't open the evlist: %s\n", strerror(-err)); + goto out_delete_evlist; + } + + if (perf_evlist__mmap(evlist, 128, true) < 0) { + pr_debug("failed to mmap events: %d (%s)\n", errno, + strerror(errno)); + goto out_delete_evlist; + } + + perf_evlist__start_workload(evlist); + +retry: + while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { + if (event->header.type == PERF_RECORD_EXIT) + nr_exit++; + + perf_evlist__mmap_consume(evlist, 0); + } + + if (!exited || !nr_exit) { + poll(evlist->pollfd, evlist->nr_fds, -1); + goto retry; + } + + if (nr_exit != 1) { + pr_debug("received %d EXIT records\n", nr_exit); + err = -1; + } + +out_delete_evlist: + perf_evlist__delete(evlist); + return err; +} diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h new file mode 100644 index 00000000000..ed64790a395 --- /dev/null +++ b/tools/perf/tests/tests.h @@ -0,0 +1,60 @@ +#ifndef TESTS_H +#define TESTS_H + +#define TEST_ASSERT_VAL(text, cond) \ +do { \ + if (!(cond)) { \ + pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ + return -1; \ + } \ +} while (0) + +enum { + TEST_OK = 0, + TEST_FAIL = -1, + TEST_SKIP = -2, +}; + +/* Tests */ +int test__vmlinux_matches_kallsyms(void); +int test__open_syscall_event(void); +int test__open_syscall_event_on_all_cpus(void); +int test__basic_mmap(void); +int test__PERF_RECORD(void); +int test__rdpmc(void); +int test__perf_evsel__roundtrip_name_test(void); +int test__perf_evsel__tp_sched_test(void); +int test__syscall_open_tp_fields(void); +int test__pmu(void); +int test__attr(void); +int test__dso_data(void); +int test__dso_data_cache(void); +int test__dso_data_reopen(void); +int test__parse_events(void); +int test__hists_link(void); +int test__python_use(void); +int test__bp_signal(void); +int test__bp_signal_overflow(void); +int test__task_exit(void); +int test__sw_clock_freq(void); +int test__perf_time_to_tsc(void); +int test__code_reading(void); +int test__sample_parsing(void); +int test__keep_tracking(void); +int test__parse_no_sample_id_all(void); +int test__dwarf_unwind(void); +int test__hists_filter(void); +int test__mmap_thread_lookup(void); +int test__thread_mg_share(void); +int test__hists_output(void); +int test__hists_cumulate(void); + +#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) +#ifdef HAVE_DWARF_UNWIND_SUPPORT +struct thread; +struct perf_sample; +int test__arch_unwind_sample(struct perf_sample *sample, + struct thread *thread); +#endif +#endif +#endif /* TESTS_H */ diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c new file mode 100644 index 00000000000..2b2e0dbe114 --- /dev/null +++ b/tools/perf/tests/thread-mg-share.c @@ -0,0 +1,90 @@ +#include "tests.h" +#include "machine.h" +#include "thread.h" +#include "map.h" + +int test__thread_mg_share(void) +{ + struct 
machines machines; + struct machine *machine; + + /* thread group */ + struct thread *leader; + struct thread *t1, *t2, *t3; + struct map_groups *mg; + + /* other process */ + struct thread *other, *other_leader; + struct map_groups *other_mg; + + /* + * This test create 2 processes abstractions (struct thread) + * with several threads and checks they properly share and + * maintain map groups info (struct map_groups). + * + * thread group (pid: 0, tids: 0, 1, 2, 3) + * other group (pid: 4, tids: 4, 5) + */ + + machines__init(&machines); + machine = &machines.host; + + /* create process with 4 threads */ + leader = machine__findnew_thread(machine, 0, 0); + t1 = machine__findnew_thread(machine, 0, 1); + t2 = machine__findnew_thread(machine, 0, 2); + t3 = machine__findnew_thread(machine, 0, 3); + + /* and create 1 separated process, without thread leader */ + other = machine__findnew_thread(machine, 4, 5); + + TEST_ASSERT_VAL("failed to create threads", + leader && t1 && t2 && t3 && other); + + mg = leader->mg; + TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 4); + + /* test the map groups pointer is shared */ + TEST_ASSERT_VAL("map groups don't match", mg == t1->mg); + TEST_ASSERT_VAL("map groups don't match", mg == t2->mg); + TEST_ASSERT_VAL("map groups don't match", mg == t3->mg); + + /* + * Verify the other leader was created by previous call. + * It should have shared map groups with no change in + * refcnt. + */ + other_leader = machine__find_thread(machine, 4, 4); + TEST_ASSERT_VAL("failed to find other leader", other_leader); + + other_mg = other->mg; + TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 2); + + TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg); + + /* release thread group */ + thread__delete(leader); + TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3); + + thread__delete(t1); + TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2); + + thread__delete(t2); + TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1); + + thread__delete(t3); + + /* release other group */ + thread__delete(other_leader); + TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1); + + thread__delete(other); + + /* + * Cannot call machine__delete_threads(machine) now, + * because we've already released all the threads. + */ + + machines__exit(&machines); + return 0; +} diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c new file mode 100644 index 00000000000..3d9088003a5 --- /dev/null +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -0,0 +1,242 @@ +#include <linux/compiler.h> +#include <linux/rbtree.h> +#include <string.h> +#include "map.h" +#include "symbol.h" +#include "util.h" +#include "tests.h" +#include "debug.h" +#include "machine.h" + +static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, + struct symbol *sym) +{ + bool *visited = symbol__priv(sym); + *visited = true; + return 0; +} + +#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x)) + +int test__vmlinux_matches_kallsyms(void) +{ + int err = -1; + struct rb_node *nd; + struct symbol *sym; + struct map *kallsyms_map, *vmlinux_map; + struct machine kallsyms, vmlinux; + enum map_type type = MAP__FUNCTION; + u64 mem_start, mem_end; + + /* + * Step 1: + * + * Init the machines that will hold kernel, modules obtained from + * both vmlinux + .ko files and from /proc/kallsyms split by modules. 
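+	 * The rest of the test then walks the vmlinux symbols and checks that
+	 * each one appears in the kallsyms machine with a matching name and address.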
+ */ + machine__init(&kallsyms, "", HOST_KERNEL_ID); + machine__init(&vmlinux, "", HOST_KERNEL_ID); + + /* + * Step 2: + * + * Create the kernel maps for kallsyms and the DSO where we will then + * load /proc/kallsyms. Also create the modules maps from /proc/modules + * and find the .ko files that match them in /lib/modules/`uname -r`/. + */ + if (machine__create_kernel_maps(&kallsyms) < 0) { + pr_debug("machine__create_kernel_maps "); + goto out; + } + + /* + * Step 3: + * + * Load and split /proc/kallsyms into multiple maps, one per module. + */ + if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) { + pr_debug("dso__load_kallsyms "); + goto out; + } + + /* + * Step 4: + * + * kallsyms will be internally on demand sorted by name so that we can + * find the reference relocation * symbol, i.e. the symbol we will use + * to see if the running kernel was relocated by checking if it has the + * same value in the vmlinux file we load. + */ + kallsyms_map = machine__kernel_map(&kallsyms, type); + + /* + * Step 5: + * + * Now repeat step 2, this time for the vmlinux file we'll auto-locate. + */ + if (machine__create_kernel_maps(&vmlinux) < 0) { + pr_debug("machine__create_kernel_maps "); + goto out; + } + + vmlinux_map = machine__kernel_map(&vmlinux, type); + + /* + * Step 6: + * + * Locate a vmlinux file in the vmlinux path that has a buildid that + * matches the one of the running kernel. + * + * While doing that look if we find the ref reloc symbol, if we find it + * we'll have its ref_reloc_symbol.unrelocated_addr and then + * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines + * to fixup the symbols. + */ + if (machine__load_vmlinux_path(&vmlinux, type, + vmlinux_matches_kallsyms_filter) <= 0) { + pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n"); + err = TEST_SKIP; + goto out; + } + + err = 0; + /* + * Step 7: + * + * Now look at the symbols in the vmlinux DSO and check if we find all of them + * in the kallsyms dso. For the ones that are in both, check its names and + * end addresses too. + */ + for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) { + struct symbol *pair, *first_pair; + bool backwards = true; + + sym = rb_entry(nd, struct symbol, rb_node); + + if (sym->start == sym->end) + continue; + + mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start); + mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end); + + first_pair = machine__find_kernel_symbol(&kallsyms, type, + mem_start, NULL, NULL); + pair = first_pair; + + if (pair && UM(pair->start) == mem_start) { +next_pair: + if (strcmp(sym->name, pair->name) == 0) { + /* + * kallsyms don't have the symbol end, so we + * set that by using the next symbol start - 1, + * in some cases we get this up to a page + * wrong, trace_kmalloc when I was developing + * this code was one such example, 2106 bytes + * off the real size. More than that and we + * _really_ have a problem. + */ + s64 skew = mem_end - UM(pair->end); + if (llabs(skew) >= page_size) + pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n", + mem_start, sym->name, mem_end, + UM(pair->end)); + + /* + * Do not count this as a failure, because we + * could really find a case where it's not + * possible to get proper function end from + * kallsyms. + */ + continue; + + } else { + struct rb_node *nnd; +detour: + nnd = backwards ? 
rb_prev(&pair->rb_node) : + rb_next(&pair->rb_node); + if (nnd) { + struct symbol *next = rb_entry(nnd, struct symbol, rb_node); + + if (UM(next->start) == mem_start) { + pair = next; + goto next_pair; + } + } + + if (backwards) { + backwards = false; + pair = first_pair; + goto detour; + } + + pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n", + mem_start, sym->name, pair->name); + } + } else + pr_debug("%#" PRIx64 ": %s not on kallsyms\n", + mem_start, sym->name); + + err = -1; + } + + if (!verbose) + goto out; + + pr_info("Maps only in vmlinux:\n"); + + for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node), *pair; + /* + * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while + * the kernel will have the path for the vmlinux file being used, + * so use the short name, less descriptive but the same ("[kernel]" in + * both cases. + */ + pair = map_groups__find_by_name(&kallsyms.kmaps, type, + (pos->dso->kernel ? + pos->dso->short_name : + pos->dso->name)); + if (pair) + pair->priv = 1; + else + map__fprintf(pos, stderr); + } + + pr_info("Maps in vmlinux with a different name in kallsyms:\n"); + + for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node), *pair; + + mem_start = vmlinux_map->unmap_ip(vmlinux_map, pos->start); + mem_end = vmlinux_map->unmap_ip(vmlinux_map, pos->end); + + pair = map_groups__find(&kallsyms.kmaps, type, mem_start); + if (pair == NULL || pair->priv) + continue; + + if (pair->start == mem_start) { + pair->priv = 1; + pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", + pos->start, pos->end, pos->pgoff, pos->dso->name); + if (mem_end != pair->end) + pr_info(":\n*%" PRIx64 "-%" PRIx64 " %" PRIx64, + pair->start, pair->end, pair->pgoff); + pr_info(" %s\n", pair->dso->name); + pair->priv = 1; + } + } + + pr_info("Maps only in kallsyms:\n"); + + for (nd = rb_first(&kallsyms.kmaps.maps[type]); + nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + + if (!pos->priv) + map__fprintf(pos, stderr); + } +out: + machine__exit(&kallsyms); + machine__exit(&vmlinux); + return err; +} diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 4aeb7d5df93..3ccf6e14f89 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -2,7 +2,6 @@ #include "../cache.h" #include "../../perf.h" #include "libslang.h" -#include <newt.h> #include "ui.h" #include "util.h" #include <linux/compiler.h> @@ -195,7 +194,7 @@ int ui_browser__warning(struct ui_browser *browser, int timeout, ui_helpline__vpush(format, args); va_end(args); } else { - while ((key == ui__question_window("Warning!", text, + while ((key = ui__question_window("Warning!", text, "Press any key...", timeout)) == K_RESIZE) ui_browser__handle_resize(browser); @@ -234,7 +233,7 @@ void ui_browser__reset_index(struct ui_browser *browser) void __ui_browser__show_title(struct ui_browser *browser, const char *title) { SLsmg_gotorc(0, 0); - ui_browser__set_color(browser, NEWT_COLORSET_ROOT); + ui_browser__set_color(browser, HE_COLORSET_ROOT); slsmg_write_nstring(title, browser->width + 1); } @@ -257,8 +256,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title, __ui_browser__show_title(browser, title); browser->title = title; - free(browser->helpline); - browser->helpline = NULL; + zfree(&browser->helpline); va_start(ap, helpline); err = vasprintf(&browser->helpline, helpline, ap); @@ -269,10 +267,11 @@ int 
ui_browser__show(struct ui_browser *browser, const char *title, return err ? 0 : -1; } -void ui_browser__hide(struct ui_browser *browser __maybe_unused) +void ui_browser__hide(struct ui_browser *browser) { pthread_mutex_lock(&ui__lock); ui_helpline__pop(); + zfree(&browser->helpline); pthread_mutex_unlock(&ui__lock); } @@ -471,7 +470,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *browser) return row; } -static struct ui_browser__colorset { +static struct ui_browser_colorset { const char *name, *fg, *bg; int colorset; } ui_browser__colorsets[] = { @@ -512,6 +511,12 @@ static struct ui_browser__colorset { .bg = "default", }, { + .colorset = HE_COLORSET_ROOT, + .name = "root", + .fg = "white", + .bg = "blue", + }, + { .name = NULL, } }; @@ -562,7 +567,7 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence) browser->top = browser->top + browser->top_idx + offset; break; case SEEK_END: - browser->top = browser->top + browser->nr_entries + offset; + browser->top = browser->top + browser->nr_entries - 1 + offset; break; default: return; @@ -673,7 +678,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser, if (end >= browser->top_idx + browser->height) end_row = browser->height - 1; else - end_row = end - browser->top_idx;; + end_row = end - browser->top_idx; ui_browser__gotorc(browser, row, column); SLsmg_draw_vline(end_row - row + 1); @@ -706,7 +711,7 @@ void ui_browser__init(void) perf_config(ui_browser__color_config, NULL); while (ui_browser__colorsets[i].name) { - struct ui_browser__colorset *c = &ui_browser__colorsets[i++]; + struct ui_browser_colorset *c = &ui_browser__colorsets[i++]; sltt_set_color(c->colorset, c->name, c->fg, c->bg); } diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h index af70314605e..03d4d6295f1 100644 --- a/tools/perf/ui/browser.h +++ b/tools/perf/ui/browser.h @@ -1,9 +1,7 @@ #ifndef _PERF_UI_BROWSER_H_ #define _PERF_UI_BROWSER_H_ 1 -#include <stdbool.h> -#include <sys/types.h> -#include "../types.h" +#include <linux/types.h> #define HE_COLORSET_TOP 50 #define HE_COLORSET_MEDIUM 51 @@ -11,6 +9,7 @@ #define HE_COLORSET_SELECTED 53 #define HE_COLORSET_CODE 54 #define HE_COLORSET_ADDR 55 +#define HE_COLORSET_ROOT 56 struct ui_browser { u64 index, top_idx; @@ -20,32 +19,32 @@ struct ui_browser { void *priv; const char *title; char *helpline; - unsigned int (*refresh)(struct ui_browser *self); - void (*write)(struct ui_browser *self, void *entry, int row); - void (*seek)(struct ui_browser *self, off_t offset, int whence); - bool (*filter)(struct ui_browser *self, void *entry); + unsigned int (*refresh)(struct ui_browser *browser); + void (*write)(struct ui_browser *browser, void *entry, int row); + void (*seek)(struct ui_browser *browser, off_t offset, int whence); + bool (*filter)(struct ui_browser *browser, void *entry); u32 nr_entries; bool navkeypressed; bool use_navkeypressed; }; int ui_browser__set_color(struct ui_browser *browser, int color); -void ui_browser__set_percent_color(struct ui_browser *self, +void ui_browser__set_percent_color(struct ui_browser *browser, double percent, bool current); -bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row); -void ui_browser__refresh_dimensions(struct ui_browser *self); -void ui_browser__reset_index(struct ui_browser *self); +bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row); +void ui_browser__refresh_dimensions(struct ui_browser *browser); +void ui_browser__reset_index(struct ui_browser *browser); 
-void ui_browser__gotorc(struct ui_browser *self, int y, int x); +void ui_browser__gotorc(struct ui_browser *browser, int y, int x); void ui_browser__write_graph(struct ui_browser *browser, int graph); void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column, u64 start, u64 end); void __ui_browser__show_title(struct ui_browser *browser, const char *title); void ui_browser__show_title(struct ui_browser *browser, const char *title); -int ui_browser__show(struct ui_browser *self, const char *title, +int ui_browser__show(struct ui_browser *browser, const char *title, const char *helpline, ...); -void ui_browser__hide(struct ui_browser *self); -int ui_browser__refresh(struct ui_browser *self); +void ui_browser__hide(struct ui_browser *browser); +int ui_browser__refresh(struct ui_browser *browser); int ui_browser__run(struct ui_browser *browser, int delay_secs); void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries); void ui_browser__handle_resize(struct ui_browser *browser); @@ -58,15 +57,17 @@ int ui_browser__help_window(struct ui_browser *browser, const char *text); bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text); int ui_browser__input_window(const char *title, const char *text, char *input, const char *exit_msg, int delay_sec); +struct perf_session_env; +int tui__header_window(struct perf_session_env *env); void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence); unsigned int ui_browser__argv_refresh(struct ui_browser *browser); -void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence); -unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self); +void ui_browser__rb_tree_seek(struct ui_browser *browser, off_t offset, int whence); +unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser); -void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence); -unsigned int ui_browser__list_head_refresh(struct ui_browser *self); +void ui_browser__list_head_seek(struct ui_browser *browser, off_t offset, int whence); +unsigned int ui_browser__list_head_refresh(struct ui_browser *browser); void ui_browser__init(void); void annotate_browser__init(void); diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 8f8cd2d73b3..f0697a3aede 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -8,15 +8,19 @@ #include "../../util/hist.h" #include "../../util/sort.h" #include "../../util/symbol.h" +#include "../../util/evsel.h" #include <pthread.h> -#include <newt.h> struct browser_disasm_line { struct rb_node rb_node; - double percent; u32 idx; int idx_asm; int jump_sources; + /* + * actual length of this array is saved on the nr_events field + * of the struct annotate_browser + */ + double percent[1]; }; static struct annotate_browser_opt { @@ -33,8 +37,9 @@ struct annotate_browser { struct ui_browser b; struct rb_root entries; struct rb_node *curr_hot; - struct disasm_line *selection; + struct disasm_line *selection; struct disasm_line **offsets; + int nr_events; u64 start; int nr_asm_entries; int nr_entries; @@ -94,14 +99,24 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int (!current_entry || (browser->use_navkeypressed && !browser->navkeypressed))); int width = browser->width, printed; + int i, pcnt_width = 7 * ab->nr_events; + double percent_max = 0.0; char bf[256]; - if (dl->offset != -1 && bdl->percent != 0.0) { - 
ui_browser__set_percent_color(browser, bdl->percent, current_entry); - slsmg_printf("%6.2f ", bdl->percent); + for (i = 0; i < ab->nr_events; i++) { + if (bdl->percent[i] > percent_max) + percent_max = bdl->percent[i]; + } + + if (dl->offset != -1 && percent_max != 0.0) { + for (i = 0; i < ab->nr_events; i++) { + ui_browser__set_percent_color(browser, bdl->percent[i], + current_entry); + slsmg_printf("%6.2f ", bdl->percent[i]); + } } else { ui_browser__set_percent_color(browser, 0, current_entry); - slsmg_write_nstring(" ", 7); + slsmg_write_nstring(" ", pcnt_width); } SLsmg_write_char(' '); @@ -111,12 +126,12 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int width += 1; if (!*dl->line) - slsmg_write_nstring(" ", width - 7); + slsmg_write_nstring(" ", width - pcnt_width); else if (dl->offset == -1) { printed = scnprintf(bf, sizeof(bf), "%*s ", ab->addr_width, " "); slsmg_write_nstring(bf, printed); - slsmg_write_nstring(dl->line, width - printed - 6); + slsmg_write_nstring(dl->line, width - printed - pcnt_width + 1); } else { u64 addr = dl->offset; int color = -1; @@ -175,22 +190,38 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int } disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset); - slsmg_write_nstring(bf, width - 10 - printed); + slsmg_write_nstring(bf, width - pcnt_width - 3 - printed); } if (current_entry) ab->selection = dl; } +static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym) +{ + if (!dl || !dl->ins || !ins__is_jump(dl->ins) + || !disasm_line__has_offset(dl) + || dl->ops.target.offset >= symbol__size(sym)) + return false; + + return true; +} + static void annotate_browser__draw_current_jump(struct ui_browser *browser) { struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); struct disasm_line *cursor = ab->selection, *target; struct browser_disasm_line *btarget, *bcursor; unsigned int from, to; + struct map_symbol *ms = ab->b.priv; + struct symbol *sym = ms->sym; + u8 pcnt_width = 7; - if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) || - !disasm_line__has_offset(cursor)) + /* PLT symbols contain external offsets */ + if (strstr(sym->name, "@plt")) + return; + + if (!disasm_line__is_valid_jump(cursor, sym)) return; target = ab->offsets[cursor->ops.target.offset]; @@ -208,57 +239,44 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) to = (u64)btarget->idx; } + pcnt_width *= ab->nr_events; + ui_browser__set_color(browser, HE_COLORSET_CODE); - __ui_browser__line_arrow(browser, 9 + ab->addr_width, from, to); + __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width, + from, to); } static unsigned int annotate_browser__refresh(struct ui_browser *browser) { + struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); int ret = ui_browser__list_head_refresh(browser); + int pcnt_width; + + pcnt_width = 7 * ab->nr_events; if (annotate_browser__opts.jump_arrows) annotate_browser__draw_current_jump(browser); ui_browser__set_color(browser, HE_COLORSET_NORMAL); - __ui_browser__vline(browser, 7, 0, browser->height - 1); + __ui_browser__vline(browser, pcnt_width, 0, browser->height - 1); return ret; } -static double disasm_line__calc_percent(struct disasm_line *dl, struct symbol *sym, int evidx) +static int disasm__cmp(struct browser_disasm_line *a, + struct browser_disasm_line *b, int nr_pcnt) { - double percent = 0.0; - - if (dl->offset != -1) { - int len = sym->end - 
sym->start; - unsigned int hits = 0; - struct annotation *notes = symbol__annotation(sym); - struct source_line *src_line = notes->src->lines; - struct sym_hist *h = annotation__histogram(notes, evidx); - s64 offset = dl->offset; - struct disasm_line *next; - - next = disasm__get_next_ip_line(¬es->src->source, dl); - while (offset < (s64)len && - (next == NULL || offset < next->offset)) { - if (src_line) { - percent += src_line[offset].percent; - } else - hits += h->addr[offset]; + int i; - ++offset; - } - /* - * If the percentage wasn't already calculated in - * symbol__get_source_line, do it now: - */ - if (src_line == NULL && h->sum) - percent = 100.0 * hits / h->sum; + for (i = 0; i < nr_pcnt; i++) { + if (a->percent[i] == b->percent[i]) + continue; + return a->percent[i] < b->percent[i]; } - - return percent; + return 0; } -static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl) +static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl, + int nr_events) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -267,7 +285,8 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_l while (*p != NULL) { parent = *p; l = rb_entry(parent, struct browser_disasm_line, rb_node); - if (bdl->percent < l->percent) + + if (disasm__cmp(bdl, l, nr_events)) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -316,12 +335,13 @@ static void annotate_browser__set_rb_top(struct annotate_browser *browser, } static void annotate_browser__calc_percent(struct annotate_browser *browser, - int evidx) + struct perf_evsel *evsel) { struct map_symbol *ms = browser->b.priv; struct symbol *sym = ms->sym; struct annotation *notes = symbol__annotation(sym); - struct disasm_line *pos; + struct disasm_line *pos, *next; + s64 len = symbol__size(sym); browser->entries = RB_ROOT; @@ -329,12 +349,34 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, list_for_each_entry(pos, ¬es->src->source, node) { struct browser_disasm_line *bpos = disasm_line__browser(pos); - bpos->percent = disasm_line__calc_percent(pos, sym, evidx); - if (bpos->percent < 0.01) { + const char *path = NULL; + double max_percent = 0.0; + int i; + + if (pos->offset == -1) { RB_CLEAR_NODE(&bpos->rb_node); continue; } - disasm_rb_tree__insert(&browser->entries, bpos); + + next = disasm__get_next_ip_line(¬es->src->source, pos); + + for (i = 0; i < browser->nr_events; i++) { + bpos->percent[i] = disasm__calc_percent(notes, + evsel->idx + i, + pos->offset, + next ? 
next->offset : len, + &path); + + if (max_percent < bpos->percent[i]) + max_percent = bpos->percent[i]; + } + + if (max_percent < 0.01) { + RB_CLEAR_NODE(&bpos->rb_node); + continue; + } + disasm_rb_tree__insert(&browser->entries, bpos, + browser->nr_events); } pthread_mutex_unlock(¬es->lock); @@ -386,40 +428,52 @@ static void annotate_browser__init_asm_mode(struct annotate_browser *browser) browser->b.nr_entries = browser->nr_asm_entries; } +#define SYM_TITLE_MAX_SIZE (PATH_MAX + 64) + +static int sym_title(struct symbol *sym, struct map *map, char *title, + size_t sz) +{ + return snprintf(title, sz, "%s %s", sym->name, map->dso->long_name); +} + static bool annotate_browser__callq(struct annotate_browser *browser, - int evidx, void (*timer)(void *arg), - void *arg, int delay_secs) + struct perf_evsel *evsel, + struct hist_browser_timer *hbt) { struct map_symbol *ms = browser->b.priv; struct disasm_line *dl = browser->selection; - struct symbol *sym = ms->sym; struct annotation *notes; - struct symbol *target; - u64 ip; + struct addr_map_symbol target = { + .map = ms->map, + .addr = map__objdump_2mem(ms->map, dl->ops.target.addr), + }; + char title[SYM_TITLE_MAX_SIZE]; if (!ins__is_call(dl->ins)) return false; - ip = ms->map->map_ip(ms->map, dl->ops.target.addr); - target = map__find_symbol(ms->map, ip, NULL); - if (target == NULL) { + if (map_groups__find_ams(&target, NULL) || + map__rip_2objdump(target.map, target.map->map_ip(target.map, + target.addr)) != + dl->ops.target.addr) { ui_helpline__puts("The called function was not found."); return true; } - notes = symbol__annotation(target); + notes = symbol__annotation(target.sym); pthread_mutex_lock(¬es->lock); - if (notes->src == NULL && symbol__alloc_hist(target) < 0) { + if (notes->src == NULL && symbol__alloc_hist(target.sym) < 0) { pthread_mutex_unlock(¬es->lock); ui__warning("Not enough memory for annotating '%s' symbol!\n", - target->name); + target.sym->name); return true; } pthread_mutex_unlock(¬es->lock); - symbol__tui_annotate(target, ms->map, evidx, timer, arg, delay_secs); - ui_browser__show_title(&browser->b, sym->name); + symbol__tui_annotate(target.sym, target.map, evsel, hbt); + sym_title(ms->sym, ms->map, title, sizeof(title)); + ui_browser__show_title(&browser->b, title); return true; } @@ -453,7 +507,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser) dl = annotate_browser__find_offset(browser, dl->ops.target.offset, &idx); if (dl == NULL) { - ui_helpline__puts("Invallid jump offset"); + ui_helpline__puts("Invalid jump offset"); return true; } @@ -601,20 +655,23 @@ static void annotate_browser__update_addr_width(struct annotate_browser *browser browser->addr_width += browser->jumps_width + 1; } -static int annotate_browser__run(struct annotate_browser *browser, int evidx, - void(*timer)(void *arg), - void *arg, int delay_secs) +static int annotate_browser__run(struct annotate_browser *browser, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt) { struct rb_node *nd = NULL; struct map_symbol *ms = browser->b.priv; struct symbol *sym = ms->sym; const char *help = "Press 'h' for help on key bindings"; + int delay_secs = hbt ? 
hbt->refresh : 0; int key; + char title[SYM_TITLE_MAX_SIZE]; - if (ui_browser__show(&browser->b, sym->name, help) < 0) + sym_title(sym, ms->map, title, sizeof(title)); + if (ui_browser__show(&browser->b, title, help) < 0) return -1; - annotate_browser__calc_percent(browser, evidx); + annotate_browser__calc_percent(browser, evsel); if (browser->curr_hot) { annotate_browser__set_rb_top(browser, browser->curr_hot); @@ -627,7 +684,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, key = ui_browser__run(&browser->b, delay_secs); if (delay_secs != 0) { - annotate_browser__calc_percent(browser, evidx); + annotate_browser__calc_percent(browser, evsel); /* * Current line focus got out of the list of most active * lines, NULL it so that if TAB|UNTAB is pressed, we @@ -639,11 +696,11 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, switch (key) { case K_TIMER: - if (timer != NULL) - timer(arg); + if (hbt) + hbt->timer(hbt->arg); if (delay_secs != 0) - symbol__annotate_decay_histogram(sym, evidx); + symbol__annotate_decay_histogram(sym, evsel->idx); continue; case K_TAB: if (nd != NULL) { @@ -676,8 +733,14 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, "o Toggle disassembler output/simplified view\n" "s Toggle source code view\n" "/ Search string\n" - "? Search previous string\n"); + "r Run available scripts\n" + "? Search string backwards\n"); continue; + case 'r': + { + script_browse(NULL); + continue; + } case 'H': nd = browser->curr_hot; break; @@ -734,7 +797,7 @@ show_help: goto show_sup_ins; goto out; } else if (!(annotate_browser__jump(browser) || - annotate_browser__callq(browser, evidx, timer, arg, delay_secs))) { + annotate_browser__callq(browser, evsel, hbt))) { show_sup_ins: ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions."); } @@ -756,33 +819,30 @@ out: return key; } -int hist_entry__tui_annotate(struct hist_entry *he, int evidx, - void(*timer)(void *arg), void *arg, int delay_secs) +int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel, + struct hist_browser_timer *hbt) { - return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, - timer, arg, delay_secs); + return symbol__tui_annotate(he->ms.sym, he->ms.map, evsel, hbt); } static void annotate_browser__mark_jump_targets(struct annotate_browser *browser, size_t size) { u64 offset; + struct map_symbol *ms = browser->b.priv; + struct symbol *sym = ms->sym; + + /* PLT symbols contain external offsets */ + if (strstr(sym->name, "@plt")) + return; for (offset = 0; offset < size; ++offset) { struct disasm_line *dl = browser->offsets[offset], *dlt; struct browser_disasm_line *bdlt; - if (!dl || !dl->ins || !ins__is_jump(dl->ins) || - !disasm_line__has_offset(dl)) + if (!disasm_line__is_valid_jump(dl, sym)) continue; - if (dl->ops.target.offset >= size) { - ui__error("jump to after symbol!\n" - "size: %zx, jump target: %" PRIx64, - size, dl->ops.target.offset); - continue; - } - dlt = browser->offsets[dl->ops.target.offset]; /* * FIXME: Oops, no jump target? Buggy disassembler? 
Or do we @@ -809,9 +869,9 @@ static inline int width_jumps(int n) return 1; } -int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, - void(*timer)(void *arg), void *arg, - int delay_secs) +int symbol__tui_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt) { struct disasm_line *pos, *n; struct annotation *notes; @@ -831,6 +891,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, }, }; int ret = -1; + int nr_pcnt = 1; + size_t sizeof_bdl = sizeof(struct browser_disasm_line); if (sym == NULL) return -1; @@ -846,7 +908,12 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, return -1; } - if (symbol__annotate(sym, map, sizeof(struct browser_disasm_line)) < 0) { + if (perf_evsel__is_group_event(evsel)) { + nr_pcnt = evsel->nr_members; + sizeof_bdl += sizeof(double) * (nr_pcnt - 1); + } + + if (symbol__annotate(sym, map, sizeof_bdl) < 0) { ui__error("%s", ui_helpline__last_msg); goto out_free_offsets; } @@ -884,6 +951,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, browser.addr_width = browser.target_width = browser.min_addr_width = hex_width(size); browser.max_addr_width = hex_width(sym->end); browser.jumps_width = width_jumps(browser.max_jump_sources); + browser.nr_events = nr_pcnt; browser.b.nr_entries = browser.nr_entries; browser.b.entries = ¬es->src->source, browser.b.width += 18; /* Percentage */ @@ -893,7 +961,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, annotate_browser__update_addr_width(&browser); - ret = annotate_browser__run(&browser, evidx, timer, arg, delay_secs); + ret = annotate_browser__run(&browser, evsel, hbt); list_for_each_entry_safe(pos, n, ¬es->src->source, node) { list_del(&pos->node); disasm_line__free(pos); @@ -906,11 +974,11 @@ out_free_offsets: #define ANNOTATE_CFG(n) \ { .name = #n, .value = &annotate_browser__opts.n, } - + /* * Keep the entries sorted, they are bsearch'ed */ -static struct annotate__config { +static struct annotate_config { const char *name; bool *value; } annotate__configs[] = { @@ -924,7 +992,7 @@ static struct annotate__config { static int annotate_config__cmp(const void *name, const void *cfgp) { - const struct annotate__config *cfg = cfgp; + const struct annotate_config *cfg = cfgp; return strcmp(name, cfg->name); } @@ -932,7 +1000,7 @@ static int annotate_config__cmp(const void *name, const void *cfgp) static int annotate__config(const char *var, const char *value, void *data __maybe_unused) { - struct annotate__config *cfg; + struct annotate_config *cfg; const char *name; if (prefixcmp(var, "annotate.") != 0) @@ -940,7 +1008,7 @@ static int annotate__config(const char *var, const char *value, name = var + 9; cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs), - sizeof(struct annotate__config), annotate_config__cmp); + sizeof(struct annotate_config), annotate_config__cmp); if (cfg == NULL) return -1; diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c new file mode 100644 index 00000000000..89c16b98861 --- /dev/null +++ b/tools/perf/ui/browsers/header.c @@ -0,0 +1,127 @@ +#include "util/cache.h" +#include "util/debug.h" +#include "ui/browser.h" +#include "ui/ui.h" +#include "ui/util.h" +#include "ui/libslang.h" +#include "util/header.h" +#include "util/session.h" + +static void ui_browser__argv_write(struct ui_browser *browser, + void *entry, int row) +{ + char **arg = entry; + char *str = *arg; + char empty[] = " 
"; + bool current_entry = ui_browser__is_current_entry(browser, row); + unsigned long offset = (unsigned long)browser->priv; + + if (offset >= strlen(str)) + str = empty; + else + str = str + offset; + + ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : + HE_COLORSET_NORMAL); + + slsmg_write_nstring(str, browser->width); +} + +static int list_menu__run(struct ui_browser *menu) +{ + int key; + unsigned long offset; + const char help[] = + "h/?/F1 Show this window\n" + "UP/DOWN/PGUP\n" + "PGDN/SPACE\n" + "LEFT/RIGHT Navigate\n" + "q/ESC/CTRL+C Exit browser"; + + if (ui_browser__show(menu, "Header information", "Press 'q' to exit") < 0) + return -1; + + while (1) { + key = ui_browser__run(menu, 0); + + switch (key) { + case K_RIGHT: + offset = (unsigned long)menu->priv; + offset += 10; + menu->priv = (void *)offset; + continue; + case K_LEFT: + offset = (unsigned long)menu->priv; + if (offset >= 10) + offset -= 10; + menu->priv = (void *)offset; + continue; + case K_F1: + case 'h': + case '?': + ui_browser__help_window(menu, help); + continue; + case K_ESC: + case 'q': + case CTRL('c'): + key = -1; + break; + default: + continue; + } + + break; + } + + ui_browser__hide(menu); + return key; +} + +static int ui__list_menu(int argc, char * const argv[]) +{ + struct ui_browser menu = { + .entries = (void *)argv, + .refresh = ui_browser__argv_refresh, + .seek = ui_browser__argv_seek, + .write = ui_browser__argv_write, + .nr_entries = argc, + }; + + return list_menu__run(&menu); +} + +int tui__header_window(struct perf_session_env *env) +{ + int i, argc = 0; + char **argv; + struct perf_session *session; + char *ptr, *pos; + size_t size; + FILE *fp = open_memstream(&ptr, &size); + + session = container_of(env, struct perf_session, header.env); + perf_header__fprintf_info(session, fp, true); + fclose(fp); + + for (pos = ptr, argc = 0; (pos = strchr(pos, '\n')) != NULL; pos++) + argc++; + + argv = calloc(argc + 1, sizeof(*argv)); + if (argv == NULL) + goto out; + + argv[0] = pos = ptr; + for (i = 1; (pos = strchr(pos, '\n')) != NULL; i++) { + *pos++ = '\0'; + argv[i] = pos; + } + + BUG_ON(i != argc + 1); + + ui__list_menu(argc, argv); + +out: + free(argv); + free(ptr); + return 0; +} diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index ef2f93ca749..04a229aa5c0 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2,7 +2,6 @@ #include "../libslang.h" #include <stdlib.h> #include <string.h> -#include <newt.h> #include <linux/rbtree.h> #include "../../util/evsel.h" @@ -11,12 +10,14 @@ #include "../../util/pstack.h" #include "../../util/sort.h" #include "../../util/util.h" +#include "../../arch/common.h" #include "../browser.h" #include "../helpline.h" #include "../util.h" #include "../ui.h" #include "map.h" +#include "annotate.h" struct hist_browser { struct ui_browser b; @@ -25,13 +26,36 @@ struct hist_browser { struct map_symbol *selection; int print_seq; bool show_dso; - bool has_symbols; + float min_pcnt; + u64 nr_non_filtered_entries; + u64 nr_callchain_rows; }; extern void hist_browser__init_hpp(void); static int hists__browser_title(struct hists *hists, char *bf, size_t size, const char *ev_name); +static void hist_browser__update_nr_entries(struct hist_browser *hb); + +static struct rb_node *hists__filter_entries(struct rb_node *nd, + float min_pcnt); + +static bool hist_browser__has_filter(struct hist_browser *hb) +{ + return hists__has_filter(hb->hists) || hb->min_pcnt; +} + +static u32 
hist_browser__nr_entries(struct hist_browser *hb) +{ + u32 nr_entries; + + if (hist_browser__has_filter(hb)) + nr_entries = hb->nr_non_filtered_entries; + else + nr_entries = hb->hists->nr_entries; + + return nr_entries + hb->nr_callchain_rows; +} static void hist_browser__refresh_dimensions(struct hist_browser *browser) { @@ -42,7 +66,14 @@ static void hist_browser__refresh_dimensions(struct hist_browser *browser) static void hist_browser__reset(struct hist_browser *browser) { - browser->b.nr_entries = browser->hists->nr_entries; + /* + * The hists__remove_entry_filter() already folds non-filtered + * entries so we can assume it has 0 callchain rows. + */ + browser->nr_callchain_rows = 0; + + hist_browser__update_nr_entries(browser); + browser->b.nr_entries = hist_browser__nr_entries(browser); hist_browser__refresh_dimensions(browser); ui_browser__reset_index(&browser->b); } @@ -197,14 +228,16 @@ static bool hist_browser__toggle_fold(struct hist_browser *browser) struct hist_entry *he = browser->he_selection; hist_entry__init_have_children(he); - browser->hists->nr_entries -= he->nr_rows; + browser->b.nr_entries -= he->nr_rows; + browser->nr_callchain_rows -= he->nr_rows; if (he->ms.unfolded) he->nr_rows = callchain__count_rows(&he->sorted_chain); else he->nr_rows = 0; - browser->hists->nr_entries += he->nr_rows; - browser->b.nr_entries = browser->hists->nr_entries; + + browser->b.nr_entries += he->nr_rows; + browser->nr_callchain_rows += he->nr_rows; return true; } @@ -279,23 +312,27 @@ static void hist_entry__set_folding(struct hist_entry *he, bool unfold) he->nr_rows = 0; } -static void hists__set_folding(struct hists *hists, bool unfold) +static void +__hist_browser__set_folding(struct hist_browser *browser, bool unfold) { struct rb_node *nd; + struct hists *hists = browser->hists; - hists->nr_entries = 0; - - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + for (nd = rb_first(&hists->entries); + (nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL; + nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); hist_entry__set_folding(he, unfold); - hists->nr_entries += 1 + he->nr_rows; + browser->nr_callchain_rows += he->nr_rows; } } static void hist_browser__set_folding(struct hist_browser *browser, bool unfold) { - hists__set_folding(browser->hists, unfold); - browser->b.nr_entries = browser->hists->nr_entries; + browser->nr_callchain_rows = 0; + __hist_browser__set_folding(browser, unfold); + + browser->b.nr_entries = hist_browser__nr_entries(browser); /* Go to the start, we may be way after valid entries after a collapse */ ui_browser__reset_index(&browser->b); } @@ -310,13 +347,14 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser) } static int hist_browser__run(struct hist_browser *browser, const char *ev_name, - void(*timer)(void *arg), void *arg, int delay_secs) + struct hist_browser_timer *hbt) { int key; char title[160]; + int delay_secs = hbt ? 
hbt->refresh : 0; browser->b.entries = &browser->hists->entries; - browser->b.nr_entries = browser->hists->nr_entries; + browser->b.nr_entries = hist_browser__nr_entries(browser); hist_browser__refresh_dimensions(browser); hists__browser_title(browser->hists, title, sizeof(title), ev_name); @@ -329,9 +367,15 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name, key = ui_browser__run(&browser->b, delay_secs); switch (key) { - case K_TIMER: - timer(arg); - ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); + case K_TIMER: { + u64 nr_entries; + hbt->timer(hbt->arg); + + if (hist_browser__has_filter(browser)) + hist_browser__update_nr_entries(browser); + + nr_entries = hist_browser__nr_entries(browser); + ui_browser__update_nr_entries(&browser->b, nr_entries); if (browser->hists->stats.nr_lost_warned != browser->hists->stats.nr_events[PERF_RECORD_LOST]) { @@ -343,6 +387,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name, hists__browser_title(browser->hists, title, sizeof(title), ev_name); ui_browser__show_title(&browser->b, title); continue; + } case 'D': { /* Debug */ static int seq; struct hist_entry *h = rb_entry(browser->b.top, @@ -565,28 +610,80 @@ static int hist_browser__show_callchain(struct hist_browser *browser, return row - first_row; } -#define HPP__COLOR_FN(_name, _field) \ -static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \ - struct hist_entry *he) \ +struct hpp_arg { + struct ui_browser *b; + char folded_sign; + bool current_entry; +}; + +static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...) +{ + struct hpp_arg *arg = hpp->ptr; + int ret; + va_list args; + double percent; + + va_start(args, fmt); + percent = va_arg(args, double); + va_end(args); + + ui_browser__set_percent_color(arg->b, percent, arg->current_entry); + + ret = scnprintf(hpp->buf, hpp->size, fmt, percent); + slsmg_printf("%s", hpp->buf); + + advance_hpp(hpp, ret); + return ret; +} + +#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +static u64 __hpp_get_##_field(struct hist_entry *he) \ { \ - struct hists *hists = he->hists; \ - double percent = 100.0 * he->stat._field / hists->stats.total_period; \ - *(double *)hpp->ptr = percent; \ - return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \ + return he->stat._field; \ +} \ + \ +static int \ +hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\ + struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + return __hpp__fmt(hpp, he, __hpp_get_##_field, " %6.2f%%", \ + __hpp__slsmg_color_printf, true); \ } -HPP__COLOR_FN(overhead, period) -HPP__COLOR_FN(overhead_sys, period_sys) -HPP__COLOR_FN(overhead_us, period_us) -HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) -HPP__COLOR_FN(overhead_guest_us, period_guest_us) +#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +static u64 __hpp_get_acc_##_field(struct hist_entry *he) \ +{ \ + return he->stat_acc->_field; \ +} \ + \ +static int \ +hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\ + struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + if (!symbol_conf.cumulate_callchain) { \ + int ret = scnprintf(hpp->buf, hpp->size, "%8s", "N/A"); \ + slsmg_printf("%s", hpp->buf); \ + \ + return ret; \ + } \ + return __hpp__fmt(hpp, he, __hpp_get_acc_##_field, " %6.2f%%", \ + __hpp__slsmg_color_printf, true); \ +} + +__HPP_COLOR_PERCENT_FN(overhead, period) +__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) +__HPP_COLOR_PERCENT_FN(overhead_us, 
period_us) +__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) +__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period) -#undef HPP__COLOR_FN +#undef __HPP_COLOR_PERCENT_FN +#undef __HPP_COLOR_ACC_PERCENT_FN void hist_browser__init_hpp(void) { - perf_hpp__init(); - perf_hpp__format[PERF_HPP__OVERHEAD].color = hist_browser__hpp_color_overhead; perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = @@ -597,6 +694,8 @@ void hist_browser__init_hpp(void) hist_browser__hpp_color_overhead_guest_sys; perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = hist_browser__hpp_color_overhead_guest_us; + perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color = + hist_browser__hpp_color_overhead_acc; } static int hist_browser__show_entry(struct hist_browser *browser, @@ -604,13 +703,13 @@ static int hist_browser__show_entry(struct hist_browser *browser, unsigned short row) { char s[256]; - double percent; - int i, printed = 0; + int printed = 0; int width = browser->b.width; char folded_sign = ' '; bool current_entry = ui_browser__is_current_entry(&browser->b, row); off_t row_offset = entry->row_offset; bool first = true; + struct perf_hpp_fmt *fmt; if (current_entry) { browser->he_selection = entry; @@ -623,41 +722,46 @@ static int hist_browser__show_entry(struct hist_browser *browser, } if (row_offset == 0) { + struct hpp_arg arg = { + .b = &browser->b, + .folded_sign = folded_sign, + .current_entry = current_entry, + }; struct perf_hpp hpp = { .buf = s, .size = sizeof(s), + .ptr = &arg, }; ui_browser__gotorc(&browser->b, row, 0); - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) continue; - if (!first) { - slsmg_printf(" "); - width -= 2; + if (current_entry && browser->b.navkeypressed) { + ui_browser__set_color(&browser->b, + HE_COLORSET_SELECTED); + } else { + ui_browser__set_color(&browser->b, + HE_COLORSET_NORMAL); } - first = false; - - if (perf_hpp__format[i].color) { - hpp.ptr = &percent; - /* It will set percent for us. See HPP__COLOR_FN above. 
*/ - width -= perf_hpp__format[i].color(&hpp, entry); - - ui_browser__set_percent_color(&browser->b, percent, current_entry); - if (i == PERF_HPP__OVERHEAD && symbol_conf.use_callchain) { + if (first) { + if (symbol_conf.use_callchain) { slsmg_printf("%c ", folded_sign); width -= 2; } + first = false; + } else { + slsmg_printf(" "); + width -= 2; + } - slsmg_printf("%s", s); - - if (!current_entry || !browser->b.navkeypressed) - ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); + if (fmt->color) { + width -= fmt->color(fmt, &hpp, entry); } else { - width -= perf_hpp__format[i].entry(&hpp, entry); + width -= fmt->entry(fmt, &hpp, entry); slsmg_printf("%s", s); } } @@ -666,8 +770,8 @@ static int hist_browser__show_entry(struct hist_browser *browser, if (!browser->b.navkeypressed) width += 1; - hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists); - slsmg_write_nstring(s, width); + slsmg_write_nstring("", width); + ++row; ++printed; } else @@ -704,10 +808,15 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser) for (nd = browser->top; nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + float percent; if (h->filtered) continue; + percent = hist_entry__get_percent_limit(h); + if (percent < hb->min_pcnt) + continue; + row += hist_browser__show_entry(hb, h, row); if (row == browser->height) break; @@ -716,11 +825,14 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser) return row; } -static struct rb_node *hists__filter_entries(struct rb_node *nd) +static struct rb_node *hists__filter_entries(struct rb_node *nd, + float min_pcnt) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - if (!h->filtered) + float percent = hist_entry__get_percent_limit(h); + + if (!h->filtered && percent >= min_pcnt) return nd; nd = rb_next(nd); @@ -729,11 +841,14 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd) return NULL; } -static struct rb_node *hists__filter_prev_entries(struct rb_node *nd) +static struct rb_node *hists__filter_prev_entries(struct rb_node *nd, + float min_pcnt) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - if (!h->filtered) + float percent = hist_entry__get_percent_limit(h); + + if (!h->filtered && percent >= min_pcnt) return nd; nd = rb_prev(nd); @@ -748,6 +863,9 @@ static void ui_browser__hists_seek(struct ui_browser *browser, struct hist_entry *h; struct rb_node *nd; bool first = true; + struct hist_browser *hb; + + hb = container_of(browser, struct hist_browser, b); if (browser->nr_entries == 0) return; @@ -756,13 +874,15 @@ static void ui_browser__hists_seek(struct ui_browser *browser, switch (whence) { case SEEK_SET: - nd = hists__filter_entries(rb_first(browser->entries)); + nd = hists__filter_entries(rb_first(browser->entries), + hb->min_pcnt); break; case SEEK_CUR: nd = browser->top; goto do_offset; case SEEK_END: - nd = hists__filter_prev_entries(rb_last(browser->entries)); + nd = hists__filter_prev_entries(rb_last(browser->entries), + hb->min_pcnt); first = false; break; default: @@ -805,7 +925,7 @@ do_offset: break; } } - nd = hists__filter_entries(rb_next(nd)); + nd = hists__filter_entries(rb_next(nd), hb->min_pcnt); if (nd == NULL) break; --offset; @@ -838,7 +958,8 @@ do_offset: } } - nd = hists__filter_prev_entries(rb_prev(nd)); + nd = hists__filter_prev_entries(rb_prev(nd), + hb->min_pcnt); if (nd == NULL) break; ++offset; @@ -976,27 +1097,35 @@ static int hist_browser__fprintf_entry(struct 
hist_browser *browser, struct hist_entry *he, FILE *fp) { char s[8192]; - double percent; int printed = 0; char folded_sign = ' '; + struct perf_hpp hpp = { + .buf = s, + .size = sizeof(s), + }; + struct perf_hpp_fmt *fmt; + bool first = true; + int ret; if (symbol_conf.use_callchain) folded_sign = hist_entry__folded(he); - hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists); - percent = (he->stat.period * 100.0) / browser->hists->stats.total_period; - if (symbol_conf.use_callchain) printed += fprintf(fp, "%c ", folded_sign); - printed += fprintf(fp, " %5.2f%%", percent); - - if (symbol_conf.show_nr_samples) - printed += fprintf(fp, " %11u", he->stat.nr_events); + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; - if (symbol_conf.show_total_period) - printed += fprintf(fp, " %12" PRIu64, he->stat.period); + if (!first) { + ret = scnprintf(hpp.buf, hpp.size, " "); + advance_hpp(&hpp, ret); + } else + first = false; + ret = fmt->entry(fmt, &hpp, he); + advance_hpp(&hpp, ret); + } printed += fprintf(fp, "%s\n", rtrim(s)); if (folded_sign == '-') @@ -1007,14 +1136,15 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser, static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp) { - struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries)); + struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries), + browser->min_pcnt); int printed = 0; while (nd) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); printed += hist_browser__fprintf_entry(browser, h, fp); - nd = hists__filter_entries(rb_next(nd)); + nd = hists__filter_entries(rb_next(nd), browser->min_pcnt); } return printed; @@ -1063,10 +1193,6 @@ static struct hist_browser *hist_browser__new(struct hists *hists) browser->b.refresh = hist_browser__refresh; browser->b.seek = ui_browser__hists_seek; browser->b.use_navkeypressed = true; - if (sort__branch_mode == 1) - browser->has_symbols = sort_sym_from.list.next != NULL; - else - browser->has_symbols = sort_sym.list.next != NULL; } return browser; @@ -1096,6 +1222,31 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size, const struct thread *thread = hists->thread_filter; unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE]; u64 nr_events = hists->stats.total_period; + struct perf_evsel *evsel = hists_to_evsel(hists); + char buf[512]; + size_t buflen = sizeof(buf); + + if (symbol_conf.filter_relative) { + nr_samples = hists->stats.nr_non_filtered_samples; + nr_events = hists->stats.total_non_filtered_period; + } + + if (perf_evsel__is_group_event(evsel)) { + struct perf_evsel *pos; + + perf_evsel__group_desc(evsel, buf, buflen); + ev_name = buf; + + for_each_group_member(pos, evsel) { + if (symbol_conf.filter_relative) { + nr_samples += pos->hists.stats.nr_non_filtered_samples; + nr_events += pos->hists.stats.total_non_filtered_period; + } else { + nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; + nr_events += pos->hists.stats.total_period; + } + } + } nr_samples = convert_unit(nr_samples, &unit); printed = scnprintf(bf, size, @@ -1109,8 +1260,8 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size, if (thread) printed += scnprintf(bf + printed, size - printed, ", Thread: %s(%d)", - (thread->comm_set ? thread->comm : ""), - thread->pid); + (thread->comm_set ? 
thread__comm_str(thread) : ""), + thread->tid); if (dso) printed += scnprintf(bf + printed, size - printed, ", DSO: %s", dso->short_name); @@ -1121,17 +1272,129 @@ static inline void free_popup_options(char **options, int n) { int i; - for (i = 0; i < n; ++i) { - free(options[i]); - options[i] = NULL; + for (i = 0; i < n; ++i) + zfree(&options[i]); +} + +/* Check whether the browser is for 'top' or 'report' */ +static inline bool is_report_browser(void *timer) +{ + return timer == NULL; +} + +/* + * Only runtime switching of perf data file will make "input_name" point + * to a malloced buffer. So add "is_input_name_malloced" flag to decide + * whether we need to call free() for current "input_name" during the switch. + */ +static bool is_input_name_malloced = false; + +static int switch_data_file(void) +{ + char *pwd, *options[32], *abs_path[32], *tmp; + DIR *pwd_dir; + int nr_options = 0, choice = -1, ret = -1; + struct dirent *dent; + + pwd = getenv("PWD"); + if (!pwd) + return ret; + + pwd_dir = opendir(pwd); + if (!pwd_dir) + return ret; + + memset(options, 0, sizeof(options)); + memset(options, 0, sizeof(abs_path)); + + while ((dent = readdir(pwd_dir))) { + char path[PATH_MAX]; + u64 magic; + char *name = dent->d_name; + FILE *file; + + if (!(dent->d_type == DT_REG)) + continue; + + snprintf(path, sizeof(path), "%s/%s", pwd, name); + + file = fopen(path, "r"); + if (!file) + continue; + + if (fread(&magic, 1, 8, file) < 8) + goto close_file_and_continue; + + if (is_perf_magic(magic)) { + options[nr_options] = strdup(name); + if (!options[nr_options]) + goto close_file_and_continue; + + abs_path[nr_options] = strdup(path); + if (!abs_path[nr_options]) { + zfree(&options[nr_options]); + ui__warning("Can't search all data files due to memory shortage.\n"); + fclose(file); + break; + } + + nr_options++; + } + +close_file_and_continue: + fclose(file); + if (nr_options >= 32) { + ui__warning("Too many perf data files in PWD!\n" + "Only the first 32 files will be listed.\n"); + break; + } + } + closedir(pwd_dir); + + if (nr_options) { + choice = ui__popup_menu(nr_options, options); + if (choice < nr_options && choice >= 0) { + tmp = strdup(abs_path[choice]); + if (tmp) { + if (is_input_name_malloced) + free((void *)input_name); + input_name = tmp; + is_input_name_malloced = true; + ret = 0; + } else + ui__warning("Data switch failed due to memory shortage!\n"); + } } + + free_popup_options(options, nr_options); + free_popup_options(abs_path, nr_options); + return ret; +} + +static void hist_browser__update_nr_entries(struct hist_browser *hb) +{ + u64 nr_entries = 0; + struct rb_node *nd = rb_first(&hb->hists->entries); + + if (hb->min_pcnt == 0) { + hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries; + return; + } + + while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) { + nr_entries++; + nd = rb_next(nd); + } + + hb->nr_non_filtered_entries = nr_entries; } static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, const char *helpline, const char *ev_name, bool left_exits, - void(*timer)(void *arg), void *arg, - int delay_secs) + struct hist_browser_timer *hbt, + float min_pcnt, + struct perf_session_env *env) { struct hists *hists = &evsel->hists; struct hist_browser *browser = hist_browser__new(hists); @@ -1141,10 +1404,48 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, int nr_options = 0; int key = -1; char buf[64]; + char script_opt[64]; + int delay_secs = hbt ? 
hbt->refresh : 0; + +#define HIST_BROWSER_HELP_COMMON \ + "h/?/F1 Show this window\n" \ + "UP/DOWN/PGUP\n" \ + "PGDN/SPACE Navigate\n" \ + "q/ESC/CTRL+C Exit browser\n\n" \ + "For multiple event sessions:\n\n" \ + "TAB/UNTAB Switch events\n\n" \ + "For symbolic views (--sort has sym):\n\n" \ + "-> Zoom into DSO/Threads & Annotate current symbol\n" \ + "<- Zoom out\n" \ + "a Annotate current symbol\n" \ + "C Collapse all callchains\n" \ + "d Zoom into current DSO\n" \ + "E Expand all callchains\n" \ + "F Toggle percentage of filtered entries\n" \ + + /* help messages are sorted by lexical order of the hotkey */ + const char report_help[] = HIST_BROWSER_HELP_COMMON + "i Show header information\n" + "P Print histograms to perf.hist.N\n" + "r Run available scripts\n" + "s Switch to another data file in PWD\n" + "t Zoom into current Thread\n" + "V Verbose (DSO names in callchains, etc)\n" + "/ Filter symbol by name"; + const char top_help[] = HIST_BROWSER_HELP_COMMON + "P Print histograms to perf.hist.N\n" + "t Zoom into current Thread\n" + "V Verbose (DSO names in callchains, etc)\n" + "/ Filter symbol by name"; if (browser == NULL) return -1; + if (min_pcnt) { + browser->min_pcnt = min_pcnt; + hist_browser__update_nr_entries(browser); + } + fstack = pstack__new(2); if (fstack == NULL) goto out; @@ -1159,10 +1460,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, int choice = 0, annotate = -2, zoom_dso = -2, zoom_thread = -2, annotate_f = -2, annotate_t = -2, browse_map = -2; + int scripts_comm = -2, scripts_symbol = -2, + scripts_all = -2, switch_data = -2; nr_options = 0; - key = hist_browser__run(browser, ev_name, timer, arg, delay_secs); + key = hist_browser__run(browser, ev_name, hbt); if (browser->he_selection != NULL) { thread = hist_browser__selected_thread(browser); @@ -1179,7 +1482,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, */ goto out_free_stack; case 'a': - if (!browser->has_symbols) { + if (!sort__has_sym) { ui_browser__warning(&browser->b, delay_secs * 2, "Annotation is only available for symbolic views, " "include \"sym*\" in --sort to use it."); @@ -1211,27 +1514,27 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, hist_browser__reset(browser); } continue; + case 'r': + if (is_report_browser(hbt)) + goto do_scripts; + continue; + case 's': + if (is_report_browser(hbt)) + goto do_data_switch; + continue; + case 'i': + /* env->arch is NULL for live-mode (i.e. perf top) */ + if (env->arch) + tui__header_window(env); + continue; + case 'F': + symbol_conf.filter_relative ^= 1; + continue; case K_F1: case 'h': case '?': ui_browser__help_window(&browser->b, - "h/?/F1 Show this window\n" - "UP/DOWN/PGUP\n" - "PGDN/SPACE Navigate\n" - "q/ESC/CTRL+C Exit browser\n\n" - "For multiple event sessions:\n\n" - "TAB/UNTAB Switch events\n\n" - "For symbolic views (--sort has sym):\n\n" - "-> Zoom into DSO/Threads & Annotate current symbol\n" - "<- Zoom out\n" - "a Annotate current symbol\n" - "C Collapse all callchains\n" - "E Expand all callchains\n" - "d Zoom into current DSO\n" - "t Zoom into current Thread\n" - "P Print histograms to perf.hist.N\n" - "V Verbose (DSO names in callchains, etc)\n" - "/ Filter symbol by name"); + is_report_browser(hbt) ? 
report_help : top_help); continue; case K_ENTER: case K_RIGHT: @@ -1268,10 +1571,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, continue; } - if (!browser->has_symbols) + if (!sort__has_sym) goto add_exit_option; - if (sort__branch_mode == 1) { + if (sort__mode == SORT_MODE__BRANCH) { bi = browser->he_selection->branch_info; if (browser->selection != NULL && bi && @@ -1291,20 +1594,25 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, bi->to.sym->name) > 0) annotate_t = nr_options++; } else { - if (browser->selection != NULL && browser->selection->sym != NULL && - !browser->selection->map->dso->annotate_warned && - asprintf(&options[nr_options], "Annotate %s", - browser->selection->sym->name) > 0) - annotate = nr_options++; + !browser->selection->map->dso->annotate_warned) { + struct annotation *notes; + + notes = symbol__annotation(browser->selection->sym); + + if (notes->src && + asprintf(&options[nr_options], "Annotate %s", + browser->selection->sym->name) > 0) + annotate = nr_options++; + } } if (thread != NULL && asprintf(&options[nr_options], "Zoom %s %s(%d) thread", (browser->hists->thread_filter ? "out of" : "into"), - (thread->comm_set ? thread->comm : ""), - thread->pid) > 0) + (thread->comm_set ? thread__comm_str(thread) : ""), + thread->tid) > 0) zoom_thread = nr_options++; if (dso != NULL && @@ -1317,6 +1625,28 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, browser->selection->map != NULL && asprintf(&options[nr_options], "Browse map details") > 0) browse_map = nr_options++; + + /* perf script support */ + if (browser->he_selection) { + struct symbol *sym; + + if (asprintf(&options[nr_options], "Run scripts for samples of thread [%s]", + thread__comm_str(browser->he_selection->thread)) > 0) + scripts_comm = nr_options++; + + sym = browser->he_selection->ms.sym; + if (sym && sym->namelen && + asprintf(&options[nr_options], "Run scripts for samples of symbol [%s]", + sym->name) > 0) + scripts_symbol = nr_options++; + } + + if (asprintf(&options[nr_options], "Run scripts for all samples") > 0) + scripts_all = nr_options++; + + if (is_report_browser(hbt) && asprintf(&options[nr_options], + "Switch to another data file in PWD") > 0) + switch_data = nr_options++; add_exit_option: options[nr_options++] = (char *)"Exit"; retry_popup_menu: @@ -1332,8 +1662,12 @@ retry_popup_menu: if (choice == annotate || choice == annotate_t || choice == annotate_f) { struct hist_entry *he; + struct annotation *notes; int err; do_annotate: + if (!objdump_path && perf_session_env__lookup_objdump(env)) + continue; + he = hist_browser__selected_entry(browser); if (he == NULL) continue; @@ -1352,12 +1686,15 @@ do_annotate: he->ms.map = he->branch_info->to.map; } + notes = symbol__annotation(he->ms.sym); + if (!notes->src) + continue; + /* * Don't let this be freed, say, by hists__decay_entry. */ he->used = true; - err = hist_entry__tui_annotate(he, evsel->idx, - timer, arg, delay_secs); + err = hist_entry__tui_annotate(he, evsel, hbt); he->used = false; /* * offer option to annotate the other branch source or target @@ -1380,14 +1717,14 @@ zoom_dso: zoom_out_dso: ui_helpline__pop(); browser->hists->dso_filter = NULL; - sort_dso.elide = false; + perf_hpp__set_elide(HISTC_DSO, false); } else { if (dso == NULL) continue; ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", dso->kernel ? 
"the Kernel" : dso->short_name); browser->hists->dso_filter = dso; - sort_dso.elide = true; + perf_hpp__set_elide(HISTC_DSO, true); pstack__push(fstack, &browser->hists->dso_filter); } hists__filter_by_dso(hists); @@ -1399,18 +1736,42 @@ zoom_thread: zoom_out_thread: ui_helpline__pop(); browser->hists->thread_filter = NULL; - sort_thread.elide = false; + perf_hpp__set_elide(HISTC_THREAD, false); } else { ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", - thread->comm_set ? thread->comm : "", - thread->pid); + thread->comm_set ? thread__comm_str(thread) : "", + thread->tid); browser->hists->thread_filter = thread; - sort_thread.elide = true; + perf_hpp__set_elide(HISTC_THREAD, false); pstack__push(fstack, &browser->hists->thread_filter); } hists__filter_by_thread(hists); hist_browser__reset(browser); } + /* perf scripts support */ + else if (choice == scripts_all || choice == scripts_comm || + choice == scripts_symbol) { +do_scripts: + memset(script_opt, 0, 64); + + if (choice == scripts_comm) + sprintf(script_opt, " -c %s ", thread__comm_str(browser->he_selection->thread)); + + if (choice == scripts_symbol) + sprintf(script_opt, " -S %s ", browser->he_selection->ms.sym->name); + + script_browse(script_opt); + } + /* Switch to another data file */ + else if (choice == switch_data) { +do_data_switch: + if (!switch_data_file()) { + key = K_SWITCH_INPUT_DATA; + break; + } else + ui__warning("Won't switch the data files due to\n" + "no valid data file get selected!\n"); + } } out_free_stack: pstack__delete(fstack); @@ -1424,6 +1785,8 @@ struct perf_evsel_menu { struct ui_browser b; struct perf_evsel *selection; bool lost_events, lost_events_warned; + float min_pcnt; + struct perf_session_env *env; }; static void perf_evsel_menu__write(struct ui_browser *browser, @@ -1442,6 +1805,16 @@ static void perf_evsel_menu__write(struct ui_browser *browser, ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : HE_COLORSET_NORMAL); + if (perf_evsel__is_group_event(evsel)) { + struct perf_evsel *pos; + + ev_name = perf_evsel__group_name(evsel); + + for_each_group_member(pos, evsel) { + nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE]; + } + } + nr_events = convert_unit(nr_events, &unit); printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, unit, unit == ' ' ? "" : " ", ev_name); @@ -1466,11 +1839,12 @@ static void perf_evsel_menu__write(struct ui_browser *browser, static int perf_evsel_menu__run(struct perf_evsel_menu *menu, int nr_events, const char *help, - void(*timer)(void *arg), void *arg, int delay_secs) + struct hist_browser_timer *hbt) { struct perf_evlist *evlist = menu->b.priv; struct perf_evsel *pos; const char *ev_name, *title = "Available samples"; + int delay_secs = hbt ? hbt->refresh : 0; int key; if (ui_browser__show(&menu->b, title, @@ -1482,7 +1856,7 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu, switch (key) { case K_TIMER: - timer(arg); + hbt->timer(hbt->arg); if (!menu->lost_events_warned && menu->lost_events) { ui_browser__warn_lost_events(&menu->b); @@ -1500,31 +1874,33 @@ browse_hists: * Give the calling tool a chance to populate the non * default evsel resorted hists tree. 
*/ - if (timer) - timer(arg); + if (hbt) + hbt->timer(hbt->arg); ev_name = perf_evsel__name(pos); key = perf_evsel__hists_browse(pos, nr_events, help, - ev_name, true, timer, - arg, delay_secs); + ev_name, true, hbt, + menu->min_pcnt, + menu->env); ui_browser__show_title(&menu->b, title); switch (key) { case K_TAB: if (pos->node.next == &evlist->entries) - pos = list_entry(evlist->entries.next, struct perf_evsel, node); + pos = perf_evlist__first(evlist); else - pos = list_entry(pos->node.next, struct perf_evsel, node); + pos = perf_evsel__next(pos); goto browse_hists; case K_UNTAB: if (pos->node.prev == &evlist->entries) - pos = list_entry(evlist->entries.prev, struct perf_evsel, node); + pos = perf_evlist__last(evlist); else - pos = list_entry(pos->node.prev, struct perf_evsel, node); + pos = perf_evsel__prev(pos); goto browse_hists; case K_ESC: if (!ui_browser__dialog_yesno(&menu->b, "Do you really want to exit?")) continue; /* Fall thru */ + case K_SWITCH_INPUT_DATA: case 'q': case CTRL('c'): goto out; @@ -1551,10 +1927,22 @@ out: return key; } +static bool filter_group_entries(struct ui_browser *browser __maybe_unused, + void *entry) +{ + struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); + + if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel)) + return true; + + return false; +} + static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, - const char *help, - void(*timer)(void *arg), void *arg, - int delay_secs) + int nr_entries, const char *help, + struct hist_browser_timer *hbt, + float min_pcnt, + struct perf_session_env *env) { struct perf_evsel *pos; struct perf_evsel_menu menu = { @@ -1563,14 +1951,17 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = perf_evsel_menu__write, - .nr_entries = evlist->nr_entries, + .filter = filter_group_entries, + .nr_entries = nr_entries, .priv = evlist, }, + .min_pcnt = min_pcnt, + .env = env, }; ui_helpline__push("Press ESC to exit"); - list_for_each_entry(pos, &evlist->entries, node) { + evlist__for_each(evlist, pos) { const char *ev_name = perf_evsel__name(pos); size_t line_len = strlen(ev_name) + 7; @@ -1578,23 +1969,39 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, menu.b.width = line_len; } - return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer, - arg, delay_secs); + return perf_evsel_menu__run(&menu, nr_entries, help, hbt); } int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, - void(*timer)(void *arg), void *arg, - int delay_secs) + struct hist_browser_timer *hbt, + float min_pcnt, + struct perf_session_env *env) { - if (evlist->nr_entries == 1) { - struct perf_evsel *first = list_entry(evlist->entries.next, - struct perf_evsel, node); + int nr_entries = evlist->nr_entries; + +single_entry: + if (nr_entries == 1) { + struct perf_evsel *first = perf_evlist__first(evlist); const char *ev_name = perf_evsel__name(first); - return perf_evsel__hists_browse(first, evlist->nr_entries, help, - ev_name, false, timer, arg, - delay_secs); + + return perf_evsel__hists_browse(first, nr_entries, help, + ev_name, false, hbt, min_pcnt, + env); + } + + if (symbol_conf.event_group) { + struct perf_evsel *pos; + + nr_entries = 0; + evlist__for_each(evlist, pos) { + if (perf_evsel__is_group_leader(pos)) + nr_entries++; + } + + if (nr_entries == 1) + goto single_entry; } - return __perf_evlist__tui_browse_hists(evlist, help, - timer, 
arg, delay_secs); + return __perf_evlist__tui_browse_hists(evlist, nr_entries, help, + hbt, min_pcnt, env); } diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c index 98851d55a53..b11639f3368 100644 --- a/tools/perf/ui/browsers/map.c +++ b/tools/perf/ui/browsers/map.c @@ -1,6 +1,5 @@ #include "../libslang.h" #include <elf.h> -#include <newt.h> #include <inttypes.h> #include <sys/ttydefaults.h> #include <string.h> @@ -10,128 +9,105 @@ #include "../../util/symbol.h" #include "../browser.h" #include "../helpline.h" +#include "../keysyms.h" #include "map.h" -static int ui_entry__read(const char *title, char *bf, size_t size, int width) -{ - struct newtExitStruct es; - newtComponent form, entry; - const char *result; - int err = -1; - - newtCenteredWindow(width, 1, title); - form = newtForm(NULL, NULL, 0); - if (form == NULL) - return -1; - - entry = newtEntry(0, 0, "0x", width, &result, NEWT_FLAG_SCROLL); - if (entry == NULL) - goto out_free_form; - - newtFormAddComponent(form, entry); - newtFormAddHotKey(form, NEWT_KEY_ENTER); - newtFormAddHotKey(form, NEWT_KEY_ESCAPE); - newtFormAddHotKey(form, NEWT_KEY_LEFT); - newtFormAddHotKey(form, CTRL('c')); - newtFormRun(form, &es); - - if (result != NULL) { - strncpy(bf, result, size); - err = 0; - } -out_free_form: - newtPopWindow(); - newtFormDestroy(form); - return err; -} - struct map_browser { struct ui_browser b; struct map *map; u8 addrlen; }; -static void map_browser__write(struct ui_browser *self, void *nd, int row) +static void map_browser__write(struct ui_browser *browser, void *nd, int row) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - struct map_browser *mb = container_of(self, struct map_browser, b); - bool current_entry = ui_browser__is_current_entry(self, row); + struct map_browser *mb = container_of(browser, struct map_browser, b); + bool current_entry = ui_browser__is_current_entry(browser, row); int width; - ui_browser__set_percent_color(self, 0, current_entry); + ui_browser__set_percent_color(browser, 0, current_entry); slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ", mb->addrlen, sym->start, mb->addrlen, sym->end, sym->binding == STB_GLOBAL ? 'g' : sym->binding == STB_LOCAL ? 'l' : 'w'); - width = self->width - ((mb->addrlen * 2) + 4); + width = browser->width - ((mb->addrlen * 2) + 4); if (width > 0) slsmg_write_nstring(sym->name, width); } /* FIXME uber-kludgy, see comment on cmd_report... 
*/ -static u32 *symbol__browser_index(struct symbol *self) +static u32 *symbol__browser_index(struct symbol *browser) { - return ((void *)self) - sizeof(struct rb_node) - sizeof(u32); + return ((void *)browser) - sizeof(struct rb_node) - sizeof(u32); } -static int map_browser__search(struct map_browser *self) +static int map_browser__search(struct map_browser *browser) { char target[512]; struct symbol *sym; - int err = ui_entry__read("Search by name/addr", target, sizeof(target), 40); - - if (err) - return err; + int err = ui_browser__input_window("Search by name/addr", + "Prefix with 0x to search by address", + target, "ENTER: OK, ESC: Cancel", 0); + if (err != K_ENTER) + return -1; if (target[0] == '0' && tolower(target[1]) == 'x') { u64 addr = strtoull(target, NULL, 16); - sym = map__find_symbol(self->map, addr, NULL); + sym = map__find_symbol(browser->map, addr, NULL); } else - sym = map__find_symbol_by_name(self->map, target, NULL); + sym = map__find_symbol_by_name(browser->map, target, NULL); if (sym != NULL) { u32 *idx = symbol__browser_index(sym); - self->b.top = &sym->rb_node; - self->b.index = self->b.top_idx = *idx; + browser->b.top = &sym->rb_node; + browser->b.index = browser->b.top_idx = *idx; } else ui_helpline__fpush("%s not found!", target); return 0; } -static int map_browser__run(struct map_browser *self) +static int map_browser__run(struct map_browser *browser) { int key; - if (ui_browser__show(&self->b, self->map->dso->long_name, + if (ui_browser__show(&browser->b, browser->map->dso->long_name, "Press <- or ESC to exit, %s / to search", verbose ? "" : "restart with -v to use") < 0) return -1; while (1) { - key = ui_browser__run(&self->b, 0); + key = ui_browser__run(&browser->b, 0); - if (verbose && key == '/') - map_browser__search(self); - else + switch (key) { + case '/': + if (verbose) + map_browser__search(browser); + default: break; + case K_LEFT: + case K_ESC: + case 'q': + case CTRL('c'): + goto out; + } } - - ui_browser__hide(&self->b); +out: + ui_browser__hide(&browser->b); return key; } -int map__browse(struct map *self) +int map__browse(struct map *map) { struct map_browser mb = { .b = { - .entries = &self->dso->symbols[self->type], + .entries = &map->dso->symbols[map->type], .refresh = ui_browser__rb_tree_refresh, .seek = ui_browser__rb_tree_seek, .write = map_browser__write, }, - .map = self, + .map = map, }; struct rb_node *nd; char tmp[BITS_PER_LONG / 4]; diff --git a/tools/perf/ui/browsers/map.h b/tools/perf/ui/browsers/map.h index df8581a43e1..2d58e4b3eb6 100644 --- a/tools/perf/ui/browsers/map.h +++ b/tools/perf/ui/browsers/map.h @@ -2,5 +2,5 @@ #define _PERF_UI_MAP_BROWSER_H_ 1 struct map; -int map__browse(struct map *self); +int map__browse(struct map *map); #endif /* _PERF_UI_MAP_BROWSER_H_ */ diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c new file mode 100644 index 00000000000..402d2bd30b0 --- /dev/null +++ b/tools/perf/ui/browsers/scripts.c @@ -0,0 +1,187 @@ +#include <elf.h> +#include <inttypes.h> +#include <sys/ttydefaults.h> +#include <string.h> +#include "../../util/sort.h" +#include "../../util/util.h" +#include "../../util/hist.h" +#include "../../util/debug.h" +#include "../../util/symbol.h" +#include "../browser.h" +#include "../helpline.h" +#include "../libslang.h" + +/* 2048 lines should be enough for a script output */ +#define MAX_LINES 2048 + +/* 160 bytes for one output line */ +#define AVERAGE_LINE_LEN 160 + +struct script_line { + struct list_head node; + char line[AVERAGE_LINE_LEN]; +}; + 
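/*
 * Illustration, not part of this patch: script output is held in fixed-size
 * nodes -- at most MAX_LINES (2048) struct script_line entries, each storing
 * one line truncated to AVERAGE_LINE_LEN (160) bytes.  The truncation rule
 * applied by script_browse() further below, shown as a stand-alone helper
 * (hypothetical name, for reference only):
 */
#if 0	/* reference sketch, not compiled */
static void script_line__copy(struct script_line *sline,
			      const char *line, ssize_t retlen)
{
	strncpy(sline->line, line, AVERAGE_LINE_LEN);

	/* overly long lines are cut short and re-terminated with "\n\0" */
	if (retlen >= AVERAGE_LINE_LEN) {
		sline->line[AVERAGE_LINE_LEN - 1] = '\0';
		sline->line[AVERAGE_LINE_LEN - 2] = '\n';
	}
}
#endif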
+struct perf_script_browser { + struct ui_browser b; + struct list_head entries; + const char *script_name; + int nr_lines; +}; + +#define SCRIPT_NAMELEN 128 +#define SCRIPT_MAX_NO 64 +/* + * Usually the full path for a script is: + * /home/username/libexec/perf-core/scripts/python/xxx.py + * /home/username/libexec/perf-core/scripts/perl/xxx.pl + * So 256 should be long enough to contain the full path. + */ +#define SCRIPT_FULLPATH_LEN 256 + +/* + * When success, will copy the full path of the selected script + * into the buffer pointed by script_name, and return 0. + * Return -1 on failure. + */ +static int list_scripts(char *script_name) +{ + char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO]; + int i, num, choice, ret = -1; + + /* Preset the script name to SCRIPT_NAMELEN */ + buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN)); + if (!buf) + return ret; + + for (i = 0; i < SCRIPT_MAX_NO; i++) { + names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN); + paths[i] = names[i] + SCRIPT_NAMELEN; + } + + num = find_scripts(names, paths); + if (num > 0) { + choice = ui__popup_menu(num, names); + if (choice < num && choice >= 0) { + strcpy(script_name, paths[choice]); + ret = 0; + } + } + + free(buf); + return ret; +} + +static void script_browser__write(struct ui_browser *browser, + void *entry, int row) +{ + struct script_line *sline = list_entry(entry, struct script_line, node); + bool current_entry = ui_browser__is_current_entry(browser, row); + + ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : + HE_COLORSET_NORMAL); + + slsmg_write_nstring(sline->line, browser->width); +} + +static int script_browser__run(struct perf_script_browser *browser) +{ + int key; + + if (ui_browser__show(&browser->b, browser->script_name, + "Press <- or ESC to exit") < 0) + return -1; + + while (1) { + key = ui_browser__run(&browser->b, 0); + + /* We can add some special key handling here if needed */ + break; + } + + ui_browser__hide(&browser->b); + return key; +} + + +int script_browse(const char *script_opt) +{ + char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN]; + char *line = NULL; + size_t len = 0; + ssize_t retlen; + int ret = -1, nr_entries = 0; + FILE *fp; + void *buf; + struct script_line *sline; + + struct perf_script_browser script = { + .b = { + .refresh = ui_browser__list_head_refresh, + .seek = ui_browser__list_head_seek, + .write = script_browser__write, + }, + .script_name = script_name, + }; + + INIT_LIST_HEAD(&script.entries); + + /* Save each line of the output in one struct script_line object. 
*/ + buf = zalloc((sizeof(*sline)) * MAX_LINES); + if (!buf) + return -1; + sline = buf; + + memset(script_name, 0, SCRIPT_FULLPATH_LEN); + if (list_scripts(script_name)) + goto exit; + + sprintf(cmd, "perf script -s %s ", script_name); + + if (script_opt) + strcat(cmd, script_opt); + + if (input_name) { + strcat(cmd, " -i "); + strcat(cmd, input_name); + } + + strcat(cmd, " 2>&1"); + + fp = popen(cmd, "r"); + if (!fp) + goto exit; + + while ((retlen = getline(&line, &len, fp)) != -1) { + strncpy(sline->line, line, AVERAGE_LINE_LEN); + + /* If one output line is very large, just cut it short */ + if (retlen >= AVERAGE_LINE_LEN) { + sline->line[AVERAGE_LINE_LEN - 1] = '\0'; + sline->line[AVERAGE_LINE_LEN - 2] = '\n'; + } + list_add_tail(&sline->node, &script.entries); + + if (script.b.width < retlen) + script.b.width = retlen; + + if (nr_entries++ >= MAX_LINES - 1) + break; + sline++; + } + + if (script.b.width > AVERAGE_LINE_LEN) + script.b.width = AVERAGE_LINE_LEN; + + free(line); + pclose(fp); + + script.nr_lines = nr_entries; + script.b.nr_entries = nr_entries; + script.b.entries = &script.entries; + + ret = script_browser__run(&script); +exit: + free(buf); + return ret; +} diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c new file mode 100644 index 00000000000..9c7ff8d31b2 --- /dev/null +++ b/tools/perf/ui/gtk/annotate.c @@ -0,0 +1,252 @@ +#include "gtk.h" +#include "util/debug.h" +#include "util/annotate.h" +#include "util/evsel.h" +#include "ui/helpline.h" + + +enum { + ANN_COL__PERCENT, + ANN_COL__OFFSET, + ANN_COL__LINE, + + MAX_ANN_COLS +}; + +static const char *const col_names[] = { + "Overhead", + "Offset", + "Line" +}; + +static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym, + struct disasm_line *dl, int evidx) +{ + struct sym_hist *symhist; + double percent = 0.0; + const char *markup; + int ret = 0; + + strcpy(buf, ""); + + if (dl->offset == (s64) -1) + return 0; + + symhist = annotation__histogram(symbol__annotation(sym), evidx); + if (!symbol_conf.event_group && !symhist->addr[dl->offset]) + return 0; + + percent = 100.0 * symhist->addr[dl->offset] / symhist->sum; + + markup = perf_gtk__get_percent_color(percent); + if (markup) + ret += scnprintf(buf, size, "%s", markup); + ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent); + if (markup) + ret += scnprintf(buf + ret, size - ret, "</span>"); + + return ret; +} + +static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym, + struct map *map, struct disasm_line *dl) +{ + u64 start = map__rip_2objdump(map, sym->start); + + strcpy(buf, ""); + + if (dl->offset == (s64) -1) + return 0; + + return scnprintf(buf, size, "%"PRIx64, start + dl->offset); +} + +static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl) +{ + int ret = 0; + char *line = g_markup_escape_text(dl->line, -1); + const char *markup = "<span fgcolor='gray'>"; + + strcpy(buf, ""); + + if (!line) + return 0; + + if (dl->offset != (s64) -1) + markup = NULL; + + if (markup) + ret += scnprintf(buf, size, "%s", markup); + ret += scnprintf(buf + ret, size - ret, "%s", line); + if (markup) + ret += scnprintf(buf + ret, size - ret, "</span>"); + + g_free(line); + return ret; +} + +static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym, + struct map *map, struct perf_evsel *evsel, + struct hist_browser_timer *hbt __maybe_unused) +{ + struct disasm_line *pos, *n; + struct annotation *notes; + GType col_types[MAX_ANN_COLS]; + GtkCellRenderer *renderer; + 
GtkListStore *store; + GtkWidget *view; + int i; + char s[512]; + + notes = symbol__annotation(sym); + + for (i = 0; i < MAX_ANN_COLS; i++) { + col_types[i] = G_TYPE_STRING; + } + store = gtk_list_store_newv(MAX_ANN_COLS, col_types); + + view = gtk_tree_view_new(); + renderer = gtk_cell_renderer_text_new(); + + for (i = 0; i < MAX_ANN_COLS; i++) { + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), + -1, col_names[i], renderer, "markup", + i, NULL); + } + + gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); + g_object_unref(GTK_TREE_MODEL(store)); + + list_for_each_entry(pos, ¬es->src->source, node) { + GtkTreeIter iter; + int ret = 0; + + gtk_list_store_append(store, &iter); + + if (perf_evsel__is_group_event(evsel)) { + for (i = 0; i < evsel->nr_members; i++) { + ret += perf_gtk__get_percent(s + ret, + sizeof(s) - ret, + sym, pos, + evsel->idx + i); + ret += scnprintf(s + ret, sizeof(s) - ret, " "); + } + } else { + ret = perf_gtk__get_percent(s, sizeof(s), sym, pos, + evsel->idx); + } + + if (ret) + gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1); + if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos)) + gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1); + if (perf_gtk__get_line(s, sizeof(s), pos)) + gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1); + } + + gtk_container_add(GTK_CONTAINER(window), view); + + list_for_each_entry_safe(pos, n, ¬es->src->source, node) { + list_del(&pos->node); + disasm_line__free(pos); + } + + return 0; +} + +static int symbol__gtk_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt) +{ + GtkWidget *window; + GtkWidget *notebook; + GtkWidget *scrolled_window; + GtkWidget *tab_label; + + if (map->dso->annotate_warned) + return -1; + + if (symbol__annotate(sym, map, 0) < 0) { + ui__error("%s", ui_helpline__current); + return -1; + } + + if (perf_gtk__is_active_context(pgctx)) { + window = pgctx->main_window; + notebook = pgctx->notebook; + } else { + GtkWidget *vbox; + GtkWidget *infobar; + GtkWidget *statbar; + + signal(SIGSEGV, perf_gtk__signal); + signal(SIGFPE, perf_gtk__signal); + signal(SIGINT, perf_gtk__signal); + signal(SIGQUIT, perf_gtk__signal); + signal(SIGTERM, perf_gtk__signal); + + window = gtk_window_new(GTK_WINDOW_TOPLEVEL); + gtk_window_set_title(GTK_WINDOW(window), "perf annotate"); + + g_signal_connect(window, "delete_event", gtk_main_quit, NULL); + + pgctx = perf_gtk__activate_context(window); + if (!pgctx) + return -1; + + vbox = gtk_vbox_new(FALSE, 0); + notebook = gtk_notebook_new(); + pgctx->notebook = notebook; + + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); + + infobar = perf_gtk__setup_info_bar(); + if (infobar) { + gtk_box_pack_start(GTK_BOX(vbox), infobar, + FALSE, FALSE, 0); + } + + statbar = perf_gtk__setup_statusbar(); + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + + gtk_container_add(GTK_CONTAINER(window), vbox); + } + + scrolled_window = gtk_scrolled_window_new(NULL, NULL); + tab_label = gtk_label_new(sym->name); + + gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), + GTK_POLICY_AUTOMATIC, + GTK_POLICY_AUTOMATIC); + + gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, + tab_label); + + perf_gtk__annotate_symbol(scrolled_window, sym, map, evsel, hbt); + return 0; +} + +int hist_entry__gtk_annotate(struct hist_entry *he, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt) +{ + return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt); 
+} + +void perf_gtk__show_annotations(void) +{ + GtkWidget *window; + + if (!perf_gtk__is_active_context(pgctx)) + return; + + window = pgctx->main_window; + gtk_widget_show_all(window); + + perf_gtk__resize_window(window); + gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); + + gtk_main(); + + perf_gtk__deactivate_context(&pgctx); +} diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c index 4125c628411..c24d9122129 100644 --- a/tools/perf/ui/gtk/browser.c +++ b/tools/perf/ui/gtk/browser.c @@ -8,15 +8,13 @@ #include <signal.h> -#define MAX_COLUMNS 32 - -static void perf_gtk__signal(int sig) +void perf_gtk__signal(int sig) { perf_gtk__exit(false); psignal(sig, "perf"); } -static void perf_gtk__resize_window(GtkWidget *window) +void perf_gtk__resize_window(GtkWidget *window) { GdkRectangle rect; GdkScreen *screen; @@ -36,7 +34,7 @@ static void perf_gtk__resize_window(GtkWidget *window) gtk_window_resize(GTK_WINDOW(window), width, height); } -static const char *perf_gtk__get_percent_color(double percent) +const char *perf_gtk__get_percent_color(double percent) { if (percent >= MIN_RED) return "<span fgcolor='red'>"; @@ -45,155 +43,8 @@ static const char *perf_gtk__get_percent_color(double percent) return NULL; } -#define HPP__COLOR_FN(_name, _field) \ -static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ - struct hist_entry *he) \ -{ \ - struct hists *hists = he->hists; \ - double percent = 100.0 * he->stat._field / hists->stats.total_period; \ - const char *markup; \ - int ret = 0; \ - \ - markup = perf_gtk__get_percent_color(percent); \ - if (markup) \ - ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ - ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ - if (markup) \ - ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \ - \ - return ret; \ -} - -HPP__COLOR_FN(overhead, period) -HPP__COLOR_FN(overhead_sys, period_sys) -HPP__COLOR_FN(overhead_us, period_us) -HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) -HPP__COLOR_FN(overhead_guest_us, period_guest_us) - -#undef HPP__COLOR_FN - -void perf_gtk__init_hpp(void) -{ - perf_hpp__init(); - - perf_hpp__format[PERF_HPP__OVERHEAD].color = - perf_gtk__hpp_color_overhead; - perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = - perf_gtk__hpp_color_overhead_sys; - perf_hpp__format[PERF_HPP__OVERHEAD_US].color = - perf_gtk__hpp_color_overhead_us; - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = - perf_gtk__hpp_color_overhead_guest_sys; - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = - perf_gtk__hpp_color_overhead_guest_us; -} - -static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) -{ - GType col_types[MAX_COLUMNS]; - GtkCellRenderer *renderer; - struct sort_entry *se; - GtkListStore *store; - struct rb_node *nd; - GtkWidget *view; - int i, col_idx; - int nr_cols; - char s[512]; - - struct perf_hpp hpp = { - .buf = s, - .size = sizeof(s), - }; - - nr_cols = 0; - - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - - col_types[nr_cols++] = G_TYPE_STRING; - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - col_types[nr_cols++] = G_TYPE_STRING; - } - - store = gtk_list_store_newv(nr_cols, col_types); - - view = gtk_tree_view_new(); - - renderer = gtk_cell_renderer_text_new(); - - col_idx = 0; - - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - - perf_hpp__format[i].header(&hpp); - - 
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, s, - renderer, "markup", - col_idx++, NULL); - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, se->se_header, - renderer, "text", - col_idx++, NULL); - } - - gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); - - g_object_unref(GTK_TREE_MODEL(store)); - - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { - struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - GtkTreeIter iter; - - if (h->filtered) - continue; - - gtk_list_store_append(store, &iter); - - col_idx = 0; - - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) - continue; - - if (perf_hpp__format[i].color) - perf_hpp__format[i].color(&hpp, h); - else - perf_hpp__format[i].entry(&hpp, h); - - gtk_list_store_set(store, &iter, col_idx++, s, -1); - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - se->se_snprintf(h, s, ARRAY_SIZE(s), - hists__col_len(hists, se->se_width_idx)); - - gtk_list_store_set(store, &iter, col_idx++, s, -1); - } - } - - gtk_container_add(GTK_CONTAINER(window), view); -} - -#ifdef HAVE_GTK_INFO_BAR -static GtkWidget *perf_gtk__setup_info_bar(void) +#ifdef HAVE_GTK_INFO_BAR_SUPPORT +GtkWidget *perf_gtk__setup_info_bar(void) { GtkWidget *info_bar; GtkWidget *label; @@ -220,7 +71,7 @@ static GtkWidget *perf_gtk__setup_info_bar(void) } #endif -static GtkWidget *perf_gtk__setup_statusbar(void) +GtkWidget *perf_gtk__setup_statusbar(void) { GtkWidget *stbar; unsigned ctxid; @@ -234,81 +85,3 @@ static GtkWidget *perf_gtk__setup_statusbar(void) return stbar; } - -int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, - const char *help, - void (*timer) (void *arg)__maybe_unused, - void *arg __maybe_unused, - int delay_secs __maybe_unused) -{ - struct perf_evsel *pos; - GtkWidget *vbox; - GtkWidget *notebook; - GtkWidget *info_bar; - GtkWidget *statbar; - GtkWidget *window; - - signal(SIGSEGV, perf_gtk__signal); - signal(SIGFPE, perf_gtk__signal); - signal(SIGINT, perf_gtk__signal); - signal(SIGQUIT, perf_gtk__signal); - signal(SIGTERM, perf_gtk__signal); - - window = gtk_window_new(GTK_WINDOW_TOPLEVEL); - - gtk_window_set_title(GTK_WINDOW(window), "perf report"); - - g_signal_connect(window, "delete_event", gtk_main_quit, NULL); - - pgctx = perf_gtk__activate_context(window); - if (!pgctx) - return -1; - - vbox = gtk_vbox_new(FALSE, 0); - - notebook = gtk_notebook_new(); - - list_for_each_entry(pos, &evlist->entries, node) { - struct hists *hists = &pos->hists; - const char *evname = perf_evsel__name(pos); - GtkWidget *scrolled_window; - GtkWidget *tab_label; - - scrolled_window = gtk_scrolled_window_new(NULL, NULL); - - gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), - GTK_POLICY_AUTOMATIC, - GTK_POLICY_AUTOMATIC); - - perf_gtk__show_hists(scrolled_window, hists); - - tab_label = gtk_label_new(evname); - - gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); - } - - gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); - - info_bar = perf_gtk__setup_info_bar(); - if (info_bar) - gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); - - statbar = perf_gtk__setup_statusbar(); - gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); - - gtk_container_add(GTK_CONTAINER(window), vbox); - - gtk_widget_show_all(window); - - perf_gtk__resize_window(window); - 
- gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); - - ui_helpline__push(help); - - gtk_main(); - - perf_gtk__deactivate_context(&pgctx); - - return 0; -} diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h index 687af0bba18..0a9173ff9a6 100644 --- a/tools/perf/ui/gtk/gtk.h +++ b/tools/perf/ui/gtk/gtk.h @@ -10,8 +10,9 @@ struct perf_gtk_context { GtkWidget *main_window; + GtkWidget *notebook; -#ifdef HAVE_GTK_INFO_BAR +#ifdef HAVE_GTK_INFO_BAR_SUPPORT GtkWidget *info_bar; GtkWidget *message_label; #endif @@ -19,6 +20,9 @@ struct perf_gtk_context { guint statbar_ctx_id; }; +int perf_gtk__init(void); +void perf_gtk__exit(bool wait_for_ok); + extern struct perf_gtk_context *pgctx; static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx) @@ -30,13 +34,34 @@ struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window); int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); void perf_gtk__init_helpline(void); +void gtk_ui_progress__init(void); void perf_gtk__init_hpp(void); -#ifndef HAVE_GTK_INFO_BAR +void perf_gtk__signal(int sig); +void perf_gtk__resize_window(GtkWidget *window); +const char *perf_gtk__get_percent_color(double percent); +GtkWidget *perf_gtk__setup_statusbar(void); + +#ifdef HAVE_GTK_INFO_BAR_SUPPORT +GtkWidget *perf_gtk__setup_info_bar(void); +#else static inline GtkWidget *perf_gtk__setup_info_bar(void) { return NULL; } #endif +struct perf_evsel; +struct perf_evlist; +struct hist_entry; +struct hist_browser_timer; + +int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help, + struct hist_browser_timer *hbt, + float min_pcnt); +int hist_entry__gtk_annotate(struct hist_entry *he, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt); +void perf_gtk__show_annotations(void); + #endif /* _PERF_GTK_H_ */ diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c index 5db4432ff12..3388cbd1218 100644 --- a/tools/perf/ui/gtk/helpline.c +++ b/tools/perf/ui/gtk/helpline.c @@ -24,17 +24,7 @@ static void gtk_helpline_push(const char *msg) pgctx->statbar_ctx_id, msg); } -static struct ui_helpline gtk_helpline_fns = { - .pop = gtk_helpline_pop, - .push = gtk_helpline_push, -}; - -void perf_gtk__init_helpline(void) -{ - helpline_fns = >k_helpline_fns; -} - -int perf_gtk__show_helpline(const char *fmt, va_list ap) +static int gtk_helpline_show(const char *fmt, va_list ap) { int ret; char *ptr; @@ -54,3 +44,14 @@ int perf_gtk__show_helpline(const char *fmt, va_list ap) return ret; } + +static struct ui_helpline gtk_helpline_fns = { + .pop = gtk_helpline_pop, + .push = gtk_helpline_push, + .show = gtk_helpline_show, +}; + +void perf_gtk__init_helpline(void) +{ + helpline_fns = >k_helpline_fns; +} diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c new file mode 100644 index 00000000000..6ca60e482cd --- /dev/null +++ b/tools/perf/ui/gtk/hists.c @@ -0,0 +1,365 @@ +#include "../evlist.h" +#include "../cache.h" +#include "../evsel.h" +#include "../sort.h" +#include "../hist.h" +#include "../helpline.h" +#include "gtk.h" + +#define MAX_COLUMNS 32 + +static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...) 
+{ + int ret = 0; + va_list args; + double percent; + const char *markup; + char *buf = hpp->buf; + size_t size = hpp->size; + + va_start(args, fmt); + percent = va_arg(args, double); + va_end(args); + + markup = perf_gtk__get_percent_color(percent); + if (markup) + ret += scnprintf(buf, size, markup); + + ret += scnprintf(buf + ret, size - ret, fmt, percent); + + if (markup) + ret += scnprintf(buf + ret, size - ret, "</span>"); + + return ret; +} + +#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +static u64 he_get_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ + __percent_color_snprintf, true); \ +} + +#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +static u64 he_get_acc_##_field(struct hist_entry *he) \ +{ \ + return he->stat_acc->_field; \ +} \ + \ +static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \ + __percent_color_snprintf, true); \ +} + +__HPP_COLOR_PERCENT_FN(overhead, period) +__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys) +__HPP_COLOR_PERCENT_FN(overhead_us, period_us) +__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys) +__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us) +__HPP_COLOR_ACC_PERCENT_FN(overhead_acc, period) + +#undef __HPP_COLOR_PERCENT_FN + + +void perf_gtk__init_hpp(void) +{ + perf_hpp__format[PERF_HPP__OVERHEAD].color = + perf_gtk__hpp_color_overhead; + perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = + perf_gtk__hpp_color_overhead_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_US].color = + perf_gtk__hpp_color_overhead_us; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = + perf_gtk__hpp_color_overhead_guest_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = + perf_gtk__hpp_color_overhead_guest_us; + perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color = + perf_gtk__hpp_color_overhead_acc; +} + +static void callchain_list__sym_name(struct callchain_list *cl, + char *bf, size_t bfsize) +{ + if (cl->ms.sym) + scnprintf(bf, bfsize, "%s", cl->ms.sym->name); + else + scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); +} + +static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store, + GtkTreeIter *parent, int col, u64 total) +{ + struct rb_node *nd; + bool has_single_node = (rb_first(root) == rb_last(root)); + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + struct callchain_node *node; + struct callchain_list *chain; + GtkTreeIter iter, new_parent; + bool need_new_parent; + double percent; + u64 hits, child_total; + + node = rb_entry(nd, struct callchain_node, rb_node); + + hits = callchain_cumul_hits(node); + percent = 100.0 * hits / total; + + new_parent = *parent; + need_new_parent = !has_single_node && (node->val_nr > 1); + + list_for_each_entry(chain, &node->val, list) { + char buf[128]; + + gtk_tree_store_append(store, &iter, &new_parent); + + scnprintf(buf, sizeof(buf), "%5.2f%%", percent); + gtk_tree_store_set(store, &iter, 0, buf, -1); + + callchain_list__sym_name(chain, buf, sizeof(buf)); + gtk_tree_store_set(store, &iter, col, buf, -1); + + if (need_new_parent) { + /* + * Only show the top-most symbol in a callchain + * if it's not the only callchain. 
+ */ + new_parent = iter; + need_new_parent = false; + } + } + + if (callchain_param.mode == CHAIN_GRAPH_REL) + child_total = node->children_hit; + else + child_total = total; + + /* Now 'iter' contains info of the last callchain_list */ + perf_gtk__add_callchain(&node->rb_root, store, &iter, col, + child_total); + } +} + +static void on_row_activated(GtkTreeView *view, GtkTreePath *path, + GtkTreeViewColumn *col __maybe_unused, + gpointer user_data __maybe_unused) +{ + bool expanded = gtk_tree_view_row_expanded(view, path); + + if (expanded) + gtk_tree_view_collapse_row(view, path); + else + gtk_tree_view_expand_row(view, path, FALSE); +} + +static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists, + float min_pcnt) +{ + struct perf_hpp_fmt *fmt; + GType col_types[MAX_COLUMNS]; + GtkCellRenderer *renderer; + GtkTreeStore *store; + struct rb_node *nd; + GtkWidget *view; + int col_idx; + int sym_col = -1; + int nr_cols; + char s[512]; + + struct perf_hpp hpp = { + .buf = s, + .size = sizeof(s), + }; + + nr_cols = 0; + + perf_hpp__for_each_format(fmt) + col_types[nr_cols++] = G_TYPE_STRING; + + store = gtk_tree_store_newv(nr_cols, col_types); + + view = gtk_tree_view_new(); + + renderer = gtk_cell_renderer_text_new(); + + col_idx = 0; + + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + /* + * XXX no way to determine where symcol column is.. + * Just use last column for now. + */ + if (perf_hpp__is_sort_entry(fmt)) + sym_col = col_idx; + + fmt->header(fmt, &hpp, hists_to_evsel(hists)); + + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), + -1, ltrim(s), + renderer, "markup", + col_idx++, NULL); + } + + for (col_idx = 0; col_idx < nr_cols; col_idx++) { + GtkTreeViewColumn *column; + + column = gtk_tree_view_get_column(GTK_TREE_VIEW(view), col_idx); + gtk_tree_view_column_set_resizable(column, TRUE); + + if (col_idx == sym_col) { + gtk_tree_view_set_expander_column(GTK_TREE_VIEW(view), + column); + } + } + + gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store)); + + g_object_unref(GTK_TREE_MODEL(store)); + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + GtkTreeIter iter; + u64 total = hists__total_period(h->hists); + float percent; + + if (h->filtered) + continue; + + percent = hist_entry__get_percent_limit(h); + if (percent < min_pcnt) + continue; + + gtk_tree_store_append(store, &iter, NULL); + + col_idx = 0; + + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + if (fmt->color) + fmt->color(fmt, &hpp, h); + else + fmt->entry(fmt, &hpp, h); + + gtk_tree_store_set(store, &iter, col_idx++, s, -1); + } + + if (symbol_conf.use_callchain && sort__has_sym) { + if (callchain_param.mode == CHAIN_GRAPH_REL) + total = symbol_conf.cumulate_callchain ? 
+ h->stat_acc->period : h->stat.period; + + perf_gtk__add_callchain(&h->sorted_chain, store, &iter, + sym_col, total); + } + } + + gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(view), TRUE); + + g_signal_connect(view, "row-activated", + G_CALLBACK(on_row_activated), NULL); + gtk_container_add(GTK_CONTAINER(window), view); +} + +int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, + const char *help, + struct hist_browser_timer *hbt __maybe_unused, + float min_pcnt) +{ + struct perf_evsel *pos; + GtkWidget *vbox; + GtkWidget *notebook; + GtkWidget *info_bar; + GtkWidget *statbar; + GtkWidget *window; + + signal(SIGSEGV, perf_gtk__signal); + signal(SIGFPE, perf_gtk__signal); + signal(SIGINT, perf_gtk__signal); + signal(SIGQUIT, perf_gtk__signal); + signal(SIGTERM, perf_gtk__signal); + + window = gtk_window_new(GTK_WINDOW_TOPLEVEL); + + gtk_window_set_title(GTK_WINDOW(window), "perf report"); + + g_signal_connect(window, "delete_event", gtk_main_quit, NULL); + + pgctx = perf_gtk__activate_context(window); + if (!pgctx) + return -1; + + vbox = gtk_vbox_new(FALSE, 0); + + notebook = gtk_notebook_new(); + + gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0); + + info_bar = perf_gtk__setup_info_bar(); + if (info_bar) + gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0); + + statbar = perf_gtk__setup_statusbar(); + gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0); + + gtk_container_add(GTK_CONTAINER(window), vbox); + + evlist__for_each(evlist, pos) { + struct hists *hists = &pos->hists; + const char *evname = perf_evsel__name(pos); + GtkWidget *scrolled_window; + GtkWidget *tab_label; + char buf[512]; + size_t size = sizeof(buf); + + if (symbol_conf.event_group) { + if (!perf_evsel__is_group_leader(pos)) + continue; + + if (pos->nr_members > 1) { + perf_evsel__group_desc(pos, buf, size); + evname = buf; + } + } + + scrolled_window = gtk_scrolled_window_new(NULL, NULL); + + gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window), + GTK_POLICY_AUTOMATIC, + GTK_POLICY_AUTOMATIC); + + perf_gtk__show_hists(scrolled_window, hists, min_pcnt); + + tab_label = gtk_label_new(evname); + + gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label); + } + + gtk_widget_show_all(window); + + perf_gtk__resize_window(window); + + gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); + + ui_helpline__push(help); + + gtk_main(); + + perf_gtk__deactivate_context(&pgctx); + + return 0; +} diff --git a/tools/perf/ui/gtk/progress.c b/tools/perf/ui/gtk/progress.c new file mode 100644 index 00000000000..b656655fbc3 --- /dev/null +++ b/tools/perf/ui/gtk/progress.c @@ -0,0 +1,59 @@ +#include <inttypes.h> + +#include "gtk.h" +#include "../progress.h" +#include "util.h" + +static GtkWidget *dialog; +static GtkWidget *progress; + +static void gtk_ui_progress__update(struct ui_progress *p) +{ + double fraction = p->total ? 
1.0 * p->curr / p->total : 0.0; + char buf[1024]; + + if (dialog == NULL) { + GtkWidget *vbox = gtk_vbox_new(TRUE, 5); + GtkWidget *label = gtk_label_new(p->title); + + dialog = gtk_window_new(GTK_WINDOW_TOPLEVEL); + progress = gtk_progress_bar_new(); + + gtk_box_pack_start(GTK_BOX(vbox), label, TRUE, FALSE, 3); + gtk_box_pack_start(GTK_BOX(vbox), progress, TRUE, TRUE, 3); + + gtk_container_add(GTK_CONTAINER(dialog), vbox); + + gtk_window_set_title(GTK_WINDOW(dialog), "perf"); + gtk_window_resize(GTK_WINDOW(dialog), 300, 80); + gtk_window_set_position(GTK_WINDOW(dialog), GTK_WIN_POS_CENTER); + + gtk_widget_show_all(dialog); + } + + gtk_progress_bar_set_fraction(GTK_PROGRESS_BAR(progress), fraction); + snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, p->curr, p->total); + gtk_progress_bar_set_text(GTK_PROGRESS_BAR(progress), buf); + + /* we didn't call gtk_main yet, so do it manually */ + while (gtk_events_pending()) + gtk_main_iteration(); +} + +static void gtk_ui_progress__finish(void) +{ + /* this will also destroy all of its children */ + gtk_widget_destroy(dialog); + + dialog = NULL; +} + +static struct ui_progress_ops gtk_ui_progress__ops = { + .update = gtk_ui_progress__update, + .finish = gtk_ui_progress__finish, +}; + +void gtk_ui_progress__init(void) +{ + ui_progress__ops = >k_ui_progress__ops; +} diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c index 3c4c6ef7828..1d57676f821 100644 --- a/tools/perf/ui/gtk/setup.c +++ b/tools/perf/ui/gtk/setup.c @@ -8,7 +8,9 @@ int perf_gtk__init(void) { perf_error__register(&perf_gtk_eops); perf_gtk__init_helpline(); + gtk_ui_progress__init(); perf_gtk__init_hpp(); + return gtk_init_check(NULL, NULL) ? 0 : -1; } diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c index ccb046aac98..52e7fc48af9 100644 --- a/tools/perf/ui/gtk/util.c +++ b/tools/perf/ui/gtk/util.c @@ -23,8 +23,7 @@ int perf_gtk__deactivate_context(struct perf_gtk_context **ctx) if (!perf_gtk__is_active_context(*ctx)) return -1; - free(*ctx); - *ctx = NULL; + zfree(ctx); return 0; } @@ -53,7 +52,7 @@ static int perf_gtk__error(const char *format, va_list args) return 0; } -#ifdef HAVE_GTK_INFO_BAR +#ifdef HAVE_GTK_INFO_BAR_SUPPORT static int perf_gtk__warning_info_bar(const char *format, va_list args) { char *msg; @@ -105,20 +104,9 @@ static int perf_gtk__warning_statusbar(const char *format, va_list args) struct perf_error_ops perf_gtk_eops = { .error = perf_gtk__error, -#ifdef HAVE_GTK_INFO_BAR +#ifdef HAVE_GTK_INFO_BAR_SUPPORT .warning = perf_gtk__warning_info_bar, #else .warning = perf_gtk__warning_statusbar, #endif }; - -/* - * FIXME: Functions below should be implemented properly. - * For now, just add stubs for NO_NEWT=1 build. 
- */ -#ifndef NEWT_SUPPORT -void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused, - const char *title __maybe_unused) -{ -} -#endif diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c index a49bcf3c190..700fb3cfa1c 100644 --- a/tools/perf/ui/helpline.c +++ b/tools/perf/ui/helpline.c @@ -16,9 +16,16 @@ static void nop_helpline__push(const char *msg __maybe_unused) { } +static int nop_helpline__show(const char *fmt __maybe_unused, + va_list ap __maybe_unused) +{ + return 0; +} + static struct ui_helpline default_helpline_fns = { .pop = nop_helpline__pop, .push = nop_helpline__push, + .show = nop_helpline__show, }; struct ui_helpline *helpline_fns = &default_helpline_fns; @@ -59,3 +66,8 @@ void ui_helpline__puts(const char *msg) ui_helpline__pop(); ui_helpline__push(msg); } + +int ui_helpline__vshow(const char *fmt, va_list ap) +{ + return helpline_fns->show(fmt, ap); +} diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h index baa28a4d16b..46181f4fc07 100644 --- a/tools/perf/ui/helpline.h +++ b/tools/perf/ui/helpline.h @@ -9,6 +9,7 @@ struct ui_helpline { void (*pop)(void); void (*push)(const char *msg); + int (*show)(const char *fmt, va_list ap); }; extern struct ui_helpline *helpline_fns; @@ -20,28 +21,9 @@ void ui_helpline__push(const char *msg); void ui_helpline__vpush(const char *fmt, va_list ap); void ui_helpline__fpush(const char *fmt, ...); void ui_helpline__puts(const char *msg); +int ui_helpline__vshow(const char *fmt, va_list ap); extern char ui_helpline__current[512]; - -#ifdef NEWT_SUPPORT extern char ui_helpline__last_msg[]; -int ui_helpline__show_help(const char *format, va_list ap); -#else -static inline int ui_helpline__show_help(const char *format __maybe_unused, - va_list ap __maybe_unused) -{ - return 0; -} -#endif /* NEWT_SUPPORT */ - -#ifdef GTK2_SUPPORT -int perf_gtk__show_helpline(const char *format, va_list ap); -#else -static inline int perf_gtk__show_helpline(const char *format __maybe_unused, - va_list ap __maybe_unused) -{ - return 0; -} -#endif /* GTK2_SUPPORT */ #endif /* _PERF_UI_HELPLINE_H_ */ diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index f5a1e4f6526..498adb23c02 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -1,402 +1,598 @@ #include <math.h> +#include <linux/compiler.h> #include "../util/hist.h" #include "../util/util.h" #include "../util/sort.h" - +#include "../util/evsel.h" /* hist period print (hpp) functions */ -static int hpp__header_overhead(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "Overhead"); -} -static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused) -{ - return 8; -} +#define hpp__call_print_fn(hpp, fn, fmt, ...) 
\ +({ \ + int __ret = fn(hpp, fmt, ##__VA_ARGS__); \ + advance_hpp(hpp, __ret); \ + __ret; \ +}) -static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he) +int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, + hpp_snprint_fn print_fn, bool fmt_percent) { + int ret; struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period / hists->stats.total_period; + struct perf_evsel *evsel = hists_to_evsel(hists); + char *buf = hpp->buf; + size_t size = hpp->size; + + if (fmt_percent) { + double percent = 0.0; + u64 total = hists__total_period(hists); + + if (total) + percent = 100.0 * get_field(he) / total; + + ret = hpp__call_print_fn(hpp, print_fn, fmt, percent); + } else + ret = hpp__call_print_fn(hpp, print_fn, fmt, get_field(he)); + + if (perf_evsel__is_group_event(evsel)) { + int prev_idx, idx_delta; + struct hist_entry *pair; + int nr_members = evsel->nr_members; + + prev_idx = perf_evsel__group_idx(evsel); + + list_for_each_entry(pair, &he->pairs.head, pairs.node) { + u64 period = get_field(pair); + u64 total = hists__total_period(pair->hists); + + if (!total) + continue; + + evsel = hists_to_evsel(pair->hists); + idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members in the middle which + * have no sample + */ + if (fmt_percent) { + ret += hpp__call_print_fn(hpp, print_fn, + fmt, 0.0); + } else { + ret += hpp__call_print_fn(hpp, print_fn, + fmt, 0ULL); + } + } + + if (fmt_percent) { + ret += hpp__call_print_fn(hpp, print_fn, fmt, + 100.0 * period / total); + } else { + ret += hpp__call_print_fn(hpp, print_fn, fmt, + period); + } + + prev_idx = perf_evsel__group_idx(evsel); + } - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); -} + idx_delta = nr_members - prev_idx - 1; + + while (idx_delta--) { + /* + * zero-fill group members at last which have no sample + */ + if (fmt_percent) { + ret += hpp__call_print_fn(hpp, print_fn, + fmt, 0.0); + } else { + ret += hpp__call_print_fn(hpp, print_fn, + fmt, 0ULL); + } + } + } -static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%"; + /* + * Restore original buf and size as it's where caller expects + * the result will be saved. + */ + hpp->buf = buf; + hpp->size = size; - return scnprintf(hpp->buf, hpp->size, fmt, percent); + return ret; } -static int hpp__header_overhead_sys(struct perf_hpp *hpp) +int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, + hpp_snprint_fn print_fn, bool fmt_percent) { - const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; + if (!symbol_conf.cumulate_callchain) { + return snprintf(hpp->buf, hpp->size, "%*s", + fmt_percent ? 
8 : 12, "N/A"); + } - return scnprintf(hpp->buf, hpp->size, fmt, "sys"); + return __hpp__fmt(hpp, he, get_field, fmt, print_fn, fmt_percent); } -static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) +static int field_cmp(u64 field_a, u64 field_b) { - return 7; + if (field_a > field_b) + return 1; + if (field_a < field_b) + return -1; + return 0; } -static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) +static int __hpp__sort(struct hist_entry *a, struct hist_entry *b, + hpp_field_fn get_field) { - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_sys / hists->stats.total_period; + s64 ret; + int i, nr_members; + struct perf_evsel *evsel; + struct hist_entry *pair; + u64 *fields_a, *fields_b; - return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); -} + ret = field_cmp(get_field(a), get_field(b)); + if (ret || !symbol_conf.event_group) + return ret; -static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_sys / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%"; + evsel = hists_to_evsel(a->hists); + if (!perf_evsel__is_group_event(evsel)) + return ret; - return scnprintf(hpp->buf, hpp->size, fmt, percent); -} + nr_members = evsel->nr_members; + fields_a = calloc(sizeof(*fields_a), nr_members); + fields_b = calloc(sizeof(*fields_b), nr_members); -static int hpp__header_overhead_us(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; + if (!fields_a || !fields_b) + goto out; - return scnprintf(hpp->buf, hpp->size, fmt, "user"); -} - -static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) -{ - return 7; -} + list_for_each_entry(pair, &a->pairs.head, pairs.node) { + evsel = hists_to_evsel(pair->hists); + fields_a[perf_evsel__group_idx(evsel)] = get_field(pair); + } -static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_us / hists->stats.total_period; + list_for_each_entry(pair, &b->pairs.head, pairs.node) { + evsel = hists_to_evsel(pair->hists); + fields_b[perf_evsel__group_idx(evsel)] = get_field(pair); + } - return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); -} + for (i = 1; i < nr_members; i++) { + ret = field_cmp(fields_a[i], fields_b[i]); + if (ret) + break; + } -static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_us / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%"; +out: + free(fields_a); + free(fields_b); - return scnprintf(hpp->buf, hpp->size, fmt, percent); + return ret; } -static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp) +static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, + hpp_field_fn get_field) { - return scnprintf(hpp->buf, hpp->size, "guest sys"); -} + s64 ret = 0; -static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused) -{ - return 9; -} + if (symbol_conf.cumulate_callchain) { + /* + * Put caller above callee when they have equal period. 
+ */ + ret = field_cmp(get_field(a), get_field(b)); + if (ret) + return ret; -static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period; - - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); + ret = b->callchain->max_depth - a->callchain->max_depth; + } + return ret; } -static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; - - return scnprintf(hpp->buf, hpp->size, fmt, percent); +#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, \ + struct perf_evsel *evsel) \ +{ \ + int len = _min_width; \ + \ + if (symbol_conf.event_group) \ + len = max(len, evsel->nr_members * _unit_width); \ + \ + return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \ } -static int hpp__header_overhead_guest_us(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "guest usr"); -} +#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp __maybe_unused, \ + struct perf_evsel *evsel) \ +{ \ + int len = _min_width; \ + \ + if (symbol_conf.event_group) \ + len = max(len, evsel->nr_members * _unit_width); \ + \ + return len; \ +} + +static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) +{ + va_list args; + ssize_t ssize = hpp->size; + double percent; + int ret; -static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) -{ - return 9; -} + va_start(args, fmt); + percent = va_arg(args, double); + ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent); + va_end(args); + + return (ret >= ssize) ? (ssize - 1) : ret; +} + +static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) +{ + va_list args; + ssize_t ssize = hpp->size; + int ret; + + va_start(args, fmt); + ret = vsnprintf(hpp->buf, hpp->size, fmt, args); + va_end(args); + + return (ret >= ssize) ? (ssize - 1) : ret; +} + +#define __HPP_COLOR_PERCENT_FN(_type, _field) \ +static u64 he_get_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \ + hpp_color_scnprintf, true); \ +} + +#define __HPP_ENTRY_PERCENT_FN(_type, _field) \ +static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + const char *fmt = symbol_conf.field_sep ? 
" %.2f" : " %6.2f%%"; \ + return __hpp__fmt(hpp, he, he_get_##_field, fmt, \ + hpp_entry_scnprintf, true); \ +} + +#define __HPP_SORT_FN(_type, _field) \ +static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +{ \ + return __hpp__sort(a, b, he_get_##_field); \ +} + +#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +static u64 he_get_acc_##_field(struct hist_entry *he) \ +{ \ + return he->stat_acc->_field; \ +} \ + \ +static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \ + hpp_color_scnprintf, true); \ +} + +#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ +static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \ + return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, fmt, \ + hpp_entry_scnprintf, true); \ +} + +#define __HPP_SORT_ACC_FN(_type, _field) \ +static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +{ \ + return __hpp__sort_acc(a, b, he_get_acc_##_field); \ +} + +#define __HPP_ENTRY_RAW_FN(_type, _field) \ +static u64 he_get_raw_##_field(struct hist_entry *he) \ +{ \ + return he->stat._field; \ +} \ + \ +static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \ + struct perf_hpp *hpp, struct hist_entry *he) \ +{ \ + const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \ + return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, \ + hpp_entry_scnprintf, false); \ +} -static int hpp__color_overhead_guest_us(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period; +#define __HPP_SORT_RAW_FN(_type, _field) \ +static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ +{ \ + return __hpp__sort(a, b, he_get_raw_##_field); \ +} + + +#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \ +__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +__HPP_COLOR_PERCENT_FN(_type, _field) \ +__HPP_ENTRY_PERCENT_FN(_type, _field) \ +__HPP_SORT_FN(_type, _field) + +#define HPP_PERCENT_ACC_FNS(_type, _str, _field, _min_width, _unit_width)\ +__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +__HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ +__HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ +__HPP_SORT_ACC_FN(_type, _field) + +#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \ +__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \ +__HPP_WIDTH_FN(_type, _min_width, _unit_width) \ +__HPP_ENTRY_RAW_FN(_type, _field) \ +__HPP_SORT_RAW_FN(_type, _field) + +__HPP_HEADER_FN(overhead_self, "Self", 8, 8) + +HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8) +HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8) +HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8) +HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8) +HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8) +HPP_PERCENT_ACC_FNS(overhead_acc, "Children", period, 8, 8) + +HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12) +HPP_RAW_FNS(period, "Period", period, 12, 12) + +static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, + struct hist_entry *b __maybe_unused) +{ + return 0; +} + 
+#define HPP__COLOR_PRINT_FNS(_name) \ + { \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .color = hpp__color_ ## _name, \ + .entry = hpp__entry_ ## _name, \ + .cmp = hpp__nop_cmp, \ + .collapse = hpp__nop_cmp, \ + .sort = hpp__sort_ ## _name, \ + } - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); -} +#define HPP__COLOR_ACC_PRINT_FNS(_name) \ + { \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .color = hpp__color_ ## _name, \ + .entry = hpp__entry_ ## _name, \ + .cmp = hpp__nop_cmp, \ + .collapse = hpp__nop_cmp, \ + .sort = hpp__sort_ ## _name, \ + } -static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp, - struct hist_entry *he) -{ - struct hists *hists = he->hists; - double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period; - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; +#define HPP__PRINT_FNS(_name) \ + { \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .entry = hpp__entry_ ## _name, \ + .cmp = hpp__nop_cmp, \ + .collapse = hpp__nop_cmp, \ + .sort = hpp__sort_ ## _name, \ + } - return scnprintf(hpp->buf, hpp->size, fmt, percent); -} +struct perf_hpp_fmt perf_hpp__format[] = { + HPP__COLOR_PRINT_FNS(overhead), + HPP__COLOR_PRINT_FNS(overhead_sys), + HPP__COLOR_PRINT_FNS(overhead_us), + HPP__COLOR_PRINT_FNS(overhead_guest_sys), + HPP__COLOR_PRINT_FNS(overhead_guest_us), + HPP__COLOR_ACC_PRINT_FNS(overhead_acc), + HPP__PRINT_FNS(samples), + HPP__PRINT_FNS(period) +}; -static int hpp__header_baseline(struct perf_hpp *hpp) -{ - return scnprintf(hpp->buf, hpp->size, "Baseline"); -} +LIST_HEAD(perf_hpp__list); +LIST_HEAD(perf_hpp__sort_list); -static int hpp__width_baseline(struct perf_hpp *hpp __maybe_unused) -{ - return 8; -} -static double baseline_percent(struct hist_entry *he) -{ - struct hist_entry *pair = he->pair; - struct hists *pair_hists = pair ? pair->hists : NULL; - double percent = 0.0; +#undef HPP__COLOR_PRINT_FNS +#undef HPP__COLOR_ACC_PRINT_FNS +#undef HPP__PRINT_FNS - if (pair) { - u64 total_period = pair_hists->stats.total_period; - u64 base_period = pair->stat.period; +#undef HPP_PERCENT_FNS +#undef HPP_PERCENT_ACC_FNS +#undef HPP_RAW_FNS - percent = 100.0 * base_period / total_period; - } +#undef __HPP_HEADER_FN +#undef __HPP_WIDTH_FN +#undef __HPP_COLOR_PERCENT_FN +#undef __HPP_ENTRY_PERCENT_FN +#undef __HPP_COLOR_ACC_PERCENT_FN +#undef __HPP_ENTRY_ACC_PERCENT_FN +#undef __HPP_ENTRY_RAW_FN +#undef __HPP_SORT_FN +#undef __HPP_SORT_ACC_FN +#undef __HPP_SORT_RAW_FN - return percent; -} -static int hpp__color_baseline(struct perf_hpp *hpp, struct hist_entry *he) +void perf_hpp__init(void) { - double percent = baseline_percent(he); + struct list_head *list; + int i; - return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); -} + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + struct perf_hpp_fmt *fmt = &perf_hpp__format[i]; -static int hpp__entry_baseline(struct perf_hpp *hpp, struct hist_entry *he) -{ - double percent = baseline_percent(he); - const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%"; + INIT_LIST_HEAD(&fmt->list); - return scnprintf(hpp->buf, hpp->size, fmt, percent); -} + /* sort_list may be linked by setup_sorting() */ + if (fmt->sort_list.next == NULL) + INIT_LIST_HEAD(&fmt->sort_list); + } -static int hpp__header_samples(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? "%s" : "%11s"; + /* + * If user specified field order, no need to setup default fields. 
+ */ + if (field_order) + return; - return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); -} + if (symbol_conf.cumulate_callchain) { + perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC); -static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused) -{ - return 11; -} + perf_hpp__format[PERF_HPP__OVERHEAD].header = + hpp__header_overhead_self; + } -static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he) -{ - const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64; + perf_hpp__column_enable(PERF_HPP__OVERHEAD); - return scnprintf(hpp->buf, hpp->size, fmt, he->stat.nr_events); -} + if (symbol_conf.show_cpu_utilization) { + perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS); + perf_hpp__column_enable(PERF_HPP__OVERHEAD_US); -static int hpp__header_period(struct perf_hpp *hpp) -{ - const char *fmt = symbol_conf.field_sep ? "%s" : "%12s"; + if (perf_guest) { + perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS); + perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US); + } + } - return scnprintf(hpp->buf, hpp->size, fmt, "Period"); -} + if (symbol_conf.show_nr_samples) + perf_hpp__column_enable(PERF_HPP__SAMPLES); -static int hpp__width_period(struct perf_hpp *hpp __maybe_unused) -{ - return 12; -} + if (symbol_conf.show_total_period) + perf_hpp__column_enable(PERF_HPP__PERIOD); -static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he) -{ - const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64; + /* prepend overhead field for backward compatiblity. */ + list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list; + if (list_empty(list)) + list_add(list, &perf_hpp__sort_list); - return scnprintf(hpp->buf, hpp->size, fmt, he->stat.period); + if (symbol_conf.cumulate_callchain) { + list = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC].sort_list; + if (list_empty(list)) + list_add(list, &perf_hpp__sort_list); + } } -static int hpp__header_delta(struct perf_hpp *hpp) +void perf_hpp__column_register(struct perf_hpp_fmt *format) { - const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; - - return scnprintf(hpp->buf, hpp->size, fmt, "Delta"); + list_add_tail(&format->list, &perf_hpp__list); } -static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused) +void perf_hpp__column_unregister(struct perf_hpp_fmt *format) { - return 7; + list_del(&format->list); } -static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he) +void perf_hpp__register_sort_field(struct perf_hpp_fmt *format) { - struct hist_entry *pair = he->pair; - struct hists *pair_hists = pair ? pair->hists : NULL; - struct hists *hists = he->hists; - u64 old_total, new_total; - double old_percent = 0, new_percent = 0; - double diff; - const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s"; - char buf[32] = " "; - - old_total = pair_hists ? 
pair_hists->stats.total_period : 0; - if (old_total > 0 && pair) - old_percent = 100.0 * pair->stat.period / old_total; - - new_total = hists->stats.total_period; - if (new_total > 0) - new_percent = 100.0 * he->stat.period / new_total; - - diff = new_percent - old_percent; - if (fabs(diff) >= 0.01) - scnprintf(buf, sizeof(buf), "%+4.2F%%", diff); - - return scnprintf(hpp->buf, hpp->size, fmt, buf); + list_add_tail(&format->sort_list, &perf_hpp__sort_list); } -static int hpp__header_displ(struct perf_hpp *hpp) +void perf_hpp__column_enable(unsigned col) { - return scnprintf(hpp->buf, hpp->size, "Displ."); + BUG_ON(col >= PERF_HPP__MAX_INDEX); + perf_hpp__column_register(&perf_hpp__format[col]); } -static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) +void perf_hpp__column_disable(unsigned col) { - return 6; + BUG_ON(col >= PERF_HPP__MAX_INDEX); + perf_hpp__column_unregister(&perf_hpp__format[col]); } -static int hpp__entry_displ(struct perf_hpp *hpp, - struct hist_entry *he) +void perf_hpp__cancel_cumulate(void) { - struct hist_entry *pair = he->pair; - long displacement = pair ? pair->position - he->position : 0; - const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; - char buf[32] = " "; + if (field_order) + return; - if (displacement) - scnprintf(buf, sizeof(buf), "%+4ld", displacement); - - return scnprintf(hpp->buf, hpp->size, fmt, buf); + perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC); + perf_hpp__format[PERF_HPP__OVERHEAD].header = hpp__header_overhead; } -#define HPP__COLOR_PRINT_FNS(_name) \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .color = hpp__color_ ## _name, \ - .entry = hpp__entry_ ## _name - -#define HPP__PRINT_FNS(_name) \ - .header = hpp__header_ ## _name, \ - .width = hpp__width_ ## _name, \ - .entry = hpp__entry_ ## _name - -struct perf_hpp_fmt perf_hpp__format[] = { - { .cond = false, HPP__COLOR_PRINT_FNS(baseline) }, - { .cond = true, HPP__COLOR_PRINT_FNS(overhead) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) }, - { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) }, - { .cond = false, HPP__PRINT_FNS(samples) }, - { .cond = false, HPP__PRINT_FNS(period) }, - { .cond = false, HPP__PRINT_FNS(delta) }, - { .cond = false, HPP__PRINT_FNS(displ) } -}; - -#undef HPP__COLOR_PRINT_FNS -#undef HPP__PRINT_FNS - -void perf_hpp__init(void) +void perf_hpp__setup_output_field(void) { - if (symbol_conf.show_cpu_utilization) { - perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true; - perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true; + struct perf_hpp_fmt *fmt; - if (perf_guest) { - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true; - perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true; - } - } - - if (symbol_conf.show_nr_samples) - perf_hpp__format[PERF_HPP__SAMPLES].cond = true; - - if (symbol_conf.show_total_period) - perf_hpp__format[PERF_HPP__PERIOD].cond = true; -} + /* append sort keys to output field */ + perf_hpp__for_each_sort_list(fmt) { + if (!list_empty(&fmt->list)) + continue; -void perf_hpp__column_enable(unsigned col, bool enable) -{ - BUG_ON(col >= PERF_HPP__MAX_INDEX); - perf_hpp__format[col].cond = enable; -} + /* + * sort entry fields are dynamically created, + * so they can share a same sort key even though + * the list is empty. 
+ */ + if (perf_hpp__is_sort_entry(fmt)) { + struct perf_hpp_fmt *pos; + + perf_hpp__for_each_format(pos) { + if (perf_hpp__same_sort_entry(pos, fmt)) + goto next; + } + } -static inline void advance_hpp(struct perf_hpp *hpp, int inc) -{ - hpp->buf += inc; - hpp->size -= inc; + perf_hpp__column_register(fmt); +next: + continue; + } } -int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, - bool color) +void perf_hpp__append_sort_keys(void) { - const char *sep = symbol_conf.field_sep; - char *start = hpp->buf; - int i, ret; - bool first = true; + struct perf_hpp_fmt *fmt; - if (symbol_conf.exclude_other && !he->parent) - return 0; - - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) + /* append output fields to sort keys */ + perf_hpp__for_each_format(fmt) { + if (!list_empty(&fmt->sort_list)) continue; - if (!sep || !first) { - ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); - advance_hpp(hpp, ret); - first = false; + /* + * sort entry fields are dynamically created, + * so they can share a same sort key even though + * the list is empty. + */ + if (perf_hpp__is_sort_entry(fmt)) { + struct perf_hpp_fmt *pos; + + perf_hpp__for_each_sort_list(pos) { + if (perf_hpp__same_sort_entry(pos, fmt)) + goto next; + } } - if (color && perf_hpp__format[i].color) - ret = perf_hpp__format[i].color(hpp, he); - else - ret = perf_hpp__format[i].entry(hpp, he); - - advance_hpp(hpp, ret); + perf_hpp__register_sort_field(fmt); +next: + continue; } - - return hpp->buf - start; } -int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size, - struct hists *hists) +void perf_hpp__reset_output_field(void) { - const char *sep = symbol_conf.field_sep; - struct sort_entry *se; - int ret = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; + struct perf_hpp_fmt *fmt, *tmp; - ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); - ret += se->se_snprintf(he, s + ret, size - ret, - hists__col_len(hists, se->se_width_idx)); + /* reset output fields */ + perf_hpp__for_each_format_safe(fmt, tmp) { + list_del_init(&fmt->list); + list_del_init(&fmt->sort_list); } - return ret; + /* reset sort keys */ + perf_hpp__for_each_sort_list_safe(fmt, tmp) { + list_del_init(&fmt->list); + list_del_init(&fmt->sort_list); + } } /* @@ -404,23 +600,24 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size, */ unsigned int hists__sort_list_width(struct hists *hists) { - struct sort_entry *se; - int i, ret = 0; + struct perf_hpp_fmt *fmt; + int ret = 0; + bool first = true; + struct perf_hpp dummy_hpp; - for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { - if (!perf_hpp__format[i].cond) + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) continue; - if (i) + + if (first) + first = false; + else ret += 2; - ret += perf_hpp__format[i].width(NULL); + ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists)); } - list_for_each_entry(se, &hist_entry__sort_list, list) - if (!se->elide) - ret += 2 + hists__col_len(hists, se->se_width_idx); - - if (verbose) /* Addr + origin */ + if (verbose && sort__has_sym) /* Addr + origin */ ret += 3 + BITS_PER_LONG / 4; return ret; diff --git a/tools/perf/ui/keysyms.h b/tools/perf/ui/keysyms.h index 809eca5707f..65092d576b4 100644 --- a/tools/perf/ui/keysyms.h +++ b/tools/perf/ui/keysyms.h @@ -23,5 +23,6 @@ #define K_TIMER -1 #define K_ERROR -2 #define K_RESIZE -3 +#define K_SWITCH_INPUT_DATA -4 #endif /* _PERF_KEYSYMS_H_ */ diff --git a/tools/perf/ui/progress.c 
b/tools/perf/ui/progress.c index 13aa64e50e1..a0f24c7115c 100644 --- a/tools/perf/ui/progress.c +++ b/tools/perf/ui/progress.c @@ -1,32 +1,38 @@ #include "../cache.h" #include "progress.h" -#include "libslang.h" -#include "ui.h" -#include "browser.h" -void ui_progress__update(u64 curr, u64 total, const char *title) +static void null_progress__update(struct ui_progress *p __maybe_unused) { - int bar, y; - /* - * FIXME: We should have a per UI backend way of showing progress, - * stdio will just show a percentage as NN%, etc. - */ - if (use_browser <= 0) - return; - - if (total == 0) - return; - - ui__refresh_dimensions(true); - pthread_mutex_lock(&ui__lock); - y = SLtt_Screen_Rows / 2 - 2; - SLsmg_set_color(0); - SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); - SLsmg_gotorc(y++, 1); - SLsmg_write_string((char *)title); - SLsmg_set_color(HE_COLORSET_SELECTED); - bar = ((SLtt_Screen_Cols - 2) * curr) / total; - SLsmg_fill_region(y, 1, 1, bar, ' '); - SLsmg_refresh(); - pthread_mutex_unlock(&ui__lock); +} + +static struct ui_progress_ops null_progress__ops = +{ + .update = null_progress__update, +}; + +struct ui_progress_ops *ui_progress__ops = &null_progress__ops; + +void ui_progress__update(struct ui_progress *p, u64 adv) +{ + p->curr += adv; + + if (p->curr >= p->next) { + p->next += p->step; + ui_progress__ops->update(p); + } +} + +void ui_progress__init(struct ui_progress *p, u64 total, const char *title) +{ + p->curr = 0; + p->next = p->step = total / 16; + p->total = total; + p->title = title; + +} + +void ui_progress__finish(void) +{ + if (ui_progress__ops->finish) + ui_progress__ops->finish(); } diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h index d9c205b59aa..f34f89eb607 100644 --- a/tools/perf/ui/progress.h +++ b/tools/perf/ui/progress.h @@ -1,8 +1,23 @@ #ifndef _PERF_UI_PROGRESS_H_ #define _PERF_UI_PROGRESS_H_ 1 -#include <../types.h> +#include <linux/types.h> -void ui_progress__update(u64 curr, u64 total, const char *title); +void ui_progress__finish(void); + +struct ui_progress { + const char *title; + u64 curr, next, step, total; +}; + +void ui_progress__init(struct ui_progress *p, u64 total, const char *title); +void ui_progress__update(struct ui_progress *p, u64 adv); + +struct ui_progress_ops { + void (*update)(struct ui_progress *p); + void (*finish)(void); +}; + +extern struct ui_progress_ops *ui_progress__ops; #endif diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c index ebb4cc10787..ba51fa8a117 100644 --- a/tools/perf/ui/setup.c +++ b/tools/perf/ui/setup.c @@ -1,14 +1,68 @@ #include <pthread.h> +#include <dlfcn.h> #include "../util/cache.h" #include "../util/debug.h" #include "../util/hist.h" pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; +void *perf_gtk_handle; + +#ifdef HAVE_GTK2_SUPPORT +static int setup_gtk_browser(void) +{ + int (*perf_ui_init)(void); + + if (perf_gtk_handle) + return 0; + + perf_gtk_handle = dlopen(PERF_GTK_DSO, RTLD_LAZY); + if (perf_gtk_handle == NULL) { + char buf[PATH_MAX]; + scnprintf(buf, sizeof(buf), "%s/%s", LIBDIR, PERF_GTK_DSO); + perf_gtk_handle = dlopen(buf, RTLD_LAZY); + } + if (perf_gtk_handle == NULL) + return -1; + + perf_ui_init = dlsym(perf_gtk_handle, "perf_gtk__init"); + if (perf_ui_init == NULL) + goto out_close; + + if (perf_ui_init() == 0) + return 0; + +out_close: + dlclose(perf_gtk_handle); + return -1; +} + +static void exit_gtk_browser(bool wait_for_ok) +{ + void (*perf_ui_exit)(bool); + + if (perf_gtk_handle == NULL) + return; + + perf_ui_exit = dlsym(perf_gtk_handle, "perf_gtk__exit"); + if 
(perf_ui_exit == NULL) + goto out_close; + + perf_ui_exit(wait_for_ok); + +out_close: + dlclose(perf_gtk_handle); + + perf_gtk_handle = NULL; +} +#else +static inline int setup_gtk_browser(void) { return -1; } +static inline void exit_gtk_browser(bool wait_for_ok __maybe_unused) {} +#endif void setup_browser(bool fallback_to_pager) { - if (!isatty(1) || dump_trace) + if (use_browser < 2 && (!isatty(1) || dump_trace)) use_browser = 0; /* default to TUI */ @@ -17,8 +71,11 @@ void setup_browser(bool fallback_to_pager) switch (use_browser) { case 2: - if (perf_gtk__init() == 0) + if (setup_gtk_browser() == 0) break; + printf("GTK browser requested but could not find %s\n", + PERF_GTK_DSO); + sleep(1); /* fall through */ case 1: use_browser = 1; @@ -29,8 +86,6 @@ void setup_browser(bool fallback_to_pager) use_browser = 0; if (fallback_to_pager) setup_pager(); - - perf_hpp__init(); break; } } @@ -39,7 +94,7 @@ void exit_browser(bool wait_for_ok) { switch (use_browser) { case 2: - perf_gtk__exit(wait_for_ok); + exit_gtk_browser(wait_for_ok); break; case 1: diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c index fbd4e32d074..90122abd372 100644 --- a/tools/perf/ui/stdio/hist.c +++ b/tools/perf/ui/stdio/hist.c @@ -3,6 +3,7 @@ #include "../../util/util.h" #include "../../util/hist.h" #include "../../util/sort.h" +#include "../../util/evsel.h" static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) @@ -182,7 +183,8 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, * the symbol. No need to print it otherwise it appears as * displayed twice. */ - if (!i++ && sort__first_dimension == SORT_SYM) + if (!i++ && field_order == NULL && + sort_order && !prefixcmp(sort_order, "sym")) continue; if (!printed) { ret += callchain__fprintf_left_margin(fp, left_margin); @@ -212,20 +214,19 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, return ret; } -static size_t __callchain__fprintf_flat(FILE *fp, - struct callchain_node *self, +static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node, u64 total_samples) { struct callchain_list *chain; size_t ret = 0; - if (!self) + if (!node) return 0; - ret += __callchain__fprintf_flat(fp, self->parent, total_samples); + ret += __callchain__fprintf_flat(fp, node->parent, total_samples); - list_for_each_entry(chain, &self->val, list) { + list_for_each_entry(chain, &node->val, list) { if (chain->ip >= PERF_CONTEXT_MAX) continue; if (chain->ms.sym) @@ -238,15 +239,14 @@ static size_t __callchain__fprintf_flat(FILE *fp, return ret; } -static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, +static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree, u64 total_samples) { size_t ret = 0; u32 entries_printed = 0; - struct rb_node *rb_node; struct callchain_node *chain; + struct rb_node *rb_node = rb_first(tree); - rb_node = rb_first(self); while (rb_node) { double percent; @@ -271,7 +271,9 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he, { switch (callchain_param.mode) { case CHAIN_GRAPH_REL: - return callchain__fprintf_graph(fp, &he->sorted_chain, he->stat.period, + return callchain__fprintf_graph(fp, &he->sorted_chain, + symbol_conf.cumulate_callchain ? 
+ he->stat_acc->period : he->stat.period, left_margin); break; case CHAIN_GRAPH_ABS: @@ -297,32 +299,73 @@ static size_t hist_entry__callchain_fprintf(struct hist_entry *he, int left_margin = 0; u64 total_period = hists->stats.total_period; - if (sort__first_dimension == SORT_COMM) { - struct sort_entry *se = list_first_entry(&hist_entry__sort_list, - typeof(*se), list); - left_margin = hists__col_len(hists, se->se_width_idx); - left_margin -= thread__comm_len(he->thread); - } + if (field_order == NULL && (sort_order == NULL || + !prefixcmp(sort_order, "comm"))) { + struct perf_hpp_fmt *fmt; + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + /* must be 'comm' sort entry */ + left_margin = fmt->width(fmt, NULL, hists_to_evsel(hists)); + left_margin -= thread__comm_len(he->thread); + break; + } + } return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); } +static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp) +{ + const char *sep = symbol_conf.field_sep; + struct perf_hpp_fmt *fmt; + char *start = hpp->buf; + int ret; + bool first = true; + + if (symbol_conf.exclude_other && !he->parent) + return 0; + + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + /* + * If there's no field_sep, we still need + * to display initial ' '. + */ + if (!sep || !first) { + ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); + advance_hpp(hpp, ret); + } else + first = false; + + if (perf_hpp__use_color() && fmt->color) + ret = fmt->color(fmt, hpp, he); + else + ret = fmt->entry(fmt, hpp, he); + + advance_hpp(hpp, ret); + } + + return hpp->buf - start; +} + static int hist_entry__fprintf(struct hist_entry *he, size_t size, - struct hists *hists, FILE *fp) + struct hists *hists, + char *bf, size_t bfsz, FILE *fp) { - char bf[512]; int ret; struct perf_hpp hpp = { .buf = bf, .size = size, }; - bool color = !symbol_conf.field_sep; - if (size == 0 || size > sizeof(bf)) - size = hpp.size = sizeof(bf); + if (size == 0 || size > bfsz) + size = hpp.size = bfsz; - ret = hist_entry__period_snprintf(&hpp, he, color); - hist_entry__sort_snprintf(he, bf + ret, size - ret, hists); + hist_entry__snprintf(he, &hpp); ret = fprintf(fp, "%s\n", bf); @@ -333,30 +376,36 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size, } size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, - int max_cols, FILE *fp) + int max_cols, float min_pcnt, FILE *fp) { - struct sort_entry *se; + struct perf_hpp_fmt *fmt; struct rb_node *nd; size_t ret = 0; unsigned int width; const char *sep = symbol_conf.field_sep; - const char *col_width = symbol_conf.col_width_list_str; - int idx, nr_rows = 0; - char bf[64]; + int nr_rows = 0; + char bf[96]; struct perf_hpp dummy_hpp = { .buf = bf, .size = sizeof(bf), }; bool first = true; + size_t linesz; + char *line = NULL; init_rem_hits(); + + perf_hpp__for_each_format(fmt) + perf_hpp__reset_width(fmt, hists); + if (!show_header) goto print_entries; fprintf(fp, "# "); - for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { - if (!perf_hpp__format[idx].cond) + + perf_hpp__for_each_format(fmt) { + if (perf_hpp__should_skip(fmt)) continue; if (!first) @@ -364,32 +413,10 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, else first = false; - perf_hpp__format[idx].header(&dummy_hpp); + fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists)); fprintf(fp, "%s", bf); } - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; 
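The hist_entry__snprintf() introduced above renders one row by walking every registered format and letting each callback append to the same buffer, inserting either the user's field separator or alignment spaces in between (the advance_hpp() idiom). A self-contained sketch of that loop shape follows; the row/format types and values are made up and no perf internals are used.

/* toy_row.c - illustrates the walk-the-registered-formats rendering loop */
#include <stdio.h>

struct row {
	const char *comm;
	double overhead;
	long samples;
};

struct fmt {
	int (*entry)(char *buf, size_t size, const struct row *r);
};

static int fmt_overhead(char *buf, size_t size, const struct row *r)
{
	return snprintf(buf, size, "%6.2f%%", r->overhead);
}

static int fmt_samples(char *buf, size_t size, const struct row *r)
{
	return snprintf(buf, size, "%8ld", r->samples);
}

static int fmt_comm(char *buf, size_t size, const struct row *r)
{
	return snprintf(buf, size, "%s", r->comm);
}

static struct fmt formats[] = {
	{ fmt_overhead }, { fmt_samples }, { fmt_comm },
};

/* scnprintf()-style advance: clamp to what actually fit in the buffer. */
static void append(char **buf, size_t *size, int n)
{
	if (n < 0)
		n = 0;
	if ((size_t)n >= *size)
		n = *size ? (int)*size - 1 : 0;
	*buf += n;
	*size -= n;
}

static int row_snprintf(char *buf, size_t size, const struct row *r,
			const char *sep)
{
	char *start = buf;
	int first = 1;

	for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
		/* first column skips the separator only when one was given */
		if (!sep || !first)
			append(&buf, &size,
			       snprintf(buf, size, "%s", sep ? sep : "  "));
		else
			first = 0;

		append(&buf, &size, formats[i].entry(buf, size, r));
	}
	return (int)(buf - start);
}

int main(void)
{
	struct row r = { .comm = "perf", .overhead = 42.13, .samples = 1234 };
	char line[128];

	row_snprintf(line, sizeof(line), &r, NULL);	/* aligned columns */
	puts(line);
	row_snprintf(line, sizeof(line), &r, ",");	/* separator mode  */
	puts(line);
	return 0;
}

Keeping the separator decision in the caller, as the patch does, lets each column formatter stay oblivious to whether the output is aligned columns or field-separated text.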
- if (sep) { - fprintf(fp, "%c%s", *sep, se->se_header); - continue; - } - width = strlen(se->se_header); - if (symbol_conf.col_width_list_str) { - if (col_width) { - hists__set_col_len(hists, se->se_width_idx, - atoi(col_width)); - col_width = strchr(col_width, ','); - if (col_width) - ++col_width; - } - } - if (!hists__new_col_len(hists, se->se_width_idx, width)) - width = hists__col_len(hists, se->se_width_idx); - fprintf(fp, " %*s", width, se->se_header); - } - fprintf(fp, "\n"); if (max_rows && ++nr_rows >= max_rows) goto out; @@ -400,10 +427,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, first = true; fprintf(fp, "# "); - for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { + + perf_hpp__for_each_format(fmt) { unsigned int i; - if (!perf_hpp__format[idx].cond) + if (perf_hpp__should_skip(fmt)) continue; if (!first) @@ -411,21 +439,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, else first = false; - width = perf_hpp__format[idx].width(&dummy_hpp); - for (i = 0; i < width; i++) - fprintf(fp, "."); - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - unsigned int i; - - if (se->elide) - continue; - - fprintf(fp, " "); - width = hists__col_len(hists, se->se_width_idx); - if (width == 0) - width = strlen(se->se_header); + width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists)); for (i = 0; i < width; i++) fprintf(fp, "."); } @@ -439,30 +453,45 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, goto out; print_entries: + linesz = hists__sort_list_width(hists) + 3 + 1; + linesz += perf_hpp__color_overhead(); + line = malloc(linesz); + if (line == NULL) { + ret = -1; + goto out; + } + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + float percent; if (h->filtered) continue; - ret += hist_entry__fprintf(h, max_cols, hists, fp); + percent = hist_entry__get_percent_limit(h); + if (percent < min_pcnt) + continue; + + ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp); if (max_rows && ++nr_rows >= max_rows) - goto out; + break; if (h->ms.map == NULL && verbose > 1) { - __map_groups__fprintf_maps(&h->thread->mg, + __map_groups__fprintf_maps(h->thread->mg, MAP__FUNCTION, verbose, fp); fprintf(fp, "%.10s end\n", graph_dotted_line); } } + + free(line); out: - free(rem_sq_bracket); + zfree(&rem_sq_bracket); return ret; } -size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) +size_t events_stats__fprintf(struct events_stats *stats, FILE *fp) { int i; size_t ret = 0; @@ -470,7 +499,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { const char *name; - if (hists->stats.nr_events[i] == 0) + if (stats->nr_events[i] == 0) continue; name = perf_event__name(i); @@ -478,7 +507,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) continue; ret += fprintf(fp, "%16s events: %10d\n", name, - hists->stats.nr_events[i]); + stats->nr_events[i]); } return ret; diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c index 2884d2f41e3..1c8b9afd5d6 100644 --- a/tools/perf/ui/tui/helpline.c +++ b/tools/perf/ui/tui/helpline.c @@ -8,6 +8,8 @@ #include "../ui.h" #include "../libslang.h" +char ui_helpline__last_msg[1024]; + static void tui_helpline__pop(void) { } @@ -23,20 +25,7 @@ static void tui_helpline__push(const char *msg) strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; } -struct ui_helpline tui_helpline_fns = { 
- .pop = tui_helpline__pop, - .push = tui_helpline__push, -}; - -void ui_helpline__init(void) -{ - helpline_fns = &tui_helpline_fns; - ui_helpline__puts(" "); -} - -char ui_helpline__last_msg[1024]; - -int ui_helpline__show_help(const char *format, va_list ap) +static int tui_helpline__show(const char *format, va_list ap) { int ret; static int backlog; @@ -55,3 +44,15 @@ int ui_helpline__show_help(const char *format, va_list ap) return ret; } + +struct ui_helpline tui_helpline_fns = { + .pop = tui_helpline__pop, + .push = tui_helpline__push, + .show = tui_helpline__show, +}; + +void ui_helpline__init(void) +{ + helpline_fns = &tui_helpline_fns; + ui_helpline__puts(" "); +} diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c new file mode 100644 index 00000000000..c61d14b101e --- /dev/null +++ b/tools/perf/ui/tui/progress.c @@ -0,0 +1,44 @@ +#include "../cache.h" +#include "../progress.h" +#include "../libslang.h" +#include "../ui.h" +#include "tui.h" +#include "../browser.h" + +static void tui_progress__update(struct ui_progress *p) +{ + int bar, y; + /* + * FIXME: We should have a per UI backend way of showing progress, + * stdio will just show a percentage as NN%, etc. + */ + if (use_browser <= 0) + return; + + if (p->total == 0) + return; + + ui__refresh_dimensions(false); + pthread_mutex_lock(&ui__lock); + y = SLtt_Screen_Rows / 2 - 2; + SLsmg_set_color(0); + SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); + SLsmg_gotorc(y++, 1); + SLsmg_write_string((char *)p->title); + SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' '); + SLsmg_set_color(HE_COLORSET_SELECTED); + bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total; + SLsmg_fill_region(y, 1, 1, bar, ' '); + SLsmg_refresh(); + pthread_mutex_unlock(&ui__lock); +} + +static struct ui_progress_ops tui_progress__ops = +{ + .update = tui_progress__update, +}; + +void tui_progress__init(void) +{ + ui_progress__ops = &tui_progress__ops; +} diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 60debb81537..2f612562978 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c @@ -1,4 +1,3 @@ -#include <newt.h> #include <signal.h> #include <stdbool.h> @@ -10,6 +9,7 @@ #include "../util.h" #include "../libslang.h" #include "../keysyms.h" +#include "tui.h" static volatile int ui__need_resize; @@ -88,13 +88,6 @@ int ui__getch(int delay_secs) return SLkp_getkey(); } -static void newt_suspend(void *d __maybe_unused) -{ - newtSuspend(); - raise(SIGTSTP); - newtResume(); -} - static void ui__signal(int sig) { ui__exit(false); @@ -106,7 +99,17 @@ int ui__init(void) { int err; - newtInit(); + SLutf8_enable(-1); + SLtt_get_terminfo(); + SLtt_get_screen_size(); + + err = SLsmg_init_smg(); + if (err < 0) + goto out; + err = SLang_init_tty(0, 0, 0); + if (err < 0) + goto out; + err = SLkp_init(); if (err < 0) { pr_err("TUI initialization failed.\n"); @@ -115,9 +118,9 @@ int ui__init(void) SLkp_define_keysym((char *)"^(kB)", SL_KEY_UNTAB); - newtSetSuspendCallback(newt_suspend, NULL); ui_helpline__init(); ui_browser__init(); + tui_progress__init(); signal(SIGSEGV, ui__signal); signal(SIGFPE, ui__signal); diff --git a/tools/perf/ui/tui/tui.h b/tools/perf/ui/tui/tui.h new file mode 100644 index 00000000000..18961c7b6ec --- /dev/null +++ b/tools/perf/ui/tui/tui.h @@ -0,0 +1,6 @@ +#ifndef _PERF_TUI_H_ +#define _PERF_TUI_H_ 1 + +void tui_progress__init(void); + +#endif /* _PERF_TUI_H_ */ diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c index 092902e30ce..bf890f72fe8 100644 --- 
a/tools/perf/ui/tui/util.c +++ b/tools/perf/ui/tui/util.c @@ -92,6 +92,8 @@ int ui_browser__input_window(const char *title, const char *text, char *input, t = sep + 1; } + pthread_mutex_lock(&ui__lock); + max_len += 2; nr_lines += 8; y = SLtt_Screen_Rows / 2 - nr_lines / 2; @@ -120,13 +122,19 @@ int ui_browser__input_window(const char *title, const char *text, char *input, SLsmg_write_nstring((char *)exit_msg, max_len); SLsmg_refresh(); + pthread_mutex_unlock(&ui__lock); + x += 2; len = 0; key = ui__getch(delay_secs); while (key != K_TIMER && key != K_ENTER && key != K_ESC) { + pthread_mutex_lock(&ui__lock); + if (key == K_BKSPC) { - if (len == 0) + if (len == 0) { + pthread_mutex_unlock(&ui__lock); goto next_key; + } SLsmg_gotorc(y, x + --len); SLsmg_write_char(' '); } else { @@ -136,6 +144,8 @@ int ui_browser__input_window(const char *title, const char *text, char *input, } SLsmg_refresh(); + pthread_mutex_unlock(&ui__lock); + /* XXX more graceful overflow handling needed */ if (len == sizeof(buf) - 1) { ui_helpline__push("maximum size of symbol name reached!"); @@ -174,6 +184,8 @@ int ui__question_window(const char *title, const char *text, t = sep + 1; } + pthread_mutex_lock(&ui__lock); + max_len += 2; nr_lines += 4; y = SLtt_Screen_Rows / 2 - nr_lines / 2, @@ -195,6 +207,9 @@ int ui__question_window(const char *title, const char *text, SLsmg_gotorc(y + nr_lines - 1, x); SLsmg_write_nstring((char *)exit_msg, max_len); SLsmg_refresh(); + + pthread_mutex_unlock(&ui__lock); + return ui__getch(delay_secs); } @@ -215,9 +230,7 @@ static int __ui__warning(const char *title, const char *format, va_list args) if (vasprintf(&s, format, args) > 0) { int key; - pthread_mutex_lock(&ui__lock); key = ui__question_window(title, s, "Press any key...", 0); - pthread_mutex_unlock(&ui__lock); free(s); return key; } diff --git a/tools/perf/ui/ui.h b/tools/perf/ui/ui.h index 7b67045479f..ab88383f8be 100644 --- a/tools/perf/ui/ui.h +++ b/tools/perf/ui/ui.h @@ -3,8 +3,26 @@ #include <pthread.h> #include <stdbool.h> +#include <linux/compiler.h> extern pthread_mutex_t ui__lock; +extern void *perf_gtk_handle; + +extern int use_browser; + +void setup_browser(bool fallback_to_pager); +void exit_browser(bool wait_for_ok); + +#ifdef HAVE_SLANG_SUPPORT +int ui__init(void); +void ui__exit(bool wait_for_ok); +#else +static inline int ui__init(void) +{ + return -1; +} +static inline void ui__exit(bool wait_for_ok __maybe_unused) {} +#endif void ui__refresh_dimensions(bool force); diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c index 4f989774c8c..e3e0a963d03 100644 --- a/tools/perf/ui/util.c +++ b/tools/perf/ui/util.c @@ -52,7 +52,6 @@ int ui__warning(const char *format, ...) 
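The tui/util.c hunks above wrap each block of SLsmg drawing calls in ui__lock so that popups cannot interleave with screen refreshes issued from other threads, and __ui__warning() drops its own locking now that ui__question_window() takes the mutex itself. The following minimal, self-contained program illustrates that serialize-the-drawing pattern with plain pthreads and a fake screen counter; it is not slang/TUI code and the names are invented (build with cc -pthread).

/* toy_uilock.c - one mutex around every "drawing" block */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t screen_lock = PTHREAD_MUTEX_INITIALIZER;
static int screen_rows_drawn;

static void draw_box(const char *title)
{
	pthread_mutex_lock(&screen_lock);
	/*
	 * In the TUI this span would be SLsmg_draw_box()/SLsmg_write_string()/
	 * SLsmg_refresh(); here we just mutate shared state atomically.
	 */
	screen_rows_drawn++;
	printf("[%s] drawn, total=%d\n", title, screen_rows_drawn);
	pthread_mutex_unlock(&screen_lock);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 3; i++)
		draw_box((const char *)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, "input window");
	pthread_create(&b, NULL, worker, "warning popup");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}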
return ret; } - /** * perf_error__register - Register error logging functions * @eops: The pointer to error logging function struct diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN index 95264f30417..39f17507578 100755 --- a/tools/perf/util/PERF-VERSION-GEN +++ b/tools/perf/util/PERF-VERSION-GEN @@ -9,34 +9,42 @@ GVF=${OUTPUT}PERF-VERSION-FILE LF=' ' +# # First check if there is a .git to get the version from git describe -# otherwise try to get the version from the kernel makefile -if test -d ../../.git -o -f ../../.git && - VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && - case "$VN" in - *$LF*) (exit 1) ;; - v[0-9]*) - git update-index -q --refresh - test -z "$(git diff-index --name-only HEAD --)" || - VN="$VN-dirty" ;; - esac +# otherwise try to get the version from the kernel Makefile +# +CID= +TAG= +if test -d ../../.git -o -f ../../.git then - VN=$(echo "$VN" | sed -e 's/-/./g'); -else - VN=$(MAKEFLAGS= make -sC ../.. kernelversion) + TAG=$(git describe --abbrev=0 --match "v[0-9].[0-9]*" 2>/dev/null ) + CID=$(git log -1 --abbrev=4 --pretty=format:"%h" 2>/dev/null) && CID="-g$CID" +elif test -f ../../PERF-VERSION-FILE +then + TAG=$(cut -d' ' -f3 ../../PERF-VERSION-FILE | sed -e 's/\"//g') +fi +if test -z "$TAG" +then + TAG=$(MAKEFLAGS= make -sC ../.. kernelversion) +fi +VN="$TAG$CID" +if test -n "$CID" +then + # format version string, strip trailing zero of sublevel: + VN=$(echo "$VN" | sed -e 's/-/./g;s/\([0-9]*[.][0-9]*\)[.]0/\1/') fi VN=$(expr "$VN" : v*'\(.*\)') if test -r $GVF then - VC=$(sed -e 's/^PERF_VERSION = //' <$GVF) + VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF) else VC=unset fi test "$VN" = "$VC" || { - echo >&2 "PERF_VERSION = $VN" - echo "PERF_VERSION = $VN" >$GVF + echo >&2 " PERF_VERSION = $VN" + echo "#define PERF_VERSION \"$VN\"" >$GVF } diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c index e6d134773d0..c0b43ee40d9 100644 --- a/tools/perf/util/alias.c +++ b/tools/perf/util/alias.c @@ -55,8 +55,7 @@ int split_cmdline(char *cmdline, const char ***argv) src++; c = cmdline[src]; if (!c) { - free(*argv); - *argv = NULL; + zfree(argv); return error("cmdline ends with \\"); } } @@ -68,8 +67,7 @@ int split_cmdline(char *cmdline, const char ***argv) cmdline[dst] = 0; if (quoted) { - free(*argv); - *argv = NULL; + zfree(argv); return error("unclosed quote"); } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index f0a91037137..809b4c50bea 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -8,13 +8,17 @@ */ #include "util.h" +#include "ui/ui.h" +#include "sort.h" #include "build-id.h" #include "color.h" #include "cache.h" #include "symbol.h" #include "debug.h" #include "annotate.h" +#include "evsel.h" #include <pthread.h> +#include <linux/bitops.h> const char *disassembler_style; const char *objdump_path; @@ -24,10 +28,10 @@ static int disasm_line__parse(char *line, char **namep, char **rawp); static void ins__delete(struct ins_operands *ops) { - free(ops->source.raw); - free(ops->source.name); - free(ops->target.raw); - free(ops->target.name); + zfree(&ops->source.raw); + zfree(&ops->source.name); + zfree(&ops->target.raw); + zfree(&ops->target.name); } static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, @@ -108,10 +112,10 @@ static int jump__parse(struct ins_operands *ops) { const char *s = strchr(ops->raw, '+'); - ops->target.addr = strtoll(ops->raw, NULL, 16); + ops->target.addr = strtoull(ops->raw, NULL, 16); if (s++ != 
NULL) - ops->target.offset = strtoll(s, NULL, 16); + ops->target.offset = strtoull(s, NULL, 16); else ops->target.offset = UINT64_MAX; @@ -170,21 +174,20 @@ static int lock__parse(struct ins_operands *ops) if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0) goto out_free_ops; - ops->locked.ins = ins__find(name); - if (ops->locked.ins == NULL) - goto out_free_ops; + ops->locked.ins = ins__find(name); + if (ops->locked.ins == NULL) + goto out_free_ops; - if (!ops->locked.ins->ops) - return 0; + if (!ops->locked.ins->ops) + return 0; - if (ops->locked.ins->ops->parse) - ops->locked.ins->ops->parse(ops->locked.ops); + if (ops->locked.ins->ops->parse) + ops->locked.ins->ops->parse(ops->locked.ops); return 0; out_free_ops: - free(ops->locked.ops); - ops->locked.ops = NULL; + zfree(&ops->locked.ops); return 0; } @@ -203,9 +206,9 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size, static void lock__delete(struct ins_operands *ops) { - free(ops->locked.ops); - free(ops->target.raw); - free(ops->target.name); + zfree(&ops->locked.ops); + zfree(&ops->target.raw); + zfree(&ops->target.name); } static struct ins_ops lock_ops = { @@ -254,8 +257,7 @@ static int mov__parse(struct ins_operands *ops) return 0; out_free_source: - free(ops->source.raw); - ops->source.raw = NULL; + zfree(&ops->source.raw); return -1; } @@ -400,6 +402,8 @@ static struct ins instructions[] = { { .name = "testb", .ops = &mov_ops, }, { .name = "testl", .ops = &mov_ops, }, { .name = "xadd", .ops = &mov_ops, }, + { .name = "xbeginl", .ops = &jump_ops, }, + { .name = "xbeginq", .ops = &jump_ops, }, }; static int ins__cmp(const void *name, const void *insp) @@ -460,17 +464,12 @@ void symbol__annotate_zero_histograms(struct symbol *sym) pthread_mutex_unlock(¬es->lock); } -int symbol__inc_addr_samples(struct symbol *sym, struct map *map, - int evidx, u64 addr) +static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, + struct annotation *notes, int evidx, u64 addr) { unsigned offset; - struct annotation *notes; struct sym_hist *h; - notes = symbol__annotation(sym); - if (notes->src == NULL) - return -ENOMEM; - pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); if (addr < sym->start || addr > sym->end) @@ -487,6 +486,33 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map, return 0; } +static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, + int evidx, u64 addr) +{ + struct annotation *notes; + + if (sym == NULL) + return 0; + + notes = symbol__annotation(sym); + if (notes->src == NULL) { + if (symbol__alloc_hist(sym) < 0) + return -ENOMEM; + } + + return __symbol__inc_addr_samples(sym, map, notes, evidx, addr); +} + +int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx) +{ + return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr); +} + +int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) +{ + return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); +} + static void disasm_line__init_ins(struct disasm_line *dl) { dl->ins = ins__find(dl->name); @@ -534,8 +560,7 @@ static int disasm_line__parse(char *line, char **namep, char **rawp) return 0; out_free_name: - free(*namep); - *namep = NULL; + zfree(namep); return -1; } @@ -560,7 +585,7 @@ static struct disasm_line *disasm_line__new(s64 offset, char *line, size_t privs return dl; out_free_line: - free(dl->line); + zfree(&dl->line); out_delete: free(dl); return NULL; @@ -568,8 +593,8 @@ out_delete: void 
disasm_line__free(struct disasm_line *dl) { - free(dl->line); - free(dl->name); + zfree(&dl->line); + zfree(&dl->name); if (dl->ins && dl->ins->ops->free) dl->ins->ops->free(&dl->ops); else @@ -599,8 +624,42 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa return NULL; } +double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, + s64 end, const char **path) +{ + struct source_line *src_line = notes->src->lines; + double percent = 0.0; + + if (src_line) { + size_t sizeof_src_line = sizeof(*src_line) + + sizeof(src_line->p) * (src_line->nr_pcnt - 1); + + while (offset < end) { + src_line = (void *)notes->src->lines + + (sizeof_src_line * offset); + + if (*path == NULL) + *path = src_line->path; + + percent += src_line->p[evidx].percent; + offset++; + } + } else { + struct sym_hist *h = annotation__histogram(notes, evidx); + unsigned int hits = 0; + + while (offset < end) + hits += h->addr[offset++]; + + if (h->sum) + percent = 100.0 * hits / h->sum; + } + + return percent; +} + static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start, - int evidx, u64 len, int min_pcnt, int printed, + struct perf_evsel *evsel, u64 len, int min_pcnt, int printed, int max_lines, struct disasm_line *queue) { static const char *prev_line; @@ -608,34 +667,37 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st if (dl->offset != -1) { const char *path = NULL; - unsigned int hits = 0; - double percent = 0.0; + double percent, max_percent = 0.0; + double *ppercents = &percent; + int i, nr_percent = 1; const char *color; struct annotation *notes = symbol__annotation(sym); - struct source_line *src_line = notes->src->lines; - struct sym_hist *h = annotation__histogram(notes, evidx); s64 offset = dl->offset; const u64 addr = start + offset; struct disasm_line *next; next = disasm__get_next_ip_line(¬es->src->source, dl); - while (offset < (s64)len && - (next == NULL || offset < next->offset)) { - if (src_line) { - if (path == NULL) - path = src_line[offset].path; - percent += src_line[offset].percent; - } else - hits += h->addr[offset]; - - ++offset; + if (perf_evsel__is_group_event(evsel)) { + nr_percent = evsel->nr_members; + ppercents = calloc(nr_percent, sizeof(double)); + if (ppercents == NULL) + return -1; } - if (src_line == NULL && h->sum) - percent = 100.0 * hits / h->sum; + for (i = 0; i < nr_percent; i++) { + percent = disasm__calc_percent(notes, + notes->src->lines ? i : evsel->idx + i, + offset, + next ? 
next->offset : (s64) len, + &path); - if (percent < min_pcnt) + ppercents[i] = percent; + if (percent > max_percent) + max_percent = percent; + } + + if (max_percent < min_pcnt) return -1; if (max_lines && printed >= max_lines) @@ -645,12 +707,12 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st list_for_each_entry_from(queue, ¬es->src->source, node) { if (queue == dl) break; - disasm_line__print(queue, sym, start, evidx, len, + disasm_line__print(queue, sym, start, evsel, len, 0, 0, 1, NULL); } } - color = get_percent_color(percent); + color = get_percent_color(max_percent); /* * Also color the filename and line if needed, with @@ -666,25 +728,59 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st } } - color_fprintf(stdout, color, " %7.2f", percent); + for (i = 0; i < nr_percent; i++) { + percent = ppercents[i]; + color = get_percent_color(percent); + color_fprintf(stdout, color, " %7.2f", percent); + } + printf(" : "); color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr); color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line); + + if (ppercents != &percent) + free(ppercents); + } else if (max_lines && printed >= max_lines) return 1; else { + int width = 8; + if (queue) return -1; + if (perf_evsel__is_group_event(evsel)) + width *= evsel->nr_members; + if (!*dl->line) - printf(" :\n"); + printf(" %*s:\n", width, " "); else - printf(" : %s\n", dl->line); + printf(" %*s: %s\n", width, " ", dl->line); } return 0; } +/* + * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw) + * which looks like following + * + * 0000000000415500 <_init>: + * 415500: sub $0x8,%rsp + * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8> + * 41550b: test %rax,%rax + * 41550e: je 415515 <_init+0x15> + * 415510: callq 416e70 <__gmon_start__@plt> + * 415515: add $0x8,%rsp + * 415519: retq + * + * it will be parsed and saved into struct disasm_line as + * <offset> <name> <ops.raw> + * + * The offset will be a relative offset from the start of the symbol and -1 + * means that it's not a disassembly line so should be treated differently. + * The ops.raw part will be parsed further according to type of the instruction. 
+ */ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, FILE *file, size_t privsize) { @@ -734,7 +830,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, end = map__rip_2objdump(map, sym->end); offset = line_ip - start; - if (offset < 0 || (u64)line_ip > end) + if ((u64)line_ip < start || (u64)line_ip > end) offset = -1; else parsed_line = tmp2 + 1; @@ -746,11 +842,51 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, if (dl == NULL) return -1; + if (dl->ops.target.offset == UINT64_MAX) + dl->ops.target.offset = dl->ops.target.addr - + map__rip_2objdump(map, sym->start); + + /* kcore has no symbols, so add the call target name */ + if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) { + struct addr_map_symbol target = { + .map = map, + .addr = dl->ops.target.addr, + }; + + if (!map_groups__find_ams(&target, NULL) && + target.sym->start == target.al_addr) + dl->ops.target.name = strdup(target.sym->name); + } + disasm__add(¬es->src->source, dl); return 0; } +static void delete_last_nop(struct symbol *sym) +{ + struct annotation *notes = symbol__annotation(sym); + struct list_head *list = ¬es->src->source; + struct disasm_line *dl; + + while (!list_empty(list)) { + dl = list_entry(list->prev, struct disasm_line, node); + + if (dl->ins && dl->ins->ops) { + if (dl->ins->ops != &nop_ops) + return; + } else { + if (!strstr(dl->line, " nop ") && + !strstr(dl->line, " nopl ") && + !strstr(dl->line, " nopw ")) + return; + } + + list_del(&dl->node); + disasm_line__free(dl); + } +} + int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) { struct dso *dso = map->dso; @@ -760,6 +896,8 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) FILE *file; int err = 0; char symfs_filename[PATH_MAX]; + struct kcore_extract kce; + bool delete_extract = false; if (filename) { snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", @@ -783,13 +921,14 @@ fallback: * cache, or is just a kallsyms file, well, lets hope that this * DSO is the same as when 'perf record' ran. */ - filename = dso->long_name; + filename = (char *)dso->long_name; snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", symbol_conf.symfs, filename); free_filename = false; } - if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) { + if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && + !dso__is_kcore(dso)) { char bf[BUILD_ID_SIZE * 2 + 16] = " with build id "; char *build_id_msg = NULL; @@ -806,7 +945,7 @@ fallback: pr_err("Can't annotate %s:\n\n" "No vmlinux file%s\nwas found in the path.\n\n" "Please use:\n\n" - " perf buildid-cache -av vmlinux\n\n" + " perf buildid-cache -vu vmlinux\n\n" "or:\n\n" " --vmlinux vmlinux\n", sym->name, build_id_msg ?: ""); @@ -820,10 +959,27 @@ fallback: pr_debug("annotating [%p] %30s : [%p] %30s\n", dso, dso->long_name, sym, sym->name); + if (dso__is_kcore(dso)) { + kce.kcore_filename = symfs_filename; + kce.addr = map__rip_2objdump(map, sym->start); + kce.offs = sym->start; + kce.len = sym->end + 1 - sym->start; + if (!kcore_extract__create(&kce)) { + delete_extract = true; + strlcpy(symfs_filename, kce.extract_filename, + sizeof(symfs_filename)); + if (free_filename) { + free(filename); + free_filename = false; + } + filename = symfs_filename; + } + } + snprintf(command, sizeof(command), "%s %s%s --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 - " -d %s %s -C %s|grep -v %s|expand", + " -d %s %s -C %s 2>/dev/null|grep -v %s|expand", objdump_path ? 
objdump_path : "objdump", disassembler_style ? "-M " : "", disassembler_style ? disassembler_style : "", @@ -843,8 +999,17 @@ fallback: if (symbol__parse_objdump_line(sym, map, file, privsize) < 0) break; + /* + * kallsyms does not have symbol sizes so there may a nop at the end. + * Remove it. + */ + if (dso__is_kcore(dso)) + delete_last_nop(sym); + pclose(file); out_free_filename: + if (delete_extract) + kcore_extract__delete(&kce); if (free_filename) free(filename); return err; @@ -855,12 +1020,56 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin struct source_line *iter; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; + int i, ret; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct source_line, node); + + ret = strcmp(iter->path, src_line->path); + if (ret == 0) { + for (i = 0; i < src_line->nr_pcnt; i++) + iter->p[i].percent_sum += src_line->p[i].percent; + return; + } + + if (ret < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + for (i = 0; i < src_line->nr_pcnt; i++) + src_line->p[i].percent_sum = src_line->p[i].percent; + + rb_link_node(&src_line->node, parent, p); + rb_insert_color(&src_line->node, root); +} + +static int cmp_source_line(struct source_line *a, struct source_line *b) +{ + int i; + + for (i = 0; i < a->nr_pcnt; i++) { + if (a->p[i].percent_sum == b->p[i].percent_sum) + continue; + return a->p[i].percent_sum > b->p[i].percent_sum; + } + + return 0; +} + +static void __resort_source_line(struct rb_root *root, struct source_line *src_line) +{ + struct source_line *iter; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct source_line, node); - if (src_line->percent > iter->percent) + if (cmp_source_line(src_line, iter)) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -870,70 +1079,102 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin rb_insert_color(&src_line->node, root); } +static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root) +{ + struct source_line *src_line; + struct rb_node *node; + + node = rb_first(src_root); + while (node) { + struct rb_node *next; + + src_line = rb_entry(node, struct source_line, node); + next = rb_next(node); + rb_erase(node, src_root); + + __resort_source_line(dest_root, src_line); + node = next; + } +} + static void symbol__free_source_line(struct symbol *sym, int len) { struct annotation *notes = symbol__annotation(sym); struct source_line *src_line = notes->src->lines; + size_t sizeof_src_line; int i; - for (i = 0; i < len; i++) - free(src_line[i].path); + sizeof_src_line = sizeof(*src_line) + + (sizeof(src_line->p) * (src_line->nr_pcnt - 1)); + + for (i = 0; i < len; i++) { + free_srcline(src_line->path); + src_line = (void *)src_line + sizeof_src_line; + } - free(src_line); - notes->src->lines = NULL; + zfree(¬es->src->lines); } /* Get the filename:line for the colored entries */ static int symbol__get_source_line(struct symbol *sym, struct map *map, - int evidx, struct rb_root *root, int len, - const char *filename) + struct perf_evsel *evsel, + struct rb_root *root, int len) { u64 start; - int i; - char cmd[PATH_MAX * 2]; + int i, k; + int evidx = evsel->idx; struct source_line *src_line; struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evidx); + struct rb_root tmp_root = RB_ROOT; + int nr_pcnt = 1; + u64 h_sum = h->sum; + size_t sizeof_src_line 
= sizeof(struct source_line); + + if (perf_evsel__is_group_event(evsel)) { + for (i = 1; i < evsel->nr_members; i++) { + h = annotation__histogram(notes, evidx + i); + h_sum += h->sum; + } + nr_pcnt = evsel->nr_members; + sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p); + } - if (!h->sum) + if (!h_sum) return 0; - src_line = notes->src->lines = calloc(len, sizeof(struct source_line)); + src_line = notes->src->lines = calloc(len, sizeof_src_line); if (!notes->src->lines) return -1; start = map__rip_2objdump(map, sym->start); for (i = 0; i < len; i++) { - char *path = NULL; - size_t line_len; u64 offset; - FILE *fp; + double percent_max = 0.0; - src_line[i].percent = 100.0 * h->addr[i] / h->sum; - if (src_line[i].percent <= 0.5) - continue; + src_line->nr_pcnt = nr_pcnt; - offset = start + i; - sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); - fp = popen(cmd, "r"); - if (!fp) - continue; + for (k = 0; k < nr_pcnt; k++) { + h = annotation__histogram(notes, evidx + k); + src_line->p[k].percent = 100.0 * h->addr[i] / h->sum; - if (getline(&path, &line_len, fp) < 0 || !line_len) - goto next; + if (src_line->p[k].percent > percent_max) + percent_max = src_line->p[k].percent; + } - src_line[i].path = malloc(sizeof(char) * line_len + 1); - if (!src_line[i].path) + if (percent_max <= 0.5) goto next; - strcpy(src_line[i].path, path); - insert_source_line(root, &src_line[i]); + offset = start + i; + src_line->path = get_srcline(map->dso, offset); + insert_source_line(&tmp_root, src_line); next: - pclose(fp); + src_line = (void *)src_line + sizeof_src_line; } + resort_source_line(root, &tmp_root); return 0; } @@ -952,24 +1193,33 @@ static void print_summary(struct rb_root *root, const char *filename) node = rb_first(root); while (node) { - double percent; + double percent, percent_max = 0.0; const char *color; char *path; + int i; src_line = rb_entry(node, struct source_line, node); - percent = src_line->percent; - color = get_percent_color(percent); + for (i = 0; i < src_line->nr_pcnt; i++) { + percent = src_line->p[i].percent_sum; + color = get_percent_color(percent); + color_fprintf(stdout, color, " %7.2f", percent); + + if (percent > percent_max) + percent_max = percent; + } + path = src_line->path; + color = get_percent_color(percent_max); + color_fprintf(stdout, color, " %s\n", path); - color_fprintf(stdout, color, " %7.2f %s", percent, path); node = rb_next(node); } } -static void symbol__annotate_hits(struct symbol *sym, int evidx) +static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel) { struct annotation *notes = symbol__annotation(sym); - struct sym_hist *h = annotation__histogram(notes, evidx); + struct sym_hist *h = annotation__histogram(notes, evsel->idx); u64 len = symbol__size(sym), offset; for (offset = 0; offset < len; ++offset) @@ -979,19 +1229,22 @@ static void symbol__annotate_hits(struct symbol *sym, int evidx) printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); } -int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, - bool full_paths, int min_pcnt, int max_lines, - int context) +int symbol__annotate_printf(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool full_paths, + int min_pcnt, int max_lines, int context) { struct dso *dso = map->dso; char *filename; const char *d_filename; + const char *evsel_name = perf_evsel__name(evsel); struct annotation *notes = symbol__annotation(sym); struct disasm_line *pos, *queue = NULL; u64 start = map__rip_2objdump(map, sym->start); int 
printed = 2, queue_len = 0; int more = 0; u64 len; + int width = 8; + int namelen, evsel_name_len, graph_dotted_len; filename = strdup(dso->long_name); if (!filename) @@ -1003,12 +1256,21 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, d_filename = basename(filename); len = symbol__size(sym); + namelen = strlen(d_filename); + evsel_name_len = strlen(evsel_name); - printf(" Percent | Source code & Disassembly of %s\n", d_filename); - printf("------------------------------------------------\n"); + if (perf_evsel__is_group_event(evsel)) + width *= evsel->nr_members; + + printf(" %-*.*s| Source code & Disassembly of %s for %s\n", + width, width, "Percent", d_filename, evsel_name); + + graph_dotted_len = width + namelen + evsel_name_len; + printf("-%-*.*s-----------------------------------------\n", + graph_dotted_len, graph_dotted_len, graph_dotted_line); if (verbose) - symbol__annotate_hits(sym, evidx); + symbol__annotate_hits(sym, evsel); list_for_each_entry(pos, ¬es->src->source, node) { if (context && queue == NULL) { @@ -1016,7 +1278,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, queue_len = 0; } - switch (disasm_line__print(pos, sym, start, evidx, len, + switch (disasm_line__print(pos, sym, start, evsel, len, min_pcnt, printed, max_lines, queue)) { case 0: @@ -1111,12 +1373,11 @@ size_t disasm__fprintf(struct list_head *head, FILE *fp) return printed; } -int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, - bool print_lines, bool full_paths, int min_pcnt, - int max_lines) +int symbol__tty_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool print_lines, + bool full_paths, int min_pcnt, int max_lines) { struct dso *dso = map->dso; - const char *filename = dso->long_name; struct rb_root source_line = RB_ROOT; u64 len; @@ -1126,12 +1387,11 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, len = symbol__size(sym); if (print_lines) { - symbol__get_source_line(sym, map, evidx, &source_line, - len, filename); - print_summary(&source_line, filename); + symbol__get_source_line(sym, map, evsel, &source_line, len); + print_summary(&source_line, dso->long_name); } - symbol__annotate_printf(sym, map, evidx, full_paths, + symbol__annotate_printf(sym, map, evsel, full_paths, min_pcnt, max_lines, 0); if (print_lines) symbol__free_source_line(sym, len); @@ -1140,3 +1400,13 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, return 0; } + +int hist_entry__annotate(struct hist_entry *he, size_t privsize) +{ + return symbol__annotate(he->ms.sym, he->ms.map, privsize); +} + +bool ui__has_annotation(void) +{ + return use_browser == 1 && sort__has_sym; +} diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 39242dcee8f..112d6e26815 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -3,8 +3,10 @@ #include <stdbool.h> #include <stdint.h> -#include "types.h" +#include <linux/types.h> #include "symbol.h" +#include "hist.h" +#include "sort.h" #include <linux/list.h> #include <linux/rbtree.h> #include <pthread.h> @@ -48,6 +50,8 @@ bool ins__is_jump(const struct ins *ins); bool ins__is_call(const struct ins *ins); int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops); +struct annotation; + struct disasm_line { struct list_head node; s64 offset; @@ -66,16 +70,24 @@ void disasm_line__free(struct disasm_line *dl); struct disasm_line *disasm__get_next_ip_line(struct list_head *head, 
struct disasm_line *pos); int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw); size_t disasm__fprintf(struct list_head *head, FILE *fp); +double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset, + s64 end, const char **path); struct sym_hist { u64 sum; u64 addr[0]; }; +struct source_line_percent { + double percent; + double percent_sum; +}; + struct source_line { struct rb_node node; - double percent; char *path; + int nr_pcnt; + struct source_line_percent p[1]; }; /** struct annotated_source - symbols with hits have this attached as in sannotation @@ -120,40 +132,46 @@ static inline struct annotation *symbol__annotation(struct symbol *sym) return &a->annotation; } -int symbol__inc_addr_samples(struct symbol *sym, struct map *map, - int evidx, u64 addr); +int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx); + +int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr); + int symbol__alloc_hist(struct symbol *sym); void symbol__annotate_zero_histograms(struct symbol *sym); int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); + +int hist_entry__annotate(struct hist_entry *he, size_t privsize); + int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); -int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, - bool full_paths, int min_pcnt, int max_lines, - int context); +int symbol__annotate_printf(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool full_paths, + int min_pcnt, int max_lines, int context); void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); void disasm__purge(struct list_head *head); -int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, - bool print_lines, bool full_paths, int min_pcnt, - int max_lines); +bool ui__has_annotation(void); + +int symbol__tty_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, bool print_lines, + bool full_paths, int min_pcnt, int max_lines); -#ifdef NEWT_SUPPORT -int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, - void(*timer)(void *arg), void *arg, int delay_secs); +#ifdef HAVE_SLANG_SUPPORT +int symbol__tui_annotate(struct symbol *sym, struct map *map, + struct perf_evsel *evsel, + struct hist_browser_timer *hbt); #else static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, - struct map *map __maybe_unused, - int evidx __maybe_unused, - void(*timer)(void *arg) __maybe_unused, - void *arg __maybe_unused, - int delay_secs __maybe_unused) + struct map *map __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt + __maybe_unused) { return 0; } #endif extern const char *disassembler_style; -extern const char *objdump_path; #endif /* __PERF_ANNOTATE_H */ diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 8e3a740ddbd..a904a4cfe7d 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -16,15 +16,16 @@ #include "session.h" #include "tool.h" -static int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, - union perf_event *event, - struct perf_sample *sample __maybe_unused, - struct perf_evsel *evsel __maybe_unused, - struct machine *machine) +int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel __maybe_unused, + struct machine 
*machine) { struct addr_location al; u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = machine__findnew_thread(machine, event->ip.pid); + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); if (thread == NULL) { pr_err("problem processing %d event, skipping it.\n", @@ -33,7 +34,7 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, } thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, - event->ip.ip, &al); + sample->ip, &al); if (al.map != NULL) al.map->dso->hit = 1; @@ -47,7 +48,9 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, __maybe_unused, struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, event->fork.tid); + struct thread *thread = machine__findnew_thread(machine, + event->fork.pid, + event->fork.tid); dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, event->fork.ppid, event->fork.ptid); @@ -64,20 +67,36 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, struct perf_tool build_id__mark_dso_hit_ops = { .sample = build_id__mark_dso_hit, .mmap = perf_event__process_mmap, - .fork = perf_event__process_task, + .mmap2 = perf_event__process_mmap2, + .fork = perf_event__process_fork, .exit = perf_event__exit_del_thread, .attr = perf_event__process_attr, .build_id = perf_event__process_build_id, }; -char *dso__build_id_filename(struct dso *self, char *bf, size_t size) +int build_id__sprintf(const u8 *build_id, int len, char *bf) +{ + char *bid = bf; + const u8 *raw = build_id; + int i; + + for (i = 0; i < len; ++i) { + sprintf(bid, "%02x", *raw); + ++raw; + bid += 2; + } + + return raw - build_id; +} + +char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size) { char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - if (!self->has_build_id) + if (!dso->has_build_id) return NULL; - build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex); + build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex); if (bf == NULL) { if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir, build_id_hex, build_id_hex + 2) < 0) diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h index a993ba87d99..ae392561470 100644 --- a/tools/perf/util/build-id.h +++ b/tools/perf/util/build-id.h @@ -1,10 +1,18 @@ #ifndef PERF_BUILD_ID_H_ #define PERF_BUILD_ID_H_ 1 -#include "session.h" +#define BUILD_ID_SIZE 20 + +#include "tool.h" +#include <linux/types.h> extern struct perf_tool build_id__mark_dso_hit_ops; +struct dso; -char *dso__build_id_filename(struct dso *self, char *bf, size_t size); +int build_id__sprintf(const u8 *build_id, int len, char *bf); +char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); +int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, + struct perf_sample *sample, struct perf_evsel *evsel, + struct machine *machine); #endif diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 2bd51370ad2..7b176dd02e1 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -5,6 +5,7 @@ #include "util.h" #include "strbuf.h" #include "../perf.h" +#include "../ui/ui.h" #define CMD_EXEC_PATH "--exec-path" #define CMD_PERF_DIR "--perf-dir=" @@ -31,44 +32,6 @@ extern const char *pager_program; extern int pager_in_use(void); extern int pager_use_color; -extern int use_browser; - -#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT) -void setup_browser(bool fallback_to_pager); -void 
exit_browser(bool wait_for_ok); - -#ifdef NEWT_SUPPORT -int ui__init(void); -void ui__exit(bool wait_for_ok); -#else -static inline int ui__init(void) -{ - return -1; -} -static inline void ui__exit(bool wait_for_ok __maybe_unused) {} -#endif - -#ifdef GTK2_SUPPORT -int perf_gtk__init(void); -void perf_gtk__exit(bool wait_for_ok); -#else -static inline int perf_gtk__init(void) -{ - return -1; -} -static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} -#endif - -#else /* NEWT_SUPPORT || GTK2_SUPPORT */ - -static inline void setup_browser(bool fallback_to_pager) -{ - if (fallback_to_pager) - setup_pager(); -} -static inline void exit_browser(bool wait_for_ok __maybe_unused) {} -#endif /* NEWT_SUPPORT || GTK2_SUPPORT */ - char *alias_lookup(const char *alias); int split_cmdline(char *cmdline, const char ***argv); @@ -107,8 +70,7 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2 extern char *perf_pathdup(const char *fmt, ...) __attribute__((format (printf, 1, 2))); -#ifndef HAVE_STRLCPY +/* Matches the libc/libbsd function attribute so we declare this unconditionally: */ extern size_t strlcpy(char *dest, const char *src, size_t size); -#endif #endif /* __PERF_CACHE_H */ diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index d3b3f5d8213..48b6d3f5001 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -15,24 +15,93 @@ #include <errno.h> #include <math.h> +#include "asm/bug.h" + +#include "hist.h" #include "util.h" +#include "sort.h" +#include "machine.h" #include "callchain.h" __thread struct callchain_cursor callchain_cursor; -bool ip_callchain__valid(struct ip_callchain *chain, - const union perf_event *event) +int +parse_callchain_report_opt(const char *arg) { - unsigned int chain_size = event->header.size; - chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event; - return chain->nr * sizeof(u64) <= chain_size; -} + char *tok, *tok2; + char *endptr; + + symbol_conf.use_callchain = true; + + if (!arg) + return 0; + + tok = strtok((char *)arg, ","); + if (!tok) + return -1; + + /* get the output mode */ + if (!strncmp(tok, "graph", strlen(arg))) { + callchain_param.mode = CHAIN_GRAPH_ABS; + + } else if (!strncmp(tok, "flat", strlen(arg))) { + callchain_param.mode = CHAIN_FLAT; + } else if (!strncmp(tok, "fractal", strlen(arg))) { + callchain_param.mode = CHAIN_GRAPH_REL; + } else if (!strncmp(tok, "none", strlen(arg))) { + callchain_param.mode = CHAIN_NONE; + symbol_conf.use_callchain = false; + return 0; + } else { + return -1; + } + + /* get the min percentage */ + tok = strtok(NULL, ","); + if (!tok) + goto setup; + + callchain_param.min_percent = strtod(tok, &endptr); + if (tok == endptr) + return -1; + + /* get the print limit */ + tok2 = strtok(NULL, ","); + if (!tok2) + goto setup; + + if (tok2[0] != 'c') { + callchain_param.print_limit = strtoul(tok2, &endptr, 0); + tok2 = strtok(NULL, ","); + if (!tok2) + goto setup; + } -#define chain_for_each_child(child, parent) \ - list_for_each_entry(child, &parent->children, siblings) + /* get the call chain order */ + if (!strncmp(tok2, "caller", strlen("caller"))) + callchain_param.order = ORDER_CALLER; + else if (!strncmp(tok2, "callee", strlen("callee"))) + callchain_param.order = ORDER_CALLEE; + else + return -1; -#define chain_for_each_child_safe(child, next, parent) \ - list_for_each_entry_safe(child, next, &parent->children, siblings) + /* Get the sort key */ + tok2 = strtok(NULL, ","); + if (!tok2) + goto setup; + if 
(!strncmp(tok2, "function", strlen("function"))) + callchain_param.key = CCKEY_FUNCTION; + else if (!strncmp(tok2, "address", strlen("address"))) + callchain_param.key = CCKEY_ADDRESS; + else + return -1; +setup: + if (callchain_register_param(&callchain_param) < 0) { + pr_err("Can't register callchain params\n"); + return -1; + } + return 0; +} static void rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, @@ -78,10 +147,16 @@ static void __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, u64 min_hit) { + struct rb_node *n; struct callchain_node *child; - chain_for_each_child(child, node) + n = rb_first(&node->rb_root_in); + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + n = rb_next(n); + __sort_chain_flat(rb_root, child, min_hit); + } if (node->hit && node->hit >= min_hit) rb_insert_callchain(rb_root, node, CHAIN_FLAT); @@ -101,11 +176,16 @@ sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, static void __sort_chain_graph_abs(struct callchain_node *node, u64 min_hit) { + struct rb_node *n; struct callchain_node *child; node->rb_root = RB_ROOT; + n = rb_first(&node->rb_root_in); + + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + n = rb_next(n); - chain_for_each_child(child, node) { __sort_chain_graph_abs(child, min_hit); if (callchain_cumul_hits(child) >= min_hit) rb_insert_callchain(&node->rb_root, child, @@ -124,13 +204,18 @@ sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, static void __sort_chain_graph_rel(struct callchain_node *node, double min_percent) { + struct rb_node *n; struct callchain_node *child; u64 min_hit; node->rb_root = RB_ROOT; min_hit = ceil(node->children_hit * min_percent); - chain_for_each_child(child, node) { + n = rb_first(&node->rb_root_in); + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + n = rb_next(n); + __sort_chain_graph_rel(child, min_percent); if (callchain_cumul_hits(child) >= min_hit) rb_insert_callchain(&node->rb_root, child, @@ -180,19 +265,26 @@ create_child(struct callchain_node *parent, bool inherit_children) return NULL; } new->parent = parent; - INIT_LIST_HEAD(&new->children); INIT_LIST_HEAD(&new->val); if (inherit_children) { - struct callchain_node *next; + struct rb_node *n; + struct callchain_node *child; + + new->rb_root_in = parent->rb_root_in; + parent->rb_root_in = RB_ROOT; - list_splice(&parent->children, &new->children); - INIT_LIST_HEAD(&parent->children); + n = rb_first(&new->rb_root_in); + while (n) { + child = rb_entry(n, struct callchain_node, rb_node_in); + child->parent = new; + n = rb_next(n); + } - chain_for_each_child(next, new) - next->parent = new; + /* make it the first child */ + rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node); + rb_insert_color(&new->rb_node_in, &parent->rb_root_in); } - list_add_tail(&new->siblings, &parent->children); return new; } @@ -230,7 +322,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor) } } -static void +static struct callchain_node * add_child(struct callchain_node *parent, struct callchain_cursor *cursor, u64 period) @@ -242,6 +334,19 @@ add_child(struct callchain_node *parent, new->children_hit = 0; new->hit = period; + return new; +} + +static s64 match_chain(struct callchain_cursor_node *node, + struct callchain_list *cnode) +{ + struct symbol *sym = node->sym; + + if (cnode->ms.sym && sym && + callchain_param.key == CCKEY_FUNCTION) + return cnode->ms.sym->start - sym->start; + 
else + return cnode->ip - node->ip; } /* @@ -279,9 +384,33 @@ split_add_child(struct callchain_node *parent, /* create a new child for the new branch if any */ if (idx_total < cursor->nr) { + struct callchain_node *first; + struct callchain_list *cnode; + struct callchain_cursor_node *node; + struct rb_node *p, **pp; + parent->hit = 0; - add_child(parent, cursor, period); parent->children_hit += period; + + node = callchain_cursor_current(cursor); + new = add_child(parent, cursor, period); + + /* + * This is second child since we moved parent's children + * to new (first) child above. + */ + p = parent->rb_root_in.rb_node; + first = rb_entry(p, struct callchain_node, rb_node_in); + cnode = list_first_entry(&first->val, struct callchain_list, + list); + + if (match_chain(node, cnode) < 0) + pp = &p->rb_left; + else + pp = &p->rb_right; + + rb_link_node(&new->rb_node_in, p, pp); + rb_insert_color(&new->rb_node_in, &parent->rb_root_in); } else { parent->hit = period; } @@ -298,16 +427,35 @@ append_chain_children(struct callchain_node *root, u64 period) { struct callchain_node *rnode; + struct callchain_cursor_node *node; + struct rb_node **p = &root->rb_root_in.rb_node; + struct rb_node *parent = NULL; + + node = callchain_cursor_current(cursor); + if (!node) + return; /* lookup in childrens */ - chain_for_each_child(rnode, root) { - unsigned int ret = append_chain(rnode, cursor, period); + while (*p) { + s64 ret; + + parent = *p; + rnode = rb_entry(parent, struct callchain_node, rb_node_in); - if (!ret) + /* If at least first entry matches, rely to children */ + ret = append_chain(rnode, cursor, period); + if (ret == 0) goto inc_children_hit; + + if (ret < 0) + p = &parent->rb_left; + else + p = &parent->rb_right; } /* nothing in children, add to the current node */ - add_child(root, cursor, period); + rnode = add_child(root, cursor, period); + rb_link_node(&rnode->rb_node_in, parent, p); + rb_insert_color(&rnode->rb_node_in, &root->rb_root_in); inc_children_hit: root->children_hit += period; @@ -318,44 +466,38 @@ append_chain(struct callchain_node *root, struct callchain_cursor *cursor, u64 period) { - struct callchain_cursor_node *curr_snap = cursor->curr; struct callchain_list *cnode; u64 start = cursor->pos; bool found = false; u64 matches; + int cmp = 0; /* * Lookup in the current node * If we have a symbol, then compare the start to match - * anywhere inside a function. + * anywhere inside a function, unless function + * mode is disabled. 
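+	 * In that case (callchain_param.key == CCKEY_ADDRESS) match_chain()
+	 * falls back to comparing raw ips rather than symbol start addresses.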
*/ list_for_each_entry(cnode, &root->val, list) { struct callchain_cursor_node *node; - struct symbol *sym; node = callchain_cursor_current(cursor); if (!node) break; - sym = node->sym; - - if (cnode->ms.sym && sym) { - if (cnode->ms.sym->start != sym->start) - break; - } else if (cnode->ip != node->ip) + cmp = match_chain(node, cnode); + if (cmp) break; - if (!found) - found = true; + found = true; callchain_cursor_advance(cursor); } - /* matches not, relay on the parent */ + /* matches not, relay no the parent */ if (!found) { - cursor->curr = curr_snap; - cursor->pos = start; - return -1; + WARN_ONCE(!cmp, "Chain comparison error\n"); + return cmp; } matches = cursor->pos - start; @@ -400,8 +542,9 @@ merge_chain_branch(struct callchain_cursor *cursor, struct callchain_node *dst, struct callchain_node *src) { struct callchain_cursor_node **old_last = cursor->last; - struct callchain_node *child, *next_child; + struct callchain_node *child; struct callchain_list *list, *next_list; + struct rb_node *n; int old_pos = cursor->nr; int err = 0; @@ -417,12 +560,16 @@ merge_chain_branch(struct callchain_cursor *cursor, append_chain_children(dst, cursor, src->hit); } - chain_for_each_child_safe(child, next_child, src) { + n = rb_first(&src->rb_root_in); + while (n) { + child = container_of(n, struct callchain_node, rb_node_in); + n = rb_next(n); + rb_erase(&child->rb_node_in, &src->rb_root_in); + err = merge_chain_branch(cursor, dst, child); if (err) break; - list_del(&child->siblings); free(child); } @@ -444,7 +591,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor, struct callchain_cursor_node *node = *cursor->last; if (!node) { - node = calloc(sizeof(*node), 1); + node = calloc(1, sizeof(*node)); if (!node) return -ENOMEM; @@ -461,3 +608,67 @@ int callchain_cursor_append(struct callchain_cursor *cursor, return 0; } + +int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent, + struct perf_evsel *evsel, struct addr_location *al, + int max_stack) +{ + if (sample->callchain == NULL) + return 0; + + if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain || + sort__has_parent) { + return machine__resolve_callchain(al->machine, evsel, al->thread, + sample, parent, al, max_stack); + } + return 0; +} + +int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample) +{ + if (!symbol_conf.use_callchain) + return 0; + return callchain_append(he->callchain, &callchain_cursor, sample->period); +} + +int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, + bool hide_unresolved) +{ + al->map = node->map; + al->sym = node->sym; + if (node->map) + al->addr = node->map->map_ip(node->map, node->ip); + else + al->addr = node->ip; + + if (al->sym == NULL) { + if (hide_unresolved) + return 0; + if (al->map == NULL) + goto out; + } + + if (al->map->groups == &al->machine->kmaps) { + if (machine__is_host(al->machine)) { + al->cpumode = PERF_RECORD_MISC_KERNEL; + al->level = 'k'; + } else { + al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL; + al->level = 'g'; + } + } else { + if (machine__is_host(al->machine)) { + al->cpumode = PERF_RECORD_MISC_USER; + al->level = '.'; + } else if (perf_guest) { + al->cpumode = PERF_RECORD_MISC_GUEST_USER; + al->level = 'u'; + } else { + al->cpumode = PERF_RECORD_MISC_HYPERVISOR; + al->level = 'H'; + } + } + +out: + return 1; +} diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index eb340571e7d..8f84423a75d 100644 --- a/tools/perf/util/callchain.h +++ 
b/tools/perf/util/callchain.h @@ -7,6 +7,13 @@ #include "event.h" #include "symbol.h" +enum perf_call_graph_mode { + CALLCHAIN_NONE, + CALLCHAIN_FP, + CALLCHAIN_DWARF, + CALLCHAIN_MAX +}; + enum chain_mode { CHAIN_NONE, CHAIN_FLAT, @@ -21,11 +28,11 @@ enum chain_order { struct callchain_node { struct callchain_node *parent; - struct list_head siblings; - struct list_head children; struct list_head val; - struct rb_node rb_node; /* to sort nodes in an rbtree */ - struct rb_root rb_root; /* sorted tree of children */ + struct rb_node rb_node_in; /* to insert nodes in an rbtree */ + struct rb_node rb_node; /* to sort nodes in an output tree */ + struct rb_root rb_root_in; /* input tree of children */ + struct rb_root rb_root; /* sorted output tree of children */ unsigned int val_nr; u64 hit; u64 children_hit; @@ -41,12 +48,18 @@ struct callchain_param; typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *, u64, struct callchain_param *); +enum chain_key { + CCKEY_FUNCTION, + CCKEY_ADDRESS +}; + struct callchain_param { enum chain_mode mode; u32 print_limit; double min_percent; sort_chain_func_t sort; enum chain_order order; + enum chain_key key; }; struct callchain_list { @@ -80,13 +93,12 @@ extern __thread struct callchain_cursor callchain_cursor; static inline void callchain_init(struct callchain_root *root) { - INIT_LIST_HEAD(&root->node.siblings); - INIT_LIST_HEAD(&root->node.children); INIT_LIST_HEAD(&root->node.val); root->node.parent = NULL; root->node.hit = 0; root->node.children_hit = 0; + root->node.rb_root_in = RB_ROOT; root->max_depth = 0; } @@ -103,11 +115,6 @@ int callchain_append(struct callchain_root *root, int callchain_merge(struct callchain_cursor *cursor, struct callchain_root *dst, struct callchain_root *src); -struct ip_callchain; -union perf_event; - -bool ip_callchain__valid(struct ip_callchain *chain, - const union perf_event *event); /* * Initialize a cursor before adding entries inside, but keep * the previously allocated entries as a cache. 
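/*
 * Illustrative sketch (not from the patch itself): the old siblings/children
 * lists are replaced by an rbtree rooted at rb_root_in, so a node's children
 * are walked with the rb_first()/rb_next() pattern already used above in
 * __sort_chain_flat() and merge_chain_branch(). The helper name below is
 * made up for illustration and assumes <linux/rbtree.h> plus the
 * definitions from this header.
 */
static inline void sketch_walk_children(struct callchain_node *parent)
{
	struct rb_node *n = rb_first(&parent->rb_root_in);

	while (n) {
		struct callchain_node *child;

		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);	/* advance first so 'child' may be erased safely */
		/* ... visit 'child' here ... */
	}
}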
@@ -143,4 +150,30 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor) cursor->curr = cursor->curr->next; cursor->pos++; } + +struct option; +struct hist_entry; + +int record_parse_callchain(const char *arg, struct record_opts *opts); +int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset); +int record_callchain_opt(const struct option *opt, const char *arg, int unset); + +int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent, + struct perf_evsel *evsel, struct addr_location *al, + int max_stack); +int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample); +int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node, + bool hide_unresolved); + +extern const char record_callchain_help[]; +int parse_callchain_report_opt(const char *arg); + +static inline void callchain_cursor_snapshot(struct callchain_cursor *dest, + struct callchain_cursor *src) +{ + *dest = *src; + + dest->first = src->curr; + dest->nr -= src->pos; +} #endif /* __PERF_CALLCHAIN_H */ diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index 96bbda1ddb8..88f7be39943 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -81,7 +81,7 @@ static int add_cgroup(struct perf_evlist *evlist, char *str) /* * check if cgrp is already defined, if so we reuse it */ - list_for_each_entry(counter, &evlist->entries, node) { + evlist__for_each(evlist, counter) { cgrp = counter->cgrp; if (!cgrp) continue; @@ -110,7 +110,7 @@ static int add_cgroup(struct perf_evlist *evlist, char *str) * if add cgroup N, then need to find event N */ n = 0; - list_for_each_entry(counter, &evlist->entries, node) { + evlist__for_each(evlist, counter) { if (n == nr_cgroups) goto found; n++; @@ -133,7 +133,7 @@ void close_cgroup(struct cgroup_sel *cgrp) /* XXX: not reentrant */ if (--cgrp->refcnt == 0) { close(cgrp->fd); - free(cgrp->name); + zfree(&cgrp->name); free(cgrp); } } diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c index 11e46da17bb..87b8672eb41 100644 --- a/tools/perf/util/color.c +++ b/tools/perf/util/color.c @@ -1,6 +1,7 @@ #include <linux/kernel.h> #include "cache.h" #include "color.h" +#include <math.h> int perf_use_color_default = -1; @@ -298,10 +299,10 @@ const char *get_percent_color(double percent) * entries in green - and keep the low overhead places * normal: */ - if (percent >= MIN_RED) + if (fabs(percent) >= MIN_RED) color = PERF_COLOR_RED; else { - if (percent > MIN_GREEN) + if (fabs(percent) > MIN_GREEN) color = PERF_COLOR_GREEN; } return color; @@ -318,8 +319,19 @@ int percent_color_fprintf(FILE *fp, const char *fmt, double percent) return r; } -int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent) +int value_color_snprintf(char *bf, size_t size, const char *fmt, double value) { - const char *color = get_percent_color(percent); - return color_snprintf(bf, size, color, fmt, percent); + const char *color = get_percent_color(value); + return color_snprintf(bf, size, color, fmt, value); +} + +int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...) 
+{ + va_list args; + double percent; + + va_start(args, fmt); + percent = va_arg(args, double); + va_end(args); + return value_color_snprintf(bf, size, fmt, percent); } diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index dea082b7960..7ff30a62a13 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h @@ -39,7 +39,8 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...); int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); -int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent); +int value_color_snprintf(char *bf, size_t size, const char *fmt, double value); +int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...); int percent_color_fprintf(FILE *fp, const char *fmt, double percent); const char *get_percent_color(double percent); diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c new file mode 100644 index 00000000000..f9e777629e2 --- /dev/null +++ b/tools/perf/util/comm.c @@ -0,0 +1,122 @@ +#include "comm.h" +#include "util.h" +#include <stdlib.h> +#include <stdio.h> + +struct comm_str { + char *str; + struct rb_node rb_node; + int ref; +}; + +/* Should perhaps be moved to struct machine */ +static struct rb_root comm_str_root; + +static void comm_str__get(struct comm_str *cs) +{ + cs->ref++; +} + +static void comm_str__put(struct comm_str *cs) +{ + if (!--cs->ref) { + rb_erase(&cs->rb_node, &comm_str_root); + zfree(&cs->str); + free(cs); + } +} + +static struct comm_str *comm_str__alloc(const char *str) +{ + struct comm_str *cs; + + cs = zalloc(sizeof(*cs)); + if (!cs) + return NULL; + + cs->str = strdup(str); + if (!cs->str) { + free(cs); + return NULL; + } + + return cs; +} + +static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct comm_str *iter, *new; + int cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct comm_str, rb_node); + + cmp = strcmp(str, iter->str); + if (!cmp) + return iter; + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + new = comm_str__alloc(str); + if (!new) + return NULL; + + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, root); + + return new; +} + +struct comm *comm__new(const char *str, u64 timestamp) +{ + struct comm *comm = zalloc(sizeof(*comm)); + + if (!comm) + return NULL; + + comm->start = timestamp; + + comm->comm_str = comm_str__findnew(str, &comm_str_root); + if (!comm->comm_str) { + free(comm); + return NULL; + } + + comm_str__get(comm->comm_str); + + return comm; +} + +int comm__override(struct comm *comm, const char *str, u64 timestamp) +{ + struct comm_str *new, *old = comm->comm_str; + + new = comm_str__findnew(str, &comm_str_root); + if (!new) + return -ENOMEM; + + comm_str__get(new); + comm_str__put(old); + comm->comm_str = new; + comm->start = timestamp; + + return 0; +} + +void comm__free(struct comm *comm) +{ + comm_str__put(comm->comm_str); + free(comm); +} + +const char *comm__str(const struct comm *comm) +{ + return comm->comm_str->str; +} diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h new file mode 100644 index 00000000000..fac5bd51bef --- /dev/null +++ b/tools/perf/util/comm.h @@ -0,0 +1,21 @@ +#ifndef __PERF_COMM_H +#define __PERF_COMM_H + +#include 
"../perf.h" +#include <linux/rbtree.h> +#include <linux/list.h> + +struct comm_str; + +struct comm { + struct comm_str *comm_str; + u64 start; + struct list_head list; +}; + +void comm__free(struct comm *comm); +struct comm *comm__new(const char *str, u64 timestamp); +const char *comm__str(const struct comm *comm); +int comm__override(struct comm *comm, const char *str, u64 timestamp); + +#endif /* __PERF_COMM_H */ diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 3e0fdd369cc..24519e14ac5 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -11,6 +11,7 @@ #include "util.h" #include "cache.h" #include "exec_cmd.h" +#include "util/hist.h" /* perf_hist_config */ #define MAXNAME (256) @@ -355,6 +356,9 @@ int perf_default_config(const char *var, const char *value, if (!prefixcmp(var, "core.")) return perf_default_core_config(var, value); + if (!prefixcmp(var, "hist.")) + return perf_hist_config(var, value); + /* Add other config variables here. */ return 0; } diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 2b32ffa9ebd..c4e55b71010 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -1,8 +1,10 @@ #include "util.h" +#include <api/fs/fs.h> #include "../perf.h" #include "cpumap.h" #include <assert.h> #include <stdio.h> +#include <stdlib.h> static struct cpu_map *cpu_map__default_new(void) { @@ -201,3 +203,277 @@ void cpu_map__delete(struct cpu_map *map) { free(map); } + +int cpu_map__get_socket(struct cpu_map *map, int idx) +{ + FILE *fp; + const char *mnt; + char path[PATH_MAX]; + int cpu, ret; + + if (idx > map->nr) + return -1; + + cpu = map->map[idx]; + + mnt = sysfs__mountpoint(); + if (!mnt) + return -1; + + snprintf(path, PATH_MAX, + "%s/devices/system/cpu/cpu%d/topology/physical_package_id", + mnt, cpu); + + fp = fopen(path, "r"); + if (!fp) + return -1; + ret = fscanf(fp, "%d", &cpu); + fclose(fp); + return ret == 1 ? cpu : -1; +} + +static int cmp_ids(const void *a, const void *b) +{ + return *(int *)a - *(int *)b; +} + +static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, + int (*f)(struct cpu_map *map, int cpu)) +{ + struct cpu_map *c; + int nr = cpus->nr; + int cpu, s1, s2; + + /* allocate as much as possible */ + c = calloc(1, sizeof(*c) + nr * sizeof(int)); + if (!c) + return -1; + + for (cpu = 0; cpu < nr; cpu++) { + s1 = f(cpus, cpu); + for (s2 = 0; s2 < c->nr; s2++) { + if (s1 == c->map[s2]) + break; + } + if (s2 == c->nr) { + c->map[c->nr] = s1; + c->nr++; + } + } + /* ensure we process id in increasing order */ + qsort(c->map, c->nr, sizeof(int), cmp_ids); + + *res = c; + return 0; +} + +int cpu_map__get_core(struct cpu_map *map, int idx) +{ + FILE *fp; + const char *mnt; + char path[PATH_MAX]; + int cpu, ret, s; + + if (idx > map->nr) + return -1; + + cpu = map->map[idx]; + + mnt = sysfs__mountpoint(); + if (!mnt) + return -1; + + snprintf(path, PATH_MAX, + "%s/devices/system/cpu/cpu%d/topology/core_id", + mnt, cpu); + + fp = fopen(path, "r"); + if (!fp) + return -1; + ret = fscanf(fp, "%d", &cpu); + fclose(fp); + if (ret != 1) + return -1; + + s = cpu_map__get_socket(map, idx); + if (s == -1) + return -1; + + /* + * encode socket in upper 16 bits + * core_id is relative to socket, and + * we need a global id. 
So we combine + * socket+ core id + */ + return (s << 16) | (cpu & 0xffff); +} + +int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp) +{ + return cpu_map__build_map(cpus, sockp, cpu_map__get_socket); +} + +int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep) +{ + return cpu_map__build_map(cpus, corep, cpu_map__get_core); +} + +/* setup simple routines to easily access node numbers given a cpu number */ +static int get_max_num(char *path, int *max) +{ + size_t num; + char *buf; + int err = 0; + + if (filename__read_str(path, &buf, &num)) + return -1; + + buf[num] = '\0'; + + /* start on the right, to find highest node num */ + while (--num) { + if ((buf[num] == ',') || (buf[num] == '-')) { + num++; + break; + } + } + if (sscanf(&buf[num], "%d", max) < 1) { + err = -1; + goto out; + } + + /* convert from 0-based to 1-based */ + (*max)++; + +out: + free(buf); + return err; +} + +/* Determine highest possible cpu in the system for sparse allocation */ +static void set_max_cpu_num(void) +{ + const char *mnt; + char path[PATH_MAX]; + int ret = -1; + + /* set up default */ + max_cpu_num = 4096; + + mnt = sysfs__mountpoint(); + if (!mnt) + goto out; + + /* get the highest possible cpu number for a sparse allocation */ + ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); + if (ret == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + goto out; + } + + ret = get_max_num(path, &max_cpu_num); + +out: + if (ret) + pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num); +} + +/* Determine highest possible node in the system for sparse allocation */ +static void set_max_node_num(void) +{ + const char *mnt; + char path[PATH_MAX]; + int ret = -1; + + /* set up default */ + max_node_num = 8; + + mnt = sysfs__mountpoint(); + if (!mnt) + goto out; + + /* get the highest possible cpu number for a sparse allocation */ + ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); + if (ret == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + goto out; + } + + ret = get_max_num(path, &max_node_num); + +out: + if (ret) + pr_err("Failed to read max nodes, using default of %d\n", max_node_num); +} + +static int init_cpunode_map(void) +{ + int i; + + set_max_cpu_num(); + set_max_node_num(); + + cpunode_map = calloc(max_cpu_num, sizeof(int)); + if (!cpunode_map) { + pr_err("%s: calloc failed\n", __func__); + return -1; + } + + for (i = 0; i < max_cpu_num; i++) + cpunode_map[i] = -1; + + return 0; +} + +int cpu__setup_cpunode_map(void) +{ + struct dirent *dent1, *dent2; + DIR *dir1, *dir2; + unsigned int cpu, mem; + char buf[PATH_MAX]; + char path[PATH_MAX]; + const char *mnt; + int n; + + /* initialize globals */ + if (init_cpunode_map()) + return -1; + + mnt = sysfs__mountpoint(); + if (!mnt) + return 0; + + n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); + if (n == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + return -1; + } + + dir1 = opendir(path); + if (!dir1) + return 0; + + /* walk tree and setup map */ + while ((dent1 = readdir(dir1)) != NULL) { + if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1) + continue; + + n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); + if (n == PATH_MAX) { + pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); + continue; + } + + dir2 = opendir(buf); + if (!dir2) + continue; + while ((dent2 = readdir(dir2)) != NULL) { + if (dent2->d_type != DT_LNK || 
sscanf(dent2->d_name, "cpu%u", &cpu) < 1) + continue; + cpunode_map[cpu] = mem; + } + closedir(dir2); + } + closedir(dir1); + return 0; +} diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 2f68a3b8c28..61a65484900 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -4,6 +4,9 @@ #include <stdio.h> #include <stdbool.h> +#include "perf.h" +#include "util/debug.h" + struct cpu_map { int nr; int map[]; @@ -14,15 +17,68 @@ struct cpu_map *cpu_map__dummy_new(void); void cpu_map__delete(struct cpu_map *map); struct cpu_map *cpu_map__read(FILE *file); size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); +int cpu_map__get_socket(struct cpu_map *map, int idx); +int cpu_map__get_core(struct cpu_map *map, int idx); +int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp); +int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep); + +static inline int cpu_map__socket(struct cpu_map *sock, int s) +{ + if (!sock || s > sock->nr || s < 0) + return 0; + return sock->map[s]; +} + +static inline int cpu_map__id_to_socket(int id) +{ + return id >> 16; +} + +static inline int cpu_map__id_to_cpu(int id) +{ + return id & 0xffff; +} static inline int cpu_map__nr(const struct cpu_map *map) { return map ? map->nr : 1; } -static inline bool cpu_map__all(const struct cpu_map *map) +static inline bool cpu_map__empty(const struct cpu_map *map) { return map ? map->map[0] == -1 : true; } +int max_cpu_num; +int max_node_num; +int *cpunode_map; + +int cpu__setup_cpunode_map(void); + +static inline int cpu__max_node(void) +{ + if (unlikely(!max_node_num)) + pr_debug("cpu_map not initialized\n"); + + return max_node_num; +} + +static inline int cpu__max_cpu(void) +{ + if (unlikely(!max_cpu_num)) + pr_debug("cpu_map not initialized\n"); + + return max_cpu_num; +} + +static inline int cpu__get_node(int cpu) +{ + if (unlikely(cpunode_map == NULL)) { + pr_debug("cpu_map not initialized\n"); + return -1; + } + + return cpunode_map[cpu]; +} + #endif /* __PERF_CPUMAP_H */ diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c new file mode 100644 index 00000000000..55de44ecebe --- /dev/null +++ b/tools/perf/util/data.c @@ -0,0 +1,133 @@ +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <string.h> + +#include "data.h" +#include "util.h" + +static bool check_pipe(struct perf_data_file *file) +{ + struct stat st; + bool is_pipe = false; + int fd = perf_data_file__is_read(file) ? 
+ STDIN_FILENO : STDOUT_FILENO; + + if (!file->path) { + if (!fstat(fd, &st) && S_ISFIFO(st.st_mode)) + is_pipe = true; + } else { + if (!strcmp(file->path, "-")) + is_pipe = true; + } + + if (is_pipe) + file->fd = fd; + + return file->is_pipe = is_pipe; +} + +static int check_backup(struct perf_data_file *file) +{ + struct stat st; + + if (!stat(file->path, &st) && st.st_size) { + /* TODO check errors properly */ + char oldname[PATH_MAX]; + snprintf(oldname, sizeof(oldname), "%s.old", + file->path); + unlink(oldname); + rename(file->path, oldname); + } + + return 0; +} + +static int open_file_read(struct perf_data_file *file) +{ + struct stat st; + int fd; + + fd = open(file->path, O_RDONLY); + if (fd < 0) { + int err = errno; + + pr_err("failed to open %s: %s", file->path, strerror(err)); + if (err == ENOENT && !strcmp(file->path, "perf.data")) + pr_err(" (try 'perf record' first)"); + pr_err("\n"); + return -err; + } + + if (fstat(fd, &st) < 0) + goto out_close; + + if (!file->force && st.st_uid && (st.st_uid != geteuid())) { + pr_err("file %s not owned by current user or root\n", + file->path); + goto out_close; + } + + if (!st.st_size) { + pr_info("zero-sized file (%s), nothing to do!\n", + file->path); + goto out_close; + } + + file->size = st.st_size; + return fd; + + out_close: + close(fd); + return -1; +} + +static int open_file_write(struct perf_data_file *file) +{ + int fd; + + if (check_backup(file)) + return -1; + + fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR); + + if (fd < 0) + pr_err("failed to open %s : %s\n", file->path, strerror(errno)); + + return fd; +} + +static int open_file(struct perf_data_file *file) +{ + int fd; + + fd = perf_data_file__is_read(file) ? + open_file_read(file) : open_file_write(file); + + file->fd = fd; + return fd < 0 ? 
-1 : 0; +} + +int perf_data_file__open(struct perf_data_file *file) +{ + if (check_pipe(file)) + return 0; + + if (!file->path) + file->path = "perf.data"; + + return open_file(file); +} + +void perf_data_file__close(struct perf_data_file *file) +{ + close(file->fd); +} + +ssize_t perf_data_file__write(struct perf_data_file *file, + void *buf, size_t size) +{ + return writen(file->fd, buf, size); +} diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h new file mode 100644 index 00000000000..2b15d0c95c7 --- /dev/null +++ b/tools/perf/util/data.h @@ -0,0 +1,50 @@ +#ifndef __PERF_DATA_H +#define __PERF_DATA_H + +#include <stdbool.h> + +enum perf_data_mode { + PERF_DATA_MODE_WRITE, + PERF_DATA_MODE_READ, +}; + +struct perf_data_file { + const char *path; + int fd; + bool is_pipe; + bool force; + unsigned long size; + enum perf_data_mode mode; +}; + +static inline bool perf_data_file__is_read(struct perf_data_file *file) +{ + return file->mode == PERF_DATA_MODE_READ; +} + +static inline bool perf_data_file__is_write(struct perf_data_file *file) +{ + return file->mode == PERF_DATA_MODE_WRITE; +} + +static inline int perf_data_file__is_pipe(struct perf_data_file *file) +{ + return file->is_pipe; +} + +static inline int perf_data_file__fd(struct perf_data_file *file) +{ + return file->fd; +} + +static inline unsigned long perf_data_file__size(struct perf_data_file *file) +{ + return file->size; +} + +int perf_data_file__open(struct perf_data_file *file); +void perf_data_file__close(struct perf_data_file *file); +ssize_t perf_data_file__write(struct perf_data_file *file, + void *buf, size_t size); + +#endif /* __PERF_DATA_H */ diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 03f830b4814..299b5558650 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -16,59 +16,58 @@ int verbose; bool dump_trace = false, quiet = false; -int eprintf(int level, const char *fmt, ...) +static int _eprintf(int level, const char *fmt, va_list args) { - va_list args; int ret = 0; if (verbose >= level) { - va_start(args, fmt); - if (use_browser == 1) - ret = ui_helpline__show_help(fmt, args); - else if (use_browser == 2) - ret = perf_gtk__show_helpline(fmt, args); + if (use_browser >= 1) + ui_helpline__vshow(fmt, args); else ret = vfprintf(stderr, fmt, args); - va_end(args); } return ret; } -int dump_printf(const char *fmt, ...) +int eprintf(int level, const char *fmt, ...) { va_list args; - int ret = 0; + int ret; - if (dump_trace) { - va_start(args, fmt); - ret = vprintf(fmt, args); - va_end(args); - } + va_start(args, fmt); + ret = _eprintf(level, fmt, args); + va_end(args); return ret; } -#if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT) -int ui__warning(const char *format, ...) +/* + * Overloading libtraceevent standard info print + * function, display with -v in perf. + */ +void pr_stat(const char *fmt, ...) { va_list args; - va_start(args, format); - vfprintf(stderr, format, args); + va_start(args, fmt); + _eprintf(1, fmt, args); va_end(args); - return 0; + eprintf(1, "\n"); } -#endif -int ui__error_paranoid(void) +int dump_printf(const char *fmt, ...) 
{ - return ui__error("Permission error - are you root?\n" - "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" - " -1 - Not paranoid at all\n" - " 0 - Disallow raw tracepoint access for unpriv\n" - " 1 - Disallow cpu events for unpriv\n" - " 2 - Disallow kernel profiling for unpriv\n"); + va_list args; + int ret = 0; + + if (dump_trace) { + va_start(args, fmt); + ret = vprintf(fmt, args); + va_end(args); + } + + return ret; } void trace_event(union perf_event *event) diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index dec98750b48..443694c36b0 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -5,6 +5,8 @@ #include <stdbool.h> #include "event.h" #include "../ui/helpline.h" +#include "../ui/progress.h" +#include "../ui/util.h" extern int verbose; extern bool quiet, dump_trace; @@ -12,38 +14,9 @@ extern bool quiet, dump_trace; int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(union perf_event *event); -struct ui_progress; -struct perf_error_ops; - -#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT) - -#include "../ui/progress.h" int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); -#include "../ui/util.h" - -#else - -static inline void ui_progress__update(u64 curr __maybe_unused, - u64 total __maybe_unused, - const char *title __maybe_unused) {} - -#define ui__error(format, arg...) ui__warning(format, ##arg) - -static inline int -perf_error__register(struct perf_error_ops *eops __maybe_unused) -{ - return 0; -} - -static inline int -perf_error__unregister(struct perf_error_ops *eops __maybe_unused) -{ - return 0; -} - -#endif /* NEWT_SUPPORT || GTK2_SUPPORT */ - int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); -int ui__error_paranoid(void); + +void pr_stat(const char *fmt, ...); #endif /* __PERF_DEBUG_H */ diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h deleted file mode 100644 index 68f3e87ec57..00000000000 --- a/tools/perf/util/debugfs.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __DEBUGFS_H__ -#define __DEBUGFS_H__ - -const char *debugfs_find_mountpoint(void); -int debugfs_valid_mountpoint(const char *debugfs); -char *debugfs_mount(const char *mountpoint); -void debugfs_set_path(const char *mountpoint); - -extern char debugfs_mountpoint[]; -extern char tracing_events_path[]; - -#endif /* __DEBUGFS_H__ */ diff --git a/tools/perf/util/dso-test-data.c b/tools/perf/util/dso-test-data.c deleted file mode 100644 index c6caedeb1d6..00000000000 --- a/tools/perf/util/dso-test-data.c +++ /dev/null @@ -1,153 +0,0 @@ -#include "util.h" - -#include <stdlib.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <string.h> - -#include "symbol.h" - -#define TEST_ASSERT_VAL(text, cond) \ -do { \ - if (!(cond)) { \ - pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ - return -1; \ - } \ -} while (0) - -static char *test_file(int size) -{ - static char buf_templ[] = "/tmp/test-XXXXXX"; - char *templ = buf_templ; - int fd, i; - unsigned char *buf; - - fd = mkstemp(templ); - - buf = malloc(size); - if (!buf) { - close(fd); - return NULL; - } - - for (i = 0; i < size; i++) - buf[i] = (unsigned char) ((int) i % 10); - - if (size != write(fd, buf, size)) - templ = NULL; - - close(fd); - return templ; -} - -#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20) - -struct test_data_offset { - off_t offset; - u8 data[10]; - int size; -}; - -struct test_data_offset offsets[] = { - /* Fill first cache page. 
*/ - { - .offset = 10, - .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - .size = 10, - }, - /* Read first cache page. */ - { - .offset = 10, - .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - .size = 10, - }, - /* Fill cache boundary pages. */ - { - .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, - .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - .size = 10, - }, - /* Read cache boundary pages. */ - { - .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10, - .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - .size = 10, - }, - /* Fill final cache page. */ - { - .offset = TEST_FILE_SIZE - 10, - .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - .size = 10, - }, - /* Read final cache page. */ - { - .offset = TEST_FILE_SIZE - 10, - .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - .size = 10, - }, - /* Read final cache page. */ - { - .offset = TEST_FILE_SIZE - 3, - .data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 }, - .size = 3, - }, -}; - -int dso__test_data(void) -{ - struct machine machine; - struct dso *dso; - char *file = test_file(TEST_FILE_SIZE); - size_t i; - - TEST_ASSERT_VAL("No test file", file); - - memset(&machine, 0, sizeof(machine)); - - dso = dso__new((const char *)file); - - /* Basic 10 bytes tests. */ - for (i = 0; i < ARRAY_SIZE(offsets); i++) { - struct test_data_offset *data = &offsets[i]; - ssize_t size; - u8 buf[10]; - - memset(buf, 0, 10); - size = dso__data_read_offset(dso, &machine, data->offset, - buf, 10); - - TEST_ASSERT_VAL("Wrong size", size == data->size); - TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10)); - } - - /* Read cross multiple cache pages. */ - { - ssize_t size; - int c; - u8 *buf; - - buf = malloc(TEST_FILE_SIZE); - TEST_ASSERT_VAL("ENOMEM\n", buf); - - /* First iteration to fill caches, second one to read them. */ - for (c = 0; c < 2; c++) { - memset(buf, 0, TEST_FILE_SIZE); - size = dso__data_read_offset(dso, &machine, 10, - buf, TEST_FILE_SIZE); - - TEST_ASSERT_VAL("Wrong size", - size == (TEST_FILE_SIZE - 10)); - - for (i = 0; i < (size_t)size; i++) - TEST_ASSERT_VAL("Wrong data", - buf[i] == (i % 10)); - } - - free(buf); - } - - dso__delete(dso); - unlink(file); - return 0; -} diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c new file mode 100644 index 00000000000..819f10414f0 --- /dev/null +++ b/tools/perf/util/dso.c @@ -0,0 +1,900 @@ +#include <asm/bug.h> +#include <sys/time.h> +#include <sys/resource.h> +#include "symbol.h" +#include "dso.h" +#include "machine.h" +#include "util.h" +#include "debug.h" + +char dso__symtab_origin(const struct dso *dso) +{ + static const char origin[] = { + [DSO_BINARY_TYPE__KALLSYMS] = 'k', + [DSO_BINARY_TYPE__VMLINUX] = 'v', + [DSO_BINARY_TYPE__JAVA_JIT] = 'j', + [DSO_BINARY_TYPE__DEBUGLINK] = 'l', + [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', + [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', + [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', + [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', + [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', + [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', + [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', + [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', + [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', + [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', + }; + + if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) + return '!'; + return origin[dso->symtab_type]; +} + +int dso__read_binary_type_filename(const struct dso *dso, + enum dso_binary_type type, + char *root_dir, char *filename, size_t size) +{ + char build_id_hex[BUILD_ID_SIZE * 2 + 1]; + int ret = 0; + + switch (type) { + case DSO_BINARY_TYPE__DEBUGLINK: { 
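+		/*
+		 * Point debuglink at the basename part of the copied
+		 * long_name, then let filename__read_debuglink() replace
+		 * it with the debug file name recorded in the binary.
+		 */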
+ char *debuglink; + + strncpy(filename, dso->long_name, size); + debuglink = filename + dso->long_name_len; + while (debuglink != filename && *debuglink != '/') + debuglink--; + if (*debuglink == '/') + debuglink++; + ret = filename__read_debuglink(dso->long_name, debuglink, + size - (debuglink - filename)); + } + break; + case DSO_BINARY_TYPE__BUILD_ID_CACHE: + /* skip the locally configured cache if a symfs is given */ + if (symbol_conf.symfs[0] || + (dso__build_id_filename(dso, filename, size) == NULL)) + ret = -1; + break; + + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + snprintf(filename, size, "%s/usr/lib/debug%s.debug", + symbol_conf.symfs, dso->long_name); + break; + + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: + snprintf(filename, size, "%s/usr/lib/debug%s", + symbol_conf.symfs, dso->long_name); + break; + + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + { + const char *last_slash; + size_t len; + size_t dir_size; + + last_slash = dso->long_name + dso->long_name_len; + while (last_slash != dso->long_name && *last_slash != '/') + last_slash--; + + len = scnprintf(filename, size, "%s", symbol_conf.symfs); + dir_size = last_slash - dso->long_name + 2; + if (dir_size > (size - len)) { + ret = -1; + break; + } + len += scnprintf(filename + len, dir_size, "%s", dso->long_name); + len += scnprintf(filename + len , size - len, ".debug%s", + last_slash); + break; + } + + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + if (!dso->has_build_id) { + ret = -1; + break; + } + + build_id__sprintf(dso->build_id, + sizeof(dso->build_id), + build_id_hex); + snprintf(filename, size, + "%s/usr/lib/debug/.build-id/%.2s/%s.debug", + symbol_conf.symfs, build_id_hex, build_id_hex + 2); + break; + + case DSO_BINARY_TYPE__VMLINUX: + case DSO_BINARY_TYPE__GUEST_VMLINUX: + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + snprintf(filename, size, "%s%s", + symbol_conf.symfs, dso->long_name); + break; + + case DSO_BINARY_TYPE__GUEST_KMODULE: + snprintf(filename, size, "%s%s%s", symbol_conf.symfs, + root_dir, dso->long_name); + break; + + case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: + snprintf(filename, size, "%s%s", symbol_conf.symfs, + dso->long_name); + break; + + case DSO_BINARY_TYPE__KCORE: + case DSO_BINARY_TYPE__GUEST_KCORE: + snprintf(filename, size, "%s", dso->long_name); + break; + + default: + case DSO_BINARY_TYPE__KALLSYMS: + case DSO_BINARY_TYPE__GUEST_KALLSYMS: + case DSO_BINARY_TYPE__JAVA_JIT: + case DSO_BINARY_TYPE__NOT_FOUND: + ret = -1; + break; + } + + return ret; +} + +/* + * Global list of open DSOs and the counter. 
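+ * dso__data_open_cnt feeds may_cache_fd()/check_data_close() below,
+ * which keep the number of cached descriptors within half of the
+ * RLIMIT_NOFILE limit.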
+ */ +static LIST_HEAD(dso__data_open); +static long dso__data_open_cnt; + +static void dso__list_add(struct dso *dso) +{ + list_add_tail(&dso->data.open_entry, &dso__data_open); + dso__data_open_cnt++; +} + +static void dso__list_del(struct dso *dso) +{ + list_del(&dso->data.open_entry); + WARN_ONCE(dso__data_open_cnt <= 0, + "DSO data fd counter out of bounds."); + dso__data_open_cnt--; +} + +static void close_first_dso(void); + +static int do_open(char *name) +{ + int fd; + + do { + fd = open(name, O_RDONLY); + if (fd >= 0) + return fd; + + pr_debug("dso open failed, mmap: %s\n", strerror(errno)); + if (!dso__data_open_cnt || errno != EMFILE) + break; + + close_first_dso(); + } while (1); + + return -1; +} + +static int __open_dso(struct dso *dso, struct machine *machine) +{ + int fd; + char *root_dir = (char *)""; + char *name = malloc(PATH_MAX); + + if (!name) + return -ENOMEM; + + if (machine) + root_dir = machine->root_dir; + + if (dso__read_binary_type_filename(dso, dso->binary_type, + root_dir, name, PATH_MAX)) { + free(name); + return -EINVAL; + } + + fd = do_open(name); + free(name); + return fd; +} + +static void check_data_close(void); + +/** + * dso_close - Open DSO data file + * @dso: dso object + * + * Open @dso's data file descriptor and updates + * list/count of open DSO objects. + */ +static int open_dso(struct dso *dso, struct machine *machine) +{ + int fd = __open_dso(dso, machine); + + if (fd > 0) { + dso__list_add(dso); + /* + * Check if we crossed the allowed number + * of opened DSOs and close one if needed. + */ + check_data_close(); + } + + return fd; +} + +static void close_data_fd(struct dso *dso) +{ + if (dso->data.fd >= 0) { + close(dso->data.fd); + dso->data.fd = -1; + dso->data.file_size = 0; + dso__list_del(dso); + } +} + +/** + * dso_close - Close DSO data file + * @dso: dso object + * + * Close @dso's data file descriptor and updates + * list/count of open DSO objects. + */ +static void close_dso(struct dso *dso) +{ + close_data_fd(dso); +} + +static void close_first_dso(void) +{ + struct dso *dso; + + dso = list_first_entry(&dso__data_open, struct dso, data.open_entry); + close_dso(dso); +} + +static rlim_t get_fd_limit(void) +{ + struct rlimit l; + rlim_t limit = 0; + + /* Allow half of the current open fd limit. */ + if (getrlimit(RLIMIT_NOFILE, &l) == 0) { + if (l.rlim_cur == RLIM_INFINITY) + limit = l.rlim_cur; + else + limit = l.rlim_cur / 2; + } else { + pr_err("failed to get fd limit\n"); + limit = 1; + } + + return limit; +} + +static bool may_cache_fd(void) +{ + static rlim_t limit; + + if (!limit) + limit = get_fd_limit(); + + if (limit == RLIM_INFINITY) + return true; + + return limit > (rlim_t) dso__data_open_cnt; +} + +/* + * Check and close LRU dso if we crossed allowed limit + * for opened dso file descriptors. The limit is half + * of the RLIMIT_NOFILE files opened. +*/ +static void check_data_close(void) +{ + bool cache_fd = may_cache_fd(); + + if (!cache_fd) + close_first_dso(); +} + +/** + * dso__data_close - Close DSO data file + * @dso: dso object + * + * External interface to close @dso's data file descriptor. + */ +void dso__data_close(struct dso *dso) +{ + close_dso(dso); +} + +/** + * dso__data_fd - Get dso's data file descriptor + * @dso: dso object + * @machine: machine object + * + * External interface to find dso's file, open it and + * returns file descriptor. 
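+ * When the dso's binary type is still unknown, the build-id cache and
+ * then the system path are tried in turn before giving up.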
+ */ +int dso__data_fd(struct dso *dso, struct machine *machine) +{ + enum dso_binary_type binary_type_data[] = { + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + int i = 0; + + if (dso->data.fd >= 0) + return dso->data.fd; + + if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { + dso->data.fd = open_dso(dso, machine); + return dso->data.fd; + } + + do { + int fd; + + dso->binary_type = binary_type_data[i++]; + + fd = open_dso(dso, machine); + if (fd >= 0) + return dso->data.fd = fd; + + } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND); + + return -EINVAL; +} + +static void +dso_cache__free(struct rb_root *root) +{ + struct rb_node *next = rb_first(root); + + while (next) { + struct dso_cache *cache; + + cache = rb_entry(next, struct dso_cache, rb_node); + next = rb_next(&cache->rb_node); + rb_erase(&cache->rb_node, root); + free(cache); + } +} + +static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset) +{ + struct rb_node * const *p = &root->rb_node; + const struct rb_node *parent = NULL; + struct dso_cache *cache; + + while (*p != NULL) { + u64 end; + + parent = *p; + cache = rb_entry(parent, struct dso_cache, rb_node); + end = cache->offset + DSO__DATA_CACHE_SIZE; + + if (offset < cache->offset) + p = &(*p)->rb_left; + else if (offset >= end) + p = &(*p)->rb_right; + else + return cache; + } + return NULL; +} + +static void +dso_cache__insert(struct rb_root *root, struct dso_cache *new) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct dso_cache *cache; + u64 offset = new->offset; + + while (*p != NULL) { + u64 end; + + parent = *p; + cache = rb_entry(parent, struct dso_cache, rb_node); + end = cache->offset + DSO__DATA_CACHE_SIZE; + + if (offset < cache->offset) + p = &(*p)->rb_left; + else if (offset >= end) + p = &(*p)->rb_right; + } + + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, root); +} + +static ssize_t +dso_cache__memcpy(struct dso_cache *cache, u64 offset, + u8 *data, u64 size) +{ + u64 cache_offset = offset - cache->offset; + u64 cache_size = min(cache->size - cache_offset, size); + + memcpy(data, cache->data + cache_offset, cache_size); + return cache_size; +} + +static ssize_t +dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size) +{ + struct dso_cache *cache; + ssize_t ret; + + do { + u64 cache_offset; + + ret = -ENOMEM; + + cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); + if (!cache) + break; + + cache_offset = offset & DSO__DATA_CACHE_MASK; + ret = -EINVAL; + + if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET)) + break; + + ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE); + if (ret <= 0) + break; + + cache->offset = cache_offset; + cache->size = ret; + dso_cache__insert(&dso->data.cache, cache); + + ret = dso_cache__memcpy(cache, offset, data, size); + + } while (0); + + if (ret <= 0) + free(cache); + + return ret; +} + +static ssize_t dso_cache_read(struct dso *dso, u64 offset, + u8 *data, ssize_t size) +{ + struct dso_cache *cache; + + cache = dso_cache__find(&dso->data.cache, offset); + if (cache) + return dso_cache__memcpy(cache, offset, data, size); + else + return dso_cache__read(dso, offset, data, size); +} + +/* + * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks + * in the rb_tree. Any read to already cached data is served + * by cached data. 
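+ * cached_read() keeps calling dso_cache_read() until the request is
+ * satisfied or EOF is hit, so a single read may span several cache pages.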
+ */ +static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size) +{ + ssize_t r = 0; + u8 *p = data; + + do { + ssize_t ret; + + ret = dso_cache_read(dso, offset, p, size); + if (ret < 0) + return ret; + + /* Reached EOF, return what we have. */ + if (!ret) + break; + + BUG_ON(ret > size); + + r += ret; + p += ret; + offset += ret; + size -= ret; + + } while (size); + + return r; +} + +static int data_file_size(struct dso *dso) +{ + struct stat st; + + if (!dso->data.file_size) { + if (fstat(dso->data.fd, &st)) { + pr_err("dso mmap failed, fstat: %s\n", strerror(errno)); + return -1; + } + dso->data.file_size = st.st_size; + } + + return 0; +} + +static ssize_t data_read_offset(struct dso *dso, u64 offset, + u8 *data, ssize_t size) +{ + if (data_file_size(dso)) + return -1; + + /* Check the offset sanity. */ + if (offset > dso->data.file_size) + return -1; + + if (offset + size < offset) + return -1; + + return cached_read(dso, offset, data, size); +} + +/** + * dso__data_read_offset - Read data from dso file offset + * @dso: dso object + * @machine: machine object + * @offset: file offset + * @data: buffer to store data + * @size: size of the @data buffer + * + * External interface to read data from dso file offset. Open + * dso data file and use cached_read to get the data. + */ +ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size) +{ + if (dso__data_fd(dso, machine) < 0) + return -1; + + return data_read_offset(dso, offset, data, size); +} + +/** + * dso__data_read_addr - Read data from dso address + * @dso: dso object + * @machine: machine object + * @add: virtual memory address + * @data: buffer to store data + * @size: size of the @data buffer + * + * External interface to read data from dso address. + */ +ssize_t dso__data_read_addr(struct dso *dso, struct map *map, + struct machine *machine, u64 addr, + u8 *data, ssize_t size) +{ + u64 offset = map->map_ip(map, addr); + return dso__data_read_offset(dso, machine, offset, data, size); +} + +struct map *dso__new_map(const char *name) +{ + struct map *map = NULL; + struct dso *dso = dso__new(name); + + if (dso) + map = map__new2(0, dso, MAP__FUNCTION); + + return map; +} + +struct dso *dso__kernel_findnew(struct machine *machine, const char *name, + const char *short_name, int dso_type) +{ + /* + * The kernel dso could be created by build_id processing. + */ + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name); + + /* + * We need to run this in all cases, since during the build_id + * processing we had no idea this was the kernel dso. + */ + if (dso != NULL) { + dso__set_short_name(dso, short_name, false); + dso->kernel = dso_type; + } + + return dso; +} + +void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) +{ + if (name == NULL) + return; + + if (dso->long_name_allocated) + free((char *)dso->long_name); + + dso->long_name = name; + dso->long_name_len = strlen(name); + dso->long_name_allocated = name_allocated; +} + +void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) +{ + if (name == NULL) + return; + + if (dso->short_name_allocated) + free((char *)dso->short_name); + + dso->short_name = name; + dso->short_name_len = strlen(name); + dso->short_name_allocated = name_allocated; +} + +static void dso__set_basename(struct dso *dso) +{ + /* + * basename() may modify path buffer, so we must pass + * a copy. 
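+ * The strdup'ed basename is handed to dso__set_short_name() with
+ * name_allocated set, so the dso owns the copy and frees it on delete.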
+ */ + char *base, *lname = strdup(dso->long_name); + + if (!lname) + return; + + /* + * basename() may return a pointer to internal + * storage which is reused in subsequent calls + * so copy the result. + */ + base = strdup(basename(lname)); + + free(lname); + + if (!base) + return; + + dso__set_short_name(dso, base, true); +} + +int dso__name_len(const struct dso *dso) +{ + if (!dso) + return strlen("[unknown]"); + if (verbose) + return dso->long_name_len; + + return dso->short_name_len; +} + +bool dso__loaded(const struct dso *dso, enum map_type type) +{ + return dso->loaded & (1 << type); +} + +bool dso__sorted_by_name(const struct dso *dso, enum map_type type) +{ + return dso->sorted_by_name & (1 << type); +} + +void dso__set_sorted_by_name(struct dso *dso, enum map_type type) +{ + dso->sorted_by_name |= (1 << type); +} + +struct dso *dso__new(const char *name) +{ + struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); + + if (dso != NULL) { + int i; + strcpy(dso->name, name); + dso__set_long_name(dso, dso->name, false); + dso__set_short_name(dso, dso->name, false); + for (i = 0; i < MAP__NR_TYPES; ++i) + dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; + dso->data.cache = RB_ROOT; + dso->data.fd = -1; + dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; + dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND; + dso->loaded = 0; + dso->rel = 0; + dso->sorted_by_name = 0; + dso->has_build_id = 0; + dso->has_srcline = 1; + dso->a2l_fails = 1; + dso->kernel = DSO_TYPE_USER; + dso->needs_swap = DSO_SWAP__UNSET; + INIT_LIST_HEAD(&dso->node); + INIT_LIST_HEAD(&dso->data.open_entry); + } + + return dso; +} + +void dso__delete(struct dso *dso) +{ + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + symbols__delete(&dso->symbols[i]); + + if (dso->short_name_allocated) { + zfree((char **)&dso->short_name); + dso->short_name_allocated = false; + } + + if (dso->long_name_allocated) { + zfree((char **)&dso->long_name); + dso->long_name_allocated = false; + } + + dso__data_close(dso); + dso_cache__free(&dso->data.cache); + dso__free_a2l(dso); + zfree(&dso->symsrc_filename); + free(dso); +} + +void dso__set_build_id(struct dso *dso, void *build_id) +{ + memcpy(dso->build_id, build_id, sizeof(dso->build_id)); + dso->has_build_id = 1; +} + +bool dso__build_id_equal(const struct dso *dso, u8 *build_id) +{ + return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; +} + +void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) +{ + char path[PATH_MAX]; + + if (machine__is_default_guest(machine)) + return; + sprintf(path, "%s/sys/kernel/notes", machine->root_dir); + if (sysfs__read_build_id(path, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; +} + +int dso__kernel_module_get_build_id(struct dso *dso, + const char *root_dir) +{ + char filename[PATH_MAX]; + /* + * kernel module short names are of the form "[module]" and + * we need just "module" here. 
+ */ + const char *name = dso->short_name + 1; + + snprintf(filename, sizeof(filename), + "%s/sys/module/%.*s/notes/.note.gnu.build-id", + root_dir, (int)strlen(name) - 1, name); + + if (sysfs__read_build_id(filename, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; + + return 0; +} + +bool __dsos__read_build_ids(struct list_head *head, bool with_hits) +{ + bool have_build_id = false; + struct dso *pos; + + list_for_each_entry(pos, head, node) { + if (with_hits && !pos->hit) + continue; + if (pos->has_build_id) { + have_build_id = true; + continue; + } + if (filename__read_build_id(pos->long_name, pos->build_id, + sizeof(pos->build_id)) > 0) { + have_build_id = true; + pos->has_build_id = true; + } + } + + return have_build_id; +} + +void dsos__add(struct list_head *head, struct dso *dso) +{ + list_add_tail(&dso->node, head); +} + +struct dso *dsos__find(const struct list_head *head, const char *name, bool cmp_short) +{ + struct dso *pos; + + if (cmp_short) { + list_for_each_entry(pos, head, node) + if (strcmp(pos->short_name, name) == 0) + return pos; + return NULL; + } + list_for_each_entry(pos, head, node) + if (strcmp(pos->long_name, name) == 0) + return pos; + return NULL; +} + +struct dso *__dsos__findnew(struct list_head *head, const char *name) +{ + struct dso *dso = dsos__find(head, name, false); + + if (!dso) { + dso = dso__new(name); + if (dso != NULL) { + dsos__add(head, dso); + dso__set_basename(dso); + } + } + + return dso; +} + +size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + struct dso *pos; + size_t ret = 0; + + list_for_each_entry(pos, head, node) { + if (skip && skip(pos, parm)) + continue; + ret += dso__fprintf_buildid(pos, fp); + ret += fprintf(fp, " %s\n", pos->long_name); + } + return ret; +} + +size_t __dsos__fprintf(struct list_head *head, FILE *fp) +{ + struct dso *pos; + size_t ret = 0; + + list_for_each_entry(pos, head, node) { + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + ret += dso__fprintf(pos, i, fp); + } + + return ret; +} + +size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) +{ + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + return fprintf(fp, "%s", sbuild_id); +} + +size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) +{ + struct rb_node *nd; + size_t ret = fprintf(fp, "dso: %s (", dso->short_name); + + if (dso->short_name != dso->long_name) + ret += fprintf(fp, "%s, ", dso->long_name); + ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], + dso__loaded(dso, type) ? 
"" : "NOT "); + ret += dso__fprintf_buildid(dso, fp); + ret += fprintf(fp, ")\n"); + for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { + struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + ret += symbol__fprintf(pos, fp); + } + + return ret; +} diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h new file mode 100644 index 00000000000..ad553ba257b --- /dev/null +++ b/tools/perf/util/dso.h @@ -0,0 +1,232 @@ +#ifndef __PERF_DSO +#define __PERF_DSO + +#include <linux/types.h> +#include <linux/rbtree.h> +#include <stdbool.h> +#include <linux/types.h> +#include "map.h" +#include "build-id.h" + +enum dso_binary_type { + DSO_BINARY_TYPE__KALLSYMS = 0, + DSO_BINARY_TYPE__GUEST_KALLSYMS, + DSO_BINARY_TYPE__VMLINUX, + DSO_BINARY_TYPE__GUEST_VMLINUX, + DSO_BINARY_TYPE__JAVA_JIT, + DSO_BINARY_TYPE__DEBUGLINK, + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__KCORE, + DSO_BINARY_TYPE__GUEST_KCORE, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, +}; + +enum dso_kernel_type { + DSO_TYPE_USER = 0, + DSO_TYPE_KERNEL, + DSO_TYPE_GUEST_KERNEL +}; + +enum dso_swap_type { + DSO_SWAP__UNSET, + DSO_SWAP__NO, + DSO_SWAP__YES, +}; + +#define DSO__SWAP(dso, type, val) \ +({ \ + type ____r = val; \ + BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ + if (dso->needs_swap == DSO_SWAP__YES) { \ + switch (sizeof(____r)) { \ + case 2: \ + ____r = bswap_16(val); \ + break; \ + case 4: \ + ____r = bswap_32(val); \ + break; \ + case 8: \ + ____r = bswap_64(val); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + } \ + ____r; \ +}) + +#define DSO__DATA_CACHE_SIZE 4096 +#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1) + +struct dso_cache { + struct rb_node rb_node; + u64 offset; + u64 size; + char data[0]; +}; + +struct dso { + struct list_head node; + struct rb_root symbols[MAP__NR_TYPES]; + struct rb_root symbol_names[MAP__NR_TYPES]; + void *a2l; + char *symsrc_filename; + unsigned int a2l_fails; + enum dso_kernel_type kernel; + enum dso_swap_type needs_swap; + enum dso_binary_type symtab_type; + enum dso_binary_type binary_type; + u8 adjust_symbols:1; + u8 has_build_id:1; + u8 has_srcline:1; + u8 hit:1; + u8 annotate_warned:1; + u8 short_name_allocated:1; + u8 long_name_allocated:1; + u8 sorted_by_name; + u8 loaded; + u8 rel; + u8 build_id[BUILD_ID_SIZE]; + const char *short_name; + const char *long_name; + u16 long_name_len; + u16 short_name_len; + + /* dso data file */ + struct { + struct rb_root cache; + int fd; + size_t file_size; + struct list_head open_entry; + } data; + + char name[0]; +}; + +/* dso__for_each_symbol - iterate over the symbols of given type + * + * @dso: the 'struct dso *' in which symbols itereated + * @pos: the 'struct symbol *' to use as a loop cursor + * @n: the 'struct rb_node *' to use as a temporary storage + * @type: the 'enum map_type' type of symbols + */ +#define dso__for_each_symbol(dso, pos, n, type) \ + symbols__for_each_entry(&(dso)->symbols[(type)], pos, n) + +static inline void dso__set_loaded(struct dso *dso, enum map_type type) +{ + dso->loaded |= (1 << type); +} + +struct dso *dso__new(const char *name); +void dso__delete(struct dso *dso); + +void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated); +void dso__set_long_name(struct dso *dso, const char *name, bool 
 name_allocated); + +int dso__name_len(const struct dso *dso); + +bool dso__loaded(const struct dso *dso, enum map_type type); + +bool dso__sorted_by_name(const struct dso *dso, enum map_type type); +void dso__set_sorted_by_name(struct dso *dso, enum map_type type); +void dso__sort_by_name(struct dso *dso, enum map_type type); + +void dso__set_build_id(struct dso *dso, void *build_id); +bool dso__build_id_equal(const struct dso *dso, u8 *build_id); +void dso__read_running_kernel_build_id(struct dso *dso, + struct machine *machine); +int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir); + +char dso__symtab_origin(const struct dso *dso); +int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type, + char *root_dir, char *filename, size_t size); + +/* + * The dso__data_* external interface provides the following functions: + * dso__data_fd + * dso__data_close + * dso__data_read_offset + * dso__data_read_addr + * + * Please refer to the dso.c object code for the documentation of each + * function and its arguments. The following text explains the + * dso file descriptor caching. + * + * The dso__data_* interface allows caching of opened file descriptors + * to speed up the dso data accesses. The idea is to leave the file + * descriptor opened ideally for the whole life of the dso object. + * + * The current usage of the dso__data_* interface is as follows: + * + * Get DSO's fd: + * int fd = dso__data_fd(dso, machine); + * USE 'fd' SOMEHOW + * + * Read DSO's data: + * n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE); + * n = dso__data_read_addr(dso_0, &machine, 0, buf, BUFSIZE); + * + * Eventually close DSO's fd: + * dso__data_close(dso); + * + * It is not necessary to close the DSO object data file. Each time a new + * DSO data file is opened, the limit (RLIMIT_NOFILE/2) is checked. Once + * it is crossed, the oldest opened DSO object is closed. + * + * The dso__delete function calls the close_dso function to ensure the + * data file descriptor gets closed/unmapped before the dso object + * is freed. 
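A hedged usage sketch of the life cycle documented in the comment above, written against the declarations in this header; the caller is assumed to already hold valid struct dso and struct machine pointers, and the buffer size is arbitrary:

#include "dso.h"	/* the declarations shown in this header */

/*
 * Sketch only: get the cached fd, read a few bytes from file offset 0,
 * then release the reference to the open data file.
 */
static ssize_t read_first_bytes(struct dso *dso, struct machine *machine)
{
	u8 buf[64];
	ssize_t n;

	if (dso__data_fd(dso, machine) < 0)	/* opens or reuses the data file */
		return -1;

	n = dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));

	dso__data_close(dso);	/* optional; dso__delete() also closes it */
	return n;
}

Note that dso__data_read_offset() calls dso__data_fd() itself, so the explicit fd call is only needed when the descriptor is used directly.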
+ * + * TODO +*/ +int dso__data_fd(struct dso *dso, struct machine *machine); +void dso__data_close(struct dso *dso); + +ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, + u64 offset, u8 *data, ssize_t size); +ssize_t dso__data_read_addr(struct dso *dso, struct map *map, + struct machine *machine, u64 addr, + u8 *data, ssize_t size); + +struct map *dso__new_map(const char *name); +struct dso *dso__kernel_findnew(struct machine *machine, const char *name, + const char *short_name, int dso_type); + +void dsos__add(struct list_head *head, struct dso *dso); +struct dso *dsos__find(const struct list_head *head, const char *name, + bool cmp_short); +struct dso *__dsos__findnew(struct list_head *head, const char *name); +bool __dsos__read_build_ids(struct list_head *head, bool with_hits); + +size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); +size_t __dsos__fprintf(struct list_head *head, FILE *fp); + +size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); +size_t dso__fprintf_symbols_by_name(struct dso *dso, + enum map_type type, FILE *fp); +size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); + +static inline bool dso__is_vmlinux(struct dso *dso) +{ + return dso->binary_type == DSO_BINARY_TYPE__VMLINUX || + dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX; +} + +static inline bool dso__is_kcore(struct dso *dso) +{ + return dso->binary_type == DSO_BINARY_TYPE__KCORE || + dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE; +} + +void dso__free_a2l(struct dso *dso); + +#endif /* __PERF_DSO */ diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 3e5f5430a28..cc66c4049e0 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -263,6 +263,21 @@ bool die_is_signed_type(Dwarf_Die *tp_die) } /** + * die_is_func_def - Ensure that this DIE is a subprogram and definition + * @dw_die: a DIE + * + * Ensure that this DIE is a subprogram and NOT a declaration. This + * returns true if @dw_die is a function definition. + **/ +bool die_is_func_def(Dwarf_Die *dw_die) +{ + Dwarf_Attribute attr; + + return (dwarf_tag(dw_die) == DW_TAG_subprogram && + dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL); +} + +/** * die_get_data_member_location - Get the data-member offset * @mb_die: a DIE of a member of a data structure * @offs: The offset of the member in the data structure @@ -392,6 +407,10 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data) { struct __addr_die_search_param *ad = data; + /* + * Since a declaration entry doesn't has given pc, this always returns + * function definition entry. + */ if (dwarf_tag(fn_die) == DW_TAG_subprogram && dwarf_haspc(fn_die, ad->addr)) { memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die)); @@ -407,7 +426,7 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data) * @die_mem: a buffer for result DIE * * Search a non-inlined function DIE which includes @addr. Stores the - * DIE to @die_mem and returns it if found. Returns NULl if failed. + * DIE to @die_mem and returns it if found. Returns NULL if failed. 
 */ Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, Dwarf_Die *die_mem) @@ -435,15 +454,32 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data) } /** + * die_find_top_inlinefunc - Search the top inlined function at given address + * @sp_die: a subprogram DIE which includes @addr + * @addr: target address + * @die_mem: a buffer for result DIE + * + * Search an inlined function DIE which includes @addr. Stores the + * DIE to @die_mem and returns it if found. Returns NULL if failed. + * Even if several inlined functions are expanded recursively, this + * doesn't trace it down, and returns the topmost one. + */ +Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, + Dwarf_Die *die_mem) +{ + return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); +} + +/** * die_find_inlinefunc - Search an inlined function at given address - * @cu_die: a CU DIE which including @addr + * @sp_die: a subprogram DIE which includes @addr * @addr: target address * @die_mem: a buffer for result DIE * * Search an inlined function DIE which includes @addr. Stores the - * DIE to @die_mem and returns it if found. Returns NULl if failed. + * DIE to @die_mem and returns it if found. Returns NULL if failed. * If several inlined functions are expanded recursively, this trace - * it and returns deepest one. + * it down and returns deepest one. */ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, Dwarf_Die *die_mem) @@ -711,14 +747,17 @@ struct __find_variable_param { static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) { struct __find_variable_param *fvp = data; + Dwarf_Attribute attr; int tag; tag = dwarf_tag(die_mem); if ((tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) && - die_compare_name(die_mem, fvp->name)) + die_compare_name(die_mem, fvp->name) && + /* Does the DIE have location information or external instance? 
*/ + (dwarf_attr(die_mem, DW_AT_external, &attr) || + dwarf_attr(die_mem, DW_AT_location, &attr))) return DIE_FIND_CB_END; - if (dwarf_haspc(die_mem, fvp->addr)) return DIE_FIND_CB_CONTINUE; else diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 6ce1717784b..b4fe90c6cb2 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -38,6 +38,9 @@ extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, int (*callback)(Dwarf_Die *, void *), void *data); +/* Ensure that this DIE is a subprogram and definition (not declaration) */ +extern bool die_is_func_def(Dwarf_Die *dw_die); + /* Compare diename and tname */ extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); @@ -76,7 +79,11 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die, extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, Dwarf_Die *die_mem); -/* Search an inlined function including given address */ +/* Search the top inlined function including given address */ +extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, + Dwarf_Die *die_mem); + +/* Search the deepest inlined function including given address */ extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, Dwarf_Die *die_mem); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 6715b193872..d0281bdfa58 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -1,15 +1,20 @@ #include <linux/types.h> +#include <sys/mman.h> #include "event.h" #include "debug.h" +#include "hist.h" +#include "machine.h" #include "sort.h" #include "string.h" #include "strlist.h" #include "thread.h" #include "thread_map.h" +#include "symbol/kallsyms.h" static const char *perf_event__names[] = { [0] = "TOTAL", [PERF_RECORD_MMAP] = "MMAP", + [PERF_RECORD_MMAP2] = "MMAP2", [PERF_RECORD_LOST] = "LOST", [PERF_RECORD_COMM] = "COMM", [PERF_RECORD_EXIT] = "EXIT", @@ -91,20 +96,20 @@ static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len) static pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, - int full, perf_event__handler_t process, struct machine *machine) { - char filename[PATH_MAX]; size_t size; - DIR *tasks; - struct dirent dirent, *next; pid_t tgid; memset(&event->comm, 0, sizeof(event->comm)); - tgid = perf_event__get_comm_tgid(pid, event->comm.comm, - sizeof(event->comm.comm)); + if (machine__is_host(machine)) + tgid = perf_event__get_comm_tgid(pid, event->comm.comm, + sizeof(event->comm.comm)); + else + tgid = machine->pid; + if (tgid < 0) goto out; @@ -117,64 +122,53 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, event->comm.header.size = (sizeof(event->comm) - (sizeof(event->comm.comm) - size) + machine->id_hdr_size); - if (!full) { - event->comm.tid = pid; - - if (process(tool, event, &synth_sample, machine) != 0) - return -1; - - goto out; - } - - snprintf(filename, sizeof(filename), "/proc/%d/task", pid); + event->comm.tid = pid; - tasks = opendir(filename); - if (tasks == NULL) { - pr_debug("couldn't open %s\n", filename); - return 0; - } + if (process(tool, event, &synth_sample, machine) != 0) + return -1; - while (!readdir_r(tasks, &dirent, &next) && next) { - char *end; - pid = strtol(dirent.d_name, &end, 10); - if (*end) - continue; +out: + return tgid; +} - /* already have tgid; jut want to update the comm */ - (void) perf_event__get_comm_tgid(pid, event->comm.comm, - 
sizeof(event->comm.comm)); +static int perf_event__synthesize_fork(struct perf_tool *tool, + union perf_event *event, pid_t pid, + pid_t tgid, perf_event__handler_t process, + struct machine *machine) +{ + memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); - size = strlen(event->comm.comm) + 1; - size = PERF_ALIGN(size, sizeof(u64)); - memset(event->comm.comm + size, 0, machine->id_hdr_size); - event->comm.header.size = (sizeof(event->comm) - - (sizeof(event->comm.comm) - size) + - machine->id_hdr_size); + /* this is really a clone event but we use fork to synthesize it */ + event->fork.ppid = tgid; + event->fork.ptid = tgid; + event->fork.pid = tgid; + event->fork.tid = pid; + event->fork.header.type = PERF_RECORD_FORK; - event->comm.tid = pid; + event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); - if (process(tool, event, &synth_sample, machine) != 0) { - tgid = -1; - break; - } - } + if (process(tool, event, &synth_sample, machine) != 0) + return -1; - closedir(tasks); -out: - return tgid; + return 0; } -static int perf_event__synthesize_mmap_events(struct perf_tool *tool, - union perf_event *event, - pid_t pid, pid_t tgid, - perf_event__handler_t process, - struct machine *machine) +int perf_event__synthesize_mmap_events(struct perf_tool *tool, + union perf_event *event, + pid_t pid, pid_t tgid, + perf_event__handler_t process, + struct machine *machine, + bool mmap_data) { char filename[PATH_MAX]; FILE *fp; int rc = 0; - snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); + if (machine__is_default_guest(machine)) + return 0; + + snprintf(filename, sizeof(filename), "%s/proc/%d/maps", + machine->root_dir, pid); fp = fopen(filename, "r"); if (fp == NULL) { @@ -185,62 +179,85 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, return -1; } - event->header.type = PERF_RECORD_MMAP; - /* - * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c - */ - event->header.misc = PERF_RECORD_MISC_USER; + event->header.type = PERF_RECORD_MMAP2; while (1) { - char bf[BUFSIZ], *pbf = bf; - int n; + char bf[BUFSIZ]; + char prot[5]; + char execname[PATH_MAX]; + char anonstr[] = "//anon"; + unsigned int ino; size_t size; + ssize_t n; + if (fgets(bf, sizeof(bf), fp) == NULL) break; + /* ensure null termination since stack will be reused. */ + strcpy(execname, ""); + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ - n = hex2u64(pbf, &event->mmap.start); - if (n < 0) - continue; - pbf += n + 1; - n = hex2u64(pbf, &event->mmap.len); - if (n < 0) + n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n", + &event->mmap2.start, &event->mmap2.len, prot, + &event->mmap2.pgoff, &event->mmap2.maj, + &event->mmap2.min, + &ino, execname); + + /* + * Anon maps don't have the execname. 
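The sscanf() above pulls the address range, the protection string, the page offset, the device numbers, the inode and (when present) the path out of a single /proc/<pid>/maps line; anonymous mappings carry no path, so only seven fields convert and the empty name is later replaced with "//anon". A self-contained sketch of the same parse on the example line quoted in the comment above (variable names are local to the sketch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const char *line = "00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat";
	uint64_t start, end, pgoff;
	unsigned int maj, min, ino;
	char prot[5] = "", path[4096] = "";
	int n;

	/* Same field layout as a /proc/<pid>/maps line. */
	n = sscanf(line, "%" SCNx64 "-%" SCNx64 " %4s %" SCNx64 " %x:%x %u %4095s",
		   &start, &end, prot, &pgoff, &maj, &min, &ino, path);

	/* An anonymous mapping has no path, so only 7 fields convert. */
	printf("matched %d fields: %#" PRIx64 "-%#" PRIx64 " %s %s\n",
	       n, start, end, prot, path[0] ? path : "//anon");
	return 0;
}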
+ */ + if (n < 7) continue; - pbf += n + 3; - if (*pbf == 'x') { /* vm_exec */ - char anonstr[] = "//anon\n"; - char *execname = strchr(bf, '/'); - /* Catch VDSO */ - if (execname == NULL) - execname = strstr(bf, "[vdso]"); + event->mmap2.ino = (u64)ino; - /* Catch anonymous mmaps */ - if ((execname == NULL) && !strstr(bf, "[")) - execname = anonstr; + /* + * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c + */ + if (machine__is_host(machine)) + event->header.misc = PERF_RECORD_MISC_USER; + else + event->header.misc = PERF_RECORD_MISC_GUEST_USER; + + /* map protection and flags bits */ + event->mmap2.prot = 0; + event->mmap2.flags = 0; + if (prot[0] == 'r') + event->mmap2.prot |= PROT_READ; + if (prot[1] == 'w') + event->mmap2.prot |= PROT_WRITE; + if (prot[2] == 'x') + event->mmap2.prot |= PROT_EXEC; + + if (prot[3] == 's') + event->mmap2.flags |= MAP_SHARED; + else + event->mmap2.flags |= MAP_PRIVATE; - if (execname == NULL) + if (prot[2] != 'x') { + if (!mmap_data || prot[0] != 'r') continue; - pbf += 3; - n = hex2u64(pbf, &event->mmap.pgoff); - - size = strlen(execname); - execname[size - 1] = '\0'; /* Remove \n */ - memcpy(event->mmap.filename, execname, size); - size = PERF_ALIGN(size, sizeof(u64)); - event->mmap.len -= event->mmap.start; - event->mmap.header.size = (sizeof(event->mmap) - - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, machine->id_hdr_size); - event->mmap.header.size += machine->id_hdr_size; - event->mmap.pid = tgid; - event->mmap.tid = pid; - - if (process(tool, event, &synth_sample, machine) != 0) { - rc = -1; - break; - } + event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; + } + + if (!strcmp(execname, "")) + strcpy(execname, anonstr); + + size = strlen(execname) + 1; + memcpy(event->mmap2.filename, execname, size); + size = PERF_ALIGN(size, sizeof(u64)); + event->mmap2.len -= event->mmap.start; + event->mmap2.header.size = (sizeof(event->mmap2) - + (sizeof(event->mmap2.filename) - size)); + memset(event->mmap2.filename + size, 0, machine->id_hdr_size); + event->mmap2.header.size += machine->id_hdr_size; + event->mmap2.pid = tgid; + event->mmap2.tid = pid; + + if (process(tool, event, &synth_sample, machine) != 0) { + rc = -1; + break; } } @@ -306,25 +323,80 @@ int perf_event__synthesize_modules(struct perf_tool *tool, static int __event__synthesize_thread(union perf_event *comm_event, union perf_event *mmap_event, + union perf_event *fork_event, pid_t pid, int full, perf_event__handler_t process, struct perf_tool *tool, - struct machine *machine) + struct machine *machine, bool mmap_data) { - pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full, + char filename[PATH_MAX]; + DIR *tasks; + struct dirent dirent, *next; + pid_t tgid; + + /* special case: only send one comm event using passed in pid */ + if (!full) { + tgid = perf_event__synthesize_comm(tool, comm_event, pid, + process, machine); + + if (tgid == -1) + return -1; + + return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, + process, machine, mmap_data); + } + + if (machine__is_default_guest(machine)) + return 0; + + snprintf(filename, sizeof(filename), "%s/proc/%d/task", + machine->root_dir, pid); + + tasks = opendir(filename); + if (tasks == NULL) { + pr_debug("couldn't open %s\n", filename); + return 0; + } + + while (!readdir_r(tasks, &dirent, &next) && next) { + char *end; + int rc = 0; + pid_t _pid; + + _pid = strtol(dirent.d_name, &end, 10); + if (*end) + continue; + + tgid = perf_event__synthesize_comm(tool, 
comm_event, _pid, + process, machine); + if (tgid == -1) + return -1; + + if (_pid == pid) { + /* process the parent's maps too */ + rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, + process, machine, mmap_data); + } else { + /* only fork the tid's map, to save time */ + rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid, process, machine); - if (tgid == -1) - return -1; - return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine); + } + + if (rc) + return rc; + } + + closedir(tasks); + return 0; } int perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, - struct machine *machine) + struct machine *machine, + bool mmap_data) { - union perf_event *comm_event, *mmap_event; + union perf_event *comm_event, *mmap_event, *fork_event; int err = -1, thread, j; comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); @@ -335,11 +407,17 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, if (mmap_event == NULL) goto out_free_comm; + fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); + if (fork_event == NULL) + goto out_free_mmap; + err = 0; for (thread = 0; thread < threads->nr; ++thread) { if (__event__synthesize_thread(comm_event, mmap_event, + fork_event, threads->map[thread], 0, - process, tool, machine)) { + process, tool, machine, + mmap_data)) { err = -1; break; } @@ -361,15 +439,18 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, /* if not, generate events for it */ if (need_leader && - __event__synthesize_thread(comm_event, - mmap_event, - comm_event->comm.pid, 0, - process, tool, machine)) { + __event__synthesize_thread(comm_event, mmap_event, + fork_event, + comm_event->comm.pid, 0, + process, tool, machine, + mmap_data)) { err = -1; break; } } } + free(fork_event); +out_free_mmap: free(mmap_event); out_free_comm: free(comm_event); @@ -379,13 +460,17 @@ out: int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, - struct machine *machine) + struct machine *machine, bool mmap_data) { DIR *proc; + char proc_path[PATH_MAX]; struct dirent dirent, *next; - union perf_event *comm_event, *mmap_event; + union perf_event *comm_event, *mmap_event, *fork_event; int err = -1; + if (machine__is_default_guest(machine)) + return 0; + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); if (comm_event == NULL) goto out; @@ -394,27 +479,34 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (mmap_event == NULL) goto out_free_comm; - proc = opendir("/proc"); - if (proc == NULL) + fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); + if (fork_event == NULL) goto out_free_mmap; + snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); + proc = opendir(proc_path); + + if (proc == NULL) + goto out_free_fork; + while (!readdir_r(proc, &dirent, &next) && next) { char *end; pid_t pid = strtol(dirent.d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; - - if (__event__synthesize_thread(comm_event, mmap_event, pid, 1, - process, tool, machine) != 0) { - err = -1; - goto out_closedir; - } + /* + * We may race with exiting thread, so don't stop just because + * one thread couldn't be synthesized. 
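The comment above is why the /proc walk tolerates per-thread failures: only purely numeric directory names are PIDs, and a task that exits while the directory is being scanned is simply skipped rather than treated as fatal. A standalone sketch of such a tolerant scan, using plain readdir() instead of the readdir_r() used in this file:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	DIR *proc = opendir("/proc");
	struct dirent *ent;

	if (!proc)
		return 1;

	while ((ent = readdir(proc)) != NULL) {
		char *end;
		long pid = strtol(ent->d_name, &end, 10);

		if (*end)	/* not a /proc/<pid> entry */
			continue;

		/* A real tool would synthesize events here and ignore failures. */
		printf("found pid %ld\n", pid);
	}

	closedir(proc);
	return 0;
}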
+ */ + __event__synthesize_thread(comm_event, mmap_event, fork_event, pid, + 1, process, tool, machine, mmap_data); } err = 0; -out_closedir: closedir(proc); +out_free_fork: + free(fork_event); out_free_mmap: free(mmap_event); out_free_comm: @@ -445,23 +537,32 @@ static int find_symbol_cb(void *arg, const char *name, char type, return 1; } +u64 kallsyms__get_function_start(const char *kallsyms_filename, + const char *symbol_name) +{ + struct process_symbol_args args = { .name = symbol_name, }; + + if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0) + return 0; + + return args.start; +} + int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, - struct machine *machine, - const char *symbol_name) + struct machine *machine) { size_t size; - const char *filename, *mmap_name; - char path[PATH_MAX]; + const char *mmap_name; char name_buff[PATH_MAX]; struct map *map; + struct kmap *kmap; int err; /* * We should get this from /sys/kernel/sections/.text, but till that is * available use this, and after it is use this as a fallback for older * kernels. */ - struct process_symbol_args args = { .name = symbol_name, }; union perf_event *event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); if (event == NULL) { @@ -477,28 +578,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, * see kernel/perf_event.c __perf_event_mmap */ event->header.misc = PERF_RECORD_MISC_KERNEL; - filename = "/proc/kallsyms"; } else { event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; - if (machine__is_default_guest(machine)) - filename = (char *) symbol_conf.default_guest_kallsyms; - else { - sprintf(path, "%s/proc/kallsyms", machine->root_dir); - filename = path; - } } - if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) - return -ENOENT; - map = machine->vmlinux_maps[MAP__FUNCTION]; + kmap = map__kmap(map); size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), - "%s%s", mmap_name, symbol_name) + 1; + "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; size = PERF_ALIGN(size, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); - event->mmap.pgoff = args.start; + event->mmap.pgoff = kmap->ref_reloc_sym->addr; event->mmap.start = map->start; event->mmap.len = map->end - event->mmap.start; event->mmap.pid = machine->pid; @@ -516,183 +608,58 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) int perf_event__process_comm(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __maybe_unused, + struct perf_sample *sample, struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, event->comm.tid); - - if (dump_trace) - perf_event__fprintf_comm(event, stdout); - - if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); - return -1; - } - - return 0; + return machine__process_comm_event(machine, event, sample); } int perf_event__process_lost(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __maybe_unused, - struct machine *machine __maybe_unused) + struct perf_sample *sample, + struct machine *machine) { - dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", - event->lost.id, event->lost.lost); - return 0; + return machine__process_lost_event(machine, event, sample); } -static void 
perf_event__set_kernel_mmap_len(union perf_event *event, - struct map **maps) +size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) { - maps[MAP__FUNCTION]->start = event->mmap.start; - maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len; - /* - * Be a bit paranoid here, some perf.data file came with - * a zero sized synthesized MMAP event for the kernel. - */ - if (maps[MAP__FUNCTION]->end == 0) - maps[MAP__FUNCTION]->end = ~0ULL; + return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n", + event->mmap.pid, event->mmap.tid, event->mmap.start, + event->mmap.len, event->mmap.pgoff, + (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', + event->mmap.filename); } -static int perf_event__process_kernel_mmap(struct perf_tool *tool - __maybe_unused, - union perf_event *event, - struct machine *machine) +size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) { - struct map *map; - char kmmap_prefix[PATH_MAX]; - enum dso_kernel_type kernel_type; - bool is_kernel_mmap; - - machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); - if (machine__is_host(machine)) - kernel_type = DSO_TYPE_KERNEL; - else - kernel_type = DSO_TYPE_GUEST_KERNEL; - - is_kernel_mmap = memcmp(event->mmap.filename, - kmmap_prefix, - strlen(kmmap_prefix) - 1) == 0; - if (event->mmap.filename[0] == '/' || - (!is_kernel_mmap && event->mmap.filename[0] == '[')) { - - char short_module_name[1024]; - char *name, *dot; - - if (event->mmap.filename[0] == '/') { - name = strrchr(event->mmap.filename, '/'); - if (name == NULL) - goto out_problem; - - ++name; /* skip / */ - dot = strrchr(name, '.'); - if (dot == NULL) - goto out_problem; - snprintf(short_module_name, sizeof(short_module_name), - "[%.*s]", (int)(dot - name), name); - strxfrchar(short_module_name, '-', '_'); - } else - strcpy(short_module_name, event->mmap.filename); - - map = machine__new_module(machine, event->mmap.start, - event->mmap.filename); - if (map == NULL) - goto out_problem; - - name = strdup(short_module_name); - if (name == NULL) - goto out_problem; - - map->dso->short_name = name; - map->dso->sname_alloc = 1; - map->end = map->start + event->mmap.len; - } else if (is_kernel_mmap) { - const char *symbol_name = (event->mmap.filename + - strlen(kmmap_prefix)); - /* - * Should be there already, from the build-id table in - * the header. - */ - struct dso *kernel = __dsos__findnew(&machine->kernel_dsos, - kmmap_prefix); - if (kernel == NULL) - goto out_problem; - - kernel->kernel = kernel_type; - if (__machine__create_kernel_maps(machine, kernel) < 0) - goto out_problem; - - perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps); - - /* - * Avoid using a zero address (kptr_restrict) for the ref reloc - * symbol. Effectively having zero here means that at record - * time /proc/sys/kernel/kptr_restrict was non zero. 
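For background on the kptr_restrict comment above: when /proc/sys/kernel/kptr_restrict was non-zero at record time, /proc/kallsyms presented the ref reloc symbol with an address of zero to unprivileged readers, so a zero pgoff in the synthesized kernel MMAP means the relocation information cannot be trusted. A minimal standalone sketch that merely reports the current setting (purely illustrative):

#include <stdio.h>

/* Report the current kptr_restrict value; a non-zero setting makes
 * /proc/kallsyms show zeroed addresses to unprivileged readers. */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
	int val;

	if (!f || fscanf(f, "%d", &val) != 1) {
		if (f)
			fclose(f);
		printf("kptr_restrict not readable\n");
		return 1;
	}
	fclose(f);

	printf("kptr_restrict = %d%s\n", val,
	       val ? " (kallsyms addresses read as 0 without privilege)" : "");
	return 0;
}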
- */ - if (event->mmap.pgoff != 0) { - maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, - symbol_name, - event->mmap.pgoff); - } - - if (machine__is_default_guest(machine)) { - /* - * preload dso of guest kernel and modules - */ - dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], - NULL); - } - } - return 0; -out_problem: - return -1; + return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 + " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n", + event->mmap2.pid, event->mmap2.tid, event->mmap2.start, + event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, + event->mmap2.min, event->mmap2.ino, + event->mmap2.ino_generation, + (event->mmap2.prot & PROT_READ) ? 'r' : '-', + (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', + (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', + (event->mmap2.flags & MAP_SHARED) ? 's' : 'p', + event->mmap2.filename); } -size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) +int perf_event__process_mmap(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) { - return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", - event->mmap.pid, event->mmap.tid, event->mmap.start, - event->mmap.len, event->mmap.pgoff, event->mmap.filename); + return machine__process_mmap_event(machine, event, sample); } -int perf_event__process_mmap(struct perf_tool *tool, +int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __maybe_unused, + struct perf_sample *sample, struct machine *machine) { - struct thread *thread; - struct map *map; - u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - int ret = 0; - - if (dump_trace) - perf_event__fprintf_mmap(event, stdout); - - if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || - cpumode == PERF_RECORD_MISC_KERNEL) { - ret = perf_event__process_kernel_mmap(tool, event, machine); - if (ret < 0) - goto out_problem; - return 0; - } - - thread = machine__findnew_thread(machine, event->mmap.pid); - if (thread == NULL) - goto out_problem; - map = map__new(&machine->user_dsos, event->mmap.start, - event->mmap.len, event->mmap.pgoff, - event->mmap.pid, event->mmap.filename, - MAP__FUNCTION); - if (map == NULL) - goto out_problem; - - thread__insert_map(thread, map); - return 0; - -out_problem: - dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); - return 0; + return machine__process_mmap2_event(machine, event, sample); } size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) @@ -702,29 +669,20 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) event->fork.ppid, event->fork.ptid); } -int perf_event__process_task(struct perf_tool *tool __maybe_unused, +int perf_event__process_fork(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __maybe_unused, - struct machine *machine) + struct perf_sample *sample, + struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, event->fork.tid); - struct thread *parent = machine__findnew_thread(machine, event->fork.ptid); - - if (dump_trace) - perf_event__fprintf_task(event, stdout); - - if (event->header.type == PERF_RECORD_EXIT) { - machine__remove_thread(machine, thread); - return 0; - } - - if (thread == NULL || parent == NULL || - thread__fork(thread, parent) < 0) { - dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); - return -1; - } + return 
machine__process_fork_event(machine, event, sample); +} - return 0; +int perf_event__process_exit(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + return machine__process_exit_event(machine, event, sample); } size_t perf_event__fprintf(union perf_event *event, FILE *fp) @@ -743,6 +701,9 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp) case PERF_RECORD_MMAP: ret += perf_event__fprintf_mmap(event, fp); break; + case PERF_RECORD_MMAP2: + ret += perf_event__fprintf_mmap2(event, fp); + break; default: ret += fprintf(fp, "\n"); } @@ -750,40 +711,27 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp) return ret; } -int perf_event__process(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample, struct machine *machine) +int perf_event__process(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) { - switch (event->header.type) { - case PERF_RECORD_COMM: - perf_event__process_comm(tool, event, sample, machine); - break; - case PERF_RECORD_MMAP: - perf_event__process_mmap(tool, event, sample, machine); - break; - case PERF_RECORD_FORK: - case PERF_RECORD_EXIT: - perf_event__process_task(tool, event, sample, machine); - break; - case PERF_RECORD_LOST: - perf_event__process_lost(tool, event, sample, machine); - default: - break; - } - - return 0; + return machine__process_event(machine, event, sample); } -void thread__find_addr_map(struct thread *self, +void thread__find_addr_map(struct thread *thread, struct machine *machine, u8 cpumode, enum map_type type, u64 addr, struct addr_location *al) { - struct map_groups *mg = &self->mg; + struct map_groups *mg = thread->mg; + bool load_map = false; - al->thread = self; + al->machine = machine; + al->thread = thread; al->addr = addr; al->cpumode = cpumode; - al->filtered = false; + al->filtered = 0; if (machine == NULL) { al->map = NULL; @@ -793,30 +741,27 @@ void thread__find_addr_map(struct thread *self, if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { al->level = 'k'; mg = &machine->kmaps; + load_map = true; } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { al->level = '.'; } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { al->level = 'g'; mg = &machine->kmaps; + load_map = true; + } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) { + al->level = 'u'; } else { - /* - * 'u' means guest os user space. - * TODO: We don't support guest user space. Might support late. - */ - if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) - al->level = 'u'; - else - al->level = 'H'; + al->level = 'H'; al->map = NULL; if ((cpumode == PERF_RECORD_MISC_GUEST_USER || cpumode == PERF_RECORD_MISC_GUEST_KERNEL) && !perf_guest) - al->filtered = true; + al->filtered |= (1 << HIST_FILTER__GUEST); if ((cpumode == PERF_RECORD_MISC_USER || cpumode == PERF_RECORD_MISC_KERNEL) && !perf_host) - al->filtered = true; + al->filtered |= (1 << HIST_FILTER__HOST); return; } @@ -838,18 +783,25 @@ try_again: mg = &machine->kmaps; goto try_again; } - } else + } else { + /* + * Kernel maps might be changed when loading symbols so loading + * must be done prior to using kernel maps. 
+ */ + if (load_map) + map__load(al->map, machine->symbol_filter); al->addr = al->map->map_ip(al->map, al->addr); + } } void thread__find_addr_location(struct thread *thread, struct machine *machine, u8 cpumode, enum map_type type, u64 addr, - struct addr_location *al, - symbol_filter_t filter) + struct addr_location *al) { thread__find_addr_map(thread, machine, cpumode, type, addr, al); if (al->map != NULL) - al->sym = map__find_symbol(al->map, al->addr, filter); + al->sym = map__find_symbol(al->map, al->addr, + machine->symbol_filter); else al->sym = NULL; } @@ -857,20 +809,16 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine, int perf_event__preprocess_sample(const union perf_event *event, struct machine *machine, struct addr_location *al, - struct perf_sample *sample, - symbol_filter_t filter) + struct perf_sample *sample) { u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - struct thread *thread = machine__findnew_thread(machine, event->ip.pid); + struct thread *thread = machine__findnew_thread(machine, sample->pid, + sample->tid); if (thread == NULL) return -1; - if (symbol_conf.comm_list && - !strlist__has_entry(symbol_conf.comm_list, thread->comm)) - goto out_filtered; - - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); /* * Have we already created the kernel maps for this machine? * @@ -883,10 +831,14 @@ int perf_event__preprocess_sample(const union perf_event *event, machine__create_kernel_maps(machine); thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, - event->ip.ip, al); + sample->ip, al); dump_printf(" ...... dso: %s\n", al->map ? al->map->dso->long_name : al->level == 'H' ? "[hypervisor]" : "<not found>"); + + if (thread__is_filtered(thread)) + al->filtered |= (1 << HIST_FILTER__THREAD); + al->sym = NULL; al->cpu = sample->cpu; @@ -898,20 +850,19 @@ int perf_event__preprocess_sample(const union perf_event *event, dso->short_name) || (dso->short_name != dso->long_name && strlist__has_entry(symbol_conf.dso_list, - dso->long_name))))) - goto out_filtered; + dso->long_name))))) { + al->filtered |= (1 << HIST_FILTER__DSO); + } - al->sym = map__find_symbol(al->map, al->addr, filter); + al->sym = map__find_symbol(al->map, al->addr, + machine->symbol_filter); } if (symbol_conf.sym_list && (!al->sym || !strlist__has_entry(symbol_conf.sym_list, - al->sym->name))) - goto out_filtered; - - return 0; + al->sym->name))) { + al->filtered |= (1 << HIST_FILTER__SYMBOL); + } -out_filtered: - al->filtered = true; return 0; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 21b99e741a8..e5dd40addb3 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -6,23 +6,30 @@ #include "../perf.h" #include "map.h" +#include "build-id.h" +#include "perf_regs.h" -/* - * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * - */ -struct ip_event { +struct mmap_event { struct perf_event_header header; - u64 ip; u32 pid, tid; - unsigned char __more_data[]; + u64 start; + u64 len; + u64 pgoff; + char filename[PATH_MAX]; }; -struct mmap_event { +struct mmap2_event { struct perf_event_header header; u32 pid, tid; u64 start; u64 len; u64 pgoff; + u32 maj; + u32 min; + u64 ino; + u64 ino_generation; + u32 prot; + u32 flags; char filename[PATH_MAX]; }; @@ -57,12 +64,22 @@ struct read_event { u64 id; }; +struct throttle_event { + struct perf_event_header header; + u64 time; + u64 id; + u64 stream_id; +}; #define PERF_SAMPLE_MASK \ (PERF_SAMPLE_IP 
| PERF_SAMPLE_TID | \ PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \ PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \ - PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \ + PERF_SAMPLE_IDENTIFIER) + +/* perf sample has 16 bits size limit */ +#define PERF_SAMPLE_MAX_SIZE (1 << 16) struct sample_event { struct perf_event_header header; @@ -70,7 +87,13 @@ struct sample_event { }; struct regs_dump { + u64 abi; + u64 mask; u64 *regs; + + /* Cached values/mask filled by first register access. */ + u64 cache_regs[PERF_REGS_MAX]; + u64 cache_mask; }; struct stack_dump { @@ -79,6 +102,47 @@ struct stack_dump { char *data; }; +struct sample_read_value { + u64 value; + u64 id; +}; + +struct sample_read { + u64 time_enabled; + u64 time_running; + union { + struct { + u64 nr; + struct sample_read_value *values; + } group; + struct sample_read_value one; + }; +}; + +struct ip_callchain { + u64 nr; + u64 ips[0]; +}; + +struct branch_flags { + u64 mispred:1; + u64 predicted:1; + u64 in_tx:1; + u64 abort:1; + u64 reserved:60; +}; + +struct branch_entry { + u64 from; + u64 to; + struct branch_flags flags; +}; + +struct branch_stack { + u64 nr; + struct branch_entry entries[0]; +}; + struct perf_sample { u64 ip; u32 pid, tid; @@ -87,16 +151,25 @@ struct perf_sample { u64 id; u64 stream_id; u64 period; + u64 weight; + u64 transaction; u32 cpu; u32 raw_size; + u64 data_src; void *raw_data; struct ip_callchain *callchain; struct branch_stack *branch_stack; struct regs_dump user_regs; struct stack_dump user_stack; + struct sample_read read; }; -#define BUILD_ID_SIZE 20 +#define PERF_MEM_DATA_SRC_NONE \ + (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA)) struct build_id_event { struct perf_event_header header; @@ -108,7 +181,7 @@ struct build_id_event { enum perf_user_event_type { /* above any possible kernel type */ PERF_RECORD_USER_TYPE_START = 64, PERF_RECORD_HEADER_ATTR = 64, - PERF_RECORD_HEADER_EVENT_TYPE = 65, + PERF_RECORD_HEADER_EVENT_TYPE = 65, /* depreceated */ PERF_RECORD_HEADER_TRACING_DATA = 66, PERF_RECORD_HEADER_BUILD_ID = 67, PERF_RECORD_FINISHED_ROUND = 68, @@ -140,12 +213,13 @@ struct tracing_data_event { union perf_event { struct perf_event_header header; - struct ip_event ip; struct mmap_event mmap; + struct mmap2_event mmap2; struct comm_event comm; struct fork_event fork; struct lost_event lost; struct read_event read; + struct throttle_event throttle; struct sample_event sample; struct attr_event attr; struct event_type_event event_type; @@ -166,14 +240,13 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, int perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, - struct machine *machine); + struct machine *machine, bool mmap_data); int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, - struct machine *machine); + struct machine *machine, bool mmap_data); int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, - struct machine *machine, - const char *symbol_name); + struct machine *machine); int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, @@ -191,7 +264,15 @@ int perf_event__process_mmap(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine); -int perf_event__process_task(struct perf_tool *tool, +int perf_event__process_mmap2(struct perf_tool *tool, + union 
perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_fork(struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine); +int perf_event__process_exit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine); @@ -201,21 +282,35 @@ int perf_event__process(struct perf_tool *tool, struct machine *machine); struct addr_location; -int perf_event__preprocess_sample(const union perf_event *self, + +int perf_event__preprocess_sample(const union perf_event *event, struct machine *machine, struct addr_location *al, - struct perf_sample *sample, - symbol_filter_t filter); + struct perf_sample *sample); const char *perf_event__name(unsigned int id); +size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, + u64 read_format); int perf_event__synthesize_sample(union perf_event *event, u64 type, + u64 read_format, const struct perf_sample *sample, bool swapped); +int perf_event__synthesize_mmap_events(struct perf_tool *tool, + union perf_event *event, + pid_t pid, pid_t tgid, + perf_event__handler_t process, + struct machine *machine, + bool mmap_data); + size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); +size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp); size_t perf_event__fprintf_task(union perf_event *event, FILE *fp); size_t perf_event__fprintf(union perf_event *event, FILE *fp); +u64 kallsyms__get_function_start(const char *kallsyms_filename, + const char *symbol_name); + #endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 186b8773039..59ef2802fcf 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -7,16 +7,18 @@ * Released under the GPL v2. (and only v2, not any later version) */ #include "util.h" -#include "debugfs.h" +#include <api/fs/debugfs.h> #include <poll.h> #include "cpumap.h" #include "thread_map.h" #include "target.h" #include "evlist.h" #include "evsel.h" +#include "debug.h" #include <unistd.h> #include "parse-events.h" +#include "parse-options.h" #include <sys/mman.h> @@ -38,40 +40,58 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, evlist->workload.pid = -1; } -struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, - struct thread_map *threads) +struct perf_evlist *perf_evlist__new(void) { struct perf_evlist *evlist = zalloc(sizeof(*evlist)); if (evlist != NULL) - perf_evlist__init(evlist, cpus, threads); + perf_evlist__init(evlist, NULL, NULL); return evlist; } -void perf_evlist__config_attrs(struct perf_evlist *evlist, - struct perf_record_opts *opts) +struct perf_evlist *perf_evlist__new_default(void) { - struct perf_evsel *evsel, *first; + struct perf_evlist *evlist = perf_evlist__new(); - if (evlist->cpus->map[0] < 0) - opts->no_inherit = true; + if (evlist && perf_evlist__add_default(evlist)) { + perf_evlist__delete(evlist); + evlist = NULL; + } - first = perf_evlist__first(evlist); + return evlist; +} - list_for_each_entry(evsel, &evlist->entries, node) { - perf_evsel__config(evsel, opts, first); +/** + * perf_evlist__set_id_pos - set the positions of event ids. + * @evlist: selected event list + * + * Events with compatible sample types all have the same id_pos + * and is_pos. For convenience, put a copy on evlist. 
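id_pos and is_pos, set just below, record where the sample id lives in a record's u64 payload: id_pos counts from the front of a PERF_RECORD_SAMPLE, while is_pos counts from the back of the sample_id block appended to other record types (see perf_evlist__event2id() later in this file). A toy sketch of the two lookups on fabricated arrays; the positions and values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Fabricated u64 payloads following a record header. */
	uint64_t sample[]  = { 0x400123, 42, 7, 99 };	/* a SAMPLE-like payload */
	uint64_t trailer[] = { 7, 99, 42 };		/* a sample_id-like tail */
	int id_pos = 1;		/* id position from the front (made up) */
	int is_pos = 1;		/* id position from the back (made up) */
	size_t n = sizeof(trailer) / sizeof(trailer[0]);

	printf("sample id  = %llu\n", (unsigned long long)sample[id_pos]);
	printf("trailer id = %llu\n", (unsigned long long)trailer[n - is_pos]);
	return 0;
}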
+ */ +void perf_evlist__set_id_pos(struct perf_evlist *evlist) +{ + struct perf_evsel *first = perf_evlist__first(evlist); - if (evlist->nr_entries > 1) - evsel->attr.sample_type |= PERF_SAMPLE_ID; - } + evlist->id_pos = first->id_pos; + evlist->is_pos = first->is_pos; +} + +static void perf_evlist__update_id_pos(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) + perf_evsel__calc_id_pos(evsel); + + perf_evlist__set_id_pos(evlist); } static void perf_evlist__purge(struct perf_evlist *evlist) { struct perf_evsel *pos, *n; - list_for_each_entry_safe(pos, n, &evlist->entries, node) { + evlist__for_each_safe(evlist, n, pos) { list_del_init(&pos->node); perf_evsel__delete(pos); } @@ -81,14 +101,18 @@ static void perf_evlist__purge(struct perf_evlist *evlist) void perf_evlist__exit(struct perf_evlist *evlist) { - free(evlist->mmap); - free(evlist->pollfd); - evlist->mmap = NULL; - evlist->pollfd = NULL; + zfree(&evlist->mmap); + zfree(&evlist->pollfd); } void perf_evlist__delete(struct perf_evlist *evlist) { + perf_evlist__munmap(evlist); + perf_evlist__close(evlist); + cpu_map__delete(evlist->cpus); + thread_map__delete(evlist->threads); + evlist->cpus = NULL; + evlist->threads = NULL; perf_evlist__purge(evlist); perf_evlist__exit(evlist); free(evlist); @@ -97,15 +121,22 @@ void perf_evlist__delete(struct perf_evlist *evlist) void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) { list_add_tail(&entry->node, &evlist->entries); - ++evlist->nr_entries; + entry->idx = evlist->nr_entries; + + if (!evlist->nr_entries++) + perf_evlist__set_id_pos(evlist); } void perf_evlist__splice_list_tail(struct perf_evlist *evlist, struct list_head *list, int nr_entries) { + bool set_id_pos = !evlist->nr_entries; + list_splice_tail(list, &evlist->entries); evlist->nr_entries += nr_entries; + if (set_id_pos) + perf_evlist__set_id_pos(evlist); } void __perf_evlist__set_leader(struct list_head *list) @@ -113,18 +144,21 @@ void __perf_evlist__set_leader(struct list_head *list) struct perf_evsel *evsel, *leader; leader = list_entry(list->next, struct perf_evsel, node); - leader->leader = NULL; + evsel = list_entry(list->prev, struct perf_evsel, node); + + leader->nr_members = evsel->idx - leader->idx + 1; - list_for_each_entry(evsel, list, node) { - if (evsel != leader) - evsel->leader = leader; + __evlist__for_each(list, evsel) { + evsel->leader = leader; } } void perf_evlist__set_leader(struct perf_evlist *evlist) { - if (evlist->nr_entries) + if (evlist->nr_entries) { + evlist->nr_groups = evlist->nr_entries > 1 ? 
1 : 0; __perf_evlist__set_leader(&evlist->entries); + } } int perf_evlist__add_default(struct perf_evlist *evlist) @@ -137,7 +171,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist) event_attr_init(&attr); - evsel = perf_evsel__new(&attr, 0); + evsel = perf_evsel__new(&attr); if (evsel == NULL) goto error; @@ -162,7 +196,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, size_t i; for (i = 0; i < nr_attrs; i++) { - evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i); + evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); if (evsel == NULL) goto out_delete_partial_list; list_add_tail(&evsel->node, &head); @@ -173,7 +207,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, return 0; out_delete_partial_list: - list_for_each_entry_safe(evsel, n, &head, node) + __evlist__for_each_safe(&head, n, evsel) perf_evsel__delete(evsel); return -1; } @@ -194,7 +228,7 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) { struct perf_evsel *evsel; - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { if (evsel->attr.type == PERF_TYPE_TRACEPOINT && (int)evsel->attr.config == id) return evsel; @@ -203,16 +237,30 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) return NULL; } +struct perf_evsel * +perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, + const char *name) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && + (strcmp(evsel->name, name) == 0)) + return evsel; + } + + return NULL; +} + int perf_evlist__add_newtp(struct perf_evlist *evlist, const char *sys, const char *name, void *handler) { - struct perf_evsel *evsel; + struct perf_evsel *evsel = perf_evsel__newtp(sys, name); - evsel = perf_evsel__newtp(sys, name, evlist->nr_entries); if (evsel == NULL) return -1; - evsel->handler.func = handler; + evsel->handler = handler; perf_evlist__add(evlist, evsel); return 0; } @@ -221,10 +269,14 @@ void perf_evlist__disable(struct perf_evlist *evlist) { int cpu, thread; struct perf_evsel *pos; - - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - list_for_each_entry(pos, &evlist->entries, node) { - for (thread = 0; thread < evlist->threads->nr; thread++) + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = thread_map__nr(evlist->threads); + + for (cpu = 0; cpu < nr_cpus; cpu++) { + evlist__for_each(evlist, pos) { + if (!perf_evsel__is_group_leader(pos) || !pos->fd) + continue; + for (thread = 0; thread < nr_threads; thread++) ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE, 0); } @@ -235,19 +287,63 @@ void perf_evlist__enable(struct perf_evlist *evlist) { int cpu, thread; struct perf_evsel *pos; - - for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { - list_for_each_entry(pos, &evlist->entries, node) { - for (thread = 0; thread < evlist->threads->nr; thread++) + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = thread_map__nr(evlist->threads); + + for (cpu = 0; cpu < nr_cpus; cpu++) { + evlist__for_each(evlist, pos) { + if (!perf_evsel__is_group_leader(pos) || !pos->fd) + continue; + for (thread = 0; thread < nr_threads; thread++) ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); } } } +int perf_evlist__disable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + int cpu, thread, err; + + if (!evsel->fd) + return 0; + + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + for (thread = 0; thread < evlist->threads->nr; thread++) { + err = 
ioctl(FD(evsel, cpu, thread), + PERF_EVENT_IOC_DISABLE, 0); + if (err) + return err; + } + } + return 0; +} + +int perf_evlist__enable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel) +{ + int cpu, thread, err; + + if (!evsel->fd) + return -EINVAL; + + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + for (thread = 0; thread < evlist->threads->nr; thread++) { + err = ioctl(FD(evsel, cpu, thread), + PERF_EVENT_IOC_ENABLE, 0); + if (err) + return err; + } + } + return 0; +} + static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) { - int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = thread_map__nr(evlist->threads); + int nfds = nr_cpus * nr_threads * evlist->nr_entries; evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); return evlist->pollfd != NULL ? 0 : -ENOMEM; } @@ -286,6 +382,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist, { u64 read_data[4] = { 0, }; int id_idx = 1; /* The first entry is the counter value */ + u64 id; + int ret; + + ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); + if (!ret) + goto add; + + if (errno != ENOTTY) + return -1; + + /* Legacy way to get event id.. All hail to old kernels! */ + + /* + * This way does not work with group format read, so bail + * out in that case. + */ + if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) + return -1; if (!(evsel->attr.read_format & PERF_FORMAT_ID) || read(fd, &read_data, sizeof(read_data)) == -1) @@ -296,26 +410,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist, if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) ++id_idx; - perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]); + id = read_data[id_idx]; + + add: + perf_evlist__id_add(evlist, evsel, cpu, thread, id); return 0; } -struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) +struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) { struct hlist_head *head; - struct hlist_node *pos; struct perf_sample_id *sid; int hash; - if (evlist->nr_entries == 1) - return perf_evlist__first(evlist); - hash = hash_64(id, PERF_EVLIST__HLIST_BITS); head = &evlist->heads[hash]; - hlist_for_each_entry(sid, pos, head, node) + hlist_for_each_entry(sid, head, node) if (sid->id == id) - return sid->evsel; + return sid; + + return NULL; +} + +struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) +{ + struct perf_sample_id *sid; + + if (evlist->nr_entries == 1) + return perf_evlist__first(evlist); + + sid = perf_evlist__id2sid(evlist, id); + if (sid) + return sid->evsel; if (!perf_evlist__sample_id_all(evlist)) return perf_evlist__first(evlist); @@ -323,10 +450,62 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) return NULL; } +static int perf_evlist__event2id(struct perf_evlist *evlist, + union perf_event *event, u64 *id) +{ + const u64 *array = event->sample.array; + ssize_t n; + + n = (event->header.size - sizeof(event->header)) >> 3; + + if (event->header.type == PERF_RECORD_SAMPLE) { + if (evlist->id_pos >= n) + return -1; + *id = array[evlist->id_pos]; + } else { + if (evlist->is_pos > n) + return -1; + n -= evlist->is_pos; + *id = array[n]; + } + return 0; +} + +static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, + union perf_event *event) +{ + struct perf_evsel *first = perf_evlist__first(evlist); + struct hlist_head *head; + struct perf_sample_id *sid; + int hash; + u64 id; + + if 
(evlist->nr_entries == 1) + return first; + + if (!first->attr.sample_id_all && + event->header.type != PERF_RECORD_SAMPLE) + return first; + + if (perf_evlist__event2id(evlist, event, &id)) + return NULL; + + /* Synthesized events have an id of zero */ + if (!id) + return first; + + hash = hash_64(id, PERF_EVLIST__HLIST_BITS); + head = &evlist->heads[hash]; + + hlist_for_each_entry(sid, head, node) { + if (sid->id == id) + return sid->evsel; + } + return NULL; +} + union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) { - /* XXX Move this to perf.c, making it generally available */ - unsigned int page_size = sysconf(_SC_PAGE_SIZE); struct perf_mmap *md = &evlist->mmap[idx]; unsigned int head = perf_mmap__read_head(md); unsigned int old = md->prev; @@ -366,7 +545,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) if ((old & md->mask) + size != ((old + size) & md->mask)) { unsigned int offset = old; unsigned int len = min(sizeof(*event), size), cpy; - void *dst = &evlist->event_copy; + void *dst = md->event_copy; do { cpy = min(md->mask + 1 - (offset & md->mask), len); @@ -376,7 +555,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) len -= cpy; } while (len); - event = &evlist->event_copy; + event = (union perf_event *) md->event_copy; } old += size; @@ -384,32 +563,45 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) md->prev = old; - if (!evlist->overwrite) + return event; +} + +void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) +{ + if (!evlist->overwrite) { + struct perf_mmap *md = &evlist->mmap[idx]; + unsigned int old = md->prev; + perf_mmap__write_tail(md, old); + } +} - return event; +static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx) +{ + if (evlist->mmap[idx].base != NULL) { + munmap(evlist->mmap[idx].base, evlist->mmap_len); + evlist->mmap[idx].base = NULL; + } } void perf_evlist__munmap(struct perf_evlist *evlist) { int i; - for (i = 0; i < evlist->nr_mmaps; i++) { - if (evlist->mmap[i].base != NULL) { - munmap(evlist->mmap[i].base, evlist->mmap_len); - evlist->mmap[i].base = NULL; - } - } + if (evlist->mmap == NULL) + return; + + for (i = 0; i < evlist->nr_mmaps; i++) + __perf_evlist__munmap(evlist, i); - free(evlist->mmap); - evlist->mmap = NULL; + zfree(&evlist->mmap); } static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) { evlist->nr_mmaps = cpu_map__nr(evlist->cpus); - if (cpu_map__all(evlist->cpus)) - evlist->nr_mmaps = evlist->threads->nr; + if (cpu_map__empty(evlist->cpus)) + evlist->nr_mmaps = thread_map__nr(evlist->threads); evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); return evlist->mmap != NULL ? 
0 : -ENOMEM; } @@ -422,6 +614,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, MAP_SHARED, fd, 0); if (evlist->mmap[idx].base == MAP_FAILED) { + pr_debug2("failed to mmap perf event ring buffer, error %d\n", + errno); evlist->mmap[idx].base = NULL; return -1; } @@ -430,118 +624,177 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, return 0; } -static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) +static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, + int prot, int mask, int cpu, int thread, + int *output) { struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + int fd = FD(evsel, cpu, thread); + + if (*output == -1) { + *output = fd; + if (__perf_evlist__mmap(evlist, idx, prot, mask, + *output) < 0) + return -1; + } else { + if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) + return -1; + } + + if ((evsel->attr.read_format & PERF_FORMAT_ID) && + perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) + return -1; + } + + return 0; +} + +static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, + int mask) +{ int cpu, thread; + int nr_cpus = cpu_map__nr(evlist->cpus); + int nr_threads = thread_map__nr(evlist->threads); - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + pr_debug2("perf event ring buffer mmapped per cpu\n"); + for (cpu = 0; cpu < nr_cpus; cpu++) { int output = -1; - for (thread = 0; thread < evlist->threads->nr; thread++) { - list_for_each_entry(evsel, &evlist->entries, node) { - int fd = FD(evsel, cpu, thread); - - if (output == -1) { - output = fd; - if (__perf_evlist__mmap(evlist, cpu, - prot, mask, output) < 0) - goto out_unmap; - } else { - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) - goto out_unmap; - } - - if ((evsel->attr.read_format & PERF_FORMAT_ID) && - perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) - goto out_unmap; - } + for (thread = 0; thread < nr_threads; thread++) { + if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask, + cpu, thread, &output)) + goto out_unmap; } } return 0; out_unmap: - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { - if (evlist->mmap[cpu].base != NULL) { - munmap(evlist->mmap[cpu].base, evlist->mmap_len); - evlist->mmap[cpu].base = NULL; - } - } + for (cpu = 0; cpu < nr_cpus; cpu++) + __perf_evlist__munmap(evlist, cpu); return -1; } -static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) +static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, + int mask) { - struct perf_evsel *evsel; int thread; + int nr_threads = thread_map__nr(evlist->threads); - for (thread = 0; thread < evlist->threads->nr; thread++) { + pr_debug2("perf event ring buffer mmapped per thread\n"); + for (thread = 0; thread < nr_threads; thread++) { int output = -1; - list_for_each_entry(evsel, &evlist->entries, node) { - int fd = FD(evsel, 0, thread); - - if (output == -1) { - output = fd; - if (__perf_evlist__mmap(evlist, thread, - prot, mask, output) < 0) - goto out_unmap; - } else { - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) - goto out_unmap; - } - - if ((evsel->attr.read_format & PERF_FORMAT_ID) && - perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) - goto out_unmap; - } + if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0, + thread, &output)) + goto out_unmap; } return 0; out_unmap: - for (thread = 0; thread < evlist->threads->nr; thread++) { - if (evlist->mmap[thread].base 
!= NULL) { - munmap(evlist->mmap[thread].base, evlist->mmap_len); - evlist->mmap[thread].base = NULL; - } - } + for (thread = 0; thread < nr_threads; thread++) + __perf_evlist__munmap(evlist, thread); return -1; } -/** perf_evlist__mmap - Create per cpu maps to receive events - * - * @evlist - list of events - * @pages - map length in pages - * @overwrite - overwrite older events? - * - * If overwrite is false the user needs to signal event consuption using: - * - * struct perf_mmap *m = &evlist->mmap[cpu]; - * unsigned int head = perf_mmap__read_head(m); +static size_t perf_evlist__mmap_size(unsigned long pages) +{ + /* 512 kiB: default amount of unprivileged mlocked memory */ + if (pages == UINT_MAX) + pages = (512 * 1024) / page_size; + else if (!is_power_of_2(pages)) + return 0; + + return (pages + 1) * page_size; +} + +static long parse_pages_arg(const char *str, unsigned long min, + unsigned long max) +{ + unsigned long pages, val; + static struct parse_tag tags[] = { + { .tag = 'B', .mult = 1 }, + { .tag = 'K', .mult = 1 << 10 }, + { .tag = 'M', .mult = 1 << 20 }, + { .tag = 'G', .mult = 1 << 30 }, + { .tag = 0 }, + }; + + if (str == NULL) + return -EINVAL; + + val = parse_tag_value(str, tags); + if (val != (unsigned long) -1) { + /* we got file size value */ + pages = PERF_ALIGN(val, page_size) / page_size; + } else { + /* we got pages count value */ + char *eptr; + pages = strtoul(str, &eptr, 10); + if (*eptr != '\0') + return -EINVAL; + } + + if (pages == 0 && min == 0) { + /* leave number of pages at 0 */ + } else if (!is_power_of_2(pages)) { + /* round pages up to next power of 2 */ + pages = next_pow2_l(pages); + if (!pages) + return -EINVAL; + pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", + pages * page_size, pages); + } + + if (pages > max) + return -EINVAL; + + return pages; +} + +int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + unsigned int *mmap_pages = opt->value; + unsigned long max = UINT_MAX; + long pages; + + if (max > SIZE_MAX / page_size) + max = SIZE_MAX / page_size; + + pages = parse_pages_arg(str, 1, max); + if (pages < 0) { + pr_err("Invalid argument for --mmap_pages/-m\n"); + return -1; + } + + *mmap_pages = pages; + return 0; +} + +/** + * perf_evlist__mmap - Create mmaps to receive events. + * @evlist: list of events + * @pages: map length in pages + * @overwrite: overwrite older events? * - * perf_mmap__write_tail(m, head) + * If @overwrite is %false the user needs to signal event consumption using + * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this + * automatically. * - * Using perf_evlist__read_on_cpu does this automatically. + * Return: %0 on success, negative error code otherwise. */ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, bool overwrite) { - unsigned int page_size = sysconf(_SC_PAGE_SIZE); struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; const struct thread_map *threads = evlist->threads; int prot = PROT_READ | (overwrite ? 
0 : PROT_WRITE), mask; - /* 512 kiB: default amount of unprivileged mlocked memory */ - if (pages == UINT_MAX) - pages = (512 * 1024) / page_size; - else if (!is_power_of_2(pages)) - return -EINVAL; - - mask = pages * page_size - 1; - if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) return -ENOMEM; @@ -549,23 +802,24 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, return -ENOMEM; evlist->overwrite = overwrite; - evlist->mmap_len = (pages + 1) * page_size; + evlist->mmap_len = perf_evlist__mmap_size(pages); + pr_debug("mmap size %zuB\n", evlist->mmap_len); + mask = evlist->mmap_len - page_size - 1; - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { if ((evsel->attr.read_format & PERF_FORMAT_ID) && evsel->sample_id == NULL && perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) return -ENOMEM; } - if (cpu_map__all(cpus)) + if (cpu_map__empty(cpus)) return perf_evlist__mmap_per_thread(evlist, prot, mask); return perf_evlist__mmap_per_cpu(evlist, prot, mask); } -int perf_evlist__create_maps(struct perf_evlist *evlist, - struct perf_target *target) +int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) { evlist->threads = thread_map__new_str(target->pid, target->tid, target->uid); @@ -573,9 +827,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, if (evlist->threads == NULL) return -1; - if (perf_target__has_task(target)) - evlist->cpus = cpu_map__dummy_new(); - else if (!perf_target__has_cpu(target) && !target->uses_mmap) + if (target__uses_dummy_map(target)) evlist->cpus = cpu_map__dummy_new(); else evlist->cpus = cpu_map__new(target->cpu_list); @@ -590,22 +842,14 @@ out_delete_threads: return -1; } -void perf_evlist__delete_maps(struct perf_evlist *evlist) -{ - cpu_map__delete(evlist->cpus); - thread_map__delete(evlist->threads); - evlist->cpus = NULL; - evlist->threads = NULL; -} - int perf_evlist__apply_filters(struct perf_evlist *evlist) { struct perf_evsel *evsel; int err = 0; const int ncpus = cpu_map__nr(evlist->cpus), - nthreads = evlist->threads->nr; + nthreads = thread_map__nr(evlist->threads); - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { if (evsel->filter == NULL) continue; @@ -622,9 +866,9 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) struct perf_evsel *evsel; int err = 0; const int ncpus = cpu_map__nr(evlist->cpus), - nthreads = evlist->threads->nr; + nthreads = thread_map__nr(evlist->threads); - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); if (err) break; @@ -635,20 +879,66 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) { + struct perf_evsel *pos; + + if (evlist->nr_entries == 1) + return true; + + if (evlist->id_pos < 0 || evlist->is_pos < 0) + return false; + + evlist__for_each(evlist, pos) { + if (pos->id_pos != evlist->id_pos || + pos->is_pos != evlist->is_pos) + return false; + } + + return true; +} + +u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + if (evlist->combined_sample_type) + return evlist->combined_sample_type; + + evlist__for_each(evlist, evsel) + evlist->combined_sample_type |= evsel->attr.sample_type; + + return evlist->combined_sample_type; +} + +u64 perf_evlist__combined_sample_type(struct perf_evlist 
*evlist) +{ + evlist->combined_sample_type = 0; + return __perf_evlist__combined_sample_type(evlist); +} + +bool perf_evlist__valid_read_format(struct perf_evlist *evlist) +{ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; + u64 read_format = first->attr.read_format; + u64 sample_type = first->attr.sample_type; - list_for_each_entry_continue(pos, &evlist->entries, node) { - if (first->attr.sample_type != pos->attr.sample_type) + evlist__for_each(evlist, pos) { + if (read_format != pos->attr.read_format) return false; } + /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */ + if ((sample_type & PERF_SAMPLE_READ) && + !(read_format & PERF_FORMAT_ID)) { + return false; + } + return true; } -u64 perf_evlist__sample_type(struct perf_evlist *evlist) +u64 perf_evlist__read_format(struct perf_evlist *evlist) { struct perf_evsel *first = perf_evlist__first(evlist); - return first->attr.sample_type; + return first->attr.read_format; } u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) @@ -677,6 +967,9 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) if (sample_type & PERF_SAMPLE_CPU) size += sizeof(data->cpu) * 2; + + if (sample_type & PERF_SAMPLE_IDENTIFIER) + size += sizeof(data->id); out: return size; } @@ -685,7 +978,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) { struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; - list_for_each_entry_continue(pos, &evlist->entries, node) { + evlist__for_each_continue(evlist, pos) { if (first->attr.sample_id_all != pos->attr.sample_id_all) return false; } @@ -705,12 +998,27 @@ void perf_evlist__set_selected(struct perf_evlist *evlist, evlist->selected = evsel; } +void perf_evlist__close(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + int ncpus = cpu_map__nr(evlist->cpus); + int nthreads = thread_map__nr(evlist->threads); + int n; + + evlist__for_each_reverse(evlist, evsel) { + n = evsel->cpus ? evsel->cpus->nr : ncpus; + perf_evsel__close(evsel, n, nthreads); + } +} + int perf_evlist__open(struct perf_evlist *evlist) { struct perf_evsel *evsel; - int err, ncpus, nthreads; + int err; - list_for_each_entry(evsel, &evlist->entries, node) { + perf_evlist__update_id_pos(evlist); + + evlist__for_each(evlist, evsel) { err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); if (err < 0) goto out_err; @@ -718,19 +1026,14 @@ int perf_evlist__open(struct perf_evlist *evlist) return 0; out_err: - ncpus = evlist->cpus ? evlist->cpus->nr : 1; - nthreads = evlist->threads ? evlist->threads->nr : 1; - - list_for_each_entry_reverse(evsel, &evlist->entries, node) - perf_evsel__close(evsel, ncpus, nthreads); - + perf_evlist__close(evlist); errno = -err; return err; } -int perf_evlist__prepare_workload(struct perf_evlist *evlist, - struct perf_record_opts *opts, - const char *argv[]) +int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target, + const char *argv[], bool pipe_output, + void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) { int child_ready_pipe[2], go_pipe[2]; char bf; @@ -752,21 +1055,16 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, } if (!evlist->workload.pid) { - if (opts->pipe_output) + if (pipe_output) dup2(2, 1); + signal(SIGTERM, SIG_DFL); + close(child_ready_pipe[0]); close(go_pipe[1]); fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); /* - * Do a dummy execvp to get the PLT entry resolved, - * so we avoid the resolver overhead on the real - * execvp call. 
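/*
 * Aside: a minimal, self-contained sketch of the fork/pipe "cork" pattern
 * that perf_evlist__prepare_workload() (here) and
 * perf_evlist__start_workload() (further down) implement.  The child blocks
 * on a read() from go_pipe until the parent, once the counters are set up,
 * writes a single byte to pop the cork.  All names below are local to the
 * sketch, not taken from the patch.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int go_pipe[2];
	char bf = 0;

	if (pipe(go_pipe) < 0)
		return 1;

	switch (fork()) {
	case -1:
		return 1;
	case 0:					/* child: wait for the cork to pop */
		close(go_pipe[1]);
		if (read(go_pipe[0], &bf, 1) != 1)
			_exit(1);
		execlp("true", "true", (char *)NULL);
		_exit(127);			/* exec failed */
	default:				/* parent: set up counters, then release */
		close(go_pipe[0]);
		if (write(go_pipe[1], &bf, 1) != 1)
			perror("write to go_pipe");
		close(go_pipe[1]);
		wait(NULL);
	}
	return 0;
}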
- */ - execvp("", (char **)argv); - - /* * Tell the parent we're ready to go */ close(child_ready_pipe[1]); @@ -779,12 +1077,26 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, execvp(argv[0], (char **)argv); - perror(argv[0]); - kill(getppid(), SIGUSR1); + if (exec_error) { + union sigval val; + + val.sival_int = errno; + if (sigqueue(getppid(), SIGUSR1, val)) + perror(argv[0]); + } else + perror(argv[0]); exit(-1); } - if (perf_target__none(&opts->target)) + if (exec_error) { + struct sigaction act = { + .sa_flags = SA_SIGINFO, + .sa_sigaction = exec_error, + }; + sigaction(SIGUSR1, &act, NULL); + } + + if (target__none(target)) evlist->threads->map[0] = evlist->workload.pid; close(child_ready_pipe[1]); @@ -797,6 +1109,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, goto out_close_pipes; } + fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); evlist->workload.cork_fd = go_pipe[1]; close(child_ready_pipe[0]); return 0; @@ -813,10 +1126,17 @@ out_close_ready_pipe: int perf_evlist__start_workload(struct perf_evlist *evlist) { if (evlist->workload.cork_fd > 0) { + char bf = 0; + int ret; /* * Remove the cork, let it rip! */ - return close(evlist->workload.cork_fd); + ret = write(evlist->workload.cork_fd, &bf, 1); + if (ret < 0) + perror("enable to write to pipe"); + + close(evlist->workload.cork_fd); + return ret; } return 0; @@ -825,7 +1145,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist) int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, struct perf_sample *sample) { - struct perf_evsel *evsel = perf_evlist__first(evlist); + struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); + + if (!evsel) + return -EFAULT; return perf_evsel__parse_sample(evsel, event, sample); } @@ -834,10 +1157,89 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) struct perf_evsel *evsel; size_t printed = 0; - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { printed += fprintf(fp, "%s%s", evsel->idx ? 
", " : "", perf_evsel__name(evsel)); } - return printed + fprintf(fp, "\n");; + return printed + fprintf(fp, "\n"); +} + +int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused, + int err, char *buf, size_t size) +{ + char sbuf[128]; + + switch (err) { + case ENOENT: + scnprintf(buf, size, "%s", + "Error:\tUnable to find debugfs\n" + "Hint:\tWas your kernel was compiled with debugfs support?\n" + "Hint:\tIs the debugfs filesystem mounted?\n" + "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'"); + break; + case EACCES: + scnprintf(buf, size, + "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n" + "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n", + debugfs_mountpoint, debugfs_mountpoint); + break; + default: + scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf))); + break; + } + + return 0; +} + +int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused, + int err, char *buf, size_t size) +{ + int printed, value; + char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); + + switch (err) { + case EACCES: + case EPERM: + printed = scnprintf(buf, size, + "Error:\t%s.\n" + "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); + + value = perf_event_paranoid(); + + printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); + + if (value >= 2) { + printed += scnprintf(buf + printed, size - printed, + "For your workloads it needs to be <= 1\nHint:\t"); + } + printed += scnprintf(buf + printed, size - printed, + "For system wide tracing it needs to be set to -1"); + + printed += scnprintf(buf + printed, size - printed, + ".\nHint:\tThe current value is %d.", value); + break; + default: + scnprintf(buf, size, "%s", emsg); + break; + } + + return 0; +} + +void perf_evlist__to_front(struct perf_evlist *evlist, + struct perf_evsel *move_evsel) +{ + struct perf_evsel *evsel, *n; + LIST_HEAD(move); + + if (move_evsel == perf_evlist__first(evlist)) + return; + + evlist__for_each_safe(evlist, n, evsel) { + if (evsel->leader == move_evsel->leader) + list_move_tail(&evsel->node, &move); + } + + list_splice(&move, &evlist->entries); } diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 56003f779e6..f5173cd6369 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -12,24 +12,34 @@ struct pollfd; struct thread_map; struct cpu_map; -struct perf_record_opts; +struct record_opts; #define PERF_EVLIST__HLIST_BITS 8 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) +struct perf_mmap { + void *base; + int mask; + unsigned int prev; + char event_copy[PERF_SAMPLE_MAX_SIZE]; +}; + struct perf_evlist { struct list_head entries; struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; int nr_entries; + int nr_groups; int nr_fds; int nr_mmaps; - int mmap_len; + size_t mmap_len; + int id_pos; + int is_pos; + u64 combined_sample_type; struct { int cork_fd; pid_t pid; } workload; bool overwrite; - union perf_event event_copy; struct perf_mmap *mmap; struct pollfd *pollfd; struct thread_map *threads; @@ -42,8 +52,8 @@ struct perf_evsel_str_handler { void *handler; }; -struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, - struct thread_map *threads); +struct perf_evlist *perf_evlist__new(void); +struct perf_evlist *perf_evlist__new_default(void); void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads); void perf_evlist__exit(struct perf_evlist *evlist); @@ -65,6 +75,10 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char 
*filter); struct perf_evsel * perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); +struct perf_evsel * +perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, + const char *name); + void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, u64 id); @@ -72,18 +86,31 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); -union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); +struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); + +union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); + +void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); int perf_evlist__open(struct perf_evlist *evlist); +void perf_evlist__close(struct perf_evlist *evlist); -void perf_evlist__config_attrs(struct perf_evlist *evlist, - struct perf_record_opts *opts); +void perf_evlist__set_id_pos(struct perf_evlist *evlist); +bool perf_can_sample_identifier(void); +void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts); +int record_opts__config(struct record_opts *opts); int perf_evlist__prepare_workload(struct perf_evlist *evlist, - struct perf_record_opts *opts, - const char *argv[]); + struct target *target, + const char *argv[], bool pipe_output, + void (*exec_error)(int signo, siginfo_t *info, + void *ucontext)); int perf_evlist__start_workload(struct perf_evlist *evlist); +int perf_evlist__parse_mmap_pages(const struct option *opt, + const char *str, + int unset); + int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, bool overwrite); void perf_evlist__munmap(struct perf_evlist *evlist); @@ -91,6 +118,11 @@ void perf_evlist__munmap(struct perf_evlist *evlist); void perf_evlist__disable(struct perf_evlist *evlist); void perf_evlist__enable(struct perf_evlist *evlist); +int perf_evlist__disable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel); +int perf_evlist__enable_event(struct perf_evlist *evlist, + struct perf_evsel *evsel); + void perf_evlist__set_selected(struct perf_evlist *evlist, struct perf_evsel *evsel); @@ -102,15 +134,15 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, evlist->threads = threads; } -int perf_evlist__create_maps(struct perf_evlist *evlist, - struct perf_target *target); -void perf_evlist__delete_maps(struct perf_evlist *evlist); +int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); int perf_evlist__apply_filters(struct perf_evlist *evlist); void __perf_evlist__set_leader(struct list_head *list); void perf_evlist__set_leader(struct perf_evlist *evlist); -u64 perf_evlist__sample_type(struct perf_evlist *evlist); +u64 perf_evlist__read_format(struct perf_evlist *evlist); +u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist); +u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist); bool perf_evlist__sample_id_all(struct perf_evlist *evlist); u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); @@ -119,6 +151,7 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); +bool perf_evlist__valid_read_format(struct perf_evlist *evlist); void perf_evlist__splice_list_tail(struct perf_evlist *evlist, struct list_head *list, @@ -135,4 +168,98 @@ static inline struct 
perf_evsel *perf_evlist__last(struct perf_evlist *evlist) } size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); + +int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size); +int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); + +static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm) +{ + struct perf_event_mmap_page *pc = mm->base; + int head = ACCESS_ONCE(pc->data_head); + rmb(); + return head; +} + +static inline void perf_mmap__write_tail(struct perf_mmap *md, + unsigned long tail) +{ + struct perf_event_mmap_page *pc = md->base; + + /* + * ensure all reads are done before we write the tail out. + */ + mb(); + pc->data_tail = tail; +} + +bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); +void perf_evlist__to_front(struct perf_evlist *evlist, + struct perf_evsel *move_evsel); + +/** + * __evlist__for_each - iterate thru all the evsels + * @list: list_head instance to iterate + * @evsel: struct evsel iterator + */ +#define __evlist__for_each(list, evsel) \ + list_for_each_entry(evsel, list, node) + +/** + * evlist__for_each - iterate thru all the evsels + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + */ +#define evlist__for_each(evlist, evsel) \ + __evlist__for_each(&(evlist)->entries, evsel) + +/** + * __evlist__for_each_continue - continue iteration thru all the evsels + * @list: list_head instance to iterate + * @evsel: struct evsel iterator + */ +#define __evlist__for_each_continue(list, evsel) \ + list_for_each_entry_continue(evsel, list, node) + +/** + * evlist__for_each_continue - continue iteration thru all the evsels + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + */ +#define evlist__for_each_continue(evlist, evsel) \ + __evlist__for_each_continue(&(evlist)->entries, evsel) + +/** + * __evlist__for_each_reverse - iterate thru all the evsels in reverse order + * @list: list_head instance to iterate + * @evsel: struct evsel iterator + */ +#define __evlist__for_each_reverse(list, evsel) \ + list_for_each_entry_reverse(evsel, list, node) + +/** + * evlist__for_each_reverse - iterate thru all the evsels in reverse order + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + */ +#define evlist__for_each_reverse(evlist, evsel) \ + __evlist__for_each_reverse(&(evlist)->entries, evsel) + +/** + * __evlist__for_each_safe - safely iterate thru all the evsels + * @list: list_head instance to iterate + * @tmp: struct evsel temp iterator + * @evsel: struct evsel iterator + */ +#define __evlist__for_each_safe(list, tmp, evsel) \ + list_for_each_entry_safe(evsel, tmp, list, node) + +/** + * evlist__for_each_safe - safely iterate thru all the evsels + * @evlist: evlist instance to iterate + * @evsel: struct evsel iterator + * @tmp: struct evsel temp iterator + */ +#define evlist__for_each_safe(evlist, tmp, evsel) \ + __evlist__for_each_safe(&(evlist)->entries, tmp, evsel) + #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 618d41140ab..8606175fe1e 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -9,22 +9,31 @@ #include <byteswap.h> #include <linux/bitops.h> +#include <api/fs/debugfs.h> +#include <traceevent/event-parse.h> +#include <linux/hw_breakpoint.h> +#include <linux/perf_event.h> +#include <sys/resource.h> #include "asm/bug.h" -#include "debugfs.h" -#include "event-parse.h" #include "evsel.h" #include "evlist.h" 
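/*
 * Aside: a standalone restatement of the ring-buffer protocol behind
 * perf_mmap__read_head()/perf_mmap__write_tail() above, using the GCC/Clang
 * __atomic builtins in place of the kernel's rmb()/mb() macros.  The consumer
 * loads data_head with acquire semantics before touching the records and only
 * publishes data_tail with release semantics once it is done with them.
 * Wrap-around of a single record across the end of the buffer is ignored here
 * for brevity; perf_evlist__mmap_read() handles that by copying into
 * md->event_copy.
 */
#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t ring_read_head(struct perf_event_mmap_page *pc)
{
	/* acquire: data written before the kernel advanced data_head is visible */
	return __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
}

static void ring_write_tail(struct perf_event_mmap_page *pc, uint64_t tail)
{
	/* release: our reads finish before the kernel may overwrite the space */
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}

/* base points at the first data page after the control page, mask is the
 * data area size minus one (the area is a power of two of pages). */
static void ring_consume(struct perf_event_mmap_page *pc, unsigned char *base,
			 uint64_t mask,
			 void (*cb)(const struct perf_event_header *))
{
	uint64_t head = ring_read_head(pc);
	uint64_t tail = pc->data_tail;

	while (tail < head) {
		const struct perf_event_header *hdr =
			(const struct perf_event_header *)(base + (tail & mask));
		cb(hdr);
		tail += hdr->size;
	}
	ring_write_tail(pc, tail);
}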
#include "util.h" #include "cpumap.h" #include "thread_map.h" #include "target.h" -#include "../../../include/linux/hw_breakpoint.h" -#include "../../../include/uapi/linux/perf_event.h" #include "perf_regs.h" +#include "debug.h" +#include "trace-event.h" + +static struct { + bool sample_id_all; + bool exclude_guest; + bool mmap2; +} perf_missing_features; #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) -static int __perf_evsel__sample_size(u64 sample_type) +int __perf_evsel__sample_size(u64 sample_type) { u64 mask = sample_type & PERF_SAMPLE_MASK; int size = 0; @@ -40,6 +49,72 @@ static int __perf_evsel__sample_size(u64 sample_type) return size; } +/** + * __perf_evsel__calc_id_pos - calculate id_pos. + * @sample_type: sample type + * + * This function returns the position of the event id (PERF_SAMPLE_ID or + * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct + * sample_event. + */ +static int __perf_evsel__calc_id_pos(u64 sample_type) +{ + int idx = 0; + + if (sample_type & PERF_SAMPLE_IDENTIFIER) + return 0; + + if (!(sample_type & PERF_SAMPLE_ID)) + return -1; + + if (sample_type & PERF_SAMPLE_IP) + idx += 1; + + if (sample_type & PERF_SAMPLE_TID) + idx += 1; + + if (sample_type & PERF_SAMPLE_TIME) + idx += 1; + + if (sample_type & PERF_SAMPLE_ADDR) + idx += 1; + + return idx; +} + +/** + * __perf_evsel__calc_is_pos - calculate is_pos. + * @sample_type: sample type + * + * This function returns the position (counting backwards) of the event id + * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if + * sample_id_all is used there is an id sample appended to non-sample events. + */ +static int __perf_evsel__calc_is_pos(u64 sample_type) +{ + int idx = 1; + + if (sample_type & PERF_SAMPLE_IDENTIFIER) + return 1; + + if (!(sample_type & PERF_SAMPLE_ID)) + return -1; + + if (sample_type & PERF_SAMPLE_CPU) + idx += 1; + + if (sample_type & PERF_SAMPLE_STREAM_ID) + idx += 1; + + return idx; +} + +void perf_evsel__calc_id_pos(struct perf_evsel *evsel) +{ + evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type); + evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type); +} + void hists__init(struct hists *hists) { memset(hists, 0, sizeof(*hists)); @@ -50,17 +125,53 @@ void hists__init(struct hists *hists) pthread_mutex_init(&hists->lock, NULL); } +void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit) +{ + if (!(evsel->attr.sample_type & bit)) { + evsel->attr.sample_type |= bit; + evsel->sample_size += sizeof(u64); + perf_evsel__calc_id_pos(evsel); + } +} + +void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit) +{ + if (evsel->attr.sample_type & bit) { + evsel->attr.sample_type &= ~bit; + evsel->sample_size -= sizeof(u64); + perf_evsel__calc_id_pos(evsel); + } +} + +void perf_evsel__set_sample_id(struct perf_evsel *evsel, + bool can_sample_identifier) +{ + if (can_sample_identifier) { + perf_evsel__reset_sample_bit(evsel, ID); + perf_evsel__set_sample_bit(evsel, IDENTIFIER); + } else { + perf_evsel__set_sample_bit(evsel, ID); + } + evsel->attr.read_format |= PERF_FORMAT_ID; +} + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx) { evsel->idx = idx; evsel->attr = *attr; + evsel->leader = evsel; + evsel->unit = ""; + evsel->scale = 1.0; INIT_LIST_HEAD(&evsel->node); hists__init(&evsel->hists); evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); + perf_evsel__calc_id_pos(evsel); } -struct 
perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) +struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) { struct perf_evsel *evsel = zalloc(sizeof(*evsel)); @@ -70,48 +181,7 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) return evsel; } -struct event_format *event_format__new(const char *sys, const char *name) -{ - int fd, n; - char *filename; - void *bf = NULL, *nbf; - size_t size = 0, alloc_size = 0; - struct event_format *format = NULL; - - if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0) - goto out; - - fd = open(filename, O_RDONLY); - if (fd < 0) - goto out_free_filename; - - do { - if (size == alloc_size) { - alloc_size += BUFSIZ; - nbf = realloc(bf, alloc_size); - if (nbf == NULL) - goto out_free_bf; - bf = nbf; - } - - n = read(fd, bf + size, BUFSIZ); - if (n < 0) - goto out_free_bf; - size += n; - } while (n > 0); - - pevent_parse_format(&format, bf, size, sys); - -out_free_bf: - free(bf); - close(fd); -out_free_filename: - free(filename); -out: - return format; -} - -struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) +struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx) { struct perf_evsel *evsel = zalloc(sizeof(*evsel)); @@ -125,7 +195,7 @@ struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) goto out_free; - evsel->tp_format = event_format__new(sys, name); + evsel->tp_format = trace_event__tp_format(sys, name); if (evsel->tp_format == NULL) goto out_free; @@ -138,7 +208,7 @@ struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) return evsel; out_free: - free(evsel->name); + zfree(&evsel->name); free(evsel); return NULL; } @@ -216,6 +286,7 @@ const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { "major-faults", "alignment-faults", "emulation-faults", + "dummy", }; static const char *__perf_evsel__sw_name(u64 config) @@ -404,29 +475,127 @@ const char *perf_evsel__name(struct perf_evsel *evsel) return evsel->name ?: "unknown"; } -void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, - struct perf_evsel *first) +const char *perf_evsel__group_name(struct perf_evsel *evsel) { + return evsel->group_name ?: "anon group"; +} + +int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size) +{ + int ret; + struct perf_evsel *pos; + const char *group_name = perf_evsel__group_name(evsel); + + ret = scnprintf(buf, size, "%s", group_name); + + ret += scnprintf(buf + ret, size - ret, " { %s", + perf_evsel__name(evsel)); + + for_each_group_member(pos, evsel) + ret += scnprintf(buf + ret, size - ret, ", %s", + perf_evsel__name(pos)); + + ret += scnprintf(buf + ret, size - ret, " }"); + + return ret; +} + +static void +perf_evsel__config_callgraph(struct perf_evsel *evsel, + struct record_opts *opts) +{ + bool function = perf_evsel__is_function_event(evsel); + struct perf_event_attr *attr = &evsel->attr; + + perf_evsel__set_sample_bit(evsel, CALLCHAIN); + + if (opts->call_graph == CALLCHAIN_DWARF) { + if (!function) { + perf_evsel__set_sample_bit(evsel, REGS_USER); + perf_evsel__set_sample_bit(evsel, STACK_USER); + attr->sample_regs_user = PERF_REGS_MASK; + attr->sample_stack_user = opts->stack_dump_size; + attr->exclude_callchain_user = 1; + } else { + pr_info("Cannot use DWARF unwind for function trace event," + " falling back to framepointers.\n"); + } + } + + if (function) { + 
pr_info("Disabling user space callchains for function trace event.\n"); + attr->exclude_callchain_user = 1; + } +} + +/* + * The enable_on_exec/disabled value strategy: + * + * 1) For any type of traced program: + * - all independent events and group leaders are disabled + * - all group members are enabled + * + * Group members are ruled by group leaders. They need to + * be enabled, because the group scheduling relies on that. + * + * 2) For traced programs executed by perf: + * - all independent events and group leaders have + * enable_on_exec set + * - we don't specifically enable or disable any event during + * the record command + * + * Independent events and group leaders are initially disabled + * and get enabled by exec. Group members are ruled by group + * leaders as stated in 1). + * + * 3) For traced programs attached by perf (pid/tid): + * - we specifically enable or disable all events during + * the record command + * + * When attaching events to already running traced we + * enable/disable events specifically, as there's no + * initial traced exec call. + */ +void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) +{ + struct perf_evsel *leader = evsel->leader; struct perf_event_attr *attr = &evsel->attr; int track = !evsel->idx; /* only the first counter needs these */ + bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread; - attr->disabled = 1; - attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; + attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1; attr->inherit = !opts->no_inherit; - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | - PERF_FORMAT_TOTAL_TIME_RUNNING | - PERF_FORMAT_ID; - attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; + perf_evsel__set_sample_bit(evsel, IP); + perf_evsel__set_sample_bit(evsel, TID); + + if (evsel->sample_read) { + perf_evsel__set_sample_bit(evsel, READ); + + /* + * We need ID even in case of single event, because + * PERF_SAMPLE_READ process ID specific data. + */ + perf_evsel__set_sample_id(evsel, false); + + /* + * Apply group format only if we belong to group + * with more than one members. + */ + if (leader->nr_members > 1) { + attr->read_format |= PERF_FORMAT_GROUP; + attr->inherit = 0; + } + } /* - * We default some events to a 1 default interval. But keep + * We default some events to have a default interval. But keep * it a weak assumption overridable by the user. */ - if (!attr->sample_period || (opts->user_freq != UINT_MAX && + if (!attr->sample_period || (opts->user_freq != UINT_MAX || opts->user_interval != ULLONG_MAX)) { if (opts->freq) { - attr->sample_type |= PERF_SAMPLE_PERIOD; + perf_evsel__set_sample_bit(evsel, PERIOD); attr->freq = 1; attr->sample_freq = opts->freq; } else { @@ -434,6 +603,15 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, } } + /* + * Disable sampling for all group members other + * than leader in case leader 'leads' the sampling. 
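/*
 * Aside: the enable_on_exec/disabled strategy documented above, condensed
 * into a standalone helper.  'is_group_leader' stands in for
 * perf_evsel__is_group_leader() and 'forked_by_perf' for target__none();
 * both names and the helper itself are local to this sketch.
 */
#include <stdbool.h>
#include <linux/perf_event.h>

static void config_enable_strategy(struct perf_event_attr *attr,
				   bool is_group_leader, bool forked_by_perf,
				   bool initial_delay)
{
	/* 1) group members stay enabled; leaders and independent events
	 *    start out disabled, since the leader drives group scheduling */
	attr->disabled = is_group_leader ? 1 : 0;

	/* 2) when perf execs the workload itself, let exec() enable the
	 *    leaders rather than doing it from the record command */
	if (forked_by_perf && is_group_leader && !initial_delay)
		attr->enable_on_exec = 1;

	/* 3) when attaching to an already running task (pid/tid) neither bit
	 *    helps: the record command enables/disables the events explicitly
	 *    through the PERF_EVENT_IOC_ENABLE/DISABLE ioctls instead */
}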
+ */ + if ((leader != evsel) && leader->sample_read) { + attr->sample_freq = 0; + attr->sample_period = 0; + } + if (opts->no_samples) attr->sample_freq = 0; @@ -441,55 +619,68 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, attr->inherit_stat = 1; if (opts->sample_address) { - attr->sample_type |= PERF_SAMPLE_ADDR; + perf_evsel__set_sample_bit(evsel, ADDR); attr->mmap_data = track; } - if (opts->call_graph) { - attr->sample_type |= PERF_SAMPLE_CALLCHAIN; - - if (opts->call_graph == CALLCHAIN_DWARF) { - attr->sample_type |= PERF_SAMPLE_REGS_USER | - PERF_SAMPLE_STACK_USER; - attr->sample_regs_user = PERF_REGS_MASK; - attr->sample_stack_user = opts->stack_dump_size; - attr->exclude_callchain_user = 1; - } - } + if (opts->call_graph_enabled) + perf_evsel__config_callgraph(evsel, opts); - if (perf_target__has_cpu(&opts->target)) - attr->sample_type |= PERF_SAMPLE_CPU; + if (target__has_cpu(&opts->target)) + perf_evsel__set_sample_bit(evsel, CPU); if (opts->period) - attr->sample_type |= PERF_SAMPLE_PERIOD; + perf_evsel__set_sample_bit(evsel, PERIOD); - if (!opts->sample_id_all_missing && + if (!perf_missing_features.sample_id_all && (opts->sample_time || !opts->no_inherit || - perf_target__has_cpu(&opts->target))) - attr->sample_type |= PERF_SAMPLE_TIME; + target__has_cpu(&opts->target) || per_cpu)) + perf_evsel__set_sample_bit(evsel, TIME); if (opts->raw_samples) { - attr->sample_type |= PERF_SAMPLE_TIME; - attr->sample_type |= PERF_SAMPLE_RAW; - attr->sample_type |= PERF_SAMPLE_CPU; + perf_evsel__set_sample_bit(evsel, TIME); + perf_evsel__set_sample_bit(evsel, RAW); + perf_evsel__set_sample_bit(evsel, CPU); } - if (opts->no_delay) { + if (opts->sample_address) + perf_evsel__set_sample_bit(evsel, DATA_SRC); + + if (opts->no_buffering) { attr->watermark = 0; attr->wakeup_events = 1; } if (opts->branch_stack) { - attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; + perf_evsel__set_sample_bit(evsel, BRANCH_STACK); attr->branch_sample_type = opts->branch_stack; } - attr->mmap = track; - attr->comm = track; + if (opts->sample_weight) + perf_evsel__set_sample_bit(evsel, WEIGHT); - if (perf_target__none(&opts->target) && - (!opts->group || evsel == first)) { + attr->mmap = track; + attr->mmap2 = track && !perf_missing_features.mmap2; + attr->comm = track; + + if (opts->sample_transaction) + perf_evsel__set_sample_bit(evsel, TRANSACTION); + + /* + * XXX see the function comment above + * + * Disabling only independent events or group leaders, + * keeping group members enabled. + */ + if (perf_evsel__is_group_leader(evsel)) + attr->disabled = 1; + + /* + * Setting enable_on_exec for independent events and + * group leaders for traced executed by perf. + */ + if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) && + !opts->initial_delay) attr->enable_on_exec = 1; - } } int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) @@ -508,15 +699,15 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) return evsel->fd != NULL ? 
0 : -ENOMEM; } -int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, - const char *filter) +static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads, + int ioc, void *arg) { int cpu, thread; for (cpu = 0; cpu < ncpus; cpu++) { for (thread = 0; thread < nthreads; thread++) { int fd = FD(evsel, cpu, thread), - err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); + err = ioctl(fd, ioc, arg); if (err) return err; @@ -526,6 +717,21 @@ int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, return 0; } +int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter) +{ + return perf_evsel__run_ioctl(evsel, ncpus, nthreads, + PERF_EVENT_IOC_SET_FILTER, + (void *)filter); +} + +int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) +{ + return perf_evsel__run_ioctl(evsel, ncpus, nthreads, + PERF_EVENT_IOC_ENABLE, + 0); +} + int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) { evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); @@ -542,6 +748,12 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) return 0; } +void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus) +{ + memset(evsel->counts, 0, (sizeof(*evsel->counts) + + (ncpus * sizeof(struct perf_counts_values)))); +} + int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) { evsel->counts = zalloc((sizeof(*evsel->counts) + @@ -559,8 +771,7 @@ void perf_evsel__free_id(struct perf_evsel *evsel) { xyarray__delete(evsel->sample_id); evsel->sample_id = NULL; - free(evsel->id); - evsel->id = NULL; + zfree(&evsel->id); } void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) @@ -574,25 +785,51 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) } } +void perf_evsel__free_counts(struct perf_evsel *evsel) +{ + zfree(&evsel->counts); +} + void perf_evsel__exit(struct perf_evsel *evsel) { assert(list_empty(&evsel->node)); - xyarray__delete(evsel->fd); - xyarray__delete(evsel->sample_id); - free(evsel->id); + perf_evsel__free_fd(evsel); + perf_evsel__free_id(evsel); } void perf_evsel__delete(struct perf_evsel *evsel) { perf_evsel__exit(evsel); close_cgroup(evsel->cgrp); - free(evsel->group_name); + zfree(&evsel->group_name); if (evsel->tp_format) pevent_free_format(evsel->tp_format); - free(evsel->name); + zfree(&evsel->name); free(evsel); } +static inline void compute_deltas(struct perf_evsel *evsel, + int cpu, + struct perf_counts_values *count) +{ + struct perf_counts_values tmp; + + if (!evsel->prev_raw_counts) + return; + + if (cpu == -1) { + tmp = evsel->prev_raw_counts->aggr; + evsel->prev_raw_counts->aggr = *count; + } else { + tmp = evsel->prev_raw_counts->cpu[cpu]; + evsel->prev_raw_counts->cpu[cpu] = *count; + } + + count->val = count->val - tmp.val; + count->ena = count->ena - tmp.ena; + count->run = count->run - tmp.run; +} + int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread, bool scale) { @@ -608,6 +845,8 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) return -errno; + compute_deltas(evsel, cpu, &count); + if (scale) { if (count.run == 0) count.val = 0; @@ -646,6 +885,8 @@ int __perf_evsel__read(struct perf_evsel *evsel, } } + compute_deltas(evsel, -1, aggr); + evsel->counts->scaled = 0; if (scale) { if (aggr->run == 0) { @@ -669,7 +910,7 @@ static int 
get_group_fd(struct perf_evsel *evsel, int cpu, int thread) struct perf_evsel *leader = evsel->leader; int fd; - if (!leader) + if (perf_evsel__is_group_leader(evsel)) return -1; /* @@ -684,12 +925,73 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) return fd; } +#define __PRINT_ATTR(fmt, cast, field) \ + fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field) + +#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field) +#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field) +#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field) +#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field) + +#define PRINT_ATTR2N(name1, field1, name2, field2) \ + fprintf(fp, " %-19s %u %-19s %u\n", \ + name1, attr->field1, name2, attr->field2) + +#define PRINT_ATTR2(field1, field2) \ + PRINT_ATTR2N(#field1, field1, #field2, field2) + +static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp) +{ + size_t ret = 0; + + ret += fprintf(fp, "%.60s\n", graph_dotted_line); + ret += fprintf(fp, "perf_event_attr:\n"); + + ret += PRINT_ATTR_U32(type); + ret += PRINT_ATTR_U32(size); + ret += PRINT_ATTR_X64(config); + ret += PRINT_ATTR_U64(sample_period); + ret += PRINT_ATTR_U64(sample_freq); + ret += PRINT_ATTR_X64(sample_type); + ret += PRINT_ATTR_X64(read_format); + + ret += PRINT_ATTR2(disabled, inherit); + ret += PRINT_ATTR2(pinned, exclusive); + ret += PRINT_ATTR2(exclude_user, exclude_kernel); + ret += PRINT_ATTR2(exclude_hv, exclude_idle); + ret += PRINT_ATTR2(mmap, comm); + ret += PRINT_ATTR2(freq, inherit_stat); + ret += PRINT_ATTR2(enable_on_exec, task); + ret += PRINT_ATTR2(watermark, precise_ip); + ret += PRINT_ATTR2(mmap_data, sample_id_all); + ret += PRINT_ATTR2(exclude_host, exclude_guest); + ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel, + "excl.callchain_user", exclude_callchain_user); + ret += PRINT_ATTR_U32(mmap2); + + ret += PRINT_ATTR_U32(wakeup_events); + ret += PRINT_ATTR_U32(wakeup_watermark); + ret += PRINT_ATTR_X32(bp_type); + ret += PRINT_ATTR_X64(bp_addr); + ret += PRINT_ATTR_X64(config1); + ret += PRINT_ATTR_U64(bp_len); + ret += PRINT_ATTR_X64(config2); + ret += PRINT_ATTR_X64(branch_sample_type); + ret += PRINT_ATTR_X64(sample_regs_user); + ret += PRINT_ATTR_U32(sample_stack_user); + + ret += fprintf(fp, "%.60s\n", graph_dotted_line); + + return ret; +} + static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, struct thread_map *threads) { int cpu, thread; unsigned long flags = 0; int pid = -1, err; + enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) @@ -700,6 +1002,18 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, pid = evsel->cgrp->fd; } +fallback_missing_features: + if (perf_missing_features.mmap2) + evsel->attr.mmap2 = 0; + if (perf_missing_features.exclude_guest) + evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; +retry_sample_id: + if (perf_missing_features.sample_id_all) + evsel->attr.sample_id_all = 0; + + if (verbose >= 2) + perf_event_attr__fprintf(&evsel->attr, stderr); + for (cpu = 0; cpu < cpus->nr; cpu++) { for (thread = 0; thread < threads->nr; thread++) { @@ -709,6 +1023,9 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, pid = threads->map[thread]; group_fd = get_group_fd(evsel, cpu, thread); +retry_open: + pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags 
%#lx\n", + pid, cpus->map[cpu], group_fd, flags); FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, pid, @@ -716,13 +1033,56 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, group_fd, flags); if (FD(evsel, cpu, thread) < 0) { err = -errno; - goto out_close; + pr_debug2("sys_perf_event_open failed, error %d\n", + err); + goto try_fallback; } + set_rlimit = NO_CHANGE; } } return 0; +try_fallback: + /* + * perf stat needs between 5 and 22 fds per CPU. When we run out + * of them try to increase the limits. + */ + if (err == -EMFILE && set_rlimit < INCREASED_MAX) { + struct rlimit l; + int old_errno = errno; + + if (getrlimit(RLIMIT_NOFILE, &l) == 0) { + if (set_rlimit == NO_CHANGE) + l.rlim_cur = l.rlim_max; + else { + l.rlim_cur = l.rlim_max + 1000; + l.rlim_max = l.rlim_cur; + } + if (setrlimit(RLIMIT_NOFILE, &l) == 0) { + set_rlimit++; + errno = old_errno; + goto retry_open; + } + } + errno = old_errno; + } + + if (err != -EINVAL || cpu > 0 || thread > 0) + goto out_close; + + if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { + perf_missing_features.mmap2 = true; + goto fallback_missing_features; + } else if (!perf_missing_features.exclude_guest && + (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { + perf_missing_features.exclude_guest = true; + goto fallback_missing_features; + } else if (!perf_missing_features.sample_id_all) { + perf_missing_features.sample_id_all = true; + goto retry_sample_id; + } + out_close: do { while (--thread >= 0) { @@ -741,7 +1101,6 @@ void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads) perf_evsel__close_fd(evsel, ncpus, nthreads); perf_evsel__free_fd(evsel); - evsel->fd = NULL; } static struct { @@ -798,6 +1157,11 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, array += ((event->header.size - sizeof(event->header)) / sizeof(u64)) - 1; + if (type & PERF_SAMPLE_IDENTIFIER) { + sample->id = *array; + array--; + } + if (type & PERF_SAMPLE_CPU) { u.val64 = *array; if (swapped) { @@ -836,29 +1200,36 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, sample->pid = u.val32[0]; sample->tid = u.val32[1]; + array--; } return 0; } -static bool sample_overlap(const union perf_event *event, - const void *offset, u64 size) +static inline bool overflow(const void *endp, u16 max_size, const void *offset, + u64 size) { - const void *base = event; + return size > max_size || offset + size > endp; +} - if (offset + size > base + event->header.size) - return true; +#define OVERFLOW_CHECK(offset, size, max_size) \ + do { \ + if (overflow(endp, (max_size), (offset), (size))) \ + return -EFAULT; \ + } while (0) - return false; -} +#define OVERFLOW_CHECK_u64(offset) \ + OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, struct perf_sample *data) { u64 type = evsel->attr.sample_type; - u64 regs_user = evsel->attr.sample_regs_user; bool swapped = evsel->needs_swap; const u64 *array; + u16 max_size = event->header.size; + const void *endp = (void *)event + max_size; + u64 sz; /* * used for cross-endian analysis. 
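/*
 * Aside: a standalone sketch of the EMFILE fallback used in
 * __perf_evsel__open() above.  perf stat needs between 5 and 22 fds per CPU,
 * so when the open fails with EMFILE the soft RLIMIT_NOFILE is raised to the
 * hard limit and, on a second failure, past it (which only succeeds for
 * privileged processes) before retrying.  'open_counter' is a placeholder
 * for the sys_perf_event_open() call, local to this sketch.
 */
#include <errno.h>
#include <sys/resource.h>

static int open_with_rlimit_retry(int (*open_counter)(void))
{
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
	int fd;

	while ((fd = open_counter()) < 0 && errno == EMFILE &&
	       set_rlimit < INCREASED_MAX) {
		struct rlimit l;

		if (getrlimit(RLIMIT_NOFILE, &l) != 0)
			break;
		if (set_rlimit == NO_CHANGE) {
			l.rlim_cur = l.rlim_max;	  /* soft -> hard limit */
		} else {
			l.rlim_cur = l.rlim_max + 1000;   /* needs privilege */
			l.rlim_max = l.rlim_cur;
		}
		if (setrlimit(RLIMIT_NOFILE, &l) != 0)
			break;
		set_rlimit++;
	}
	return fd;
}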
See git commit 65014ab3 @@ -869,7 +1240,8 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, memset(data, 0, sizeof(*data)); data->cpu = data->pid = data->tid = -1; data->stream_id = data->id = data->time = -1ULL; - data->period = 1; + data->period = evsel->attr.sample_period; + data->weight = 0; if (event->header.type != PERF_RECORD_SAMPLE) { if (!evsel->attr.sample_id_all) @@ -879,11 +1251,22 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, array = event->sample.array; + /* + * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes + * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to + * check the format does not go past the end of the event. + */ if (evsel->sample_size + sizeof(event->header) > event->header.size) return -EFAULT; + data->id = -1ULL; + if (type & PERF_SAMPLE_IDENTIFIER) { + data->id = *array; + array++; + } + if (type & PERF_SAMPLE_IP) { - data->ip = event->ip.ip; + data->ip = *array; array++; } @@ -912,7 +1295,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, array++; } - data->id = -1ULL; if (type & PERF_SAMPLE_ID) { data->id = *array; array++; @@ -942,25 +1324,62 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, } if (type & PERF_SAMPLE_READ) { - fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n"); - return -1; + u64 read_format = evsel->attr.read_format; + + OVERFLOW_CHECK_u64(array); + if (read_format & PERF_FORMAT_GROUP) + data->read.group.nr = *array; + else + data->read.one.value = *array; + + array++; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + OVERFLOW_CHECK_u64(array); + data->read.time_enabled = *array; + array++; + } + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + OVERFLOW_CHECK_u64(array); + data->read.time_running = *array; + array++; + } + + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + const u64 max_group_nr = UINT64_MAX / + sizeof(struct sample_read_value); + + if (data->read.group.nr > max_group_nr) + return -EFAULT; + sz = data->read.group.nr * + sizeof(struct sample_read_value); + OVERFLOW_CHECK(array, sz, max_size); + data->read.group.values = + (struct sample_read_value *)array; + array = (void *)array + sz; + } else { + OVERFLOW_CHECK_u64(array); + data->read.one.id = *array; + array++; + } } if (type & PERF_SAMPLE_CALLCHAIN) { - if (sample_overlap(event, array, sizeof(data->callchain->nr))) - return -EFAULT; - - data->callchain = (struct ip_callchain *)array; + const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); - if (sample_overlap(event, array, data->callchain->nr)) + OVERFLOW_CHECK_u64(array); + data->callchain = (struct ip_callchain *)array++; + if (data->callchain->nr > max_callchain_nr) return -EFAULT; - - array += 1 + data->callchain->nr; + sz = data->callchain->nr * sizeof(u64); + OVERFLOW_CHECK(array, sz, max_size); + array = (void *)array + sz; } if (type & PERF_SAMPLE_RAW) { - const u64 *pdata; - + OVERFLOW_CHECK_u64(array); u.val64 = *array; if (WARN_ONCE(swapped, "Endianness of raw data not corrected!\n")) { @@ -969,66 +1388,191 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, u.val32[0] = bswap_32(u.val32[0]); u.val32[1] = bswap_32(u.val32[1]); } - - if (sample_overlap(event, array, sizeof(u32))) - return -EFAULT; - data->raw_size = u.val32[0]; - pdata = (void *) array + sizeof(u32); - - if (sample_overlap(event, pdata, data->raw_size)) - return -EFAULT; + 
array = (void *)array + sizeof(u32); - data->raw_data = (void *) pdata; - - array = (void *)array + data->raw_size + sizeof(u32); + OVERFLOW_CHECK(array, data->raw_size, max_size); + data->raw_data = (void *)array; + array = (void *)array + data->raw_size; } if (type & PERF_SAMPLE_BRANCH_STACK) { - u64 sz; + const u64 max_branch_nr = UINT64_MAX / + sizeof(struct branch_entry); - data->branch_stack = (struct branch_stack *)array; - array++; /* nr */ + OVERFLOW_CHECK_u64(array); + data->branch_stack = (struct branch_stack *)array++; + if (data->branch_stack->nr > max_branch_nr) + return -EFAULT; sz = data->branch_stack->nr * sizeof(struct branch_entry); - sz /= sizeof(u64); - array += sz; + OVERFLOW_CHECK(array, sz, max_size); + array = (void *)array + sz; } if (type & PERF_SAMPLE_REGS_USER) { - /* First u64 tells us if we have any regs in sample. */ - u64 avail = *array++; + OVERFLOW_CHECK_u64(array); + data->user_regs.abi = *array; + array++; - if (avail) { + if (data->user_regs.abi) { + u64 mask = evsel->attr.sample_regs_user; + + sz = hweight_long(mask) * sizeof(u64); + OVERFLOW_CHECK(array, sz, max_size); + data->user_regs.mask = mask; data->user_regs.regs = (u64 *)array; - array += hweight_long(regs_user); + array = (void *)array + sz; } } if (type & PERF_SAMPLE_STACK_USER) { - u64 size = *array++; + OVERFLOW_CHECK_u64(array); + sz = *array++; data->user_stack.offset = ((char *)(array - 1) - (char *) event); - if (!size) { + if (!sz) { data->user_stack.size = 0; } else { + OVERFLOW_CHECK(array, sz, max_size); data->user_stack.data = (char *)array; - array += size / sizeof(*array); - data->user_stack.size = *array; + array = (void *)array + sz; + OVERFLOW_CHECK_u64(array); + data->user_stack.size = *array++; + if (WARN_ONCE(data->user_stack.size > sz, + "user stack dump failure\n")) + return -EFAULT; } } + data->weight = 0; + if (type & PERF_SAMPLE_WEIGHT) { + OVERFLOW_CHECK_u64(array); + data->weight = *array; + array++; + } + + data->data_src = PERF_MEM_DATA_SRC_NONE; + if (type & PERF_SAMPLE_DATA_SRC) { + OVERFLOW_CHECK_u64(array); + data->data_src = *array; + array++; + } + + data->transaction = 0; + if (type & PERF_SAMPLE_TRANSACTION) { + OVERFLOW_CHECK_u64(array); + data->transaction = *array; + array++; + } + return 0; } +size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, + u64 read_format) +{ + size_t sz, result = sizeof(struct sample_event); + + if (type & PERF_SAMPLE_IDENTIFIER) + result += sizeof(u64); + + if (type & PERF_SAMPLE_IP) + result += sizeof(u64); + + if (type & PERF_SAMPLE_TID) + result += sizeof(u64); + + if (type & PERF_SAMPLE_TIME) + result += sizeof(u64); + + if (type & PERF_SAMPLE_ADDR) + result += sizeof(u64); + + if (type & PERF_SAMPLE_ID) + result += sizeof(u64); + + if (type & PERF_SAMPLE_STREAM_ID) + result += sizeof(u64); + + if (type & PERF_SAMPLE_CPU) + result += sizeof(u64); + + if (type & PERF_SAMPLE_PERIOD) + result += sizeof(u64); + + if (type & PERF_SAMPLE_READ) { + result += sizeof(u64); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + result += sizeof(u64); + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + result += sizeof(u64); + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + sz = sample->read.group.nr * + sizeof(struct sample_read_value); + result += sz; + } else { + result += sizeof(u64); + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + sz = (sample->callchain->nr + 1) * sizeof(u64); + result += sz; + } + + if (type & PERF_SAMPLE_RAW) { + result += 
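The PERF_SAMPLE_STACK_USER handling above reads a leading u64 giving the number of bytes reserved for the dump, then that many raw stack bytes, then a trailing u64 with how many bytes were actually dumped, which must not exceed the reserved size. The sketch below only illustrates that wire layout with plain memcpy; the buffer and helper names are invented for the example.

#include <stdint.h>
#include <string.h>
#include <assert.h>

/*
 * Sketch of the PERF_SAMPLE_STACK_USER layout handled above:
 *   u64 size;        bytes reserved for the dump
 *   u8  data[size];  raw user stack bytes
 *   u64 dump_size;   bytes actually dumped (<= size)
 */
static size_t encode_user_stack(void *buf, const void *stack,
				uint64_t size, uint64_t dump_size)
{
	char *p = buf;

	memcpy(p, &size, sizeof(size));           p += sizeof(size);
	memcpy(p, stack, size);                   p += size;
	memcpy(p, &dump_size, sizeof(dump_size)); p += sizeof(dump_size);
	return (size_t)(p - (char *)buf);
}

int main(void)
{
	char stack[64] = { 0 }, buf[128];
	uint64_t size, dump_size;
	size_t used = encode_user_stack(buf, stack, sizeof(stack), 48);

	memcpy(&size, buf, sizeof(size));
	memcpy(&dump_size, buf + sizeof(size) + size, sizeof(dump_size));
	assert(used == sizeof(uint64_t) + sizeof(stack) + sizeof(uint64_t));
	assert(dump_size <= size);	/* same sanity check as the parser */
	return 0;
}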
sizeof(u32); + result += sample->raw_size; + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + sz = sample->branch_stack->nr * sizeof(struct branch_entry); + sz += sizeof(u64); + result += sz; + } + + if (type & PERF_SAMPLE_REGS_USER) { + if (sample->user_regs.abi) { + result += sizeof(u64); + sz = hweight_long(sample->user_regs.mask) * sizeof(u64); + result += sz; + } else { + result += sizeof(u64); + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + sz = sample->user_stack.size; + result += sizeof(u64); + if (sz) { + result += sz; + result += sizeof(u64); + } + } + + if (type & PERF_SAMPLE_WEIGHT) + result += sizeof(u64); + + if (type & PERF_SAMPLE_DATA_SRC) + result += sizeof(u64); + + if (type & PERF_SAMPLE_TRANSACTION) + result += sizeof(u64); + + return result; +} + int perf_event__synthesize_sample(union perf_event *event, u64 type, + u64 read_format, const struct perf_sample *sample, bool swapped) { u64 *array; - + size_t sz; /* * used for cross-endian analysis. See git commit 65014ab3 * for why this goofiness is needed. @@ -1037,8 +1581,13 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, array = event->sample.array; + if (type & PERF_SAMPLE_IDENTIFIER) { + *array = sample->id; + array++; + } + if (type & PERF_SAMPLE_IP) { - event->ip.ip = sample->ip; + *array = sample->ip; array++; } @@ -1096,6 +1645,102 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, array++; } + if (type & PERF_SAMPLE_READ) { + if (read_format & PERF_FORMAT_GROUP) + *array = sample->read.group.nr; + else + *array = sample->read.one.value; + array++; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + *array = sample->read.time_enabled; + array++; + } + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + *array = sample->read.time_running; + array++; + } + + /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ + if (read_format & PERF_FORMAT_GROUP) { + sz = sample->read.group.nr * + sizeof(struct sample_read_value); + memcpy(array, sample->read.group.values, sz); + array = (void *)array + sz; + } else { + *array = sample->read.one.id; + array++; + } + } + + if (type & PERF_SAMPLE_CALLCHAIN) { + sz = (sample->callchain->nr + 1) * sizeof(u64); + memcpy(array, sample->callchain, sz); + array = (void *)array + sz; + } + + if (type & PERF_SAMPLE_RAW) { + u.val32[0] = sample->raw_size; + if (WARN_ONCE(swapped, + "Endianness of raw data not corrected!\n")) { + /* + * Inverse of what is done in perf_evsel__parse_sample + */ + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + u.val64 = bswap_64(u.val64); + } + *array = u.val64; + array = (void *)array + sizeof(u32); + + memcpy(array, sample->raw_data, sample->raw_size); + array = (void *)array + sample->raw_size; + } + + if (type & PERF_SAMPLE_BRANCH_STACK) { + sz = sample->branch_stack->nr * sizeof(struct branch_entry); + sz += sizeof(u64); + memcpy(array, sample->branch_stack, sz); + array = (void *)array + sz; + } + + if (type & PERF_SAMPLE_REGS_USER) { + if (sample->user_regs.abi) { + *array++ = sample->user_regs.abi; + sz = hweight_long(sample->user_regs.mask) * sizeof(u64); + memcpy(array, sample->user_regs.regs, sz); + array = (void *)array + sz; + } else { + *array++ = 0; + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + sz = sample->user_stack.size; + *array++ = sz; + if (sz) { + memcpy(array, sample->user_stack.data, sz); + array = (void *)array + sz; + *array++ = sz; + } + } + + if (type & PERF_SAMPLE_WEIGHT) { + *array = sample->weight; + array++; + } + + if (type & 
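perf_event__sample_event_size() above has to add exactly the bytes the parser consumes. For the fixed-size flags that is just "one u64 per set bit"; only READ, CALLCHAIN, RAW, BRANCH_STACK, REGS_USER and STACK_USER need the variable-length handling shown in the hunk. A rough standalone equivalent of the fixed-size part, assuming the uapi <linux/perf_event.h> shipped with this tree (the IDENTIFIER and TRANSACTION bits are new):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

/* 8 bytes per set bit for the flags whose payload is a single u64 */
static size_t fixed_sample_bytes(unsigned long long sample_type)
{
	const unsigned long long fixed =
		PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | PERF_SAMPLE_TID |
		PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | PERF_SAMPLE_ID |
		PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |
		PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC |
		PERF_SAMPLE_TRANSACTION;

	return (size_t)__builtin_popcountll(sample_type & fixed) *
	       sizeof(uint64_t);
}

int main(void)
{
	/* e.g. IP|TID|TIME|PERIOD -> 4 * 8 = 32 bytes on top of the header */
	printf("%zu\n", fixed_sample_bytes(PERF_SAMPLE_IP | PERF_SAMPLE_TID |
					   PERF_SAMPLE_TIME |
					   PERF_SAMPLE_PERIOD));
	return 0;
}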
PERF_SAMPLE_DATA_SRC) { + *array = sample->data_src; + array++; + } + + if (type & PERF_SAMPLE_TRANSACTION) { + *array = sample->transaction; + array++; + } + return 0; } @@ -1167,3 +1812,225 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, return 0; } + +static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) +{ + va_list args; + int ret = 0; + + if (!*first) { + ret += fprintf(fp, ","); + } else { + ret += fprintf(fp, ":"); + *first = false; + } + + va_start(args, fmt); + ret += vfprintf(fp, fmt, args); + va_end(args); + return ret; +} + +static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value) +{ + if (value == 0) + return 0; + + return comma_fprintf(fp, first, " %s: %" PRIu64, field, value); +} + +#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field) + +struct bit_names { + int bit; + const char *name; +}; + +static int bits__fprintf(FILE *fp, const char *field, u64 value, + struct bit_names *bits, bool *first) +{ + int i = 0, printed = comma_fprintf(fp, first, " %s: ", field); + bool first_bit = true; + + do { + if (value & bits[i].bit) { + printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name); + first_bit = false; + } + } while (bits[++i].name != NULL); + + return printed; +} + +static int sample_type__fprintf(FILE *fp, bool *first, u64 value) +{ +#define bit_name(n) { PERF_SAMPLE_##n, #n } + struct bit_names bits[] = { + bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), + bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), + bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), + bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), + bit_name(IDENTIFIER), + { .name = NULL, } + }; +#undef bit_name + return bits__fprintf(fp, "sample_type", value, bits, first); +} + +static int read_format__fprintf(FILE *fp, bool *first, u64 value) +{ +#define bit_name(n) { PERF_FORMAT_##n, #n } + struct bit_names bits[] = { + bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), + bit_name(ID), bit_name(GROUP), + { .name = NULL, } + }; +#undef bit_name + return bits__fprintf(fp, "read_format", value, bits, first); +} + +int perf_evsel__fprintf(struct perf_evsel *evsel, + struct perf_attr_details *details, FILE *fp) +{ + bool first = true; + int printed = 0; + + if (details->event_group) { + struct perf_evsel *pos; + + if (!perf_evsel__is_group_leader(evsel)) + return 0; + + if (evsel->nr_members > 1) + printed += fprintf(fp, "%s{", evsel->group_name ?: ""); + + printed += fprintf(fp, "%s", perf_evsel__name(evsel)); + for_each_group_member(pos, evsel) + printed += fprintf(fp, ",%s", perf_evsel__name(pos)); + + if (evsel->nr_members > 1) + printed += fprintf(fp, "}"); + goto out; + } + + printed += fprintf(fp, "%s", perf_evsel__name(evsel)); + + if (details->verbose || details->freq) { + printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, + (u64)evsel->attr.sample_freq); + } + + if (details->verbose) { + if_print(type); + if_print(config); + if_print(config1); + if_print(config2); + if_print(size); + printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type); + if (evsel->attr.read_format) + printed += read_format__fprintf(fp, &first, evsel->attr.read_format); + if_print(disabled); + if_print(inherit); + if_print(pinned); + if_print(exclusive); + if_print(exclude_user); + if_print(exclude_kernel); + if_print(exclude_hv); + if_print(exclude_idle); + if_print(mmap); + if_print(mmap2); + if_print(comm); + if_print(freq); + 
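sample_type__fprintf() and read_format__fprintf() above both walk a NULL-terminated { bit, name } table and join the names of the set bits with '|'. A self-contained sketch of that pattern follows; the demo table and function name are illustrative, not perf code.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct bit_name { uint64_t bit; const char *name; };

/* Join the names of the set bits with '|', like bits__fprintf() above. */
static int print_bits(FILE *fp, uint64_t value, const struct bit_name *bits)
{
	bool first = true;
	int i, printed = 0;

	for (i = 0; bits[i].name != NULL; i++) {
		if (!(value & bits[i].bit))
			continue;
		printed += fprintf(fp, "%s%s", first ? "" : "|", bits[i].name);
		first = false;
	}
	return printed;
}

int main(void)
{
	const struct bit_name demo[] = {
		{ 1u << 0, "IP" }, { 1u << 1, "TID" }, { 1u << 2, "TIME" },
		{ 0, NULL },
	};

	print_bits(stdout, (1u << 0) | (1u << 2), demo);	/* IP|TIME */
	putchar('\n');
	return 0;
}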
if_print(inherit_stat); + if_print(enable_on_exec); + if_print(task); + if_print(watermark); + if_print(precise_ip); + if_print(mmap_data); + if_print(sample_id_all); + if_print(exclude_host); + if_print(exclude_guest); + if_print(__reserved_1); + if_print(wakeup_events); + if_print(bp_type); + if_print(branch_sample_type); + } +out: + fputc('\n', fp); + return ++printed; +} + +bool perf_evsel__fallback(struct perf_evsel *evsel, int err, + char *msg, size_t msgsize) +{ + if ((err == ENOENT || err == ENXIO || err == ENODEV) && + evsel->attr.type == PERF_TYPE_HARDWARE && + evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { + /* + * If it's cycles then fall back to hrtimer based + * cpu-clock-tick sw counter, which is always available even if + * no PMU support. + * + * PPC returns ENXIO until 2.6.37 (behavior changed with commit + * b0a873e). + */ + scnprintf(msg, msgsize, "%s", +"The cycles event is not supported, trying to fall back to cpu-clock-ticks"); + + evsel->attr.type = PERF_TYPE_SOFTWARE; + evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; + + zfree(&evsel->name); + return true; + } + + return false; +} + +int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, + int err, char *msg, size_t size) +{ + switch (err) { + case EPERM: + case EACCES: + return scnprintf(msg, size, + "You may not have permission to collect %sstats.\n" + "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" + " -1 - Not paranoid at all\n" + " 0 - Disallow raw tracepoint access for unpriv\n" + " 1 - Disallow cpu events for unpriv\n" + " 2 - Disallow kernel profiling for unpriv", + target->system_wide ? "system-wide " : ""); + case ENOENT: + return scnprintf(msg, size, "The %s event is not supported.", + perf_evsel__name(evsel)); + case EMFILE: + return scnprintf(msg, size, "%s", + "Too many events are opened.\n" + "Try again after reducing the number of events."); + case ENODEV: + if (target->cpu_list) + return scnprintf(msg, size, "%s", + "No such device - did you specify an out-of-range profile CPU?\n"); + break; + case EOPNOTSUPP: + if (evsel->attr.precise_ip) + return scnprintf(msg, size, "%s", + "\'precise\' request may not be supported. Try removing 'p' modifier."); +#if defined(__i386__) || defined(__x86_64__) + if (evsel->attr.type == PERF_TYPE_HARDWARE) + return scnprintf(msg, size, "%s", + "No hardware sampling interrupt available.\n" + "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); +#endif + break; + default: + break; + } + + return scnprintf(msg, size, + "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n" + "/bin/dmesg may provide additional information.\n" + "No CONFIG_PERF_EVENTS=y kernel support configured?\n", + err, strerror(err), perf_evsel__name(evsel)); +} diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 6f94d6dea00..a52e9a5bb2d 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -3,12 +3,14 @@ #include <linux/list.h> #include <stdbool.h> -#include "../../../include/uapi/linux/perf_event.h" -#include "types.h" +#include <stddef.h> +#include <linux/perf_event.h> +#include <linux/types.h> #include "xyarray.h" #include "cgroup.h" #include "hist.h" - +#include "symbol.h" + struct perf_counts_values { union { struct { @@ -36,6 +38,9 @@ struct perf_sample_id { struct hlist_node node; u64 id; struct perf_evsel *evsel; + + /* Holds total ID period value for PERF_SAMPLE_READ processing. 
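perf_evsel__open_strerror() above points EPERM/EACCES users at /proc/sys/kernel/perf_event_paranoid. As a rough illustration only (this is not how perf itself reports it, and the helper name is invented), the current level can be read back like this before printing advice:

#include <stdio.h>

static int read_paranoid_level(void)
{
	FILE *fp = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level = 2;			/* fall back to a restrictive value */

	if (fp) {
		if (fscanf(fp, "%d", &level) != 1)
			level = 2;
		fclose(fp);
	}
	return level;
}

int main(void)
{
	printf("perf_event_paranoid = %d\n", read_paranoid_level());
	return 0;
}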
*/ + u64 period; }; /** struct perf_evsel - event selector @@ -43,6 +48,12 @@ struct perf_sample_id { * @name - Can be set to retain the original event name passed by the user, * so that when showing results in tools such as 'perf stat', we * show the name used, not some alias. + * @id_pos: the position of the event id (PERF_SAMPLE_ID or + * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of + * struct sample_event + * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or + * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all + * is used there is an id sample appended to non-sample events */ struct perf_evsel { struct list_head node; @@ -52,37 +63,59 @@ struct perf_evsel { struct xyarray *sample_id; u64 *id; struct perf_counts *counts; + struct perf_counts *prev_raw_counts; int idx; u32 ids; struct hists hists; char *name; + double scale; + const char *unit; struct event_format *tp_format; union { void *priv; off_t id_offset; }; struct cgroup_sel *cgrp; - struct { - void *func; - void *data; - } handler; + void *handler; struct cpu_map *cpus; unsigned int sample_size; + int id_pos; + int is_pos; bool supported; bool needs_swap; /* parse modifier helper */ int exclude_GH; + int nr_members; + int sample_read; struct perf_evsel *leader; char *group_name; }; +union u64_swap { + u64 val64; + u32 val32[2]; +}; + +#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists) + struct cpu_map; struct thread_map; struct perf_evlist; -struct perf_record_opts; +struct record_opts; + +struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx); + +static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) +{ + return perf_evsel__new_idx(attr, 0); +} -struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); -struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx); +struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx); + +static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name) +{ + return perf_evsel__newtp_idx(sys, name, 0); +} struct event_format *event_format__new(const char *sys, const char *name); @@ -92,8 +125,10 @@ void perf_evsel__exit(struct perf_evsel *evsel); void perf_evsel__delete(struct perf_evsel *evsel); void perf_evsel__config(struct perf_evsel *evsel, - struct perf_record_opts *opts, - struct perf_evsel *first); + struct record_opts *opts); + +int __perf_evsel__sample_size(u64 sample_type); +void perf_evsel__calc_id_pos(struct perf_evsel *evsel); bool perf_evsel__is_cache_op_valid(u8 type, u8 op); @@ -111,15 +146,35 @@ int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size); const char *perf_evsel__name(struct perf_evsel *evsel); +const char *perf_evsel__group_name(struct perf_evsel *evsel); +int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size); + int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); +void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus); void perf_evsel__free_fd(struct perf_evsel *evsel); void perf_evsel__free_id(struct perf_evsel *evsel); +void perf_evsel__free_counts(struct perf_evsel *evsel); void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); +void __perf_evsel__set_sample_bit(struct perf_evsel *evsel, + 
enum perf_event_sample_format bit); +void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel, + enum perf_event_sample_format bit); + +#define perf_evsel__set_sample_bit(evsel, bit) \ + __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit) + +#define perf_evsel__reset_sample_bit(evsel, bit) \ + __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit) + +void perf_evsel__set_sample_id(struct perf_evsel *evsel, + bool use_sample_identifier); + int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, const char *filter); +int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus); @@ -158,6 +213,12 @@ static inline bool perf_evsel__match2(struct perf_evsel *e1, (e1->attr.config == e2->attr.config); } +#define perf_evsel__cmp(a, b) \ + ((a) && \ + (b) && \ + (a)->attr.type == (b)->attr.type && \ + (a)->attr.config == (b)->attr.config) + int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread, bool scale); @@ -225,4 +286,80 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) { return list_entry(evsel->node.next, struct perf_evsel, node); } + +static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel) +{ + return list_entry(evsel->node.prev, struct perf_evsel, node); +} + +/** + * perf_evsel__is_group_leader - Return whether given evsel is a leader event + * + * @evsel - evsel selector to be tested + * + * Return %true if @evsel is a group leader or a stand-alone event + */ +static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel) +{ + return evsel->leader == evsel; +} + +/** + * perf_evsel__is_group_event - Return whether given evsel is a group event + * + * @evsel - evsel selector to be tested + * + * Return %true iff event group view is enabled and @evsel is a actual group + * leader which has other members in the group + */ +static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel) +{ + if (!symbol_conf.event_group) + return false; + + return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1; +} + +/** + * perf_evsel__is_function_event - Return whether given evsel is a function + * trace event + * + * @evsel - evsel selector to be tested + * + * Return %true if event is function trace event + */ +static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel) +{ +#define FUNCTION_EVENT "ftrace:function" + + return evsel->name && + !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); + +#undef FUNCTION_EVENT +} + +struct perf_attr_details { + bool freq; + bool verbose; + bool event_group; +}; + +int perf_evsel__fprintf(struct perf_evsel *evsel, + struct perf_attr_details *details, FILE *fp); + +bool perf_evsel__fallback(struct perf_evsel *evsel, int err, + char *msg, size_t msgsize); +int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, + int err, char *msg, size_t size); + +static inline int perf_evsel__group_idx(struct perf_evsel *evsel) +{ + return evsel->idx - evsel->leader->idx; +} + +#define for_each_group_member(_evsel, _leader) \ +for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \ + (_evsel) && (_evsel)->leader == (_leader); \ + (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node)) + #endif /* __PERF_EVSEL_H */ diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh index 3ac38031d53..36a885d2cd2 100755 --- 
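The new evsel.h helpers above rely on one grouping convention: a leader's ->leader points at itself, members point at their leader, and members sit right after the leader in the evlist, so for_each_group_member() stops at the first entry with a different leader. The toy model below restates that convention with simplified stand-in structs (an array instead of the real linked list); it is a sketch, not the perf data structures.

#include <stdio.h>

struct toy_evsel {
	const char *name;
	struct toy_evsel *leader;
};

/* Count a group by walking entries that still point at the same leader. */
static int count_group_members(struct toy_evsel *evlist, int n, int leader_idx)
{
	struct toy_evsel *leader = &evlist[leader_idx];
	int i, nr = 1;				/* the leader itself */

	for (i = leader_idx + 1; i < n && evlist[i].leader == leader; i++)
		nr++;
	return nr;
}

int main(void)
{
	struct toy_evsel ev[3] = {
		{ "cycles", NULL }, { "instructions", NULL }, { "faults", NULL },
	};

	ev[0].leader = &ev[0];		/* group leader */
	ev[1].leader = &ev[0];		/* member of the cycles group */
	ev[2].leader = &ev[2];		/* stand-alone event */

	printf("group size: %d\n", count_group_members(ev, 3, 0));	/* 2 */
	return 0;
}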
a/tools/perf/util/generate-cmdlist.sh +++ b/tools/perf/util/generate-cmdlist.sh @@ -22,7 +22,7 @@ do }' "Documentation/perf-$cmd.txt" done -echo "#ifdef LIBELF_SUPPORT" +echo "#ifdef HAVE_LIBELF_SUPPORT" sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt | sort | while read cmd @@ -35,5 +35,5 @@ do p }' "Documentation/perf-$cmd.txt" done -echo "#endif /* LIBELF_SUPPORT */" +echo "#endif /* HAVE_LIBELF_SUPPORT */" echo "};" diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 7daad237dea..893f8e2df92 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1,5 +1,3 @@ -#define _FILE_OFFSET_BITS 64 - #include "util.h" #include <sys/types.h> #include <byteswap.h> @@ -23,44 +21,14 @@ #include "pmu.h" #include "vdso.h" #include "strbuf.h" +#include "build-id.h" +#include "data.h" static bool no_buildid_cache = false; -static int trace_event_count; -static struct perf_trace_event_type *trace_events; - static u32 header_argc; static const char **header_argv; -int perf_header__push_event(u64 id, const char *name) -{ - struct perf_trace_event_type *nevents; - - if (strlen(name) > MAX_EVENT_NAME) - pr_warning("Event %s will be truncated\n", name); - - nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events)); - if (nevents == NULL) - return -ENOMEM; - trace_events = nevents; - - memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type)); - trace_events[trace_event_count].event_id = id; - strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1); - trace_event_count++; - return 0; -} - -char *perf_header__find_event(u64 id) -{ - int i; - for (i = 0 ; i < trace_event_count; i++) { - if (trace_events[i].event_id == id) - return trace_events[i].name; - } - return NULL; -} - /* * magic2 = "PERFILE2" * must be a numerical value to let the endianness @@ -147,7 +115,7 @@ static char *do_read_string(int fd, struct perf_header *ph) u32 len; char *buf; - sz = read(fd, &len, sizeof(len)); + sz = readn(fd, &len, sizeof(len)); if (sz < (ssize_t)sizeof(len)) return NULL; @@ -158,7 +126,7 @@ static char *do_read_string(int fd, struct perf_header *ph) if (!buf) return NULL; - ret = read(fd, buf, len); + ret = readn(fd, buf, len); if (ret == (ssize_t)len) { /* * strings are padded by zeroes @@ -209,7 +177,7 @@ perf_header__set_cmdline(int argc, const char **argv) continue; \ else -static int write_buildid(char *name, size_t name_len, u8 *build_id, +static int write_buildid(const char *name, size_t name_len, u8 *build_id, pid_t pid, u16 misc, int fd) { int err; @@ -232,14 +200,16 @@ static int write_buildid(char *name, size_t name_len, u8 *build_id, return write_padded(fd, name, name_len + 1, len); } -static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, - u16 misc, int fd) +static int __dsos__write_buildid_table(struct list_head *head, + struct machine *machine, + pid_t pid, u16 misc, int fd) { + char nm[PATH_MAX]; struct dso *pos; dsos__for_each_with_build_id(pos, head) { int err; - char *name; + const char *name; size_t name_len; if (!pos->hit) @@ -248,6 +218,10 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, if (is_vdso_map(pos->short_name)) { name = (char *) VDSO__MAP_NAME; name_len = sizeof(VDSO__MAP_NAME) + 1; + } else if (dso__is_kcore(pos)) { + machine__mmap_name(machine, nm, sizeof(nm)); + name = nm; + name_len = strlen(nm) + 1; } else { name = pos->long_name; name_len = pos->long_name_len + 1; @@ -273,10 +247,10 @@ static int 
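Throughout the header.c diff, bare read() calls are replaced with readn(), which keeps reading until the requested byte count is satisfied (or a real error/EOF occurs), so short reads no longer look like corrupt headers. perf's own helper lives elsewhere in tools/perf/util and is not shown here; the sketch below only captures the general shape, with an invented name.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t read_exact(int fd, void *buf, size_t count)
{
	char *p = buf;
	size_t left = count;

	while (left) {
		ssize_t ret = read(fd, p, left);

		if (ret < 0) {
			if (errno == EINTR)
				continue;	/* retry interrupted reads */
			return -1;
		}
		if (ret == 0)
			break;			/* EOF: short read */
		p += ret;
		left -= ret;
	}
	return (ssize_t)(count - left);
}

int main(void)
{
	char buf[16];
	int fd = open("/dev/zero", O_RDONLY);
	ssize_t n = fd >= 0 ? read_exact(fd, buf, sizeof(buf)) : -1;

	printf("read %zd bytes\n", n);
	if (fd >= 0)
		close(fd);
	return 0;
}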
machine__write_buildid_table(struct machine *machine, int fd) umisc = PERF_RECORD_MISC_GUEST_USER; } - err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, - kmisc, fd); + err = __dsos__write_buildid_table(&machine->kernel_dsos, machine, + machine->pid, kmisc, fd); if (err == 0) - err = __dsos__write_buildid_table(&machine->user_dsos, + err = __dsos__write_buildid_table(&machine->user_dsos, machine, machine->pid, umisc, fd); return err; } @@ -286,12 +260,12 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd) struct perf_session *session = container_of(header, struct perf_session, header); struct rb_node *nd; - int err = machine__write_buildid_table(&session->host_machine, fd); + int err = machine__write_buildid_table(&session->machines.host, fd); if (err) return err; - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); err = machine__write_buildid_table(pos, fd); if (err) @@ -312,7 +286,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, if (is_kallsyms) { if (symbol_conf.kptr_restrict) { pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); - return 0; + err = 0; + goto out_free; } realname = (char *) name; } else @@ -407,23 +382,31 @@ out_free: return err; } -static int dso__cache_build_id(struct dso *dso, const char *debugdir) +static int dso__cache_build_id(struct dso *dso, struct machine *machine, + const char *debugdir) { bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; bool is_vdso = is_vdso_map(dso->short_name); + const char *name = dso->long_name; + char nm[PATH_MAX]; - return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), - dso->long_name, debugdir, - is_kallsyms, is_vdso); + if (dso__is_kcore(dso)) { + is_kallsyms = true; + machine__mmap_name(machine, nm, sizeof(nm)); + name = nm; + } + return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name, + debugdir, is_kallsyms, is_vdso); } -static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) +static int __dsos__cache_build_ids(struct list_head *head, + struct machine *machine, const char *debugdir) { struct dso *pos; int err = 0; dsos__for_each_with_build_id(pos, head) - if (dso__cache_build_id(pos, debugdir)) + if (dso__cache_build_id(pos, machine, debugdir)) err = -1; return err; @@ -431,8 +414,9 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) static int machine__cache_build_ids(struct machine *machine, const char *debugdir) { - int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); - ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); + int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine, + debugdir); + ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir); return ret; } @@ -447,9 +431,9 @@ static int perf_session__cache_build_ids(struct perf_session *session) if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) return -1; - ret = machine__cache_build_ids(&session->host_machine, debugdir); + ret = machine__cache_build_ids(&session->machines.host, debugdir); - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__cache_build_ids(pos, debugdir); } @@ -466,9 +450,9 @@ static bool machine__read_build_ids(struct 
machine *machine, bool with_hits) static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) { struct rb_node *nd; - bool ret = machine__read_build_ids(&session->host_machine, with_hits); + bool ret = machine__read_build_ids(&session->machines.host, with_hits); - for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { + for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret |= machine__read_build_ids(pos, with_hits); } @@ -659,8 +643,7 @@ static int write_event_desc(int fd, struct perf_header *h __maybe_unused, if (ret < 0) return ret; - list_for_each_entry(evsel, &evlist->entries, node) { - + evlist__for_each(evlist, evsel) { ret = do_write(fd, &evsel->attr, sz); if (ret < 0) return ret; @@ -748,18 +731,19 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu) char filename[MAXPATHLEN]; char *buf = NULL, *p; size_t len = 0; + ssize_t sret; u32 i = 0; int ret = -1; sprintf(filename, CORE_SIB_FMT, cpu); fp = fopen(filename, "r"); if (!fp) - return -1; - - if (getline(&buf, &len, fp) <= 0) - goto done; + goto try_threads; + sret = getline(&buf, &len, fp); fclose(fp); + if (sret <= 0) + goto try_threads; p = strchr(buf, '\n'); if (p) @@ -775,7 +759,9 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu) buf = NULL; len = 0; } + ret = 0; +try_threads: sprintf(filename, THRD_SIB_FMT, cpu); fp = fopen(filename, "r"); if (!fp) @@ -813,10 +799,10 @@ static void free_cpu_topo(struct cpu_topo *tp) return; for (i = 0 ; i < tp->core_sib; i++) - free(tp->core_siblings[i]); + zfree(&tp->core_siblings[i]); for (i = 0 ; i < tp->thread_sib; i++) - free(tp->thread_siblings[i]); + zfree(&tp->thread_siblings[i]); free(tp); } @@ -944,7 +930,7 @@ static int write_topo_node(int fd, int node) /* skip over invalid lines */ if (!strchr(buf, ':')) continue; - if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2) + if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2) goto done; if (!strcmp(field, "MemTotal:")) mem_total = mem; @@ -953,6 +939,7 @@ static int write_topo_node(int fd, int node) } fclose(fp); + fp = NULL; ret = do_write(fd, &mem_total, sizeof(u64)); if (ret) @@ -979,7 +966,8 @@ static int write_topo_node(int fd, int node) ret = do_write_string(fd, buf); done: free(buf); - fclose(fp); + if (fp) + fclose(fp); return ret; } @@ -1050,16 +1038,25 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, struct perf_pmu *pmu = NULL; off_t offset = lseek(fd, 0, SEEK_CUR); __u32 pmu_num = 0; + int ret; /* write real pmu_num later */ - do_write(fd, &pmu_num, sizeof(pmu_num)); + ret = do_write(fd, &pmu_num, sizeof(pmu_num)); + if (ret < 0) + return ret; while ((pmu = perf_pmu__scan(pmu))) { if (!pmu->name) continue; pmu_num++; - do_write(fd, &pmu->type, sizeof(pmu->type)); - do_write_string(fd, pmu->name); + + ret = do_write(fd, &pmu->type, sizeof(pmu->type)); + if (ret < 0) + return ret; + + ret = do_write_string(fd, pmu->name); + if (ret < 0) + return ret; } if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { @@ -1072,6 +1069,52 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, } /* + * File format: + * + * struct group_descs { + * u32 nr_groups; + * struct group_desc { + * char name[]; + * u32 leader_idx; + * u32 nr_members; + * }[nr_groups]; + * }; + */ +static int write_group_desc(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist) +{ + u32 nr_groups = evlist->nr_groups; + struct 
perf_evsel *evsel; + int ret; + + ret = do_write(fd, &nr_groups, sizeof(nr_groups)); + if (ret < 0) + return ret; + + evlist__for_each(evlist, evsel) { + if (perf_evsel__is_group_leader(evsel) && + evsel->nr_members > 1) { + const char *name = evsel->group_name ?: "{anon_group}"; + u32 leader_idx = evsel->idx; + u32 nr_members = evsel->nr_members; + + ret = do_write_string(fd, name); + if (ret < 0) + return ret; + + ret = do_write(fd, &leader_idx, sizeof(leader_idx)); + if (ret < 0) + return ret; + + ret = do_write(fd, &nr_members, sizeof(nr_members)); + if (ret < 0) + return ret; + } + } + return 0; +} + +/* * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(ARCH)/util/header.c */ @@ -1188,10 +1231,8 @@ static void free_event_desc(struct perf_evsel *events) return; for (evsel = events; evsel->attr.size; evsel++) { - if (evsel->name) - free(evsel->name); - if (evsel->id) - free(evsel->id); + zfree(&evsel->name); + zfree(&evsel->id); } free(events); @@ -1208,14 +1249,14 @@ read_event_desc(struct perf_header *ph, int fd) size_t msz; /* number of events */ - ret = read(fd, &nre, sizeof(nre)); + ret = readn(fd, &nre, sizeof(nre)); if (ret != (ssize_t)sizeof(nre)) goto error; if (ph->needs_swap) nre = bswap_32(nre); - ret = read(fd, &sz, sizeof(sz)); + ret = readn(fd, &sz, sizeof(sz)); if (ret != (ssize_t)sizeof(sz)) goto error; @@ -1243,7 +1284,7 @@ read_event_desc(struct perf_header *ph, int fd) * must read entire on-file attr struct to * sync up with layout. */ - ret = read(fd, buf, sz); + ret = readn(fd, buf, sz); if (ret != (ssize_t)sz) goto error; @@ -1252,7 +1293,7 @@ read_event_desc(struct perf_header *ph, int fd) memcpy(&evsel->attr, buf, msz); - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) goto error; @@ -1273,7 +1314,7 @@ read_event_desc(struct perf_header *ph, int fd) evsel->id = id; for (j = 0 ; j < nr; j++) { - ret = read(fd, id, sizeof(*id)); + ret = readn(fd, id, sizeof(*id)); if (ret != (ssize_t)sizeof(*id)) goto error; if (ph->needs_swap) @@ -1282,8 +1323,7 @@ read_event_desc(struct perf_header *ph, int fd) } } out: - if (buf) - free(buf); + free(buf); return events; error: if (events) @@ -1323,6 +1363,9 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip); + fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2); + fprintf(fp, ", attr_mmap = %d", evsel->attr.mmap); + fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data); if (evsel->ids) { fprintf(fp, ", id = {"); for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { @@ -1378,6 +1421,8 @@ static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, str = tmp + 1; fprintf(fp, "# node%u cpu list : %s\n", c, str); + + str += strlen(str) + 1; } return; error: @@ -1432,6 +1477,31 @@ error: fprintf(fp, "# pmu mappings: unable to read\n"); } +static void print_group_desc(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + struct perf_session *session; + struct perf_evsel *evsel; + u32 nr = 0; + + session = container_of(ph, struct perf_session, header); + + evlist__for_each(session->evlist, evsel) { + if (perf_evsel__is_group_leader(evsel) && + evsel->nr_members > 1) { + fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", + perf_evsel__name(evsel)); + + nr = evsel->nr_members - 1; + } else if (nr) { + fprintf(fp, ",%s", perf_evsel__name(evsel)); + + if (--nr == 0) + fprintf(fp, "}\n"); + } + } +} + static int 
__event_process_build_id(struct build_id_event *bev, char *filename, struct perf_session *session) @@ -1503,14 +1573,14 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, while (offset < limit) { ssize_t len; - if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) + if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) return -1; if (header->needs_swap) perf_event_header__bswap(&old_bev.header); len = old_bev.header.size - sizeof(old_bev); - if (read(input, filename, len) != len) + if (readn(input, filename, len) != len) return -1; bev.header = old_bev.header; @@ -1545,14 +1615,14 @@ static int perf_header__read_build_ids(struct perf_header *header, while (offset < limit) { ssize_t len; - if (read(input, &bev, sizeof(bev)) != sizeof(bev)) + if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) goto out; if (header->needs_swap) perf_event_header__bswap(&bev.header); len = bev.header.size - sizeof(bev); - if (read(input, filename, len) != len) + if (readn(input, filename, len) != len) goto out; /* * The a1645ce1 changeset: @@ -1586,8 +1656,8 @@ static int process_tracing_data(struct perf_file_section *section __maybe_unused struct perf_header *ph __maybe_unused, int fd, void *data) { - trace_report(fd, data, false); - return 0; + ssize_t ret = trace_report(fd, data, false); + return ret < 0 ? -1 : 0; } static int process_build_id(struct perf_file_section *section, @@ -1635,10 +1705,10 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, struct perf_header *ph, int fd, void *data __maybe_unused) { - size_t ret; + ssize_t ret; u32 nr; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1647,7 +1717,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused, ph->env.nr_cpus_online = nr; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1679,9 +1749,9 @@ static int process_total_mem(struct perf_file_section *section __maybe_unused, void *data __maybe_unused) { uint64_t mem; - size_t ret; + ssize_t ret; - ret = read(fd, &mem, sizeof(mem)); + ret = readn(fd, &mem, sizeof(mem)); if (ret != sizeof(mem)) return -1; @@ -1697,7 +1767,7 @@ perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) { struct perf_evsel *evsel; - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { if (evsel->idx == idx) return evsel; } @@ -1748,12 +1818,12 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused, struct perf_header *ph, int fd, void *data __maybe_unused) { - size_t ret; + ssize_t ret; char *str; u32 nr, i; struct strbuf sb; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1784,12 +1854,12 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused struct perf_header *ph, int fd, void *data __maybe_unused) { - size_t ret; + ssize_t ret; u32 nr, i; char *str; struct strbuf sb; - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1810,7 +1880,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused } ph->env.sibling_cores = strbuf_detach(&sb, NULL); - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) return -1; @@ -1840,14 +1910,14 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse struct perf_header 
*ph, int fd, void *data __maybe_unused) { - size_t ret; + ssize_t ret; u32 nr, node, i; char *str; uint64_t mem_total, mem_free; struct strbuf sb; /* nr nodes */ - ret = read(fd, &nr, sizeof(nr)); + ret = readn(fd, &nr, sizeof(nr)); if (ret != sizeof(nr)) goto error; @@ -1859,15 +1929,15 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse for (i = 0; i < nr; i++) { /* node number */ - ret = read(fd, &node, sizeof(node)); + ret = readn(fd, &node, sizeof(node)); if (ret != sizeof(node)) goto error; - ret = read(fd, &mem_total, sizeof(u64)); + ret = readn(fd, &mem_total, sizeof(u64)); if (ret != sizeof(u64)) goto error; - ret = read(fd, &mem_free, sizeof(u64)); + ret = readn(fd, &mem_free, sizeof(u64)); if (ret != sizeof(u64)) goto error; @@ -1900,13 +1970,13 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused struct perf_header *ph, int fd, void *data __maybe_unused) { - size_t ret; + ssize_t ret; char *name; u32 pmu_num; u32 type; struct strbuf sb; - ret = read(fd, &pmu_num, sizeof(pmu_num)); + ret = readn(fd, &pmu_num, sizeof(pmu_num)); if (ret != sizeof(pmu_num)) return -1; @@ -1922,7 +1992,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused strbuf_init(&sb, 128); while (pmu_num) { - if (read(fd, &type, sizeof(type)) != sizeof(type)) + if (readn(fd, &type, sizeof(type)) != sizeof(type)) goto error; if (ph->needs_swap) type = bswap_32(type); @@ -1946,6 +2016,100 @@ error: return -1; } +static int process_group_desc(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret = -1; + u32 i, nr, nr_groups; + struct perf_session *session; + struct perf_evsel *evsel, *leader = NULL; + struct group_desc { + char *name; + u32 leader_idx; + u32 nr_members; + } *desc; + + if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups)) + return -1; + + if (ph->needs_swap) + nr_groups = bswap_32(nr_groups); + + ph->env.nr_groups = nr_groups; + if (!nr_groups) { + pr_debug("group desc not available\n"); + return 0; + } + + desc = calloc(nr_groups, sizeof(*desc)); + if (!desc) + return -1; + + for (i = 0; i < nr_groups; i++) { + desc[i].name = do_read_string(fd, ph); + if (!desc[i].name) + goto out_free; + + if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32)) + goto out_free; + + if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32)) + goto out_free; + + if (ph->needs_swap) { + desc[i].leader_idx = bswap_32(desc[i].leader_idx); + desc[i].nr_members = bswap_32(desc[i].nr_members); + } + } + + /* + * Rebuild group relationship based on the group_desc + */ + session = container_of(ph, struct perf_session, header); + session->evlist->nr_groups = nr_groups; + + i = nr = 0; + evlist__for_each(session->evlist, evsel) { + if (evsel->idx == (int) desc[i].leader_idx) { + evsel->leader = evsel; + /* {anon_group} is a dummy name */ + if (strcmp(desc[i].name, "{anon_group}")) { + evsel->group_name = desc[i].name; + desc[i].name = NULL; + } + evsel->nr_members = desc[i].nr_members; + + if (i >= nr_groups || nr > 0) { + pr_debug("invalid group desc\n"); + goto out_free; + } + + leader = evsel; + nr = evsel->nr_members - 1; + i++; + } else if (nr) { + /* This is a group member */ + evsel->leader = leader; + + nr--; + } + } + + if (i != nr_groups || nr != 0) { + pr_debug("invalid group desc\n"); + goto out_free; + } + + ret = 0; +out_free: + for (i = 0; i < nr_groups; i++) + zfree(&desc[i].name); + free(desc); + + return ret; +} + 
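process_group_desc() above rebuilds event groups from what the HEADER_GROUP_DESC section stores per group: the leader's index in the evlist and the member count (plus a name, with "{anon_group}" as a dummy). The simplified loop below restates just the rebuild step with stand-in arrays instead of the real evlist and desc records; it omits the name handling and the swap/validation paths.

#include <stdio.h>

struct toy_evsel { int idx; int leader_idx; };
struct toy_desc  { int leader_idx; int nr_members; };

/* Restore leader assignments from (leader_idx, nr_members) records. */
static void rebuild_groups(struct toy_evsel *ev, int nr_ev,
			   const struct toy_desc *desc, int nr_groups)
{
	int i = 0, remaining = 0, leader = -1, e;

	for (e = 0; e < nr_ev; e++) {
		if (i < nr_groups && ev[e].idx == desc[i].leader_idx) {
			leader = ev[e].idx;		/* new group starts here */
			remaining = desc[i].nr_members - 1;
			i++;
			ev[e].leader_idx = leader;
		} else if (remaining > 0) {
			ev[e].leader_idx = leader;	/* group member */
			remaining--;
		} else {
			ev[e].leader_idx = ev[e].idx;	/* stand-alone event */
		}
	}
}

int main(void)
{
	struct toy_evsel ev[4] = { {0, 0}, {1, 1}, {2, 2}, {3, 3} };
	const struct toy_desc desc[1] = { { .leader_idx = 1, .nr_members = 2 } };
	int e;

	rebuild_groups(ev, 4, desc, 1);
	for (e = 0; e < 4; e++)
		printf("event %d -> leader %d\n", ev[e].idx, ev[e].leader_idx);
	return 0;
}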
struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); @@ -1985,6 +2149,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), + FEAT_OPP(HEADER_GROUP_DESC, group_desc), }; struct header_print_data { @@ -2023,7 +2188,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) { struct header_print_data hd; struct perf_header *header = &session->header; - int fd = session->fd; + int fd = perf_data_file__fd(session->file); hd.fp = fp; hd.full = full; @@ -2074,13 +2239,13 @@ static int perf_header__adds_write(struct perf_header *header, if (!nr_sections) return 0; - feat_sec = p = calloc(sizeof(*feat_sec), nr_sections); + feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); if (feat_sec == NULL) return -ENOMEM; sec_size = sizeof(*feat_sec) * nr_sections; - sec_start = header->data_offset + header->data_size; + sec_start = header->feat_offset; lseek(fd, sec_start + sec_size, SEEK_SET); for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { @@ -2126,34 +2291,24 @@ int perf_session__write_header(struct perf_session *session, struct perf_file_header f_header; struct perf_file_attr f_attr; struct perf_header *header = &session->header; - struct perf_evsel *evsel, *pair = NULL; + struct perf_evsel *evsel; + u64 attr_offset; int err; lseek(fd, sizeof(f_header), SEEK_SET); - if (session->evlist != evlist) - pair = perf_evlist__first(session->evlist); - - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(session->evlist, evsel) { evsel->id_offset = lseek(fd, 0, SEEK_CUR); err = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); if (err < 0) { -out_err_write: pr_debug("failed to write perf header\n"); return err; } - if (session->evlist != evlist) { - err = do_write(fd, pair->id, pair->ids * sizeof(u64)); - if (err < 0) - goto out_err_write; - evsel->ids += pair->ids; - pair = perf_evsel__next(pair); - } } - header->attr_offset = lseek(fd, 0, SEEK_CUR); + attr_offset = lseek(fd, 0, SEEK_CUR); - list_for_each_entry(evsel, &evlist->entries, node) { + evlist__for_each(evlist, evsel) { f_attr = (struct perf_file_attr){ .attr = evsel->attr, .ids = { @@ -2168,17 +2323,9 @@ out_err_write: } } - header->event_offset = lseek(fd, 0, SEEK_CUR); - header->event_size = trace_event_count * sizeof(struct perf_trace_event_type); - if (trace_events) { - err = do_write(fd, trace_events, header->event_size); - if (err < 0) { - pr_debug("failed to write perf header events\n"); - return err; - } - } - - header->data_offset = lseek(fd, 0, SEEK_CUR); + if (!header->data_offset) + header->data_offset = lseek(fd, 0, SEEK_CUR); + header->feat_offset = header->data_offset + header->data_size; if (at_exit) { err = perf_header__adds_write(header, evlist, fd); @@ -2191,17 +2338,14 @@ out_err_write: .size = sizeof(f_header), .attr_size = sizeof(f_attr), .attrs = { - .offset = header->attr_offset, + .offset = attr_offset, .size = evlist->nr_entries * sizeof(f_attr), }, .data = { .offset = header->data_offset, .size = header->data_size, }, - .event_types = { - .offset = header->event_offset, - .size = header->event_size, - }, + /* event_types is ignored, store zeros */ }; memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); @@ -2214,7 +2358,6 @@ out_err_write: } lseek(fd, header->data_offset + 
header->data_size, SEEK_SET); - header->frozen = 1; return 0; } @@ -2246,13 +2389,13 @@ int perf_header__process_sections(struct perf_header *header, int fd, if (!nr_sections) return 0; - feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); + feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); if (!feat_sec) return -1; sec_size = sizeof(*feat_sec) * nr_sections; - lseek(fd, header->data_offset + header->data_size, SEEK_SET); + lseek(fd, header->feat_offset, SEEK_SET); err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); if (err < 0) @@ -2340,6 +2483,16 @@ static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) return -1; } +bool is_perf_magic(u64 magic) +{ + if (!memcmp(&magic, __perf_magic1, sizeof(magic)) + || magic == __perf_magic2 + || magic == __perf_magic2_sw) + return true; + + return false; +} + static int check_magic_endian(u64 magic, uint64_t hdr_sz, bool is_pipe, struct perf_header *ph) { @@ -2348,6 +2501,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, /* check for legacy format */ ret = memcmp(&magic, __perf_magic1, sizeof(magic)); if (ret == 0) { + ph->version = PERF_HEADER_VERSION_1; pr_debug("legacy perf.data format\n"); if (is_pipe) return try_all_pipe_abis(hdr_sz, ph); @@ -2369,6 +2523,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, return -1; ph->needs_swap = true; + ph->version = PERF_HEADER_VERSION_2; return 0; } @@ -2376,7 +2531,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz, int perf_file_header__read(struct perf_file_header *header, struct perf_header *ph, int fd) { - int ret; + ssize_t ret; lseek(fd, 0, SEEK_SET); @@ -2439,10 +2594,9 @@ int perf_file_header__read(struct perf_file_header *header, memcpy(&ph->adds_features, &header->adds_features, sizeof(ph->adds_features)); - ph->event_offset = header->event_types.offset; - ph->event_size = header->event_types.size; ph->data_offset = header->data.offset; ph->data_size = header->data.size; + ph->feat_offset = header->data.offset + header->data.size; return 0; } @@ -2471,7 +2625,7 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, struct perf_header *ph, int fd, bool repipe) { - int ret; + ssize_t ret; ret = readn(fd, header, sizeof(*header)); if (ret <= 0) @@ -2491,19 +2645,18 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, return 0; } -static int perf_header__read_pipe(struct perf_session *session, int fd) +static int perf_header__read_pipe(struct perf_session *session) { struct perf_header *header = &session->header; struct perf_pipe_file_header f_header; - if (perf_file_header__read_pipe(&f_header, header, fd, + if (perf_file_header__read_pipe(&f_header, header, + perf_data_file__fd(session->file), session->repipe) < 0) { pr_debug("incompatible file format\n"); return -EINVAL; } - session->fd = fd; - return 0; } @@ -2513,7 +2666,7 @@ static int read_attr(int fd, struct perf_header *ph, struct perf_event_attr *attr = &f_attr->attr; size_t sz, left; size_t our_sz = sizeof(f_attr->attr); - int ret; + ssize_t ret; memset(f_attr, 0, sizeof(*f_attr)); @@ -2563,6 +2716,11 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, if (evsel->tp_format) return 0; + if (pevent == NULL) { + pr_debug("broken or missing trace data\n"); + return -1; + } + event = pevent_find_event(pevent, evsel->attr.config); if (event == NULL) return -1; @@ -2583,7 +2741,7 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, { struct perf_evsel *pos; - 
list_for_each_entry(pos, &evlist->entries, node) { + evlist__for_each(evlist, pos) { if (pos->attr.type == PERF_TYPE_TRACEPOINT && perf_evsel__prepare_tracepoint_event(pos, pevent)) return -1; @@ -2592,24 +2750,38 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, return 0; } -int perf_session__read_header(struct perf_session *session, int fd) +int perf_session__read_header(struct perf_session *session) { + struct perf_data_file *file = session->file; struct perf_header *header = &session->header; struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; int nr_attrs, nr_ids, i, j; + int fd = perf_data_file__fd(file); - session->evlist = perf_evlist__new(NULL, NULL); + session->evlist = perf_evlist__new(); if (session->evlist == NULL) return -ENOMEM; - if (session->fd_pipe) - return perf_header__read_pipe(session, fd); + if (perf_data_file__is_pipe(file)) + return perf_header__read_pipe(session); if (perf_file_header__read(&f_header, header, fd) < 0) return -EINVAL; + /* + * Sanity check that perf.data was written cleanly; data size is + * initialized to 0 and updated only if the on_exit function is run. + * If data size is still 0 then the file contains only partial + * information. Just warn user and process it as much as it can. + */ + if (f_header.data.size == 0) { + pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" + "Was the 'perf record' command properly terminated?\n", + file->path); + } + nr_attrs = f_header.attrs.size / f_header.attr_size; lseek(fd, f_header.attrs.offset, SEEK_SET); @@ -2624,7 +2796,7 @@ int perf_session__read_header(struct perf_session *session, int fd) perf_event__attr_swap(&f_attr.attr); tmp = lseek(fd, 0, SEEK_CUR); - evsel = perf_evsel__new(&f_attr.attr, i); + evsel = perf_evsel__new(&f_attr.attr); if (evsel == NULL) goto out_delete_evlist; @@ -2659,27 +2831,13 @@ int perf_session__read_header(struct perf_session *session, int fd) symbol_conf.nr_events = nr_attrs; - if (f_header.event_types.size) { - lseek(fd, f_header.event_types.offset, SEEK_SET); - trace_events = malloc(f_header.event_types.size); - if (trace_events == NULL) - return -ENOMEM; - if (perf_header__getbuffer64(header, fd, trace_events, - f_header.event_types.size)) - goto out_errno; - trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); - } - - perf_header__process_sections(header, fd, &session->pevent, + perf_header__process_sections(header, fd, &session->tevent, perf_file_section__process); - lseek(fd, header->data_offset, SEEK_SET); - if (perf_evlist__prepare_tracepoint_events(session->evlist, - session->pevent)) + session->tevent.pevent)) goto out_delete_evlist; - header->frozen = 1; return 0; out_errno: return -errno; @@ -2731,7 +2889,7 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, struct perf_evsel *evsel; int err = 0; - list_for_each_entry(evsel, &session->evlist->entries, node) { + evlist__for_each(session->evlist, evsel) { err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, evsel->id, process); if (err) { @@ -2743,7 +2901,8 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, return err; } -int perf_event__process_attr(union perf_event *event, +int perf_event__process_attr(struct perf_tool *tool __maybe_unused, + union perf_event *event, struct perf_evlist **pevlist) { u32 i, ids, n_ids; @@ -2751,12 +2910,12 @@ int perf_event__process_attr(union perf_event *event, struct perf_evlist *evlist = *pevlist; if (evlist == NULL) { - *pevlist = 
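The data.size == 0 warning added above catches perf.data files from a 'perf record' that was killed before the on_exit handler could finalize the header. Purely as an illustration, such a file can be recognized by peeking at that field; the helper name below is invented, byte-swapped files are ignored, and the offset of 48 is derived from the struct perf_file_header layout shown in the header.h hunk further down (u64 magic, size, attr_size, then the attrs and data file sections).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int perf_data_looks_truncated(const char *path)
{
	uint64_t data_size = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	/* data.size sits 48 bytes into the file header (native endianness) */
	if (pread(fd, &data_size, sizeof(data_size), 48) != sizeof(data_size))
		data_size = 0;
	close(fd);
	return data_size == 0;
}

int main(void)
{
	printf("truncated: %d\n", perf_data_looks_truncated("perf.data"));
	return 0;
}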
evlist = perf_evlist__new(NULL, NULL); + *pevlist = evlist = perf_evlist__new(); if (evlist == NULL) return -ENOMEM; } - evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); + evsel = perf_evsel__new(&event->attr.attr); if (evsel == NULL) return -ENOMEM; @@ -2777,63 +2936,7 @@ int perf_event__process_attr(union perf_event *event, perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); } - return 0; -} - -int perf_event__synthesize_event_type(struct perf_tool *tool, - u64 event_id, char *name, - perf_event__handler_t process, - struct machine *machine) -{ - union perf_event ev; - size_t size = 0; - int err = 0; - - memset(&ev, 0, sizeof(ev)); - - ev.event_type.event_type.event_id = event_id; - memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME); - strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); - - ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; - size = strlen(ev.event_type.event_type.name); - size = PERF_ALIGN(size, sizeof(u64)); - ev.event_type.header.size = sizeof(ev.event_type) - - (sizeof(ev.event_type.event_type.name) - size); - - err = process(tool, &ev, NULL, machine); - - return err; -} - -int perf_event__synthesize_event_types(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine) -{ - struct perf_trace_event_type *type; - int i, err = 0; - - for (i = 0; i < trace_event_count; i++) { - type = &trace_events[i]; - - err = perf_event__synthesize_event_type(tool, type->event_id, - type->name, process, - machine); - if (err) { - pr_debug("failed to create perf header event type\n"); - return err; - } - } - - return err; -} - -int perf_event__process_event_type(struct perf_tool *tool __maybe_unused, - union perf_event *event) -{ - if (perf_header__push_event(event->event_type.event_type.event_id, - event->event_type.event_type.name) < 0) - return -ENOMEM; + symbol_conf.nr_events = evlist->nr_entries; return 0; } @@ -2884,34 +2987,42 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, return aligned_size; } -int perf_event__process_tracing_data(union perf_event *event, +int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, + union perf_event *event, struct perf_session *session) { ssize_t size_read, padding, size = event->tracing_data.size; - off_t offset = lseek(session->fd, 0, SEEK_CUR); + int fd = perf_data_file__fd(session->file); + off_t offset = lseek(fd, 0, SEEK_CUR); char buf[BUFSIZ]; /* setup for reading amidst mmap */ - lseek(session->fd, offset + sizeof(struct tracing_data_event), + lseek(fd, offset + sizeof(struct tracing_data_event), SEEK_SET); - size_read = trace_report(session->fd, &session->pevent, + size_read = trace_report(fd, &session->tevent, session->repipe); padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; - if (read(session->fd, buf, padding) < 0) - die("reading input file"); + if (readn(fd, buf, padding) < 0) { + pr_err("%s: reading input file", __func__); + return -1; + } if (session->repipe) { int retw = write(STDOUT_FILENO, buf, padding); - if (retw <= 0 || retw != padding) - die("repiping tracing data padding"); + if (retw <= 0 || retw != padding) { + pr_err("%s: repiping tracing data padding", __func__); + return -1; + } } - if (size_read + padding != size) - die("tracing data size mismatch"); + if (size_read + padding != size) { + pr_err("%s: tracing data size mismatch", __func__); + return -1; + } perf_evlist__prepare_tracepoint_events(session->evlist, - session->pevent); + session->tevent.pevent); return size_read + padding; 
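The tracing-data path above pads whatever trace_report() consumed up to a u64 boundary with PERF_ALIGN before checking it against the advertised size. A local round-up macro with made-up numbers illustrates the arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Round x up to a power-of-two multiple a, like PERF_ALIGN above. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t size_read = 4100;	/* bytes trace_report() consumed */
	uint64_t padding = ALIGN_UP(size_read, sizeof(uint64_t)) - size_read;

	printf("padding = %llu\n", (unsigned long long)padding);	/* 4 */
	return 0;
}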
} diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 879d215cdac..d08cfe49940 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -1,13 +1,13 @@ #ifndef __PERF_HEADER_H #define __PERF_HEADER_H -#include "../../../include/uapi/linux/perf_event.h" +#include <linux/perf_event.h> #include <sys/types.h> #include <stdbool.h> -#include "types.h" +#include <linux/bitmap.h> +#include <linux/types.h> #include "event.h" -#include <linux/bitmap.h> enum { HEADER_RESERVED = 0, /* always cleared */ @@ -29,10 +29,16 @@ enum { HEADER_NUMA_TOPOLOGY, HEADER_BRANCH_STACK, HEADER_PMU_MAPPINGS, + HEADER_GROUP_DESC, HEADER_LAST_FEATURE, HEADER_FEAT_BITS = 256, }; +enum perf_header_version { + PERF_HEADER_VERSION_1, + PERF_HEADER_VERSION_2, +}; + struct perf_file_section { u64 offset; u64 size; @@ -44,6 +50,7 @@ struct perf_file_header { u64 attr_size; struct perf_file_section attrs; struct perf_file_section data; + /* event_types is ignored */ struct perf_file_section event_types; DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; @@ -70,41 +77,37 @@ struct perf_session_env { unsigned long long total_mem; int nr_cmdline; - char *cmdline; int nr_sibling_cores; - char *sibling_cores; int nr_sibling_threads; - char *sibling_threads; int nr_numa_nodes; - char *numa_nodes; int nr_pmu_mappings; + int nr_groups; + char *cmdline; + char *sibling_cores; + char *sibling_threads; + char *numa_nodes; char *pmu_mappings; }; struct perf_header { - int frozen; - bool needs_swap; - s64 attr_offset; - u64 data_offset; - u64 data_size; - u64 event_offset; - u64 event_size; + enum perf_header_version version; + bool needs_swap; + u64 data_offset; + u64 data_size; + u64 feat_offset; DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); - struct perf_session_env env; + struct perf_session_env env; }; struct perf_evlist; struct perf_session; -int perf_session__read_header(struct perf_session *session, int fd); +int perf_session__read_header(struct perf_session *session); int perf_session__write_header(struct perf_session *session, struct perf_evlist *evlist, int fd, bool at_exit); int perf_header__write_pipe(int fd); -int perf_header__push_event(u64 id, const char *name); -char *perf_header__find_event(u64 id); - void perf_header__set_feat(struct perf_header *header, int feat); void perf_header__clear_feat(struct perf_header *header, int feat); bool perf_header__has_feat(const struct perf_header *header, int feat); @@ -129,22 +132,14 @@ int perf_event__synthesize_attr(struct perf_tool *tool, int perf_event__synthesize_attrs(struct perf_tool *tool, struct perf_session *session, perf_event__handler_t process); -int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist); - -int perf_event__synthesize_event_type(struct perf_tool *tool, - u64 event_id, char *name, - perf_event__handler_t process, - struct machine *machine); -int perf_event__synthesize_event_types(struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine); -int perf_event__process_event_type(struct perf_tool *tool, - union perf_event *event); +int perf_event__process_attr(struct perf_tool *tool, union perf_event *event, + struct perf_evlist **pevlist); int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct perf_evlist *evlist, perf_event__handler_t process); -int perf_event__process_tracing_data(union perf_event *event, +int perf_event__process_tracing_data(struct perf_tool *tool, + union perf_event *event, struct perf_session *session); int 
perf_event__synthesize_build_id(struct perf_tool *tool, @@ -154,6 +149,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, int perf_event__process_build_id(struct perf_tool *tool, union perf_event *event, struct perf_session *session); +bool is_perf_magic(u64 magic); /* * arch specific callback diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c index 8b1f6e891b8..86c37c47226 100644 --- a/tools/perf/util/help.c +++ b/tools/perf/util/help.c @@ -22,8 +22,8 @@ static void clean_cmdnames(struct cmdnames *cmds) unsigned int i; for (i = 0; i < cmds->cnt; ++i) - free(cmds->names[i]); - free(cmds->names); + zfree(&cmds->names[i]); + zfree(&cmds->names); cmds->cnt = 0; cmds->alloc = 0; } @@ -263,9 +263,8 @@ static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old) for (i = 0; i < old->cnt; i++) cmds->names[cmds->cnt++] = old->names[i]; - free(old->names); + zfree(&old->names); old->cnt = 0; - old->names = NULL; } const char *help_unknown_cmd(const char *cmd) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 277947a669b..30df6187ee0 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -1,9 +1,10 @@ -#include "annotate.h" #include "util.h" #include "build-id.h" #include "hist.h" #include "session.h" #include "sort.h" +#include "evsel.h" +#include "annotate.h" #include <math.h> static bool hists__filter_entry_by_dso(struct hists *hists, @@ -13,17 +14,11 @@ static bool hists__filter_entry_by_thread(struct hists *hists, static bool hists__filter_entry_by_symbol(struct hists *hists, struct hist_entry *he); -enum hist_filter { - HIST_FILTER__DSO, - HIST_FILTER__THREAD, - HIST_FILTER__PARENT, - HIST_FILTER__SYMBOL, -}; - struct callchain_param callchain_param = { .mode = CHAIN_GRAPH_REL, .min_percent = 0.5, - .order = ORDER_CALLEE + .order = ORDER_CALLEE, + .key = CCKEY_FUNCTION }; u16 hists__col_len(struct hists *hists, enum hist_column col) @@ -66,12 +61,24 @@ static void hists__set_unres_dso_col_len(struct hists *hists, int dso) void hists__calc_col_len(struct hists *hists, struct hist_entry *h) { const unsigned int unresolved_col_width = BITS_PER_LONG / 4; + int symlen; u16 len; - if (h->ms.sym) - hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); - else + /* + * +4 accounts for '[x] ' priv level info + * +2 accounts for 0x prefix on raw addresses + * +3 accounts for ' y ' symtab origin info + */ + if (h->ms.sym) { + symlen = h->ms.sym->namelen + 4; + if (verbose) + symlen += BITS_PER_LONG / 4 + 2 + 3; + hists__new_col_len(hists, HISTC_SYMBOL, symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_SYMBOL, symlen); hists__set_unres_dso_col_len(hists, HISTC_DSO); + } len = thread__comm_len(h->thread); if (hists__new_col_len(hists, HISTC_COMM, len)) @@ -82,14 +89,14 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) hists__new_col_len(hists, HISTC_DSO, len); } + if (h->parent) + hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); + if (h->branch_info) { - int symlen; - /* - * +4 accounts for '[x] ' priv level info - * +2 account of 0x prefix on raw addresses - */ if (h->branch_info->from.sym) { symlen = (int)h->branch_info->from.sym->namelen + 4; + if (verbose) + symlen += BITS_PER_LONG / 4 + 2 + 3; hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); symlen = dso__name_len(h->branch_info->from.map->dso); @@ -102,6 +109,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) if (h->branch_info->to.sym) { symlen = 
(int)h->branch_info->to.sym->namelen + 4; + if (verbose) + symlen += BITS_PER_LONG / 4 + 2 + 3; hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); symlen = dso__name_len(h->branch_info->to.map->dso); @@ -112,6 +121,44 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h) hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); } } + + if (h->mem_info) { + if (h->mem_info->daddr.sym) { + symlen = (int)h->mem_info->daddr.sym->namelen + 4 + + unresolved_col_width + 2; + hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, + symlen); + hists__new_col_len(hists, HISTC_MEM_DCACHELINE, + symlen + 1); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, + symlen); + } + if (h->mem_info->daddr.map) { + symlen = dso__name_len(h->mem_info->daddr.map->dso); + hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, + symlen); + } else { + symlen = unresolved_col_width + 4 + 2; + hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); + } + } else { + symlen = unresolved_col_width + 4 + 2; + hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); + hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); + } + + hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); + hists__new_col_len(hists, HISTC_MEM_TLB, 22); + hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); + hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); + hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); + hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); + + if (h->transaction) + hists__new_col_len(hists, HISTC_TRANSACTION, + hist_entry__transaction_len()); } void hists__output_recalc_col_len(struct hists *hists, int max_rows) @@ -130,30 +177,33 @@ void hists__output_recalc_col_len(struct hists *hists, int max_rows) } } -static void hist_entry__add_cpumode_period(struct hist_entry *he, - unsigned int cpumode, u64 period) +static void he_stat__add_cpumode_period(struct he_stat *he_stat, + unsigned int cpumode, u64 period) { switch (cpumode) { case PERF_RECORD_MISC_KERNEL: - he->stat.period_sys += period; + he_stat->period_sys += period; break; case PERF_RECORD_MISC_USER: - he->stat.period_us += period; + he_stat->period_us += period; break; case PERF_RECORD_MISC_GUEST_KERNEL: - he->stat.period_guest_sys += period; + he_stat->period_guest_sys += period; break; case PERF_RECORD_MISC_GUEST_USER: - he->stat.period_guest_us += period; + he_stat->period_guest_us += period; break; default: break; } } -static void he_stat__add_period(struct he_stat *he_stat, u64 period) +static void he_stat__add_period(struct he_stat *he_stat, u64 period, + u64 weight) { + he_stat->period += period; + he_stat->weight += weight; he_stat->nr_events += 1; } @@ -165,31 +215,38 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) dest->period_guest_sys += src->period_guest_sys; dest->period_guest_us += src->period_guest_us; dest->nr_events += src->nr_events; + dest->weight += src->weight; } -static void hist_entry__decay(struct hist_entry *he) +static void he_stat__decay(struct he_stat *he_stat) { - he->stat.period = (he->stat.period * 7) / 8; - he->stat.nr_events = (he->stat.nr_events * 7) / 8; + he_stat->period = (he_stat->period * 7) / 8; + he_stat->nr_events = (he_stat->nr_events * 7) / 8; + /* XXX need decay for weight too? 
*/ } static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) { u64 prev_period = he->stat.period; + u64 diff; if (prev_period == 0) return true; - hist_entry__decay(he); + he_stat__decay(&he->stat); + if (symbol_conf.cumulate_callchain) + he_stat__decay(he->stat_acc); + diff = prev_period - he->stat.period; + + hists->stats.total_period -= diff; if (!he->filtered) - hists->stats.total_period -= prev_period - he->stat.period; + hists->stats.total_non_filtered_period -= diff; return he->stat.period == 0; } -static void __hists__decay_entries(struct hists *hists, bool zap_user, - bool zap_kernel, bool threaded) +void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) { struct rb_node *next = rb_first(&hists->entries); struct hist_entry *n; @@ -208,56 +265,88 @@ static void __hists__decay_entries(struct hists *hists, bool zap_user, !n->used) { rb_erase(&n->rb_node, &hists->entries); - if (sort__need_collapse || threaded) + if (sort__need_collapse) rb_erase(&n->rb_node_in, &hists->entries_collapsed); - hist_entry__free(n); --hists->nr_entries; + if (!n->filtered) + --hists->nr_non_filtered_entries; + + hist_entry__free(n); } } } -void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) -{ - return __hists__decay_entries(hists, zap_user, zap_kernel, false); -} - -void hists__decay_entries_threaded(struct hists *hists, - bool zap_user, bool zap_kernel) -{ - return __hists__decay_entries(hists, zap_user, zap_kernel, true); -} - /* * histogram, sorted on item, collects periods */ -static struct hist_entry *hist_entry__new(struct hist_entry *template) +static struct hist_entry *hist_entry__new(struct hist_entry *template, + bool sample_self) { - size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; - struct hist_entry *he = malloc(sizeof(*he) + callchain_size); + size_t callchain_size = 0; + struct hist_entry *he; + + if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) + callchain_size = sizeof(struct callchain_root); + + he = zalloc(sizeof(*he) + callchain_size); if (he != NULL) { *he = *template; + if (symbol_conf.cumulate_callchain) { + he->stat_acc = malloc(sizeof(he->stat)); + if (he->stat_acc == NULL) { + free(he); + return NULL; + } + memcpy(he->stat_acc, &he->stat, sizeof(he->stat)); + if (!sample_self) + memset(&he->stat, 0, sizeof(he->stat)); + } + if (he->ms.map) he->ms.map->referenced = true; + + if (he->branch_info) { + /* + * This branch info is (a part of) allocated from + * sample__resolve_bstack() and will be freed after + * adding new entries. So we need to save a copy. 
+ */ + he->branch_info = malloc(sizeof(*he->branch_info)); + if (he->branch_info == NULL) { + free(he->stat_acc); + free(he); + return NULL; + } + + memcpy(he->branch_info, template->branch_info, + sizeof(*he->branch_info)); + + if (he->branch_info->from.map) + he->branch_info->from.map->referenced = true; + if (he->branch_info->to.map) + he->branch_info->to.map->referenced = true; + } + + if (he->mem_info) { + if (he->mem_info->iaddr.map) + he->mem_info->iaddr.map->referenced = true; + if (he->mem_info->daddr.map) + he->mem_info->daddr.map->referenced = true; + } + if (symbol_conf.use_callchain) callchain_init(he->callchain); + + INIT_LIST_HEAD(&he->pairs.node); } return he; } -static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) -{ - if (!h->filtered) { - hists__calc_col_len(hists, h); - ++hists->nr_entries; - hists->stats.total_period += h->stat.period; - } -} - static u8 symbol__parent_filter(const struct symbol *parent) { if (symbol_conf.exclude_other && parent == NULL) @@ -266,16 +355,16 @@ static u8 symbol__parent_filter(const struct symbol *parent) } static struct hist_entry *add_hist_entry(struct hists *hists, - struct hist_entry *entry, - struct addr_location *al, - u64 period) + struct hist_entry *entry, + struct addr_location *al, + bool sample_self) { struct rb_node **p; struct rb_node *parent = NULL; struct hist_entry *he; - int cmp; - - pthread_mutex_lock(&hists->lock); + int64_t cmp; + u64 period = entry->stat.period; + u64 weight = entry->stat.weight; p = &hists->entries_in->rb_node; @@ -283,10 +372,25 @@ static struct hist_entry *add_hist_entry(struct hists *hists, parent = *p; he = rb_entry(parent, struct hist_entry, rb_node_in); - cmp = hist_entry__cmp(entry, he); + /* + * Make sure that it receives arguments in a same order as + * hist_entry__collapse() so that we can use an appropriate + * function when searching an entry regardless which sort + * keys were used. + */ + cmp = hist_entry__cmp(he, entry); if (!cmp) { - he_stat__add_period(&he->stat, period); + if (sample_self) + he_stat__add_period(&he->stat, period, weight); + if (symbol_conf.cumulate_callchain) + he_stat__add_period(he->stat_acc, period, weight); + + /* + * This mem info was allocated from sample__resolve_mem + * and will not be used anymore. 
+ */ + zfree(&entry->mem_info); /* If the map of an existing hist_entry has * become out-of-date due to an exec() or @@ -308,80 +412,488 @@ static struct hist_entry *add_hist_entry(struct hists *hists, p = &(*p)->rb_right; } - he = hist_entry__new(entry); + he = hist_entry__new(entry, sample_self); if (!he) - goto out_unlock; + return NULL; rb_link_node(&he->rb_node_in, parent, p); rb_insert_color(&he->rb_node_in, hists->entries_in); out: - hist_entry__add_cpumode_period(he, al->cpumode, period); -out_unlock: - pthread_mutex_unlock(&hists->lock); + if (sample_self) + he_stat__add_cpumode_period(&he->stat, al->cpumode, period); + if (symbol_conf.cumulate_callchain) + he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period); return he; } -struct hist_entry *__hists__add_branch_entry(struct hists *self, - struct addr_location *al, - struct symbol *sym_parent, - struct branch_info *bi, - u64 period) +struct hist_entry *__hists__add_entry(struct hists *hists, + struct addr_location *al, + struct symbol *sym_parent, + struct branch_info *bi, + struct mem_info *mi, + u64 period, u64 weight, u64 transaction, + bool sample_self) { struct hist_entry entry = { .thread = al->thread, + .comm = thread__comm(al->thread), .ms = { - .map = bi->to.map, - .sym = bi->to.sym, + .map = al->map, + .sym = al->sym, }, - .cpu = al->cpu, - .ip = bi->to.addr, - .level = al->level, + .cpu = al->cpu, + .cpumode = al->cpumode, + .ip = al->addr, + .level = al->level, .stat = { - .period = period, .nr_events = 1, + .period = period, + .weight = weight, }, .parent = sym_parent, - .filtered = symbol__parent_filter(sym_parent), + .filtered = symbol__parent_filter(sym_parent) | al->filtered, + .hists = hists, .branch_info = bi, - .hists = self, + .mem_info = mi, + .transaction = transaction, }; - return add_hist_entry(self, &entry, al, period); + return add_hist_entry(hists, &entry, al, sample_self); } -struct hist_entry *__hists__add_entry(struct hists *self, - struct addr_location *al, - struct symbol *sym_parent, u64 period) +static int +iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) { - struct hist_entry entry = { - .thread = al->thread, + return 0; +} + +static int +iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_sample *sample = iter->sample; + struct mem_info *mi; + + mi = sample__resolve_mem(sample, al); + if (mi == NULL) + return -ENOMEM; + + iter->priv = mi; + return 0; +} + +static int +iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + u64 cost; + struct mem_info *mi = iter->priv; + struct hist_entry *he; + + if (mi == NULL) + return -EINVAL; + + cost = iter->sample->weight; + if (!cost) + cost = 1; + + /* + * must pass period=weight in order to get the correct + * sorting from hists__collapse_resort() which is solely + * based on periods. We want sorting be done on nr_events * weight + * and this is indirectly achieved by passing period=weight here + * and the he_stat__add_period() function. 
+ */ + he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi, + cost, cost, 0, true); + if (!he) + return -ENOMEM; + + iter->he = he; + return 0; +} + +static int +iter_finish_mem_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + struct perf_evsel *evsel = iter->evsel; + struct hist_entry *he = iter->he; + int err = -EINVAL; + + if (he == NULL) + goto out; + + hists__inc_nr_samples(&evsel->hists, he->filtered); + + err = hist_entry__append_callchain(he, iter->sample); + +out: + /* + * We don't need to free iter->priv (mem_info) here since + * the mem info was either already freed in add_hist_entry() or + * passed to a new hist entry by hist_entry__new(). + */ + iter->priv = NULL; + + iter->he = NULL; + return err; +} + +static int +iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi; + struct perf_sample *sample = iter->sample; + + bi = sample__resolve_bstack(sample, al); + if (!bi) + return -ENOMEM; + + iter->curr = 0; + iter->total = sample->branch_stack->nr; + + iter->priv = bi; + return 0; +} + +static int +iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + /* to avoid calling callback function */ + iter->he = NULL; + + return 0; +} + +static int +iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi = iter->priv; + int i = iter->curr; + + if (bi == NULL) + return 0; + + if (iter->curr >= iter->total) + return 0; + + al->map = bi[i].to.map; + al->sym = bi[i].to.sym; + al->addr = bi[i].to.addr; + return 1; +} + +static int +iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct branch_info *bi; + struct perf_evsel *evsel = iter->evsel; + struct hist_entry *he = NULL; + int i = iter->curr; + int err = 0; + + bi = iter->priv; + + if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) + goto out; + + /* + * The report shows the percentage of total branches captured + * and not events sampled. Thus we use a pseudo period of 1. + */ + he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL, + 1, 1, 0, true); + if (he == NULL) + return -ENOMEM; + + hists__inc_nr_samples(&evsel->hists, he->filtered); + +out: + iter->he = he; + iter->curr++; + return err; +} + +static int +iter_finish_branch_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + zfree(&iter->priv); + iter->he = NULL; + + return iter->curr >= iter->total ? 
0 : -1; +} + +static int +iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + return 0; +} + +static int +iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry *he; + + he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, true); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + return 0; +} + +static int +iter_finish_normal_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + struct hist_entry *he = iter->he; + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + + if (he == NULL) + return 0; + + iter->he = NULL; + + hists__inc_nr_samples(&evsel->hists, he->filtered); + + return hist_entry__append_callchain(he, sample); +} + +static int +iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, + struct addr_location *al __maybe_unused) +{ + struct hist_entry **he_cache; + + callchain_cursor_commit(&callchain_cursor); + + /* + * This is for detecting cycles or recursions so that they're + * cumulated only one time to prevent entries more than 100% + * overhead. + */ + he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1)); + if (he_cache == NULL) + return -ENOMEM; + + iter->priv = he_cache; + iter->curr = 0; + + return 0; +} + +static int +iter_add_single_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry **he_cache = iter->priv; + struct hist_entry *he; + int err = 0; + + he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, true); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + he_cache[iter->curr++] = he; + + callchain_append(he->callchain, &callchain_cursor, sample->period); + + /* + * We need to re-initialize the cursor since callchain_append() + * advanced the cursor to the end. + */ + callchain_cursor_commit(&callchain_cursor); + + hists__inc_nr_samples(&evsel->hists, he->filtered); + + return err; +} + +static int +iter_next_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct callchain_cursor_node *node; + + node = callchain_cursor_current(&callchain_cursor); + if (node == NULL) + return 0; + + return fill_callchain_info(al, node, iter->hide_unresolved); +} + +static int +iter_add_next_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al) +{ + struct perf_evsel *evsel = iter->evsel; + struct perf_sample *sample = iter->sample; + struct hist_entry **he_cache = iter->priv; + struct hist_entry *he; + struct hist_entry he_tmp = { + .cpu = al->cpu, + .thread = al->thread, + .comm = thread__comm(al->thread), + .ip = al->addr, .ms = { - .map = al->map, - .sym = al->sym, - }, - .cpu = al->cpu, - .ip = al->addr, - .level = al->level, - .stat = { - .period = period, - .nr_events = 1, + .map = al->map, + .sym = al->sym, }, - .parent = sym_parent, - .filtered = symbol__parent_filter(sym_parent), - .hists = self, + .parent = iter->parent, }; + int i; + struct callchain_cursor cursor; + + callchain_cursor_snapshot(&cursor, &callchain_cursor); + + callchain_cursor_advance(&callchain_cursor); + + /* + * Check if there's duplicate entries in the callchain. 
+ * It's possible that it has cycles or recursive calls. + */ + for (i = 0; i < iter->curr; i++) { + if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) { + /* to avoid calling callback function */ + iter->he = NULL; + return 0; + } + } + + he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL, + sample->period, sample->weight, + sample->transaction, false); + if (he == NULL) + return -ENOMEM; + + iter->he = he; + he_cache[iter->curr++] = he; + + callchain_append(he->callchain, &cursor, sample->period); + return 0; +} + +static int +iter_finish_cumulative_entry(struct hist_entry_iter *iter, + struct addr_location *al __maybe_unused) +{ + zfree(&iter->priv); + iter->he = NULL; + + return 0; +} + +const struct hist_iter_ops hist_iter_mem = { + .prepare_entry = iter_prepare_mem_entry, + .add_single_entry = iter_add_single_mem_entry, + .next_entry = iter_next_nop_entry, + .add_next_entry = iter_add_next_nop_entry, + .finish_entry = iter_finish_mem_entry, +}; + +const struct hist_iter_ops hist_iter_branch = { + .prepare_entry = iter_prepare_branch_entry, + .add_single_entry = iter_add_single_branch_entry, + .next_entry = iter_next_branch_entry, + .add_next_entry = iter_add_next_branch_entry, + .finish_entry = iter_finish_branch_entry, +}; + +const struct hist_iter_ops hist_iter_normal = { + .prepare_entry = iter_prepare_normal_entry, + .add_single_entry = iter_add_single_normal_entry, + .next_entry = iter_next_nop_entry, + .add_next_entry = iter_add_next_nop_entry, + .finish_entry = iter_finish_normal_entry, +}; + +const struct hist_iter_ops hist_iter_cumulative = { + .prepare_entry = iter_prepare_cumulative_entry, + .add_single_entry = iter_add_single_cumulative_entry, + .next_entry = iter_next_cumulative_entry, + .add_next_entry = iter_add_next_cumulative_entry, + .finish_entry = iter_finish_cumulative_entry, +}; + +int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + struct perf_evsel *evsel, struct perf_sample *sample, + int max_stack_depth, void *arg) +{ + int err, err2; + + err = sample__resolve_callchain(sample, &iter->parent, evsel, al, + max_stack_depth); + if (err) + return err; + + iter->evsel = evsel; + iter->sample = sample; + + err = iter->ops->prepare_entry(iter, al); + if (err) + goto out; + + err = iter->ops->add_single_entry(iter, al); + if (err) + goto out; - return add_hist_entry(self, &entry, al, period); + if (iter->he && iter->add_entry_cb) { + err = iter->add_entry_cb(iter, al, true, arg); + if (err) + goto out; + } + + while (iter->ops->next_entry(iter, al)) { + err = iter->ops->add_next_entry(iter, al); + if (err) + break; + + if (iter->he && iter->add_entry_cb) { + err = iter->add_entry_cb(iter, al, false, arg); + if (err) + goto out; + } + } + +out: + err2 = iter->ops->finish_entry(iter, al); + if (!err) + err = err2; + + return err; } int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { - struct sort_entry *se; + struct perf_hpp_fmt *fmt; int64_t cmp = 0; - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->se_cmp(left, right); + perf_hpp__for_each_sort_list(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + cmp = fmt->cmp(left, right); if (cmp) break; } @@ -392,15 +904,14 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) { - struct sort_entry *se; + struct perf_hpp_fmt *fmt; int64_t cmp = 0; - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry 
*, struct hist_entry *); - - f = se->se_collapse ?: se->se_cmp; + perf_hpp__for_each_sort_list(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; - cmp = f(left, right); + cmp = fmt->collapse(left, right); if (cmp) break; } @@ -410,6 +921,10 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) void hist_entry__free(struct hist_entry *he) { + zfree(&he->branch_info); + zfree(&he->mem_info); + zfree(&he->stat_acc); + free_srcline(he->srcline); free(he); } @@ -434,6 +949,8 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, if (!cmp) { he_stat__add_stat(&iter->stat, &he->stat); + if (symbol_conf.cumulate_callchain) + he_stat__add_stat(iter->stat_acc, he->stat_acc); if (symbol_conf.use_callchain) { callchain_cursor_reset(&callchain_cursor); @@ -478,19 +995,21 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he) hists__filter_entry_by_symbol(hists, he); } -static void __hists__collapse_resort(struct hists *hists, bool threaded) +void hists__collapse_resort(struct hists *hists, struct ui_progress *prog) { struct rb_root *root; struct rb_node *next; struct hist_entry *n; - if (!sort__need_collapse && !threaded) + if (!sort__need_collapse) return; root = hists__get_rotate_entries_in(hists); next = rb_first(root); while (next) { + if (session_done()) + break; n = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&n->rb_node_in); @@ -503,22 +1022,56 @@ static void __hists__collapse_resort(struct hists *hists, bool threaded) */ hists__apply_filters(hists, n); } + if (prog) + ui_progress__update(prog, 1); } } -void hists__collapse_resort(struct hists *hists) +static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b) { - return __hists__collapse_resort(hists, false); + struct perf_hpp_fmt *fmt; + int64_t cmp = 0; + + perf_hpp__for_each_sort_list(fmt) { + if (perf_hpp__should_skip(fmt)) + continue; + + cmp = fmt->sort(a, b); + if (cmp) + break; + } + + return cmp; } -void hists__collapse_resort_threaded(struct hists *hists) +static void hists__reset_filter_stats(struct hists *hists) { - return __hists__collapse_resort(hists, true); + hists->nr_non_filtered_entries = 0; + hists->stats.total_non_filtered_period = 0; } -/* - * reverse the map, sort on period. 
- */ +void hists__reset_stats(struct hists *hists) +{ + hists->nr_entries = 0; + hists->stats.total_period = 0; + + hists__reset_filter_stats(hists); +} + +static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h) +{ + hists->nr_non_filtered_entries++; + hists->stats.total_non_filtered_period += h->stat.period; +} + +void hists__inc_stats(struct hists *hists, struct hist_entry *h) +{ + if (!h->filtered) + hists__inc_filter_stats(hists, h); + + hists->nr_entries++; + hists->stats.total_period += h->stat.period; +} static void __hists__insert_output_entry(struct rb_root *entries, struct hist_entry *he, @@ -536,7 +1089,7 @@ static void __hists__insert_output_entry(struct rb_root *entries, parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); - if (he->stat.period > iter->stat.period) + if (hist_entry__sort(he, iter) > 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -546,7 +1099,7 @@ static void __hists__insert_output_entry(struct rb_root *entries, rb_insert_color(&he->rb_node, entries); } -static void __hists__output_resort(struct hists *hists, bool threaded) +void hists__output_resort(struct hists *hists) { struct rb_root *root; struct rb_node *next; @@ -555,7 +1108,7 @@ static void __hists__output_resort(struct hists *hists, bool threaded) min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); - if (sort__need_collapse || threaded) + if (sort__need_collapse) root = &hists->entries_collapsed; else root = hists->entries_in; @@ -563,8 +1116,7 @@ static void __hists__output_resort(struct hists *hists, bool threaded) next = rb_first(root); hists->entries = RB_ROOT; - hists->nr_entries = 0; - hists->stats.total_period = 0; + hists__reset_stats(hists); hists__reset_col_len(hists); while (next) { @@ -572,18 +1124,11 @@ static void __hists__output_resort(struct hists *hists, bool threaded) next = rb_next(&n->rb_node_in); __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); - hists__inc_nr_entries(hists, n); - } -} + hists__inc_stats(hists, n); -void hists__output_resort(struct hists *hists) -{ - return __hists__output_resort(hists, false); -} - -void hists__output_resort_threaded(struct hists *hists) -{ - return __hists__output_resort(hists, true); + if (!n->filtered) + hists__calc_col_len(hists, n); + } } static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, @@ -593,13 +1138,13 @@ static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h if (h->filtered) return; - ++hists->nr_entries; - if (h->ms.unfolded) - hists->nr_entries += h->nr_rows; + /* force fold unfiltered entry for simplicity */ + h->ms.unfolded = false; h->row_offset = 0; - hists->stats.total_period += h->stat.period; - hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events; + hists->stats.nr_non_filtered_samples += h->stat.nr_events; + + hists__inc_filter_stats(hists, h); hists__calc_col_len(hists, h); } @@ -620,8 +1165,9 @@ void hists__filter_by_dso(struct hists *hists) { struct rb_node *nd; - hists->nr_entries = hists->stats.total_period = 0; - hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; + hists->stats.nr_non_filtered_samples = 0; + + hists__reset_filter_stats(hists); hists__reset_col_len(hists); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { @@ -653,8 +1199,9 @@ void hists__filter_by_thread(struct hists *hists) { struct rb_node *nd; - hists->nr_entries = hists->stats.total_period = 0; - hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; + hists->stats.nr_non_filtered_samples 
= 0; + + hists__reset_filter_stats(hists); hists__reset_col_len(hists); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { @@ -684,8 +1231,9 @@ void hists__filter_by_symbol(struct hists *hists) { struct rb_node *nd; - hists->nr_entries = hists->stats.total_period = 0; - hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; + hists->stats.nr_non_filtered_samples = 0; + + hists__reset_filter_stats(hists); hists__reset_col_len(hists); for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { @@ -698,18 +1246,169 @@ void hists__filter_by_symbol(struct hists *hists) } } -int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) +void events_stats__inc(struct events_stats *stats, u32 type) { - return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); + ++stats->nr_events[0]; + ++stats->nr_events[type]; } -int hist_entry__annotate(struct hist_entry *he, size_t privsize) +void hists__inc_nr_events(struct hists *hists, u32 type) { - return symbol__annotate(he->ms.sym, he->ms.map, privsize); + events_stats__inc(&hists->stats, type); } -void hists__inc_nr_events(struct hists *hists, u32 type) +void hists__inc_nr_samples(struct hists *hists, bool filtered) +{ + events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE); + if (!filtered) + hists->stats.nr_non_filtered_samples++; +} + +static struct hist_entry *hists__add_dummy_entry(struct hists *hists, + struct hist_entry *pair) +{ + struct rb_root *root; + struct rb_node **p; + struct rb_node *parent = NULL; + struct hist_entry *he; + int64_t cmp; + + if (sort__need_collapse) + root = &hists->entries_collapsed; + else + root = hists->entries_in; + + p = &root->rb_node; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node_in); + + cmp = hist_entry__collapse(he, pair); + + if (!cmp) + goto out; + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = hist_entry__new(pair, true); + if (he) { + memset(&he->stat, 0, sizeof(he->stat)); + he->hists = hists; + rb_link_node(&he->rb_node_in, parent, p); + rb_insert_color(&he->rb_node_in, root); + hists__inc_stats(hists, he); + he->dummy = true; + } +out: + return he; +} + +static struct hist_entry *hists__find_entry(struct hists *hists, + struct hist_entry *he) { - ++hists->stats.nr_events[0]; - ++hists->stats.nr_events[type]; + struct rb_node *n; + + if (sort__need_collapse) + n = hists->entries_collapsed.rb_node; + else + n = hists->entries_in->rb_node; + + while (n) { + struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); + int64_t cmp = hist_entry__collapse(iter, he); + + if (cmp < 0) + n = n->rb_left; + else if (cmp > 0) + n = n->rb_right; + else + return iter; + } + + return NULL; +} + +/* + * Look for pairs to link to the leader buckets (hist_entries): + */ +void hists__match(struct hists *leader, struct hists *other) +{ + struct rb_root *root; + struct rb_node *nd; + struct hist_entry *pos, *pair; + + if (sort__need_collapse) + root = &leader->entries_collapsed; + else + root = leader->entries_in; + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node_in); + pair = hists__find_entry(other, pos); + + if (pair) + hist_entry__add_pair(pair, pos); + } +} + +/* + * Look for entries in the other hists that are not present in the leader, if + * we find them, just add a dummy entry on the leader hists, with period=0, + * nr_events=0, to serve as the list header. 
+ */ +int hists__link(struct hists *leader, struct hists *other) +{ + struct rb_root *root; + struct rb_node *nd; + struct hist_entry *pos, *pair; + + if (sort__need_collapse) + root = &other->entries_collapsed; + else + root = other->entries_in; + + for (nd = rb_first(root); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct hist_entry, rb_node_in); + + if (!hist_entry__has_pairs(pos)) { + pair = hists__add_dummy_entry(leader, pos); + if (pair == NULL) + return -1; + hist_entry__add_pair(pos, pair); + } + } + + return 0; +} + +u64 hists__total_period(struct hists *hists) +{ + return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period : + hists->stats.total_period; +} + +int parse_filter_percentage(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) +{ + if (!strcmp(arg, "relative")) + symbol_conf.filter_relative = true; + else if (!strcmp(arg, "absolute")) + symbol_conf.filter_relative = false; + else + return -1; + + return 0; +} + +int perf_hist_config(const char *var, const char *value) +{ + if (!strcmp(var, "hist.percentage")) + return parse_filter_percentage(NULL, value, 0); + + return 0; } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 66cb31fe81d..742f49a8572 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -4,6 +4,9 @@ #include <linux/types.h> #include <pthread.h> #include "callchain.h" +#include "header.h" +#include "color.h" +#include "ui/progress.h" extern struct callchain_param callchain_param; @@ -11,6 +14,15 @@ struct hist_entry; struct addr_location; struct symbol; +enum hist_filter { + HIST_FILTER__DSO, + HIST_FILTER__THREAD, + HIST_FILTER__PARENT, + HIST_FILTER__SYMBOL, + HIST_FILTER__GUEST, + HIST_FILTER__HOST, +}; + /* * The kernel collects the number of events it couldn't send in a stretch and * when possible sends this number in a PERF_RECORD_LOST event. 
The number of @@ -25,9 +37,11 @@ struct symbol; */ struct events_stats { u64 total_period; + u64 total_non_filtered_period; u64 total_lost; u64 total_invalid_chains; u32 nr_events[PERF_RECORD_HEADER_MAX]; + u32 nr_non_filtered_samples; u32 nr_lost_warned; u32 nr_unknown_events; u32 nr_invalid_chains; @@ -42,12 +56,24 @@ enum hist_column { HISTC_COMM, HISTC_PARENT, HISTC_CPU, + HISTC_SRCLINE, HISTC_MISPREDICT, + HISTC_IN_TX, + HISTC_ABORT, HISTC_SYMBOL_FROM, HISTC_SYMBOL_TO, HISTC_DSO_FROM, HISTC_DSO_TO, - HISTC_SRCLINE, + HISTC_LOCAL_WEIGHT, + HISTC_GLOBAL_WEIGHT, + HISTC_MEM_DADDR_SYMBOL, + HISTC_MEM_DADDR_DSO, + HISTC_MEM_LOCKED, + HISTC_MEM_TLB, + HISTC_MEM_LVL, + HISTC_MEM_SNOOP, + HISTC_MEM_DCACHELINE, + HISTC_TRANSACTION, HISTC_NR_COLS, /* Last entry */ }; @@ -60,6 +86,7 @@ struct hists { struct rb_root entries; struct rb_root entries_collapsed; u64 nr_entries; + u64 nr_non_filtered_entries; const struct thread *thread_filter; const struct dso *dso_filter; const char *uid_filter_str; @@ -70,50 +97,93 @@ struct hists { u16 col_len[HISTC_NR_COLS]; }; -struct hist_entry *__hists__add_entry(struct hists *self, +struct hist_entry_iter; + +struct hist_iter_ops { + int (*prepare_entry)(struct hist_entry_iter *, struct addr_location *); + int (*add_single_entry)(struct hist_entry_iter *, struct addr_location *); + int (*next_entry)(struct hist_entry_iter *, struct addr_location *); + int (*add_next_entry)(struct hist_entry_iter *, struct addr_location *); + int (*finish_entry)(struct hist_entry_iter *, struct addr_location *); +}; + +struct hist_entry_iter { + int total; + int curr; + + bool hide_unresolved; + + struct perf_evsel *evsel; + struct perf_sample *sample; + struct hist_entry *he; + struct symbol *parent; + void *priv; + + const struct hist_iter_ops *ops; + /* user-defined callback function (optional) */ + int (*add_entry_cb)(struct hist_entry_iter *iter, + struct addr_location *al, bool single, void *arg); +}; + +extern const struct hist_iter_ops hist_iter_normal; +extern const struct hist_iter_ops hist_iter_branch; +extern const struct hist_iter_ops hist_iter_mem; +extern const struct hist_iter_ops hist_iter_cumulative; + +struct hist_entry *__hists__add_entry(struct hists *hists, struct addr_location *al, - struct symbol *parent, u64 period); + struct symbol *parent, + struct branch_info *bi, + struct mem_info *mi, u64 period, + u64 weight, u64 transaction, + bool sample_self); +int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, + struct perf_evsel *evsel, struct perf_sample *sample, + int max_stack_depth, void *arg); + int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); -int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size, +int hist_entry__transaction_len(void); +int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size, struct hists *hists); void hist_entry__free(struct hist_entry *); -struct hist_entry *__hists__add_branch_entry(struct hists *self, - struct addr_location *al, - struct symbol *sym_parent, - struct branch_info *bi, - u64 period); - -void hists__output_resort(struct hists *self); -void hists__output_resort_threaded(struct hists *hists); -void hists__collapse_resort(struct hists *self); -void hists__collapse_resort_threaded(struct hists *hists); +void hists__output_resort(struct hists *hists); +void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); void 
hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel); -void hists__decay_entries_threaded(struct hists *hists, bool zap_user, - bool zap_kernel); void hists__output_recalc_col_len(struct hists *hists, int max_rows); -void hists__inc_nr_events(struct hists *self, u32 type); -size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); +u64 hists__total_period(struct hists *hists); +void hists__reset_stats(struct hists *hists); +void hists__inc_stats(struct hists *hists, struct hist_entry *h); +void hists__inc_nr_events(struct hists *hists, u32 type); +void hists__inc_nr_samples(struct hists *hists, bool filtered); +void events_stats__inc(struct events_stats *stats, u32 type); +size_t events_stats__fprintf(struct events_stats *stats, FILE *fp); -size_t hists__fprintf(struct hists *self, bool show_header, int max_rows, - int max_cols, FILE *fp); - -int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr); -int hist_entry__annotate(struct hist_entry *self, size_t privsize); +size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows, + int max_cols, float min_pcnt, FILE *fp); void hists__filter_by_dso(struct hists *hists); void hists__filter_by_thread(struct hists *hists); void hists__filter_by_symbol(struct hists *hists); -u16 hists__col_len(struct hists *self, enum hist_column col); -void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); -bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); +static inline bool hists__has_filter(struct hists *hists) +{ + return hists->thread_filter || hists->dso_filter || + hists->symbol_filter_str; +} + +u16 hists__col_len(struct hists *hists, enum hist_column col); +void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len); +bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len); void hists__reset_col_len(struct hists *hists); void hists__calc_col_len(struct hists *hists, struct hist_entry *he); +void hists__match(struct hists *leader, struct hists *other); +int hists__link(struct hists *leader, struct hists *other); + struct perf_hpp { char *buf; size_t size; @@ -122,86 +192,156 @@ struct perf_hpp { }; struct perf_hpp_fmt { - bool cond; - int (*header)(struct perf_hpp *hpp); - int (*width)(struct perf_hpp *hpp); - int (*color)(struct perf_hpp *hpp, struct hist_entry *he); - int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); + int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel); + int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel); + int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he); + int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he); + int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b); + int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b); + int64_t (*sort)(struct hist_entry *a, struct hist_entry *b); + + struct list_head list; + struct list_head sort_list; + bool elide; }; +extern struct list_head perf_hpp__list; +extern struct list_head perf_hpp__sort_list; + +#define perf_hpp__for_each_format(format) \ + list_for_each_entry(format, &perf_hpp__list, list) + +#define perf_hpp__for_each_format_safe(format, tmp) \ + list_for_each_entry_safe(format, tmp, &perf_hpp__list, list) + +#define perf_hpp__for_each_sort_list(format) \ + list_for_each_entry(format, &perf_hpp__sort_list, sort_list) + +#define perf_hpp__for_each_sort_list_safe(format, 
tmp) \ + list_for_each_entry_safe(format, tmp, &perf_hpp__sort_list, sort_list) + extern struct perf_hpp_fmt perf_hpp__format[]; enum { - PERF_HPP__BASELINE, + /* Matches perf_hpp__format array. */ PERF_HPP__OVERHEAD, PERF_HPP__OVERHEAD_SYS, PERF_HPP__OVERHEAD_US, PERF_HPP__OVERHEAD_GUEST_SYS, PERF_HPP__OVERHEAD_GUEST_US, + PERF_HPP__OVERHEAD_ACC, PERF_HPP__SAMPLES, PERF_HPP__PERIOD, - PERF_HPP__DELTA, - PERF_HPP__DISPL, PERF_HPP__MAX_INDEX }; void perf_hpp__init(void); -void perf_hpp__column_enable(unsigned col, bool enable); -int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, - bool color); +void perf_hpp__column_register(struct perf_hpp_fmt *format); +void perf_hpp__column_unregister(struct perf_hpp_fmt *format); +void perf_hpp__column_enable(unsigned col); +void perf_hpp__column_disable(unsigned col); +void perf_hpp__cancel_cumulate(void); + +void perf_hpp__register_sort_field(struct perf_hpp_fmt *format); +void perf_hpp__setup_output_field(void); +void perf_hpp__reset_output_field(void); +void perf_hpp__append_sort_keys(void); + +bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format); +bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b); + +static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format) +{ + return format->elide; +} + +void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists); + +typedef u64 (*hpp_field_fn)(struct hist_entry *he); +typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front); +typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...); + +int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, + hpp_snprint_fn print_fn, bool fmt_percent); +int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he, + hpp_field_fn get_field, const char *fmt, + hpp_snprint_fn print_fn, bool fmt_percent); + +static inline void advance_hpp(struct perf_hpp *hpp, int inc) +{ + hpp->buf += inc; + hpp->size -= inc; +} + +static inline size_t perf_hpp__use_color(void) +{ + return !symbol_conf.field_sep; +} + +static inline size_t perf_hpp__color_overhead(void) +{ + return perf_hpp__use_color() ? 
+ (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX + : 0; +} struct perf_evlist; -#ifdef NEWT_SUPPORT +struct hist_browser_timer { + void (*timer)(void *arg); + void *arg; + int refresh; +}; + +#ifdef HAVE_SLANG_SUPPORT #include "../ui/keysyms.h" -int hist_entry__tui_annotate(struct hist_entry *he, int evidx, - void(*timer)(void *arg), void *arg, int delay_secs); +int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel, + struct hist_browser_timer *hbt); int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, - void(*timer)(void *arg), void *arg, - int refresh); + struct hist_browser_timer *hbt, + float min_pcnt, + struct perf_session_env *env); +int script_browse(const char *script_opt); #else static inline int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, const char *help __maybe_unused, - void(*timer)(void *arg) __maybe_unused, - void *arg __maybe_unused, - int refresh __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused, + float min_pcnt __maybe_unused, + struct perf_session_env *env __maybe_unused) { return 0; } -static inline int hist_entry__tui_annotate(struct hist_entry *self - __maybe_unused, - int evidx __maybe_unused, - void(*timer)(void *arg) - __maybe_unused, - void *arg __maybe_unused, - int delay_secs __maybe_unused) +static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct hist_browser_timer *hbt __maybe_unused) { return 0; } -#define K_LEFT -1 -#define K_RIGHT -2 -#endif -#ifdef GTK2_SUPPORT -int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help, - void(*timer)(void *arg), void *arg, - int refresh); -#else -static inline -int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, - const char *help __maybe_unused, - void(*timer)(void *arg) __maybe_unused, - void *arg __maybe_unused, - int refresh __maybe_unused) +static inline int script_browse(const char *script_opt __maybe_unused) { return 0; } + +#define K_LEFT -1000 +#define K_RIGHT -2000 +#define K_SWITCH_INPUT_DATA -3000 #endif -unsigned int hists__sort_list_width(struct hists *self); +unsigned int hists__sort_list_width(struct hists *hists); + +struct option; +int parse_filter_percentage(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused); +int perf_hist_config(const char *var, const char *value); #endif /* __PERF_HIST_H */ diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h new file mode 100644 index 00000000000..d82b170bb21 --- /dev/null +++ b/tools/perf/util/include/asm/hash.h @@ -0,0 +1,6 @@ +#ifndef __ASM_GENERIC_HASH_H +#define __ASM_GENERIC_HASH_H + +/* Stub */ + +#endif /* __ASM_GENERIC_HASH_H */ diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h index cf6727e99c4..8f149655f49 100644 --- a/tools/perf/util/include/dwarf-regs.h +++ b/tools/perf/util/include/dwarf-regs.h @@ -1,7 +1,7 @@ #ifndef _PERF_DWARF_REGS_H_ #define _PERF_DWARF_REGS_H_ -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT const char *get_arch_regstr(unsigned int n); #endif diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h index bb162e40c76..01ffd12dc79 100644 --- a/tools/perf/util/include/linux/bitmap.h +++ b/tools/perf/util/include/linux/bitmap.h @@ -4,6 +4,9 @@ #include <string.h> #include <linux/bitops.h> +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] 
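The DECLARE_BITMAP() macro moved into the tools' bitmap.h here sizes an unsigned long array to hold the requested number of bits; it is what backs the adds_features bitmap (HEADER_FEAT_BITS wide) seen earlier in struct perf_file_header and struct perf_header. A self-contained sketch of the same pattern, with hypothetical feat_set()/feat_test() helpers standing in for the kernel's set_bit()/test_bit():

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG		(sizeof(long) * CHAR_BIT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

#define FEAT_BITS 256

static void feat_set(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int feat_test(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	DECLARE_BITMAP(adds_features, FEAT_BITS) = { 0 };

	feat_set(adds_features, 17);	/* e.g. some HEADER_* feature index */
	printf("bit 17: %d, bit 18: %d\n",
	       feat_test(adds_features, 17), feat_test(adds_features, 18));
	return 0;
}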
+ int __bitmap_weight(const unsigned long *bitmap, int bits); void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index a55d8cf083c..dadfa7e5428 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h @@ -14,6 +14,7 @@ #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) +#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ @@ -86,13 +87,15 @@ static __always_inline unsigned long __ffs(unsigned long word) return num; } +typedef const unsigned long __attribute__((__may_alias__)) long_alias_t; + /* * Find the first set bit in a memory region. */ static inline unsigned long find_first_bit(const unsigned long *addr, unsigned long size) { - const unsigned long *p = addr; + long_alias_t *p = (long_alias_t *) addr; unsigned long result = 0; unsigned long tmp; diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h deleted file mode 100644 index 96b919dae11..00000000000 --- a/tools/perf/util/include/linux/compiler.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef _PERF_LINUX_COMPILER_H_ -#define _PERF_LINUX_COMPILER_H_ - -#ifndef __always_inline -#define __always_inline inline -#endif -#define __user -#ifndef __attribute_const__ -#define __attribute_const__ -#endif - -#ifndef __maybe_unused -#define __maybe_unused __attribute__((unused)) -#endif -#define __packed __attribute__((__packed__)) - -#ifndef __force -#define __force -#endif - -#endif diff --git a/tools/perf/util/include/linux/export.h b/tools/perf/util/include/linux/export.h deleted file mode 100644 index b43e2dc21e0..00000000000 --- a/tools/perf/util/include/linux/export.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef PERF_LINUX_MODULE_H -#define PERF_LINUX_MODULE_H - -#define EXPORT_SYMBOL(name) - -#endif diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h deleted file mode 100644 index 201f5739799..00000000000 --- a/tools/perf/util/include/linux/hash.h +++ /dev/null @@ -1,5 +0,0 @@ -#include "../../../../include/linux/hash.h" - -#ifndef PERF_HASH_H -#define PERF_HASH_H -#endif diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index d8c927c868e..9844c31b7c2 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -94,12 +94,6 @@ static inline int scnprintf(char * buf, size_t size, const char * fmt, ...) return (i >= ssize) ? (ssize - 1) : i; } -static inline unsigned long -simple_strtoul(const char *nptr, char **endptr, int base) -{ - return strtoul(nptr, endptr, base); -} - int eprintf(int level, const char *fmt, ...) 
__attribute__((format(printf, 2, 3))); diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h index 1d928a0ce99..76ddbc72634 100644 --- a/tools/perf/util/include/linux/list.h +++ b/tools/perf/util/include/linux/list.h @@ -1,5 +1,5 @@ #include <linux/kernel.h> -#include <linux/prefetch.h> +#include <linux/types.h> #include "../../../../include/linux/list.h" diff --git a/tools/perf/util/include/linux/magic.h b/tools/perf/util/include/linux/magic.h deleted file mode 100644 index 58b64ed4da1..00000000000 --- a/tools/perf/util/include/linux/magic.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _PERF_LINUX_MAGIC_H_ -#define _PERF_LINUX_MAGIC_H_ - -#ifndef DEBUGFS_MAGIC -#define DEBUGFS_MAGIC 0x64626720 -#endif - -#ifndef SYSFS_MAGIC -#define SYSFS_MAGIC 0x62656572 -#endif - -#endif diff --git a/tools/perf/util/include/linux/prefetch.h b/tools/perf/util/include/linux/prefetch.h deleted file mode 100644 index 7841e485d8c..00000000000 --- a/tools/perf/util/include/linux/prefetch.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef PERF_LINUX_PREFETCH_H -#define PERF_LINUX_PREFETCH_H - -static inline void prefetch(void *a __attribute__((unused))) { } - -#endif diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h index 6f19c548ecc..97a80073822 100644 --- a/tools/perf/util/include/linux/string.h +++ b/tools/perf/util/include/linux/string.h @@ -1,3 +1,4 @@ #include <string.h> void *memdup(const void *src, size_t len); +int str_append(char **s, int *len, const char *a); diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h deleted file mode 100644 index eb464786c08..00000000000 --- a/tools/perf/util/include/linux/types.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _PERF_LINUX_TYPES_H_ -#define _PERF_LINUX_TYPES_H_ - -#include <asm/types.h> - -#ifndef __bitwise -#define __bitwise -#endif - -#ifndef __le32 -typedef __u32 __bitwise __le32; -#endif - -#define DECLARE_BITMAP(name,bits) \ - unsigned long name[BITS_TO_LONGS(bits)] - -struct list_head { - struct list_head *next, *prev; -}; - -struct hlist_head { - struct hlist_node *first; -}; - -struct hlist_node { - struct hlist_node *next, **pprev; -}; - -#endif diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c index 9d0740024ba..89715b64a31 100644 --- a/tools/perf/util/intlist.c +++ b/tools/perf/util/intlist.c @@ -20,6 +20,7 @@ static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, if (node != NULL) { node->i = i; + node->priv = NULL; rc = &node->rb_node; } @@ -57,10 +58,19 @@ void intlist__remove(struct intlist *ilist, struct int_node *node) rblist__remove_node(&ilist->rblist, &node->rb_node); } -struct int_node *intlist__find(struct intlist *ilist, int i) +static struct int_node *__intlist__findnew(struct intlist *ilist, + int i, bool create) { struct int_node *node = NULL; - struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); + struct rb_node *rb_node; + + if (ilist == NULL) + return NULL; + + if (create) + rb_node = rblist__findnew(&ilist->rblist, (void *)((long)i)); + else + rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); if (rb_node) node = container_of(rb_node, struct int_node, rb_node); @@ -68,7 +78,36 @@ struct int_node *intlist__find(struct intlist *ilist, int i) return node; } -struct intlist *intlist__new(void) +struct int_node *intlist__find(struct intlist *ilist, int i) +{ + return __intlist__findnew(ilist, i, false); +} + +struct int_node *intlist__findnew(struct intlist *ilist, 
int i) +{ + return __intlist__findnew(ilist, i, true); +} + +static int intlist__parse_list(struct intlist *ilist, const char *s) +{ + char *sep; + int err; + + do { + long value = strtol(s, &sep, 10); + err = -EINVAL; + if (*sep != ',' && *sep != '\0') + break; + err = intlist__add(ilist, value); + if (err) + break; + s = sep + 1; + } while (*sep != '\0'); + + return err; +} + +struct intlist *intlist__new(const char *slist) { struct intlist *ilist = malloc(sizeof(*ilist)); @@ -77,9 +116,15 @@ struct intlist *intlist__new(void) ilist->rblist.node_cmp = intlist__node_cmp; ilist->rblist.node_new = intlist__node_new; ilist->rblist.node_delete = intlist__node_delete; + + if (slist && intlist__parse_list(ilist, slist)) + goto out_delete; } return ilist; +out_delete: + intlist__delete(ilist); + return NULL; } void intlist__delete(struct intlist *ilist) diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h index 6d63ab90db5..aa6877d3685 100644 --- a/tools/perf/util/intlist.h +++ b/tools/perf/util/intlist.h @@ -9,13 +9,14 @@ struct int_node { struct rb_node rb_node; int i; + void *priv; }; struct intlist { struct rblist rblist; }; -struct intlist *intlist__new(void); +struct intlist *intlist__new(const char *slist); void intlist__delete(struct intlist *ilist); void intlist__remove(struct intlist *ilist, struct int_node *in); @@ -23,6 +24,7 @@ int intlist__add(struct intlist *ilist, int i); struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx); struct int_node *intlist__find(struct intlist *ilist, int i); +struct int_node *intlist__findnew(struct intlist *ilist, int i); static inline bool intlist__has_entry(struct intlist *ilist, int i) { diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c new file mode 100644 index 00000000000..c73e1fc12e5 --- /dev/null +++ b/tools/perf/util/machine.c @@ -0,0 +1,1422 @@ +#include "callchain.h" +#include "debug.h" +#include "event.h" +#include "evsel.h" +#include "hist.h" +#include "machine.h" +#include "map.h" +#include "sort.h" +#include "strlist.h" +#include "thread.h" +#include <stdbool.h> +#include <symbol/kallsyms.h> +#include "unwind.h" + +int machine__init(struct machine *machine, const char *root_dir, pid_t pid) +{ + map_groups__init(&machine->kmaps); + RB_CLEAR_NODE(&machine->rb_node); + INIT_LIST_HEAD(&machine->user_dsos); + INIT_LIST_HEAD(&machine->kernel_dsos); + + machine->threads = RB_ROOT; + INIT_LIST_HEAD(&machine->dead_threads); + machine->last_match = NULL; + + machine->kmaps.machine = machine; + machine->pid = pid; + + machine->symbol_filter = NULL; + machine->id_hdr_size = 0; + + machine->root_dir = strdup(root_dir); + if (machine->root_dir == NULL) + return -ENOMEM; + + if (pid != HOST_KERNEL_ID) { + struct thread *thread = machine__findnew_thread(machine, 0, + pid); + char comm[64]; + + if (thread == NULL) + return -ENOMEM; + + snprintf(comm, sizeof(comm), "[guest/%d]", pid); + thread__set_comm(thread, comm, 0); + } + + return 0; +} + +struct machine *machine__new_host(void) +{ + struct machine *machine = malloc(sizeof(*machine)); + + if (machine != NULL) { + machine__init(machine, "", HOST_KERNEL_ID); + + if (machine__create_kernel_maps(machine) < 0) + goto out_delete; + } + + return machine; +out_delete: + free(machine); + return NULL; +} + +static void dsos__delete(struct list_head *dsos) +{ + struct dso *pos, *n; + + list_for_each_entry_safe(pos, n, dsos, node) { + list_del(&pos->node); + dso__delete(pos); + } +} + +void machine__delete_dead_threads(struct machine *machine) +{ + struct 
thread *n, *t; + + list_for_each_entry_safe(t, n, &machine->dead_threads, node) { + list_del(&t->node); + thread__delete(t); + } +} + +void machine__delete_threads(struct machine *machine) +{ + struct rb_node *nd = rb_first(&machine->threads); + + while (nd) { + struct thread *t = rb_entry(nd, struct thread, rb_node); + + rb_erase(&t->rb_node, &machine->threads); + nd = rb_next(nd); + thread__delete(t); + } +} + +void machine__exit(struct machine *machine) +{ + map_groups__exit(&machine->kmaps); + dsos__delete(&machine->user_dsos); + dsos__delete(&machine->kernel_dsos); + zfree(&machine->root_dir); +} + +void machine__delete(struct machine *machine) +{ + machine__exit(machine); + free(machine); +} + +void machines__init(struct machines *machines) +{ + machine__init(&machines->host, "", HOST_KERNEL_ID); + machines->guests = RB_ROOT; + machines->symbol_filter = NULL; +} + +void machines__exit(struct machines *machines) +{ + machine__exit(&machines->host); + /* XXX exit guest */ +} + +struct machine *machines__add(struct machines *machines, pid_t pid, + const char *root_dir) +{ + struct rb_node **p = &machines->guests.rb_node; + struct rb_node *parent = NULL; + struct machine *pos, *machine = malloc(sizeof(*machine)); + + if (machine == NULL) + return NULL; + + if (machine__init(machine, root_dir, pid) != 0) { + free(machine); + return NULL; + } + + machine->symbol_filter = machines->symbol_filter; + + while (*p != NULL) { + parent = *p; + pos = rb_entry(parent, struct machine, rb_node); + if (pid < pos->pid) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&machine->rb_node, parent, p); + rb_insert_color(&machine->rb_node, &machines->guests); + + return machine; +} + +void machines__set_symbol_filter(struct machines *machines, + symbol_filter_t symbol_filter) +{ + struct rb_node *nd; + + machines->symbol_filter = symbol_filter; + machines->host.symbol_filter = symbol_filter; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *machine = rb_entry(nd, struct machine, rb_node); + + machine->symbol_filter = symbol_filter; + } +} + +struct machine *machines__find(struct machines *machines, pid_t pid) +{ + struct rb_node **p = &machines->guests.rb_node; + struct rb_node *parent = NULL; + struct machine *machine; + struct machine *default_machine = NULL; + + if (pid == HOST_KERNEL_ID) + return &machines->host; + + while (*p != NULL) { + parent = *p; + machine = rb_entry(parent, struct machine, rb_node); + if (pid < machine->pid) + p = &(*p)->rb_left; + else if (pid > machine->pid) + p = &(*p)->rb_right; + else + return machine; + if (!machine->pid) + default_machine = machine; + } + + return default_machine; +} + +struct machine *machines__findnew(struct machines *machines, pid_t pid) +{ + char path[PATH_MAX]; + const char *root_dir = ""; + struct machine *machine = machines__find(machines, pid); + + if (machine && (machine->pid == pid)) + goto out; + + if ((pid != HOST_KERNEL_ID) && + (pid != DEFAULT_GUEST_KERNEL_ID) && + (symbol_conf.guestmount)) { + sprintf(path, "%s/%d", symbol_conf.guestmount, pid); + if (access(path, R_OK)) { + static struct strlist *seen; + + if (!seen) + seen = strlist__new(true, NULL); + + if (!strlist__has_entry(seen, path)) { + pr_err("Can't access file %s\n", path); + strlist__add(seen, path); + } + machine = NULL; + goto out; + } + root_dir = path; + } + + machine = machines__add(machines, pid, root_dir); +out: + return machine; +} + +void machines__process_guests(struct machines *machines, + machine__process_t 
process, void *data) +{ + struct rb_node *nd; + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + process(pos, data); + } +} + +char *machine__mmap_name(struct machine *machine, char *bf, size_t size) +{ + if (machine__is_host(machine)) + snprintf(bf, size, "[%s]", "kernel.kallsyms"); + else if (machine__is_default_guest(machine)) + snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); + else { + snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", + machine->pid); + } + + return bf; +} + +void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) +{ + struct rb_node *node; + struct machine *machine; + + machines->host.id_hdr_size = id_hdr_size; + + for (node = rb_first(&machines->guests); node; node = rb_next(node)) { + machine = rb_entry(node, struct machine, rb_node); + machine->id_hdr_size = id_hdr_size; + } + + return; +} + +static struct thread *__machine__findnew_thread(struct machine *machine, + pid_t pid, pid_t tid, + bool create) +{ + struct rb_node **p = &machine->threads.rb_node; + struct rb_node *parent = NULL; + struct thread *th; + + /* + * Front-end cache - TID lookups come in blocks, + * so most of the time we dont have to look up + * the full rbtree: + */ + if (machine->last_match && machine->last_match->tid == tid) { + if (pid && pid != machine->last_match->pid_) + machine->last_match->pid_ = pid; + return machine->last_match; + } + + while (*p != NULL) { + parent = *p; + th = rb_entry(parent, struct thread, rb_node); + + if (th->tid == tid) { + machine->last_match = th; + if (pid && pid != th->pid_) + th->pid_ = pid; + return th; + } + + if (tid < th->tid) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + if (!create) + return NULL; + + th = thread__new(pid, tid); + if (th != NULL) { + rb_link_node(&th->rb_node, parent, p); + rb_insert_color(&th->rb_node, &machine->threads); + machine->last_match = th; + + /* + * We have to initialize map_groups separately + * after rb tree is updated. + * + * The reason is that we call machine__findnew_thread + * within thread__init_map_groups to find the thread + * leader and that would screwed the rb tree. 
+ */ + if (thread__init_map_groups(th, machine)) + return NULL; + } + + return th; +} + +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, + pid_t tid) +{ + return __machine__findnew_thread(machine, pid, tid, true); +} + +struct thread *machine__find_thread(struct machine *machine, pid_t pid, + pid_t tid) +{ + return __machine__findnew_thread(machine, pid, tid, false); +} + +int machine__process_comm_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample) +{ + struct thread *thread = machine__findnew_thread(machine, + event->comm.pid, + event->comm.tid); + + if (dump_trace) + perf_event__fprintf_comm(event, stdout); + + if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) { + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); + return -1; + } + + return 0; +} + +int machine__process_lost_event(struct machine *machine __maybe_unused, + union perf_event *event, struct perf_sample *sample __maybe_unused) +{ + dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", + event->lost.id, event->lost.lost); + return 0; +} + +struct map *machine__new_module(struct machine *machine, u64 start, + const char *filename) +{ + struct map *map; + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); + + if (dso == NULL) + return NULL; + + map = map__new2(start, dso, MAP__FUNCTION); + if (map == NULL) + return NULL; + + if (machine__is_host(machine)) + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; + else + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; + map_groups__insert(&machine->kmaps, map); + return map; +} + +size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) +{ + struct rb_node *nd; + size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) + + __dsos__fprintf(&machines->host.user_dsos, fp); + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret += __dsos__fprintf(&pos->kernel_dsos, fp); + ret += __dsos__fprintf(&pos->user_dsos, fp); + } + + return ret; +} + +size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) + + __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm); +} + +size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) +{ + struct rb_node *nd; + size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); + + for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { + struct machine *pos = rb_entry(nd, struct machine, rb_node); + ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); + } + return ret; +} + +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) +{ + int i; + size_t printed = 0; + struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; + + if (kdso->has_build_id) { + char filename[PATH_MAX]; + if (dso__build_id_filename(kdso, filename, sizeof(filename))) + printed += fprintf(fp, "[0] %s\n", filename); + } + + for (i = 0; i < vmlinux_path__nr_entries; ++i) + printed += fprintf(fp, "[%d] %s\n", + i + kdso->has_build_id, vmlinux_path[i]); + + return printed; +} + +size_t machine__fprintf(struct machine *machine, FILE *fp) +{ + size_t ret = 0; + struct rb_node *nd; + + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { + struct thread *pos = rb_entry(nd, struct thread, 
rb_node); + + ret += thread__fprintf(pos, fp); + } + + return ret; +} + +static struct dso *machine__get_kernel(struct machine *machine) +{ + const char *vmlinux_name = NULL; + struct dso *kernel; + + if (machine__is_host(machine)) { + vmlinux_name = symbol_conf.vmlinux_name; + if (!vmlinux_name) + vmlinux_name = "[kernel.kallsyms]"; + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[kernel]", + DSO_TYPE_KERNEL); + } else { + char bf[PATH_MAX]; + + if (machine__is_default_guest(machine)) + vmlinux_name = symbol_conf.default_guest_vmlinux_name; + if (!vmlinux_name) + vmlinux_name = machine__mmap_name(machine, bf, + sizeof(bf)); + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[guest.kernel]", + DSO_TYPE_GUEST_KERNEL); + } + + if (kernel != NULL && (!kernel->has_build_id)) + dso__read_running_kernel_build_id(kernel, machine); + + return kernel; +} + +struct process_args { + u64 start; +}; + +static void machine__get_kallsyms_filename(struct machine *machine, char *buf, + size_t bufsz) +{ + if (machine__is_default_guest(machine)) + scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); + else + scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); +} + +const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; + +/* Figure out the start address of kernel map from /proc/kallsyms. + * Returns the name of the start symbol in *symbol_name. Pass in NULL as + * symbol_name if it's not that important. + */ +static u64 machine__get_kernel_start_addr(struct machine *machine, + const char **symbol_name) +{ + char filename[PATH_MAX]; + int i; + const char *name; + u64 addr = 0; + + machine__get_kallsyms_filename(machine, filename, PATH_MAX); + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return 0; + + for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { + addr = kallsyms__get_function_start(filename, name); + if (addr) + break; + } + + if (symbol_name) + *symbol_name = name; + + return addr; +} + +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) +{ + enum map_type type; + u64 start = machine__get_kernel_start_addr(machine, NULL); + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + + machine->vmlinux_maps[type] = map__new2(start, kernel, type); + if (machine->vmlinux_maps[type] == NULL) + return -1; + + machine->vmlinux_maps[type]->map_ip = + machine->vmlinux_maps[type]->unmap_ip = + identity__map_ip; + kmap = map__kmap(machine->vmlinux_maps[type]); + kmap->kmaps = &machine->kmaps; + map_groups__insert(&machine->kmaps, + machine->vmlinux_maps[type]); + } + + return 0; +} + +void machine__destroy_kernel_maps(struct machine *machine) +{ + enum map_type type; + + for (type = 0; type < MAP__NR_TYPES; ++type) { + struct kmap *kmap; + + if (machine->vmlinux_maps[type] == NULL) + continue; + + kmap = map__kmap(machine->vmlinux_maps[type]); + map_groups__remove(&machine->kmaps, + machine->vmlinux_maps[type]); + if (kmap->ref_reloc_sym) { + /* + * ref_reloc_sym is shared among all maps, so free just + * on one of them. 
+ */ + if (type == MAP__FUNCTION) { + zfree((char **)&kmap->ref_reloc_sym->name); + zfree(&kmap->ref_reloc_sym); + } else + kmap->ref_reloc_sym = NULL; + } + + map__delete(machine->vmlinux_maps[type]); + machine->vmlinux_maps[type] = NULL; + } +} + +int machines__create_guest_kernel_maps(struct machines *machines) +{ + int ret = 0; + struct dirent **namelist = NULL; + int i, items = 0; + char path[PATH_MAX]; + pid_t pid; + char *endp; + + if (symbol_conf.default_guest_vmlinux_name || + symbol_conf.default_guest_modules || + symbol_conf.default_guest_kallsyms) { + machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); + } + + if (symbol_conf.guestmount) { + items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); + if (items <= 0) + return -ENOENT; + for (i = 0; i < items; i++) { + if (!isdigit(namelist[i]->d_name[0])) { + /* Filter out . and .. */ + continue; + } + pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); + if ((*endp != '\0') || + (endp == namelist[i]->d_name) || + (errno == ERANGE)) { + pr_debug("invalid directory (%s). Skipping.\n", + namelist[i]->d_name); + continue; + } + sprintf(path, "%s/%s/proc/kallsyms", + symbol_conf.guestmount, + namelist[i]->d_name); + ret = access(path, R_OK); + if (ret) { + pr_debug("Can't access file %s\n", path); + goto failure; + } + machines__create_kernel_maps(machines, pid); + } +failure: + free(namelist); + } + + return ret; +} + +void machines__destroy_kernel_maps(struct machines *machines) +{ + struct rb_node *next = rb_first(&machines->guests); + + machine__destroy_kernel_maps(&machines->host); + + while (next) { + struct machine *pos = rb_entry(next, struct machine, rb_node); + + next = rb_next(&pos->rb_node); + rb_erase(&pos->rb_node, &machines->guests); + machine__delete(pos); + } +} + +int machines__create_kernel_maps(struct machines *machines, pid_t pid) +{ + struct machine *machine = machines__findnew(machines, pid); + + if (machine == NULL) + return -1; + + return machine__create_kernel_maps(machine); +} + +int machine__load_kallsyms(struct machine *machine, const char *filename, + enum map_type type, symbol_filter_t filter) +{ + struct map *map = machine->vmlinux_maps[type]; + int ret = dso__load_kallsyms(map->dso, filename, map, filter); + + if (ret > 0) { + dso__set_loaded(map->dso, type); + /* + * Since /proc/kallsyms will have multiple sessions for the + * kernel, with modules between them, fixup the end of all + * sections. 
+ */ + __map_groups__fixup_end(&machine->kmaps, type); + } + + return ret; +} + +int machine__load_vmlinux_path(struct machine *machine, enum map_type type, + symbol_filter_t filter) +{ + struct map *map = machine->vmlinux_maps[type]; + int ret = dso__load_vmlinux_path(map->dso, map, filter); + + if (ret > 0) + dso__set_loaded(map->dso, type); + + return ret; +} + +static void map_groups__fixup_end(struct map_groups *mg) +{ + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + __map_groups__fixup_end(mg, i); +} + +static char *get_kernel_version(const char *root_dir) +{ + char version[PATH_MAX]; + FILE *file; + char *name, *tmp; + const char *prefix = "Linux version "; + + sprintf(version, "%s/proc/version", root_dir); + file = fopen(version, "r"); + if (!file) + return NULL; + + version[0] = '\0'; + tmp = fgets(version, sizeof(version), file); + fclose(file); + + name = strstr(version, prefix); + if (!name) + return NULL; + name += strlen(prefix); + tmp = strchr(name, ' '); + if (tmp) + *tmp = '\0'; + + return strdup(name); +} + +static int map_groups__set_modules_path_dir(struct map_groups *mg, + const char *dir_name, int depth) +{ + struct dirent *dent; + DIR *dir = opendir(dir_name); + int ret = 0; + + if (!dir) { + pr_debug("%s: cannot open %s dir\n", __func__, dir_name); + return -1; + } + + while ((dent = readdir(dir)) != NULL) { + char path[PATH_MAX]; + struct stat st; + + /*sshfs might return bad dent->d_type, so we have to stat*/ + snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); + if (stat(path, &st)) + continue; + + if (S_ISDIR(st.st_mode)) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + /* Do not follow top-level source and build symlinks */ + if (depth == 0) { + if (!strcmp(dent->d_name, "source") || + !strcmp(dent->d_name, "build")) + continue; + } + + ret = map_groups__set_modules_path_dir(mg, path, + depth + 1); + if (ret < 0) + goto out; + } else { + char *dot = strrchr(dent->d_name, '.'), + dso_name[PATH_MAX]; + struct map *map; + char *long_name; + + if (dot == NULL || strcmp(dot, ".ko")) + continue; + snprintf(dso_name, sizeof(dso_name), "[%.*s]", + (int)(dot - dent->d_name), dent->d_name); + + strxfrchar(dso_name, '-', '_'); + map = map_groups__find_by_name(mg, MAP__FUNCTION, + dso_name); + if (map == NULL) + continue; + + long_name = strdup(path); + if (long_name == NULL) { + ret = -1; + goto out; + } + dso__set_long_name(map->dso, long_name, true); + dso__kernel_module_get_build_id(map->dso, ""); + } + } + +out: + closedir(dir); + return ret; +} + +static int machine__set_modules_path(struct machine *machine) +{ + char *version; + char modules_path[PATH_MAX]; + + version = get_kernel_version(machine->root_dir); + if (!version) + return -1; + + snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", + machine->root_dir, version); + free(version); + + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); +} + +static int machine__create_module(void *arg, const char *name, u64 start) +{ + struct machine *machine = arg; + struct map *map; + + map = machine__new_module(machine, start, name); + if (map == NULL) + return -1; + + dso__kernel_module_get_build_id(map->dso, machine->root_dir); + + return 0; +} + +static int machine__create_modules(struct machine *machine) +{ + const char *modules; + char path[PATH_MAX]; + + if (machine__is_default_guest(machine)) { + modules = symbol_conf.default_guest_modules; + } else { + snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); + modules = 
path; + } + + if (symbol__restricted_filename(modules, "/proc/modules")) + return -1; + + if (modules__parse(modules, machine, machine__create_module)) + return -1; + + if (!machine__set_modules_path(machine)) + return 0; + + pr_debug("Problems setting modules path maps, continuing anyway...\n"); + + return 0; +} + +int machine__create_kernel_maps(struct machine *machine) +{ + struct dso *kernel = machine__get_kernel(machine); + const char *name; + u64 addr = machine__get_kernel_start_addr(machine, &name); + if (!addr) + return -1; + + if (kernel == NULL || + __machine__create_kernel_maps(machine, kernel) < 0) + return -1; + + if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { + if (machine__is_host(machine)) + pr_debug("Problems creating module maps, " + "continuing anyway...\n"); + else + pr_debug("Problems creating module maps for guest %d, " + "continuing anyway...\n", machine->pid); + } + + /* + * Now that we have all the maps created, just set the ->end of them: + */ + map_groups__fixup_end(&machine->kmaps); + + if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, + addr)) { + machine__destroy_kernel_maps(machine); + return -1; + } + + return 0; +} + +static void machine__set_kernel_mmap_len(struct machine *machine, + union perf_event *event) +{ + int i; + + for (i = 0; i < MAP__NR_TYPES; i++) { + machine->vmlinux_maps[i]->start = event->mmap.start; + machine->vmlinux_maps[i]->end = (event->mmap.start + + event->mmap.len); + /* + * Be a bit paranoid here, some perf.data file came with + * a zero sized synthesized MMAP event for the kernel. + */ + if (machine->vmlinux_maps[i]->end == 0) + machine->vmlinux_maps[i]->end = ~0ULL; + } +} + +static bool machine__uses_kcore(struct machine *machine) +{ + struct dso *dso; + + list_for_each_entry(dso, &machine->kernel_dsos, node) { + if (dso__is_kcore(dso)) + return true; + } + + return false; +} + +static int machine__process_kernel_mmap_event(struct machine *machine, + union perf_event *event) +{ + struct map *map; + char kmmap_prefix[PATH_MAX]; + enum dso_kernel_type kernel_type; + bool is_kernel_mmap; + + /* If we have maps from kcore then we do not need or want any others */ + if (machine__uses_kcore(machine)) + return 0; + + machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); + if (machine__is_host(machine)) + kernel_type = DSO_TYPE_KERNEL; + else + kernel_type = DSO_TYPE_GUEST_KERNEL; + + is_kernel_mmap = memcmp(event->mmap.filename, + kmmap_prefix, + strlen(kmmap_prefix) - 1) == 0; + if (event->mmap.filename[0] == '/' || + (!is_kernel_mmap && event->mmap.filename[0] == '[')) { + + char short_module_name[1024]; + char *name, *dot; + + if (event->mmap.filename[0] == '/') { + name = strrchr(event->mmap.filename, '/'); + if (name == NULL) + goto out_problem; + + ++name; /* skip / */ + dot = strrchr(name, '.'); + if (dot == NULL) + goto out_problem; + snprintf(short_module_name, sizeof(short_module_name), + "[%.*s]", (int)(dot - name), name); + strxfrchar(short_module_name, '-', '_'); + } else + strcpy(short_module_name, event->mmap.filename); + + map = machine__new_module(machine, event->mmap.start, + event->mmap.filename); + if (map == NULL) + goto out_problem; + + name = strdup(short_module_name); + if (name == NULL) + goto out_problem; + + dso__set_short_name(map->dso, name, true); + map->end = map->start + event->mmap.len; + } else if (is_kernel_mmap) { + const char *symbol_name = (event->mmap.filename + + strlen(kmmap_prefix)); + /* + * Should be there already, from the build-id table in + 
* the header. + */ + struct dso *kernel = __dsos__findnew(&machine->kernel_dsos, + kmmap_prefix); + if (kernel == NULL) + goto out_problem; + + kernel->kernel = kernel_type; + if (__machine__create_kernel_maps(machine, kernel) < 0) + goto out_problem; + + machine__set_kernel_mmap_len(machine, event); + + /* + * Avoid using a zero address (kptr_restrict) for the ref reloc + * symbol. Effectively having zero here means that at record + * time /proc/sys/kernel/kptr_restrict was non zero. + */ + if (event->mmap.pgoff != 0) { + maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, + symbol_name, + event->mmap.pgoff); + } + + if (machine__is_default_guest(machine)) { + /* + * preload dso of guest kernel and modules + */ + dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], + NULL); + } + } + return 0; +out_problem: + return -1; +} + +int machine__process_mmap2_event(struct machine *machine, + union perf_event *event, + struct perf_sample *sample __maybe_unused) +{ + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread; + struct map *map; + enum map_type type; + int ret = 0; + + if (dump_trace) + perf_event__fprintf_mmap2(event, stdout); + + if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || + cpumode == PERF_RECORD_MISC_KERNEL) { + ret = machine__process_kernel_mmap_event(machine, event); + if (ret < 0) + goto out_problem; + return 0; + } + + thread = machine__findnew_thread(machine, event->mmap2.pid, + event->mmap2.tid); + if (thread == NULL) + goto out_problem; + + if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) + type = MAP__VARIABLE; + else + type = MAP__FUNCTION; + + map = map__new(&machine->user_dsos, event->mmap2.start, + event->mmap2.len, event->mmap2.pgoff, + event->mmap2.pid, event->mmap2.maj, + event->mmap2.min, event->mmap2.ino, + event->mmap2.ino_generation, + event->mmap2.prot, + event->mmap2.flags, + event->mmap2.filename, type); + + if (map == NULL) + goto out_problem; + + thread__insert_map(thread, map); + return 0; + +out_problem: + dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); + return 0; +} + +int machine__process_mmap_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample __maybe_unused) +{ + u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread; + struct map *map; + enum map_type type; + int ret = 0; + + if (dump_trace) + perf_event__fprintf_mmap(event, stdout); + + if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || + cpumode == PERF_RECORD_MISC_KERNEL) { + ret = machine__process_kernel_mmap_event(machine, event); + if (ret < 0) + goto out_problem; + return 0; + } + + thread = machine__findnew_thread(machine, event->mmap.pid, + event->mmap.tid); + if (thread == NULL) + goto out_problem; + + if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) + type = MAP__VARIABLE; + else + type = MAP__FUNCTION; + + map = map__new(&machine->user_dsos, event->mmap.start, + event->mmap.len, event->mmap.pgoff, + event->mmap.pid, 0, 0, 0, 0, 0, 0, + event->mmap.filename, + type); + + if (map == NULL) + goto out_problem; + + thread__insert_map(thread, map); + return 0; + +out_problem: + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); + return 0; +} + +static void machine__remove_thread(struct machine *machine, struct thread *th) +{ + machine->last_match = NULL; + rb_erase(&th->rb_node, &machine->threads); + /* + * We may have references to this thread, for instance in some hist_entry + * instances, so just move them to a separate list. 
+ */ + list_add_tail(&th->node, &machine->dead_threads); +} + +int machine__process_fork_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample) +{ + struct thread *thread = machine__find_thread(machine, + event->fork.pid, + event->fork.tid); + struct thread *parent = machine__findnew_thread(machine, + event->fork.ppid, + event->fork.ptid); + + /* if a thread currently exists for the thread id remove it */ + if (thread != NULL) + machine__remove_thread(machine, thread); + + thread = machine__findnew_thread(machine, event->fork.pid, + event->fork.tid); + if (dump_trace) + perf_event__fprintf_task(event, stdout); + + if (thread == NULL || parent == NULL || + thread__fork(thread, parent, sample->time) < 0) { + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); + return -1; + } + + return 0; +} + +int machine__process_exit_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample __maybe_unused) +{ + struct thread *thread = machine__find_thread(machine, + event->fork.pid, + event->fork.tid); + + if (dump_trace) + perf_event__fprintf_task(event, stdout); + + if (thread != NULL) + thread__exited(thread); + + return 0; +} + +int machine__process_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample) +{ + int ret; + + switch (event->header.type) { + case PERF_RECORD_COMM: + ret = machine__process_comm_event(machine, event, sample); break; + case PERF_RECORD_MMAP: + ret = machine__process_mmap_event(machine, event, sample); break; + case PERF_RECORD_MMAP2: + ret = machine__process_mmap2_event(machine, event, sample); break; + case PERF_RECORD_FORK: + ret = machine__process_fork_event(machine, event, sample); break; + case PERF_RECORD_EXIT: + ret = machine__process_exit_event(machine, event, sample); break; + case PERF_RECORD_LOST: + ret = machine__process_lost_event(machine, event, sample); break; + default: + ret = -1; + break; + } + + return ret; +} + +static bool symbol__match_regex(struct symbol *sym, regex_t *regex) +{ + if (sym->name && !regexec(regex, sym->name, 0, NULL, 0)) + return 1; + return 0; +} + +static void ip__resolve_ams(struct machine *machine, struct thread *thread, + struct addr_map_symbol *ams, + u64 ip) +{ + struct addr_location al; + + memset(&al, 0, sizeof(al)); + /* + * We cannot use the header.misc hint to determine whether a + * branch stack address is user, kernel, guest, hypervisor. + * Branches may straddle the kernel/user/hypervisor boundaries. 
+ * Thus, we have to try consecutively until we find a match + * or else, the symbol is unknown + */ + thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al); + + ams->addr = ip; + ams->al_addr = al.addr; + ams->sym = al.sym; + ams->map = al.map; +} + +static void ip__resolve_data(struct machine *machine, struct thread *thread, + u8 m, struct addr_map_symbol *ams, u64 addr) +{ + struct addr_location al; + + memset(&al, 0, sizeof(al)); + + thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, + &al); + ams->addr = addr; + ams->al_addr = al.addr; + ams->sym = al.sym; + ams->map = al.map; +} + +struct mem_info *sample__resolve_mem(struct perf_sample *sample, + struct addr_location *al) +{ + struct mem_info *mi = zalloc(sizeof(*mi)); + + if (!mi) + return NULL; + + ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip); + ip__resolve_data(al->machine, al->thread, al->cpumode, + &mi->daddr, sample->addr); + mi->data_src.val = sample->data_src; + + return mi; +} + +struct branch_info *sample__resolve_bstack(struct perf_sample *sample, + struct addr_location *al) +{ + unsigned int i; + const struct branch_stack *bs = sample->branch_stack; + struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info)); + + if (!bi) + return NULL; + + for (i = 0; i < bs->nr; i++) { + ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to); + ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from); + bi[i].flags = bs->entries[i].flags; + } + return bi; +} + +static int machine__resolve_callchain_sample(struct machine *machine, + struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent, + struct addr_location *root_al, + int max_stack) +{ + u8 cpumode = PERF_RECORD_MISC_USER; + int chain_nr = min(max_stack, (int)chain->nr); + int i; + int err; + + callchain_cursor_reset(&callchain_cursor); + + if (chain->nr > PERF_MAX_STACK_DEPTH) { + pr_warning("corrupted callchain. skipping...\n"); + return 0; + } + + for (i = 0; i < chain_nr; i++) { + u64 ip; + struct addr_location al; + + if (callchain_param.order == ORDER_CALLEE) + ip = chain->ips[i]; + else + ip = chain->ips[chain->nr - i - 1]; + + if (ip >= PERF_CONTEXT_MAX) { + switch (ip) { + case PERF_CONTEXT_HV: + cpumode = PERF_RECORD_MISC_HYPERVISOR; + break; + case PERF_CONTEXT_KERNEL: + cpumode = PERF_RECORD_MISC_KERNEL; + break; + case PERF_CONTEXT_USER: + cpumode = PERF_RECORD_MISC_USER; + break; + default: + pr_debug("invalid callchain context: " + "%"PRId64"\n", (s64) ip); + /* + * It seems the callchain is corrupted. + * Discard all. + */ + callchain_cursor_reset(&callchain_cursor); + return 0; + } + continue; + } + + al.filtered = 0; + thread__find_addr_location(thread, machine, cpumode, + MAP__FUNCTION, ip, &al); + if (al.sym != NULL) { + if (sort__has_parent && !*parent && + symbol__match_regex(al.sym, &parent_regex)) + *parent = al.sym; + else if (have_ignore_callees && root_al && + symbol__match_regex(al.sym, &ignore_callees_regex)) { + /* Treat this symbol as the root, + forgetting its callees. 
*/ + *root_al = al; + callchain_cursor_reset(&callchain_cursor); + } + } + + err = callchain_cursor_append(&callchain_cursor, + ip, al.map, al.sym); + if (err) + return err; + } + + return 0; +} + +static int unwind_entry(struct unwind_entry *entry, void *arg) +{ + struct callchain_cursor *cursor = arg; + return callchain_cursor_append(cursor, entry->ip, + entry->map, entry->sym); +} + +int machine__resolve_callchain(struct machine *machine, + struct perf_evsel *evsel, + struct thread *thread, + struct perf_sample *sample, + struct symbol **parent, + struct addr_location *root_al, + int max_stack) +{ + int ret; + + ret = machine__resolve_callchain_sample(machine, thread, + sample->callchain, parent, + root_al, max_stack); + if (ret) + return ret; + + /* Can we do dwarf post unwind? */ + if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && + (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) + return 0; + + /* Bail out if nothing was captured. */ + if ((!sample->user_regs.regs) || + (!sample->user_stack.size)) + return 0; + + return unwind__get_entries(unwind_entry, &callchain_cursor, machine, + thread, sample, max_stack); + +} + +int machine__for_each_thread(struct machine *machine, + int (*fn)(struct thread *thread, void *p), + void *priv) +{ + struct rb_node *nd; + struct thread *thread; + int rc = 0; + + for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { + thread = rb_entry(nd, struct thread, rb_node); + rc = fn(thread, priv); + if (rc != 0) + return rc; + } + + list_for_each_entry(thread, &machine->dead_threads, node) { + rc = fn(thread, priv); + if (rc != 0) + return rc; + } + return rc; +} + +int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, + struct target *target, struct thread_map *threads, + perf_event__handler_t process, bool data_mmap) +{ + if (target__has_task(target)) + return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); + else if (target__has_cpu(target)) + return perf_event__synthesize_threads(tool, process, machine, data_mmap); + /* command specified */ + return 0; +} diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h new file mode 100644 index 00000000000..c8c74a11939 --- /dev/null +++ b/tools/perf/util/machine.h @@ -0,0 +1,194 @@ +#ifndef __PERF_MACHINE_H +#define __PERF_MACHINE_H + +#include <sys/types.h> +#include <linux/rbtree.h> +#include "map.h" +#include "event.h" + +struct addr_location; +struct branch_stack; +struct perf_evsel; +struct perf_sample; +struct symbol; +struct thread; +union perf_event; + +/* Native host kernel uses -1 as pid index in machine */ +#define HOST_KERNEL_ID (-1) +#define DEFAULT_GUEST_KERNEL_ID (0) + +extern const char *ref_reloc_sym_names[]; + +struct machine { + struct rb_node rb_node; + pid_t pid; + u16 id_hdr_size; + char *root_dir; + struct rb_root threads; + struct list_head dead_threads; + struct thread *last_match; + struct list_head user_dsos; + struct list_head kernel_dsos; + struct map_groups kmaps; + struct map *vmlinux_maps[MAP__NR_TYPES]; + symbol_filter_t symbol_filter; +}; + +static inline +struct map *machine__kernel_map(struct machine *machine, enum map_type type) +{ + return machine->vmlinux_maps[type]; +} + +struct thread *machine__find_thread(struct machine *machine, pid_t pid, + pid_t tid); + +int machine__process_comm_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_exit_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); 
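/*
 * Editorial sketch, not part of this patch: the lookup idiom behind the
 * machine__find_thread()/machine__findnew_thread() prototypes above -- a tree
 * keyed by tid, fronted by a one-entry "last match" cache, with optional
 * creation on a miss.  The names below (tid_node, tid_tree) are hypothetical
 * stand-ins; the real code uses the kernel rbtree API and struct thread.
 */
#include <stdbool.h>
#include <stdlib.h>

struct tid_node {
	int tid;
	struct tid_node *left, *right;
};

struct tid_tree {
	struct tid_node *root;
	struct tid_node *last_match;	/* TID lookups come in runs, so cache the last hit */
};

static struct tid_node *tid_tree__findnew(struct tid_tree *tree, int tid, bool create)
{
	struct tid_node **p = &tree->root, *n;

	if (tree->last_match && tree->last_match->tid == tid)
		return tree->last_match;	/* fast path: repeated lookups of the same tid */

	while ((n = *p) != NULL) {
		if (n->tid == tid) {
			tree->last_match = n;
			return n;
		}
		p = tid < n->tid ? &n->left : &n->right;
	}

	if (!create)
		return NULL;			/* find-only behaviour */

	n = calloc(1, sizeof(*n));		/* findnew behaviour: create on miss */
	if (n) {
		n->tid = tid;
		*p = n;		/* *p is the NULL child slot where the search ended */
		tree->last_match = n;
	}
	return n;
}
/*
 * Design note: the single-entry cache costs one pointer and makes the common
 * case (events for one thread arriving back to back) O(1), while the tree
 * keeps other lookups logarithmic (an rbtree in the real code).
 */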
+int machine__process_fork_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_lost_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_mmap_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_mmap2_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); +int machine__process_event(struct machine *machine, union perf_event *event, + struct perf_sample *sample); + +typedef void (*machine__process_t)(struct machine *machine, void *data); + +struct machines { + struct machine host; + struct rb_root guests; + symbol_filter_t symbol_filter; +}; + +void machines__init(struct machines *machines); +void machines__exit(struct machines *machines); + +void machines__process_guests(struct machines *machines, + machine__process_t process, void *data); + +struct machine *machines__add(struct machines *machines, pid_t pid, + const char *root_dir); +struct machine *machines__find_host(struct machines *machines); +struct machine *machines__find(struct machines *machines, pid_t pid); +struct machine *machines__findnew(struct machines *machines, pid_t pid); + +void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); +char *machine__mmap_name(struct machine *machine, char *bf, size_t size); + +void machines__set_symbol_filter(struct machines *machines, + symbol_filter_t symbol_filter); + +struct machine *machine__new_host(void); +int machine__init(struct machine *machine, const char *root_dir, pid_t pid); +void machine__exit(struct machine *machine); +void machine__delete_dead_threads(struct machine *machine); +void machine__delete_threads(struct machine *machine); +void machine__delete(struct machine *machine); + +struct branch_info *sample__resolve_bstack(struct perf_sample *sample, + struct addr_location *al); +struct mem_info *sample__resolve_mem(struct perf_sample *sample, + struct addr_location *al); +int machine__resolve_callchain(struct machine *machine, + struct perf_evsel *evsel, + struct thread *thread, + struct perf_sample *sample, + struct symbol **parent, + struct addr_location *root_al, + int max_stack); + +/* + * Default guest kernel is defined by parameter --guestkallsyms + * and --guestmodules + */ +static inline bool machine__is_default_guest(struct machine *machine) +{ + return machine ? machine->pid == DEFAULT_GUEST_KERNEL_ID : false; +} + +static inline bool machine__is_host(struct machine *machine) +{ + return machine ? 
machine->pid == HOST_KERNEL_ID : false; +} + +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, + pid_t tid); + +size_t machine__fprintf(struct machine *machine, FILE *fp); + +static inline +struct symbol *machine__find_kernel_symbol(struct machine *machine, + enum map_type type, u64 addr, + struct map **mapp, + symbol_filter_t filter) +{ + return map_groups__find_symbol(&machine->kmaps, type, addr, + mapp, filter); +} + +static inline +struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr, + struct map **mapp, + symbol_filter_t filter) +{ + return machine__find_kernel_symbol(machine, MAP__FUNCTION, addr, + mapp, filter); +} + +static inline +struct symbol *machine__find_kernel_function_by_name(struct machine *machine, + const char *name, + struct map **mapp, + symbol_filter_t filter) +{ + return map_groups__find_function_by_name(&machine->kmaps, name, mapp, + filter); +} + +struct map *machine__new_module(struct machine *machine, u64 start, + const char *filename); + +int machine__load_kallsyms(struct machine *machine, const char *filename, + enum map_type type, symbol_filter_t filter); +int machine__load_vmlinux_path(struct machine *machine, enum map_type type, + symbol_filter_t filter); + +size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); +size_t machines__fprintf_dsos(struct machines *machines, FILE *fp); +size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm); + +void machine__destroy_kernel_maps(struct machine *machine); +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); +int machine__create_kernel_maps(struct machine *machine); + +int machines__create_kernel_maps(struct machines *machines, pid_t pid); +int machines__create_guest_kernel_maps(struct machines *machines); +void machines__destroy_kernel_maps(struct machines *machines); + +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); + +int machine__for_each_thread(struct machine *machine, + int (*fn)(struct thread *thread, void *p), + void *priv); + +int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, + struct target *target, struct thread_map *threads, + perf_event__handler_t process, bool data_mmap); +static inline +int machine__synthesize_threads(struct machine *machine, struct target *target, + struct thread_map *threads, bool data_mmap) +{ + return __machine__synthesize_threads(machine, NULL, target, threads, + perf_event__process, data_mmap); +} + +#endif /* __PERF_MACHINE_H */ diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 6109fa4d14c..25c571f4cba 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -10,6 +10,9 @@ #include "thread.h" #include "strlist.h" #include "vdso.h" +#include "build-id.h" +#include "util.h" +#include <linux/string.h> const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", @@ -18,51 +21,155 @@ const char *map_type__name[MAP__NR_TYPES] = { static inline int is_anon_memory(const char *filename) { - return strcmp(filename, "//anon") == 0; + return !strcmp(filename, "//anon") || + !strcmp(filename, "/dev/zero (deleted)") || + !strcmp(filename, "/anon_hugepage (deleted)"); } static inline int is_no_dso_memory(const char *filename) { - return !strcmp(filename, "[stack]") || + return !strncmp(filename, "[stack", 6) || !strcmp(filename, "[heap]"); } -void map__init(struct map *self, 
enum map_type type, +static inline int is_android_lib(const char *filename) +{ + return !strncmp(filename, "/data/app-lib", 13) || + !strncmp(filename, "/system/lib", 11); +} + +static inline bool replace_android_lib(const char *filename, char *newfilename) +{ + const char *libname; + char *app_abi; + size_t app_abi_length, new_length; + size_t lib_length = 0; + + libname = strrchr(filename, '/'); + if (libname) + lib_length = strlen(libname); + + app_abi = getenv("APP_ABI"); + if (!app_abi) + return false; + + app_abi_length = strlen(app_abi); + + if (!strncmp(filename, "/data/app-lib", 13)) { + char *apk_path; + + if (!app_abi_length) + return false; + + new_length = 7 + app_abi_length + lib_length; + + apk_path = getenv("APK_PATH"); + if (apk_path) { + new_length += strlen(apk_path) + 1; + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, + "%s/libs/%s/%s", apk_path, app_abi, libname); + } else { + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, + "libs/%s/%s", app_abi, libname); + } + return true; + } + + if (!strncmp(filename, "/system/lib/", 11)) { + char *ndk, *app; + const char *arch; + size_t ndk_length; + size_t app_length; + + ndk = getenv("NDK_ROOT"); + app = getenv("APP_PLATFORM"); + + if (!(ndk && app)) + return false; + + ndk_length = strlen(ndk); + app_length = strlen(app); + + if (!(ndk_length && app_length && app_abi_length)) + return false; + + arch = !strncmp(app_abi, "arm", 3) ? "arm" : + !strncmp(app_abi, "mips", 4) ? "mips" : + !strncmp(app_abi, "x86", 3) ? "x86" : NULL; + + if (!arch) + return false; + + new_length = 27 + ndk_length + + app_length + lib_length + + strlen(arch); + + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, + "%s/platforms/%s/arch-%s/usr/lib/%s", + ndk, app, arch, libname); + + return true; + } + return false; +} + +void map__init(struct map *map, enum map_type type, u64 start, u64 end, u64 pgoff, struct dso *dso) { - self->type = type; - self->start = start; - self->end = end; - self->pgoff = pgoff; - self->dso = dso; - self->map_ip = map__map_ip; - self->unmap_ip = map__unmap_ip; - RB_CLEAR_NODE(&self->rb_node); - self->groups = NULL; - self->referenced = false; - self->erange_warned = false; + map->type = type; + map->start = start; + map->end = end; + map->pgoff = pgoff; + map->reloc = 0; + map->dso = dso; + map->map_ip = map__map_ip; + map->unmap_ip = map__unmap_ip; + RB_CLEAR_NODE(&map->rb_node); + map->groups = NULL; + map->referenced = false; + map->erange_warned = false; } struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, - u64 pgoff, u32 pid, char *filename, + u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino, + u64 ino_gen, u32 prot, u32 flags, char *filename, enum map_type type) { - struct map *self = malloc(sizeof(*self)); + struct map *map = malloc(sizeof(*map)); - if (self != NULL) { + if (map != NULL) { char newfilename[PATH_MAX]; struct dso *dso; - int anon, no_dso, vdso; + int anon, no_dso, vdso, android; + android = is_android_lib(filename); anon = is_anon_memory(filename); vdso = is_vdso_map(filename); no_dso = is_no_dso_memory(filename); - if (anon) { + map->maj = d_maj; + map->min = d_min; + map->ino = ino; + map->ino_generation = ino_gen; + map->prot = prot; + map->flags = flags; + + if ((anon || no_dso) && type == MAP__FUNCTION) { snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); filename = newfilename; } + if (android) { + if (replace_android_lib(filename, newfilename)) + filename = 
newfilename; + } + if (vdso) { pgoff = 0; dso = vdso__dso_findnew(dsos__list); @@ -72,23 +179,23 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, if (dso == NULL) goto out_delete; - map__init(self, type, start, start + len, pgoff, dso); + map__init(map, type, start, start + len, pgoff, dso); if (anon || no_dso) { - self->map_ip = self->unmap_ip = identity__map_ip; + map->map_ip = map->unmap_ip = identity__map_ip; /* * Set memory without DSO as loaded. All map__find_* * functions still return NULL, and we avoid the * unnecessary map__load warning. */ - if (no_dso) - dso__set_loaded(dso, self->type); + if (type != MAP__FUNCTION) + dso__set_loaded(dso, map->type); } } - return self; + return map; out_delete: - free(self); + free(map); return NULL; } @@ -111,48 +218,48 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type) return map; } -void map__delete(struct map *self) +void map__delete(struct map *map) { - free(self); + free(map); } -void map__fixup_start(struct map *self) +void map__fixup_start(struct map *map) { - struct rb_root *symbols = &self->dso->symbols[self->type]; + struct rb_root *symbols = &map->dso->symbols[map->type]; struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - self->start = sym->start; + map->start = sym->start; } } -void map__fixup_end(struct map *self) +void map__fixup_end(struct map *map) { - struct rb_root *symbols = &self->dso->symbols[self->type]; + struct rb_root *symbols = &map->dso->symbols[map->type]; struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - self->end = sym->end; + map->end = sym->end; } } #define DSO__DELETED "(deleted)" -int map__load(struct map *self, symbol_filter_t filter) +int map__load(struct map *map, symbol_filter_t filter) { - const char *name = self->dso->long_name; + const char *name = map->dso->long_name; int nr; - if (dso__loaded(self->dso, self->type)) + if (dso__loaded(map->dso, map->type)) return 0; - nr = dso__load(self->dso, self, filter); + nr = dso__load(map->dso, map, filter); if (nr < 0) { - if (self->dso->has_build_id) { + if (map->dso->has_build_id) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - build_id__sprintf(self->dso->build_id, - sizeof(self->dso->build_id), + build_id__sprintf(map->dso->build_id, + sizeof(map->dso->build_id), sbuild_id); pr_warning("%s with build id %s not found", name, sbuild_id); @@ -162,7 +269,7 @@ int map__load(struct map *self, symbol_filter_t filter) pr_warning(", continuing without symbols\n"); return -1; } else if (nr == 0) { -#ifdef LIBELF_SUPPORT +#ifdef HAVE_LIBELF_SUPPORT const size_t len = strlen(name); const size_t real_len = len - sizeof(DSO__DELETED); @@ -178,47 +285,34 @@ int map__load(struct map *self, symbol_filter_t filter) #endif return -1; } - /* - * Only applies to the kernel, as its symtabs aren't relative like the - * module ones. 
- */ - if (self->dso->kernel) - map__reloc_vmlinux(self); return 0; } -struct symbol *map__find_symbol(struct map *self, u64 addr, +struct symbol *map__find_symbol(struct map *map, u64 addr, symbol_filter_t filter) { - if (map__load(self, filter) < 0) + if (map__load(map, filter) < 0) return NULL; - return dso__find_symbol(self->dso, self->type, addr); + return dso__find_symbol(map->dso, map->type, addr); } -struct symbol *map__find_symbol_by_name(struct map *self, const char *name, +struct symbol *map__find_symbol_by_name(struct map *map, const char *name, symbol_filter_t filter) { - if (map__load(self, filter) < 0) + if (map__load(map, filter) < 0) return NULL; - if (!dso__sorted_by_name(self->dso, self->type)) - dso__sort_by_name(self->dso, self->type); + if (!dso__sorted_by_name(map->dso, map->type)) + dso__sort_by_name(map->dso, map->type); - return dso__find_symbol_by_name(self->dso, self->type, name); + return dso__find_symbol_by_name(map->dso, map->type, name); } -struct map *map__clone(struct map *self) +struct map *map__clone(struct map *map) { - struct map *map = malloc(sizeof(*self)); - - if (!map) - return NULL; - - memcpy(map, self, sizeof(*self)); - - return map; + return memdup(map, sizeof(*map)); } int map__overlap(struct map *l, struct map *r) @@ -235,10 +329,10 @@ int map__overlap(struct map *l, struct map *r) return 0; } -size_t map__fprintf(struct map *self, FILE *fp) +size_t map__fprintf(struct map *map, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", - self->start, self->end, self->pgoff, self->dso->name); + map->start, map->end, map->pgoff, map->dso->name); } size_t map__fprintf_dsoname(struct map *map, FILE *fp) @@ -255,16 +349,65 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp) return fprintf(fp, "%s", dsoname); } -/* +int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, + FILE *fp) +{ + char *srcline; + int ret = 0; + + if (map && map->dso) { + srcline = get_srcline(map->dso, + map__rip_2objdump(map, addr)); + if (srcline != SRCLINE_UNKNOWN) + ret = fprintf(fp, "%s%s", prefix, srcline); + free_srcline(srcline); + } + return ret; +} + +/** + * map__rip_2objdump - convert symbol start address to objdump address. + * @map: memory map + * @rip: symbol start address + * * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. - * map->dso->adjust_symbols==1 for ET_EXEC-like cases. + * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is + * relative to section start. + * + * Return: Address suitable for passing to "objdump --start-address=" */ u64 map__rip_2objdump(struct map *map, u64 rip) { - u64 addr = map->dso->adjust_symbols ? - map->unmap_ip(map, rip) : /* RIP -> IP */ - rip; - return addr; + if (!map->dso->adjust_symbols) + return rip; + + if (map->dso->rel) + return rip - map->pgoff; + + return map->unmap_ip(map, rip) - map->reloc; +} + +/** + * map__objdump_2mem - convert objdump address to a memory address. + * @map: memory map + * @ip: objdump address + * + * Closely related to map__rip_2objdump(), this function takes an address from + * objdump and converts it to a memory address. Note this assumes that @map + * contains the address. To be sure the result is valid, check it forwards + * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip + * + * Return: Memory address. 
+ */ +u64 map__objdump_2mem(struct map *map, u64 ip) +{ + if (!map->dso->adjust_symbols) + return map->unmap_ip(map, ip); + + if (map->dso->rel) + return map->unmap_ip(map, ip + map->pgoff); + + return ip + map->reloc; } void map_groups__init(struct map_groups *mg) @@ -275,6 +418,7 @@ void map_groups__init(struct map_groups *mg) INIT_LIST_HEAD(&mg->removed_maps[i]); } mg->machine = NULL; + mg->refcnt = 1; } static void maps__delete(struct rb_root *maps) @@ -310,6 +454,28 @@ void map_groups__exit(struct map_groups *mg) } } +struct map_groups *map_groups__new(void) +{ + struct map_groups *mg = malloc(sizeof(*mg)); + + if (mg != NULL) + map_groups__init(mg); + + return mg; +} + +void map_groups__delete(struct map_groups *mg) +{ + map_groups__exit(mg); + free(mg); +} + +void map_groups__put(struct map_groups *mg) +{ + if (--mg->refcnt == 0) + map_groups__delete(mg); +} + void map_groups__flush(struct map_groups *mg) { int type; @@ -339,7 +505,8 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg, { struct map *map = map_groups__find(mg, type, addr); - if (map != NULL) { + /* Ensure map is loaded before using map->map_ip */ + if (map != NULL && map__load(map, filter) >= 0) { if (mapp != NULL) *mapp = map; return map__find_symbol(map, map->map_ip(map, addr), filter); @@ -370,6 +537,23 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, return NULL; } +int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter) +{ + if (ams->addr < ams->map->start || ams->addr > ams->map->end) { + if (ams->map->groups == NULL) + return -1; + ams->map = map_groups__find(ams->map->groups, ams->map->type, + ams->addr); + if (ams->map == NULL) + return -1; + } + + ams->al_addr = ams->map->map_ip(ams->map, ams->addr); + ams->sym = map__find_symbol(ams->map, ams->al_addr, filter); + + return ams->sym ? 
0 : -1; +} + size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp) { @@ -516,35 +700,6 @@ int map_groups__clone(struct map_groups *mg, return 0; } -static u64 map__reloc_map_ip(struct map *map, u64 ip) -{ - return ip + (s64)map->pgoff; -} - -static u64 map__reloc_unmap_ip(struct map *map, u64 ip) -{ - return ip - (s64)map->pgoff; -} - -void map__reloc_vmlinux(struct map *self) -{ - struct kmap *kmap = map__kmap(self); - s64 reloc; - - if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) - return; - - reloc = (kmap->ref_reloc_sym->unrelocated_addr - - kmap->ref_reloc_sym->addr); - - if (!reloc) - return; - - self->map_ip = map__reloc_map_ip; - self->unmap_ip = map__reloc_unmap_ip; - self->pgoff = reloc; -} - void maps__insert(struct rb_root *maps, struct map *map) { struct rb_node **p = &maps->rb_node; @@ -565,9 +720,9 @@ void maps__insert(struct rb_root *maps, struct map *map) rb_insert_color(&map->rb_node, maps); } -void maps__remove(struct rb_root *self, struct map *map) +void maps__remove(struct rb_root *maps, struct map *map) { - rb_erase(&map->rb_node, self); + rb_erase(&map->rb_node, maps); } struct map *maps__find(struct rb_root *maps, u64 ip) @@ -590,181 +745,20 @@ struct map *maps__find(struct rb_root *maps, u64 ip) return NULL; } -int machine__init(struct machine *self, const char *root_dir, pid_t pid) -{ - map_groups__init(&self->kmaps); - RB_CLEAR_NODE(&self->rb_node); - INIT_LIST_HEAD(&self->user_dsos); - INIT_LIST_HEAD(&self->kernel_dsos); - - self->threads = RB_ROOT; - INIT_LIST_HEAD(&self->dead_threads); - self->last_match = NULL; - - self->kmaps.machine = self; - self->pid = pid; - self->root_dir = strdup(root_dir); - if (self->root_dir == NULL) - return -ENOMEM; - - if (pid != HOST_KERNEL_ID) { - struct thread *thread = machine__findnew_thread(self, pid); - char comm[64]; - - if (thread == NULL) - return -ENOMEM; - - snprintf(comm, sizeof(comm), "[guest/%d]", pid); - thread__set_comm(thread, comm); - } - - return 0; -} - -static void dsos__delete(struct list_head *self) -{ - struct dso *pos, *n; - - list_for_each_entry_safe(pos, n, self, node) { - list_del(&pos->node); - dso__delete(pos); - } -} - -void machine__exit(struct machine *self) -{ - map_groups__exit(&self->kmaps); - dsos__delete(&self->user_dsos); - dsos__delete(&self->kernel_dsos); - free(self->root_dir); - self->root_dir = NULL; -} - -void machine__delete(struct machine *self) -{ - machine__exit(self); - free(self); -} - -struct machine *machines__add(struct rb_root *self, pid_t pid, - const char *root_dir) +struct map *maps__first(struct rb_root *maps) { - struct rb_node **p = &self->rb_node; - struct rb_node *parent = NULL; - struct machine *pos, *machine = malloc(sizeof(*machine)); - - if (!machine) - return NULL; - - if (machine__init(machine, root_dir, pid) != 0) { - free(machine); - return NULL; - } - - while (*p != NULL) { - parent = *p; - pos = rb_entry(parent, struct machine, rb_node); - if (pid < pos->pid) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } + struct rb_node *first = rb_first(maps); - rb_link_node(&machine->rb_node, parent, p); - rb_insert_color(&machine->rb_node, self); - - return machine; -} - -struct machine *machines__find(struct rb_root *self, pid_t pid) -{ - struct rb_node **p = &self->rb_node; - struct rb_node *parent = NULL; - struct machine *machine; - struct machine *default_machine = NULL; - - while (*p != NULL) { - parent = *p; - machine = rb_entry(parent, struct machine, rb_node); - if (pid < machine->pid) 
- p = &(*p)->rb_left; - else if (pid > machine->pid) - p = &(*p)->rb_right; - else - return machine; - if (!machine->pid) - default_machine = machine; - } - - return default_machine; -} - -struct machine *machines__findnew(struct rb_root *self, pid_t pid) -{ - char path[PATH_MAX]; - const char *root_dir = ""; - struct machine *machine = machines__find(self, pid); - - if (machine && (machine->pid == pid)) - goto out; - - if ((pid != HOST_KERNEL_ID) && - (pid != DEFAULT_GUEST_KERNEL_ID) && - (symbol_conf.guestmount)) { - sprintf(path, "%s/%d", symbol_conf.guestmount, pid); - if (access(path, R_OK)) { - static struct strlist *seen; - - if (!seen) - seen = strlist__new(true, NULL); - - if (!strlist__has_entry(seen, path)) { - pr_err("Can't access file %s\n", path); - strlist__add(seen, path); - } - machine = NULL; - goto out; - } - root_dir = path; - } - - machine = machines__add(self, pid, root_dir); - -out: - return machine; -} - -void machines__process(struct rb_root *self, machine__process_t process, void *data) -{ - struct rb_node *nd; - - for (nd = rb_first(self); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - process(pos, data); - } -} - -char *machine__mmap_name(struct machine *self, char *bf, size_t size) -{ - if (machine__is_host(self)) - snprintf(bf, size, "[%s]", "kernel.kallsyms"); - else if (machine__is_default_guest(self)) - snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); - else - snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid); - - return bf; + if (first) + return rb_entry(first, struct map, rb_node); + return NULL; } -void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size) +struct map *maps__next(struct map *map) { - struct rb_node *node; - struct machine *machine; - - for (node = rb_first(machines); node; node = rb_next(node)) { - machine = rb_entry(node, struct machine, rb_node); - machine->id_hdr_size = id_hdr_size; - } + struct rb_node *next = rb_next(&map->rb_node); - return; + if (next) + return rb_entry(next, struct map, rb_node); + return NULL; } diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index d2250fc97e2..7758c72522e 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -6,7 +6,7 @@ #include <linux/rbtree.h> #include <stdio.h> #include <stdbool.h> -#include "types.h" +#include <linux/types.h> enum map_type { MAP__FUNCTION = 0, @@ -35,7 +35,13 @@ struct map { bool referenced; bool erange_warned; u32 priv; + u32 prot; + u32 flags; u64 pgoff; + u64 reloc; + u32 maj, min; /* only valid for MMAP2 record */ + u64 ino; /* only valid for MMAP2 record */ + u64 ino_generation;/* only valid for MMAP2 record */ /* ip -> dso rip */ u64 (*map_ip)(struct map *, u64); @@ -55,35 +61,23 @@ struct map_groups { struct rb_root maps[MAP__NR_TYPES]; struct list_head removed_maps[MAP__NR_TYPES]; struct machine *machine; + int refcnt; }; -/* Native host kernel uses -1 as pid index in machine */ -#define HOST_KERNEL_ID (-1) -#define DEFAULT_GUEST_KERNEL_ID (0) - -struct machine { - struct rb_node rb_node; - pid_t pid; - u16 id_hdr_size; - char *root_dir; - struct rb_root threads; - struct list_head dead_threads; - struct thread *last_match; - struct list_head user_dsos; - struct list_head kernel_dsos; - struct map_groups kmaps; - struct map *vmlinux_maps[MAP__NR_TYPES]; -}; +struct map_groups *map_groups__new(void); +void map_groups__delete(struct map_groups *mg); -static inline -struct map *machine__kernel_map(struct machine *self, enum map_type type) +static inline struct 
map_groups *map_groups__get(struct map_groups *mg) { - return self->vmlinux_maps[type]; + ++mg->refcnt; + return mg; } -static inline struct kmap *map__kmap(struct map *self) +void map_groups__put(struct map_groups *mg); + +static inline struct kmap *map__kmap(struct map *map) { - return (struct kmap *)(self + 1); + return (struct kmap *)(map + 1); } static inline u64 map__map_ip(struct map *map, u64 ip) @@ -105,37 +99,55 @@ static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip) /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ u64 map__rip_2objdump(struct map *map, u64 rip); +/* objdump address -> memory address */ +u64 map__objdump_2mem(struct map *map, u64 ip); + struct symbol; +/* map__for_each_symbol - iterate over the symbols in the given map + * + * @map: the 'struct map *' in which symbols itereated + * @pos: the 'struct symbol *' to use as a loop cursor + * @n: the 'struct rb_node *' to use as a temporary storage + * Note: caller must ensure map->dso is not NULL (map is loaded). + */ +#define map__for_each_symbol(map, pos, n) \ + dso__for_each_symbol(map->dso, pos, n, map->type) + typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); -void map__init(struct map *self, enum map_type type, +void map__init(struct map *map, enum map_type type, u64 start, u64 end, u64 pgoff, struct dso *dso); struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, - u64 pgoff, u32 pid, char *filename, - enum map_type type); + u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino, + u64 ino_gen, u32 prot, u32 flags, + char *filename, enum map_type type); struct map *map__new2(u64 start, struct dso *dso, enum map_type type); -void map__delete(struct map *self); -struct map *map__clone(struct map *self); +void map__delete(struct map *map); +struct map *map__clone(struct map *map); int map__overlap(struct map *l, struct map *r); -size_t map__fprintf(struct map *self, FILE *fp); +size_t map__fprintf(struct map *map, FILE *fp); size_t map__fprintf_dsoname(struct map *map, FILE *fp); +int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, + FILE *fp); -int map__load(struct map *self, symbol_filter_t filter); -struct symbol *map__find_symbol(struct map *self, +int map__load(struct map *map, symbol_filter_t filter); +struct symbol *map__find_symbol(struct map *map, u64 addr, symbol_filter_t filter); -struct symbol *map__find_symbol_by_name(struct map *self, const char *name, +struct symbol *map__find_symbol_by_name(struct map *map, const char *name, symbol_filter_t filter); -void map__fixup_start(struct map *self); -void map__fixup_end(struct map *self); +void map__fixup_start(struct map *map); +void map__fixup_end(struct map *map); -void map__reloc_vmlinux(struct map *self); +void map__reloc_vmlinux(struct map *map); size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); void maps__remove(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 addr); +struct map *maps__first(struct rb_root *maps); +struct map *maps__next(struct map *map); void map_groups__init(struct map_groups *mg); void map_groups__exit(struct map_groups *mg); int map_groups__clone(struct map_groups *mg, @@ -143,44 +155,9 @@ int map_groups__clone(struct map_groups *mg, size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp); size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp); 
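/*
 * Editor's sketch (not part of the patch above): the map.h hunk turns
 * 'struct map_groups' into a refcounted, heap-allocated object via
 * map_groups__new()/map_groups__get()/map_groups__put().  Only the get
 * side is visible here, so the put below is an assumption about the
 * intended semantics (drop the count, delete on the last reference),
 * written against a stand-in type rather than the real struct.
 */
#include <stdlib.h>

struct toy_map_groups {
	int refcnt;
	/* ... maps, removed_maps, machine ... */
};

static struct toy_map_groups *toy_map_groups__get(struct toy_map_groups *mg)
{
	++mg->refcnt;			/* same pattern as the inline above */
	return mg;
}

static void toy_map_groups__put(struct toy_map_groups *mg)
{
	/* assumed: the last reference frees the object */
	if (mg && --mg->refcnt == 0)
		free(mg);
}
/*
 * A likely use, given the new API: a new thread can share its parent's
 * map groups with a get() instead of cloning every map.
 */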
-typedef void (*machine__process_t)(struct machine *self, void *data); - -void machines__process(struct rb_root *self, machine__process_t process, void *data); -struct machine *machines__add(struct rb_root *self, pid_t pid, - const char *root_dir); -struct machine *machines__find_host(struct rb_root *self); -struct machine *machines__find(struct rb_root *self, pid_t pid); -struct machine *machines__findnew(struct rb_root *self, pid_t pid); -void machines__set_id_hdr_size(struct rb_root *self, u16 id_hdr_size); -char *machine__mmap_name(struct machine *self, char *bf, size_t size); -int machine__init(struct machine *self, const char *root_dir, pid_t pid); -void machine__exit(struct machine *self); -void machine__delete(struct machine *self); - -struct perf_evsel; -struct perf_sample; -int machine__resolve_callchain(struct machine *machine, - struct perf_evsel *evsel, - struct thread *thread, - struct perf_sample *sample, - struct symbol **parent); int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, u64 addr); -/* - * Default guest kernel is defined by parameter --guestkallsyms - * and --guestmodules - */ -static inline bool machine__is_default_guest(struct machine *self) -{ - return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false; -} - -static inline bool machine__is_host(struct machine *self) -{ - return self ? self->pid == HOST_KERNEL_ID : false; -} - static inline void map_groups__insert(struct map_groups *mg, struct map *map) { maps__insert(&mg->maps[map->type], map); @@ -198,6 +175,17 @@ static inline struct map *map_groups__find(struct map_groups *mg, return maps__find(&mg->maps[type], addr); } +static inline struct map *map_groups__first(struct map_groups *mg, + enum map_type type) +{ + return maps__first(&mg->maps[type]); +} + +static inline struct map *map_groups__next(struct map *map) +{ + return maps__next(map); +} + struct symbol *map_groups__find_symbol(struct map_groups *mg, enum map_type type, u64 addr, struct map **mapp, @@ -209,28 +197,9 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, struct map **mapp, symbol_filter_t filter); +struct addr_map_symbol; -struct thread *machine__findnew_thread(struct machine *machine, pid_t pid); -void machine__remove_thread(struct machine *machine, struct thread *th); - -size_t machine__fprintf(struct machine *machine, FILE *fp); - -static inline -struct symbol *machine__find_kernel_symbol(struct machine *self, - enum map_type type, u64 addr, - struct map **mapp, - symbol_filter_t filter) -{ - return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter); -} - -static inline -struct symbol *machine__find_kernel_function(struct machine *self, u64 addr, - struct map **mapp, - symbol_filter_t filter) -{ - return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter); -} +int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter); static inline struct symbol *map_groups__find_function_by_name(struct map_groups *mg, @@ -240,22 +209,11 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *mg, return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter); } -static inline -struct symbol *machine__find_kernel_function_by_name(struct machine *self, - const char *name, - struct map **mapp, - symbol_filter_t filter) -{ - return map_groups__find_function_by_name(&self->kmaps, name, mapp, - filter); -} - int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, int verbose, FILE *fp); struct map 
*map_groups__find_by_name(struct map_groups *mg, enum map_type type, const char *name); -struct map *machine__new_module(struct machine *self, u64 start, const char *filename); void map_groups__flush(struct map_groups *mg); diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c index 3322b8446e8..31ee02d4e98 100644 --- a/tools/perf/util/pager.c +++ b/tools/perf/util/pager.c @@ -57,13 +57,13 @@ void setup_pager(void) } if (!pager) pager = getenv("PAGER"); - if (!pager) { - if (!access("/usr/bin/pager", X_OK)) - pager = "/usr/bin/pager"; - } + if (!(pager || access("/usr/bin/pager", X_OK))) + pager = "/usr/bin/pager"; + if (!(pager || access("/usr/bin/less", X_OK))) + pager = "/usr/bin/less"; if (!pager) - pager = "less"; - else if (!*pager || !strcmp(pager, "cat")) + pager = "cat"; + if (!*pager || !strcmp(pager, "cat")) return; spawned_pager = 1; /* means we are emitting to terminal */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 75c7b0fca6d..1e15df10a88 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1,4 +1,4 @@ -#include "../../../include/linux/hw_breakpoint.h" +#include <linux/hw_breakpoint.h> #include "util.h" #include "../perf.h" #include "evlist.h" @@ -6,15 +6,16 @@ #include "parse-options.h" #include "parse-events.h" #include "exec_cmd.h" -#include "string.h" +#include "linux/string.h" #include "symbol.h" #include "cache.h" #include "header.h" -#include "debugfs.h" +#include <api/fs/debugfs.h> #include "parse-events-bison.h" #define YY_EXTRA_TYPE int #include "parse-events-flex.h" #include "pmu.h" +#include "thread_map.h" #define MAX_NAME_LEN 100 @@ -108,6 +109,10 @@ static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { .symbol = "emulation-faults", .alias = "", }, + [PERF_COUNT_SW_DUMMY] = { + .symbol = "dummy", + .alias = "", + }, }; #define __PERF_EVENT_FIELD(config, name) \ @@ -199,7 +204,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) } path->name = malloc(MAX_EVENT_LENGTH); if (!path->name) { - free(path->system); + zfree(&path->system); free(path); return NULL; } @@ -217,6 +222,29 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) return NULL; } +struct tracepoint_path *tracepoint_name_to_path(const char *name) +{ + struct tracepoint_path *path = zalloc(sizeof(*path)); + char *str = strchr(name, ':'); + + if (path == NULL || str == NULL) { + free(path); + return NULL; + } + + path->system = strndup(name, str - name); + path->name = strdup(str+1); + + if (path->system == NULL || path->name == NULL) { + zfree(&path->system); + zfree(&path->name); + free(path); + path = NULL; + } + + return path; +} + const char *event_type(int type) { switch (type) { @@ -241,40 +269,30 @@ const char *event_type(int type) -static int __add_event(struct list_head **_list, int *idx, - struct perf_event_attr *attr, - char *name, struct cpu_map *cpus) +static struct perf_evsel * +__add_event(struct list_head *list, int *idx, + struct perf_event_attr *attr, + char *name, struct cpu_map *cpus) { struct perf_evsel *evsel; - struct list_head *list = *_list; - - if (!list) { - list = malloc(sizeof(*list)); - if (!list) - return -ENOMEM; - INIT_LIST_HEAD(list); - } event_attr_init(attr); - evsel = perf_evsel__new(attr, (*idx)++); - if (!evsel) { - free(list); - return -ENOMEM; - } + evsel = perf_evsel__new_idx(attr, (*idx)++); + if (!evsel) + return NULL; evsel->cpus = cpus; if (name) evsel->name = strdup(name); list_add_tail(&evsel->node, list); - *_list = list; - return 0; + return 
evsel; } -static int add_event(struct list_head **_list, int *idx, +static int add_event(struct list_head *list, int *idx, struct perf_event_attr *attr, char *name) { - return __add_event(_list, idx, attr, name, NULL); + return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM; } static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) @@ -295,7 +313,7 @@ static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES] return -1; } -int parse_events_add_cache(struct list_head **list, int *idx, +int parse_events_add_cache(struct list_head *list, int *idx, char *type, char *op_result1, char *op_result2) { struct perf_event_attr attr; @@ -356,32 +374,22 @@ int parse_events_add_cache(struct list_head **list, int *idx, return add_event(list, idx, &attr, name); } -static int add_tracepoint(struct list_head **listp, int *idx, +static int add_tracepoint(struct list_head *list, int *idx, char *sys_name, char *evt_name) { struct perf_evsel *evsel; - struct list_head *list = *listp; - if (!list) { - list = malloc(sizeof(*list)); - if (!list) - return -ENOMEM; - INIT_LIST_HEAD(list); - } - - evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); - if (!evsel) { - free(list); + evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++); + if (!evsel) return -ENOMEM; - } list_add_tail(&evsel->node, list); - *listp = list; + return 0; } -static int add_tracepoint_multi(struct list_head **list, int *idx, - char *sys_name, char *evt_name) +static int add_tracepoint_multi_event(struct list_head *list, int *idx, + char *sys_name, char *evt_name) { char evt_path[MAXPATHLEN]; struct dirent *evt_ent; @@ -408,10 +416,51 @@ static int add_tracepoint_multi(struct list_head **list, int *idx, ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); } + closedir(evt_dir); return ret; } -int parse_events_add_tracepoint(struct list_head **list, int *idx, +static int add_tracepoint_event(struct list_head *list, int *idx, + char *sys_name, char *evt_name) +{ + return strpbrk(evt_name, "*?") ? + add_tracepoint_multi_event(list, idx, sys_name, evt_name) : + add_tracepoint(list, idx, sys_name, evt_name); +} + +static int add_tracepoint_multi_sys(struct list_head *list, int *idx, + char *sys_name, char *evt_name) +{ + struct dirent *events_ent; + DIR *events_dir; + int ret = 0; + + events_dir = opendir(tracing_events_path); + if (!events_dir) { + perror("Can't open event dir"); + return -1; + } + + while (!ret && (events_ent = readdir(events_dir))) { + if (!strcmp(events_ent->d_name, ".") + || !strcmp(events_ent->d_name, "..") + || !strcmp(events_ent->d_name, "enable") + || !strcmp(events_ent->d_name, "header_event") + || !strcmp(events_ent->d_name, "header_page")) + continue; + + if (!strglobmatch(events_ent->d_name, sys_name)) + continue; + + ret = add_tracepoint_event(list, idx, events_ent->d_name, + evt_name); + } + + closedir(events_dir); + return ret; +} + +int parse_events_add_tracepoint(struct list_head *list, int *idx, char *sys, char *event) { int ret; @@ -420,9 +469,10 @@ int parse_events_add_tracepoint(struct list_head **list, int *idx, if (ret) return ret; - return strpbrk(event, "*?") ? 
- add_tracepoint_multi(list, idx, sys, event) : - add_tracepoint(list, idx, sys, event); + if (strpbrk(sys, "*?")) + return add_tracepoint_multi_sys(list, idx, sys, event); + else + return add_tracepoint_event(list, idx, sys, event); } static int @@ -465,7 +515,7 @@ do { \ return 0; } -int parse_events_add_breakpoint(struct list_head **list, int *idx, +int parse_events_add_breakpoint(struct list_head *list, int *idx, void *ptr, char *type) { struct perf_event_attr attr; @@ -492,7 +542,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, } static int config_term(struct perf_event_attr *attr, - struct parse_events__term *term) + struct parse_events_term *term) { #define CHECK_TYPE_VAL(type) \ do { \ @@ -537,7 +587,7 @@ do { \ static int config_attr(struct perf_event_attr *attr, struct list_head *head, int fail) { - struct parse_events__term *term; + struct parse_events_term *term; list_for_each_entry(term, head, list) if (config_term(attr, term) && fail) @@ -546,7 +596,7 @@ static int config_attr(struct perf_event_attr *attr, return 0; } -int parse_events_add_numeric(struct list_head **list, int *idx, +int parse_events_add_numeric(struct list_head *list, int *idx, u32 type, u64 config, struct list_head *head_config) { @@ -563,14 +613,14 @@ int parse_events_add_numeric(struct list_head **list, int *idx, return add_event(list, idx, &attr, NULL); } -static int parse_events__is_name_term(struct parse_events__term *term) +static int parse_events__is_name_term(struct parse_events_term *term) { return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME; } static char *pmu_event_name(struct list_head *head_terms) { - struct parse_events__term *term; + struct parse_events_term *term; list_for_each_entry(term, head_terms, list) if (parse_events__is_name_term(term)) @@ -579,11 +629,14 @@ static char *pmu_event_name(struct list_head *head_terms) return NULL; } -int parse_events_add_pmu(struct list_head **list, int *idx, +int parse_events_add_pmu(struct list_head *list, int *idx, char *name, struct list_head *head_config) { struct perf_event_attr attr; struct perf_pmu *pmu; + struct perf_evsel *evsel; + const char *unit; + double scale; pmu = perf_pmu__find(name); if (!pmu) @@ -591,7 +644,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx, memset(&attr, 0, sizeof(attr)); - if (perf_pmu__check_alias(pmu, head_config)) + if (perf_pmu__check_alias(pmu, head_config, &unit, &scale)) return -EINVAL; /* @@ -603,8 +656,14 @@ int parse_events_add_pmu(struct list_head **list, int *idx, if (perf_pmu__config(pmu, &attr, head_config)) return -EINVAL; - return __add_event(list, idx, &attr, pmu_event_name(head_config), - pmu->cpus); + evsel = __add_event(list, idx, &attr, pmu_event_name(head_config), + pmu->cpus); + if (evsel) { + evsel->unit = unit; + evsel->scale = scale; + } + + return evsel ? 0 : -ENOMEM; } int parse_events__modifier_group(struct list_head *list, @@ -622,6 +681,7 @@ void parse_events__set_leader(char *name, struct list_head *list) leader->group_name = name ? strdup(name) : NULL; } +/* list_event is assumed to point to malloc'ed memory */ void parse_events_update_lists(struct list_head *list_event, struct list_head *list_all) { @@ -642,6 +702,8 @@ struct event_modifier { int eG; int precise; int exclude_GH; + int sample_read; + int pinned; }; static int get_event_modifier(struct event_modifier *mod, char *str, @@ -653,18 +715,12 @@ static int get_event_modifier(struct event_modifier *mod, char *str, int eH = evsel ? evsel->attr.exclude_host : 0; int eG = evsel ? 
evsel->attr.exclude_guest : 0; int precise = evsel ? evsel->attr.precise_ip : 0; + int sample_read = 0; + int pinned = evsel ? evsel->attr.pinned : 0; int exclude = eu | ek | eh; int exclude_GH = evsel ? evsel->exclude_GH : 0; - /* - * We are here for group and 'GH' was not set as event - * modifier and whatever event/group modifier override - * default 'GH' setup. - */ - if (evsel && !exclude_GH) - eH = eG = 0; - memset(mod, 0, sizeof(*mod)); while (*str) { @@ -693,6 +749,10 @@ static int get_event_modifier(struct event_modifier *mod, char *str, /* use of precise requires exclude_guest */ if (!exclude_GH) eG = 1; + } else if (*str == 'S') { + sample_read = 1; + } else if (*str == 'D') { + pinned = 1; } else break; @@ -719,6 +779,30 @@ static int get_event_modifier(struct event_modifier *mod, char *str, mod->eG = eG; mod->precise = precise; mod->exclude_GH = exclude_GH; + mod->sample_read = sample_read; + mod->pinned = pinned; + + return 0; +} + +/* + * Basic modifier sanity check to validate it contains only one + * instance of any modifier (apart from 'p') present. + */ +static int check_modifier(char *str) +{ + char *p = str; + + /* The sizeof includes 0 byte as well. */ + if (strlen(str) > (sizeof("ukhGHpppSD") - 1)) + return -1; + + while (*p) { + if (*p != 'p' && strchr(p + 1, *p)) + return -1; + p++; + } + return 0; } @@ -730,11 +814,13 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add) if (str == NULL) return 0; - if (!add && get_event_modifier(&mod, str, NULL)) + if (check_modifier(str)) return -EINVAL; - list_for_each_entry(evsel, list, node) { + if (!add && get_event_modifier(&mod, str, NULL)) + return -EINVAL; + __evlist__for_each(list, evsel) { if (add && get_event_modifier(&mod, str, evsel)) return -EINVAL; @@ -745,6 +831,10 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add) evsel->attr.exclude_host = mod.eH; evsel->attr.exclude_guest = mod.eG; evsel->exclude_GH = mod.exclude_GH; + evsel->sample_read = mod.sample_read; + + if (perf_evsel__is_group_leader(evsel)) + evsel->attr.pinned = mod.pinned; } return 0; @@ -754,7 +844,7 @@ int parse_events_name(struct list_head *list, char *name) { struct perf_evsel *evsel; - list_for_each_entry(evsel, list, node) { + __evlist__for_each(list, evsel) { if (!evsel->name) evsel->name = strdup(name); } @@ -762,6 +852,32 @@ int parse_events_name(struct list_head *list, char *name) return 0; } +static int parse_events__scanner(const char *str, void *data, int start_token); + +static int parse_events_fixup(int ret, const char *str, void *data, + int start_token) +{ + char *o = strdup(str); + char *s = NULL; + char *t = o; + char *p; + int len = 0; + + if (!o) + return ret; + while ((p = strsep(&t, ",")) != NULL) { + if (s) + str_append(&s, &len, ","); + str_append(&s, &len, "cpu/"); + str_append(&s, &len, p); + str_append(&s, &len, "/"); + } + free(o); + if (!s) + return -ENOMEM; + return parse_events__scanner(s, data, start_token); +} + static int parse_events__scanner(const char *str, void *data, int start_token) { YY_BUFFER_STATE buffer; @@ -782,6 +898,8 @@ static int parse_events__scanner(const char *str, void *data, int start_token) parse_events__flush_buffer(buffer, scanner); parse_events__delete_buffer(buffer, scanner); parse_events_lex_destroy(scanner); + if (ret && !strchr(str, '/')) + ret = parse_events_fixup(ret, str, data, start_token); return ret; } @@ -790,7 +908,7 @@ static int parse_events__scanner(const char *str, void *data, int start_token) */ int 
parse_events_terms(struct list_head *terms, const char *str) { - struct parse_events_data__terms data = { + struct parse_events_terms data = { .terms = NULL, }; int ret; @@ -798,18 +916,18 @@ int parse_events_terms(struct list_head *terms, const char *str) ret = parse_events__scanner(str, &data, PE_START_TERMS); if (!ret) { list_splice(data.terms, terms); - free(data.terms); + zfree(&data.terms); return 0; } - parse_events__free_terms(data.terms); + if (data.terms) + parse_events__free_terms(data.terms); return ret; } -int parse_events(struct perf_evlist *evlist, const char *str, - int unset __maybe_unused) +int parse_events(struct perf_evlist *evlist, const char *str) { - struct parse_events_data__events data = { + struct parse_events_evlist data = { .list = LIST_HEAD_INIT(data.list), .idx = evlist->nr_entries, }; @@ -819,6 +937,7 @@ int parse_events(struct perf_evlist *evlist, const char *str, if (!ret) { int entries = data.idx - evlist->nr_entries; perf_evlist__splice_list_tail(evlist, &data.list, entries); + evlist->nr_groups += data.nr_groups; return 0; } @@ -827,8 +946,6 @@ int parse_events(struct perf_evlist *evlist, const char *str, * Both call perf_evlist__delete in case of error, so we dont * need to bother. */ - fprintf(stderr, "invalid or unsupported event: '%s'\n", str); - fprintf(stderr, "Run 'perf list' for a list of valid events\n"); return ret; } @@ -836,7 +953,13 @@ int parse_events_option(const struct option *opt, const char *str, int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; - return parse_events(evlist, str, unset); + int ret = parse_events(evlist, str); + + if (ret) { + fprintf(stderr, "invalid or unsupported event: '%s'\n", str); + fprintf(stderr, "Run 'perf list' for a list of valid events\n"); + } + return ret; } int parse_filter(const struct option *opt, const char *str, @@ -884,8 +1007,10 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (debugfs_valid_mountpoint(tracing_events_path)) + if (debugfs_valid_mountpoint(tracing_events_path)) { + printf(" [ Tracepoints not available: %s ]\n", strerror(errno)); return; + } sys_dir = opendir(tracing_events_path); if (!sys_dir) @@ -963,6 +1088,46 @@ int is_valid_tracepoint(const char *event_string) return 0; } +static bool is_event_supported(u8 type, unsigned config) +{ + bool ret = true; + int open_return; + struct perf_evsel *evsel; + struct perf_event_attr attr = { + .type = type, + .config = config, + .disabled = 1, + }; + struct { + struct thread_map map; + int threads[1]; + } tmap = { + .map.nr = 1, + .threads = { 0 }, + }; + + evsel = perf_evsel__new(&attr); + if (evsel) { + open_return = perf_evsel__open(evsel, NULL, &tmap.map); + ret = open_return >= 0; + + if (open_return == -EACCES) { + /* + * This happens if the paranoid value + * /proc/sys/kernel/perf_event_paranoid is set to 2 + * Re-run with exclude_kernel set; we don't do that + * by default as some ARM machines do not support it. 
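/*
 * Editor's sketch (not from the patch): is_event_supported() above probes
 * support by opening a disabled event on the current thread and, when
 * perf_event_paranoid == 2 makes the open fail with EACCES, retrying with
 * exclude_kernel set.  The same idea with the raw syscall, outside perf's
 * evsel layer; all names below are illustrative.
 */
#include <errno.h>
#include <string.h>
#include <stdbool.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static bool probe_event(__u32 type, __u64 config)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = type;
	attr.config   = config;
	attr.disabled = 1;		/* never actually start counting */

	/* pid == 0, cpu == -1: the calling thread, on any CPU */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0 && errno == EACCES) {
		/* paranoid == 2: retry without kernel-side counting */
		attr.exclude_kernel = 1;
		fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	}
	if (fd < 0)
		return false;
	close(fd);
	return true;
}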
+ * + */ + evsel->attr.exclude_kernel = 1; + ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; + } + perf_evsel__delete(evsel); + } + + return ret; +} + static void __print_events_type(u8 type, struct event_symbol *syms, unsigned max) { @@ -970,14 +1135,16 @@ static void __print_events_type(u8 type, struct event_symbol *syms, unsigned i; for (i = 0; i < max ; i++, syms++) { + if (!is_event_supported(type, i)) + continue; + if (strlen(syms->alias)) snprintf(name, sizeof(name), "%s OR %s", syms->symbol, syms->alias); else snprintf(name, sizeof(name), "%s", syms->symbol); - printf(" %-50s [%s]\n", name, - event_type_descriptors[type]); + printf(" %-50s [%s]\n", name, event_type_descriptors[type]); } } @@ -1006,6 +1173,10 @@ int print_hwcache_events(const char *event_glob, bool name_only) if (event_glob != NULL && !strglobmatch(name, event_glob)) continue; + if (!is_event_supported(PERF_TYPE_HW_CACHE, + type | (op << 8) | (i << 16))) + continue; + if (name_only) printf("%s ", name); else @@ -1016,6 +1187,8 @@ int print_hwcache_events(const char *event_glob, bool name_only) } } + if (printed) + printf("\n"); return printed; } @@ -1033,6 +1206,9 @@ static void print_symbol_events(const char *event_glob, unsigned type, (syms->alias && strglobmatch(syms->alias, event_glob)))) continue; + if (!is_event_supported(type, i)) + continue; + if (name_only) { printf("%s ", syms->symbol); continue; @@ -1070,18 +1246,19 @@ void print_events(const char *event_glob, bool name_only) print_hwcache_events(event_glob, name_only); + print_pmu_events(event_glob, name_only); + if (event_glob != NULL) return; if (!name_only) { - printf("\n"); printf(" %-50s [%s]\n", "rNNN", event_type_descriptors[PERF_TYPE_RAW]); printf(" %-50s [%s]\n", "cpu/t1=v1[,t2=v2,t3 ...]/modifier", event_type_descriptors[PERF_TYPE_RAW]); - printf(" (see 'perf list --help' on how to encode it)\n"); + printf(" (see 'man perf-list' on how to encode it)\n"); printf("\n"); printf(" %-50s [%s]\n", @@ -1093,16 +1270,16 @@ void print_events(const char *event_glob, bool name_only) print_tracepoint_events(NULL, NULL, name_only); } -int parse_events__is_hardcoded_term(struct parse_events__term *term) +int parse_events__is_hardcoded_term(struct parse_events_term *term) { return term->type_term != PARSE_EVENTS__TERM_TYPE_USER; } -static int new_term(struct parse_events__term **_term, int type_val, +static int new_term(struct parse_events_term **_term, int type_val, int type_term, char *config, char *str, u64 num) { - struct parse_events__term *term; + struct parse_events_term *term; term = zalloc(sizeof(*term)); if (!term) @@ -1121,6 +1298,7 @@ static int new_term(struct parse_events__term **_term, int type_val, term->val.str = str; break; default: + free(term); return -EINVAL; } @@ -1128,22 +1306,40 @@ static int new_term(struct parse_events__term **_term, int type_val, return 0; } -int parse_events__term_num(struct parse_events__term **term, +int parse_events_term__num(struct parse_events_term **term, int type_term, char *config, u64 num) { return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, config, NULL, num); } -int parse_events__term_str(struct parse_events__term **term, +int parse_events_term__str(struct parse_events_term **term, int type_term, char *config, char *str) { return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term, config, str, 0); } -int parse_events__term_clone(struct parse_events__term **new, - struct parse_events__term *term) +int parse_events_term__sym_hw(struct parse_events_term **term, + char *config, unsigned 
idx) +{ + struct event_symbol *sym; + + BUG_ON(idx >= PERF_COUNT_HW_MAX); + sym = &event_symbols_hw[idx]; + + if (config) + return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, + PARSE_EVENTS__TERM_TYPE_USER, config, + (char *) sym->symbol, 0); + else + return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, + PARSE_EVENTS__TERM_TYPE_USER, + (char *) "event", (char *) sym->symbol, 0); +} + +int parse_events_term__clone(struct parse_events_term **new, + struct parse_events_term *term) { return new_term(new, term->type_val, term->type_term, term->config, term->val.str, term->val.num); @@ -1151,10 +1347,8 @@ int parse_events__term_clone(struct parse_events__term **new, void parse_events__free_terms(struct list_head *terms) { - struct parse_events__term *term, *h; + struct parse_events_term *term, *h; list_for_each_entry_safe(term, h, terms, list) free(term); - - free(terms); } diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 839230ceb18..df094b4ed5e 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -6,9 +6,8 @@ #include <linux/list.h> #include <stdbool.h> -#include "types.h" -#include "../../../include/uapi/linux/perf_event.h" -#include "types.h" +#include <linux/types.h> +#include <linux/perf_event.h> struct list_head; struct perf_evsel; @@ -23,14 +22,14 @@ struct tracepoint_path { }; extern struct tracepoint_path *tracepoint_id_to_path(u64 config); +extern struct tracepoint_path *tracepoint_name_to_path(const char *name); extern bool have_tracepoints(struct list_head *evlist); const char *event_type(int type); extern int parse_events_option(const struct option *opt, const char *str, int unset); -extern int parse_events(struct perf_evlist *evlist, const char *str, - int unset); +extern int parse_events(struct perf_evlist *evlist, const char *str); extern int parse_events_terms(struct list_head *terms, const char *str); extern int parse_filter(const struct option *opt, const char *str, int unset); @@ -51,7 +50,7 @@ enum { PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE, }; -struct parse_events__term { +struct parse_events_term { char *config; union { char *str; @@ -62,42 +61,44 @@ struct parse_events__term { struct list_head list; }; -struct parse_events_data__events { +struct parse_events_evlist { struct list_head list; int idx; + int nr_groups; }; -struct parse_events_data__terms { +struct parse_events_terms { struct list_head *terms; }; -int parse_events__is_hardcoded_term(struct parse_events__term *term); -int parse_events__term_num(struct parse_events__term **_term, +int parse_events__is_hardcoded_term(struct parse_events_term *term); +int parse_events_term__num(struct parse_events_term **_term, int type_term, char *config, u64 num); -int parse_events__term_str(struct parse_events__term **_term, +int parse_events_term__str(struct parse_events_term **_term, int type_term, char *config, char *str); -int parse_events__term_clone(struct parse_events__term **new, - struct parse_events__term *term); +int parse_events_term__sym_hw(struct parse_events_term **term, + char *config, unsigned idx); +int parse_events_term__clone(struct parse_events_term **new, + struct parse_events_term *term); void parse_events__free_terms(struct list_head *terms); int parse_events__modifier_event(struct list_head *list, char *str, bool add); int parse_events__modifier_group(struct list_head *list, char *event_mod); int parse_events_name(struct list_head *list, char *name); -int parse_events_add_tracepoint(struct list_head **list, int *idx, +int 
parse_events_add_tracepoint(struct list_head *list, int *idx, char *sys, char *event); -int parse_events_add_numeric(struct list_head **list, int *idx, +int parse_events_add_numeric(struct list_head *list, int *idx, u32 type, u64 config, struct list_head *head_config); -int parse_events_add_cache(struct list_head **list, int *idx, +int parse_events_add_cache(struct list_head *list, int *idx, char *type, char *op_result1, char *op_result2); -int parse_events_add_breakpoint(struct list_head **list, int *idx, +int parse_events_add_breakpoint(struct list_head *list, int *idx, void *ptr, char *type); -int parse_events_add_pmu(struct list_head **list, int *idx, +int parse_events_add_pmu(struct list_head *list, int *idx, char *pmu , struct list_head *head_config); void parse_events__set_leader(char *name, struct list_head *list); void parse_events_update_lists(struct list_head *list_event, struct list_head *list_all); void parse_events_error(void *data, void *scanner, char const *msg); -int parse_events__test(void); void print_events(const char *event_glob, bool name_only); void print_events_type(u8 type); diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index c87efc12579..343299575b3 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -81,7 +81,9 @@ num_dec [0-9]+ num_hex 0x[a-fA-F0-9]+ num_raw_hex [a-fA-F0-9]+ name [a-zA-Z_*?][a-zA-Z0-9_*?]* -modifier_event [ukhpGH]{1,8} +name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* +/* If you add a modifier you need to update check_modifier() */ +modifier_event [ukhpGHSD]+ modifier_bp [rwx]{1,3} %% @@ -124,6 +126,37 @@ modifier_bp [rwx]{1,3} } +<config>{ +config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } +config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } +config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } +name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } +period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } +branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } +, { return ','; } +"/" { BEGIN(INITIAL); return '/'; } +{name_minus} { return str(yyscanner, PE_NAME); } +} + +<mem>{ +{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); } +: { return ':'; } +{num_dec} { return value(yyscanner, 10); } +{num_hex} { return value(yyscanner, 16); } + /* + * We need to separate 'mem:' scanner part, in order to get specific + * modifier bits parsed out. Otherwise we would need to handle PE_NAME + * and we'd need to parse it manually. During the escape from <mem> + * state we need to put the escaping char back, so we dont miss it. + */ +. { unput(*yytext); BEGIN(INITIAL); } + /* + * We destroy the scanner after reaching EOF, + * but anyway just to be sure get back to INIT state. 
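/*
 * Editor's sketch (not from the patch): with the renames above, event
 * terms are built through parse_events_term__num()/__str() and, after the
 * removal of free(terms) from parse_events__free_terms(), the caller owns
 * the list head.  Rough illustration of that contract; error handling and
 * the lifetime of the strdup()'d config string are simplified.
 */
#include <stdlib.h>
#include <string.h>
#include <linux/list.h>
#include "parse-events.h"

static struct list_head *build_one_term(void)
{
	struct list_head *head = malloc(sizeof(*head));
	struct parse_events_term *term;

	if (!head)
		return NULL;
	INIT_LIST_HEAD(head);

	/* roughly what the grammar builds for a user term like "foo=1" */
	if (parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
				   strdup("foo"), 1)) {
		free(head);
		return NULL;
	}
	list_add_tail(&term->list, head);
	return head;
}

/*
 * Tear-down after this series:
 *	parse_events__free_terms(head);	 frees the terms themselves ...
 *	free(head);			 ... but no longer the head.
 */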
+ */ +<<EOF>> { BEGIN(INITIAL); } +} + cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); } stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); } @@ -143,6 +176,7 @@ context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); } alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } +dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); } L1-dcache|l1-d|l1d|L1-data | L1-icache|l1-i|l1i|L1-instruction | @@ -159,17 +193,6 @@ speculative-read|speculative-load | refs|Reference|ops|access | misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); } -<config>{ -config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } -config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } -config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } -name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } -period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } -branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } -, { return ','; } -"/" { BEGIN(INITIAL); return '/'; } -} - mem: { BEGIN(mem); return PE_PREFIX_MEM; } r{num_raw_hex} { return raw(yyscanner); } {num_dec} { return value(yyscanner, 10); } @@ -185,25 +208,7 @@ r{num_raw_hex} { return raw(yyscanner); } "}" { return '}'; } = { return '='; } \n { } - -<mem>{ -{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); } -: { return ':'; } -{num_dec} { return value(yyscanner, 10); } -{num_hex} { return value(yyscanner, 16); } - /* - * We need to separate 'mem:' scanner part, in order to get specific - * modifier bits parsed out. Otherwise we would need to handle PE_NAME - * and we'd need to parse it manually. During the escape from <mem> - * state we need to put the escaping char back, so we dont miss it. - */ -. { unput(*yytext); BEGIN(INITIAL); } - /* - * We destroy the scanner after reaching EOF, - * but anyway just to be sure get back to INIT state. - */ -<<EOF>> { BEGIN(INITIAL); } -} +. 
{ } %% diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index cd88209e3c5..0bc87ba46bf 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -1,5 +1,4 @@ %pure-parser -%name-prefix "parse_events_" %parse-param {void *_data} %parse-param {void *scanner} %lex-param {void* scanner} @@ -10,7 +9,7 @@ #include <linux/compiler.h> #include <linux/list.h> -#include "types.h" +#include <linux/types.h> #include "util.h" #include "parse-events.h" #include "parse-events-bison.h" @@ -23,6 +22,21 @@ do { \ YYABORT; \ } while (0) +#define ALLOC_LIST(list) \ +do { \ + list = malloc(sizeof(*list)); \ + ABORT_ON(!list); \ + INIT_LIST_HEAD(list); \ +} while (0) + +static inc_group_count(struct list_head *list, + struct parse_events_evlist *data) +{ + /* Count groups only have more than 1 members */ + if (!list_is_last(list->next, list)) + data->nr_groups++; +} + %} %token PE_START_EVENTS PE_START_TERMS @@ -68,7 +82,7 @@ do { \ char *str; u64 num; struct list_head *head; - struct parse_events__term *term; + struct parse_events_term *term; } %% @@ -79,7 +93,7 @@ PE_START_TERMS start_terms start_events: groups { - struct parse_events_data__events *data = _data; + struct parse_events_evlist *data = _data; parse_events_update_lists($1, &data->list); } @@ -123,6 +137,7 @@ PE_NAME '{' events '}' { struct list_head *list = $3; + inc_group_count(list, _data); parse_events__set_leader($1, list); $$ = list; } @@ -131,6 +146,7 @@ PE_NAME '{' events '}' { struct list_head *list = $2; + inc_group_count(list, _data); parse_events__set_leader(NULL, list); $$ = list; } @@ -186,10 +202,11 @@ event_def: event_pmu | event_pmu: PE_NAME '/' event_config '/' { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3)); + ALLOC_LIST(list); + ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3)); parse_events__free_terms($3); $$ = list; } @@ -202,12 +219,13 @@ PE_VALUE_SYM_SW event_legacy_symbol: value_sym '/' event_config '/' { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; int type = $1 >> 16; int config = $1 & 255; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, type, config, $3)); parse_events__free_terms($3); $$ = list; @@ -215,12 +233,13 @@ value_sym '/' event_config '/' | value_sym sep_slash_dc { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; int type = $1 >> 16; int config = $1 & 255; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, type, config, NULL)); $$ = list; } @@ -228,86 +247,106 @@ value_sym sep_slash_dc event_legacy_cache: PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5)); + ALLOC_LIST(list); + ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5)); $$ = list; } | PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT { - struct parse_events_data__events *data = _data; - struct list_head *list = 
NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL)); + ALLOC_LIST(list); + ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL)); $$ = list; } | PE_NAME_CACHE_TYPE { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL)); + ALLOC_LIST(list); + ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL)); $$ = list; } event_legacy_mem: PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, (void *) $2, $4)); $$ = list; } | PE_PREFIX_MEM PE_VALUE sep_dc { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_breakpoint(&list, &data->idx, + ALLOC_LIST(list); + ABORT_ON(parse_events_add_breakpoint(list, &data->idx, (void *) $2, NULL)); $$ = list; } event_legacy_tracepoint: +PE_NAME '-' PE_NAME ':' PE_NAME +{ + struct parse_events_evlist *data = _data; + struct list_head *list; + char sys_name[128]; + snprintf(&sys_name, 128, "%s-%s", $1, $3); + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_tracepoint(list, &data->idx, &sys_name, $5)); + $$ = list; +} +| PE_NAME ':' PE_NAME { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3)); + ALLOC_LIST(list); + ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3)); $$ = list; } event_legacy_numeric: PE_VALUE ':' PE_VALUE { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL)); $$ = list; } event_legacy_raw: PE_RAW { - struct parse_events_data__events *data = _data; - struct list_head *list = NULL; + struct parse_events_evlist *data = _data; + struct list_head *list; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, + ALLOC_LIST(list); + ABORT_ON(parse_events_add_numeric(list, &data->idx, PERF_TYPE_RAW, $1, NULL)); $$ = list; } start_terms: event_config { - struct parse_events_data__terms *data = _data; + struct parse_events_terms *data = _data; data->terms = $1; } @@ -315,7 +354,7 @@ event_config: event_config ',' event_term { struct list_head *head = $1; - struct parse_events__term *term = $3; + struct parse_events_term *term = $3; ABORT_ON(!head); list_add_tail(&term->list, head); @@ -325,7 +364,7 @@ event_config ',' event_term event_term { struct list_head *head = malloc(sizeof(*head)); - struct parse_events__term *term = $1; + struct parse_events_term *term = $1; ABORT_ON(!head); INIT_LIST_HEAD(head); @@ -336,52 +375,70 @@ event_term event_term: PE_NAME '=' PE_NAME { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER, + 
ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3)); $$ = term; } | PE_NAME '=' PE_VALUE { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3)); $$ = term; } | +PE_NAME '=' PE_VALUE_SYM_HW +{ + struct parse_events_term *term; + int config = $3 & 255; + + ABORT_ON(parse_events_term__sym_hw(&term, $1, config)); + $$ = term; +} +| PE_NAME { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER, + ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, 1)); $$ = term; } | +PE_VALUE_SYM_HW +{ + struct parse_events_term *term; + int config = $1 & 255; + + ABORT_ON(parse_events_term__sym_hw(&term, NULL, config)); + $$ = term; +} +| PE_TERM '=' PE_NAME { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); + ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3)); $$ = term; } | PE_TERM '=' PE_VALUE { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3)); $$ = term; } | PE_TERM { - struct parse_events__term *term; + struct parse_events_term *term; - ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); + ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1)); $$ = term; } diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c index 2bc9e70df7e..bf48092983c 100644 --- a/tools/perf/util/parse-options.c +++ b/tools/perf/util/parse-options.c @@ -78,6 +78,8 @@ static int get_value(struct parse_opt_ctx_t *p, case OPTION_BOOLEAN: *(bool *)opt->value = unset ? false : true; + if (opt->set) + *(bool *)opt->set = true; return 0; case OPTION_INCR: @@ -224,6 +226,24 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, return 0; } if (!rest) { + if (!prefixcmp(options->long_name, "no-")) { + /* + * The long name itself starts with "no-", so + * accept the option without "no-" so that users + * do not have to enter "no-no-" to get the + * negation. + */ + rest = skip_prefix(arg, options->long_name + 3); + if (rest) { + flags |= OPT_UNSET; + goto match; + } + /* Abbreviated case */ + if (!prefixcmp(options->long_name + 3, arg)) { + flags |= OPT_UNSET; + goto is_abbreviated; + } + } /* abbreviated? 
*/ if (!strncmp(options->long_name, arg, arg_end - arg)) { is_abbreviated: @@ -259,6 +279,7 @@ is_abbreviated: if (!rest) continue; } +match: if (*rest) { if (*rest != '=') continue; @@ -339,10 +360,10 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, if (arg[1] != '-') { ctx->opt = arg + 1; if (internal_help && *ctx->opt == 'h') - return parse_options_usage(usagestr, options); + return usage_with_options_internal(usagestr, options, 0); switch (parse_short_opt(ctx, options)) { case -1: - return parse_options_usage(usagestr, options); + return parse_options_usage(usagestr, options, arg + 1, 1); case -2: goto unknown; default: @@ -352,10 +373,11 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, check_typos(arg + 1, options); while (ctx->opt) { if (internal_help && *ctx->opt == 'h') - return parse_options_usage(usagestr, options); + return usage_with_options_internal(usagestr, options, 0); + arg = ctx->opt; switch (parse_short_opt(ctx, options)) { case -1: - return parse_options_usage(usagestr, options); + return parse_options_usage(usagestr, options, arg, 1); case -2: /* fake a short option thing to hide the fact that we may have * started to parse aggregated stuff @@ -383,12 +405,14 @@ int parse_options_step(struct parse_opt_ctx_t *ctx, if (internal_help && !strcmp(arg + 2, "help-all")) return usage_with_options_internal(usagestr, options, 1); if (internal_help && !strcmp(arg + 2, "help")) - return parse_options_usage(usagestr, options); + return usage_with_options_internal(usagestr, options, 0); if (!strcmp(arg + 2, "list-opts")) - return PARSE_OPT_LIST; + return PARSE_OPT_LIST_OPTS; + if (!strcmp(arg + 2, "list-cmds")) + return PARSE_OPT_LIST_SUBCMDS; switch (parse_long_opt(ctx, arg + 2, options)) { case -1: - return parse_options_usage(usagestr, options); + return parse_options_usage(usagestr, options, arg + 2, 0); case -2: goto unknown; default: @@ -411,25 +435,45 @@ int parse_options_end(struct parse_opt_ctx_t *ctx) return ctx->cpidx + ctx->argc; } -int parse_options(int argc, const char **argv, const struct option *options, - const char * const usagestr[], int flags) +int parse_options_subcommand(int argc, const char **argv, const struct option *options, + const char *const subcommands[], const char *usagestr[], int flags) { struct parse_opt_ctx_t ctx; perf_header__set_cmdline(argc, argv); + /* build usage string if it's not provided */ + if (subcommands && !usagestr[0]) { + struct strbuf buf = STRBUF_INIT; + + strbuf_addf(&buf, "perf %s [<options>] {", argv[0]); + for (int i = 0; subcommands[i]; i++) { + if (i) + strbuf_addstr(&buf, "|"); + strbuf_addstr(&buf, subcommands[i]); + } + strbuf_addstr(&buf, "}"); + + usagestr[0] = strdup(buf.buf); + strbuf_release(&buf); + } + parse_options_start(&ctx, argc, argv, flags); switch (parse_options_step(&ctx, options, usagestr)) { case PARSE_OPT_HELP: exit(129); case PARSE_OPT_DONE: break; - case PARSE_OPT_LIST: + case PARSE_OPT_LIST_OPTS: while (options->type != OPTION_END) { printf("--%s ", options->long_name); options++; } exit(130); + case PARSE_OPT_LIST_SUBCMDS: + for (int i = 0; subcommands[i]; i++) + printf("%s ", subcommands[i]); + exit(130); default: /* PARSE_OPT_UNKNOWN */ if (ctx.argv[0][1] == '-') { error("unknown option `%s'", ctx.argv[0] + 2); @@ -442,9 +486,99 @@ int parse_options(int argc, const char **argv, const struct option *options, return parse_options_end(&ctx); } +int parse_options(int argc, const char **argv, const struct option *options, + const char * const usagestr[], int flags) +{ + return 
parse_options_subcommand(argc, argv, options, NULL, + (const char **) usagestr, flags); +} + #define USAGE_OPTS_WIDTH 24 #define USAGE_GAP 2 +static void print_option_help(const struct option *opts, int full) +{ + size_t pos; + int pad; + + if (opts->type == OPTION_GROUP) { + fputc('\n', stderr); + if (*opts->help) + fprintf(stderr, "%s\n", opts->help); + return; + } + if (!full && (opts->flags & PARSE_OPT_HIDDEN)) + return; + + pos = fprintf(stderr, " "); + if (opts->short_name) + pos += fprintf(stderr, "-%c", opts->short_name); + else + pos += fprintf(stderr, " "); + + if (opts->long_name && opts->short_name) + pos += fprintf(stderr, ", "); + if (opts->long_name) + pos += fprintf(stderr, "--%s", opts->long_name); + + switch (opts->type) { + case OPTION_ARGUMENT: + break; + case OPTION_LONG: + case OPTION_U64: + case OPTION_INTEGER: + case OPTION_UINTEGER: + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=<n>]"); + else + pos += fprintf(stderr, "[<n>]"); + else + pos += fprintf(stderr, " <n>"); + break; + case OPTION_CALLBACK: + if (opts->flags & PARSE_OPT_NOARG) + break; + /* FALLTHROUGH */ + case OPTION_STRING: + if (opts->argh) { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=<%s>]", opts->argh); + else + pos += fprintf(stderr, "[<%s>]", opts->argh); + else + pos += fprintf(stderr, " <%s>", opts->argh); + } else { + if (opts->flags & PARSE_OPT_OPTARG) + if (opts->long_name) + pos += fprintf(stderr, "[=...]"); + else + pos += fprintf(stderr, "[...]"); + else + pos += fprintf(stderr, " ..."); + } + break; + default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */ + case OPTION_END: + case OPTION_GROUP: + case OPTION_BIT: + case OPTION_BOOLEAN: + case OPTION_INCR: + case OPTION_SET_UINT: + case OPTION_SET_PTR: + break; + } + + if (pos <= USAGE_OPTS_WIDTH) + pad = USAGE_OPTS_WIDTH - pos; + else { + fputc('\n', stderr); + pad = USAGE_OPTS_WIDTH; + } + fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); +} + int usage_with_options_internal(const char * const *usagestr, const struct option *opts, int full) { @@ -464,87 +598,9 @@ int usage_with_options_internal(const char * const *usagestr, if (opts->type != OPTION_GROUP) fputc('\n', stderr); - for (; opts->type != OPTION_END; opts++) { - size_t pos; - int pad; - - if (opts->type == OPTION_GROUP) { - fputc('\n', stderr); - if (*opts->help) - fprintf(stderr, "%s\n", opts->help); - continue; - } - if (!full && (opts->flags & PARSE_OPT_HIDDEN)) - continue; - - pos = fprintf(stderr, " "); - if (opts->short_name) - pos += fprintf(stderr, "-%c", opts->short_name); - else - pos += fprintf(stderr, " "); - - if (opts->long_name && opts->short_name) - pos += fprintf(stderr, ", "); - if (opts->long_name) - pos += fprintf(stderr, "--%s", opts->long_name); + for ( ; opts->type != OPTION_END; opts++) + print_option_help(opts, full); - switch (opts->type) { - case OPTION_ARGUMENT: - break; - case OPTION_LONG: - case OPTION_U64: - case OPTION_INTEGER: - case OPTION_UINTEGER: - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=<n>]"); - else - pos += fprintf(stderr, "[<n>]"); - else - pos += fprintf(stderr, " <n>"); - break; - case OPTION_CALLBACK: - if (opts->flags & PARSE_OPT_NOARG) - break; - /* FALLTHROUGH */ - case OPTION_STRING: - if (opts->argh) { - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=<%s>]", opts->argh); - else - pos += fprintf(stderr, "[<%s>]", opts->argh); - else - pos += 
fprintf(stderr, " <%s>", opts->argh); - } else { - if (opts->flags & PARSE_OPT_OPTARG) - if (opts->long_name) - pos += fprintf(stderr, "[=...]"); - else - pos += fprintf(stderr, "[...]"); - else - pos += fprintf(stderr, " ..."); - } - break; - default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */ - case OPTION_END: - case OPTION_GROUP: - case OPTION_BIT: - case OPTION_BOOLEAN: - case OPTION_INCR: - case OPTION_SET_UINT: - case OPTION_SET_PTR: - break; - } - - if (pos <= USAGE_OPTS_WIDTH) - pad = USAGE_OPTS_WIDTH - pos; - else { - fputc('\n', stderr); - pad = USAGE_OPTS_WIDTH; - } - fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help); - } fputc('\n', stderr); return PARSE_OPT_HELP; @@ -559,9 +615,45 @@ void usage_with_options(const char * const *usagestr, } int parse_options_usage(const char * const *usagestr, - const struct option *opts) + const struct option *opts, + const char *optstr, bool short_opt) { - return usage_with_options_internal(usagestr, opts, 0); + if (!usagestr) + goto opt; + + fprintf(stderr, "\n usage: %s\n", *usagestr++); + while (*usagestr && **usagestr) + fprintf(stderr, " or: %s\n", *usagestr++); + while (*usagestr) { + fprintf(stderr, "%s%s\n", + **usagestr ? " " : "", + *usagestr); + usagestr++; + } + fputc('\n', stderr); + +opt: + for ( ; opts->type != OPTION_END; opts++) { + if (short_opt) { + if (opts->short_name == *optstr) + break; + continue; + } + + if (opts->long_name == NULL) + continue; + + if (!prefixcmp(optstr, opts->long_name)) + break; + if (!prefixcmp(optstr, "no-") && + !prefixcmp(optstr + 3, opts->long_name)) + break; + } + + if (opts->type != OPTION_END) + print_option_help(opts, 0); + + return PARSE_OPT_HELP; } diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index 7bb5999940c..d8dac8ac5f3 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h @@ -82,6 +82,9 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset); * OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in * the value when met. * CALLBACKS can use it like they want. 
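/*
 * Editor's sketch (not from the patch): how a builtin might call the new
 * parse_options_subcommand().  Only the calling convention is taken from
 * the hunk above (a NULL usagestr[0] plus a subcommand list makes it
 * synthesize "perf <cmd> [<options>] {a|b|...}", and --list-cmds prints
 * the subcommands); the command, option and subcommand names are made up.
 */
#include <stdbool.h>
#include "parse-options.h"

int cmd_example(int argc, const char **argv)
{
	bool force = false;
	const char *const example_subcommands[] = { "start", "stop", NULL };
	const char *example_usage[] = {
		NULL,		/* generated by parse_options_subcommand() */
		NULL
	};
	const struct option options[] = {
		OPT_BOOLEAN('f', "force", &force, "don't ask for confirmation"),
		OPT_END()
	};

	argc = parse_options_subcommand(argc, argv, options,
					example_subcommands, example_usage, 0);
	if (!argc)
		usage_with_options(example_usage, options);

	/* argv[0] now names the chosen subcommand, "start" or "stop" */
	return 0;
}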
+ * + * `set`:: + * whether an option was set by the user */ struct option { enum parse_opt_type type; @@ -94,6 +97,7 @@ struct option { int flags; parse_opt_cb *callback; intptr_t defval; + bool *set; }; #define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v ) @@ -103,6 +107,10 @@ struct option { #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) } #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) } +#define OPT_BOOLEAN_SET(s, l, v, os, h) \ + { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \ + .value = check_vtype(v, bool *), .help = (h), \ + .set = check_vtype(os, bool *)} #define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) } #define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) } #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } @@ -132,6 +140,11 @@ extern int parse_options(int argc, const char **argv, const struct option *options, const char * const usagestr[], int flags); +extern int parse_options_subcommand(int argc, const char **argv, + const struct option *options, + const char *const subcommands[], + const char *usagestr[], int flags); + extern NORETURN void usage_with_options(const char * const *usagestr, const struct option *options); @@ -140,7 +153,8 @@ extern NORETURN void usage_with_options(const char * const *usagestr, enum { PARSE_OPT_HELP = -1, PARSE_OPT_DONE, - PARSE_OPT_LIST, + PARSE_OPT_LIST_OPTS, + PARSE_OPT_LIST_SUBCMDS, PARSE_OPT_UNKNOWN, }; @@ -158,7 +172,9 @@ struct parse_opt_ctx_t { }; extern int parse_options_usage(const char * const *usagestr, - const struct option *opts); + const struct option *opts, + const char *optstr, + bool short_opt); extern void parse_options_start(struct parse_opt_ctx_t *ctx, int argc, const char **argv, int flags); diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c index a8c49548ca4..5d13cb45b31 100644 --- a/tools/perf/util/path.c +++ b/tools/perf/util/path.c @@ -22,19 +22,23 @@ static const char *get_perf_dir(void) return "."; } -#ifndef HAVE_STRLCPY -size_t strlcpy(char *dest, const char *src, size_t size) +/* + * If libc has strlcpy() then that version will override this + * implementation: + */ +size_t __weak strlcpy(char *dest, const char *src, size_t size) { size_t ret = strlen(src); if (size) { size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); dest[len] = '\0'; } + return ret; } -#endif static char *get_pathname(void) { diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c new file mode 100644 index 00000000000..43168fb0d9a --- /dev/null +++ b/tools/perf/util/perf_regs.c @@ -0,0 +1,27 @@ +#include <errno.h> +#include "perf_regs.h" +#include "event.h" + +int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) +{ + int i, idx = 0; + u64 mask = regs->mask; + + if (regs->cache_mask & (1 << id)) + goto out; + + if (!(mask & (1 << id))) + return -EINVAL; + + for (i = 0; i < id; i++) { + if (mask & (1 << i)) + idx++; + } + + regs->cache_mask |= (1 << id); + regs->cache_regs[id] = regs->regs[idx]; + +out: + *valp = regs->cache_regs[id]; + return 0; +} diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 5a4f2b6f373..980dbf76bc9 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -1,14 +1,29 @@ #ifndef __PERF_REGS_H #define __PERF_REGS_H -#ifdef HAVE_PERF_REGS +#include <linux/types.h> + +struct regs_dump; + +#ifdef HAVE_PERF_REGS_SUPPORT #include <perf_regs.h> + +int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); + #else #define PERF_REGS_MASK 0 +#define PERF_REGS_MAX 0 static inline const char *perf_reg_name(int id __maybe_unused) { return NULL; } -#endif /* HAVE_PERF_REGS */ + +static inline int perf_reg_value(u64 *valp __maybe_unused, + struct regs_dump *regs __maybe_unused, + int id __maybe_unused) +{ + return 0; +} +#endif /* HAVE_PERF_REGS_SUPPORT */ #endif /* __PERF_REGS_H */ diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 8a2229da594..7a811eb61f7 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1,16 +1,32 @@ - #include <linux/list.h> #include <sys/types.h> -#include <sys/stat.h> #include <unistd.h> #include <stdio.h> #include <dirent.h> -#include "sysfs.h" +#include <api/fs/fs.h> +#include <locale.h> #include "util.h" #include "pmu.h" #include "parse-events.h" #include "cpumap.h" +#define UNIT_MAX_LEN 31 /* max length for event unit name */ + +struct perf_pmu_alias { + char *name; + struct list_head terms; + struct list_head list; + char unit[UNIT_MAX_LEN+1]; + double scale; +}; + +struct perf_pmu_format { + char *name; + int value; + DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); + struct list_head list; +}; + #define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" int perf_pmu_parse(struct list_head *list, char *name); @@ -22,7 +38,7 @@ static LIST_HEAD(pmus); * Parse & process all the sysfs attributes located under * the directory specified in 'dir' parameter. */ -static int pmu_format_parse(char *dir, struct list_head *head) +int perf_pmu__format_parse(char *dir, struct list_head *head) { struct dirent *evt_ent; DIR *format_dir; @@ -61,13 +77,12 @@ static int pmu_format_parse(char *dir, struct list_head *head) * located at: * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes. 
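perf_reg_value() above maps a register id onto its slot in the packed regs->regs[] array by counting how many lower-numbered registers are present in the sample's mask, caching the answer as it goes. The same index calculation can be written with a popcount; a small standalone sketch (the builtin requires GCC or Clang):

#include <stdint.h>
#include <stdio.h>

/* registers are dumped in ascending bit order, so the slot of register 'id'
 * is the number of mask bits set below it */
static int packed_index(uint64_t mask, int id)
{
	if (!(mask & (1ULL << id)))
		return -1;			/* register not sampled */
	return __builtin_popcountll(mask & ((1ULL << id) - 1));
}

int main(void)
{
	uint64_t mask = (1ULL << 0) | (1ULL << 3) | (1ULL << 7);

	/* regs[] would hold registers 0, 3 and 7, in that order */
	printf("reg 7 lives at slot %d\n", packed_index(mask, 7)); /* prints 2 */
	return 0;
}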
*/ -static int pmu_format(char *name, struct list_head *format) +static int pmu_format(const char *name, struct list_head *format) { struct stat st; char path[PATH_MAX]; - const char *sysfs; + const char *sysfs = sysfs__mountpoint(); - sysfs = sysfs_find_mountpoint(); if (!sysfs) return -1; @@ -77,15 +92,88 @@ static int pmu_format(char *name, struct list_head *format) if (stat(path, &st) < 0) return 0; /* no error if format does not exist */ - if (pmu_format_parse(path, format)) + if (perf_pmu__format_parse(path, format)) + return -1; + + return 0; +} + +static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *name) +{ + struct stat st; + ssize_t sret; + char scale[128]; + int fd, ret = -1; + char path[PATH_MAX]; + const char *lc; + + snprintf(path, PATH_MAX, "%s/%s.scale", dir, name); + + fd = open(path, O_RDONLY); + if (fd == -1) + return -1; + + if (fstat(fd, &st) < 0) + goto error; + + sret = read(fd, scale, sizeof(scale)-1); + if (sret < 0) + goto error; + + scale[sret] = '\0'; + /* + * save current locale + */ + lc = setlocale(LC_NUMERIC, NULL); + + /* + * force to C locale to ensure kernel + * scale string is converted correctly. + * kernel uses default C locale. + */ + setlocale(LC_NUMERIC, "C"); + + alias->scale = strtod(scale, NULL); + + /* restore locale */ + setlocale(LC_NUMERIC, lc); + + ret = 0; +error: + close(fd); + return ret; +} + +static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *name) +{ + char path[PATH_MAX]; + ssize_t sret; + int fd; + + snprintf(path, PATH_MAX, "%s/%s.unit", dir, name); + + fd = open(path, O_RDONLY); + if (fd == -1) return -1; + sret = read(fd, alias->unit, UNIT_MAX_LEN); + if (sret < 0) + goto error; + + close(fd); + + alias->unit[sret] = '\0'; + return 0; +error: + close(fd); + alias->unit[0] = '\0'; + return -1; } -static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) +static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file) { - struct perf_pmu__alias *alias; + struct perf_pmu_alias *alias; char buf[256]; int ret; @@ -99,6 +187,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) return -ENOMEM; INIT_LIST_HEAD(&alias->terms); + alias->scale = 1.0; + alias->unit[0] = '\0'; + ret = parse_events_terms(&alias->terms, buf); if (ret) { free(alias); @@ -106,7 +197,14 @@ static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file) } alias->name = strdup(name); + /* + * load unit name and scale if available + */ + perf_pmu__parse_unit(alias, dir, name); + perf_pmu__parse_scale(alias, dir, name); + list_add_tail(&alias->list, list); + return 0; } @@ -118,6 +216,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head) { struct dirent *evt_ent; DIR *event_dir; + size_t len; int ret = 0; event_dir = opendir(dir); @@ -132,13 +231,24 @@ static int pmu_aliases_parse(char *dir, struct list_head *head) if (!strcmp(name, ".") || !strcmp(name, "..")) continue; + /* + * skip .unit and .scale info files + * parsed in perf_pmu__new_alias() + */ + len = strlen(name); + if (len > 5 && !strcmp(name + len - 5, ".unit")) + continue; + if (len > 6 && !strcmp(name + len - 6, ".scale")) + continue; + snprintf(path, PATH_MAX, "%s/%s", dir, name); ret = -EINVAL; file = fopen(path, "r"); if (!file) break; - ret = perf_pmu__new_alias(head, name, file); + + ret = perf_pmu__new_alias(head, dir, name, file); fclose(file); } @@ -150,13 +260,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head) * 
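perf_pmu__parse_scale() above forces LC_NUMERIC to "C" around strtod() because the kernel always prints the scale with a '.' decimal point, while the user's locale may expect ','. A minimal standalone illustration of the save/force/restore pattern (this sketch additionally copies the saved locale string, since setlocale() returns a pointer to static storage):

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static double parse_kernel_double(const char *buf)
{
	char saved[64];
	const char *cur = setlocale(LC_NUMERIC, NULL);
	double val;

	snprintf(saved, sizeof(saved), "%s", cur ? cur : "C");

	setlocale(LC_NUMERIC, "C");	/* sysfs values use the C locale */
	val = strtod(buf, NULL);
	setlocale(LC_NUMERIC, saved);	/* restore whatever the user had */

	return val;
}

int main(void)
{
	setlocale(LC_NUMERIC, "");	/* honour the user's locale elsewhere */
	printf("%f\n", parse_kernel_double("2.5e-1"));
	return 0;
}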
Reading the pmu event aliases definition, which should be located at: * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes. */ -static int pmu_aliases(char *name, struct list_head *head) +static int pmu_aliases(const char *name, struct list_head *head) { struct stat st; char path[PATH_MAX]; - const char *sysfs; + const char *sysfs = sysfs__mountpoint(); - sysfs = sysfs_find_mountpoint(); if (!sysfs) return -1; @@ -164,7 +273,7 @@ static int pmu_aliases(char *name, struct list_head *head) "%s/bus/event_source/devices/%s/events", sysfs, name); if (stat(path, &st) < 0) - return -1; + return 0; /* no error if 'events' does not exist */ if (pmu_aliases_parse(path, head)) return -1; @@ -172,20 +281,20 @@ static int pmu_aliases(char *name, struct list_head *head) return 0; } -static int pmu_alias_terms(struct perf_pmu__alias *alias, +static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms) { - struct parse_events__term *term, *clone; + struct parse_events_term *term, *cloned; LIST_HEAD(list); int ret; list_for_each_entry(term, &alias->terms, list) { - ret = parse_events__term_clone(&clone, term); + ret = parse_events_term__clone(&cloned, term); if (ret) { parse_events__free_terms(&list); return ret; } - list_add_tail(&clone->list, &list); + list_add_tail(&cloned->list, &list); } list_splice(&list, terms); return 0; @@ -196,15 +305,14 @@ static int pmu_alias_terms(struct perf_pmu__alias *alias, * located at: * /sys/bus/event_source/devices/<dev>/type as sysfs attribute. */ -static int pmu_type(char *name, __u32 *type) +static int pmu_type(const char *name, __u32 *type) { struct stat st; char path[PATH_MAX]; - const char *sysfs; FILE *file; int ret = 0; + const char *sysfs = sysfs__mountpoint(); - sysfs = sysfs_find_mountpoint(); if (!sysfs) return -1; @@ -229,11 +337,10 @@ static int pmu_type(char *name, __u32 *type) static void pmu_read_sysfs(void) { char path[PATH_MAX]; - const char *sysfs; DIR *dir; struct dirent *dent; + const char *sysfs = sysfs__mountpoint(); - sysfs = sysfs_find_mountpoint(); if (!sysfs) return; @@ -254,15 +361,14 @@ static void pmu_read_sysfs(void) closedir(dir); } -static struct cpu_map *pmu_cpumask(char *name) +static struct cpu_map *pmu_cpumask(const char *name) { struct stat st; char path[PATH_MAX]; - const char *sysfs; FILE *file; struct cpu_map *cpus; + const char *sysfs = sysfs__mountpoint(); - sysfs = sysfs_find_mountpoint(); if (!sysfs) return NULL; @@ -281,7 +387,7 @@ static struct cpu_map *pmu_cpumask(char *name) return cpus; } -static struct perf_pmu *pmu_lookup(char *name) +static struct perf_pmu *pmu_lookup(const char *name) { struct perf_pmu *pmu; LIST_HEAD(format); @@ -296,6 +402,9 @@ static struct perf_pmu *pmu_lookup(char *name) if (pmu_format(name, &format)) return NULL; + if (pmu_aliases(name, &aliases)) + return NULL; + if (pmu_type(name, &type)) return NULL; @@ -305,8 +414,6 @@ static struct perf_pmu *pmu_lookup(char *name) pmu->cpus = pmu_cpumask(name); - pmu_aliases(name, &aliases); - INIT_LIST_HEAD(&pmu->format); INIT_LIST_HEAD(&pmu->aliases); list_splice(&format, &pmu->format); @@ -317,7 +424,7 @@ static struct perf_pmu *pmu_lookup(char *name) return pmu; } -static struct perf_pmu *pmu_find(char *name) +static struct perf_pmu *pmu_find(const char *name) { struct perf_pmu *pmu; @@ -343,7 +450,7 @@ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu) return NULL; } -struct perf_pmu *perf_pmu__find(char *name) +struct perf_pmu *perf_pmu__find(const char *name) { struct perf_pmu *pmu; @@ -359,10 +466,10 @@ 
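pmu_read_sysfs() and pmu_type() above discover PMUs by listing /sys/bus/event_source/devices/ and reading each device's 'type' attribute. A small standalone sketch of that discovery using nothing but libc and the sysfs layout:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *base = "/sys/bus/event_source/devices";
	char path[512];
	struct dirent *dent;
	DIR *dir = opendir(base);

	if (!dir)
		return 1;

	while ((dent = readdir(dir)) != NULL) {
		FILE *f;
		unsigned int type;

		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;

		/* each PMU exports its perf_event_attr.type value here */
		snprintf(path, sizeof(path), "%s/%s/type", base, dent->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fscanf(f, "%u", &type) == 1)
			printf("%-16s type=%u\n", dent->d_name, type);
		fclose(f);
	}
	closedir(dir);
	return 0;
}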
struct perf_pmu *perf_pmu__find(char *name) return pmu_lookup(name); } -static struct perf_pmu__format* +static struct perf_pmu_format * pmu_find_format(struct list_head *formats, char *name) { - struct perf_pmu__format *format; + struct perf_pmu_format *format; list_for_each_entry(format, formats, list) if (!strcmp(format->name, name)) @@ -398,13 +505,13 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value) /* * Setup one of config[12] attr members based on the - * user input data - temr parameter. + * user input data - term parameter. */ static int pmu_config_term(struct list_head *formats, struct perf_event_attr *attr, - struct parse_events__term *term) + struct parse_events_term *term) { - struct perf_pmu__format *format; + struct perf_pmu_format *format; __u64 *vp; /* @@ -445,10 +552,11 @@ static int pmu_config_term(struct list_head *formats, return 0; } -static int pmu_config(struct list_head *formats, struct perf_event_attr *attr, - struct list_head *head_terms) +int perf_pmu__config_terms(struct list_head *formats, + struct perf_event_attr *attr, + struct list_head *head_terms) { - struct parse_events__term *term; + struct parse_events_term *term; list_for_each_entry(term, head_terms, list) if (pmu_config_term(formats, attr, term)) @@ -466,13 +574,13 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, struct list_head *head_terms) { attr->type = pmu->type; - return pmu_config(&pmu->format, attr, head_terms); + return perf_pmu__config_terms(&pmu->format, attr, head_terms); } -static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, - struct parse_events__term *term) +static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu, + struct parse_events_term *term) { - struct perf_pmu__alias *alias; + struct perf_pmu_alias *alias; char *name; if (parse_events__is_hardcoded_term(term)) @@ -499,16 +607,46 @@ static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu, return NULL; } + +static int check_unit_scale(struct perf_pmu_alias *alias, + const char **unit, double *scale) +{ + /* + * Only one term in event definition can + * define unit and scale, fail if there's + * more than one. 
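pmu_config_term() above leans on pmu_format_value() to scatter the low bits of the user-supplied value into the bit positions named by a sysfs format string such as "config:0-1,62-63", lowest position first; the expected values in the self-test that this patch removes further down (krava01 = 15 into bits 0-1,62-63, and so on, giving 0xc00000000002a823) match that filling order. A standalone sketch of the scatter step, assuming the format has already been parsed into a 64-bit position mask:

#include <stdint.h>
#include <stdio.h>

/* place successive low-order bits of 'value' into the positions that are
 * set in 'format_mask', lowest position first */
static uint64_t scatter_bits(uint64_t format_mask, uint64_t value)
{
	uint64_t out = 0;

	for (int b = 0; b < 64; b++) {
		if (!(format_mask & (1ULL << b)))
			continue;
		if (value & 1)
			out |= 1ULL << b;
		value >>= 1;
	}
	return out;
}

int main(void)
{
	/* "config:0-1,62-63" -> positions 0, 1, 62, 63 */
	uint64_t mask = 0x3ULL | (0x3ULL << 62);

	/* term value 0x5 -> bits 0 and 62 end up set */
	printf("%#llx\n", (unsigned long long)scatter_bits(mask, 0x5));
	return 0;
}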
+ */ + if ((*unit && alias->unit) || + (*scale && alias->scale)) + return -EINVAL; + + if (alias->unit) + *unit = alias->unit; + + if (alias->scale) + *scale = alias->scale; + + return 0; +} + /* * Find alias in the terms list and replace it with the terms * defined for the alias */ -int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) +int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms, + const char **unit, double *scale) { - struct parse_events__term *term, *h; - struct perf_pmu__alias *alias; + struct parse_events_term *term, *h; + struct perf_pmu_alias *alias; int ret; + /* + * Mark unit and scale as not set + * (different from default values, see below) + */ + *unit = NULL; + *scale = 0.0; + list_for_each_entry_safe(term, h, head_terms, list) { alias = pmu_find_alias(pmu, term); if (!alias) @@ -516,16 +654,33 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms) ret = pmu_alias_terms(alias, &term->list); if (ret) return ret; + + ret = check_unit_scale(alias, unit, scale); + if (ret) + return ret; + list_del(&term->list); free(term); } + + /* + * if no unit or scale foundin aliases, then + * set defaults as for evsel + * unit cannot left to NULL + */ + if (*unit == NULL) + *unit = ""; + + if (*scale == 0.0) + *scale = 1.0; + return 0; } int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits) { - struct perf_pmu__format *format; + struct perf_pmu_format *format; format = zalloc(sizeof(*format)); if (!format) @@ -546,181 +701,96 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) if (!to) to = from; - memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS)); + memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS)); for (b = from; b <= to; b++) set_bit(b, bits); } -/* Simulated format definitions. */ -static struct test_format { - const char *name; - const char *value; -} test_formats[] = { - { "krava01", "config:0-1,62-63\n", }, - { "krava02", "config:10-17\n", }, - { "krava03", "config:5\n", }, - { "krava11", "config1:0,2,4,6,8,20-28\n", }, - { "krava12", "config1:63\n", }, - { "krava13", "config1:45-47\n", }, - { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", }, - { "krava22", "config2:8,18,48,58\n", }, - { "krava23", "config2:28-29,38\n", }, -}; - -#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format)) - -/* Simulated users input. 
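The memset() change in perf_pmu__set_format() above is a real fix: BITS_TO_LONGS() yields a count of longs, not bytes, so the old call cleared only one byte of the 64-bit format bitmap, whereas BITS_TO_BYTES() gives the full size. The arithmetic, spelled out with simplified copies of the macros for illustration:

#include <stdio.h>

#define BITS_PER_BYTE		8
#define BITS_PER_LONG		(BITS_PER_BYTE * sizeof(long))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)
#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE)

int main(void)
{
	/* PERF_PMU_FORMAT_BITS is 64 */
	printf("longs=%zu bytes=%zu\n",
	       (size_t)BITS_TO_LONGS(64),	/* 1: wrong length for memset() */
	       (size_t)BITS_TO_BYTES(64));	/* 8: clears the whole bitmap   */
	return 0;
}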
*/ -static struct parse_events__term test_terms[] = { - { - .config = (char *) "krava01", - .val.num = 15, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava02", - .val.num = 170, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava03", - .val.num = 1, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava11", - .val.num = 27, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava12", - .val.num = 1, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava13", - .val.num = 2, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava21", - .val.num = 119, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava22", - .val.num = 11, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, - { - .config = (char *) "krava23", - .val.num = 2, - .type_val = PARSE_EVENTS__TERM_TYPE_NUM, - .type_term = PARSE_EVENTS__TERM_TYPE_USER, - }, -}; -#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term)) - -/* - * Prepare format directory data, exported by kernel - * at /sys/bus/event_source/devices/<dev>/format. - */ -static char *test_format_dir_get(void) +static char *format_alias(char *buf, int len, struct perf_pmu *pmu, + struct perf_pmu_alias *alias) { - static char dir[PATH_MAX]; - unsigned int i; - - snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX"); - if (!mkdtemp(dir)) - return NULL; - - for (i = 0; i < TEST_FORMATS_CNT; i++) { - static char name[PATH_MAX]; - struct test_format *format = &test_formats[i]; - FILE *file; - - snprintf(name, PATH_MAX, "%s/%s", dir, format->name); - - file = fopen(name, "w"); - if (!file) - return NULL; - - if (1 != fwrite(format->value, strlen(format->value), 1, file)) - break; - - fclose(file); - } - - return dir; + snprintf(buf, len, "%s/%s/", pmu->name, alias->name); + return buf; } -/* Cleanup format directory. 
*/ -static int test_format_dir_put(char *dir) +static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu, + struct perf_pmu_alias *alias) { - char buf[PATH_MAX]; - snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir); - if (system(buf)) - return -1; - - snprintf(buf, PATH_MAX, "rmdir %s\n", dir); - return system(buf); + snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name); + return buf; } -static struct list_head *test_terms_list(void) +static int cmp_string(const void *a, const void *b) { - static LIST_HEAD(terms); - unsigned int i; - - for (i = 0; i < TERMS_CNT; i++) - list_add_tail(&test_terms[i].list, &terms); - - return &terms; + const char * const *as = a; + const char * const *bs = b; + return strcmp(*as, *bs); } -#undef TERMS_CNT - -int perf_pmu__test(void) +void print_pmu_events(const char *event_glob, bool name_only) { - char *format = test_format_dir_get(); - LIST_HEAD(formats); - struct list_head *terms = test_terms_list(); - int ret; - - if (!format) - return -EINVAL; - - do { - struct perf_event_attr attr; - - memset(&attr, 0, sizeof(attr)); - - ret = pmu_format_parse(format, &formats); - if (ret) - break; - - ret = pmu_config(&formats, &attr, terms); - if (ret) - break; - - ret = -EINVAL; - - if (attr.config != 0xc00000000002a823) - break; - if (attr.config1 != 0x8000400000000145) - break; - if (attr.config2 != 0x0400000020041d07) - break; + struct perf_pmu *pmu; + struct perf_pmu_alias *alias; + char buf[1024]; + int printed = 0; + int len, j; + char **aliases; + + pmu = NULL; + len = 0; + while ((pmu = perf_pmu__scan(pmu)) != NULL) + list_for_each_entry(alias, &pmu->aliases, list) + len++; + aliases = malloc(sizeof(char *) * len); + if (!aliases) + return; + pmu = NULL; + j = 0; + while ((pmu = perf_pmu__scan(pmu)) != NULL) + list_for_each_entry(alias, &pmu->aliases, list) { + char *name = format_alias(buf, sizeof(buf), pmu, alias); + bool is_cpu = !strcmp(pmu->name, "cpu"); + + if (event_glob != NULL && + !(strglobmatch(name, event_glob) || + (!is_cpu && strglobmatch(alias->name, + event_glob)))) + continue; + aliases[j] = name; + if (is_cpu && !name_only) + aliases[j] = format_alias_or(buf, sizeof(buf), + pmu, alias); + aliases[j] = strdup(aliases[j]); + j++; + } + len = j; + qsort(aliases, len, sizeof(char *), cmp_string); + for (j = 0; j < len; j++) { + if (name_only) { + printf("%s ", aliases[j]); + continue; + } + printf(" %-50s [Kernel PMU event]\n", aliases[j]); + zfree(&aliases[j]); + printed++; + } + if (printed) + printf("\n"); + free(aliases); +} - ret = 0; - } while (0); +bool pmu_have_event(const char *pname, const char *name) +{ + struct perf_pmu *pmu; + struct perf_pmu_alias *alias; - test_format_dir_put(format); - return ret; + pmu = NULL; + while ((pmu = perf_pmu__scan(pmu)) != NULL) { + if (strcmp(pname, pmu->name)) + continue; + list_for_each_entry(alias, &pmu->aliases, list) + if (!strcmp(alias->name, name)) + return true; + } + return false; } diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 39f3abac774..c14a543ce1f 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -1,8 +1,9 @@ #ifndef __PMU_H #define __PMU_H -#include <linux/bitops.h> -#include "../../../include/uapi/linux/perf_event.h" +#include <linux/bitmap.h> +#include <linux/perf_event.h> +#include <stdbool.h> enum { PERF_PMU_FORMAT_VALUE_CONFIG, @@ -12,19 +13,6 @@ enum { #define PERF_PMU_FORMAT_BITS 64 -struct perf_pmu__format { - char *name; - int value; - DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS); - struct list_head list; -}; - -struct 
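print_pmu_events() above gathers the alias names into a char * array and sorts them with qsort(); cmp_string() receives pointers to the array elements, hence the extra level of indirection. A standalone sketch of that comparator pattern, with made-up alias strings:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* qsort() hands the comparator pointers *to* the elements, and the
 * elements are themselves 'char *', so look through two levels */
static int cmp_string(const void *a, const void *b)
{
	const char * const *as = a;
	const char * const *bs = b;

	return strcmp(*as, *bs);
}

int main(void)
{
	const char *aliases[] = {
		"cpu/cycles/", "uncore/clockticks/", "cpu/instructions/"
	};
	size_t n = sizeof(aliases) / sizeof(aliases[0]);

	qsort(aliases, n, sizeof(aliases[0]), cmp_string);
	for (size_t i = 0; i < n; i++)
		printf("  %-50s [Kernel PMU event]\n", aliases[i]);
	return 0;
}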
perf_pmu__alias { - char *name; - struct list_head terms; - struct list_head list; -}; - struct perf_pmu { char *name; __u32 type; @@ -34,20 +22,28 @@ struct perf_pmu { struct list_head list; }; -struct perf_pmu *perf_pmu__find(char *name); +struct perf_pmu *perf_pmu__find(const char *name); int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr, struct list_head *head_terms); -int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms); +int perf_pmu__config_terms(struct list_head *formats, + struct perf_event_attr *attr, + struct list_head *head_terms); +int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms, + const char **unit, double *scale); struct list_head *perf_pmu__alias(struct perf_pmu *pmu, - struct list_head *head_terms); + struct list_head *head_terms); int perf_pmu_wrap(void); void perf_pmu_error(struct list_head *list, char *name, char const *msg); int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits); void perf_pmu__set_format(unsigned long *bits, long from, long to); +int perf_pmu__format_parse(char *dir, struct list_head *head); struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); +void print_pmu_events(const char *event_glob, bool name_only); +bool pmu_have_event(const char *pname, const char *name); + int perf_pmu__test(void); #endif /* __PMU_H */ diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index ec898047ebb..bfd7e850986 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -1,5 +1,4 @@ -%name-prefix "perf_pmu_" %parse-param {struct list_head *format} %parse-param {char *name} diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 49a256e6e0a..9a0a1839a37 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -40,14 +40,13 @@ #include "color.h" #include "symbol.h" #include "thread.h" -#include "debugfs.h" +#include <api/fs/debugfs.h> #include "trace-event.h" /* For __maybe_unused */ #include "probe-event.h" #include "probe-finder.h" #include "session.h" #define MAX_CMDLEN 256 -#define MAX_PROBE_ARGS 128 #define PERFPROBE_GROUP "probe" bool probe_event_dry_run; /* Dry run flag */ @@ -71,33 +70,32 @@ static int e_snprintf(char *str, size_t size, const char *format, ...) 
} static char *synthesize_perf_probe_point(struct perf_probe_point *pp); -static int convert_name_to_addr(struct perf_probe_event *pev, - const char *exec); -static struct machine machine; +static void clear_probe_trace_event(struct probe_trace_event *tev); +static struct machine *host_machine; /* Initialize symbol maps and path of vmlinux/modules */ -static int init_vmlinux(void) +static int init_symbol_maps(bool user_only) { int ret; symbol_conf.sort_by_name = true; - if (symbol_conf.vmlinux_name == NULL) - symbol_conf.try_vmlinux_path = true; - else - pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name); ret = symbol__init(); if (ret < 0) { pr_debug("Failed to init symbol map.\n"); goto out; } - ret = machine__init(&machine, "", HOST_KERNEL_ID); - if (ret < 0) - goto out; + if (host_machine || user_only) /* already initialized */ + return 0; - if (machine__create_kernel_maps(&machine) < 0) { - pr_debug("machine__create_kernel_maps() failed.\n"); - goto out; + if (symbol_conf.vmlinux_name) + pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name); + + host_machine = machine__new_host(); + if (!host_machine) { + pr_debug("machine__new_host() failed.\n"); + symbol__exit(); + ret = -1; } out: if (ret < 0) @@ -105,21 +103,66 @@ out: return ret; } +static void exit_symbol_maps(void) +{ + if (host_machine) { + machine__delete(host_machine); + host_machine = NULL; + } + symbol__exit(); +} + static struct symbol *__find_kernel_function_by_name(const char *name, struct map **mapp) { - return machine__find_kernel_function_by_name(&machine, name, mapp, + return machine__find_kernel_function_by_name(host_machine, name, mapp, NULL); } +static struct symbol *__find_kernel_function(u64 addr, struct map **mapp) +{ + return machine__find_kernel_function(host_machine, addr, mapp, NULL); +} + +static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) +{ + /* kmap->ref_reloc_sym should be set if host_machine is initialized */ + struct kmap *kmap; + + if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0) + return NULL; + + kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]); + return kmap->ref_reloc_sym; +} + +static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc) +{ + struct ref_reloc_sym *reloc_sym; + struct symbol *sym; + struct map *map; + + /* ref_reloc_sym is just a label. Need a special fix*/ + reloc_sym = kernel_get_ref_reloc_sym(); + if (reloc_sym && strcmp(name, reloc_sym->name) == 0) + return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; + else { + sym = __find_kernel_function_by_name(name, &map); + if (sym) + return map->unmap_ip(map, sym->start) - + (reloc) ? 
0 : map->reloc; + } + return 0; +} + static struct map *kernel_get_module_map(const char *module) { struct rb_node *nd; - struct map_groups *grp = &machine.kmaps; + struct map_groups *grp = &host_machine->kmaps; /* A file path -- this is an offline module */ if (module && strchr(module, '/')) - return machine__new_module(&machine, 0, module); + return machine__new_module(host_machine, 0, module); if (!module) module = "kernel"; @@ -141,7 +184,7 @@ static struct dso *kernel_get_module_dso(const char *module) const char *vmlinux_name; if (module) { - list_for_each_entry(dso, &machine.kernel_dsos, node) { + list_for_each_entry(dso, &host_machine->kernel_dsos, node) { if (strncmp(dso->short_name + 1, module, dso->short_name_len - 2) == 0) goto found; @@ -150,12 +193,12 @@ static struct dso *kernel_get_module_dso(const char *module) return NULL; } - map = machine.vmlinux_maps[MAP__FUNCTION]; + map = host_machine->vmlinux_maps[MAP__FUNCTION]; dso = map->dso; vmlinux_name = symbol_conf.vmlinux_name; if (vmlinux_name) { - if (dso__load_vmlinux(dso, map, vmlinux_name, NULL) <= 0) + if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0) return NULL; } else { if (dso__load_vmlinux_path(dso, map, NULL) <= 0) { @@ -173,46 +216,54 @@ const char *kernel_get_module_path(const char *module) return (dso) ? dso->long_name : NULL; } -static int init_user_exec(void) +static int convert_exec_to_group(const char *exec, char **result) { - int ret = 0; + char *ptr1, *ptr2, *exec_copy; + char buf[64]; + int ret; - symbol_conf.try_vmlinux_path = false; - symbol_conf.sort_by_name = true; - ret = symbol__init(); + exec_copy = strdup(exec); + if (!exec_copy) + return -ENOMEM; + ptr1 = basename(exec_copy); + if (!ptr1) { + ret = -EINVAL; + goto out; + } + + ptr2 = strpbrk(ptr1, "-._"); + if (ptr2) + *ptr2 = '\0'; + ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1); if (ret < 0) - pr_debug("Failed to init symbol map.\n"); + goto out; + *result = strdup(buf); + ret = *result ? 
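One thing worth flagging in kernel_get_symbol_address_by_name() above: because the conditional operator binds more loosely than subtraction, "unmap_ip(...) - (reloc) ? 0 : map->reloc" parses as "(address - reloc) ? 0 : map->reloc", so the symbol branch yields 0 or the relocation offset rather than an address. The surrounding logic (the reloc_sym branch just above it) suggests the intent was to subtract the offset only in the non-reloc case; a small demonstrator of the precedence, with made-up numbers, and the presumed parenthesisation:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1000, reloc_off = 0x200;
	int reloc = 1;

	/* parses as (addr - reloc) ? 0 : reloc_off  ==  0 here */
	unsigned long as_written = addr - reloc ? 0 : reloc_off;

	/* presumably intended: keep the address, subtract only when !reloc */
	unsigned long intended = addr - (reloc ? 0 : reloc_off);

	printf("as written: %#lx, intended: %#lx\n", as_written, intended);
	return 0;
}

This is a hedged reading of the code as shown in the hunk, not a statement about what was eventually merged upstream.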
0 : -ENOMEM; + +out: + free(exec_copy); return ret; } -static int convert_to_perf_probe_point(struct probe_trace_point *tp, - struct perf_probe_point *pp) +static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) { - pp->function = strdup(tp->symbol); - - if (pp->function == NULL) - return -ENOMEM; - - pp->offset = tp->offset; - pp->retprobe = tp->retprobe; + int i; - return 0; + for (i = 0; i < ntevs; i++) + clear_probe_trace_event(tevs + i); } -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT + /* Open new debuginfo of given module */ static struct debuginfo *open_debuginfo(const char *module) { - const char *path; + const char *path = module; - /* A file path -- this is an offline module */ - if (module && strchr(module, '/')) - path = module; - else { + if (!module || !strchr(module, '/')) { path = kernel_get_module_path(module); - if (!path) { pr_err("Failed to find path of %s module.\n", module ?: "kernel"); @@ -222,44 +273,110 @@ static struct debuginfo *open_debuginfo(const char *module) return debuginfo__new(path); } +static int get_text_start_address(const char *exec, unsigned long *address) +{ + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + int fd, ret = -ENOENT; + + fd = open(exec, O_RDONLY); + if (fd < 0) + return -errno; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return -EINVAL; + + if (gelf_getehdr(elf, &ehdr) == NULL) + goto out; + + if (!elf_section_by_name(elf, &ehdr, &shdr, ".text", NULL)) + goto out; + + *address = shdr.sh_addr - shdr.sh_offset; + ret = 0; +out: + elf_end(elf); + return ret; +} + /* * Convert trace point to probe point with debuginfo - * Currently only handles kprobes. */ -static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, - struct perf_probe_point *pp) +static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp, + struct perf_probe_point *pp, + bool is_kprobe) { - struct symbol *sym; - struct map *map; - u64 addr; + struct debuginfo *dinfo = NULL; + unsigned long stext = 0; + u64 addr = tp->address; int ret = -ENOENT; - struct debuginfo *dinfo; - sym = __find_kernel_function_by_name(tp->symbol, &map); - if (sym) { - addr = map->unmap_ip(map, sym->start + tp->offset); - pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol, - tp->offset, addr); + /* convert the address to dwarf address */ + if (!is_kprobe) { + if (!addr) { + ret = -EINVAL; + goto error; + } + ret = get_text_start_address(tp->module, &stext); + if (ret < 0) + goto error; + addr += stext; + } else { + addr = kernel_get_symbol_address_by_name(tp->symbol, false); + if (addr == 0) + goto error; + addr += tp->offset; + } + + pr_debug("try to find information at %" PRIx64 " in %s\n", addr, + tp->module ? : "kernel"); - dinfo = debuginfo__new_online_kernel(addr); - if (dinfo) { - ret = debuginfo__find_probe_point(dinfo, + dinfo = open_debuginfo(tp->module); + if (dinfo) { + ret = debuginfo__find_probe_point(dinfo, (unsigned long)addr, pp); - debuginfo__delete(dinfo); - } else { - pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", - addr); - ret = -ENOENT; - } + debuginfo__delete(dinfo); + } else { + pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", addr); + ret = -ENOENT; } - if (ret <= 0) { - pr_debug("Failed to find corresponding probes from " - "debuginfo. 
Use kprobe event information.\n"); - return convert_to_perf_probe_point(tp, pp); + + if (ret > 0) { + pp->retprobe = tp->retprobe; + return 0; } - pp->retprobe = tp->retprobe; +error: + pr_debug("Failed to find corresponding probes from debuginfo.\n"); + return ret ? : -ENOENT; +} - return 0; +static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *exec) +{ + int i, ret = 0; + unsigned long stext = 0; + + if (!exec) + return 0; + + ret = get_text_start_address(exec, &stext); + if (ret < 0) + return ret; + + for (i = 0; i < ntevs && ret >= 0; i++) { + /* point.address is the addres of point.symbol + point.offset */ + tevs[i].point.address -= stext; + tevs[i].point.module = strdup(exec); + if (!tevs[i].point.module) { + ret = -ENOMEM; + break; + } + tevs[i].uprobes = true; + } + + return ret; } static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, @@ -291,12 +408,46 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, } } - if (tmp) - free(tmp); - + free(tmp); return ret; } +/* Post processing the probe events */ +static int post_process_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module, + bool uprobe) +{ + struct ref_reloc_sym *reloc_sym; + char *tmp; + int i; + + if (uprobe) + return add_exec_to_probe_trace_events(tevs, ntevs, module); + + /* Note that currently ref_reloc_sym based probe is not for drivers */ + if (module) + return add_module_to_probe_trace_events(tevs, ntevs, module); + + reloc_sym = kernel_get_ref_reloc_sym(); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + return -EINVAL; + } + + for (i = 0; i < ntevs; i++) { + if (tevs[i].point.address) { + tmp = strdup(reloc_sym->name); + if (!tmp) + return -ENOMEM; + free(tevs[i].point.symbol); + tevs[i].point.symbol = tmp; + tevs[i].point.offset = tevs[i].point.address - + reloc_sym->unrelocated_addr; + } + } + return 0; +} + /* Try to find perf_probe_event with debuginfo */ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs, @@ -306,15 +457,6 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct debuginfo *dinfo; int ntevs, ret = 0; - if (pev->uprobes) { - if (need_dwarf) { - pr_warning("Debuginfo-analysis is not yet supported" - " with -x/--exec option.\n"); - return -ENOSYS; - } - return convert_name_to_addr(pev, target); - } - dinfo = open_debuginfo(target); if (!dinfo) { @@ -326,16 +468,20 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, return 0; } + pr_debug("Try to find probe point from debuginfo.\n"); /* Searching trace events corresponding to a probe event */ ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs); debuginfo__delete(dinfo); if (ntevs > 0) { /* Succeeded to find trace events */ - pr_debug("find %d probe_trace_events.\n", ntevs); - if (target) - ret = add_module_to_probe_trace_events(*tevs, ntevs, - target); + pr_debug("Found %d probe_trace_events.\n", ntevs); + ret = post_process_probe_trace_events(*tevs, ntevs, + target, pev->uprobes); + if (ret < 0) { + clear_probe_trace_events(*tevs, ntevs); + zfree(tevs); + } return ret < 0 ? 
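post_process_probe_trace_events() above rewrites each resolved kernel address as "<ref_reloc_sym> + offset" (typically _text), so the definition that finally reaches kprobe_events stays valid even when the running kernel image is relocated: both the anchor symbol and the probe site move together. A sketch of the string that ends up being written, with illustrative addresses that are not taken from a real kernel:

#include <stdio.h>

int main(void)
{
	const char *reloc_sym = "_text";
	unsigned long sym_addr   = 0xffffffff81000000UL; /* unrelocated _text   */
	unsigned long probe_addr = 0xffffffff81234567UL; /* resolved probe site */
	char buf[256];

	/* the symbol+offset form survives kernel relocation */
	snprintf(buf, sizeof(buf), "p:probe/my_probe %s+%lu",
		 reloc_sym, probe_addr - sym_addr);
	printf("%s\n", buf);
	return 0;
}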
ret : ntevs; } @@ -402,15 +548,13 @@ static int get_real_path(const char *raw_path, const char *comp_dir, case EFAULT: raw_path = strchr(++raw_path, '/'); if (!raw_path) { - free(*new_path); - *new_path = NULL; + zfree(new_path); return -ENOENT; } continue; default: - free(*new_path); - *new_path = NULL; + zfree(new_path); return -errno; } } @@ -466,20 +610,16 @@ static int _show_one_line(FILE *fp, int l, bool skip, bool show_num) * Show line-range always requires debuginfo to find source file and * line number. */ -int show_line_range(struct line_range *lr, const char *module) +static int __show_line_range(struct line_range *lr, const char *module) { int l = 1; - struct line_node *ln; + struct int_node *ln; struct debuginfo *dinfo; FILE *fp; int ret; char *tmp; /* Search a line range */ - ret = init_vmlinux(); - if (ret < 0) - return ret; - dinfo = open_debuginfo(module); if (!dinfo) { pr_warning("Failed to open debuginfo file.\n"); @@ -488,11 +628,11 @@ int show_line_range(struct line_range *lr, const char *module) ret = debuginfo__find_line_range(dinfo, lr); debuginfo__delete(dinfo); - if (ret == 0) { + if (ret == 0 || ret == -ENOENT) { pr_warning("Specified source line is not found.\n"); return -ENOENT; } else if (ret < 0) { - pr_warning("Debuginfo analysis failed. (%d)\n", ret); + pr_warning("Debuginfo analysis failed.\n"); return ret; } @@ -501,7 +641,7 @@ int show_line_range(struct line_range *lr, const char *module) ret = get_real_path(tmp, lr->comp_dir, &lr->path); free(tmp); /* Free old path */ if (ret < 0) { - pr_warning("Failed to find source file. (%d)\n", ret); + pr_warning("Failed to find source file path.\n"); return ret; } @@ -526,8 +666,8 @@ int show_line_range(struct line_range *lr, const char *module) goto end; } - list_for_each_entry(ln, &lr->line_list, list) { - for (; ln->line > l; l++) { + intlist__for_each(ln, lr->line_list) { + for (; ln->i > l; l++) { ret = show_one_line(fp, l - lr->offset); if (ret < 0) goto end; @@ -549,6 +689,19 @@ end: return ret; } +int show_line_range(struct line_range *lr, const char *module) +{ + int ret; + + ret = init_symbol_maps(false); + if (ret < 0) + return ret; + ret = __show_line_range(lr, module); + exit_symbol_maps(); + + return ret; +} + static int show_available_vars_at(struct debuginfo *dinfo, struct perf_probe_event *pev, int max_vls, struct strfilter *_filter, @@ -568,9 +721,14 @@ static int show_available_vars_at(struct debuginfo *dinfo, ret = debuginfo__find_available_vars_at(dinfo, pev, &vls, max_vls, externs); if (ret <= 0) { - pr_err("Failed to find variables at %s (%d)\n", buf, ret); + if (ret == 0 || ret == -ENOENT) { + pr_err("Failed to find the address of %s\n", buf); + ret = -ENOENT; + } else + pr_warning("Debuginfo analysis failed.\n"); goto end; } + /* Some variables are found */ fprintf(stdout, "Available variables at %s\n", buf); for (i = 0; i < ret; i++) { @@ -581,7 +739,7 @@ static int show_available_vars_at(struct debuginfo *dinfo, */ fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol, vl->point.offset); - free(vl->point.symbol); + zfree(&vl->point.symbol); nvars = 0; if (vl->vars) { strlist__for_each(node, vl->vars) { @@ -610,14 +768,15 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, int i, ret = 0; struct debuginfo *dinfo; - ret = init_vmlinux(); + ret = init_symbol_maps(false); if (ret < 0) return ret; dinfo = open_debuginfo(module); if (!dinfo) { pr_warning("Failed to open debuginfo file.\n"); - return -ENOENT; + ret = -ENOENT; + goto out; } setup_pager(); @@ -627,37 +786,31 @@ int 
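show_line_range() now walks the visible lines through perf's intlist instead of the open-coded line_node list that the tail of this patch removes. A hedged sketch of the intlist usage, based on the calls visible in this diff plus an assumed intlist__add() from the same API; this fragment only builds inside the perf tree, it is not a standalone program:

#include <errno.h>
#include <stdio.h>
#include "intlist.h"		/* tools/perf/util/intlist.h */

static int collect_lines(void)
{
	struct intlist *lines = intlist__new(NULL);	/* start empty */
	struct int_node *node;

	if (!lines)
		return -ENOMEM;

	/* intlist__add() is assumed here from the intlist API */
	intlist__add(lines, 10);
	intlist__add(lines, 12);

	intlist__for_each(node, lines)
		printf("visible line %d\n", node->i);

	intlist__delete(lines);
	return 0;
}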
show_available_vars(struct perf_probe_event *pevs, int npevs, externs); debuginfo__delete(dinfo); +out: + exit_symbol_maps(); return ret; } -#else /* !DWARF_SUPPORT */ +#else /* !HAVE_DWARF_SUPPORT */ -static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, - struct perf_probe_point *pp) +static int +find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused, + struct perf_probe_point *pp __maybe_unused, + bool is_kprobe __maybe_unused) { - struct symbol *sym; - - sym = __find_kernel_function_by_name(tp->symbol, NULL); - if (!sym) { - pr_err("Failed to find symbol %s in kernel.\n", tp->symbol); - return -ENOENT; - } - - return convert_to_perf_probe_point(tp, pp); + return -ENOSYS; } static int try_to_find_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event **tevs __maybe_unused, - int max_tevs __maybe_unused, const char *target) + int max_tevs __maybe_unused, + const char *target __maybe_unused) { if (perf_probe_event_need_dwarf(pev)) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; } - if (pev->uprobes) - return convert_name_to_addr(pev, target); - return 0; } @@ -679,6 +832,26 @@ int show_available_vars(struct perf_probe_event *pevs __maybe_unused, } #endif +void line_range__clear(struct line_range *lr) +{ + free(lr->function); + free(lr->file); + free(lr->path); + free(lr->comp_dir); + intlist__delete(lr->line_list); + memset(lr, 0, sizeof(*lr)); +} + +int line_range__init(struct line_range *lr) +{ + memset(lr, 0, sizeof(*lr)); + lr->line_list = intlist__new(NULL); + if (!lr->line_list) + return -ENOMEM; + else + return 0; +} + static int parse_line_num(char **ptr, int *val, const char *what) { const char *start = *ptr; @@ -1150,16 +1323,21 @@ static int parse_probe_trace_command(const char *cmd, } else p = argv[1]; fmt1_str = strtok_r(p, "+", &fmt); - tp->symbol = strdup(fmt1_str); - if (tp->symbol == NULL) { - ret = -ENOMEM; - goto out; + if (fmt1_str[0] == '0') /* only the address started with 0x */ + tp->address = strtoul(fmt1_str, NULL, 0); + else { + /* Only the symbol-based probe has offset */ + tp->symbol = strdup(fmt1_str); + if (tp->symbol == NULL) { + ret = -ENOMEM; + goto out; + } + fmt2_str = strtok_r(NULL, "", &fmt); + if (fmt2_str == NULL) + tp->offset = 0; + else + tp->offset = strtoul(fmt2_str, NULL, 10); } - fmt2_str = strtok_r(NULL, "", &fmt); - if (fmt2_str == NULL) - tp->offset = 0; - else - tp->offset = strtoul(fmt2_str, NULL, 10); tev->nargs = argc - 2; tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); @@ -1279,8 +1457,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp) error: pr_debug("Failed to synthesize perf probe point: %s\n", strerror(-ret)); - if (buf) - free(buf); + free(buf); return NULL; } @@ -1402,20 +1579,27 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev) if (buf == NULL) return NULL; + len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p', + tev->group, tev->event); + if (len <= 0) + goto error; + + /* Uprobes must have tp->address and tp->module */ + if (tev->uprobes && (!tp->address || !tp->module)) + goto error; + + /* Use the tp->address for uprobes */ if (tev->uprobes) - len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s:%s", - tp->retprobe ? 'r' : 'p', - tev->group, tev->event, - tp->module, tp->symbol); + ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx", + tp->module, tp->address); else - len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s%s%s+%lu", - tp->retprobe ? 
'r' : 'p', - tev->group, tev->event, + ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu", tp->module ?: "", tp->module ? ":" : "", tp->symbol, tp->offset); - if (len <= 0) + if (ret <= 0) goto error; + len += ret; for (i = 0; i < tev->nargs; i++) { ret = synthesize_probe_trace_arg(&tev->args[i], buf + len, @@ -1431,6 +1615,79 @@ error: return NULL; } +static int find_perf_probe_point_from_map(struct probe_trace_point *tp, + struct perf_probe_point *pp, + bool is_kprobe) +{ + struct symbol *sym = NULL; + struct map *map; + u64 addr; + int ret = -ENOENT; + + if (!is_kprobe) { + map = dso__new_map(tp->module); + if (!map) + goto out; + addr = tp->address; + sym = map__find_symbol(map, addr, NULL); + } else { + addr = kernel_get_symbol_address_by_name(tp->symbol, true); + if (addr) { + addr += tp->offset; + sym = __find_kernel_function(addr, &map); + } + } + if (!sym) + goto out; + + pp->retprobe = tp->retprobe; + pp->offset = addr - map->unmap_ip(map, sym->start); + pp->function = strdup(sym->name); + ret = pp->function ? 0 : -ENOMEM; + +out: + if (map && !is_kprobe) { + dso__delete(map->dso); + map__delete(map); + } + + return ret; +} + +static int convert_to_perf_probe_point(struct probe_trace_point *tp, + struct perf_probe_point *pp, + bool is_kprobe) +{ + char buf[128]; + int ret; + + ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe); + if (!ret) + return 0; + ret = find_perf_probe_point_from_map(tp, pp, is_kprobe); + if (!ret) + return 0; + + pr_debug("Failed to find probe point from both of dwarf and map.\n"); + + if (tp->symbol) { + pp->function = strdup(tp->symbol); + pp->offset = tp->offset; + } else if (!tp->module && !is_kprobe) { + ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address); + if (ret < 0) + return ret; + pp->function = strdup(buf); + pp->offset = 0; + } + if (pp->function == NULL) + return -ENOMEM; + + pp->retprobe = tp->retprobe; + + return 0; +} + static int convert_to_perf_probe_event(struct probe_trace_event *tev, struct perf_probe_event *pev, bool is_kprobe) { @@ -1444,11 +1701,7 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev, return -ENOMEM; /* Convert trace_point to probe_point */ - if (is_kprobe) - ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point); - else - ret = convert_to_perf_probe_point(&tev->point, &pev->point); - + ret = convert_to_perf_probe_point(&tev->point, &pev->point, is_kprobe); if (ret < 0) return ret; @@ -1481,34 +1734,25 @@ void clear_perf_probe_event(struct perf_probe_event *pev) struct perf_probe_arg_field *field, *next; int i; - if (pev->event) - free(pev->event); - if (pev->group) - free(pev->group); - if (pp->file) - free(pp->file); - if (pp->function) - free(pp->function); - if (pp->lazy_line) - free(pp->lazy_line); + free(pev->event); + free(pev->group); + free(pp->file); + free(pp->function); + free(pp->lazy_line); + for (i = 0; i < pev->nargs; i++) { - if (pev->args[i].name) - free(pev->args[i].name); - if (pev->args[i].var) - free(pev->args[i].var); - if (pev->args[i].type) - free(pev->args[i].type); + free(pev->args[i].name); + free(pev->args[i].var); + free(pev->args[i].type); field = pev->args[i].field; while (field) { next = field->next; - if (field->name) - free(field->name); + zfree(&field->name); free(field); field = next; } } - if (pev->args) - free(pev->args); + free(pev->args); memset(pev, 0, sizeof(*pev)); } @@ -1517,21 +1761,14 @@ static void clear_probe_trace_event(struct probe_trace_event *tev) struct probe_trace_arg_ref *ref, *next; int i; - if (tev->event) - 
free(tev->event); - if (tev->group) - free(tev->group); - if (tev->point.symbol) - free(tev->point.symbol); - if (tev->point.module) - free(tev->point.module); + free(tev->event); + free(tev->group); + free(tev->point.symbol); + free(tev->point.module); for (i = 0; i < tev->nargs; i++) { - if (tev->args[i].name) - free(tev->args[i].name); - if (tev->args[i].value) - free(tev->args[i].value); - if (tev->args[i].type) - free(tev->args[i].type); + free(tev->args[i].name); + free(tev->args[i].value); + free(tev->args[i].type); ref = tev->args[i].ref; while (ref) { next = ref->next; @@ -1539,8 +1776,7 @@ static void clear_probe_trace_event(struct probe_trace_event *tev) ref = next; } } - if (tev->args) - free(tev->args); + free(tev->args); memset(tev, 0, sizeof(*tev)); } @@ -1632,7 +1868,8 @@ static struct strlist *get_probe_trace_command_rawlist(int fd) } /* Show an event */ -static int show_perf_probe_event(struct perf_probe_event *pev) +static int show_perf_probe_event(struct perf_probe_event *pev, + const char *module) { int i, ret; char buf[128]; @@ -1648,6 +1885,8 @@ static int show_perf_probe_event(struct perf_probe_event *pev) return ret; printf(" %-20s (on %s", buf, place); + if (module) + printf(" in %s", module); if (pev->nargs > 0) { printf(" with"); @@ -1685,7 +1924,8 @@ static int __show_perf_probe_events(int fd, bool is_kprobe) ret = convert_to_perf_probe_event(&tev, &pev, is_kprobe); if (ret >= 0) - ret = show_perf_probe_event(&pev); + ret = show_perf_probe_event(&pev, + tev.point.module); } clear_perf_probe_event(&pev); clear_probe_trace_event(&tev); @@ -1708,7 +1948,7 @@ int show_perf_probe_events(void) if (fd < 0) return fd; - ret = init_vmlinux(); + ret = init_symbol_maps(false); if (ret < 0) return ret; @@ -1721,6 +1961,7 @@ int show_perf_probe_events(void) close(fd); } + exit_symbol_maps(); return ret; } @@ -1883,7 +2124,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, group = pev->group; pev->event = tev->event; pev->group = tev->group; - show_perf_probe_event(pev); + show_perf_probe_event(pev, tev->point.module); /* Trick here - restore current event/group */ pev->event = (char *)event; pev->group = (char *)group; @@ -1909,98 +2150,175 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, return ret; } -static int convert_to_probe_trace_events(struct perf_probe_event *pev, - struct probe_trace_event **tevs, - int max_tevs, const char *target) +static char *looking_function_name; +static int num_matched_functions; + +static int probe_function_filter(struct map *map __maybe_unused, + struct symbol *sym) { + if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) && + strcmp(looking_function_name, sym->name) == 0) { + num_matched_functions++; + return 0; + } + return 1; +} + +#define strdup_or_goto(str, label) \ + ({ char *__p = strdup(str); if (!__p) goto label; __p; }) + +/* + * Find probe function addresses from map. 
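strdup_or_goto() above is a GNU C statement expression: it evaluates to the duplicated string on success and jumps to the given error label on allocation failure, which keeps the long field-copying loop in find_probe_trace_events_from_map() flat. A small standalone sketch of the same pattern (GCC/Clang only; the struct, function and label names are made up for the example):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define strdup_or_goto(str, label) \
	({ char *__p = strdup(str); if (!__p) goto label; __p; })

struct point { char *symbol; char *module; };

static int fill_point(struct point *p, const char *sym, const char *mod)
{
	memset(p, 0, sizeof(*p));	/* so cleanup can free() unconditionally */

	p->symbol = strdup_or_goto(sym, nomem_out);
	p->module = strdup_or_goto(mod, nomem_out);
	return 0;

nomem_out:
	free(p->symbol);		/* free(NULL) is harmless */
	free(p->module);
	return -ENOMEM;
}

int main(void)
{
	struct point p;

	if (!fill_point(&p, "vfs_read", "kernel")) {
		printf("%s in %s\n", p.symbol, p.module);
		free(p.symbol);
		free(p.module);
	}
	return 0;
}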
+ * Return an error or the number of found probe_trace_event + */ +static int find_probe_trace_events_from_map(struct perf_probe_event *pev, + struct probe_trace_event **tevs, + int max_tevs, const char *target) +{ + struct map *map = NULL; + struct kmap *kmap = NULL; + struct ref_reloc_sym *reloc_sym = NULL; struct symbol *sym; - int ret = 0, i; + struct rb_node *nd; struct probe_trace_event *tev; + struct perf_probe_point *pp = &pev->point; + struct probe_trace_point *tp; + int ret, i; - /* Convert perf_probe_event with debuginfo */ - ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target); - if (ret != 0) - return ret; /* Found in debuginfo or got an error */ - - /* Allocate trace event buffer */ - tev = *tevs = zalloc(sizeof(struct probe_trace_event)); - if (tev == NULL) - return -ENOMEM; + /* Init maps of given executable or kernel */ + if (pev->uprobes) + map = dso__new_map(target); + else + map = kernel_get_module_map(target); + if (!map) { + ret = -EINVAL; + goto out; + } - /* Copy parameters */ - tev->point.symbol = strdup(pev->point.function); - if (tev->point.symbol == NULL) { - ret = -ENOMEM; - goto error; + /* + * Load matched symbols: Since the different local symbols may have + * same name but different addresses, this lists all the symbols. + */ + num_matched_functions = 0; + looking_function_name = pp->function; + ret = map__load(map, probe_function_filter); + if (ret || num_matched_functions == 0) { + pr_err("Failed to find symbol %s in %s\n", pp->function, + target ? : "kernel"); + ret = -ENOENT; + goto out; + } else if (num_matched_functions > max_tevs) { + pr_err("Too many functions matched in %s\n", + target ? : "kernel"); + ret = -E2BIG; + goto out; } - if (target) { - tev->point.module = strdup(target); - if (tev->point.module == NULL) { - ret = -ENOMEM; - goto error; + if (!pev->uprobes) { + kmap = map__kmap(map); + reloc_sym = kmap->ref_reloc_sym; + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + ret = -EINVAL; + goto out; } } - tev->point.offset = pev->point.offset; - tev->point.retprobe = pev->point.retprobe; - tev->nargs = pev->nargs; - tev->uprobes = pev->uprobes; + /* Setup result trace-probe-events */ + *tevs = zalloc(sizeof(*tev) * num_matched_functions); + if (!*tevs) { + ret = -ENOMEM; + goto out; + } - if (tev->nargs) { - tev->args = zalloc(sizeof(struct probe_trace_arg) - * tev->nargs); - if (tev->args == NULL) { - ret = -ENOMEM; - goto error; + ret = 0; + map__for_each_symbol(map, sym, nd) { + tev = (*tevs) + ret; + tp = &tev->point; + if (ret == num_matched_functions) { + pr_warning("Too many symbols are listed. 
Skip it.\n"); + break; + } + ret++; + + if (pp->offset > sym->end - sym->start) { + pr_warning("Offset %ld is bigger than the size of %s\n", + pp->offset, sym->name); + ret = -ENOENT; + goto err_out; + } + /* Add one probe point */ + tp->address = map->unmap_ip(map, sym->start) + pp->offset; + if (reloc_sym) { + tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out); + tp->offset = tp->address - reloc_sym->addr; + } else { + tp->symbol = strdup_or_goto(sym->name, nomem_out); + tp->offset = pp->offset; + } + tp->retprobe = pp->retprobe; + if (target) + tev->point.module = strdup_or_goto(target, nomem_out); + tev->uprobes = pev->uprobes; + tev->nargs = pev->nargs; + if (tev->nargs) { + tev->args = zalloc(sizeof(struct probe_trace_arg) * + tev->nargs); + if (tev->args == NULL) + goto nomem_out; } for (i = 0; i < tev->nargs; i++) { - if (pev->args[i].name) { - tev->args[i].name = strdup(pev->args[i].name); - if (tev->args[i].name == NULL) { - ret = -ENOMEM; - goto error; - } - } - tev->args[i].value = strdup(pev->args[i].var); - if (tev->args[i].value == NULL) { - ret = -ENOMEM; - goto error; - } - if (pev->args[i].type) { - tev->args[i].type = strdup(pev->args[i].type); - if (tev->args[i].type == NULL) { - ret = -ENOMEM; - goto error; - } - } + if (pev->args[i].name) + tev->args[i].name = + strdup_or_goto(pev->args[i].name, + nomem_out); + + tev->args[i].value = strdup_or_goto(pev->args[i].var, + nomem_out); + if (pev->args[i].type) + tev->args[i].type = + strdup_or_goto(pev->args[i].type, + nomem_out); } } - if (pev->uprobes) - return 1; +out: + if (map && pev->uprobes) { + /* Only when using uprobe(exec) map needs to be released */ + dso__delete(map->dso); + map__delete(map); + } + return ret; - /* Currently just checking function name from symbol map */ - sym = __find_kernel_function_by_name(tev->point.symbol, NULL); - if (!sym) { - pr_warning("Kernel symbol \'%s\' not found.\n", - tev->point.symbol); - ret = -ENOENT; - goto error; - } else if (tev->point.offset > sym->end - sym->start) { - pr_warning("Offset specified is greater than size of %s\n", - tev->point.symbol); - ret = -ENOENT; - goto error; +nomem_out: + ret = -ENOMEM; +err_out: + clear_probe_trace_events(*tevs, num_matched_functions); + zfree(tevs); + goto out; +} +static int convert_to_probe_trace_events(struct perf_probe_event *pev, + struct probe_trace_event **tevs, + int max_tevs, const char *target) +{ + int ret; + + if (pev->uprobes && !pev->group) { + /* Replace group name if not given */ + ret = convert_exec_to_group(target, &pev->group); + if (ret != 0) { + pr_warning("Failed to make a group name.\n"); + return ret; + } } - return 1; -error: - clear_probe_trace_event(tev); - free(tev); - *tevs = NULL; - return ret; + /* Convert perf_probe_event with debuginfo */ + ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target); + if (ret != 0) + return ret; /* Found in debuginfo or got an error */ + + return find_probe_trace_events_from_map(pev, tevs, max_tevs, target); } struct __event_package { @@ -2021,12 +2339,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, if (pkgs == NULL) return -ENOMEM; - if (!pevs->uprobes) - /* Init vmlinux path */ - ret = init_vmlinux(); - else - ret = init_user_exec(); - + ret = init_symbol_maps(pevs->uprobes); if (ret < 0) { free(pkgs); return ret; @@ -2057,9 +2370,10 @@ end: for (i = 0; i < npevs; i++) { for (j = 0; j < pkgs[i].ntevs; j++) clear_probe_trace_event(&pkgs[i].tevs[j]); - free(pkgs[i].tevs); + zfree(&pkgs[i].tevs); } free(pkgs); + 
exit_symbol_maps(); return ret; } @@ -2209,166 +2523,51 @@ static struct strfilter *available_func_filter; static int filter_available_functions(struct map *map __maybe_unused, struct symbol *sym) { - if (sym->binding == STB_GLOBAL && + if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) && strfilter__compare(available_func_filter, sym->name)) return 0; return 1; } -static int __show_available_funcs(struct map *map) -{ - if (map__load(map, filter_available_functions)) { - pr_err("Failed to load map.\n"); - return -EINVAL; - } - if (!dso__sorted_by_name(map->dso, map->type)) - dso__sort_by_name(map->dso, map->type); - - dso__fprintf_symbols_by_name(map->dso, map->type, stdout); - return 0; -} - -static int available_kernel_funcs(const char *module) +int show_available_funcs(const char *target, struct strfilter *_filter, + bool user) { struct map *map; int ret; - ret = init_vmlinux(); + ret = init_symbol_maps(user); if (ret < 0) return ret; - map = kernel_get_module_map(module); + /* Get a symbol map */ + if (user) + map = dso__new_map(target); + else + map = kernel_get_module_map(target); if (!map) { - pr_err("Failed to find %s map.\n", (module) ? : "kernel"); + pr_err("Failed to get a map for %s\n", (target) ? : "kernel"); return -EINVAL; } - return __show_available_funcs(map); -} -static int available_user_funcs(const char *target) -{ - struct map *map; - int ret; - - ret = init_user_exec(); - if (ret < 0) - return ret; - - map = dso__new_map(target); - ret = __show_available_funcs(map); - dso__delete(map->dso); - map__delete(map); - return ret; -} - -int show_available_funcs(const char *target, struct strfilter *_filter, - bool user) -{ - setup_pager(); + /* Load symbols with given filter */ available_func_filter = _filter; - - if (!user) - return available_kernel_funcs(target); - - return available_user_funcs(target); -} - -/* - * uprobe_events only accepts address: - * Convert function and any offset to address - */ -static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec) -{ - struct perf_probe_point *pp = &pev->point; - struct symbol *sym; - struct map *map = NULL; - char *function = NULL, *name = NULL; - int ret = -EINVAL; - unsigned long long vaddr = 0; - - if (!pp->function) { - pr_warning("No function specified for uprobes"); - goto out; - } - - function = strdup(pp->function); - if (!function) { - pr_warning("Failed to allocate memory by strdup.\n"); - ret = -ENOMEM; - goto out; - } - - name = realpath(exec, NULL); - if (!name) { - pr_warning("Cannot find realpath for %s.\n", exec); - goto out; - } - map = dso__new_map(name); - if (!map) { - pr_warning("Cannot find appropriate DSO for %s.\n", exec); - goto out; - } - available_func_filter = strfilter__new(function, NULL); if (map__load(map, filter_available_functions)) { - pr_err("Failed to load map.\n"); - goto out; - } - - sym = map__find_symbol_by_name(map, function, NULL); - if (!sym) { - pr_warning("Cannot find %s in DSO %s\n", function, exec); - goto out; - } - - if (map->start > sym->start) - vaddr = map->start; - vaddr += sym->start + pp->offset + map->pgoff; - pp->offset = 0; - - if (!pev->event) { - pev->event = function; - function = NULL; - } - if (!pev->group) { - char *ptr1, *ptr2, *exec_copy; - - pev->group = zalloc(sizeof(char *) * 64); - exec_copy = strdup(exec); - if (!exec_copy) { - ret = -ENOMEM; - pr_warning("Failed to copy exec string.\n"); - goto out; - } - - ptr1 = strdup(basename(exec_copy)); - if (ptr1) { - ptr2 = strpbrk(ptr1, "-._"); - if (ptr2) - *ptr2 = '\0'; - 
e_snprintf(pev->group, 64, "%s_%s", PERFPROBE_GROUP, - ptr1); - free(ptr1); - } - free(exec_copy); - } - free(pp->function); - pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS); - if (!pp->function) { - ret = -ENOMEM; - pr_warning("Failed to allocate memory by zalloc.\n"); - goto out; + pr_err("Failed to load symbols in %s\n", (target) ? : "kernel"); + goto end; } - e_snprintf(pp->function, MAX_PROBE_ARGS, "0x%llx", vaddr); - ret = 0; + if (!dso__sorted_by_name(map->dso, map->type)) + dso__sort_by_name(map->dso, map->type); -out: - if (map) { + /* Show all (filtered) symbols */ + setup_pager(); + dso__fprintf_symbols_by_name(map->dso, map->type, stdout); +end: + if (user) { dso__delete(map->dso); map__delete(map); } - if (function) - free(function); - if (name) - free(name); + exit_symbol_maps(); + return ret; } + diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index f9f3de8b422..776c9347a3b 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -2,6 +2,7 @@ #define _PROBE_EVENT_H #include <stdbool.h> +#include "intlist.h" #include "strlist.h" #include "strfilter.h" @@ -12,6 +13,7 @@ struct probe_trace_point { char *symbol; /* Base symbol */ char *module; /* Module name */ unsigned long offset; /* Offset from symbol */ + unsigned long address; /* Actual address of the trace point */ bool retprobe; /* Return probe flag */ }; @@ -75,13 +77,6 @@ struct perf_probe_event { struct perf_probe_arg *args; /* Arguments */ }; - -/* Line number container */ -struct line_node { - struct list_head list; - int line; -}; - /* Line range */ struct line_range { char *file; /* File name */ @@ -91,7 +86,7 @@ struct line_range { int offset; /* Start line offset */ char *path; /* Real path name */ char *comp_dir; /* Compile directory */ - struct list_head line_list; /* Visible lines */ + struct intlist *line_list; /* Visible lines */ }; /* List of variables */ @@ -119,6 +114,12 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev); /* Command string to line-range */ extern int parse_line_range_desc(const char *cmd, struct line_range *lr); +/* Release line range members */ +extern void line_range__clear(struct line_range *lr); + +/* Initialize line range */ +extern int line_range__init(struct line_range *lr); + /* Internal use: Return kernel/module path */ extern const char *kernel_get_module_path(const char *module); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 1daf5c14e75..98e30476641 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -34,7 +34,9 @@ #include <linux/bitops.h> #include "event.h" +#include "dso.h" #include "debug.h" +#include "intlist.h" #include "util.h" #include "symbol.h" #include "probe-finder.h" @@ -42,65 +44,6 @@ /* Kprobe tracer basic type is up to u64 */ #define MAX_BASIC_TYPE_BITS 64 -/* Line number list operations */ - -/* Add a line to line number list */ -static int line_list__add_line(struct list_head *head, int line) -{ - struct line_node *ln; - struct list_head *p; - - /* Reverse search, because new line will be the last one */ - list_for_each_entry_reverse(ln, head, list) { - if (ln->line < line) { - p = &ln->list; - goto found; - } else if (ln->line == line) /* Already exist */ - return 1; - } - /* List is empty, or the smallest entry */ - p = head; -found: - pr_debug("line list: add a line %u\n", line); - ln = zalloc(sizeof(struct line_node)); - if (ln == NULL) - return -ENOMEM; - ln->line = line; - INIT_LIST_HEAD(&ln->list); - 
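/*
 * A minimal sketch, not part of the patch itself: how the intlist stands
 * in for the open-coded line_node list that this patch removes.  The
 * fragment assumes it is built inside tools/perf, where util/intlist.h
 * provides the calls used below (each of them appears elsewhere in this
 * patch); the duplicate-handling and uniqueness semantics are inferred
 * from how the patch itself uses the API.
 */
#include <errno.h>
#include "intlist.h"

static int sketch_collect_lines(void)
{
	struct intlist *lines;
	int ret;

	lines = intlist__new(NULL);		/* empty list, entries kept unique */
	if (!lines)
		return -ENOMEM;

	ret = intlist__add(lines, 42);		/* replaces line_list__add_line() */
	if (ret < 0 && ret != -EEXIST)		/* duplicates tolerated, as in line_range_walk_cb() */
		goto out;

	/* replaces line_list__has_line() */
	ret = intlist__has_entry(lines, 42) ? 0 : -ENOENT;
out:
	intlist__delete(lines);			/* replaces line_list__free() */
	return ret;
}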
list_add(&ln->list, p); - return 0; -} - -/* Check if the line in line number list */ -static int line_list__has_line(struct list_head *head, int line) -{ - struct line_node *ln; - - /* Reverse search, because new line will be the last one */ - list_for_each_entry(ln, head, list) - if (ln->line == line) - return 1; - - return 0; -} - -/* Init line number list */ -static void line_list__init(struct list_head *head) -{ - INIT_LIST_HEAD(head); -} - -/* Free line number list */ -static void line_list__free(struct list_head *head) -{ - struct line_node *ln; - while (!list_empty(head)) { - ln = list_first_entry(head, struct line_node, list); - list_del(&ln->list); - free(ln); - } -} - /* Dwarf FL wrappers */ static char *debuginfo_path; /* Currently dummy */ @@ -115,146 +58,92 @@ static const Dwfl_Callbacks offline_callbacks = { }; /* Get a Dwarf from offline image */ -static int debuginfo__init_offline_dwarf(struct debuginfo *self, +static int debuginfo__init_offline_dwarf(struct debuginfo *dbg, const char *path) { - Dwfl_Module *mod; int fd; fd = open(path, O_RDONLY); if (fd < 0) return fd; - self->dwfl = dwfl_begin(&offline_callbacks); - if (!self->dwfl) + dbg->dwfl = dwfl_begin(&offline_callbacks); + if (!dbg->dwfl) goto error; - mod = dwfl_report_offline(self->dwfl, "", "", fd); - if (!mod) + dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd); + if (!dbg->mod) goto error; - self->dbg = dwfl_module_getdwarf(mod, &self->bias); - if (!self->dbg) + dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias); + if (!dbg->dbg) goto error; return 0; error: - if (self->dwfl) - dwfl_end(self->dwfl); + if (dbg->dwfl) + dwfl_end(dbg->dwfl); else close(fd); - memset(self, 0, sizeof(*self)); + memset(dbg, 0, sizeof(*dbg)); return -ENOENT; } -#if _ELFUTILS_PREREQ(0, 148) -/* This method is buggy if elfutils is older than 0.148 */ -static int __linux_kernel_find_elf(Dwfl_Module *mod, - void **userdata, - const char *module_name, - Dwarf_Addr base, - char **file_name, Elf **elfp) -{ - int fd; - const char *path = kernel_get_module_path(module_name); - - pr_debug2("Use file %s for %s\n", path, module_name); - if (path) { - fd = open(path, O_RDONLY); - if (fd >= 0) { - *file_name = strdup(path); - return fd; - } - } - /* If failed, try to call standard method */ - return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base, - file_name, elfp); -} - -static const Dwfl_Callbacks kernel_callbacks = { - .find_debuginfo = dwfl_standard_find_debuginfo, - .debuginfo_path = &debuginfo_path, - - .find_elf = __linux_kernel_find_elf, - .section_address = dwfl_linux_kernel_module_section_address, -}; - -/* Get a Dwarf from live kernel image */ -static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, - Dwarf_Addr addr) +static struct debuginfo *__debuginfo__new(const char *path) { - self->dwfl = dwfl_begin(&kernel_callbacks); - if (!self->dwfl) - return -EINVAL; - - /* Load the kernel dwarves: Don't care the result here */ - dwfl_linux_kernel_report_kernel(self->dwfl); - dwfl_linux_kernel_report_modules(self->dwfl); - - self->dbg = dwfl_addrdwarf(self->dwfl, addr, &self->bias); - /* Here, check whether we could get a real dwarf */ - if (!self->dbg) { - pr_debug("Failed to find kernel dwarf at %lx\n", - (unsigned long)addr); - dwfl_end(self->dwfl); - memset(self, 0, sizeof(*self)); - return -ENOENT; - } + struct debuginfo *dbg = zalloc(sizeof(*dbg)); + if (!dbg) + return NULL; - return 0; + if (debuginfo__init_offline_dwarf(dbg, path) < 0) + zfree(&dbg); + if (dbg) + pr_debug("Open Debuginfo file: 
%s\n", path); + return dbg; } -#else -/* With older elfutils, this just support kernel module... */ -static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, - Dwarf_Addr addr __maybe_unused) -{ - const char *path = kernel_get_module_path("kernel"); - - if (!path) { - pr_err("Failed to find vmlinux path\n"); - return -ENOENT; - } - pr_debug2("Use file %s for debuginfo\n", path); - return debuginfo__init_offline_dwarf(self, path); -} -#endif +enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, +}; struct debuginfo *debuginfo__new(const char *path) { - struct debuginfo *self = zalloc(sizeof(struct debuginfo)); - if (!self) - return NULL; - - if (debuginfo__init_offline_dwarf(self, path) < 0) { - free(self); - self = NULL; - } - - return self; -} - -struct debuginfo *debuginfo__new_online_kernel(unsigned long addr) -{ - struct debuginfo *self = zalloc(sizeof(struct debuginfo)); - if (!self) - return NULL; + enum dso_binary_type *type; + char buf[PATH_MAX], nil = '\0'; + struct dso *dso; + struct debuginfo *dinfo = NULL; + + /* Try to open distro debuginfo files */ + dso = dso__new(path); + if (!dso) + goto out; - if (debuginfo__init_online_kernel_dwarf(self, (Dwarf_Addr)addr) < 0) { - free(self); - self = NULL; + for (type = distro_dwarf_types; + !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND; + type++) { + if (dso__read_binary_type_filename(dso, *type, &nil, + buf, PATH_MAX) < 0) + continue; + dinfo = __debuginfo__new(buf); } + dso__delete(dso); - return self; +out: + /* if failed to open all distro debuginfo, open given binary */ + return dinfo ? : __debuginfo__new(path); } -void debuginfo__delete(struct debuginfo *self) +void debuginfo__delete(struct debuginfo *dbg) { - if (self) { - if (self->dwfl) - dwfl_end(self->dwfl); - free(self); + if (dbg) { + if (dbg->dwfl) + dwfl_end(dbg->dwfl); + free(dbg); } } @@ -274,12 +163,15 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs) /* * Convert a location into trace_arg. * If tvar == NULL, this just checks variable can be converted. + * If fentry == true and vr_die is a parameter, do huristic search + * for the location fuzzed by function entry mcount. */ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, - Dwarf_Op *fb_ops, + Dwarf_Op *fb_ops, Dwarf_Die *sp_die, struct probe_trace_arg *tvar) { Dwarf_Attribute attr; + Dwarf_Addr tmp = 0; Dwarf_Op *op; size_t nops; unsigned int regn; @@ -292,12 +184,29 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, goto static_var; /* TODO: handle more than 1 exprs */ - if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL || - dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 || - nops == 0) { - /* TODO: Support const_value */ + if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL) + return -EINVAL; /* Broken DIE ? */ + if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) { + ret = dwarf_entrypc(sp_die, &tmp); + if (ret || addr != tmp || + dwarf_tag(vr_die) != DW_TAG_formal_parameter || + dwarf_highpc(sp_die, &tmp)) + return -ENOENT; + /* + * This is fuzzed by fentry mcount. We try to find the + * parameter location at the earliest address. 
+ */ + for (addr += 1; addr <= tmp; addr++) { + if (dwarf_getlocation_addr(&attr, addr, &op, + &nops, 1) > 0) + goto found; + } return -ENOENT; } +found: + if (nops == 0) + /* TODO: Support const_value */ + return -ENOENT; if (op->atom == DW_OP_addr) { static_var: @@ -413,12 +322,12 @@ static int convert_variable_type(Dwarf_Die *vr_die, dwarf_diename(vr_die), dwarf_diename(&type)); return -EINVAL; } + if (die_get_real_type(&type, &type) == NULL) { + pr_warning("Failed to get a type" + " information.\n"); + return -ENOENT; + } if (ret == DW_TAG_pointer_type) { - if (die_get_real_type(&type, &type) == NULL) { - pr_warning("Failed to get a type" - " information.\n"); - return -ENOENT; - } while (*ref_ptr) ref_ptr = &(*ref_ptr)->next; /* Add new reference with offset +0 */ @@ -564,7 +473,7 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, } if (die_find_member(&type, field->name, die_mem) == NULL) { - pr_warning("%s(tyep:%s) has no member %s.\n", varname, + pr_warning("%s(type:%s) has no member %s.\n", varname, dwarf_diename(&type), field->name); return -EINVAL; } @@ -601,13 +510,13 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) dwarf_diename(vr_die)); ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, - pf->tvar); - if (ret == -ENOENT) + &pf->sp_die, pf->tvar); + if (ret == -ENOENT || ret == -EINVAL) pr_err("Failed to find the location of %s at this address.\n" " Perhaps, it has been optimized out.\n", pf->pvar->var); else if (ret == -ENOTSUP) pr_err("Sorry, we don't support this variable location yet.\n"); - else if (pf->pvar->field) { + else if (ret == 0 && pf->pvar->field) { ret = convert_variable_fields(vr_die, pf->pvar->var, pf->pvar->field, &pf->tvar->ref, &die_mem); @@ -664,49 +573,54 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { /* Search again in global variables */ if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) + pr_warning("Failed to find '%s' in this function.\n", + pf->pvar->var); ret = -ENOENT; } if (ret >= 0) ret = convert_variable(&vr_die, pf); - if (ret < 0) - pr_warning("Failed to find '%s' in this function.\n", - pf->pvar->var); return ret; } /* Convert subprogram DIE to trace point */ -static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, - bool retprobe, struct probe_trace_point *tp) +static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod, + Dwarf_Addr paddr, bool retprobe, + struct probe_trace_point *tp) { Dwarf_Addr eaddr, highaddr; - const char *name; - - /* Copy the name of probe point */ - name = dwarf_diename(sp_die); - if (name) { - if (dwarf_entrypc(sp_die, &eaddr) != 0) { - pr_warning("Failed to get entry address of %s\n", - dwarf_diename(sp_die)); - return -ENOENT; - } - if (dwarf_highpc(sp_die, &highaddr) != 0) { - pr_warning("Failed to get end address of %s\n", - dwarf_diename(sp_die)); - return -ENOENT; - } - if (paddr > highaddr) { - pr_warning("Offset specified is greater than size of %s\n", - dwarf_diename(sp_die)); - return -EINVAL; - } - tp->symbol = strdup(name); - if (tp->symbol == NULL) - return -ENOMEM; - tp->offset = (unsigned long)(paddr - eaddr); - } else - /* This function has no name. 
*/ - tp->offset = (unsigned long)paddr; + GElf_Sym sym; + const char *symbol; + + /* Verify the address is correct */ + if (dwarf_entrypc(sp_die, &eaddr) != 0) { + pr_warning("Failed to get entry address of %s\n", + dwarf_diename(sp_die)); + return -ENOENT; + } + if (dwarf_highpc(sp_die, &highaddr) != 0) { + pr_warning("Failed to get end address of %s\n", + dwarf_diename(sp_die)); + return -ENOENT; + } + if (paddr > highaddr) { + pr_warning("Offset specified is greater than size of %s\n", + dwarf_diename(sp_die)); + return -EINVAL; + } + + /* Get an appropriate symbol from symtab */ + symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL); + if (!symbol) { + pr_warning("Failed to find symbol at 0x%lx\n", + (unsigned long)paddr); + return -ENOENT; + } + tp->offset = (unsigned long)(paddr - sym.st_value); + tp->address = (unsigned long)paddr; + tp->symbol = strdup(symbol); + if (!tp->symbol) + return -ENOMEM; /* Return probe must be on the head of a subprogram */ if (retprobe) { @@ -734,7 +648,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) } /* If not a real subprogram, find a real one */ - if (dwarf_tag(sc_die) != DW_TAG_subprogram) { + if (!die_is_func_def(sc_die)) { if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { pr_warning("Failed to find probe point in any " "functions.\n"); @@ -858,7 +772,7 @@ static int find_probe_point_by_line(struct probe_finder *pf) } /* Find lines which match lazy pattern */ -static int find_lazy_match_lines(struct list_head *head, +static int find_lazy_match_lines(struct intlist *list, const char *fname, const char *pat) { FILE *fp; @@ -879,7 +793,7 @@ static int find_lazy_match_lines(struct list_head *head, line[len - 1] = '\0'; if (strlazymatch(line, pat)) { - line_list__add_line(head, linenum); + intlist__add(list, linenum); count++; } linenum++; @@ -902,7 +816,7 @@ static int probe_point_lazy_walker(const char *fname, int lineno, Dwarf_Die *sc_die, die_mem; int ret; - if (!line_list__has_line(&pf->lcache, lineno) || + if (!intlist__has_entry(pf->lcache, lineno) || strtailcmp(fname, pf->fname) != 0) return 0; @@ -930,9 +844,9 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) { int ret = 0; - if (list_empty(&pf->lcache)) { + if (intlist__empty(pf->lcache)) { /* Matching lazy line pattern */ - ret = find_lazy_match_lines(&pf->lcache, pf->fname, + ret = find_lazy_match_lines(pf->lcache, pf->fname, pf->pev->point.lazy_line); if (ret <= 0) return ret; @@ -980,12 +894,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) struct dwarf_callback_param *param = data; struct probe_finder *pf = param->data; struct perf_probe_point *pp = &pf->pev->point; - Dwarf_Attribute attr; /* Check tag and diename */ - if (dwarf_tag(sp_die) != DW_TAG_subprogram || - !die_compare_name(sp_die, pp->function) || - dwarf_attr(sp_die, DW_AT_declaration, &attr)) + if (!die_is_func_def(sp_die) || + !die_compare_name(sp_die, pp->function)) return DWARF_CB_OK; /* Check declared file */ @@ -1061,7 +973,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) } /* Find probe points from debuginfo */ -static int debuginfo__find_probes(struct debuginfo *self, +static int debuginfo__find_probes(struct debuginfo *dbg, struct probe_finder *pf) { struct perf_probe_point *pp = &pf->pev->point; @@ -1072,11 +984,13 @@ static int debuginfo__find_probes(struct debuginfo *self, #if _ELFUTILS_PREREQ(0, 142) /* Get the call frame information from this dwarf */ - pf->cfi = dwarf_getcfi(self->dbg); + 
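/*
 * A minimal sketch, not part of the patch itself: the symtab lookup that
 * convert_to_trace_point() now performs.  Given a Dwfl_Module and an
 * address, dwfl_module_addrsym() returns the nearest containing symbol,
 * and the probe is expressed as "symbol+offset".  This assumes elfutils'
 * libdwfl (<elfutils/libdwfl.h>); the helper name and buffer handling
 * are illustrative only.
 */
#include <elfutils/libdwfl.h>
#include <stdio.h>

static int sketch_format_probe(Dwfl_Module *mod, GElf_Addr paddr,
			       char *buf, size_t len)
{
	GElf_Sym sym;
	const char *name;

	name = dwfl_module_addrsym(mod, paddr, &sym, NULL);
	if (!name)
		return -1;	/* no symbol covers this address */

	/* same arithmetic as convert_to_trace_point(): offset from st_value */
	return snprintf(buf, len, "%s+%lu",
			name, (unsigned long)(paddr - sym.st_value));
}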
pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg)); #endif off = 0; - line_list__init(&pf->lcache); + pf->lcache = intlist__new(NULL); + if (!pf->lcache) + return -ENOMEM; /* Fastpath: lookup by function name from .debug_pubnames section */ if (pp->function) { @@ -1091,7 +1005,7 @@ static int debuginfo__find_probes(struct debuginfo *self, .data = pf, }; - dwarf_getpubnames(self->dbg, pubname_search_cb, + dwarf_getpubnames(dbg->dbg, pubname_search_cb, &pubname_param, 0); if (pubname_param.found) { ret = probe_point_search_cb(&pf->sp_die, &probe_param); @@ -1101,9 +1015,9 @@ static int debuginfo__find_probes(struct debuginfo *self, } /* Loop on CUs (Compilation Unit) */ - while (!dwarf_nextcu(self->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { + while (!dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { /* Get the DIE(Debugging Information Entry) of this CU */ - diep = dwarf_offdie(self->dbg, off + cuhl, &pf->cu_die); + diep = dwarf_offdie(dbg->dbg, off + cuhl, &pf->cu_die); if (!diep) continue; @@ -1129,17 +1043,86 @@ static int debuginfo__find_probes(struct debuginfo *self, } found: - line_list__free(&pf->lcache); + intlist__delete(pf->lcache); + pf->lcache = NULL; return ret; } +struct local_vars_finder { + struct probe_finder *pf; + struct perf_probe_arg *args; + int max_args; + int nargs; + int ret; +}; + +/* Collect available variables in this scope */ +static int copy_variables_cb(Dwarf_Die *die_mem, void *data) +{ + struct local_vars_finder *vf = data; + struct probe_finder *pf = vf->pf; + int tag; + + tag = dwarf_tag(die_mem); + if (tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) { + if (convert_variable_location(die_mem, vf->pf->addr, + vf->pf->fb_ops, &pf->sp_die, + NULL) == 0) { + vf->args[vf->nargs].var = (char *)dwarf_diename(die_mem); + if (vf->args[vf->nargs].var == NULL) { + vf->ret = -ENOMEM; + return DIE_FIND_CB_END; + } + pr_debug(" %s", vf->args[vf->nargs].var); + vf->nargs++; + } + } + + if (dwarf_haspc(die_mem, vf->pf->addr)) + return DIE_FIND_CB_CONTINUE; + else + return DIE_FIND_CB_SIBLING; +} + +static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf, + struct perf_probe_arg *args) +{ + Dwarf_Die die_mem; + int i; + int n = 0; + struct local_vars_finder vf = {.pf = pf, .args = args, + .max_args = MAX_PROBE_ARGS, .ret = 0}; + + for (i = 0; i < pf->pev->nargs; i++) { + /* var never be NULL */ + if (strcmp(pf->pev->args[i].var, "$vars") == 0) { + pr_debug("Expanding $vars into:"); + vf.nargs = n; + /* Special local variables */ + die_find_child(sc_die, copy_variables_cb, (void *)&vf, + &die_mem); + pr_debug(" (%d)\n", vf.nargs - n); + if (vf.ret < 0) + return vf.ret; + n = vf.nargs; + } else { + /* Copy normal argument */ + args[n] = pf->pev->args[i]; + n++; + } + } + return n; +} + /* Add a found probe point into trace event list */ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) { struct trace_event_finder *tf = container_of(pf, struct trace_event_finder, pf); struct probe_trace_event *tev; + struct perf_probe_arg *args; int ret, i; /* Check number of tevs */ @@ -1151,7 +1134,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) tev = &tf->tevs[tf->ntevs++]; /* Trace point should be converted from subprogram DIE */ - ret = convert_to_trace_point(&pf->sp_die, pf->addr, + ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, pf->pev->point.retprobe, &tev->point); if (ret < 0) return ret; @@ -1159,31 +1142,45 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, 
struct probe_finder *pf) pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, tev->point.offset); - /* Find each argument */ - tev->nargs = pf->pev->nargs; - tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); - if (tev->args == NULL) + /* Expand special probe argument if exist */ + args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); + if (args == NULL) return -ENOMEM; - for (i = 0; i < pf->pev->nargs; i++) { - pf->pvar = &pf->pev->args[i]; + + ret = expand_probe_args(sc_die, pf, args); + if (ret < 0) + goto end; + + tev->nargs = ret; + tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); + if (tev->args == NULL) { + ret = -ENOMEM; + goto end; + } + + /* Find each argument */ + for (i = 0; i < tev->nargs; i++) { + pf->pvar = &args[i]; pf->tvar = &tev->args[i]; /* Variable should be found from scope DIE */ ret = find_variable(sc_die, pf); if (ret != 0) - return ret; + break; } - return 0; +end: + free(args); + return ret; } /* Find probe_trace_events specified by perf_probe_event from debuginfo */ -int debuginfo__find_trace_events(struct debuginfo *self, +int debuginfo__find_trace_events(struct debuginfo *dbg, struct perf_probe_event *pev, struct probe_trace_event **tevs, int max_tevs) { struct trace_event_finder tf = { .pf = {.pev = pev, .callback = add_probe_trace_event}, - .max_tevs = max_tevs}; + .mod = dbg->mod, .max_tevs = max_tevs}; int ret; /* Allocate result tevs array */ @@ -1194,10 +1191,9 @@ int debuginfo__find_trace_events(struct debuginfo *self, tf.tevs = *tevs; tf.ntevs = 0; - ret = debuginfo__find_probes(self, &tf.pf); + ret = debuginfo__find_probes(dbg, &tf.pf); if (ret < 0) { - free(*tevs); - *tevs = NULL; + zfree(tevs); return ret; } @@ -1220,7 +1216,8 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) if (tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) { ret = convert_variable_location(die_mem, af->pf.addr, - af->pf.fb_ops, NULL); + af->pf.fb_ops, &af->pf.sp_die, + NULL); if (ret == 0) { ret = die_get_varname(die_mem, buf, MAX_VAR_LEN); pr_debug2("Add new var: %s\n", buf); @@ -1252,7 +1249,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) vl = &af->vls[af->nvls++]; /* Trace point should be converted from subprogram DIE */ - ret = convert_to_trace_point(&pf->sp_die, pf->addr, + ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr, pf->pev->point.retprobe, &vl->point); if (ret < 0) return ret; @@ -1283,14 +1280,19 @@ out: return ret; } -/* Find available variables at given probe point */ -int debuginfo__find_available_vars_at(struct debuginfo *self, +/* + * Find available variables at given probe point + * Return the number of found probe points. Return 0 if there is no + * matched probe point. Return <0 if an error occurs. 
+ */ +int debuginfo__find_available_vars_at(struct debuginfo *dbg, struct perf_probe_event *pev, struct variable_list **vls, int max_vls, bool externs) { struct available_var_finder af = { .pf = {.pev = pev, .callback = add_available_vars}, + .mod = dbg->mod, .max_vls = max_vls, .externs = externs}; int ret; @@ -1302,17 +1304,14 @@ int debuginfo__find_available_vars_at(struct debuginfo *self, af.vls = *vls; af.nvls = 0; - ret = debuginfo__find_probes(self, &af.pf); + ret = debuginfo__find_probes(dbg, &af.pf); if (ret < 0) { /* Free vlist for error */ while (af.nvls--) { - if (af.vls[af.nvls].point.symbol) - free(af.vls[af.nvls].point.symbol); - if (af.vls[af.nvls].vars) - strlist__delete(af.vls[af.nvls].vars); + zfree(&af.vls[af.nvls].point.symbol); + strlist__delete(af.vls[af.nvls].vars); } - free(af.vls); - *vls = NULL; + zfree(vls); return ret; } @@ -1320,19 +1319,19 @@ int debuginfo__find_available_vars_at(struct debuginfo *self, } /* Reverse search */ -int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, +int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, struct perf_probe_point *ppt) { Dwarf_Die cudie, spdie, indie; - Dwarf_Addr _addr, baseaddr; - const char *fname = NULL, *func = NULL, *tmp; + Dwarf_Addr _addr = 0, baseaddr = 0; + const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; /* Adjust address with bias */ - addr += self->bias; + addr += dbg->bias; /* Find cu die */ - if (!dwarf_addrdie(self->dbg, (Dwarf_Addr)addr - self->bias, &cudie)) { + if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr - dbg->bias, &cudie)) { pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; @@ -1346,27 +1345,36 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, /* Find a corresponding function (name, baseline and baseaddr) */ if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) { /* Get function entry information */ - tmp = dwarf_diename(&spdie); - if (!tmp || + func = basefunc = dwarf_diename(&spdie); + if (!func || dwarf_entrypc(&spdie, &baseaddr) != 0 || - dwarf_decl_line(&spdie, &baseline) != 0) + dwarf_decl_line(&spdie, &baseline) != 0) { + lineno = 0; goto post; - func = tmp; + } - if (addr == (unsigned long)baseaddr) + fname = dwarf_decl_file(&spdie); + if (addr == (unsigned long)baseaddr) { /* Function entry - Relative line number is 0 */ lineno = baseline; - else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, - &indie)) { + goto post; + } + + /* Track down the inline functions step by step */ + while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr, + &indie)) { + /* There is an inline function */ if (dwarf_entrypc(&indie, &_addr) == 0 && - _addr == addr) + _addr == addr) { /* * addr is at an inline function entry. * In this case, lineno should be the call-site - * line number. + * line number. (overwrite lineinfo) */ lineno = die_get_call_lineno(&indie); - else { + fname = die_get_call_file(&indie); + break; + } else { /* * addr is in an inline function body. * Since lineno points one of the lines @@ -1374,19 +1382,27 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr, * be the entry line of the inline function. 
*/ tmp = dwarf_diename(&indie); - if (tmp && - dwarf_decl_line(&spdie, &baseline) == 0) - func = tmp; + if (!tmp || + dwarf_decl_line(&indie, &baseline) != 0) + break; + func = tmp; + spdie = indie; } } + /* Verify the lineno and baseline are in a same file */ + tmp = dwarf_decl_file(&spdie); + if (!tmp || strcmp(tmp, fname) != 0) + lineno = 0; } post: /* Make a relative line number or an offset */ if (lineno) ppt->line = lineno - baseline; - else if (func) + else if (basefunc) { ppt->offset = addr - (unsigned long)baseaddr; + func = basefunc; + } /* Duplicate strings */ if (func) { @@ -1399,10 +1415,7 @@ post: if (fname) { ppt->file = strdup(fname); if (ppt->file == NULL) { - if (ppt->function) { - free(ppt->function); - ppt->function = NULL; - } + zfree(&ppt->function); ret = -ENOMEM; goto end; } @@ -1423,7 +1436,7 @@ static int line_range_add_line(const char *src, unsigned int lineno, if (lr->path == NULL) return -ENOMEM; } - return line_list__add_line(&lr->line_list, lineno); + return intlist__add(lr->line_list, lineno); } static int line_range_walk_cb(const char *fname, int lineno, @@ -1431,13 +1444,15 @@ static int line_range_walk_cb(const char *fname, int lineno, void *data) { struct line_finder *lf = data; + int err; if ((strtailcmp(fname, lf->fname) != 0) || (lf->lno_s > lineno || lf->lno_e < lineno)) return 0; - if (line_range_add_line(fname, lineno, lf->lr) < 0) - return -EINVAL; + err = line_range_add_line(fname, lineno, lf->lr); + if (err < 0 && err != -EEXIST) + return err; return 0; } @@ -1451,30 +1466,30 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) /* Update status */ if (ret >= 0) - if (!list_empty(&lf->lr->line_list)) + if (!intlist__empty(lf->lr->line_list)) ret = lf->found = 1; else ret = 0; /* Lines are not found */ else { - free(lf->lr->path); - lf->lr->path = NULL; + zfree(&lf->lr->path); } return ret; } static int line_range_inline_cb(Dwarf_Die *in_die, void *data) { - find_line_range_by_line(in_die, data); + int ret = find_line_range_by_line(in_die, data); /* * We have to check all instances of inlined function, because * some execution paths can be optimized out depends on the - * function argument of instances + * function argument of instances. However, if an error occurs, + * it should be handled by the caller. */ - return 0; + return ret < 0 ? 
ret : 0; } -/* Search function from function name */ +/* Search function definition from function name */ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) { struct dwarf_callback_param *param = data; @@ -1485,7 +1500,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die))) return DWARF_CB_OK; - if (dwarf_tag(sp_die) == DW_TAG_subprogram && + if (die_is_func_def(sp_die) && die_compare_name(sp_die, lr->function)) { lf->fname = dwarf_decl_file(sp_die); dwarf_decl_line(sp_die, &lr->offset); @@ -1516,7 +1531,7 @@ static int find_line_range_by_func(struct line_finder *lf) return param.retval; } -int debuginfo__find_line_range(struct debuginfo *self, struct line_range *lr) +int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr) { struct line_finder lf = {.lr = lr, .found = 0}; int ret = 0; @@ -1533,7 +1548,7 @@ int debuginfo__find_line_range(struct debuginfo *self, struct line_range *lr) struct dwarf_callback_param line_range_param = { .data = (void *)&lf, .retval = 0}; - dwarf_getpubnames(self->dbg, pubname_search_cb, + dwarf_getpubnames(dbg->dbg, pubname_search_cb, &pubname_param, 0); if (pubname_param.found) { line_range_search_cb(&lf.sp_die, &line_range_param); @@ -1544,12 +1559,12 @@ int debuginfo__find_line_range(struct debuginfo *self, struct line_range *lr) /* Loop on CUs (Compilation Unit) */ while (!lf.found && ret >= 0) { - if (dwarf_nextcu(self->dbg, off, &noff, &cuhl, + if (dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) break; /* Get the DIE(Debugging Information Entry) of this CU */ - diep = dwarf_offdie(self->dbg, off + cuhl, &lf.cu_die); + diep = dwarf_offdie(dbg->dbg, off + cuhl, &lf.cu_die); if (!diep) continue; diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 17e94d0c36f..92590b2c7e1 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -3,10 +3,12 @@ #include <stdbool.h> #include "util.h" +#include "intlist.h" #include "probe-event.h" #define MAX_PROBE_BUFFER 1024 #define MAX_PROBES 128 +#define MAX_PROBE_ARGS 128 static inline int is_c_varname(const char *name) { @@ -14,7 +16,7 @@ static inline int is_c_varname(const char *name) return isalpha(name[0]) || name[0] == '_'; } -#ifdef DWARF_SUPPORT +#ifdef HAVE_DWARF_SUPPORT #include "dwarf-aux.h" @@ -23,31 +25,32 @@ static inline int is_c_varname(const char *name) /* debug information structure */ struct debuginfo { Dwarf *dbg; + Dwfl_Module *mod; Dwfl *dwfl; Dwarf_Addr bias; }; +/* This also tries to open distro debuginfo */ extern struct debuginfo *debuginfo__new(const char *path); -extern struct debuginfo *debuginfo__new_online_kernel(unsigned long addr); -extern void debuginfo__delete(struct debuginfo *self); +extern void debuginfo__delete(struct debuginfo *dbg); /* Find probe_trace_events specified by perf_probe_event from debuginfo */ -extern int debuginfo__find_trace_events(struct debuginfo *self, +extern int debuginfo__find_trace_events(struct debuginfo *dbg, struct perf_probe_event *pev, struct probe_trace_event **tevs, int max_tevs); /* Find a perf_probe_point from debuginfo */ -extern int debuginfo__find_probe_point(struct debuginfo *self, +extern int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, struct perf_probe_point *ppt); /* Find a line range */ -extern int debuginfo__find_line_range(struct debuginfo *self, +extern int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); /* Find 
available variables */ -extern int debuginfo__find_available_vars_at(struct debuginfo *self, +extern int debuginfo__find_available_vars_at(struct debuginfo *dbg, struct perf_probe_event *pev, struct variable_list **vls, int max_points, bool externs); @@ -64,7 +67,7 @@ struct probe_finder { const char *fname; /* Real file name */ Dwarf_Die cu_die; /* Current CU */ Dwarf_Die sp_die; - struct list_head lcache; /* Line cache for lazy match */ + struct intlist *lcache; /* Line cache for lazy match */ /* For variable searching */ #if _ELFUTILS_PREREQ(0, 142) @@ -77,6 +80,7 @@ struct probe_finder { struct trace_event_finder { struct probe_finder pf; + Dwfl_Module *mod; /* For solving symbols */ struct probe_trace_event *tevs; /* Found trace events */ int ntevs; /* Number of trace events */ int max_tevs; /* Max number of trace events */ @@ -84,6 +88,7 @@ struct trace_event_finder { struct available_var_finder { struct probe_finder pf; + Dwfl_Module *mod; /* For solving symbols */ struct variable_list *vls; /* Found variable lists */ int nvls; /* Number of variable lists */ int max_vls; /* Max no. of variable lists */ @@ -102,6 +107,6 @@ struct line_finder { int found; }; -#endif /* DWARF_SUPPORT */ +#endif /* HAVE_DWARF_SUPPORT */ #endif /*_PROBE_FINDER_H */ diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c index 13d36faf64e..daa17aeb6c6 100644 --- a/tools/perf/util/pstack.c +++ b/tools/perf/util/pstack.c @@ -17,59 +17,59 @@ struct pstack { struct pstack *pstack__new(unsigned short max_nr_entries) { - struct pstack *self = zalloc((sizeof(*self) + - max_nr_entries * sizeof(void *))); - if (self != NULL) - self->max_nr_entries = max_nr_entries; - return self; + struct pstack *pstack = zalloc((sizeof(*pstack) + + max_nr_entries * sizeof(void *))); + if (pstack != NULL) + pstack->max_nr_entries = max_nr_entries; + return pstack; } -void pstack__delete(struct pstack *self) +void pstack__delete(struct pstack *pstack) { - free(self); + free(pstack); } -bool pstack__empty(const struct pstack *self) +bool pstack__empty(const struct pstack *pstack) { - return self->top == 0; + return pstack->top == 0; } -void pstack__remove(struct pstack *self, void *key) +void pstack__remove(struct pstack *pstack, void *key) { - unsigned short i = self->top, last_index = self->top - 1; + unsigned short i = pstack->top, last_index = pstack->top - 1; while (i-- != 0) { - if (self->entries[i] == key) { + if (pstack->entries[i] == key) { if (i < last_index) - memmove(self->entries + i, - self->entries + i + 1, + memmove(pstack->entries + i, + pstack->entries + i + 1, (last_index - i) * sizeof(void *)); - --self->top; + --pstack->top; return; } } pr_err("%s: %p not on the pstack!\n", __func__, key); } -void pstack__push(struct pstack *self, void *key) +void pstack__push(struct pstack *pstack, void *key) { - if (self->top == self->max_nr_entries) { - pr_err("%s: top=%d, overflow!\n", __func__, self->top); + if (pstack->top == pstack->max_nr_entries) { + pr_err("%s: top=%d, overflow!\n", __func__, pstack->top); return; } - self->entries[self->top++] = key; + pstack->entries[pstack->top++] = key; } -void *pstack__pop(struct pstack *self) +void *pstack__pop(struct pstack *pstack) { void *ret; - if (self->top == 0) { + if (pstack->top == 0) { pr_err("%s: underflow!\n", __func__); return NULL; } - ret = self->entries[--self->top]; - self->entries[self->top] = NULL; + ret = pstack->entries[--pstack->top]; + pstack->entries[pstack->top] = NULL; return ret; } diff --git a/tools/perf/util/pstack.h 
b/tools/perf/util/pstack.h index 4cedea59f51..c3cb6584d52 100644 --- a/tools/perf/util/pstack.h +++ b/tools/perf/util/pstack.h @@ -5,10 +5,10 @@ struct pstack; struct pstack *pstack__new(unsigned short max_nr_entries); -void pstack__delete(struct pstack *self); -bool pstack__empty(const struct pstack *self); -void pstack__remove(struct pstack *self, void *key); -void pstack__push(struct pstack *self, void *key); -void *pstack__pop(struct pstack *self); +void pstack__delete(struct pstack *pstack); +bool pstack__empty(const struct pstack *pstack); +void pstack__remove(struct pstack *pstack, void *key); +void pstack__push(struct pstack *pstack, void *key); +void *pstack__pop(struct pstack *pstack); #endif /* _PERF_PSTACK_ */ diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index c40c2d33199..16a475a7d49 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -15,7 +15,8 @@ util/thread_map.c util/util.c util/xyarray.c util/cgroup.c -util/debugfs.c util/rblist.c util/strlist.c +../lib/api/fs/fs.c +util/trace-event.c ../../lib/rbtree.c diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 9181bf212fb..122669c18ff 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -8,18 +8,31 @@ #include "cpumap.h" #include "thread_map.h" +/* + * Support debug printing even though util/debug.c is not linked. That means + * implementing 'verbose' and 'eprintf'. + */ +int verbose; + +int eprintf(int level, const char *fmt, ...) +{ + va_list args; + int ret = 0; + + if (verbose >= level) { + va_start(args, fmt); + ret = vfprintf(stderr, fmt, args); + va_end(args); + } + + return ret; +} + /* Define PyVarObject_HEAD_INIT for python 2.5 */ #ifndef PyVarObject_HEAD_INIT # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, #endif -struct throttle_event { - struct perf_event_header header; - u64 time; - u64 id; - u64 stream_id; -}; - PyMODINIT_FUNC initperf(void); #define member_def(type, member, ptype, help) \ @@ -802,6 +815,8 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, PyObject *pyevent = pyrf_event__new(event); struct pyrf_event *pevent = (struct pyrf_event *)pyevent; + perf_evlist__mmap_consume(evlist, cpu); + if (pyevent == NULL) return PyErr_NoMemory(); @@ -893,9 +908,10 @@ static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i) if (i >= pevlist->evlist.nr_entries) return NULL; - list_for_each_entry(pos, &pevlist->evlist.entries, node) + evlist__for_each(&pevlist->evlist, pos) { if (i-- == 0) break; + } return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel)); } @@ -967,6 +983,7 @@ static struct { { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, + { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY }, { "SAMPLE_IP", PERF_SAMPLE_IP }, { "SAMPLE_TID", PERF_SAMPLE_TID }, @@ -1015,6 +1032,9 @@ PyMODINIT_FUNC initperf(void) pyrf_cpu_map__setup_types() < 0) return; + /* The page_size is placed in util object. */ + page_size = sysconf(_SC_PAGE_SIZE); + Py_INCREF(&pyrf_evlist__type); PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type); @@ -1043,3 +1063,12 @@ error: if (PyErr_Occurred()) PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); } + +/* + * Dummy, to avoid dragging all the test_attr infrastructure in the python + * binding. 
+ */ +void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, + int fd, int group_fd, unsigned long flags) +{ +} diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c index 0171fb61100..0dfe27d9945 100644 --- a/tools/perf/util/rblist.c +++ b/tools/perf/util/rblist.c @@ -44,13 +44,16 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry) void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node) { rb_erase(rb_node, &rblist->entries); + --rblist->nr_entries; rblist->node_delete(rblist, rb_node); } -struct rb_node *rblist__find(struct rblist *rblist, const void *entry) +static struct rb_node *__rblist__findnew(struct rblist *rblist, + const void *entry, + bool create) { struct rb_node **p = &rblist->entries.rb_node; - struct rb_node *parent = NULL; + struct rb_node *parent = NULL, *new_node = NULL; while (*p != NULL) { int rc; @@ -66,7 +69,26 @@ struct rb_node *rblist__find(struct rblist *rblist, const void *entry) return parent; } - return NULL; + if (create) { + new_node = rblist->node_new(rblist, entry); + if (new_node) { + rb_link_node(new_node, parent, p); + rb_insert_color(new_node, &rblist->entries); + ++rblist->nr_entries; + } + } + + return new_node; +} + +struct rb_node *rblist__find(struct rblist *rblist, const void *entry) +{ + return __rblist__findnew(rblist, entry, false); +} + +struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry) +{ + return __rblist__findnew(rblist, entry, true); } void rblist__init(struct rblist *rblist) @@ -87,8 +109,7 @@ void rblist__delete(struct rblist *rblist) while (next) { pos = next; next = rb_next(pos); - rb_erase(pos, &rblist->entries); - rblist->node_delete(rblist, pos); + rblist__remove_node(rblist, pos); } free(rblist); } diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h index 6d0cae5ae83..ff9913b994c 100644 --- a/tools/perf/util/rblist.h +++ b/tools/perf/util/rblist.h @@ -32,6 +32,7 @@ void rblist__delete(struct rblist *rblist); int rblist__add_node(struct rblist *rblist, const void *new_entry); void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node); struct rb_node *rblist__find(struct rblist *rblist, const void *entry); +struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry); struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx); static inline bool rblist__empty(const struct rblist *rblist) diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c new file mode 100644 index 00000000000..049e0a09ccd --- /dev/null +++ b/tools/perf/util/record.c @@ -0,0 +1,215 @@ +#include "evlist.h" +#include "evsel.h" +#include "cpumap.h" +#include "parse-events.h" +#include <api/fs/fs.h> +#include "util.h" + +typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel); + +static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str) +{ + struct perf_evlist *evlist; + struct perf_evsel *evsel; + int err = -EAGAIN, fd; + + evlist = perf_evlist__new(); + if (!evlist) + return -ENOMEM; + + if (parse_events(evlist, str)) + goto out_delete; + + evsel = perf_evlist__first(evlist); + + fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0); + if (fd < 0) + goto out_delete; + close(fd); + + fn(evsel); + + fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0); + if (fd < 0) { + if (errno == EINVAL) + err = -EINVAL; + goto out_delete; + } + close(fd); + err = 0; + +out_delete: + perf_evlist__delete(evlist); + return err; +} + +static bool perf_probe_api(setup_probe_fn_t fn) +{ + const char *try[] = 
{"cycles:u", "instructions:u", "cpu-clock", NULL}; + struct cpu_map *cpus; + int cpu, ret, i = 0; + + cpus = cpu_map__new(NULL); + if (!cpus) + return false; + cpu = cpus->map[0]; + cpu_map__delete(cpus); + + do { + ret = perf_do_probe_api(fn, cpu, try[i++]); + if (!ret) + return true; + } while (ret == -EAGAIN && try[i]); + + return false; +} + +static void perf_probe_sample_identifier(struct perf_evsel *evsel) +{ + evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER; +} + +bool perf_can_sample_identifier(void) +{ + return perf_probe_api(perf_probe_sample_identifier); +} + +void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts) +{ + struct perf_evsel *evsel; + bool use_sample_identifier = false; + + /* + * Set the evsel leader links before we configure attributes, + * since some might depend on this info. + */ + if (opts->group) + perf_evlist__set_leader(evlist); + + if (evlist->cpus->map[0] < 0) + opts->no_inherit = true; + + evlist__for_each(evlist, evsel) + perf_evsel__config(evsel, opts); + + if (evlist->nr_entries > 1) { + struct perf_evsel *first = perf_evlist__first(evlist); + + evlist__for_each(evlist, evsel) { + if (evsel->attr.sample_type == first->attr.sample_type) + continue; + use_sample_identifier = perf_can_sample_identifier(); + break; + } + evlist__for_each(evlist, evsel) + perf_evsel__set_sample_id(evsel, use_sample_identifier); + } + + perf_evlist__set_id_pos(evlist); +} + +static int get_max_rate(unsigned int *rate) +{ + char path[PATH_MAX]; + const char *procfs = procfs__mountpoint(); + + if (!procfs) + return -1; + + snprintf(path, PATH_MAX, + "%s/sys/kernel/perf_event_max_sample_rate", procfs); + + return filename__read_int(path, (int *) rate); +} + +static int record_opts__config_freq(struct record_opts *opts) +{ + bool user_freq = opts->user_freq != UINT_MAX; + unsigned int max_rate; + + if (opts->user_interval != ULLONG_MAX) + opts->default_interval = opts->user_interval; + if (user_freq) + opts->freq = opts->user_freq; + + /* + * User specified count overrides default frequency. + */ + if (opts->default_interval) + opts->freq = 0; + else if (opts->freq) { + opts->default_interval = opts->freq; + } else { + pr_err("frequency and count are zero, aborting\n"); + return -1; + } + + if (get_max_rate(&max_rate)) + return 0; + + /* + * User specified frequency is over current maximum. + */ + if (user_freq && (max_rate < opts->freq)) { + pr_err("Maximum frequency rate (%u) reached.\n" + "Please use -F freq option with lower value or consider\n" + "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n", + max_rate); + return -1; + } + + /* + * Default frequency is over current maximum. + */ + if (max_rate < opts->freq) { + pr_warning("Lowering default frequency rate to %u.\n" + "Please consider tweaking " + "/proc/sys/kernel/perf_event_max_sample_rate.\n", + max_rate); + opts->freq = max_rate; + } + + return 0; +} + +int record_opts__config(struct record_opts *opts) +{ + return record_opts__config_freq(opts); +} + +bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) +{ + struct perf_evlist *temp_evlist; + struct perf_evsel *evsel; + int err, fd, cpu; + bool ret = false; + + temp_evlist = perf_evlist__new(); + if (!temp_evlist) + return false; + + err = parse_events(temp_evlist, str); + if (err) + goto out_delete; + + evsel = perf_evlist__last(temp_evlist); + + if (!evlist || cpu_map__empty(evlist->cpus)) { + struct cpu_map *cpus = cpu_map__new(NULL); + + cpu = cpus ? 
cpus->map[0] : 0; + cpu_map__delete(cpus); + } else { + cpu = evlist->cpus->map[0]; + } + + fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0); + if (fd >= 0) { + close(fd); + ret = true; + } + +out_delete: + perf_evlist__delete(temp_evlist); + return ret; +} diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index f80605eb185..af7da565a75 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -194,8 +194,7 @@ static void define_event_symbols(struct event_format *event, zero_flag_atom = 0; break; case PRINT_FIELD: - if (cur_field_name) - free(cur_field_name); + free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: @@ -216,6 +215,7 @@ static void define_event_symbols(struct event_format *event, case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: case PRINT_STRING: + case PRINT_BITMASK: break; case PRINT_TYPE: define_event_symbols(event, ev_name, args->typecast.item); @@ -257,11 +257,9 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel) return event; } -static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, - struct perf_sample *sample, +static void perl_process_tracepoint(struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __maybe_unused, - struct addr_location *al) + struct thread *thread) { struct format_field *field; static char handler[256]; @@ -272,8 +270,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; - struct thread *thread = al->thread; - char *comm = thread->comm; + const char *comm = thread__comm_str(thread); dSP; @@ -282,7 +279,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, event = find_cache_event(evsel); if (!event) - die("ug! no event found for type %" PRIu64, evsel->attr.config); + die("ug! 
no event found for type %" PRIu64, (u64)evsel->attr.config); pid = raw_field_value(event, "common_pid", data); @@ -292,6 +289,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; + scripting_context->pevent = evsel->tp_format->pevent; ENTER; SAVETMPS; @@ -348,9 +346,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, static void perl_process_event_generic(union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine __maybe_unused, - struct addr_location *al __maybe_unused) + struct perf_evsel *evsel) { dSP; @@ -375,11 +371,11 @@ static void perl_process_event_generic(union perf_event *event, static void perl_process_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine, - struct addr_location *al) + struct thread *thread, + struct addr_location *al __maybe_unused) { - perl_process_tracepoint(event, sample, evsel, machine, al); - perl_process_event_generic(event, sample, evsel, machine, al); + perl_process_tracepoint(sample, evsel, thread); + perl_process_event_generic(event, sample, evsel); } static void run_start_sub(void) diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 730c6630cba..1c419321f70 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -32,7 +32,6 @@ #include "../event.h" #include "../thread.h" #include "../trace-event.h" -#include "../evsel.h" PyMODINIT_FUNC initperf_trace_context(void); @@ -57,6 +56,17 @@ static void handler_call_die(const char *handler_name) Py_FatalError("problem in Python trace event handler"); } +/* + * Insert val into into the dictionary and decrement the reference counter. + * This is necessary for dictionaries since PyDict_SetItemString() does not + * steal a reference, as opposed to PyTuple_SetItem(). + */ +static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val) +{ + PyDict_SetItemString(dict, key, val); + Py_DECREF(val); +} + static void define_value(enum print_arg_type field_type, const char *ev_name, const char *field_name, @@ -151,8 +161,7 @@ static void define_event_symbols(struct event_format *event, zero_flag_atom = 0; break; case PRINT_FIELD: - if (cur_field_name) - free(cur_field_name); + free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: @@ -188,6 +197,7 @@ static void define_event_symbols(struct event_format *event, case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: case PRINT_FUNC: + case PRINT_BITMASK: /* we should warn... 
*/ return; } @@ -221,12 +231,10 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel) return event; } -static void python_process_tracepoint(union perf_event *perf_event - __maybe_unused, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine __maybe_unused, - struct addr_location *al) +static void python_process_tracepoint(struct perf_sample *sample, + struct perf_evsel *evsel, + struct thread *thread, + struct addr_location *al) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; static char handler_name[256]; @@ -239,8 +247,7 @@ static void python_process_tracepoint(union perf_event *perf_event int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; - struct thread *thread = al->thread; - char *comm = thread->comm; + const char *comm = thread__comm_str(thread); t = PyTuple_New(MAX_FIELDS); if (!t) @@ -266,6 +273,7 @@ static void python_process_tracepoint(union perf_event *perf_event ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; + scripting_context->pevent = evsel->tp_format->pevent; context = PyCObject_FromVoidPtr(scripting_context, NULL); @@ -279,11 +287,11 @@ static void python_process_tracepoint(union perf_event *perf_event PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); PyTuple_SetItem(t, n++, PyString_FromString(comm)); } else { - PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu)); - PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s)); - PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns)); - PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid)); - PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm)); + pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu)); + pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s)); + pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns)); + pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid)); + pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm)); } for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { @@ -313,7 +321,7 @@ static void python_process_tracepoint(union perf_event *perf_event if (handler) PyTuple_SetItem(t, n++, obj); else - PyDict_SetItemString(dict, field->name, obj); + pydict_set_item_string_decref(dict, field->name, obj); } if (!handler) @@ -340,17 +348,14 @@ static void python_process_tracepoint(union perf_event *perf_event Py_DECREF(t); } -static void python_process_general_event(union perf_event *perf_event - __maybe_unused, - struct perf_sample *sample, +static void python_process_general_event(struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __maybe_unused, + struct thread *thread, struct addr_location *al) { PyObject *handler, *retval, *t, *dict; static char handler_name[64]; unsigned n = 0; - struct thread *thread = al->thread; /* * Use the MAX_FIELDS to make the function expandable, though @@ -370,21 +375,21 @@ static void python_process_general_event(union perf_event *perf_event if (!handler || !PyCallable_Check(handler)) goto exit; - PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); - PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize( + pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); + pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize( (const char *)&evsel->attr, 
sizeof(evsel->attr))); - PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize( + pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize( (const char *)sample, sizeof(*sample))); - PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize( + pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize( (const char *)sample->raw_data, sample->raw_size)); - PyDict_SetItemString(dict, "comm", - PyString_FromString(thread->comm)); + pydict_set_item_string_decref(dict, "comm", + PyString_FromString(thread__comm_str(thread))); if (al->map) { - PyDict_SetItemString(dict, "dso", + pydict_set_item_string_decref(dict, "dso", PyString_FromString(al->map->dso->name)); } if (al->sym) { - PyDict_SetItemString(dict, "symbol", + pydict_set_item_string_decref(dict, "symbol", PyString_FromString(al->sym->name)); } @@ -400,21 +405,19 @@ exit: Py_DECREF(t); } -static void python_process_event(union perf_event *perf_event, +static void python_process_event(union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine, + struct thread *thread, struct addr_location *al) { switch (evsel->attr.type) { case PERF_TYPE_TRACEPOINT: - python_process_tracepoint(perf_event, sample, evsel, - machine, al); + python_process_tracepoint(sample, evsel, thread, al); break; /* Reserve for future process_hw/sw/raw APIs */ default: - python_process_general_event(perf_event, sample, evsel, - machine, al); + python_process_general_event(sample, evsel, thread, al); } } @@ -620,6 +623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "%s=", f->name); if (f->flags & FIELD_IS_STRING || f->flags & FIELD_IS_FLAG || + f->flags & FIELD_IS_ARRAY || f->flags & FIELD_IS_SYMBOLIC) fprintf(ofp, "%%s"); else if (f->flags & FIELD_IS_SIGNED) diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8cdd23239c9..64a186edc7b 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1,6 +1,5 @@ -#define _FILE_OFFSET_BITS 64 - #include <linux/kernel.h> +#include <traceevent/event-parse.h> #include <byteswap.h> #include <unistd.h> @@ -14,394 +13,155 @@ #include "sort.h" #include "util.h" #include "cpumap.h" -#include "event-parse.h" #include "perf_regs.h" -#include "unwind.h" #include "vdso.h" -static int perf_session__open(struct perf_session *self, bool force) +static int perf_session__open(struct perf_session *session) { - struct stat input_stat; - - if (!strcmp(self->filename, "-")) { - self->fd_pipe = true; - self->fd = STDIN_FILENO; - - if (perf_session__read_header(self, self->fd) < 0) - pr_err("incompatible file format (rerun with -v to learn more)"); - - return 0; - } - - self->fd = open(self->filename, O_RDONLY); - if (self->fd < 0) { - int err = errno; - - pr_err("failed to open %s: %s", self->filename, strerror(err)); - if (err == ENOENT && !strcmp(self->filename, "perf.data")) - pr_err(" (try 'perf record' first)"); - pr_err("\n"); - return -errno; - } - - if (fstat(self->fd, &input_stat) < 0) - goto out_close; + struct perf_data_file *file = session->file; - if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - pr_err("file %s not owned by current user or root\n", - self->filename); - goto out_close; - } - - if (!input_stat.st_size) { - pr_info("zero-sized file (%s), nothing to do!\n", - self->filename); - goto out_close; - } - - if (perf_session__read_header(self, self->fd) < 0) { + if (perf_session__read_header(session) < 0) { 
pr_err("incompatible file format (rerun with -v to learn more)"); - goto out_close; + return -1; } - if (!perf_evlist__valid_sample_type(self->evlist)) { + if (perf_data_file__is_pipe(file)) + return 0; + + if (!perf_evlist__valid_sample_type(session->evlist)) { pr_err("non matching sample_type"); - goto out_close; + return -1; } - if (!perf_evlist__valid_sample_id_all(self->evlist)) { + if (!perf_evlist__valid_sample_id_all(session->evlist)) { pr_err("non matching sample_id_all"); - goto out_close; + return -1; } - self->size = input_stat.st_size; - return 0; + if (!perf_evlist__valid_read_format(session->evlist)) { + pr_err("non matching read_format"); + return -1; + } -out_close: - close(self->fd); - self->fd = -1; - return -1; + return 0; } void perf_session__set_id_hdr_size(struct perf_session *session) { u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); - session->host_machine.id_hdr_size = id_hdr_size; machines__set_id_hdr_size(&session->machines, id_hdr_size); } -int perf_session__create_kernel_maps(struct perf_session *self) +int perf_session__create_kernel_maps(struct perf_session *session) { - int ret = machine__create_kernel_maps(&self->host_machine); + int ret = machine__create_kernel_maps(&session->machines.host); if (ret >= 0) - ret = machines__create_guest_kernel_maps(&self->machines); + ret = machines__create_guest_kernel_maps(&session->machines); return ret; } -static void perf_session__destroy_kernel_maps(struct perf_session *self) +static void perf_session__destroy_kernel_maps(struct perf_session *session) { - machine__destroy_kernel_maps(&self->host_machine); - machines__destroy_guest_kernel_maps(&self->machines); + machines__destroy_kernel_maps(&session->machines); } -struct perf_session *perf_session__new(const char *filename, int mode, - bool force, bool repipe, - struct perf_tool *tool) +struct perf_session *perf_session__new(struct perf_data_file *file, + bool repipe, struct perf_tool *tool) { - struct perf_session *self; - struct stat st; - size_t len; + struct perf_session *session = zalloc(sizeof(*session)); - if (!filename || !strlen(filename)) { - if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) - filename = "-"; - else - filename = "perf.data"; - } - - len = strlen(filename); - self = zalloc(sizeof(*self) + len); - - if (self == NULL) + if (!session) goto out; - memcpy(self->filename, filename, len); - /* - * On 64bit we can mmap the data file in one go. No need for tiny mmap - * slices. On 32bit we use 32MB. 
- */ -#if BITS_PER_LONG == 64 - self->mmap_window = ULLONG_MAX; -#else - self->mmap_window = 32 * 1024 * 1024ULL; -#endif - self->machines = RB_ROOT; - self->repipe = repipe; - INIT_LIST_HEAD(&self->ordered_samples.samples); - INIT_LIST_HEAD(&self->ordered_samples.sample_cache); - INIT_LIST_HEAD(&self->ordered_samples.to_free); - machine__init(&self->host_machine, "", HOST_KERNEL_ID); - hists__init(&self->hists); - - if (mode == O_RDONLY) { - if (perf_session__open(self, force) < 0) + session->repipe = repipe; + INIT_LIST_HEAD(&session->ordered_samples.samples); + INIT_LIST_HEAD(&session->ordered_samples.sample_cache); + INIT_LIST_HEAD(&session->ordered_samples.to_free); + machines__init(&session->machines); + + if (file) { + if (perf_data_file__open(file)) goto out_delete; - perf_session__set_id_hdr_size(self); - } else if (mode == O_WRONLY) { + + session->file = file; + + if (perf_data_file__is_read(file)) { + if (perf_session__open(session) < 0) + goto out_close; + + perf_session__set_id_hdr_size(session); + } + } + + if (!file || perf_data_file__is_write(file)) { /* * In O_RDONLY mode this will be performed when reading the * kernel MMAP event, in perf_event__process_mmap(). */ - if (perf_session__create_kernel_maps(self) < 0) + if (perf_session__create_kernel_maps(session) < 0) goto out_delete; } if (tool && tool->ordering_requires_timestamps && - tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) { + tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); tool->ordered_samples = false; } -out: - return self; -out_delete: - perf_session__delete(self); - return NULL; -} - -static void machine__delete_dead_threads(struct machine *machine) -{ - struct thread *n, *t; + return session; - list_for_each_entry_safe(t, n, &machine->dead_threads, node) { - list_del(&t->node); - thread__delete(t); - } + out_close: + perf_data_file__close(file); + out_delete: + perf_session__delete(session); + out: + return NULL; } static void perf_session__delete_dead_threads(struct perf_session *session) { - machine__delete_dead_threads(&session->host_machine); -} - -static void machine__delete_threads(struct machine *self) -{ - struct rb_node *nd = rb_first(&self->threads); - - while (nd) { - struct thread *t = rb_entry(nd, struct thread, rb_node); - - rb_erase(&t->rb_node, &self->threads); - nd = rb_next(nd); - thread__delete(t); - } + machine__delete_dead_threads(&session->machines.host); } static void perf_session__delete_threads(struct perf_session *session) { - machine__delete_threads(&session->host_machine); -} - -void perf_session__delete(struct perf_session *self) -{ - perf_session__destroy_kernel_maps(self); - perf_session__delete_dead_threads(self); - perf_session__delete_threads(self); - machine__exit(&self->host_machine); - close(self->fd); - free(self); - vdso__exit(); -} - -void machine__remove_thread(struct machine *self, struct thread *th) -{ - self->last_match = NULL; - rb_erase(&th->rb_node, &self->threads); - /* - * We may have references to this thread, for instance in some hist_entry - * instances, so just move them to a separate list. 
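The rewritten perf_session__new() above takes a struct perf_data_file instead of the old filename plus O_RDONLY/O_WRONLY mode pair, and defers opening, header reading and validation to perf_data_file__open() and perf_session__open(). A rough caller-side fragment of the new sequence; the .path and .mode field names and the PERF_DATA_MODE_READ constant are assumptions not visible in these hunks, and the snippet relies on perf's internal headers:

	struct perf_data_file file = {
		.path = "perf.data",		/* assumed field name  */
		.mode = PERF_DATA_MODE_READ,	/* assumed enum value  */
	};
	struct perf_session *session;

	session = perf_session__new(&file, false, &tool);  /* tool: caller's perf_tool */
	if (session == NULL)
		return -1;

	/* ... perf_session__process_events(session, &tool); ... */
	perf_session__delete(session);
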
- */ - list_add_tail(&th->node, &self->dead_threads); -} - -static bool symbol__match_parent_regex(struct symbol *sym) -{ - if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) - return 1; - - return 0; -} - -static const u8 cpumodes[] = { - PERF_RECORD_MISC_USER, - PERF_RECORD_MISC_KERNEL, - PERF_RECORD_MISC_GUEST_USER, - PERF_RECORD_MISC_GUEST_KERNEL -}; -#define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) - -static void ip__resolve_ams(struct machine *self, struct thread *thread, - struct addr_map_symbol *ams, - u64 ip) -{ - struct addr_location al; - size_t i; - u8 m; - - memset(&al, 0, sizeof(al)); - - for (i = 0; i < NCPUMODES; i++) { - m = cpumodes[i]; - /* - * We cannot use the header.misc hint to determine whether a - * branch stack address is user, kernel, guest, hypervisor. - * Branches may straddle the kernel/user/hypervisor boundaries. - * Thus, we have to try consecutively until we find a match - * or else, the symbol is unknown - */ - thread__find_addr_location(thread, self, m, MAP__FUNCTION, - ip, &al, NULL); - if (al.sym) - goto found; - } -found: - ams->addr = ip; - ams->al_addr = al.addr; - ams->sym = al.sym; - ams->map = al.map; + machine__delete_threads(&session->machines.host); } -struct branch_info *machine__resolve_bstack(struct machine *self, - struct thread *thr, - struct branch_stack *bs) +static void perf_session_env__delete(struct perf_session_env *env) { - struct branch_info *bi; - unsigned int i; - - bi = calloc(bs->nr, sizeof(struct branch_info)); - if (!bi) - return NULL; - - for (i = 0; i < bs->nr; i++) { - ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to); - ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from); - bi[i].flags = bs->entries[i].flags; - } - return bi; -} - -static int machine__resolve_callchain_sample(struct machine *machine, - struct thread *thread, - struct ip_callchain *chain, - struct symbol **parent) - -{ - u8 cpumode = PERF_RECORD_MISC_USER; - unsigned int i; - int err; - - callchain_cursor_reset(&callchain_cursor); - - if (chain->nr > PERF_MAX_STACK_DEPTH) { - pr_warning("corrupted callchain. skipping...\n"); - return 0; - } - - for (i = 0; i < chain->nr; i++) { - u64 ip; - struct addr_location al; - - if (callchain_param.order == ORDER_CALLEE) - ip = chain->ips[i]; - else - ip = chain->ips[chain->nr - i - 1]; - - if (ip >= PERF_CONTEXT_MAX) { - switch (ip) { - case PERF_CONTEXT_HV: - cpumode = PERF_RECORD_MISC_HYPERVISOR; - break; - case PERF_CONTEXT_KERNEL: - cpumode = PERF_RECORD_MISC_KERNEL; - break; - case PERF_CONTEXT_USER: - cpumode = PERF_RECORD_MISC_USER; - break; - default: - pr_debug("invalid callchain context: " - "%"PRId64"\n", (s64) ip); - /* - * It seems the callchain is corrupted. - * Discard all. 
- */ - callchain_cursor_reset(&callchain_cursor); - return 0; - } - continue; - } + zfree(&env->hostname); + zfree(&env->os_release); + zfree(&env->version); + zfree(&env->arch); + zfree(&env->cpu_desc); + zfree(&env->cpuid); - al.filtered = false; - thread__find_addr_location(thread, machine, cpumode, - MAP__FUNCTION, ip, &al, NULL); - if (al.sym != NULL) { - if (sort__has_parent && !*parent && - symbol__match_parent_regex(al.sym)) - *parent = al.sym; - if (!symbol_conf.use_callchain) - break; - } - - err = callchain_cursor_append(&callchain_cursor, - ip, al.map, al.sym); - if (err) - return err; - } - - return 0; -} - -static int unwind_entry(struct unwind_entry *entry, void *arg) -{ - struct callchain_cursor *cursor = arg; - return callchain_cursor_append(cursor, entry->ip, - entry->map, entry->sym); + zfree(&env->cmdline); + zfree(&env->sibling_cores); + zfree(&env->sibling_threads); + zfree(&env->numa_nodes); + zfree(&env->pmu_mappings); } -int machine__resolve_callchain(struct machine *machine, - struct perf_evsel *evsel, - struct thread *thread, - struct perf_sample *sample, - struct symbol **parent) - +void perf_session__delete(struct perf_session *session) { - int ret; - - callchain_cursor_reset(&callchain_cursor); - - ret = machine__resolve_callchain_sample(machine, thread, - sample->callchain, parent); - if (ret) - return ret; - - /* Can we do dwarf post unwind? */ - if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && - (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) - return 0; - - /* Bail out if nothing was captured. */ - if ((!sample->user_regs.regs) || - (!sample->user_stack.size)) - return 0; - - return unwind__get_entries(unwind_entry, &callchain_cursor, machine, - thread, evsel->attr.sample_regs_user, - sample); - + perf_session__destroy_kernel_maps(session); + perf_session__delete_dead_threads(session); + perf_session__delete_threads(session); + perf_session_env__delete(&session->header.env); + machines__exit(&session->machines); + if (session->file) + perf_data_file__close(session->file); + free(session); + vdso__exit(); } -static int process_event_synth_tracing_data_stub(union perf_event *event +static int process_event_synth_tracing_data_stub(struct perf_tool *tool + __maybe_unused, + union perf_event *event __maybe_unused, struct perf_session *session __maybe_unused) @@ -410,7 +170,8 @@ static int process_event_synth_tracing_data_stub(union perf_event *event return 0; } -static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, +static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, struct perf_evlist **pevlist __maybe_unused) { @@ -446,23 +207,18 @@ static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, return 0; } -static int process_event_type_stub(struct perf_tool *tool __maybe_unused, - union perf_event *event __maybe_unused) -{ - dump_printf(": unhandled!\n"); - return 0; -} - static int process_finished_round(struct perf_tool *tool, union perf_event *event, struct perf_session *session); -static void perf_tool__fill_defaults(struct perf_tool *tool) +void perf_tool__fill_defaults(struct perf_tool *tool) { if (tool->sample == NULL) tool->sample = process_event_sample_stub; if (tool->mmap == NULL) tool->mmap = process_event_stub; + if (tool->mmap2 == NULL) + tool->mmap2 = process_event_stub; if (tool->comm == NULL) tool->comm = process_event_stub; if (tool->fork == NULL) @@ -479,8 +235,6 @@ static void perf_tool__fill_defaults(struct 
perf_tool *tool) tool->unthrottle = process_event_stub; if (tool->attr == NULL) tool->attr = process_event_synth_attr_stub; - if (tool->event_type == NULL) - tool->event_type = process_event_type_stub; if (tool->tracing_data == NULL) tool->tracing_data = process_event_synth_tracing_data_stub; if (tool->build_id == NULL) @@ -493,27 +247,6 @@ static void perf_tool__fill_defaults(struct perf_tool *tool) } } -void mem_bswap_32(void *src, int byte_size) -{ - u32 *m = src; - while (byte_size > 0) { - *m = bswap_32(*m); - byte_size -= sizeof(u32); - ++m; - } -} - -void mem_bswap_64(void *src, int byte_size) -{ - u64 *m = src; - - while (byte_size > 0) { - *m = bswap_64(*m); - byte_size -= sizeof(u64); - ++m; - } -} - static void swap_sample_id_all(union perf_event *event, void *data) { void *end = (void *) event + event->header.size; @@ -560,6 +293,25 @@ static void perf_event__mmap_swap(union perf_event *event, } } +static void perf_event__mmap2_swap(union perf_event *event, + bool sample_id_all) +{ + event->mmap2.pid = bswap_32(event->mmap2.pid); + event->mmap2.tid = bswap_32(event->mmap2.tid); + event->mmap2.start = bswap_64(event->mmap2.start); + event->mmap2.len = bswap_64(event->mmap2.len); + event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); + event->mmap2.maj = bswap_32(event->mmap2.maj); + event->mmap2.min = bswap_32(event->mmap2.min); + event->mmap2.ino = bswap_64(event->mmap2.ino); + + if (sample_id_all) { + void *data = &event->mmap2.filename; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} static void perf_event__task_swap(union perf_event *event, bool sample_id_all) { event->fork.pid = bswap_32(event->fork.pid); @@ -585,6 +337,17 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all) swap_sample_id_all(event, &event->read + 1); } +static void perf_event__throttle_swap(union perf_event *event, + bool sample_id_all) +{ + event->throttle.time = bswap_64(event->throttle.time); + event->throttle.id = bswap_64(event->throttle.id); + event->throttle.stream_id = bswap_64(event->throttle.stream_id); + + if (sample_id_all) + swap_sample_id_all(event, &event->throttle + 1); +} + static u8 revbyte(u8 b) { int rev = (b >> 4) | ((b & 0xf) << 4); @@ -630,6 +393,9 @@ void perf_event__attr_swap(struct perf_event_attr *attr) attr->bp_type = bswap_32(attr->bp_type); attr->bp_addr = bswap_64(attr->bp_addr); attr->bp_len = bswap_64(attr->bp_len); + attr->branch_sample_type = bswap_64(attr->branch_sample_type); + attr->sample_regs_user = bswap_64(attr->sample_regs_user); + attr->sample_stack_user = bswap_32(attr->sample_stack_user); swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); } @@ -664,11 +430,14 @@ typedef void (*perf_event__swap_op)(union perf_event *event, static perf_event__swap_op perf_event__swap_ops[] = { [PERF_RECORD_MMAP] = perf_event__mmap_swap, + [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, [PERF_RECORD_COMM] = perf_event__comm_swap, [PERF_RECORD_FORK] = perf_event__task_swap, [PERF_RECORD_EXIT] = perf_event__task_swap, [PERF_RECORD_LOST] = perf_event__all64_swap, [PERF_RECORD_READ] = perf_event__read_swap, + [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, + [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, [PERF_RECORD_SAMPLE] = perf_event__all64_swap, [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, @@ -704,7 +473,7 @@ static int perf_session_deliver_event(struct perf_session *session, u64 file_offset); static int 
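The new perf_event__mmap2_swap() and perf_event__throttle_swap() helpers above follow the file's established pattern for cross-endian data files: swap each fixed-width field individually with bswap_32()/bswap_64(), then, if sample_id_all is in use, skip past the variable-length filename (padded to a u64 boundary via PERF_ALIGN()) and swap the trailing sample-id block. A self-contained illustration of the same pattern on a made-up record; ALIGN_UP and the struct layout here are stand-ins, not perf's:

	#include <byteswap.h>
	#include <stdint.h>
	#include <string.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	struct fake_mmap_event {		/* made-up layout for illustration */
		uint32_t pid, tid;
		uint64_t start;
		char	 filename[64];		/* NUL-terminated string, then the
						 * sample-id block after padding  */
	};

	static void fake_mmap_event__swap(struct fake_mmap_event *ev, int sample_id_all)
	{
		ev->pid   = bswap_32(ev->pid);	/* field by field: widths differ */
		ev->tid   = bswap_32(ev->tid);
		ev->start = bswap_64(ev->start);

		if (sample_id_all) {
			/* sample-id data starts at the next u64 boundary after
			 * the string; a real swap_sample_id_all() would go on
			 * from here. */
			char *data = ev->filename;

			data += ALIGN_UP(strlen(ev->filename) + 1, sizeof(uint64_t));
			(void)data;
		}
	}

	int main(void)
	{
		struct fake_mmap_event ev = { .pid = 1, .tid = 2, .start = 3,
					      .filename = "x" };

		fake_mmap_event__swap(&ev, 1);
		return 0;
	}
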
flush_sample_queue(struct perf_session *s, - struct perf_tool *tool) + struct perf_tool *tool) { struct ordered_samples *os = &s->ordered_samples; struct list_head *head = &os->samples; @@ -712,13 +481,20 @@ static int flush_sample_queue(struct perf_session *s, struct perf_sample sample; u64 limit = os->next_flush; u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; - unsigned idx = 0, progress_next = os->nr_samples / 16; + bool show_progress = limit == ULLONG_MAX; + struct ui_progress prog; int ret; if (!tool->ordered_samples || !limit) return 0; + if (show_progress) + ui_progress__init(&prog, os->nr_samples, "Processing time ordered events..."); + list_for_each_entry_safe(iter, tmp, head, list) { + if (session_done()) + return 0; + if (iter->timestamp > limit) break; @@ -735,11 +511,9 @@ static int flush_sample_queue(struct perf_session *s, os->last_flush = iter->timestamp; list_del(&iter->list); list_add(&iter->list, &os->sample_cache); - if (++idx >= progress_next) { - progress_next += os->nr_samples / 16; - ui_progress__update(idx, os->nr_samples, - "Processing time ordered events..."); - } + + if (show_progress) + ui_progress__update(&prog, 1); } if (list_empty(head)) { @@ -852,7 +626,7 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s) #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) -static int perf_session_queue_event(struct perf_session *s, union perf_event *event, +int perf_session_queue_event(struct perf_session *s, union perf_event *event, struct perf_sample *sample, u64 file_offset) { struct ordered_samples *os = &s->ordered_samples; @@ -928,11 +702,12 @@ static void regs_dump__printf(u64 mask, u64 *regs) } } -static void regs_user__printf(struct perf_sample *sample, u64 mask) +static void regs_user__printf(struct perf_sample *sample) { struct regs_dump *user_regs = &sample->user_regs; if (user_regs->regs) { + u64 mask = user_regs->mask; printf("... user regs: mask 0x%" PRIx64 "\n", mask); regs_dump__printf(mask, user_regs->regs); } @@ -948,7 +723,7 @@ static void perf_session__print_tstamp(struct perf_session *session, union perf_event *event, struct perf_sample *sample) { - u64 sample_type = perf_evlist__sample_type(session->evlist); + u64 sample_type = __perf_evlist__combined_sample_type(session->evlist); if (event->header.type != PERF_RECORD_SAMPLE && !perf_evlist__sample_id_all(session->evlist)) { @@ -963,6 +738,36 @@ static void perf_session__print_tstamp(struct perf_session *session, printf("%" PRIu64 " ", sample->time); } +static void sample_read__printf(struct perf_sample *sample, u64 read_format) +{ + printf("... sample_read:\n"); + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + printf("...... time enabled %016" PRIx64 "\n", + sample->read.time_enabled); + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + printf("...... time running %016" PRIx64 "\n", + sample->read.time_running); + + if (read_format & PERF_FORMAT_GROUP) { + u64 i; + + printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); + + for (i = 0; i < sample->read.group.nr; i++) { + struct sample_read_value *value; + + value = &sample->read.group.values[i]; + printf("..... id %016" PRIx64 + ", value %016" PRIx64 "\n", + value->id, value->value); + } + } else + printf("..... 
id %016" PRIx64 ", value %016" PRIx64 "\n", + sample->read.one.id, sample->read.one.value); +} + static void dump_event(struct perf_session *session, union perf_event *event, u64 file_offset, struct perf_sample *sample) { @@ -989,7 +794,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event, if (!dump_trace) return; - printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", + printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", event->header.misc, sample->pid, sample->tid, sample->ip, sample->period, sample->addr); @@ -1002,32 +807,120 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event, branch_stack__printf(sample); if (sample_type & PERF_SAMPLE_REGS_USER) - regs_user__printf(sample, evsel->attr.sample_regs_user); + regs_user__printf(sample); if (sample_type & PERF_SAMPLE_STACK_USER) stack_user__printf(&sample->user_stack); + + if (sample_type & PERF_SAMPLE_WEIGHT) + printf("... weight: %" PRIu64 "\n", sample->weight); + + if (sample_type & PERF_SAMPLE_DATA_SRC) + printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); + + if (sample_type & PERF_SAMPLE_TRANSACTION) + printf("... transaction: %" PRIx64 "\n", sample->transaction); + + if (sample_type & PERF_SAMPLE_READ) + sample_read__printf(sample, evsel->attr.read_format); } static struct machine * perf_session__find_machine_for_cpumode(struct perf_session *session, - union perf_event *event) + union perf_event *event, + struct perf_sample *sample) { const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct machine *machine; if (perf_guest && ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || (cpumode == PERF_RECORD_MISC_GUEST_USER))) { u32 pid; - if (event->header.type == PERF_RECORD_MMAP) + if (event->header.type == PERF_RECORD_MMAP + || event->header.type == PERF_RECORD_MMAP2) pid = event->mmap.pid; else - pid = event->ip.pid; + pid = sample->pid; + + machine = perf_session__find_machine(session, pid); + if (!machine) + machine = perf_session__findnew_machine(session, + DEFAULT_GUEST_KERNEL_ID); + return machine; + } + + return &session->machines.host; +} + +static int deliver_sample_value(struct perf_session *session, + struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct sample_read_value *v, + struct machine *machine) +{ + struct perf_sample_id *sid; + + sid = perf_evlist__id2sid(session->evlist, v->id); + if (sid) { + sample->id = v->id; + sample->period = v->value - sid->period; + sid->period = v->value; + } - return perf_session__findnew_machine(session, pid); + if (!sid || sid->evsel == NULL) { + ++session->stats.nr_unknown_id; + return 0; } - return perf_session__find_host_machine(session); + return tool->sample(tool, event, sample, sid->evsel, machine); +} + +static int deliver_sample_group(struct perf_session *session, + struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct machine *machine) +{ + int ret = -EINVAL; + u64 i; + + for (i = 0; i < sample->read.group.nr; i++) { + ret = deliver_sample_value(session, tool, event, sample, + &sample->read.group.values[i], + machine); + if (ret) + break; + } + + return ret; +} + +static int +perf_session__deliver_sample(struct perf_session *session, + struct perf_tool *tool, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + /* We know evsel != NULL. 
*/ + u64 sample_type = evsel->attr.sample_type; + u64 read_format = evsel->attr.read_format; + + /* Standard sample delievery. */ + if (!(sample_type & PERF_SAMPLE_READ)) + return tool->sample(tool, event, sample, evsel, machine); + + /* For PERF_SAMPLE_READ we have either single or group mode. */ + if (read_format & PERF_FORMAT_GROUP) + return deliver_sample_group(session, tool, event, sample, + machine); + else + return deliver_sample_value(session, tool, event, sample, + &sample->read.one, machine); } static int perf_session_deliver_event(struct perf_session *session, @@ -1059,22 +952,26 @@ static int perf_session_deliver_event(struct perf_session *session, hists__inc_nr_events(&evsel->hists, event->header.type); } - machine = perf_session__find_machine_for_cpumode(session, event); + machine = perf_session__find_machine_for_cpumode(session, event, + sample); switch (event->header.type) { case PERF_RECORD_SAMPLE: dump_sample(evsel, event, sample); if (evsel == NULL) { - ++session->hists.stats.nr_unknown_id; + ++session->stats.nr_unknown_id; return 0; } if (machine == NULL) { - ++session->hists.stats.nr_unprocessable_samples; + ++session->stats.nr_unprocessable_samples; return 0; } - return tool->sample(tool, event, sample, evsel, machine); + return perf_session__deliver_sample(session, tool, event, + sample, evsel, machine); case PERF_RECORD_MMAP: return tool->mmap(tool, event, sample, machine); + case PERF_RECORD_MMAP2: + return tool->mmap2(tool, event, sample, machine); case PERF_RECORD_COMM: return tool->comm(tool, event, sample, machine); case PERF_RECORD_FORK: @@ -1083,7 +980,7 @@ static int perf_session_deliver_event(struct perf_session *session, return tool->exit(tool, event, sample, machine); case PERF_RECORD_LOST: if (tool->lost == perf_event__process_lost) - session->hists.stats.total_lost += event->lost.lost; + session->stats.total_lost += event->lost.lost; return tool->lost(tool, event, sample, machine); case PERF_RECORD_READ: return tool->read(tool, event, sample, evsel, machine); @@ -1092,30 +989,15 @@ static int perf_session_deliver_event(struct perf_session *session, case PERF_RECORD_UNTHROTTLE: return tool->unthrottle(tool, event, sample, machine); default: - ++session->hists.stats.nr_unknown_events; + ++session->stats.nr_unknown_events; return -1; } } -static int perf_session__preprocess_sample(struct perf_session *session, - union perf_event *event, struct perf_sample *sample) -{ - if (event->header.type != PERF_RECORD_SAMPLE || - !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN)) - return 0; - - if (!ip_callchain__valid(sample->callchain, event)) { - pr_debug("call-chain problem with event, skipping it.\n"); - ++session->hists.stats.nr_invalid_chains; - session->hists.stats.total_invalid_chains += sample->period; - return -EINVAL; - } - return 0; -} - static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, struct perf_tool *tool, u64 file_offset) { + int fd = perf_data_file__fd(session->file); int err; dump_event(session, event, file_offset, NULL); @@ -1123,16 +1005,20 @@ static int perf_session__process_user_event(struct perf_session *session, union /* These events are processed right away */ switch (event->header.type) { case PERF_RECORD_HEADER_ATTR: - err = tool->attr(event, &session->evlist); + err = tool->attr(tool, event, &session->evlist); if (err == 0) perf_session__set_id_hdr_size(session); return err; case PERF_RECORD_HEADER_EVENT_TYPE: - return tool->event_type(tool, event); + /* + * Depreceated, 
but we need to handle it for sake + * of old data files create in pipe mode. + */ + return 0; case PERF_RECORD_HEADER_TRACING_DATA: /* setup for reading amidst mmap */ - lseek(session->fd, file_offset, SEEK_SET); - return tool->tracing_data(event, session); + lseek(fd, file_offset, SEEK_SET); + return tool->tracing_data(tool, event, session); case PERF_RECORD_HEADER_BUILD_ID: return tool->build_id(tool, event, session); case PERF_RECORD_FINISHED_ROUND: @@ -1165,7 +1051,7 @@ static int perf_session__process_event(struct perf_session *session, if (event->header.type >= PERF_RECORD_HEADER_MAX) return -EINVAL; - hists__inc_nr_events(&session->hists, event->header.type); + events_stats__inc(&session->stats, event->header.type); if (event->header.type >= PERF_RECORD_USER_TYPE_START) return perf_session__process_user_event(session, event, tool, file_offset); @@ -1177,10 +1063,6 @@ static int perf_session__process_event(struct perf_session *session, if (ret) return ret; - /* Preprocess sample records - precheck callchains */ - if (perf_session__preprocess_sample(session, event, &sample)) - return 0; - if (tool->ordered_samples) { ret = perf_session_queue_event(session, event, &sample, file_offset); @@ -1192,23 +1074,23 @@ static int perf_session__process_event(struct perf_session *session, file_offset); } -void perf_event_header__bswap(struct perf_event_header *self) +void perf_event_header__bswap(struct perf_event_header *hdr) { - self->type = bswap_32(self->type); - self->misc = bswap_16(self->misc); - self->size = bswap_16(self->size); + hdr->type = bswap_32(hdr->type); + hdr->misc = bswap_16(hdr->misc); + hdr->size = bswap_16(hdr->size); } struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) { - return machine__findnew_thread(&session->host_machine, pid); + return machine__findnew_thread(&session->machines.host, 0, pid); } -static struct thread *perf_session__register_idle_thread(struct perf_session *self) +static struct thread *perf_session__register_idle_thread(struct perf_session *session) { - struct thread *thread = perf_session__findnew(self, 0); + struct thread *thread = perf_session__findnew(session, 0); - if (thread == NULL || thread__set_comm(thread, "swapper")) { + if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { pr_err("problem inserting idle task.\n"); thread = NULL; } @@ -1220,54 +1102,54 @@ static void perf_session__warn_about_errors(const struct perf_session *session, const struct perf_tool *tool) { if (tool->lost == perf_event__process_lost && - session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { + session->stats.nr_events[PERF_RECORD_LOST] != 0) { ui__warning("Processed %d events and lost %d chunks!\n\n" "Check IO/CPU overload!\n\n", - session->hists.stats.nr_events[0], - session->hists.stats.nr_events[PERF_RECORD_LOST]); + session->stats.nr_events[0], + session->stats.nr_events[PERF_RECORD_LOST]); } - if (session->hists.stats.nr_unknown_events != 0) { + if (session->stats.nr_unknown_events != 0) { ui__warning("Found %u unknown events!\n\n" "Is this an older tool processing a perf.data " "file generated by a more recent tool?\n\n" "If that is not the case, consider " "reporting to linux-kernel@vger.kernel.org.\n\n", - session->hists.stats.nr_unknown_events); + session->stats.nr_unknown_events); } - if (session->hists.stats.nr_unknown_id != 0) { + if (session->stats.nr_unknown_id != 0) { ui__warning("%u samples with id not present in the header\n", - session->hists.stats.nr_unknown_id); + session->stats.nr_unknown_id); } - if 
(session->hists.stats.nr_invalid_chains != 0) { + if (session->stats.nr_invalid_chains != 0) { ui__warning("Found invalid callchains!\n\n" "%u out of %u events were discarded for this reason.\n\n" "Consider reporting to linux-kernel@vger.kernel.org.\n\n", - session->hists.stats.nr_invalid_chains, - session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); + session->stats.nr_invalid_chains, + session->stats.nr_events[PERF_RECORD_SAMPLE]); } - if (session->hists.stats.nr_unprocessable_samples != 0) { + if (session->stats.nr_unprocessable_samples != 0) { ui__warning("%u unprocessable samples recorded.\n" "Do you have a KVM guest running and not using 'perf kvm'?\n", - session->hists.stats.nr_unprocessable_samples); + session->stats.nr_unprocessable_samples); } } -#define session_done() (*(volatile int *)(&session_done)) volatile int session_done; -static int __perf_session__process_pipe_events(struct perf_session *self, +static int __perf_session__process_pipe_events(struct perf_session *session, struct perf_tool *tool) { + int fd = perf_data_file__fd(session->file); union perf_event *event; uint32_t size, cur_size = 0; void *buf = NULL; int skip = 0; u64 head; - int err; + ssize_t err; void *p; perf_tool__fill_defaults(tool); @@ -1280,7 +1162,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, return -errno; more: event = buf; - err = readn(self->fd, event, sizeof(struct perf_event_header)); + err = readn(fd, event, sizeof(struct perf_event_header)); if (err <= 0) { if (err == 0) goto done; @@ -1289,12 +1171,14 @@ more: goto out_err; } - if (self->header.needs_swap) + if (session->header.needs_swap) perf_event_header__bswap(&event->header); size = event->header.size; - if (size == 0) - size = 8; + if (size < sizeof(struct perf_event_header)) { + pr_err("bad event header size\n"); + goto out_err; + } if (size > cur_size) { void *new = realloc(buf, size); @@ -1310,7 +1194,7 @@ more: p += sizeof(struct perf_event_header); if (size - sizeof(struct perf_event_header)) { - err = readn(self->fd, p, size - sizeof(struct perf_event_header)); + err = readn(fd, p, size - sizeof(struct perf_event_header)); if (err <= 0) { if (err == 0) { pr_err("unexpected end of event stream\n"); @@ -1322,7 +1206,7 @@ more: } } - if ((skip = perf_session__process_event(self, event, tool, head)) < 0) { + if ((skip = perf_session__process_event(session, event, tool, head)) < 0) { pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", head, event->header.size, event->header.type); err = -EINVAL; @@ -1337,11 +1221,13 @@ more: if (!session_done()) goto more; done: - err = 0; + /* do the final flush for ordered samples */ + session->ordered_samples.next_flush = ULLONG_MAX; + err = flush_sample_queue(session, tool); out_err: free(buf); - perf_session__warn_about_errors(self, tool); - perf_session_free_sample_buffers(self); + perf_session__warn_about_errors(session, tool); + perf_session_free_sample_buffers(session); return err; } @@ -1363,37 +1249,53 @@ fetch_mmaped_event(struct perf_session *session, if (session->header.needs_swap) perf_event_header__bswap(&event->header); - if (head + event->header.size > mmap_size) + if (head + event->header.size > mmap_size) { + /* We're not fetching the event so swap back again */ + if (session->header.needs_swap) + perf_event_header__bswap(&event->header); return NULL; + } return event; } +/* + * On 64bit we can mmap the data file in one go. No need for tiny mmap + * slices. On 32bit we use 32MB. 
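Both the pipe path above and the mmap path below now reject any event whose self-described header size is smaller than sizeof(struct perf_event_header), where the old code quietly promoted a size of 0 to 8; without this check a corrupt header can make the read position advance by less than one header and the loop spin or misparse everything after it. The shape of that read-and-validate step, reduced to a plain read(2) sketch (the real code uses readn(), grows its buffer, and byte-swaps when needed):

	#include <stdint.h>
	#include <unistd.h>

	struct hdr {			/* same shape as perf_event_header */
		uint32_t type;
		uint16_t misc;
		uint16_t size;		/* total event size, header included */
	};

	/* Returns the event size, 0 on EOF, -1 on error.  Simplified: assumes
	 * read() is never short, which readn() guarantees in the real code. */
	int read_one_event(int fd, void *buf, size_t cap)
	{
		struct hdr *h = buf;
		ssize_t n = read(fd, h, sizeof(*h));

		if (n == 0)
			return 0;
		if (n != (ssize_t)sizeof(*h))
			return -1;
		if (h->size < sizeof(*h) || h->size > cap)
			return -1;	/* bad self-described size */
		n = read(fd, (char *)buf + sizeof(*h), h->size - sizeof(*h));
		if (n != (ssize_t)(h->size - sizeof(*h)))
			return -1;
		return h->size;
	}
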
+ */ +#if BITS_PER_LONG == 64 +#define MMAP_SIZE ULLONG_MAX +#define NUM_MMAPS 1 +#else +#define MMAP_SIZE (32 * 1024 * 1024ULL) +#define NUM_MMAPS 128 +#endif + int __perf_session__process_events(struct perf_session *session, u64 data_offset, u64 data_size, u64 file_size, struct perf_tool *tool) { - u64 head, page_offset, file_offset, file_pos, progress_next; + int fd = perf_data_file__fd(session->file); + u64 head, page_offset, file_offset, file_pos; int err, mmap_prot, mmap_flags, map_idx = 0; - size_t page_size, mmap_size; - char *buf, *mmaps[8]; + size_t mmap_size; + char *buf, *mmaps[NUM_MMAPS]; union perf_event *event; uint32_t size; + struct ui_progress prog; perf_tool__fill_defaults(tool); - page_size = sysconf(_SC_PAGESIZE); - page_offset = page_size * (data_offset / page_size); file_offset = page_offset; head = data_offset - page_offset; - if (data_offset + data_size < file_size) + if (data_size && (data_offset + data_size < file_size)) file_size = data_offset + data_size; - progress_next = file_size / 16; + ui_progress__init(&prog, file_size, "Processing events..."); - mmap_size = session->mmap_window; + mmap_size = MMAP_SIZE; if (mmap_size > file_size) mmap_size = file_size; @@ -1407,7 +1309,7 @@ int __perf_session__process_events(struct perf_session *session, mmap_flags = MAP_PRIVATE; } remap: - buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, + buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd, file_offset); if (buf == MAP_FAILED) { pr_err("failed to mmap file\n"); @@ -1434,7 +1336,7 @@ more: size = event->header.size; - if (size == 0 || + if (size < sizeof(struct perf_event_header) || perf_session__process_event(session, event, tool, file_pos) < 0) { pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", file_offset + head, event->header.size, @@ -1446,52 +1348,56 @@ more: head += size; file_pos += size; - if (file_pos >= progress_next) { - progress_next += file_size / 16; - ui_progress__update(file_pos, file_size, - "Processing events..."); - } + ui_progress__update(&prog, size); + + if (session_done()) + goto out; if (file_pos < file_size) goto more; - err = 0; +out: /* do the final flush for ordered samples */ session->ordered_samples.next_flush = ULLONG_MAX; err = flush_sample_queue(session, tool); out_err: + ui_progress__finish(); perf_session__warn_about_errors(session, tool); perf_session_free_sample_buffers(session); return err; } -int perf_session__process_events(struct perf_session *self, +int perf_session__process_events(struct perf_session *session, struct perf_tool *tool) { + u64 size = perf_data_file__size(session->file); int err; - if (perf_session__register_idle_thread(self) == NULL) + if (perf_session__register_idle_thread(session) == NULL) return -ENOMEM; - if (!self->fd_pipe) - err = __perf_session__process_events(self, - self->header.data_offset, - self->header.data_size, - self->size, tool); + if (!perf_data_file__is_pipe(session->file)) + err = __perf_session__process_events(session, + session->header.data_offset, + session->header.data_size, + size, tool); else - err = __perf_session__process_pipe_events(self, tool); + err = __perf_session__process_pipe_events(session, tool); return err; } bool perf_session__has_traces(struct perf_session *session, const char *msg) { - if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) { - pr_err("No trace sample to read. 
Did you call 'perf %s'?\n", msg); - return false; + struct perf_evsel *evsel; + + evlist__for_each(session->evlist, evsel) { + if (evsel->attr.type == PERF_TYPE_TRACEPOINT) + return true; } - return true; + pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); + return false; } int maps__set_kallsyms_ref_reloc_sym(struct map **maps, @@ -1525,18 +1431,15 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps, return 0; } -size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) +size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) { - return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + - __dsos__fprintf(&self->host_machine.user_dsos, fp) + - machines__fprintf_dsos(&self->machines, fp); + return machines__fprintf_dsos(&session->machines, fp); } -size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, - bool with_hits) +size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, + bool (skip)(struct dso *dso, int parm), int parm) { - size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); - return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); + return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); } size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) @@ -1544,11 +1447,11 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) struct perf_evsel *pos; size_t ret = fprintf(fp, "Aggregated stats:\n"); - ret += hists__fprintf_nr_events(&session->hists, fp); + ret += events_stats__fprintf(&session->stats, fp); - list_for_each_entry(pos, &session->evlist->entries, node) { + evlist__for_each(session->evlist, pos) { ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); - ret += hists__fprintf_nr_events(&pos->hists, fp); + ret += events_stats__fprintf(&pos->hists.stats, fp); } return ret; @@ -1560,19 +1463,7 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp) * FIXME: Here we have to actually print all the machines in this * session, not just the host... */ - return machine__fprintf(&session->host_machine, fp); -} - -void perf_session__remove_thread(struct perf_session *session, - struct thread *th) -{ - /* - * FIXME: This one makes no sense, we need to remove the thread from - * the machine it belongs to, perf_session can have many machines, so - * doing it always on ->host_machine is wrong. Fix when auditing all - * the 'perf kvm' code. 
- */ - machine__remove_thread(&session->host_machine, th); + return machine__fprintf(&session->machines.host, fp); } struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, @@ -1580,81 +1471,116 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, { struct perf_evsel *pos; - list_for_each_entry(pos, &session->evlist->entries, node) { + evlist__for_each(session->evlist, pos) { if (pos->attr.type == type) return pos; } return NULL; } -void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, - struct perf_sample *sample, struct machine *machine, - int print_sym, int print_dso, int print_symoffset) +void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, + struct addr_location *al, + unsigned int print_opts, unsigned int stack_depth) { - struct addr_location al; struct callchain_cursor_node *node; - - if (perf_event__preprocess_sample(event, machine, &al, sample, - NULL) < 0) { - error("problem processing %d event, skipping it.\n", - event->header.type); - return; - } + int print_ip = print_opts & PRINT_IP_OPT_IP; + int print_sym = print_opts & PRINT_IP_OPT_SYM; + int print_dso = print_opts & PRINT_IP_OPT_DSO; + int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET; + int print_oneline = print_opts & PRINT_IP_OPT_ONELINE; + int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE; + char s = print_oneline ? ' ' : '\t'; if (symbol_conf.use_callchain && sample->callchain) { + struct addr_location node_al; - - if (machine__resolve_callchain(machine, evsel, al.thread, - sample, NULL) != 0) { + if (machine__resolve_callchain(al->machine, evsel, al->thread, + sample, NULL, NULL, + PERF_MAX_STACK_DEPTH) != 0) { if (verbose) error("Failed to resolve callchain. Skipping\n"); return; } callchain_cursor_commit(&callchain_cursor); - while (1) { + if (print_symoffset) + node_al = *al; + + while (stack_depth) { + u64 addr = 0; + node = callchain_cursor_current(&callchain_cursor); if (!node) break; - printf("\t%16" PRIx64, node->ip); + if (node->sym && node->sym->ignore) + goto next; + + if (print_ip) + printf("%c%16" PRIx64, s, node->ip); + + if (node->map) + addr = node->map->map_ip(node->map, node->ip); + if (print_sym) { printf(" "); - symbol__fprintf_symname(node->sym, stdout); + if (print_symoffset) { + node_al.addr = addr; + node_al.map = node->map; + symbol__fprintf_symname_offs(node->sym, &node_al, stdout); + } else + symbol__fprintf_symname(node->sym, stdout); } + if (print_dso) { printf(" ("); map__fprintf_dsoname(node->map, stdout); printf(")"); } - printf("\n"); + if (print_srcline) + map__fprintf_srcline(node->map, addr, "\n ", + stdout); + + if (!print_oneline) + printf("\n"); + + stack_depth--; +next: callchain_cursor_advance(&callchain_cursor); } } else { - printf("%16" PRIx64, sample->ip); + if (al->sym && al->sym->ignore) + return; + + if (print_ip) + printf("%16" PRIx64, sample->ip); + if (print_sym) { printf(" "); if (print_symoffset) - symbol__fprintf_symname_offs(al.sym, &al, + symbol__fprintf_symname_offs(al->sym, al, stdout); else - symbol__fprintf_symname(al.sym, stdout); + symbol__fprintf_symname(al->sym, stdout); } if (print_dso) { printf(" ("); - map__fprintf_dsoname(al.map, stdout); + map__fprintf_dsoname(al->map, stdout); printf(")"); } + + if (print_srcline) + map__fprintf_srcline(al->map, al->addr, "\n ", stdout); } } int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap) { - int i; + int i, err = -1; struct cpu_map *map; 
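perf_evsel__print_ip() above now takes a precomputed struct addr_location plus a single print_opts bitmask and an explicit stack_depth, replacing the separate print_sym/print_dso/print_symoffset int arguments; the PRINT_IP_OPT_* bits are added to session.h further down in this patch. A caller wanting roughly the previous output would do something like the fragment below, where evsel, sample and al come from the caller's sample-processing context:

	unsigned int opts = PRINT_IP_OPT_IP | PRINT_IP_OPT_SYM | PRINT_IP_OPT_DSO;

	perf_evsel__print_ip(evsel, sample, al, opts, PERF_MAX_STACK_DEPTH);
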
for (i = 0; i < PERF_TYPE_MAX; ++i) { @@ -1683,25 +1609,31 @@ int perf_session__cpu_bitmap(struct perf_session *session, if (cpu >= MAX_NR_CPUS) { pr_err("Requested CPU %d too large. " "Consider raising MAX_NR_CPUS\n", cpu); - return -1; + goto out_delete_map; } set_bit(cpu, cpu_bitmap); } - return 0; + err = 0; + +out_delete_map: + cpu_map__delete(map); + return err; } void perf_session__fprintf_info(struct perf_session *session, FILE *fp, bool full) { struct stat st; - int ret; + int fd, ret; if (session == NULL || fp == NULL) return; - ret = fstat(session->fd, &st); + fd = perf_data_file__fd(session->file); + + ret = fstat(fd, &st); if (ret == -1) return; @@ -1716,52 +1648,26 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session, const struct perf_evsel_str_handler *assocs, size_t nr_assocs) { - struct perf_evlist *evlist = session->evlist; - struct event_format *format; struct perf_evsel *evsel; - char *tracepoint, *name; size_t i; int err; for (i = 0; i < nr_assocs; i++) { - err = -ENOMEM; - tracepoint = strdup(assocs[i].name); - if (tracepoint == NULL) - goto out; - - err = -ENOENT; - name = strchr(tracepoint, ':'); - if (name == NULL) - goto out_free; - - *name++ = '\0'; - format = pevent_find_event_by_name(session->pevent, - tracepoint, name); - if (format == NULL) { - /* - * Adding a handler for an event not in the session, - * just ignore it. - */ - goto next; - } - - evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id); + /* + * Adding a handler for an event not in the session, + * just ignore it. + */ + evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); if (evsel == NULL) - goto next; + continue; err = -EEXIST; - if (evsel->handler.func != NULL) - goto out_free; - evsel->handler.func = assocs[i].handler; -next: - free(tracepoint); + if (evsel->handler != NULL) + goto out; + evsel->handler = assocs[i].handler; } err = 0; out: return err; - -out_free: - free(tracepoint); - goto out; } diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index dd6426163ba..3140f8ae614 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -1,13 +1,16 @@ #ifndef __PERF_SESSION_H #define __PERF_SESSION_H +#include "trace-event.h" #include "hist.h" #include "event.h" #include "header.h" +#include "machine.h" #include "symbol.h" #include "thread.h" +#include "data.h" #include <linux/rbtree.h> -#include "../../../include/uapi/linux/perf_event.h" +#include <linux/perf_event.h> struct sample_queue; struct ip_callchain; @@ -28,108 +31,83 @@ struct ordered_samples { struct perf_session { struct perf_header header; - unsigned long size; - unsigned long mmap_window; - struct machine host_machine; - struct rb_root machines; + struct machines machines; struct perf_evlist *evlist; - struct pevent *pevent; - /* - * FIXME: Need to split this up further, we need global - * stats + per event stats. 
- */ - struct hists hists; - int fd; - bool fd_pipe; + struct trace_event tevent; + struct events_stats stats; bool repipe; - int cwdlen; - char *cwd; struct ordered_samples ordered_samples; - char filename[1]; + struct perf_data_file *file; }; +#define PRINT_IP_OPT_IP (1<<0) +#define PRINT_IP_OPT_SYM (1<<1) +#define PRINT_IP_OPT_DSO (1<<2) +#define PRINT_IP_OPT_SYMOFFSET (1<<3) +#define PRINT_IP_OPT_ONELINE (1<<4) +#define PRINT_IP_OPT_SRCLINE (1<<5) + struct perf_tool; -struct perf_session *perf_session__new(const char *filename, int mode, - bool force, bool repipe, - struct perf_tool *tool); -void perf_session__delete(struct perf_session *self); +struct perf_session *perf_session__new(struct perf_data_file *file, + bool repipe, struct perf_tool *tool); +void perf_session__delete(struct perf_session *session); -void perf_event_header__bswap(struct perf_event_header *self); +void perf_event_header__bswap(struct perf_event_header *hdr); -int __perf_session__process_events(struct perf_session *self, +int __perf_session__process_events(struct perf_session *session, u64 data_offset, u64 data_size, u64 size, struct perf_tool *tool); -int perf_session__process_events(struct perf_session *self, +int perf_session__process_events(struct perf_session *session, struct perf_tool *tool); -int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel, +int perf_session_queue_event(struct perf_session *s, union perf_event *event, + struct perf_sample *sample, u64 file_offset); + +void perf_tool__fill_defaults(struct perf_tool *tool); + +int perf_session__resolve_callchain(struct perf_session *session, + struct perf_evsel *evsel, struct thread *thread, struct ip_callchain *chain, struct symbol **parent); -struct branch_info *machine__resolve_bstack(struct machine *self, - struct thread *thread, - struct branch_stack *bs); - -bool perf_session__has_traces(struct perf_session *self, const char *msg); +bool perf_session__has_traces(struct perf_session *session, const char *msg); -void mem_bswap_64(void *src, int byte_size); -void mem_bswap_32(void *src, int byte_size); void perf_event__attr_swap(struct perf_event_attr *attr); -int perf_session__create_kernel_maps(struct perf_session *self); +int perf_session__create_kernel_maps(struct perf_session *session); void perf_session__set_id_hdr_size(struct perf_session *session); -void perf_session__remove_thread(struct perf_session *self, struct thread *th); - -static inline -struct machine *perf_session__find_host_machine(struct perf_session *self) -{ - return &self->host_machine; -} static inline -struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid) +struct machine *perf_session__find_machine(struct perf_session *session, pid_t pid) { - if (pid == HOST_KERNEL_ID) - return &self->host_machine; - return machines__find(&self->machines, pid); + return machines__find(&session->machines, pid); } static inline -struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid) +struct machine *perf_session__findnew_machine(struct perf_session *session, pid_t pid) { - if (pid == HOST_KERNEL_ID) - return &self->host_machine; - return machines__findnew(&self->machines, pid); + return machines__findnew(&session->machines, pid); } -static inline -void perf_session__process_machines(struct perf_session *self, - struct perf_tool *tool, - machine__process_t process) -{ - process(&self->host_machine, tool); - return machines__process(&self->machines, process, tool); -} +struct thread 
*perf_session__findnew(struct perf_session *session, pid_t pid); +size_t perf_session__fprintf(struct perf_session *session, FILE *fp); -struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); -size_t perf_session__fprintf(struct perf_session *self, FILE *fp); +size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp); -size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); - -size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, - FILE *fp, bool with_hits); +size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, + bool (fn)(struct dso *dso, int parm), int parm); size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); -void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, - struct perf_sample *sample, struct machine *machine, - int print_sym, int print_dso, int print_symoffset); +void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample, + struct addr_location *al, + unsigned int print_opts, unsigned int stack_depth); int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap); @@ -144,4 +122,8 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session, #define perf_session__set_tracepoints_handlers(session, array) \ __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array)) + +extern volatile int session_done; + +#define session_done() (*(volatile int *)(&session_done)) #endif /* __PERF_SESSION_H */ diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 73d51026978..d0aee4b9dfd 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -18,12 +18,14 @@ class install_lib(_install_lib): self.build_dir = build_lib -cflags = ['-fno-strict-aliasing', '-Wno-write-strings'] -cflags += getenv('CFLAGS', '').split() +cflags = getenv('CFLAGS', '').split() +# switch off several checks (need to be at the end of cflags list) +cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ] build_lib = getenv('PYTHON_EXTBUILD_LIB') build_tmp = getenv('PYTHON_EXTBUILD_TMP') libtraceevent = getenv('LIBTRACEEVENT') +libapikfs = getenv('LIBAPIKFS') ext_sources = [f.strip() for f in file('util/python-ext-sources') if len(f.strip()) > 0 and f[0] != '#'] @@ -32,7 +34,7 @@ perf = Extension('perf', sources = ext_sources, include_dirs = ['util/include'], extra_compile_args = cflags, - extra_objects = [libtraceevent], + extra_objects = [libtraceevent, libapikfs], ) setup(name='perf', diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index cfd1c0feb32..1ec57dd8228 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -1,19 +1,28 @@ +#include <sys/mman.h> #include "sort.h" #include "hist.h" +#include "comm.h" +#include "symbol.h" +#include "evsel.h" regex_t parent_regex; const char default_parent_pattern[] = "^sys_|^do_page_fault"; const char *parent_pattern = default_parent_pattern; const char default_sort_order[] = "comm,dso,symbol"; -const char *sort_order = default_sort_order; +const char default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to"; +const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; +const char default_top_sort_order[] = "dso,symbol"; +const char default_diff_sort_order[] = "dso,symbol"; +const char *sort_order; 
+const char *field_order; +regex_t ignore_callees_regex; +int have_ignore_callees = 0; int sort__need_collapse = 0; int sort__has_parent = 0; int sort__has_sym = 0; -int sort__branch_mode = -1; /* -1 = means not set */ +int sort__has_dso = 0; +enum sort_mode sort__mode = SORT_MODE__NORMAL; -enum sort_type sort__first_dimension; - -LIST_HEAD(hist_entry__sort_list); static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) { @@ -39,7 +48,7 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) return n; } -static int64_t cmp_null(void *l, void *r) +static int64_t cmp_null(const void *l, const void *r) { if (!l && !r) return 0; @@ -54,14 +63,15 @@ static int64_t cmp_null(void *l, void *r) static int64_t sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) { - return right->thread->pid - left->thread->pid; + return right->thread->tid - left->thread->tid; } -static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%*s:%5d", width, - self->thread->comm ?: "", self->thread->pid); + const char *comm = thread__comm_str(he->thread); + return repsep_snprintf(bf, size, "%*s:%5d", width - 6, + comm ?: "", he->thread->tid); } struct sort_entry sort_thread = { @@ -76,27 +86,40 @@ struct sort_entry sort_thread = { static int64_t sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) { - return right->thread->pid - left->thread->pid; + /* Compare the addr that should be unique among comm */ + return comm__str(right->comm) - comm__str(left->comm); } static int64_t sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) { - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) - return cmp_null(comm_l, comm_r); + /* Compare the addr that should be unique among comm */ + return comm__str(right->comm) - comm__str(left->comm); +} - return strcmp(comm_l, comm_r); +static int64_t +sort__comm_sort(struct hist_entry *left, struct hist_entry *right) +{ + return strcmp(comm__str(right->comm), comm__str(left->comm)); } -static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); + return repsep_snprintf(bf, size, "%*s", width, comm__str(he->comm)); } +struct sort_entry sort_comm = { + .se_header = "Command", + .se_cmp = sort__comm_cmp, + .se_collapse = sort__comm_collapse, + .se_sort = sort__comm_sort, + .se_snprintf = hist_entry__comm_snprintf, + .se_width_idx = HISTC_COMM, +}; + +/* --sort dso */ + static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) { struct dso *dso_l = map_l ? 
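The comm sort key above now compares comm__str() pointers for grouping and collapsing (the in-tree comment calls it "the addr that should be unique among comm") and only falls back to strcmp() in sort__comm_sort(), where an alphabetical display order matters. That only works if comm strings are interned, i.e. every thread with the same comm shares one string object, so pointer equality implies string equality. The idea in miniature, as a toy interner rather than perf's actual comm_str code:

	#include <stdio.h>
	#include <string.h>

	/* Toy interner: each distinct string is stored once, so callers can
	 * compare the returned pointers.  Tiny fixed table, no eviction. */
	static const char *intern(const char *s)
	{
		static char table[16][32];
		static int n;
		int i;

		for (i = 0; i < n; i++)
			if (!strcmp(table[i], s))
				return table[i];
		snprintf(table[n], sizeof(table[n]), "%s", s);
		return table[n++];
	}

	int main(void)
	{
		const char *a = intern("bash"), *b = intern("bash");
		const char *c = intern("perf");

		printf("%d %d\n", a == b, a == c);	/* prints: 1 0 */
		return 0;
	}
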
map_l->dso : NULL; @@ -104,7 +127,7 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) const char *dso_name_l, *dso_name_r; if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); + return cmp_null(dso_r, dso_l); if (verbose) { dso_name_l = dso_l->long_name; @@ -117,76 +140,116 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) return strcmp(dso_name_l, dso_name_r); } -struct sort_entry sort_comm = { - .se_header = "Command", - .se_cmp = sort__comm_cmp, - .se_collapse = sort__comm_collapse, - .se_snprintf = hist_entry__comm_snprintf, - .se_width_idx = HISTC_COMM, -}; - -/* --sort dso */ - static int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { - return _sort__dso_cmp(left->ms.map, right->ms.map); + return _sort__dso_cmp(right->ms.map, left->ms.map); +} + +static int _hist_entry__dso_snprintf(struct map *map, char *bf, + size_t size, unsigned int width) +{ + if (map && map->dso) { + const char *dso_name = !verbose ? map->dso->short_name : + map->dso->long_name; + return repsep_snprintf(bf, size, "%-*s", width, dso_name); + } + + return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); +} + +static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); } +struct sort_entry sort_dso = { + .se_header = "Shared Object", + .se_cmp = sort__dso_cmp, + .se_snprintf = hist_entry__dso_snprintf, + .se_width_idx = HISTC_DSO, +}; -static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, - u64 ip_l, u64 ip_r) +/* --sort symbol */ + +static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) +{ + return (int64_t)(right_ip - left_ip); +} + +static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) { + u64 ip_l, ip_r; + if (!sym_l || !sym_r) return cmp_null(sym_l, sym_r); if (sym_l == sym_r) return 0; - if (sym_l) - ip_l = sym_l->start; - if (sym_r) - ip_r = sym_r->start; + ip_l = sym_l->start; + ip_r = sym_r->start; return (int64_t)(ip_r - ip_l); } -static int _hist_entry__dso_snprintf(struct map *map, char *bf, - size_t size, unsigned int width) +static int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) { - if (map && map->dso) { - const char *dso_name = !verbose ? map->dso->short_name : - map->dso->long_name; - return repsep_snprintf(bf, size, "%-*s", width, dso_name); + int64_t ret; + + if (!left->ms.sym && !right->ms.sym) + return _sort__addr_cmp(left->ip, right->ip); + + /* + * comparing symbol address alone is not enough since it's a + * relative address within a dso. + */ + if (!sort__has_dso) { + ret = sort__dso_cmp(left, right); + if (ret != 0) + return ret; } - return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); + return _sort__sym_cmp(left->ms.sym, right->ms.sym); } -static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width) +static int64_t +sort__sym_sort(struct hist_entry *left, struct hist_entry *right) { - return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); + if (!left->ms.sym || !right->ms.sym) + return cmp_null(left->ms.sym, right->ms.sym); + + return strcmp(right->ms.sym->name, left->ms.sym->name); } static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, u64 ip, char level, char *bf, size_t size, - unsigned int width __maybe_unused) + unsigned int width) { size_t ret = 0; if (verbose) { char o = map ? 
dso__symtab_origin(map->dso) : '!'; ret += repsep_snprintf(bf, size, "%-#*llx %c ", - BITS_PER_LONG / 4, ip, o); + BITS_PER_LONG / 4 + 2, ip, o); } ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); - if (sym) - ret += repsep_snprintf(bf + ret, size - ret, "%-*s", - width - ret, - sym->name); - else { + if (sym && map) { + if (map->type == MAP__VARIABLE) { + ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); + ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", + ip - map->unmap_ip(map, sym->start)); + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, ""); + } else { + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", + width - ret, + sym->name); + } + } else { size_t len = BITS_PER_LONG / 4; ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, ip); @@ -197,46 +260,17 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, return ret; } - -struct sort_entry sort_dso = { - .se_header = "Shared Object", - .se_cmp = sort__dso_cmp, - .se_snprintf = hist_entry__dso_snprintf, - .se_width_idx = HISTC_DSO, -}; - -static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, - size_t size, - unsigned int width __maybe_unused) +static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) { - return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, - self->level, bf, size, width); -} - -/* --sort symbol */ -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (!left->ms.sym && !right->ms.sym) - return right->level - left->level; - - if (!left->ms.sym || !right->ms.sym) - return cmp_null(left->ms.sym, right->ms.sym); - - if (left->ms.sym == right->ms.sym) - return 0; - - ip_l = left->ms.sym->start; - ip_r = right->ms.sym->start; - - return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); + return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, + he->level, bf, size, width); } struct sort_entry sort_sym = { .se_header = "Symbol", .se_cmp = sort__sym_cmp, + .se_sort = sort__sym_sort, .se_snprintf = hist_entry__sym_snprintf, .se_width_idx = HISTC_SYMBOL, }; @@ -246,47 +280,32 @@ struct sort_entry sort_sym = { static int64_t sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) { - return (int64_t)(right->ip - left->ip); + if (!left->srcline) { + if (!left->ms.map) + left->srcline = SRCLINE_UNKNOWN; + else { + struct map *map = left->ms.map; + left->srcline = get_srcline(map->dso, + map__rip_2objdump(map, left->ip)); + } + } + if (!right->srcline) { + if (!right->ms.map) + right->srcline = SRCLINE_UNKNOWN; + else { + struct map *map = right->ms.map; + right->srcline = get_srcline(map->dso, + map__rip_2objdump(map, right->ip)); + } + } + return strcmp(right->srcline, left->srcline); } -static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width __maybe_unused) { - FILE *fp; - char cmd[PATH_MAX + 2], *path = self->srcline, *nl; - size_t line_len; - - if (path != NULL) - goto out_path; - - if (!self->ms.map) - goto out_ip; - - if (!strncmp(self->ms.map->dso->long_name, "/tmp/perf-", 10)) - goto out_ip; - - snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64, - self->ms.map->dso->long_name, self->ip); - fp = popen(cmd, "r"); - if (!fp) - goto out_ip; - - if (getline(&path, &line_len, fp) < 0 || !line_len) - goto out_ip; - fclose(fp); - self->srcline = 
strdup(path); - if (self->srcline == NULL) - goto out_ip; - - nl = strchr(self->srcline, '\n'); - if (nl != NULL) - *nl = '\0'; - path = self->srcline; -out_path: - return repsep_snprintf(bf, size, "%s", path); -out_ip: - return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip); + return repsep_snprintf(bf, size, "%s", he->srcline); } struct sort_entry sort_srcline = { @@ -307,14 +326,14 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) if (!sym_l || !sym_r) return cmp_null(sym_l, sym_r); - return strcmp(sym_l->name, sym_r->name); + return strcmp(sym_r->name, sym_l->name); } -static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { return repsep_snprintf(bf, size, "%-*s", width, - self->parent ? self->parent->name : "[other]"); + he->parent ? he->parent->name : "[other]"); } struct sort_entry sort_parent = { @@ -332,10 +351,10 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) return right->cpu - left->cpu; } -static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width) +static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*d", width, self->cpu); + return repsep_snprintf(bf, size, "%*d", width, he->cpu); } struct sort_entry sort_cpu = { @@ -345,6 +364,8 @@ struct sort_entry sort_cpu = { .se_width_idx = HISTC_CPU, }; +/* sort keys for branch stacks */ + static int64_t sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -352,20 +373,13 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) right->branch_info->from.map); } -static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return _hist_entry__dso_snprintf(self->branch_info->from.map, + return _hist_entry__dso_snprintf(he->branch_info->from.map, bf, size, width); } -struct sort_entry sort_dso_from = { - .se_header = "Source Shared Object", - .se_cmp = sort__dso_from_cmp, - .se_snprintf = hist_entry__dso_from_snprintf, - .se_width_idx = HISTC_DSO_FROM, -}; - static int64_t sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) { @@ -373,10 +387,10 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) right->branch_info->to.map); } -static int hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return _hist_entry__dso_snprintf(self->branch_info->to.map, + return _hist_entry__dso_snprintf(he->branch_info->to.map, bf, size, width); } @@ -387,10 +401,9 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) struct addr_map_symbol *from_r = &right->branch_info->from; if (!from_l->sym && !from_r->sym) - return right->level - left->level; + return _sort__addr_cmp(from_l->addr, from_r->addr); - return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr, - from_r->addr); + return _sort__sym_cmp(from_l->sym, from_r->sym); } static int64_t @@ -400,31 +413,36 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) struct addr_map_symbol *to_r = &right->branch_info->to; if (!to_l->sym && !to_r->sym) - return right->level - left->level; + return _sort__addr_cmp(to_l->addr, to_r->addr); - 
return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr); + return _sort__sym_cmp(to_l->sym, to_r->sym); } -static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, - size_t size, - unsigned int width __maybe_unused) +static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) { - struct addr_map_symbol *from = &self->branch_info->from; + struct addr_map_symbol *from = &he->branch_info->from; return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, - self->level, bf, size, width); + he->level, bf, size, width); } -static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, - size_t size, - unsigned int width __maybe_unused) +static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) { - struct addr_map_symbol *to = &self->branch_info->to; + struct addr_map_symbol *to = &he->branch_info->to; return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, - self->level, bf, size, width); + he->level, bf, size, width); } +struct sort_entry sort_dso_from = { + .se_header = "Source Shared Object", + .se_cmp = sort__dso_from_cmp, + .se_snprintf = hist_entry__dso_from_snprintf, + .se_width_idx = HISTC_DSO_FROM, +}; + struct sort_entry sort_dso_to = { .se_header = "Target Shared Object", .se_cmp = sort__dso_to_cmp, @@ -457,18 +475,414 @@ sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) return mp || p; } -static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf, +static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width){ static const char *out = "N/A"; - if (self->branch_info->flags.predicted) + if (he->branch_info->flags.predicted) out = "N"; - else if (self->branch_info->flags.mispred) + else if (he->branch_info->flags.mispred) out = "Y"; return repsep_snprintf(bf, size, "%-*s", width, out); } +/* --sort daddr_sym */ +static int64_t +sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) +{ + uint64_t l = 0, r = 0; + + if (left->mem_info) + l = left->mem_info->daddr.addr; + if (right->mem_info) + r = right->mem_info->daddr.addr; + + return (int64_t)(r - l); +} + +static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + uint64_t addr = 0; + struct map *map = NULL; + struct symbol *sym = NULL; + + if (he->mem_info) { + addr = he->mem_info->daddr.addr; + map = he->mem_info->daddr.map; + sym = he->mem_info->daddr.sym; + } + return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, + width); +} + +static int64_t +sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct map *map_l = NULL; + struct map *map_r = NULL; + + if (left->mem_info) + map_l = left->mem_info->daddr.map; + if (right->mem_info) + map_r = right->mem_info->daddr.map; + + return _sort__dso_cmp(map_l, map_r); +} + +static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + struct map *map = NULL; + + if (he->mem_info) + map = he->mem_info->daddr.map; + + return _hist_entry__dso_snprintf(map, bf, size, width); +} + +static int64_t +sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_lock = PERF_MEM_LOCK_NA; + + if (right->mem_info) + data_src_r = 
right->mem_info->data_src; + else + data_src_r.mem_lock = PERF_MEM_LOCK_NA; + + return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); +} + +static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + const char *out; + u64 mask = PERF_MEM_LOCK_NA; + + if (he->mem_info) + mask = he->mem_info->data_src.mem_lock; + + if (mask & PERF_MEM_LOCK_NA) + out = "N/A"; + else if (mask & PERF_MEM_LOCK_LOCKED) + out = "Yes"; + else + out = "No"; + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static int64_t +sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_dtlb = PERF_MEM_TLB_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_dtlb = PERF_MEM_TLB_NA; + + return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); +} + +static const char * const tlb_access[] = { + "N/A", + "HIT", + "MISS", + "L1", + "L2", + "Walker", + "Fault", +}; +#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *)) + +static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + char out[64]; + size_t sz = sizeof(out) - 1; /* -1 for null termination */ + size_t l = 0, i; + u64 m = PERF_MEM_TLB_NA; + u64 hit, miss; + + out[0] = '\0'; + + if (he->mem_info) + m = he->mem_info->data_src.mem_dtlb; + + hit = m & PERF_MEM_TLB_HIT; + miss = m & PERF_MEM_TLB_MISS; + + /* already taken care of */ + m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS); + + for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (l) { + strcat(out, " or "); + l += 4; + } + strncat(out, tlb_access[i], sz - l); + l += strlen(tlb_access[i]); + } + if (*out == '\0') + strcpy(out, "N/A"); + if (hit) + strncat(out, " hit", sz - l); + if (miss) + strncat(out, " miss", sz - l); + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static int64_t +sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_lvl = PERF_MEM_LVL_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_lvl = PERF_MEM_LVL_NA; + + return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); +} + +static const char * const mem_lvl[] = { + "N/A", + "HIT", + "MISS", + "L1", + "LFB", + "L2", + "L3", + "Local RAM", + "Remote RAM (1 hop)", + "Remote RAM (2 hops)", + "Remote Cache (1 hop)", + "Remote Cache (2 hops)", + "I/O", + "Uncached", +}; +#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *)) + +static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + char out[64]; + size_t sz = sizeof(out) - 1; /* -1 for null termination */ + size_t i, l = 0; + u64 m = PERF_MEM_LVL_NA; + u64 hit, miss; + + if (he->mem_info) + m = he->mem_info->data_src.mem_lvl; + + out[0] = '\0'; + + hit = m & PERF_MEM_LVL_HIT; + miss = m & PERF_MEM_LVL_MISS; + + /* already taken care of */ + m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS); + + for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (l) { + strcat(out, " or "); + l += 4; + } + strncat(out, mem_lvl[i], sz - l); + l += strlen(mem_lvl[i]); + } + if (*out == '\0') + strcpy(out, "N/A"); + if (hit) 
+ strncat(out, " hit", sz - l); + if (miss) + strncat(out, " miss", sz - l); + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static int64_t +sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) +{ + union perf_mem_data_src data_src_l; + union perf_mem_data_src data_src_r; + + if (left->mem_info) + data_src_l = left->mem_info->data_src; + else + data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; + + if (right->mem_info) + data_src_r = right->mem_info->data_src; + else + data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; + + return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); +} + +static const char * const snoop_access[] = { + "N/A", + "None", + "Miss", + "Hit", + "HitM", +}; +#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *)) + +static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + char out[64]; + size_t sz = sizeof(out) - 1; /* -1 for null termination */ + size_t i, l = 0; + u64 m = PERF_MEM_SNOOP_NA; + + out[0] = '\0'; + + if (he->mem_info) + m = he->mem_info->data_src.mem_snoop; + + for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) { + if (!(m & 0x1)) + continue; + if (l) { + strcat(out, " or "); + l += 4; + } + strncat(out, snoop_access[i], sz - l); + l += strlen(snoop_access[i]); + } + + if (*out == '\0') + strcpy(out, "N/A"); + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +static inline u64 cl_address(u64 address) +{ + /* return the cacheline of the address */ + return (address & ~(cacheline_size - 1)); +} + +static int64_t +sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) +{ + u64 l, r; + struct map *l_map, *r_map; + + if (!left->mem_info) return -1; + if (!right->mem_info) return 1; + + /* group event types together */ + if (left->cpumode > right->cpumode) return -1; + if (left->cpumode < right->cpumode) return 1; + + l_map = left->mem_info->daddr.map; + r_map = right->mem_info->daddr.map; + + /* if both are NULL, jump to sort on al_addr instead */ + if (!l_map && !r_map) + goto addr; + + if (!l_map) return -1; + if (!r_map) return 1; + + if (l_map->maj > r_map->maj) return -1; + if (l_map->maj < r_map->maj) return 1; + + if (l_map->min > r_map->min) return -1; + if (l_map->min < r_map->min) return 1; + + if (l_map->ino > r_map->ino) return -1; + if (l_map->ino < r_map->ino) return 1; + + if (l_map->ino_generation > r_map->ino_generation) return -1; + if (l_map->ino_generation < r_map->ino_generation) return 1; + + /* + * Addresses with no major/minor numbers are assumed to be + * anonymous in userspace. Sort those on pid then address. + * + * The kernel and non-zero major/minor mapped areas are + * assumed to be unity mapped. Sort those on address. 
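The tlb, lvl and snoop snprintf helpers added above all decode a data_src bitmask the same way: walk the mask bit by bit, append the matching name from a static table with " or " between entries, and fall back to "N/A" when nothing was set. Below is a minimal standalone sketch of that decoding loop; the names[] table and the example mask are made up for illustration and are not the real PERF_MEM_* encodings.

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical flag names; the real code indexes tlb_access[], mem_lvl[] or snoop_access[]. */
	static const char * const names[] = { "N/A", "HIT", "MISS", "L1", "L2" };
	#define NUM_NAMES (sizeof(names) / sizeof(names[0]))

	/* Decode a bitmask into "A or B or ..." the way the snprintf helpers do. */
	static void decode(unsigned long m, char *out, size_t sz)
	{
		size_t i, l = 0;

		out[0] = '\0';
		for (i = 0; m && i < NUM_NAMES; i++, m >>= 1) {
			if (!(m & 0x1))
				continue;
			if (l) {
				strncat(out, " or ", sz - l - 1);
				l += 4;
			}
			strncat(out, names[i], sz - l - 1);
			l += strlen(names[i]);
		}
		if (out[0] == '\0')
			strcpy(out, "N/A");
	}

	int main(void)
	{
		char buf[64];

		decode(0x1a, buf, sizeof(buf));	/* bits 1, 3, 4 -> "HIT or L1 or L2" */
		printf("%s\n", buf);
		return 0;
	}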
+ */ + + if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && + (!(l_map->flags & MAP_SHARED)) && + !l_map->maj && !l_map->min && !l_map->ino && + !l_map->ino_generation) { + /* userspace anonymous */ + + if (left->thread->pid_ > right->thread->pid_) return -1; + if (left->thread->pid_ < right->thread->pid_) return 1; + } + +addr: + /* al_addr does all the right addr - start + offset calculations */ + l = cl_address(left->mem_info->daddr.al_addr); + r = cl_address(right->mem_info->daddr.al_addr); + + if (l > r) return -1; + if (l < r) return 1; + + return 0; +} + +static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + + uint64_t addr = 0; + struct map *map = NULL; + struct symbol *sym = NULL; + char level = he->level; + + if (he->mem_info) { + addr = cl_address(he->mem_info->daddr.al_addr); + map = he->mem_info->daddr.map; + sym = he->mem_info->daddr.sym; + + /* print [s] for shared data mmaps */ + if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && + map && (map->type == MAP__VARIABLE) && + (map->flags & MAP_SHARED) && + (map->maj || map->min || map->ino || + map->ino_generation)) + level = 's'; + else if (!map) + level = 'X'; + } + return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, + width); +} + struct sort_entry sort_mispredict = { .se_header = "Branch Mispredicted", .se_cmp = sort__mispredict_cmp, @@ -476,6 +890,219 @@ struct sort_entry sort_mispredict = { .se_width_idx = HISTC_MISPREDICT, }; +static u64 he_weight(struct hist_entry *he) +{ + return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; +} + +static int64_t +sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return he_weight(left) - he_weight(right); +} + +static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); +} + +struct sort_entry sort_local_weight = { + .se_header = "Local Weight", + .se_cmp = sort__local_weight_cmp, + .se_snprintf = hist_entry__local_weight_snprintf, + .se_width_idx = HISTC_LOCAL_WEIGHT, +}; + +static int64_t +sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->stat.weight - right->stat.weight; +} + +static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); +} + +struct sort_entry sort_global_weight = { + .se_header = "Weight", + .se_cmp = sort__global_weight_cmp, + .se_snprintf = hist_entry__global_weight_snprintf, + .se_width_idx = HISTC_GLOBAL_WEIGHT, +}; + +struct sort_entry sort_mem_daddr_sym = { + .se_header = "Data Symbol", + .se_cmp = sort__daddr_cmp, + .se_snprintf = hist_entry__daddr_snprintf, + .se_width_idx = HISTC_MEM_DADDR_SYMBOL, +}; + +struct sort_entry sort_mem_daddr_dso = { + .se_header = "Data Object", + .se_cmp = sort__dso_daddr_cmp, + .se_snprintf = hist_entry__dso_daddr_snprintf, + .se_width_idx = HISTC_MEM_DADDR_SYMBOL, +}; + +struct sort_entry sort_mem_locked = { + .se_header = "Locked", + .se_cmp = sort__locked_cmp, + .se_snprintf = hist_entry__locked_snprintf, + .se_width_idx = HISTC_MEM_LOCKED, +}; + +struct sort_entry sort_mem_tlb = { + .se_header = "TLB access", + .se_cmp = sort__tlb_cmp, + .se_snprintf = hist_entry__tlb_snprintf, + .se_width_idx = HISTC_MEM_TLB, +}; + +struct sort_entry sort_mem_lvl = { + .se_header = "Memory access", + .se_cmp = sort__lvl_cmp, + 
.se_snprintf = hist_entry__lvl_snprintf, + .se_width_idx = HISTC_MEM_LVL, +}; + +struct sort_entry sort_mem_snoop = { + .se_header = "Snoop", + .se_cmp = sort__snoop_cmp, + .se_snprintf = hist_entry__snoop_snprintf, + .se_width_idx = HISTC_MEM_SNOOP, +}; + +struct sort_entry sort_mem_dcacheline = { + .se_header = "Data Cacheline", + .se_cmp = sort__dcacheline_cmp, + .se_snprintf = hist_entry__dcacheline_snprintf, + .se_width_idx = HISTC_MEM_DCACHELINE, +}; + +static int64_t +sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->branch_info->flags.abort != + right->branch_info->flags.abort; +} + +static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + static const char *out = "."; + + if (he->branch_info->flags.abort) + out = "A"; + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +struct sort_entry sort_abort = { + .se_header = "Transaction abort", + .se_cmp = sort__abort_cmp, + .se_snprintf = hist_entry__abort_snprintf, + .se_width_idx = HISTC_ABORT, +}; + +static int64_t +sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->branch_info->flags.in_tx != + right->branch_info->flags.in_tx; +} + +static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + static const char *out = "."; + + if (he->branch_info->flags.in_tx) + out = "T"; + + return repsep_snprintf(bf, size, "%-*s", width, out); +} + +struct sort_entry sort_in_tx = { + .se_header = "Branch in transaction", + .se_cmp = sort__in_tx_cmp, + .se_snprintf = hist_entry__in_tx_snprintf, + .se_width_idx = HISTC_IN_TX, +}; + +static int64_t +sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return left->transaction - right->transaction; +} + +static inline char *add_str(char *p, const char *str) +{ + strcpy(p, str); + return p + strlen(str); +} + +static struct txbit { + unsigned flag; + const char *name; + int skip_for_len; +} txbits[] = { + { PERF_TXN_ELISION, "EL ", 0 }, + { PERF_TXN_TRANSACTION, "TX ", 1 }, + { PERF_TXN_SYNC, "SYNC ", 1 }, + { PERF_TXN_ASYNC, "ASYNC ", 0 }, + { PERF_TXN_RETRY, "RETRY ", 0 }, + { PERF_TXN_CONFLICT, "CON ", 0 }, + { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, + { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, + { 0, NULL, 0 } +}; + +int hist_entry__transaction_len(void) +{ + int i; + int len = 0; + + for (i = 0; txbits[i].name; i++) { + if (!txbits[i].skip_for_len) + len += strlen(txbits[i].name); + } + len += 4; /* :XX<space> */ + return len; +} + +static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + u64 t = he->transaction; + char buf[128]; + char *p = buf; + int i; + + buf[0] = 0; + for (i = 0; txbits[i].name; i++) + if (txbits[i].flag & t) + p = add_str(p, txbits[i].name); + if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) + p = add_str(p, "NEITHER "); + if (t & PERF_TXN_ABORT_MASK) { + sprintf(p, ":%" PRIx64, + (t & PERF_TXN_ABORT_MASK) >> + PERF_TXN_ABORT_SHIFT); + p += strlen(p); + } + + return repsep_snprintf(bf, size, "%-*s", width, buf); +} + +struct sort_entry sort_transaction = { + .se_header = "Transaction ", + .se_cmp = sort__transaction_cmp, + .se_snprintf = hist_entry__transaction_snprintf, + .se_width_idx = HISTC_TRANSACTION, +}; + struct sort_dimension { const char *name; struct sort_entry *entry; @@ -484,30 +1111,249 @@ struct sort_dimension { #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } -static struct 
sort_dimension sort_dimensions[] = { +static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_PID, "pid", sort_thread), DIM(SORT_COMM, "comm", sort_comm), DIM(SORT_DSO, "dso", sort_dso), - DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), - DIM(SORT_DSO_TO, "dso_to", sort_dso_to), DIM(SORT_SYM, "symbol", sort_sym), - DIM(SORT_SYM_TO, "symbol_from", sort_sym_from), - DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to), DIM(SORT_PARENT, "parent", sort_parent), DIM(SORT_CPU, "cpu", sort_cpu), - DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), DIM(SORT_SRCLINE, "srcline", sort_srcline), + DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), + DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), + DIM(SORT_TRANSACTION, "transaction", sort_transaction), +}; + +#undef DIM + +#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } + +static struct sort_dimension bstack_sort_dimensions[] = { + DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), + DIM(SORT_DSO_TO, "dso_to", sort_dso_to), + DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), + DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), + DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), + DIM(SORT_IN_TX, "in_tx", sort_in_tx), + DIM(SORT_ABORT, "abort", sort_abort), +}; + +#undef DIM + +#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } + +static struct sort_dimension memory_sort_dimensions[] = { + DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), + DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), + DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), + DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), + DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), + DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), + DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), +}; + +#undef DIM + +struct hpp_dimension { + const char *name; + struct perf_hpp_fmt *fmt; + int taken; +}; + +#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } + +static struct hpp_dimension hpp_sort_dimensions[] = { + DIM(PERF_HPP__OVERHEAD, "overhead"), + DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), + DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), + DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), + DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), + DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), + DIM(PERF_HPP__SAMPLES, "sample"), + DIM(PERF_HPP__PERIOD, "period"), }; +#undef DIM + +struct hpp_sort_entry { + struct perf_hpp_fmt hpp; + struct sort_entry *se; +}; + +bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) +{ + struct hpp_sort_entry *hse_a; + struct hpp_sort_entry *hse_b; + + if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) + return false; + + hse_a = container_of(a, struct hpp_sort_entry, hpp); + hse_b = container_of(b, struct hpp_sort_entry, hpp); + + return hse_a->se == hse_b->se; +} + +void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) +{ + struct hpp_sort_entry *hse; + + if (!perf_hpp__is_sort_entry(fmt)) + return; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + hists__new_col_len(hists, hse->se->se_width_idx, + strlen(hse->se->se_header)); +} + +static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct perf_evsel *evsel) +{ + struct hpp_sort_entry *hse; + size_t len; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + len = hists__col_len(&evsel->hists, hse->se->se_width_idx); + + return scnprintf(hpp->buf, hpp->size, "%*s", len, hse->se->se_header); +} + +static int 
__sort__hpp_width(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp __maybe_unused, + struct perf_evsel *evsel) +{ + struct hpp_sort_entry *hse; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + + return hists__col_len(&evsel->hists, hse->se->se_width_idx); +} + +static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct hpp_sort_entry *hse; + size_t len; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + len = hists__col_len(he->hists, hse->se->se_width_idx); + + return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); +} + +static struct hpp_sort_entry * +__sort_dimension__alloc_hpp(struct sort_dimension *sd) +{ + struct hpp_sort_entry *hse; + + hse = malloc(sizeof(*hse)); + if (hse == NULL) { + pr_err("Memory allocation failed\n"); + return NULL; + } + + hse->se = sd->entry; + hse->hpp.header = __sort__hpp_header; + hse->hpp.width = __sort__hpp_width; + hse->hpp.entry = __sort__hpp_entry; + hse->hpp.color = NULL; + + hse->hpp.cmp = sd->entry->se_cmp; + hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp; + hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse; + + INIT_LIST_HEAD(&hse->hpp.list); + INIT_LIST_HEAD(&hse->hpp.sort_list); + hse->hpp.elide = false; + + return hse; +} + +bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) +{ + return format->header == __sort__hpp_header; +} + +static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd) +{ + struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd); + + if (hse == NULL) + return -1; + + perf_hpp__register_sort_field(&hse->hpp); + return 0; +} + +static int __sort_dimension__add_hpp_output(struct sort_dimension *sd) +{ + struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd); + + if (hse == NULL) + return -1; + + perf_hpp__column_register(&hse->hpp); + return 0; +} + +static int __sort_dimension__add(struct sort_dimension *sd) +{ + if (sd->taken) + return 0; + + if (__sort_dimension__add_hpp_sort(sd) < 0) + return -1; + + if (sd->entry->se_collapse) + sort__need_collapse = 1; + + sd->taken = 1; + + return 0; +} + +static int __hpp_dimension__add(struct hpp_dimension *hd) +{ + if (!hd->taken) { + hd->taken = 1; + + perf_hpp__register_sort_field(hd->fmt); + } + return 0; +} + +static int __sort_dimension__add_output(struct sort_dimension *sd) +{ + if (sd->taken) + return 0; + + if (__sort_dimension__add_hpp_output(sd) < 0) + return -1; + + sd->taken = 1; + return 0; +} + +static int __hpp_dimension__add_output(struct hpp_dimension *hd) +{ + if (!hd->taken) { + hd->taken = 1; + + perf_hpp__column_register(hd->fmt); + } + return 0; +} + int sort_dimension__add(const char *tok) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { + struct sort_dimension *sd = &common_sort_dimensions[i]; if (strncasecmp(tok, sd->name, strlen(tok))) continue; + if (sd->entry == &sort_parent) { int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); if (ret) { @@ -518,73 +1364,343 @@ int sort_dimension__add(const char *tok) return -EINVAL; } sort__has_parent = 1; - } else if (sd->entry == &sort_sym || - sd->entry == &sort_sym_from || - sd->entry == &sort_sym_to) { + } else if (sd->entry == &sort_sym) { sort__has_sym = 1; + } else if (sd->entry == &sort_dso) { + sort__has_dso = 1; } - if (sd->taken) - return 0; + return __sort_dimension__add(sd); + } - if (sd->entry->se_collapse) - sort__need_collapse = 1; 
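__sort_dimension__alloc_hpp above wires the wrapped sort_entry's callbacks into the hpp format using the GNU ?: extension: collapse falls back to cmp when no se_collapse is provided, and sort falls back to whatever collapse resolved to. Here is a small standalone sketch of that fallback chain written with plain conditionals; the struct and function names are illustrative stand-ins, not perf's types.

	#include <stdio.h>

	typedef int (*cmp_fn)(int, int);

	/* Stand-ins for se_cmp / se_collapse / se_sort on a sort_entry. */
	struct entry_ops {
		cmp_fn cmp;		/* always present */
		cmp_fn collapse;	/* optional */
		cmp_fn sort;		/* optional */
	};

	static int by_value(int a, int b) { return a - b; }
	static int by_parity(int a, int b) { return (a & 1) - (b & 1); }

	/* Same chain as `se_collapse ?: se_cmp` and `se_sort ?: collapse`. */
	static void resolve(const struct entry_ops *ops, cmp_fn *collapse, cmp_fn *sort)
	{
		*collapse = ops->collapse ? ops->collapse : ops->cmp;
		*sort = ops->sort ? ops->sort : *collapse;
	}

	int main(void)
	{
		struct entry_ops ops = { .cmp = by_value, .collapse = by_parity };
		cmp_fn collapse, sort;

		resolve(&ops, &collapse, &sort);
		printf("collapse(3,4)=%d sort(3,4)=%d\n", collapse(3, 4), sort(3, 4));
		return 0;
	}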
- - if (list_empty(&hist_entry__sort_list)) { - if (!strcmp(sd->name, "pid")) - sort__first_dimension = SORT_PID; - else if (!strcmp(sd->name, "comm")) - sort__first_dimension = SORT_COMM; - else if (!strcmp(sd->name, "dso")) - sort__first_dimension = SORT_DSO; - else if (!strcmp(sd->name, "symbol")) - sort__first_dimension = SORT_SYM; - else if (!strcmp(sd->name, "parent")) - sort__first_dimension = SORT_PARENT; - else if (!strcmp(sd->name, "cpu")) - sort__first_dimension = SORT_CPU; - else if (!strcmp(sd->name, "symbol_from")) - sort__first_dimension = SORT_SYM_FROM; - else if (!strcmp(sd->name, "symbol_to")) - sort__first_dimension = SORT_SYM_TO; - else if (!strcmp(sd->name, "dso_from")) - sort__first_dimension = SORT_DSO_FROM; - else if (!strcmp(sd->name, "dso_to")) - sort__first_dimension = SORT_DSO_TO; - else if (!strcmp(sd->name, "mispredict")) - sort__first_dimension = SORT_MISPREDICT; - } + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { + struct hpp_dimension *hd = &hpp_sort_dimensions[i]; + + if (strncasecmp(tok, hd->name, strlen(tok))) + continue; + + return __hpp_dimension__add(hd); + } - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { + struct sort_dimension *sd = &bstack_sort_dimensions[i]; + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sort__mode != SORT_MODE__BRANCH) + return -EINVAL; + + if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) + sort__has_sym = 1; + + __sort_dimension__add(sd); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { + struct sort_dimension *sd = &memory_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sort__mode != SORT_MODE__MEMORY) + return -EINVAL; + + if (sd->entry == &sort_mem_daddr_sym) + sort__has_sym = 1; + + __sort_dimension__add(sd); return 0; } + return -ESRCH; } -void setup_sorting(const char * const usagestr[], const struct option *opts) +static const char *get_default_sort_order(void) +{ + const char *default_sort_orders[] = { + default_sort_order, + default_branch_sort_order, + default_mem_sort_order, + default_top_sort_order, + default_diff_sort_order, + }; + + BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); + + return default_sort_orders[sort__mode]; +} + +static int __setup_sorting(void) { - char *tmp, *tok, *str = strdup(sort_order); + char *tmp, *tok, *str; + const char *sort_keys = sort_order; + int ret = 0; + + if (sort_keys == NULL) { + if (field_order) { + /* + * If user specified field order but no sort order, + * we'll honor it and not add default sort orders. 
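sort_dimension__add above matches each --sort token against the dimension tables with strncasecmp(tok, sd->name, strlen(tok)), so a case-insensitive prefix such as "sym" selects "symbol", and a token matching nothing falls through to -ESRCH. A standalone sketch of that prefix lookup follows; the table is a made-up subset, not the real dimension arrays.

	#include <stdio.h>
	#include <string.h>
	#include <strings.h>	/* strncasecmp */

	static const char * const dim_names[] = {
		"pid", "comm", "dso", "symbol", "parent", "cpu", "srcline",
	};

	/* Return the first dimension whose name starts with tok (case-insensitive). */
	static const char *find_dimension(const char *tok)
	{
		size_t i;

		for (i = 0; i < sizeof(dim_names) / sizeof(dim_names[0]); i++) {
			if (strncasecmp(tok, dim_names[i], strlen(tok)) == 0)
				return dim_names[i];
		}
		return NULL;	/* the real code reports -ESRCH: "Unknown --sort key" */
	}

	int main(void)
	{
		const char *hit = find_dimension("SYM");
		const char *miss = find_dimension("xyz");

		printf("%s\n", hit);			/* prints "symbol" */
		printf("%s\n", miss ? miss : "unknown");
		return 0;
	}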
+ */ + return 0; + } + + sort_keys = get_default_sort_order(); + } + + str = strdup(sort_keys); + if (str == NULL) { + error("Not enough memory to setup sort keys"); + return -ENOMEM; + } for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { - if (sort_dimension__add(tok) < 0) { + ret = sort_dimension__add(tok); + if (ret == -EINVAL) { + error("Invalid --sort key: `%s'", tok); + break; + } else if (ret == -ESRCH) { error("Unknown --sort key: `%s'", tok); - usage_with_options(usagestr, opts); + break; } } free(str); + return ret; +} + +void perf_hpp__set_elide(int idx, bool elide) +{ + struct perf_hpp_fmt *fmt; + struct hpp_sort_entry *hse; + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + if (hse->se->se_width_idx == idx) { + fmt->elide = elide; + break; + } + } } -void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, - const char *list_name, FILE *fp) +static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) { if (list && strlist__nr_entries(list) == 1) { if (fp != NULL) fprintf(fp, "# %s: %s\n", list_name, strlist__entry(list, 0)->s); - self->elide = true; + return true; + } + return false; +} + +static bool get_elide(int idx, FILE *output) +{ + switch (idx) { + case HISTC_SYMBOL: + return __get_elide(symbol_conf.sym_list, "symbol", output); + case HISTC_DSO: + return __get_elide(symbol_conf.dso_list, "dso", output); + case HISTC_COMM: + return __get_elide(symbol_conf.comm_list, "comm", output); + default: + break; + } + + if (sort__mode != SORT_MODE__BRANCH) + return false; + + switch (idx) { + case HISTC_SYMBOL_FROM: + return __get_elide(symbol_conf.sym_from_list, "sym_from", output); + case HISTC_SYMBOL_TO: + return __get_elide(symbol_conf.sym_to_list, "sym_to", output); + case HISTC_DSO_FROM: + return __get_elide(symbol_conf.dso_from_list, "dso_from", output); + case HISTC_DSO_TO: + return __get_elide(symbol_conf.dso_to_list, "dso_to", output); + default: + break; + } + + return false; +} + +void sort__setup_elide(FILE *output) +{ + struct perf_hpp_fmt *fmt; + struct hpp_sort_entry *hse; + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + hse = container_of(fmt, struct hpp_sort_entry, hpp); + fmt->elide = get_elide(hse->se->se_width_idx, output); + } + + /* + * It makes no sense to elide all of sort entries. + * Just revert them to show up again. 
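__setup_sorting above duplicates the key string and walks it with strtok_r over the ", " separator set, handing each token to sort_dimension__add and distinguishing -EINVAL (key known but not valid in the current sort mode) from -ESRCH (unknown key). Below is a standalone sketch of that tokenizing loop, assuming a toy add_key() handler in place of the real dimension lookup.

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Stand-in for sort_dimension__add(): accept a few names, reject the rest. */
	static int add_key(const char *tok)
	{
		if (!strcmp(tok, "comm") || !strcmp(tok, "dso") || !strcmp(tok, "symbol"))
			return 0;
		if (!strcmp(tok, "mispredict"))
			return -EINVAL;	/* branch-stack key outside branch mode */
		return -ESRCH;		/* unknown key */
	}

	static int parse_keys(const char *keys)
	{
		char *tmp, *tok, *str = strdup(keys);	/* strtok_r modifies the buffer */
		int ret = 0;

		if (!str)
			return -ENOMEM;

		for (tok = strtok_r(str, ", ", &tmp); tok;
		     tok = strtok_r(NULL, ", ", &tmp)) {
			ret = add_key(tok);
			if (ret == -EINVAL) {
				fprintf(stderr, "Invalid key: `%s'\n", tok);
				break;
			} else if (ret == -ESRCH) {
				fprintf(stderr, "Unknown key: `%s'\n", tok);
				break;
			}
			printf("added %s\n", tok);
		}

		free(str);
		return ret;
	}

	int main(void)
	{
		return parse_keys("comm,dso,symbol") ? EXIT_FAILURE : EXIT_SUCCESS;
	}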
+ */ + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + if (!fmt->elide) + return; } + + perf_hpp__for_each_format(fmt) { + if (!perf_hpp__is_sort_entry(fmt)) + continue; + + fmt->elide = false; + } +} + +static int output_field_add(char *tok) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { + struct sort_dimension *sd = &common_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + return __sort_dimension__add_output(sd); + } + + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { + struct hpp_dimension *hd = &hpp_sort_dimensions[i]; + + if (strncasecmp(tok, hd->name, strlen(tok))) + continue; + + return __hpp_dimension__add_output(hd); + } + + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { + struct sort_dimension *sd = &bstack_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + return __sort_dimension__add_output(sd); + } + + for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { + struct sort_dimension *sd = &memory_sort_dimensions[i]; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + return __sort_dimension__add_output(sd); + } + + return -ESRCH; +} + +static void reset_dimensions(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) + common_sort_dimensions[i].taken = 0; + + for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) + hpp_sort_dimensions[i].taken = 0; + + for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) + bstack_sort_dimensions[i].taken = 0; + + for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) + memory_sort_dimensions[i].taken = 0; +} + +static int __setup_output_field(void) +{ + char *tmp, *tok, *str; + int ret = 0; + + if (field_order == NULL) + return 0; + + reset_dimensions(); + + str = strdup(field_order); + if (str == NULL) { + error("Not enough memory to setup output fields"); + return -ENOMEM; + } + + for (tok = strtok_r(str, ", ", &tmp); + tok; tok = strtok_r(NULL, ", ", &tmp)) { + ret = output_field_add(tok); + if (ret == -EINVAL) { + error("Invalid --fields key: `%s'", tok); + break; + } else if (ret == -ESRCH) { + error("Unknown --fields key: `%s'", tok); + break; + } + } + + free(str); + return ret; +} + +int setup_sorting(void) +{ + int err; + + err = __setup_sorting(); + if (err < 0) + return err; + + if (parent_pattern != default_parent_pattern) { + err = sort_dimension__add("parent"); + if (err < 0) + return err; + } + + reset_dimensions(); + + /* + * perf diff doesn't use default hpp output fields. 
+ */ + if (sort__mode != SORT_MODE__DIFF) + perf_hpp__init(); + + err = __setup_output_field(); + if (err < 0) + return err; + + /* copy sort keys to output fields */ + perf_hpp__setup_output_field(); + /* and then copy output fields to sort keys */ + perf_hpp__append_sort_keys(); + + return 0; +} + +void reset_output_field(void) +{ + sort__need_collapse = 0; + sort__has_parent = 0; + sort__has_sym = 0; + sort__has_dso = 0; + + field_order = NULL; + sort_order = NULL; + + reset_dimensions(); + perf_hpp__reset_output_field(); } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 5786f323b59..041f0c9cea2 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -20,19 +20,21 @@ #include "parse-options.h" #include "parse-events.h" - +#include "hist.h" #include "thread.h" -#include "sort.h" extern regex_t parent_regex; extern const char *sort_order; +extern const char *field_order; extern const char default_parent_pattern[]; extern const char *parent_pattern; extern const char default_sort_order[]; +extern regex_t ignore_callees_regex; +extern int have_ignore_callees; extern int sort__need_collapse; extern int sort__has_parent; extern int sort__has_sym; -extern int sort__branch_mode; +extern enum sort_mode sort__mode; extern struct sort_entry sort_comm; extern struct sort_entry sort_dso; extern struct sort_entry sort_sym; @@ -49,9 +51,23 @@ struct he_stat { u64 period_us; u64 period_guest_sys; u64 period_guest_us; + u64 weight; u32 nr_events; }; +struct hist_entry_diff { + bool computed; + + /* PERF_HPP__DELTA */ + double period_ratio_delta; + + /* PERF_HPP__RATIO */ + double period_ratio; + + /* HISTC_WEIGHTED_DIFF */ + s64 wdiff; +}; + /** * struct hist_entry - histogram entry * @@ -61,11 +77,24 @@ struct he_stat { struct hist_entry { struct rb_node rb_node_in; struct rb_node rb_node; + union { + struct list_head node; + struct list_head head; + } pairs; struct he_stat stat; + struct he_stat *stat_acc; struct map_symbol ms; struct thread *thread; + struct comm *comm; u64 ip; + u64 transaction; s32 cpu; + u8 cpumode; + + struct hist_entry_diff diff; + + /* We are added by hists__add_dummy_entry. 
*/ + bool dummy; /* XXX These two should move to some tree widget lib */ u16 row_offset; @@ -78,28 +107,86 @@ struct hist_entry { char *srcline; struct symbol *parent; unsigned long position; - union { - struct hist_entry *pair; - struct rb_root sorted_chain; - }; + struct rb_root sorted_chain; struct branch_info *branch_info; struct hists *hists; - struct callchain_root callchain[0]; + struct mem_info *mem_info; + struct callchain_root callchain[0]; /* must be last member */ +}; + +static inline bool hist_entry__has_pairs(struct hist_entry *he) +{ + return !list_empty(&he->pairs.node); +} + +static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he) +{ + if (hist_entry__has_pairs(he)) + return list_entry(he->pairs.node.next, struct hist_entry, pairs.node); + return NULL; +} + +static inline void hist_entry__add_pair(struct hist_entry *pair, + struct hist_entry *he) +{ + list_add_tail(&pair->pairs.node, &he->pairs.head); +} + +static inline float hist_entry__get_percent_limit(struct hist_entry *he) +{ + u64 period = he->stat.period; + u64 total_period = hists__total_period(he->hists); + + if (unlikely(total_period == 0)) + return 0; + + if (symbol_conf.cumulate_callchain) + period = he->stat_acc->period; + + return period * 100.0 / total_period; +} + + +enum sort_mode { + SORT_MODE__NORMAL, + SORT_MODE__BRANCH, + SORT_MODE__MEMORY, + SORT_MODE__TOP, + SORT_MODE__DIFF, }; enum sort_type { + /* common sort keys */ SORT_PID, SORT_COMM, SORT_DSO, SORT_SYM, SORT_PARENT, SORT_CPU, - SORT_DSO_FROM, + SORT_SRCLINE, + SORT_LOCAL_WEIGHT, + SORT_GLOBAL_WEIGHT, + SORT_TRANSACTION, + + /* branch stack specific sort keys */ + __SORT_BRANCH_STACK, + SORT_DSO_FROM = __SORT_BRANCH_STACK, SORT_DSO_TO, SORT_SYM_FROM, SORT_SYM_TO, SORT_MISPREDICT, - SORT_SRCLINE, + SORT_ABORT, + SORT_IN_TX, + + /* memory mode specific sort keys */ + __SORT_MEMORY_MODE, + SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE, + SORT_MEM_DADDR_DSO, + SORT_MEM_LOCKED, + SORT_MEM_TLB, + SORT_MEM_LVL, + SORT_MEM_SNOOP, + SORT_MEM_DCACHELINE, }; /* @@ -113,18 +200,22 @@ struct sort_entry { int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *); int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *); - int (*se_snprintf)(struct hist_entry *self, char *bf, size_t size, + int64_t (*se_sort)(struct hist_entry *, struct hist_entry *); + int (*se_snprintf)(struct hist_entry *he, char *bf, size_t size, unsigned int width); u8 se_width_idx; - bool elide; }; extern struct sort_entry sort_thread; extern struct list_head hist_entry__sort_list; -void setup_sorting(const char * const usagestr[], const struct option *opts); +int setup_sorting(void); +int setup_output_field(void); +void reset_output_field(void); extern int sort_dimension__add(const char *); -void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list, - const char *list_name, FILE *fp); +void sort__setup_elide(FILE *fp); +void perf_hpp__set_elide(int idx, bool elide); + +int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); #endif /* __PERF_SORT_H */ diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c new file mode 100644 index 00000000000..f3e4bc5fe5d --- /dev/null +++ b/tools/perf/util/srcline.c @@ -0,0 +1,299 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include <linux/kernel.h> + +#include "util/dso.h" +#include "util/util.h" +#include "util/debug.h" + +#ifdef HAVE_LIBBFD_SUPPORT + +/* + * Implement addr2line using libbfd. 
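The reworked enum sort_type above partitions the keys into common, branch-stack and memory sections using the __SORT_BRANCH_STACK and __SORT_MEMORY_MODE sentinels, and the per-section DIM() macros index their tables by subtracting the section's sentinel. A standalone sketch of that sentinel-offset idiom; the enumerators and table here are illustrative, not the real ones.

	#include <stdio.h>

	enum key_type {
		/* common keys */
		KEY_PID,
		KEY_COMM,

		/* branch-stack keys: indexed relative to the sentinel */
		__KEY_BRANCH,
		KEY_DSO_FROM = __KEY_BRANCH,
		KEY_DSO_TO,
	};

	#define DIM(d, n) [d - __KEY_BRANCH] = n

	static const char * const branch_names[] = {
		DIM(KEY_DSO_FROM, "dso_from"),
		DIM(KEY_DSO_TO,   "dso_to"),
	};

	int main(void)
	{
		/* A branch key maps to its table slot via the same subtraction. */
		enum key_type k = KEY_DSO_TO;

		printf("%s\n", branch_names[k - __KEY_BRANCH]);	/* "dso_to" */
		return 0;
	}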
+ */ +#define PACKAGE "perf" +#include <bfd.h> + +struct a2l_data { + const char *input; + unsigned long addr; + + bool found; + const char *filename; + const char *funcname; + unsigned line; + + bfd *abfd; + asymbol **syms; +}; + +static int bfd_error(const char *string) +{ + const char *errmsg; + + errmsg = bfd_errmsg(bfd_get_error()); + fflush(stdout); + + if (string) + pr_debug("%s: %s\n", string, errmsg); + else + pr_debug("%s\n", errmsg); + + return -1; +} + +static int slurp_symtab(bfd *abfd, struct a2l_data *a2l) +{ + long storage; + long symcount; + asymbol **syms; + bfd_boolean dynamic = FALSE; + + if ((bfd_get_file_flags(abfd) & HAS_SYMS) == 0) + return bfd_error(bfd_get_filename(abfd)); + + storage = bfd_get_symtab_upper_bound(abfd); + if (storage == 0L) { + storage = bfd_get_dynamic_symtab_upper_bound(abfd); + dynamic = TRUE; + } + if (storage < 0L) + return bfd_error(bfd_get_filename(abfd)); + + syms = malloc(storage); + if (dynamic) + symcount = bfd_canonicalize_dynamic_symtab(abfd, syms); + else + symcount = bfd_canonicalize_symtab(abfd, syms); + + if (symcount < 0) { + free(syms); + return bfd_error(bfd_get_filename(abfd)); + } + + a2l->syms = syms; + return 0; +} + +static void find_address_in_section(bfd *abfd, asection *section, void *data) +{ + bfd_vma pc, vma; + bfd_size_type size; + struct a2l_data *a2l = data; + + if (a2l->found) + return; + + if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0) + return; + + pc = a2l->addr; + vma = bfd_get_section_vma(abfd, section); + size = bfd_get_section_size(section); + + if (pc < vma || pc >= vma + size) + return; + + a2l->found = bfd_find_nearest_line(abfd, section, a2l->syms, pc - vma, + &a2l->filename, &a2l->funcname, + &a2l->line); +} + +static struct a2l_data *addr2line_init(const char *path) +{ + bfd *abfd; + struct a2l_data *a2l = NULL; + + abfd = bfd_openr(path, NULL); + if (abfd == NULL) + return NULL; + + if (!bfd_check_format(abfd, bfd_object)) + goto out; + + a2l = zalloc(sizeof(*a2l)); + if (a2l == NULL) + goto out; + + a2l->abfd = abfd; + a2l->input = strdup(path); + if (a2l->input == NULL) + goto out; + + if (slurp_symtab(abfd, a2l)) + goto out; + + return a2l; + +out: + if (a2l) { + zfree((char **)&a2l->input); + free(a2l); + } + bfd_close(abfd); + return NULL; +} + +static void addr2line_cleanup(struct a2l_data *a2l) +{ + if (a2l->abfd) + bfd_close(a2l->abfd); + zfree((char **)&a2l->input); + zfree(&a2l->syms); + free(a2l); +} + +static int addr2line(const char *dso_name, unsigned long addr, + char **file, unsigned int *line, struct dso *dso) +{ + int ret = 0; + struct a2l_data *a2l = dso->a2l; + + if (!a2l) { + dso->a2l = addr2line_init(dso_name); + a2l = dso->a2l; + } + + if (a2l == NULL) { + pr_warning("addr2line_init failed for %s\n", dso_name); + return 0; + } + + a2l->addr = addr; + a2l->found = false; + + bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); + + if (a2l->found && a2l->filename) { + *file = strdup(a2l->filename); + *line = a2l->line; + + if (*file) + ret = 1; + } + + return ret; +} + +void dso__free_a2l(struct dso *dso) +{ + struct a2l_data *a2l = dso->a2l; + + if (!a2l) + return; + + addr2line_cleanup(a2l); + + dso->a2l = NULL; +} + +#else /* HAVE_LIBBFD_SUPPORT */ + +static int addr2line(const char *dso_name, unsigned long addr, + char **file, unsigned int *line_nr, + struct dso *dso __maybe_unused) +{ + FILE *fp; + char cmd[PATH_MAX]; + char *filename = NULL; + size_t len; + char *sep; + int ret = 0; + + scnprintf(cmd, sizeof(cmd), "addr2line -e %s %016"PRIx64, + 
dso_name, addr); + + fp = popen(cmd, "r"); + if (fp == NULL) { + pr_warning("popen failed for %s\n", dso_name); + return 0; + } + + if (getline(&filename, &len, fp) < 0 || !len) { + pr_warning("addr2line has no output for %s\n", dso_name); + goto out; + } + + sep = strchr(filename, '\n'); + if (sep) + *sep = '\0'; + + if (!strcmp(filename, "??:0")) { + pr_debug("no debugging info in %s\n", dso_name); + free(filename); + goto out; + } + + sep = strchr(filename, ':'); + if (sep) { + *sep++ = '\0'; + *file = filename; + *line_nr = strtoul(sep, NULL, 0); + ret = 1; + } +out: + pclose(fp); + return ret; +} + +void dso__free_a2l(struct dso *dso __maybe_unused) +{ +} + +#endif /* HAVE_LIBBFD_SUPPORT */ + +/* + * Number of addr2line failures (without success) before disabling it for that + * dso. + */ +#define A2L_FAIL_LIMIT 123 + +char *get_srcline(struct dso *dso, unsigned long addr) +{ + char *file = NULL; + unsigned line = 0; + char *srcline; + const char *dso_name; + + if (!dso->has_srcline) + return SRCLINE_UNKNOWN; + + if (dso->symsrc_filename) + dso_name = dso->symsrc_filename; + else + dso_name = dso->long_name; + + if (dso_name[0] == '[') + goto out; + + if (!strncmp(dso_name, "/tmp/perf-", 10)) + goto out; + + if (!addr2line(dso_name, addr, &file, &line, dso)) + goto out; + + if (asprintf(&srcline, "%s:%u", file, line) < 0) { + free(file); + goto out; + } + + dso->a2l_fails = 0; + + free(file); + return srcline; + +out: + if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) { + dso->has_srcline = 0; + dso__free_a2l(dso); + } + return SRCLINE_UNKNOWN; +} + +void free_srcline(char *srcline) +{ + if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) + free(srcline); +} diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 23742126f47..6506b3dfb60 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -10,6 +10,12 @@ void update_stats(struct stats *stats, u64 val) delta = val - stats->mean; stats->mean += delta / stats->n; stats->M2 += delta*(val - stats->mean); + + if (val > stats->max) + stats->max = val; + + if (val < stats->min) + stats->min = val; } double avg_stats(struct stats *stats) @@ -37,7 +43,7 @@ double stddev_stats(struct stats *stats) { double variance, variance_mean; - if (!stats->n) + if (stats->n < 2) return 0.0; variance = stats->M2 / (stats->n - 1); diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 588367c3c76..5667fc3e39c 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -1,11 +1,12 @@ #ifndef __PERF_STATS_H #define __PERF_STATS_H -#include "types.h" +#include <linux/types.h> struct stats { double n, mean, M2; + u64 max, min; }; void update_stats(struct stats *stats, u64 val); @@ -13,4 +14,12 @@ double avg_stats(struct stats *stats); double stddev_stats(struct stats *stats); double rel_stddev_stats(double stddev, double avg); +static inline void init_stats(struct stats *stats) +{ + stats->n = 0.0; + stats->mean = 0.0; + stats->M2 = 0.0; + stats->min = (u64) -1; + stats->max = 0; +} #endif diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c index 2eeb51baf07..4abe23550c7 100644 --- a/tools/perf/util/strbuf.c +++ b/tools/perf/util/strbuf.c @@ -28,7 +28,7 @@ void strbuf_init(struct strbuf *sb, ssize_t hint) void strbuf_release(struct strbuf *sb) { if (sb->alloc) { - free(sb->buf); + zfree(&sb->buf); strbuf_init(sb, 0); } } @@ -90,17 +90,17 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...) 
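The stat.c hunk above extends update_stats() with running min/max bookkeeping and makes stddev_stats() return 0 for fewer than two samples, since M2 / (n - 1) is undefined for a single value. Here is a standalone sketch of that online (Welford) mean/variance update with the same min/max additions; the field names mirror struct stats, but this sketch returns the plain sample standard deviation rather than perf's exact figure.

	#include <math.h>
	#include <stdint.h>
	#include <stdio.h>

	struct stats {
		double n, mean, M2;
		uint64_t max, min;
	};

	static void init_stats(struct stats *s)
	{
		s->n = s->mean = s->M2 = 0.0;
		s->min = UINT64_MAX;
		s->max = 0;
	}

	/* Welford's online update, plus the new min/max bookkeeping. */
	static void update_stats(struct stats *s, uint64_t val)
	{
		double delta;

		s->n += 1.0;
		delta = val - s->mean;
		s->mean += delta / s->n;
		s->M2 += delta * (val - s->mean);

		if (val > s->max)
			s->max = val;
		if (val < s->min)
			s->min = val;
	}

	static double stddev_stats(struct stats *s)
	{
		if (s->n < 2)		/* variance needs at least two samples */
			return 0.0;
		return sqrt(s->M2 / (s->n - 1));
	}

	int main(void)
	{
		struct stats s;
		uint64_t v[] = { 10, 12, 14 };
		size_t i;

		init_stats(&s);
		for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
			update_stats(&s, v[i]);

		printf("mean=%.1f stddev=%.1f min=%llu max=%llu\n", s.mean,
		       stddev_stats(&s), (unsigned long long)s.min,
		       (unsigned long long)s.max);
		return 0;
	}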
if (!strbuf_avail(sb)) strbuf_grow(sb, 64); va_start(ap, fmt); - len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); va_end(ap); if (len < 0) - die("your vscnprintf is broken"); + die("your vsnprintf is broken"); if (len > strbuf_avail(sb)) { strbuf_grow(sb, len); va_start(ap, fmt); - len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); + len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap); va_end(ap); if (len > strbuf_avail(sb)) { - die("this should not happen, your snprintf is broken"); + die("this should not happen, your vsnprintf is broken"); } } strbuf_setlen(sb, sb->len + len); diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c index 834c8ebfe38..79a757a2a15 100644 --- a/tools/perf/util/strfilter.c +++ b/tools/perf/util/strfilter.c @@ -10,22 +10,22 @@ static const char *OP_not = "!"; /* Logical NOT */ #define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!') #define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')') -static void strfilter_node__delete(struct strfilter_node *self) +static void strfilter_node__delete(struct strfilter_node *node) { - if (self) { - if (self->p && !is_operator(*self->p)) - free((char *)self->p); - strfilter_node__delete(self->l); - strfilter_node__delete(self->r); - free(self); + if (node) { + if (node->p && !is_operator(*node->p)) + zfree((char **)&node->p); + strfilter_node__delete(node->l); + strfilter_node__delete(node->r); + free(node); } } -void strfilter__delete(struct strfilter *self) +void strfilter__delete(struct strfilter *filter) { - if (self) { - strfilter_node__delete(self->root); - free(self); + if (filter) { + strfilter_node__delete(filter->root); + free(filter); } } @@ -62,15 +62,15 @@ static struct strfilter_node *strfilter_node__alloc(const char *op, struct strfilter_node *l, struct strfilter_node *r) { - struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node)); + struct strfilter_node *node = zalloc(sizeof(*node)); - if (ret) { - ret->p = op; - ret->l = l; - ret->r = r; + if (node) { + node->p = op; + node->l = l; + node->r = r; } - return ret; + return node; } static struct strfilter_node *strfilter_node__new(const char *s, @@ -154,46 +154,46 @@ error: */ struct strfilter *strfilter__new(const char *rules, const char **err) { - struct strfilter *ret = zalloc(sizeof(struct strfilter)); + struct strfilter *filter = zalloc(sizeof(*filter)); const char *ep = NULL; - if (ret) - ret->root = strfilter_node__new(rules, &ep); + if (filter) + filter->root = strfilter_node__new(rules, &ep); - if (!ret || !ret->root || *ep != '\0') { + if (!filter || !filter->root || *ep != '\0') { if (err) *err = ep; - strfilter__delete(ret); - ret = NULL; + strfilter__delete(filter); + filter = NULL; } - return ret; + return filter; } -static bool strfilter_node__compare(struct strfilter_node *self, +static bool strfilter_node__compare(struct strfilter_node *node, const char *str) { - if (!self || !self->p) + if (!node || !node->p) return false; - switch (*self->p) { + switch (*node->p) { case '|': /* OR */ - return strfilter_node__compare(self->l, str) || - strfilter_node__compare(self->r, str); + return strfilter_node__compare(node->l, str) || + strfilter_node__compare(node->r, str); case '&': /* AND */ - return strfilter_node__compare(self->l, str) && - strfilter_node__compare(self->r, str); + return strfilter_node__compare(node->l, str) && + strfilter_node__compare(node->r, str); case '!': /* NOT */ - 
return !strfilter_node__compare(self->r, str); + return !strfilter_node__compare(node->r, str); default: - return strglobmatch(str, self->p); + return strglobmatch(str, node->p); } } /* Return true if STR matches the filter rules */ -bool strfilter__compare(struct strfilter *self, const char *str) +bool strfilter__compare(struct strfilter *filter, const char *str) { - if (!self) + if (!filter) return false; - return strfilter_node__compare(self->root, str); + return strfilter_node__compare(filter->root, str); } diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h index 00f58a7506d..fe611f3c9e3 100644 --- a/tools/perf/util/strfilter.h +++ b/tools/perf/util/strfilter.h @@ -30,19 +30,19 @@ struct strfilter *strfilter__new(const char *rules, const char **err); /** * strfilter__compare - compare given string and a string filter - * @self: String filter + * @filter: String filter * @str: target string * - * Compare @str and @self. Return true if the str match the rule + * Compare @str and @filter. Return true if the str match the rule */ -bool strfilter__compare(struct strfilter *self, const char *str); +bool strfilter__compare(struct strfilter *filter, const char *str); /** * strfilter__delete - delete a string filter - * @self: String filter to delete + * @filter: String filter to delete * - * Delete @self. + * Delete @filter. */ -void strfilter__delete(struct strfilter *self); +void strfilter__delete(struct strfilter *filter); #endif diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 32170590892..2553e5b55b8 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -128,7 +128,7 @@ void argv_free(char **argv) { char **p; for (p = argv; *p; p++) - free(*p); + zfree(p); free(argv); } @@ -314,6 +314,42 @@ int strtailcmp(const char *s1, const char *s2) } /** + * strxfrchar - Locate and replace character in @s + * @s: The string to be searched/changed. + * @from: Source character to be replaced. + * @to: Destination character. + * + * Return pointer to the changed string. + */ +char *strxfrchar(char *s, char from, char to) +{ + char *p = s; + + while ((p = strchr(p, from)) != NULL) + *p++ = to; + + return s; +} + +/** + * ltrim - Removes leading whitespace from @s. + * @s: The string to be stripped. + * + * Return pointer to the first non-whitespace character in @s. + */ +char *ltrim(char *s) +{ + int len = strlen(s); + + while (len && isspace(*s)) { + len--; + s++; + } + + return s; +} + +/** * rtrim - Removes trailing whitespace from @s. * @s: The string to be stripped. * @@ -351,3 +387,27 @@ void *memdup(const void *src, size_t len) return p; } + +/** + * str_append - reallocate string and append another + * @s: pointer to string pointer + * @len: pointer to len (initialized) + * @a: string to append. + */ +int str_append(char **s, int *len, const char *a) +{ + int olen = *s ? 
strlen(*s) : 0; + int nlen = olen + strlen(a) + 1; + if (*len < nlen) { + *len = *len * 2; + if (*len < nlen) + *len = nlen; + *s = realloc(*s, *len); + if (!*s) + return -ENOMEM; + if (olen == 0) + **s = 0; + } + strcat(*s, a); + return 0; +} diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c index 155d8b7078a..71f9d102b96 100644 --- a/tools/perf/util/strlist.c +++ b/tools/perf/util/strlist.c @@ -5,6 +5,7 @@ */ #include "strlist.h" +#include "util.h" #include <errno.h> #include <stdio.h> #include <stdlib.h> @@ -35,11 +36,11 @@ out_delete: return NULL; } -static void str_node__delete(struct str_node *self, bool dupstr) +static void str_node__delete(struct str_node *snode, bool dupstr) { if (dupstr) - free((void *)self->s); - free(self); + zfree((char **)&snode->s); + free(snode); } static @@ -59,12 +60,12 @@ static int strlist__node_cmp(struct rb_node *rb_node, const void *entry) return strcmp(snode->s, str); } -int strlist__add(struct strlist *self, const char *new_entry) +int strlist__add(struct strlist *slist, const char *new_entry) { - return rblist__add_node(&self->rblist, new_entry); + return rblist__add_node(&slist->rblist, new_entry); } -int strlist__load(struct strlist *self, const char *filename) +int strlist__load(struct strlist *slist, const char *filename) { char entry[1024]; int err; @@ -80,7 +81,7 @@ int strlist__load(struct strlist *self, const char *filename) continue; entry[len - 1] = '\0'; - err = strlist__add(self, entry); + err = strlist__add(slist, entry); if (err != 0) goto out; } @@ -107,56 +108,56 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry) return snode; } -static int strlist__parse_list_entry(struct strlist *self, const char *s) +static int strlist__parse_list_entry(struct strlist *slist, const char *s) { if (strncmp(s, "file://", 7) == 0) - return strlist__load(self, s + 7); + return strlist__load(slist, s + 7); - return strlist__add(self, s); + return strlist__add(slist, s); } -int strlist__parse_list(struct strlist *self, const char *s) +int strlist__parse_list(struct strlist *slist, const char *s) { char *sep; int err; while ((sep = strchr(s, ',')) != NULL) { *sep = '\0'; - err = strlist__parse_list_entry(self, s); + err = strlist__parse_list_entry(slist, s); *sep = ','; if (err != 0) return err; s = sep + 1; } - return *s ? strlist__parse_list_entry(self, s) : 0; + return *s ? 
strlist__parse_list_entry(slist, s) : 0; } -struct strlist *strlist__new(bool dupstr, const char *slist) +struct strlist *strlist__new(bool dupstr, const char *list) { - struct strlist *self = malloc(sizeof(*self)); + struct strlist *slist = malloc(sizeof(*slist)); - if (self != NULL) { - rblist__init(&self->rblist); - self->rblist.node_cmp = strlist__node_cmp; - self->rblist.node_new = strlist__node_new; - self->rblist.node_delete = strlist__node_delete; + if (slist != NULL) { + rblist__init(&slist->rblist); + slist->rblist.node_cmp = strlist__node_cmp; + slist->rblist.node_new = strlist__node_new; + slist->rblist.node_delete = strlist__node_delete; - self->dupstr = dupstr; - if (slist && strlist__parse_list(self, slist) != 0) + slist->dupstr = dupstr; + if (list && strlist__parse_list(slist, list) != 0) goto out_error; } - return self; + return slist; out_error: - free(self); + free(slist); return NULL; } -void strlist__delete(struct strlist *self) +void strlist__delete(struct strlist *slist) { - if (self != NULL) - rblist__delete(&self->rblist); + if (slist != NULL) + rblist__delete(&slist->rblist); } struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx) diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index dd9f922ec67..5c7f87069d9 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h @@ -17,34 +17,34 @@ struct strlist { }; struct strlist *strlist__new(bool dupstr, const char *slist); -void strlist__delete(struct strlist *self); +void strlist__delete(struct strlist *slist); -void strlist__remove(struct strlist *self, struct str_node *sn); -int strlist__load(struct strlist *self, const char *filename); -int strlist__add(struct strlist *self, const char *str); +void strlist__remove(struct strlist *slist, struct str_node *sn); +int strlist__load(struct strlist *slist, const char *filename); +int strlist__add(struct strlist *slist, const char *str); -struct str_node *strlist__entry(const struct strlist *self, unsigned int idx); -struct str_node *strlist__find(struct strlist *self, const char *entry); +struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx); +struct str_node *strlist__find(struct strlist *slist, const char *entry); -static inline bool strlist__has_entry(struct strlist *self, const char *entry) +static inline bool strlist__has_entry(struct strlist *slist, const char *entry) { - return strlist__find(self, entry) != NULL; + return strlist__find(slist, entry) != NULL; } -static inline bool strlist__empty(const struct strlist *self) +static inline bool strlist__empty(const struct strlist *slist) { - return rblist__empty(&self->rblist); + return rblist__empty(&slist->rblist); } -static inline unsigned int strlist__nr_entries(const struct strlist *self) +static inline unsigned int strlist__nr_entries(const struct strlist *slist) { - return rblist__nr_entries(&self->rblist); + return rblist__nr_entries(&slist->rblist); } /* For strlist iteration */ -static inline struct str_node *strlist__first(struct strlist *self) +static inline struct str_node *strlist__first(struct strlist *slist) { - struct rb_node *rn = rb_first(&self->rblist.entries); + struct rb_node *rn = rb_first(&slist->rblist.entries); return rn ? 
rb_entry(rn, struct str_node, rb_node) : NULL; } static inline struct str_node *strlist__next(struct str_node *sn) @@ -59,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn) /** * strlist_for_each - iterate over a strlist * @pos: the &struct str_node to use as a loop cursor. - * @self: the &struct strlist for loop. + * @slist: the &struct strlist for loop. */ -#define strlist__for_each(pos, self) \ - for (pos = strlist__first(self); pos; pos = strlist__next(pos)) +#define strlist__for_each(pos, slist) \ + for (pos = strlist__first(slist); pos; pos = strlist__next(pos)) /** * strlist_for_each_safe - iterate over a strlist safe against removal of * str_node * @pos: the &struct str_node to use as a loop cursor. * @n: another &struct str_node to use as temporary storage. - * @self: the &struct strlist for loop. + * @slist: the &struct strlist for loop. */ -#define strlist__for_each_safe(pos, n, self) \ - for (pos = strlist__first(self), n = strlist__next(pos); pos;\ +#define strlist__for_each_safe(pos, n, slist) \ + for (pos = strlist__first(slist), n = strlist__next(pos); pos;\ pos = n, n = strlist__next(n)) -int strlist__parse_list(struct strlist *self, const char *s); +int strlist__parse_list(struct strlist *slist, const char *s); #endif /* __PERF_STRLIST_H */ diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index 96c866045d6..6a0a13d07a2 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -17,8 +17,12 @@ #include <stdlib.h> #include <unistd.h> #include <string.h> +#include <linux/bitmap.h> +#include "perf.h" #include "svghelper.h" +#include "util.h" +#include "cpumap.h" static u64 first_time, last_time; static u64 turbo_frequency, max_freq; @@ -28,6 +32,8 @@ static u64 turbo_frequency, max_freq; #define SLOT_HEIGHT 25.0 int svg_page_width = 1000; +u64 svg_highlight; +const char *svg_highlight_name; #define MIN_TEXT_SIZE 0.01 @@ -39,9 +45,14 @@ static double cpu2slot(int cpu) return 2 * cpu + 1; } +static int *topology_map; + static double cpu2y(int cpu) { - return cpu2slot(cpu) * SLOT_MULT; + if (topology_map) + return cpu2slot(topology_map[cpu]) * SLOT_MULT; + else + return cpu2slot(cpu) * SLOT_MULT; } static double time2pixels(u64 __time) @@ -95,6 +106,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT; fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n"); + fprintf(svgfile, "<!DOCTYPE svg SYSTEM \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n"); fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height); fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n"); @@ -103,6 +115,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); + fprintf(svgfile, " rect.sample_hi{ fill:rgb(255,128, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); 
fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n"); @@ -128,14 +141,42 @@ void svg_box(int Yslot, u64 start, u64 end, const char *type) time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type); } -void svg_sample(int Yslot, int cpu, u64 start, u64 end) +static char *time_to_string(u64 duration); +void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) +{ + if (!svgfile) + return; + + fprintf(svgfile, "<g>\n"); + fprintf(svgfile, "<title>#%d blocked %s</title>\n", cpu, + time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "<desc>Blocked on:\n%s</desc>\n", backtrace); + svg_box(Yslot, start, end, "blocked"); + fprintf(svgfile, "</g>\n"); +} + +void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) { double text_size; + const char *type; + if (!svgfile) return; - fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"sample\"/>\n", - time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT); + if (svg_highlight && end - start > svg_highlight) + type = "sample_hi"; + else + type = "sample"; + fprintf(svgfile, "<g>\n"); + + fprintf(svgfile, "<title>#%d running %s</title>\n", + cpu, time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace); + fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n", + time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, + type); text_size = (time2pixels(end)-time2pixels(start)); if (cpu > 9) @@ -148,6 +189,7 @@ void svg_sample(int Yslot, int cpu, u64 start, u64 end) fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n", time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1); + fprintf(svgfile, "</g>\n"); } static char *time_to_string(u64 duration) @@ -168,7 +210,7 @@ static char *time_to_string(u64 duration) return text; } -void svg_waiting(int Yslot, u64 start, u64 end) +void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace) { char *text; const char *style; @@ -192,6 +234,9 @@ void svg_waiting(int Yslot, u64 start, u64 end) font_size = round_text_size(font_size); fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT); + fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "<desc>Waiting on:\n%s</desc>\n", backtrace); fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n", time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style); if (font_size > MIN_TEXT_SIZE) @@ -242,28 +287,42 @@ void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq) max_freq = __max_freq; turbo_frequency = __turbo_freq; + fprintf(svgfile, "<g>\n"); + fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n", time2pixels(first_time), time2pixels(last_time)-time2pixels(first_time), cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT); - sprintf(cpu_string, "CPU %i", (int)cpu+1); + sprintf(cpu_string, "CPU %i", (int)cpu); fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n", 10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string); fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" 
font-size=\"1.25pt\">%s</text>\n", 10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model()); + + fprintf(svgfile, "</g>\n"); } -void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name) +void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace) { double width; + const char *type; if (!svgfile) return; + if (svg_highlight && end - start >= svg_highlight) + type = "sample_hi"; + else if (svg_highlight_name && strstr(name, svg_highlight_name)) + type = "sample_hi"; + else + type = "sample"; fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu)); + fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start)); + if (backtrace) + fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace); fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n", time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type); width = time2pixels(end)-time2pixels(start); @@ -288,6 +347,8 @@ void svg_cstate(int cpu, u64 start, u64 end, int type) return; + fprintf(svgfile, "<g>\n"); + if (type > 6) type = 6; sprintf(style, "c%i", type); @@ -306,6 +367,8 @@ void svg_cstate(int cpu, u64 start, u64 end, int type) if (width > MIN_TEXT_SIZE) fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n", time2pixels(start), cpu2y(cpu)+width, width, type); + + fprintf(svgfile, "</g>\n"); } static char *HzToHuman(unsigned long hz) @@ -339,6 +402,8 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq) if (!svgfile) return; + fprintf(svgfile, "<g>\n"); + if (max_freq) height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT); height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height; @@ -347,10 +412,11 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq) fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n", time2pixels(start), height+0.9, HzToHuman(freq)); + fprintf(svgfile, "</g>\n"); } -void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2) +void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace) { double height; @@ -358,6 +424,15 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc return; + fprintf(svgfile, "<g>\n"); + + fprintf(svgfile, "<title>%s wakes up %s</title>\n", + desc1 ? desc1 : "?", + desc2 ? 
desc2 : "?"); + + if (backtrace) + fprintf(svgfile, "<desc>%s</desc>\n", backtrace); + if (row1 < row2) { if (row1) { fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n", @@ -395,9 +470,11 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc if (row1) fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n", time2pixels(start), height); + + fprintf(svgfile, "</g>\n"); } -void svg_wakeline(u64 start, int row1, int row2) +void svg_wakeline(u64 start, int row1, int row2, const char *backtrace) { double height; @@ -405,6 +482,11 @@ void svg_wakeline(u64 start, int row1, int row2) return; + fprintf(svgfile, "<g>\n"); + + if (backtrace) + fprintf(svgfile, "<desc>%s</desc>\n", backtrace); + if (row1 < row2) fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n", time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT); @@ -417,17 +499,28 @@ void svg_wakeline(u64 start, int row1, int row2) height += SLOT_HEIGHT; fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n", time2pixels(start), height); + + fprintf(svgfile, "</g>\n"); } -void svg_interrupt(u64 start, int row) +void svg_interrupt(u64 start, int row, const char *backtrace) { if (!svgfile) return; + fprintf(svgfile, "<g>\n"); + + fprintf(svgfile, "<title>Wakeup from interrupt</title>\n"); + + if (backtrace) + fprintf(svgfile, "<desc>%s</desc>\n", backtrace); + fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n", time2pixels(start), row * SLOT_MULT); fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n", time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT); + + fprintf(svgfile, "</g>\n"); } void svg_text(int Yslot, u64 start, const char *text) @@ -455,6 +548,7 @@ void svg_legenda(void) if (!svgfile) return; + fprintf(svgfile, "<g>\n"); svg_legenda_box(0, "Running", "sample"); svg_legenda_box(100, "Idle","c1"); svg_legenda_box(200, "Deeper Idle", "c3"); @@ -462,6 +556,7 @@ void svg_legenda(void) svg_legenda_box(550, "Sleeping", "process2"); svg_legenda_box(650, "Waiting for cpu", "waiting"); svg_legenda_box(800, "Blocked on IO", "blocked"); + fprintf(svgfile, "</g>\n"); } void svg_time_grid(void) @@ -499,3 +594,123 @@ void svg_close(void) svgfile = NULL; } } + +#define cpumask_bits(maskp) ((maskp)->bits) +typedef struct { DECLARE_BITMAP(bits, MAX_NR_CPUS); } cpumask_t; + +struct topology { + cpumask_t *sib_core; + int sib_core_nr; + cpumask_t *sib_thr; + int sib_thr_nr; +}; + +static void scan_thread_topology(int *map, struct topology *t, int cpu, int *pos) +{ + int i; + int thr; + + for (i = 0; i < t->sib_thr_nr; i++) { + if (!test_bit(cpu, cpumask_bits(&t->sib_thr[i]))) + continue; + + for_each_set_bit(thr, + cpumask_bits(&t->sib_thr[i]), + MAX_NR_CPUS) + if (map[thr] == -1) + map[thr] = (*pos)++; + } +} + +static void scan_core_topology(int *map, struct topology *t) +{ + int pos = 0; + int i; + int cpu; + + for (i = 0; i < t->sib_core_nr; i++) + for_each_set_bit(cpu, + cpumask_bits(&t->sib_core[i]), + MAX_NR_CPUS) + scan_thread_topology(map, t, cpu, &pos); +} + +static int str_to_bitmap(char *s, cpumask_t *b) +{ + int i; + int ret = 0; + struct cpu_map *m; + int c; + + m = cpu_map__new(s); + if (!m) + return -1; + + for (i = 0; i < m->nr; i++) 
{ + c = m->map[i]; + if (c >= MAX_NR_CPUS) { + ret = -1; + break; + } + + set_bit(c, cpumask_bits(b)); + } + + cpu_map__delete(m); + + return ret; +} + +int svg_build_topology_map(char *sib_core, int sib_core_nr, + char *sib_thr, int sib_thr_nr) +{ + int i; + struct topology t; + + t.sib_core_nr = sib_core_nr; + t.sib_thr_nr = sib_thr_nr; + t.sib_core = calloc(sib_core_nr, sizeof(cpumask_t)); + t.sib_thr = calloc(sib_thr_nr, sizeof(cpumask_t)); + + if (!t.sib_core || !t.sib_thr) { + fprintf(stderr, "topology: no memory\n"); + goto exit; + } + + for (i = 0; i < sib_core_nr; i++) { + if (str_to_bitmap(sib_core, &t.sib_core[i])) { + fprintf(stderr, "topology: can't parse siblings map\n"); + goto exit; + } + + sib_core += strlen(sib_core) + 1; + } + + for (i = 0; i < sib_thr_nr; i++) { + if (str_to_bitmap(sib_thr, &t.sib_thr[i])) { + fprintf(stderr, "topology: can't parse siblings map\n"); + goto exit; + } + + sib_thr += strlen(sib_thr) + 1; + } + + topology_map = malloc(sizeof(int) * MAX_NR_CPUS); + if (!topology_map) { + fprintf(stderr, "topology: no memory\n"); + goto exit; + } + + for (i = 0; i < MAX_NR_CPUS; i++) + topology_map[i] = -1; + + scan_core_topology(topology_map, &t); + + return 0; + +exit: + zfree(&t.sib_core); + zfree(&t.sib_thr); + + return -1; +} diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h index e0781989cc3..e3aff5332e3 100644 --- a/tools/perf/util/svghelper.h +++ b/tools/perf/util/svghelper.h @@ -1,28 +1,33 @@ #ifndef __PERF_SVGHELPER_H #define __PERF_SVGHELPER_H -#include "types.h" +#include <linux/types.h> extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end); extern void svg_box(int Yslot, u64 start, u64 end, const char *type); -extern void svg_sample(int Yslot, int cpu, u64 start, u64 end); -extern void svg_waiting(int Yslot, u64 start, u64 end); +extern void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace); +extern void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace); +extern void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace); extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency); -extern void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name); +extern void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const char *backtrace); extern void svg_cstate(int cpu, u64 start, u64 end, int type); extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq); extern void svg_time_grid(void); extern void svg_legenda(void); -extern void svg_wakeline(u64 start, int row1, int row2); -extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2); -extern void svg_interrupt(u64 start, int row); +extern void svg_wakeline(u64 start, int row1, int row2, const char *backtrace); +extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace); +extern void svg_interrupt(u64 start, int row, const char *backtrace); extern void svg_text(int Yslot, u64 start, const char *text); extern void svg_close(void); +extern int svg_build_topology_map(char *sib_core, int sib_core_nr, + char *sib_thr, int sib_thr_nr); extern int svg_page_width; +extern u64 svg_highlight; +extern const char *svg_highlight_name; #endif /* __PERF_SVGHELPER_H */ diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index db0cc92cf2e..6864661a79d 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1,6 
+1,3 @@ -#include <libelf.h> -#include <gelf.h> -#include <elf.h> #include <fcntl.h> #include <stdio.h> #include <errno.h> @@ -9,8 +6,26 @@ #include <inttypes.h> #include "symbol.h" +#include "vdso.h" +#include <symbol/kallsyms.h> #include "debug.h" +#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT +static int elf_getphdrnum(Elf *elf, size_t *dst) +{ + GElf_Ehdr gehdr; + GElf_Ehdr *ehdr; + + ehdr = gelf_getehdr(elf, &gehdr); + if (!ehdr) + return -1; + + *dst = ehdr->e_phnum; + + return 0; +} +#endif + #ifndef NT_GNU_BUILD_ID #define NT_GNU_BUILD_ID 3 #endif @@ -122,9 +137,8 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) return -1; } -static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, - GElf_Shdr *shp, const char *name, - size_t *idx) +Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, size_t *idx) { Elf_Scn *sec = NULL; size_t cnt = 1; @@ -138,15 +152,15 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, gelf_getshdr(sec, shp); str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); - if (!strcmp(name, str)) { + if (str && !strcmp(name, str)) { if (idx) *idx = cnt; - break; + return sec; } ++cnt; } - return sec; + return NULL; } #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ @@ -474,27 +488,29 @@ int filename__read_debuglink(const char *filename, char *debuglink, ek = elf_kind(elf); if (ek != ELF_K_ELF) - goto out_close; + goto out_elf_end; if (gelf_getehdr(elf, &ehdr) == NULL) { pr_err("%s: cannot get elf header.\n", __func__); - goto out_close; + goto out_elf_end; } sec = elf_section_by_name(elf, &ehdr, &shdr, ".gnu_debuglink", NULL); if (sec == NULL) - goto out_close; + goto out_elf_end; data = elf_getdata(sec, NULL); if (data == NULL) - goto out_close; + goto out_elf_end; /* the start of this section is a zero-terminated string */ strncpy(debuglink, data->d_buf, size); - elf_end(elf); + err = 0; +out_elf_end: + elf_end(elf); out_close: close(fd); out: @@ -540,7 +556,7 @@ bool symsrc__has_symtab(struct symsrc *ss) void symsrc__destroy(struct symsrc *ss) { - free(ss->name); + zfree(&ss->name); elf_end(ss->elf); close(ss->fd); } @@ -602,11 +618,14 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, if (dso->kernel == DSO_TYPE_USER) { GElf_Shdr shdr; ss->adjust_symbols = (ehdr.e_type == ET_EXEC || + ehdr.e_type == ET_REL || + is_vdso_map(dso->short_name) || elf_section_by_name(elf, &ehdr, &shdr, ".gnu.prelink_undo", NULL) != NULL); } else { - ss->adjust_symbols = 0; + ss->adjust_symbols = ehdr.e_type == ET_EXEC || + ehdr.e_type == ET_REL; } ss->name = strdup(name); @@ -627,6 +646,37 @@ out_close: return err; } +/** + * ref_reloc_sym_not_found - has kernel relocation symbol been found. + * @kmap: kernel maps and relocation reference symbol + * + * This function returns %true if we are dealing with the kernel maps and the + * relocation reference symbol has not yet been found. Otherwise %false is + * returned. + */ +static bool ref_reloc_sym_not_found(struct kmap *kmap) +{ + return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && + !kmap->ref_reloc_sym->unrelocated_addr; +} + +/** + * ref_reloc - kernel relocation offset. + * @kmap: kernel maps and relocation reference symbol + * + * This function returns the offset of kernel addresses as determined by using + * the relocation reference symbol i.e. if the kernel has not been relocated + * then the return value is zero. 
+ */ +static u64 ref_reloc(struct kmap *kmap) +{ + if (kmap && kmap->ref_reloc_sym && + kmap->ref_reloc_sym->unrelocated_addr) + return kmap->ref_reloc_sym->addr - + kmap->ref_reloc_sym->unrelocated_addr; + return 0; +} + int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, symbol_filter_t filter, int kmodule) @@ -645,8 +695,17 @@ int dso__load_sym(struct dso *dso, struct map *map, Elf_Scn *sec, *sec_strndx; Elf *elf; int nr = 0; + bool remap_kernel = false, adjust_kernel_syms = false; dso->symtab_type = syms_ss->type; + dso->rel = syms_ss->ehdr.e_type == ET_REL; + + /* + * Modules may already have symbols from kallsyms, but those symbols + * have the wrong values for the dso maps, so remove them. + */ + if (kmodule && syms_ss->symtab) + symbols__delete(&dso->symbols[map->type]); if (!syms_ss->symtab) { syms_ss->symtab = syms_ss->dynsym; @@ -684,7 +743,33 @@ int dso__load_sym(struct dso *dso, struct map *map, nr_syms = shdr.sh_size / shdr.sh_entsize; memset(&sym, 0, sizeof(sym)); - dso->adjust_symbols = runtime_ss->adjust_symbols; + + /* + * The kernel relocation symbol is needed in advance in order to adjust + * kernel maps correctly. + */ + if (ref_reloc_sym_not_found(kmap)) { + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { + const char *elf_name = elf_sym__name(&sym, symstrs); + + if (strcmp(elf_name, kmap->ref_reloc_sym->name)) + continue; + kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; + map->reloc = kmap->ref_reloc_sym->addr - + kmap->ref_reloc_sym->unrelocated_addr; + break; + } + } + + dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); + /* + * Initial kernel and module mappings do not map to the dso. For + * function mappings, flag the fixups. + */ + if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) { + remap_kernel = true; + adjust_kernel_syms = dso->adjust_symbols; + } elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name = elf_sym__name(&sym, symstrs); @@ -693,10 +778,6 @@ int dso__load_sym(struct dso *dso, struct map *map, const char *section_name; bool used_opd = false; - if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && - strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) - kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; - if (!is_label && !elf_sym__is_a(&sym, map->type)) continue; @@ -718,6 +799,17 @@ int dso__load_sym(struct dso *dso, struct map *map, sym.st_value); used_opd = true; } + /* + * When loading symbols in a data mapping, ABS symbols (which + * has a value of SHN_ABS in its st_shndx) failed at + * elf_getscn(). And it marks the loading as a failure so + * already loaded symbols cannot be fixed up. + * + * I'm not sure what should be done. Just ignore them for now. + * - Namhyung Kim + */ + if (sym.st_shndx == SHN_ABS) + continue; sec = elf_getscn(runtime_ss->elf, sym.st_shndx); if (!sec) @@ -737,20 +829,55 @@ int dso__load_sym(struct dso *dso, struct map *map, (sym.st_value & 1)) --sym.st_value; - if (dso->kernel != DSO_TYPE_USER || kmodule) { + if (dso->kernel || kmodule) { char dso_name[PATH_MAX]; + /* Adjust symbol to map to file offset */ + if (adjust_kernel_syms) + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0) goto new_symbol; if (strcmp(section_name, ".text") == 0) { + /* + * The initial kernel mapping is based on + * kallsyms and identity maps. Overwrite it to + * map to the kernel dso. 
+ */ + if (remap_kernel && dso->kernel) { + remap_kernel = false; + map->start = shdr.sh_addr + + ref_reloc(kmap); + map->end = map->start + shdr.sh_size; + map->pgoff = shdr.sh_offset; + map->map_ip = map__map_ip; + map->unmap_ip = map__unmap_ip; + /* Ensure maps are correctly ordered */ + map_groups__remove(kmap->kmaps, map); + map_groups__insert(kmap->kmaps, map); + } + + /* + * The initial module mapping is based on + * /proc/modules mapped to offset zero. + * Overwrite it to map to the module dso. + */ + if (remap_kernel && kmodule) { + remap_kernel = false; + map->pgoff = shdr.sh_offset; + } + curr_map = map; curr_dso = dso; goto new_symbol; } + if (!kmap) + goto new_symbol; + snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name); @@ -773,8 +900,16 @@ int dso__load_sym(struct dso *dso, struct map *map, dso__delete(curr_dso); goto out_elf_end; } - curr_map->map_ip = identity__map_ip; - curr_map->unmap_ip = identity__map_ip; + if (adjust_kernel_syms) { + curr_map->start = shdr.sh_addr + + ref_reloc(kmap); + curr_map->end = curr_map->start + + shdr.sh_size; + curr_map->pgoff = shdr.sh_offset; + } else { + curr_map->map_ip = identity__map_ip; + curr_map->unmap_ip = identity__map_ip; + } curr_dso->symtab_type = dso->symtab_type; map_groups__insert(kmap->kmaps, curr_map); dsos__add(&dso->node, curr_dso); @@ -793,15 +928,18 @@ int dso__load_sym(struct dso *dso, struct map *map, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; } +new_symbol: /* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access * to it... */ - demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); - if (demangled != NULL) - elf_name = demangled; -new_symbol: + if (symbol_conf.demangle) { + demangled = bfd_demangle(NULL, elf_name, + DMGL_PARAMS | DMGL_ANSI); + if (demangled != NULL) + elf_name = demangled; + } f = symbol__new(sym.st_value, sym.st_size, GELF_ST_BIND(sym.st_info), elf_name); free(demangled); @@ -835,6 +973,652 @@ out_elf_end: return err; } +static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data) +{ + GElf_Phdr phdr; + size_t i, phdrnum; + int err; + u64 sz; + + if (elf_getphdrnum(elf, &phdrnum)) + return -1; + + for (i = 0; i < phdrnum; i++) { + if (gelf_getphdr(elf, i, &phdr) == NULL) + return -1; + if (phdr.p_type != PT_LOAD) + continue; + if (exe) { + if (!(phdr.p_flags & PF_X)) + continue; + } else { + if (!(phdr.p_flags & PF_R)) + continue; + } + sz = min(phdr.p_memsz, phdr.p_filesz); + if (!sz) + continue; + err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data); + if (err) + return err; + } + return 0; +} + +int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, + bool *is_64_bit) +{ + int err; + Elf *elf; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return -1; + + if (is_64_bit) + *is_64_bit = (gelf_getclass(elf) == ELFCLASS64); + + err = elf_read_maps(elf, exe, mapfn, data); + + elf_end(elf); + return err; +} + +static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len) +{ + ssize_t r; + size_t n; + int err = -1; + char *buf = malloc(page_size); + + if (buf == NULL) + return -1; + + if (lseek(to, to_offs, SEEK_SET) != to_offs) + goto out; + + if (lseek(from, from_offs, SEEK_SET) != from_offs) + goto out; + + while (len) { + n = page_size; + if (len < n) + n = len; + /* Use read because mmap won't work on proc files */ + r = read(from, buf, n); + if (r < 0) + goto out; + if (!r) + break; + n = r; + 
r = write(to, buf, n); + if (r < 0) + goto out; + if ((size_t)r != n) + goto out; + len -= n; + } + + err = 0; +out: + free(buf); + return err; +} + +struct kcore { + int fd; + int elfclass; + Elf *elf; + GElf_Ehdr ehdr; +}; + +static int kcore__open(struct kcore *kcore, const char *filename) +{ + GElf_Ehdr *ehdr; + + kcore->fd = open(filename, O_RDONLY); + if (kcore->fd == -1) + return -1; + + kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL); + if (!kcore->elf) + goto out_close; + + kcore->elfclass = gelf_getclass(kcore->elf); + if (kcore->elfclass == ELFCLASSNONE) + goto out_end; + + ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); + if (!ehdr) + goto out_end; + + return 0; + +out_end: + elf_end(kcore->elf); +out_close: + close(kcore->fd); + return -1; +} + +static int kcore__init(struct kcore *kcore, char *filename, int elfclass, + bool temp) +{ + GElf_Ehdr *ehdr; + + kcore->elfclass = elfclass; + + if (temp) + kcore->fd = mkstemp(filename); + else + kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400); + if (kcore->fd == -1) + return -1; + + kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL); + if (!kcore->elf) + goto out_close; + + if (!gelf_newehdr(kcore->elf, elfclass)) + goto out_end; + + ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); + if (!ehdr) + goto out_end; + + return 0; + +out_end: + elf_end(kcore->elf); +out_close: + close(kcore->fd); + unlink(filename); + return -1; +} + +static void kcore__close(struct kcore *kcore) +{ + elf_end(kcore->elf); + close(kcore->fd); +} + +static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) +{ + GElf_Ehdr *ehdr = &to->ehdr; + GElf_Ehdr *kehdr = &from->ehdr; + + memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT); + ehdr->e_type = kehdr->e_type; + ehdr->e_machine = kehdr->e_machine; + ehdr->e_version = kehdr->e_version; + ehdr->e_entry = 0; + ehdr->e_shoff = 0; + ehdr->e_flags = kehdr->e_flags; + ehdr->e_phnum = count; + ehdr->e_shentsize = 0; + ehdr->e_shnum = 0; + ehdr->e_shstrndx = 0; + + if (from->elfclass == ELFCLASS32) { + ehdr->e_phoff = sizeof(Elf32_Ehdr); + ehdr->e_ehsize = sizeof(Elf32_Ehdr); + ehdr->e_phentsize = sizeof(Elf32_Phdr); + } else { + ehdr->e_phoff = sizeof(Elf64_Ehdr); + ehdr->e_ehsize = sizeof(Elf64_Ehdr); + ehdr->e_phentsize = sizeof(Elf64_Phdr); + } + + if (!gelf_update_ehdr(to->elf, ehdr)) + return -1; + + if (!gelf_newphdr(to->elf, count)) + return -1; + + return 0; +} + +static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, + u64 addr, u64 len) +{ + GElf_Phdr gphdr; + GElf_Phdr *phdr; + + phdr = gelf_getphdr(kcore->elf, idx, &gphdr); + if (!phdr) + return -1; + + phdr->p_type = PT_LOAD; + phdr->p_flags = PF_R | PF_W | PF_X; + phdr->p_offset = offset; + phdr->p_vaddr = addr; + phdr->p_paddr = 0; + phdr->p_filesz = len; + phdr->p_memsz = len; + phdr->p_align = page_size; + + if (!gelf_update_phdr(kcore->elf, idx, phdr)) + return -1; + + return 0; +} + +static off_t kcore__write(struct kcore *kcore) +{ + return elf_update(kcore->elf, ELF_C_WRITE); +} + +struct phdr_data { + off_t offset; + u64 addr; + u64 len; +}; + +struct kcore_copy_info { + u64 stext; + u64 etext; + u64 first_symbol; + u64 last_symbol; + u64 first_module; + u64 last_module_symbol; + struct phdr_data kernel_map; + struct phdr_data modules_map; +}; + +static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, + u64 start) +{ + struct kcore_copy_info *kci = arg; + + if (!symbol_type__is_a(type, MAP__FUNCTION)) + return 0; + + if (strchr(name, '[')) { + if (start > 
kci->last_module_symbol) + kci->last_module_symbol = start; + return 0; + } + + if (!kci->first_symbol || start < kci->first_symbol) + kci->first_symbol = start; + + if (!kci->last_symbol || start > kci->last_symbol) + kci->last_symbol = start; + + if (!strcmp(name, "_stext")) { + kci->stext = start; + return 0; + } + + if (!strcmp(name, "_etext")) { + kci->etext = start; + return 0; + } + + return 0; +} + +static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci, + const char *dir) +{ + char kallsyms_filename[PATH_MAX]; + + scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir); + + if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms")) + return -1; + + if (kallsyms__parse(kallsyms_filename, kci, + kcore_copy__process_kallsyms) < 0) + return -1; + + return 0; +} + +static int kcore_copy__process_modules(void *arg, + const char *name __maybe_unused, + u64 start) +{ + struct kcore_copy_info *kci = arg; + + if (!kci->first_module || start < kci->first_module) + kci->first_module = start; + + return 0; +} + +static int kcore_copy__parse_modules(struct kcore_copy_info *kci, + const char *dir) +{ + char modules_filename[PATH_MAX]; + + scnprintf(modules_filename, PATH_MAX, "%s/modules", dir); + + if (symbol__restricted_filename(modules_filename, "/proc/modules")) + return -1; + + if (modules__parse(modules_filename, kci, + kcore_copy__process_modules) < 0) + return -1; + + return 0; +} + +static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff, + u64 s, u64 e) +{ + if (p->addr || s < start || s >= end) + return; + + p->addr = s; + p->offset = (s - start) + pgoff; + p->len = e < end ? e - s : end - s; +} + +static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) +{ + struct kcore_copy_info *kci = data; + u64 end = start + len; + + kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext, + kci->etext); + + kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module, + kci->last_module_symbol); + + return 0; +} + +static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) +{ + if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0) + return -1; + + return 0; +} + +static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, + Elf *elf) +{ + if (kcore_copy__parse_kallsyms(kci, dir)) + return -1; + + if (kcore_copy__parse_modules(kci, dir)) + return -1; + + if (kci->stext) + kci->stext = round_down(kci->stext, page_size); + else + kci->stext = round_down(kci->first_symbol, page_size); + + if (kci->etext) { + kci->etext = round_up(kci->etext, page_size); + } else if (kci->last_symbol) { + kci->etext = round_up(kci->last_symbol, page_size); + kci->etext += page_size; + } + + kci->first_module = round_down(kci->first_module, page_size); + + if (kci->last_module_symbol) { + kci->last_module_symbol = round_up(kci->last_module_symbol, + page_size); + kci->last_module_symbol += page_size; + } + + if (!kci->stext || !kci->etext) + return -1; + + if (kci->first_module && !kci->last_module_symbol) + return -1; + + return kcore_copy__read_maps(kci, elf); +} + +static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, + const char *name) +{ + char from_filename[PATH_MAX]; + char to_filename[PATH_MAX]; + + scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name); + scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); + + return copyfile_mode(from_filename, to_filename, 0400); +} + +static int kcore_copy__unlink(const char *dir, const char *name) +{ + char 
filename[PATH_MAX]; + + scnprintf(filename, PATH_MAX, "%s/%s", dir, name); + + return unlink(filename); +} + +static int kcore_copy__compare_fds(int from, int to) +{ + char *buf_from; + char *buf_to; + ssize_t ret; + size_t len; + int err = -1; + + buf_from = malloc(page_size); + buf_to = malloc(page_size); + if (!buf_from || !buf_to) + goto out; + + while (1) { + /* Use read because mmap won't work on proc files */ + ret = read(from, buf_from, page_size); + if (ret < 0) + goto out; + + if (!ret) + break; + + len = ret; + + if (readn(to, buf_to, len) != (int)len) + goto out; + + if (memcmp(buf_from, buf_to, len)) + goto out; + } + + err = 0; +out: + free(buf_to); + free(buf_from); + return err; +} + +static int kcore_copy__compare_files(const char *from_filename, + const char *to_filename) +{ + int from, to, err = -1; + + from = open(from_filename, O_RDONLY); + if (from < 0) + return -1; + + to = open(to_filename, O_RDONLY); + if (to < 0) + goto out_close_from; + + err = kcore_copy__compare_fds(from, to); + + close(to); +out_close_from: + close(from); + return err; +} + +static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, + const char *name) +{ + char from_filename[PATH_MAX]; + char to_filename[PATH_MAX]; + + scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name); + scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); + + return kcore_copy__compare_files(from_filename, to_filename); +} + +/** + * kcore_copy - copy kallsyms, modules and kcore from one directory to another. + * @from_dir: from directory + * @to_dir: to directory + * + * This function copies kallsyms, modules and kcore files from one directory to + * another. kallsyms and modules are copied entirely. Only code segments are + * copied from kcore. It is assumed that two segments suffice: one for the + * kernel proper and one for all the modules. The code segments are determined + * from kallsyms and modules files. The kernel map starts at _stext or the + * lowest function symbol, and ends at _etext or the highest function symbol. + * The module map starts at the lowest module address and ends at the highest + * module symbol. Start addresses are rounded down to the nearest page. End + * addresses are rounded up to the nearest page. An extra page is added to the + * highest kernel symbol and highest module symbol to, hopefully, encompass that + * symbol too. Because it contains only code sections, the resulting kcore is + * unusual. One significant peculiarity is that the mapping (start -> pgoff) + * is not the same for the kernel map and the modules map. That happens because + * the data is copied adjacently whereas the original kcore has gaps. Finally, + * kallsyms and modules files are compared with their copies to check that + * modules have not been loaded or unloaded while the copies were taking place. + * + * Return: %0 on success, %-1 on failure. 
+ */ +int kcore_copy(const char *from_dir, const char *to_dir) +{ + struct kcore kcore; + struct kcore extract; + size_t count = 2; + int idx = 0, err = -1; + off_t offset = page_size, sz, modules_offset = 0; + struct kcore_copy_info kci = { .stext = 0, }; + char kcore_filename[PATH_MAX]; + char extract_filename[PATH_MAX]; + + if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) + return -1; + + if (kcore_copy__copy_file(from_dir, to_dir, "modules")) + goto out_unlink_kallsyms; + + scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir); + scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir); + + if (kcore__open(&kcore, kcore_filename)) + goto out_unlink_modules; + + if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf)) + goto out_kcore_close; + + if (kcore__init(&extract, extract_filename, kcore.elfclass, false)) + goto out_kcore_close; + + if (!kci.modules_map.addr) + count -= 1; + + if (kcore__copy_hdr(&kcore, &extract, count)) + goto out_extract_close; + + if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr, + kci.kernel_map.len)) + goto out_extract_close; + + if (kci.modules_map.addr) { + modules_offset = offset + kci.kernel_map.len; + if (kcore__add_phdr(&extract, idx, modules_offset, + kci.modules_map.addr, kci.modules_map.len)) + goto out_extract_close; + } + + sz = kcore__write(&extract); + if (sz < 0 || sz > offset) + goto out_extract_close; + + if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset, + kci.kernel_map.len)) + goto out_extract_close; + + if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset, + extract.fd, modules_offset, + kci.modules_map.len)) + goto out_extract_close; + + if (kcore_copy__compare_file(from_dir, to_dir, "modules")) + goto out_extract_close; + + if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms")) + goto out_extract_close; + + err = 0; + +out_extract_close: + kcore__close(&extract); + if (err) + unlink(extract_filename); +out_kcore_close: + kcore__close(&kcore); +out_unlink_modules: + if (err) + kcore_copy__unlink(to_dir, "modules"); +out_unlink_kallsyms: + if (err) + kcore_copy__unlink(to_dir, "kallsyms"); + + return err; +} + +int kcore_extract__create(struct kcore_extract *kce) +{ + struct kcore kcore; + struct kcore extract; + size_t count = 1; + int idx = 0, err = -1; + off_t offset = page_size, sz; + + if (kcore__open(&kcore, kce->kcore_filename)) + return -1; + + strcpy(kce->extract_filename, PERF_KCORE_EXTRACT); + if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true)) + goto out_kcore_close; + + if (kcore__copy_hdr(&kcore, &extract, count)) + goto out_extract_close; + + if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len)) + goto out_extract_close; + + sz = kcore__write(&extract); + if (sz < 0 || sz > offset) + goto out_extract_close; + + if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len)) + goto out_extract_close; + + err = 0; + +out_extract_close: + kcore__close(&extract); + if (err) + unlink(kce->extract_filename); +out_kcore_close: + kcore__close(&kcore); + + return err; +} + +void kcore_extract__delete(struct kcore_extract *kce) +{ + unlink(kce->extract_filename); +} + void symbol__elf_init(void) { elf_version(EV_CURRENT); diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index 259f8f2ea9c..bd15f490d04 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -1,6 +1,6 @@ #include "symbol.h" +#include "util.h" -#include <elf.h> #include <stdio.h> #include <fcntl.h> #include 
<string.h> @@ -254,6 +254,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused, if (!ss->name) goto out_close; + ss->fd = fd; ss->type = type; return 0; @@ -275,7 +276,7 @@ bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) void symsrc__destroy(struct symsrc *ss) { - free(ss->name); + zfree(&ss->name); close(ss->fd); } @@ -302,6 +303,28 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, return 0; } +int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused, + mapfn_t mapfn __maybe_unused, void *data __maybe_unused, + bool *is_64_bit __maybe_unused) +{ + return -1; +} + +int kcore_extract__create(struct kcore_extract *kce __maybe_unused) +{ + return -1; +} + +void kcore_extract__delete(struct kcore_extract *kce __maybe_unused) +{ +} + +int kcore_copy(const char *from_dir __maybe_unused, + const char *to_dir __maybe_unused) +{ + return -1; +} + void symbol__elf_init(void) { } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e2e8c697cff..7b9096f29cd 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -12,31 +12,29 @@ #include "build-id.h" #include "util.h" #include "debug.h" +#include "machine.h" #include "symbol.h" #include "strlist.h" #include <elf.h> #include <limits.h> +#include <symbol/kallsyms.h> #include <sys/utsname.h> -#ifndef KSYM_NAME_LEN -#define KSYM_NAME_LEN 256 -#endif - -static void dso_cache__free(struct rb_root *root); static int dso__load_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); -static int vmlinux_path__nr_entries; -static char **vmlinux_path; +int vmlinux_path__nr_entries; +char **vmlinux_path; struct symbol_conf symbol_conf = { - .exclude_other = true, - .use_modules = true, - .try_vmlinux_path = true, - .annotate_src = true, - .symfs = "", + .use_modules = true, + .try_vmlinux_path = true, + .annotate_src = true, + .demangle = true, + .cumulate_callchain = true, + .symfs = "", }; static enum dso_binary_type binary_type_symtab[] = { @@ -51,44 +49,12 @@ static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__SYSTEM_PATH_DSO, DSO_BINARY_TYPE__GUEST_KMODULE, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, DSO_BINARY_TYPE__NOT_FOUND, }; #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) -static enum dso_binary_type binary_type_data[] = { - DSO_BINARY_TYPE__BUILD_ID_CACHE, - DSO_BINARY_TYPE__SYSTEM_PATH_DSO, - DSO_BINARY_TYPE__NOT_FOUND, -}; - -#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data) - -int dso__name_len(const struct dso *dso) -{ - if (!dso) - return strlen("[unknown]"); - if (verbose) - return dso->long_name_len; - - return dso->short_name_len; -} - -bool dso__loaded(const struct dso *dso, enum map_type type) -{ - return dso->loaded & (1 << type); -} - -bool dso__sorted_by_name(const struct dso *dso, enum map_type type) -{ - return dso->sorted_by_name & (1 << type); -} - -static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) -{ - dso->sorted_by_name |= (1 << type); -} - bool symbol_type__is_a(char symbol_type, enum map_type map_type) { symbol_type = toupper(symbol_type); @@ -120,6 +86,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) { s64 a; s64 b; + size_t na, nb; /* Prefer a symbol with non zero length */ a = syma->end - syma->start; @@ -153,11 +120,21 @@ static int choose_best_symbol(struct symbol *syma, struct symbol 
*symb) else if (a > b) return SYMBOL_B; - /* If all else fails, choose the symbol with the longest name */ - if (strlen(syma->name) >= strlen(symb->name)) + /* Choose the symbol with the longest name */ + na = strlen(syma->name); + nb = strlen(symb->name); + if (na > nb) return SYMBOL_A; - else + else if (na < nb) + return SYMBOL_B; + + /* Avoid "SyS" kernel syscall aliases */ + if (na >= 3 && !strncmp(syma->name, "SyS", 3)) return SYMBOL_B; + if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10)) + return SYMBOL_B; + + return SYMBOL_A; } void symbols__fixup_duplicate(struct rb_root *symbols) @@ -181,10 +158,12 @@ again: if (choose_best_symbol(curr, next) == SYMBOL_A) { rb_erase(&next->rb_node, symbols); + symbol__delete(next); goto again; } else { nd = rb_next(&curr->rb_node); rb_erase(&curr->rb_node, symbols); + symbol__delete(curr); } } } @@ -235,13 +214,6 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) curr->end = ~0ULL; } -static void map_groups__fixup_end(struct map_groups *mg) -{ - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - __map_groups__fixup_end(mg, i); -} - struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) { size_t namelen = strlen(name) + 1; @@ -270,7 +242,7 @@ void symbol__delete(struct symbol *sym) free(((void *)sym) - symbol_conf.priv_size); } -static size_t symbol__fprintf(struct symbol *sym, FILE *fp) +size_t symbol__fprintf(struct symbol *sym, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", sym->start, sym->end, @@ -288,7 +260,10 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym, if (sym && sym->name) { length = fprintf(fp, "%s", sym->name); if (al) { - offset = al->addr - sym->start; + if (al->addr < sym->end) + offset = al->addr - sym->start; + else + offset = al->addr - al->map->start - sym->start; length += fprintf(fp, "+0x%lx", offset); } return length; @@ -301,53 +276,7 @@ size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) return symbol__fprintf_symname_offs(sym, NULL, fp); } -void dso__set_long_name(struct dso *dso, char *name) -{ - if (name == NULL) - return; - dso->long_name = name; - dso->long_name_len = strlen(name); -} - -static void dso__set_short_name(struct dso *dso, const char *name) -{ - if (name == NULL) - return; - dso->short_name = name; - dso->short_name_len = strlen(name); -} - -static void dso__set_basename(struct dso *dso) -{ - dso__set_short_name(dso, basename(dso->long_name)); -} - -struct dso *dso__new(const char *name) -{ - struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); - - if (dso != NULL) { - int i; - strcpy(dso->name, name); - dso__set_long_name(dso, dso->name); - dso__set_short_name(dso, dso->name); - for (i = 0; i < MAP__NR_TYPES; ++i) - dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; - dso->cache = RB_ROOT; - dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; - dso->data_type = DSO_BINARY_TYPE__NOT_FOUND; - dso->loaded = 0; - dso->sorted_by_name = 0; - dso->has_build_id = 0; - dso->kernel = DSO_TYPE_USER; - dso->needs_swap = DSO_SWAP__UNSET; - INIT_LIST_HEAD(&dso->node); - } - - return dso; -} - -static void symbols__delete(struct rb_root *symbols) +void symbols__delete(struct rb_root *symbols) { struct symbol *pos; struct rb_node *next = rb_first(symbols); @@ -360,25 +289,6 @@ static void symbols__delete(struct rb_root *symbols) } } -void dso__delete(struct dso *dso) -{ - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - symbols__delete(&dso->symbols[i]); - if (dso->sname_alloc) - free((char *)dso->short_name); - if 
(dso->lname_alloc) - free(dso->long_name); - dso_cache__free(&dso->cache); - free(dso); -} - -void dso__set_build_id(struct dso *dso, void *build_id) -{ - memcpy(dso->build_id, build_id, sizeof(dso->build_id)); - dso->has_build_id = 1; -} - void symbols__insert(struct rb_root *symbols, struct symbol *sym) { struct rb_node **p = &symbols->rb_node; @@ -421,6 +331,16 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) return NULL; } +static struct symbol *symbols__first(struct rb_root *symbols) +{ + struct rb_node *n = rb_first(symbols); + + if (n) + return rb_entry(n, struct symbol, rb_node); + + return NULL; +} + struct symbol_name_rb_node { struct rb_node rb_node; struct symbol sym; @@ -491,6 +411,11 @@ struct symbol *dso__find_symbol(struct dso *dso, return symbols__find(&dso->symbols[type], addr); } +static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) +{ + return symbols__first(&dso->symbols[type]); +} + struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, const char *name) { @@ -504,29 +429,6 @@ void dso__sort_by_name(struct dso *dso, enum map_type type) &dso->symbols[type]); } -int build_id__sprintf(const u8 *build_id, int len, char *bf) -{ - char *bid = bf; - const u8 *raw = build_id; - int i; - - for (i = 0; i < len; ++i) { - sprintf(bid, "%02x", *raw); - ++raw; - bid += 2; - } - - return raw - build_id; -} - -size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) -{ - char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - - build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); - return fprintf(fp, "%s", sbuild_id); -} - size_t dso__fprintf_symbols_by_name(struct dso *dso, enum map_type type, FILE *fp) { @@ -542,79 +444,62 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, return ret; } -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) -{ - struct rb_node *nd; - size_t ret = fprintf(fp, "dso: %s (", dso->short_name); - - if (dso->short_name != dso->long_name) - ret += fprintf(fp, "%s, ", dso->long_name); - ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], - dso->loaded ? 
"" : "NOT "); - ret += dso__fprintf_buildid(dso, fp); - ret += fprintf(fp, ")\n"); - for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - ret += symbol__fprintf(pos, fp); - } - - return ret; -} - -int kallsyms__parse(const char *filename, void *arg, - int (*process_symbol)(void *arg, const char *name, - char type, u64 start)) +int modules__parse(const char *filename, void *arg, + int (*process_module)(void *arg, const char *name, + u64 start)) { char *line = NULL; size_t n; - int err = -1; - FILE *file = fopen(filename, "r"); + FILE *file; + int err = 0; + file = fopen(filename, "r"); if (file == NULL) - goto out_failure; - - err = 0; + return -1; - while (!feof(file)) { + while (1) { + char name[PATH_MAX]; u64 start; - int line_len, len; - char symbol_type; - char *symbol_name; + char *sep; + ssize_t line_len; line_len = getline(&line, &n, file); - if (line_len < 0 || !line) - break; + if (line_len < 0) { + if (feof(file)) + break; + err = -1; + goto out; + } + + if (!line) { + err = -1; + goto out; + } line[--line_len] = '\0'; /* \n */ - len = hex2u64(line, &start); + sep = strrchr(line, 'x'); + if (sep == NULL) + continue; - len++; - if (len + 2 >= line_len) + hex2u64(sep + 1, &start); + + sep = strchr(line, ' '); + if (sep == NULL) continue; - symbol_type = line[len]; - len += 2; - symbol_name = line + len; - len = line_len - len; + *sep = '\0'; - if (len >= KSYM_NAME_LEN) { - err = -1; - break; - } + scnprintf(name, sizeof(name), "[%s]", line); - err = process_symbol(arg, symbol_name, - symbol_type, start); + err = process_module(arg, name, start); if (err) break; } - +out: free(line); fclose(file); return err; - -out_failure: - return -1; } struct process_kallsyms_args { @@ -622,12 +507,34 @@ struct process_kallsyms_args { struct dso *dso; }; -static u8 kallsyms2elf_type(char type) -{ - if (type == 'W') - return STB_WEAK; +bool symbol__is_idle(struct symbol *sym) +{ + const char * const idle_symbols[] = { + "cpu_idle", + "intel_idle", + "default_idle", + "native_safe_halt", + "enter_idle", + "exit_idle", + "mwait_idle", + "mwait_idle_with_hints", + "poll_idle", + "ppc64_runlatch_off", + "pseries_dedicated_idle_sleep", + NULL + }; + + int i; + + if (!sym) + return false; - return isupper(type) ? 
STB_GLOBAL : STB_LOCAL; + for (i = 0; idle_symbols[i]; i++) { + if (!strcmp(idle_symbols[i], sym->name)) + return true; + } + + return false; } static int map__process_kallsym_symbol(void *arg, const char *name, @@ -669,12 +576,59 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename, return kallsyms__parse(filename, &args, map__process_kallsym_symbol); } +static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, + symbol_filter_t filter) +{ + struct map_groups *kmaps = map__kmap(map)->kmaps; + struct map *curr_map; + struct symbol *pos; + int count = 0, moved = 0; + struct rb_root *root = &dso->symbols[map->type]; + struct rb_node *next = rb_first(root); + + while (next) { + char *module; + + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + + module = strchr(pos->name, '\t'); + if (module) + *module = '\0'; + + curr_map = map_groups__find(kmaps, map->type, pos->start); + + if (!curr_map || (filter && filter(curr_map, pos))) { + rb_erase(&pos->rb_node, root); + symbol__delete(pos); + } else { + pos->start -= curr_map->start - curr_map->pgoff; + if (pos->end) + pos->end -= curr_map->start - curr_map->pgoff; + if (curr_map != map) { + rb_erase(&pos->rb_node, root); + symbols__insert( + &curr_map->dso->symbols[curr_map->type], + pos); + ++moved; + } else { + ++count; + } + } + } + + /* Symbols have been adjusted */ + dso->adjust_symbols = 1; + + return count + moved; +} + /* * Split the symbols into maps, making sure there are no overlaps, i.e. the * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. */ -static int dso__split_kallsyms(struct dso *dso, struct map *map, +static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta, symbol_filter_t filter) { struct map_groups *kmaps = map__kmap(map)->kmaps; @@ -739,6 +693,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, char dso_name[PATH_MAX]; struct dso *ndso; + if (delta) { + /* Kernel was relocated at boot time */ + pos->start -= delta; + pos->end -= delta; + } + if (count == 0) { curr_map = map; goto filter_symbol; @@ -768,6 +728,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; map_groups__insert(kmaps, curr_map); ++kernel_range; + } else if (delta) { + /* Kernel was relocated at boot time */ + pos->start -= delta; + pos->end -= delta; } filter_symbol: if (filter && filter(curr_map, pos)) { @@ -792,8 +756,8 @@ discard_symbol: rb_erase(&pos->rb_node, root); return count + moved; } -static bool symbol__restricted_filename(const char *filename, - const char *restricted_filename) +bool symbol__restricted_filename(const char *filename, + const char *restricted_filename) { bool restricted = false; @@ -810,15 +774,408 @@ static bool symbol__restricted_filename(const char *filename, return restricted; } +struct module_info { + struct rb_node rb_node; + char *name; + u64 start; +}; + +static void add_module(struct module_info *mi, struct rb_root *modules) +{ + struct rb_node **p = &modules->rb_node; + struct rb_node *parent = NULL; + struct module_info *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct module_info, rb_node); + if (strcmp(mi->name, m->name) < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&mi->rb_node, parent, p); + rb_insert_color(&mi->rb_node, modules); +} + +static void delete_modules(struct rb_root *modules) +{ + struct 
module_info *mi; + struct rb_node *next = rb_first(modules); + + while (next) { + mi = rb_entry(next, struct module_info, rb_node); + next = rb_next(&mi->rb_node); + rb_erase(&mi->rb_node, modules); + zfree(&mi->name); + free(mi); + } +} + +static struct module_info *find_module(const char *name, + struct rb_root *modules) +{ + struct rb_node *n = modules->rb_node; + + while (n) { + struct module_info *m; + int cmp; + + m = rb_entry(n, struct module_info, rb_node); + cmp = strcmp(name, m->name); + if (cmp < 0) + n = n->rb_left; + else if (cmp > 0) + n = n->rb_right; + else + return m; + } + + return NULL; +} + +static int __read_proc_modules(void *arg, const char *name, u64 start) +{ + struct rb_root *modules = arg; + struct module_info *mi; + + mi = zalloc(sizeof(struct module_info)); + if (!mi) + return -ENOMEM; + + mi->name = strdup(name); + mi->start = start; + + if (!mi->name) { + free(mi); + return -ENOMEM; + } + + add_module(mi, modules); + + return 0; +} + +static int read_proc_modules(const char *filename, struct rb_root *modules) +{ + if (symbol__restricted_filename(filename, "/proc/modules")) + return -1; + + if (modules__parse(filename, modules, __read_proc_modules)) { + delete_modules(modules); + return -1; + } + + return 0; +} + +int compare_proc_modules(const char *from, const char *to) +{ + struct rb_root from_modules = RB_ROOT; + struct rb_root to_modules = RB_ROOT; + struct rb_node *from_node, *to_node; + struct module_info *from_m, *to_m; + int ret = -1; + + if (read_proc_modules(from, &from_modules)) + return -1; + + if (read_proc_modules(to, &to_modules)) + goto out_delete_from; + + from_node = rb_first(&from_modules); + to_node = rb_first(&to_modules); + while (from_node) { + if (!to_node) + break; + + from_m = rb_entry(from_node, struct module_info, rb_node); + to_m = rb_entry(to_node, struct module_info, rb_node); + + if (from_m->start != to_m->start || + strcmp(from_m->name, to_m->name)) + break; + + from_node = rb_next(from_node); + to_node = rb_next(to_node); + } + + if (!from_node && !to_node) + ret = 0; + + delete_modules(&to_modules); +out_delete_from: + delete_modules(&from_modules); + + return ret; +} + +static int do_validate_kcore_modules(const char *filename, struct map *map, + struct map_groups *kmaps) +{ + struct rb_root modules = RB_ROOT; + struct map *old_map; + int err; + + err = read_proc_modules(filename, &modules); + if (err) + return err; + + old_map = map_groups__first(kmaps, map->type); + while (old_map) { + struct map *next = map_groups__next(old_map); + struct module_info *mi; + + if (old_map == map || old_map->start == map->start) { + /* The kernel map */ + old_map = next; + continue; + } + + /* Module must be in memory at the same address */ + mi = find_module(old_map->dso->short_name, &modules); + if (!mi || mi->start != old_map->start) { + err = -EINVAL; + goto out; + } + + old_map = next; + } +out: + delete_modules(&modules); + return err; +} + +/* + * If kallsyms is referenced by name then we look for filename in the same + * directory. 
+ */ +static bool filename_from_kallsyms_filename(char *filename, + const char *base_name, + const char *kallsyms_filename) +{ + char *name; + + strcpy(filename, kallsyms_filename); + name = strrchr(filename, '/'); + if (!name) + return false; + + name += 1; + + if (!strcmp(name, "kallsyms")) { + strcpy(name, base_name); + return true; + } + + return false; +} + +static int validate_kcore_modules(const char *kallsyms_filename, + struct map *map) +{ + struct map_groups *kmaps = map__kmap(map)->kmaps; + char modules_filename[PATH_MAX]; + + if (!filename_from_kallsyms_filename(modules_filename, "modules", + kallsyms_filename)) + return -EINVAL; + + if (do_validate_kcore_modules(modules_filename, map, kmaps)) + return -EINVAL; + + return 0; +} + +static int validate_kcore_addresses(const char *kallsyms_filename, + struct map *map) +{ + struct kmap *kmap = map__kmap(map); + + if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { + u64 start; + + start = kallsyms__get_function_start(kallsyms_filename, + kmap->ref_reloc_sym->name); + if (start != kmap->ref_reloc_sym->addr) + return -EINVAL; + } + + return validate_kcore_modules(kallsyms_filename, map); +} + +struct kcore_mapfn_data { + struct dso *dso; + enum map_type type; + struct list_head maps; +}; + +static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) +{ + struct kcore_mapfn_data *md = data; + struct map *map; + + map = map__new2(start, md->dso, md->type); + if (map == NULL) + return -ENOMEM; + + map->end = map->start + len; + map->pgoff = pgoff; + + list_add(&map->node, &md->maps); + + return 0; +} + +static int dso__load_kcore(struct dso *dso, struct map *map, + const char *kallsyms_filename) +{ + struct map_groups *kmaps = map__kmap(map)->kmaps; + struct machine *machine = kmaps->machine; + struct kcore_mapfn_data md; + struct map *old_map, *new_map, *replacement_map = NULL; + bool is_64_bit; + int err, fd; + char kcore_filename[PATH_MAX]; + struct symbol *sym; + + /* This function requires that the map is the kernel map */ + if (map != machine->vmlinux_maps[map->type]) + return -EINVAL; + + if (!filename_from_kallsyms_filename(kcore_filename, "kcore", + kallsyms_filename)) + return -EINVAL; + + /* Modules and kernel must be present at their original addresses */ + if (validate_kcore_addresses(kallsyms_filename, map)) + return -EINVAL; + + md.dso = dso; + md.type = map->type; + INIT_LIST_HEAD(&md.maps); + + fd = open(kcore_filename, O_RDONLY); + if (fd < 0) + return -EINVAL; + + /* Read new maps into temporary lists */ + err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, + &is_64_bit); + if (err) + goto out_err; + + if (list_empty(&md.maps)) { + err = -EINVAL; + goto out_err; + } + + /* Remove old maps */ + old_map = map_groups__first(kmaps, map->type); + while (old_map) { + struct map *next = map_groups__next(old_map); + + if (old_map != map) + map_groups__remove(kmaps, old_map); + old_map = next; + } + + /* Find the kernel map using the first symbol */ + sym = dso__first_symbol(dso, map->type); + list_for_each_entry(new_map, &md.maps, node) { + if (sym && sym->start >= new_map->start && + sym->start < new_map->end) { + replacement_map = new_map; + break; + } + } + + if (!replacement_map) + replacement_map = list_entry(md.maps.next, struct map, node); + + /* Add new maps */ + while (!list_empty(&md.maps)) { + new_map = list_entry(md.maps.next, struct map, node); + list_del(&new_map->node); + if (new_map == replacement_map) { + map->start = new_map->start; + map->end = new_map->end; + map->pgoff = 
new_map->pgoff; + map->map_ip = new_map->map_ip; + map->unmap_ip = new_map->unmap_ip; + map__delete(new_map); + /* Ensure maps are correctly ordered */ + map_groups__remove(kmaps, map); + map_groups__insert(kmaps, map); + } else { + map_groups__insert(kmaps, new_map); + } + } + + /* + * Set the data type and long name so that kcore can be read via + * dso__data_read_addr(). + */ + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; + else + dso->binary_type = DSO_BINARY_TYPE__KCORE; + dso__set_long_name(dso, strdup(kcore_filename), true); + + close(fd); + + if (map->type == MAP__FUNCTION) + pr_debug("Using %s for kernel object code\n", kcore_filename); + else + pr_debug("Using %s for kernel data\n", kcore_filename); + + return 0; + +out_err: + while (!list_empty(&md.maps)) { + map = list_entry(md.maps.next, struct map, node); + list_del(&map->node); + map__delete(map); + } + close(fd); + return -EINVAL; +} + +/* + * If the kernel is relocated at boot time, kallsyms won't match. Compute the + * delta based on the relocation reference symbol. + */ +static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) +{ + struct kmap *kmap = map__kmap(map); + u64 addr; + + if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) + return 0; + + addr = kallsyms__get_function_start(filename, + kmap->ref_reloc_sym->name); + if (!addr) + return -1; + + *delta = addr - kmap->ref_reloc_sym->addr; + return 0; +} + int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, symbol_filter_t filter) { + u64 delta = 0; + if (symbol__restricted_filename(filename, "/proc/kallsyms")) return -1; if (dso__load_all_kallsyms(dso, filename, map) < 0) return -1; + if (kallsyms__delta(map, filename, &delta)) + return -1; + symbols__fixup_duplicate(&dso->symbols[map->type]); symbols__fixup_end(&dso->symbols[map->type]); @@ -827,7 +1184,10 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, else dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; - return dso__split_kallsyms(dso, map, filter); + if (!dso__load_kcore(dso, map, filename)) + return dso__split_kallsyms_for_kcore(dso, map, filter); + else + return dso__split_kallsyms(dso, map, delta, filter); } static int dso__load_perf_map(struct dso *dso, struct map *map, @@ -892,134 +1252,44 @@ out_failure: return -1; } -bool dso__build_id_equal(const struct dso *dso, u8 *build_id) -{ - return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; -} - -bool __dsos__read_build_ids(struct list_head *head, bool with_hits) -{ - bool have_build_id = false; - struct dso *pos; - - list_for_each_entry(pos, head, node) { - if (with_hits && !pos->hit) - continue; - if (pos->has_build_id) { - have_build_id = true; - continue; - } - if (filename__read_build_id(pos->long_name, pos->build_id, - sizeof(pos->build_id)) > 0) { - have_build_id = true; - pos->has_build_id = true; - } - } - - return have_build_id; -} - -char dso__symtab_origin(const struct dso *dso) +static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, + enum dso_binary_type type) { - static const char origin[] = { - [DSO_BINARY_TYPE__KALLSYMS] = 'k', - [DSO_BINARY_TYPE__VMLINUX] = 'v', - [DSO_BINARY_TYPE__JAVA_JIT] = 'j', - [DSO_BINARY_TYPE__DEBUGLINK] = 'l', - [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', - [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', - [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', - [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', - [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', - [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 
'K', - [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', - [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', - [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', - }; - - if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) - return '!'; - return origin[dso->symtab_type]; -} - -int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, - char *root_dir, char *file, size_t size) -{ - char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - int ret = 0; - switch (type) { - case DSO_BINARY_TYPE__DEBUGLINK: { - char *debuglink; - - strncpy(file, dso->long_name, size); - debuglink = file + dso->long_name_len; - while (debuglink != file && *debuglink != '/') - debuglink--; - if (*debuglink == '/') - debuglink++; - filename__read_debuglink(dso->long_name, debuglink, - size - (debuglink - file)); - } - break; - case DSO_BINARY_TYPE__BUILD_ID_CACHE: - /* skip the locally configured cache if a symfs is given */ - if (symbol_conf.symfs[0] || - (dso__build_id_filename(dso, file, size) == NULL)) - ret = -1; - break; - + case DSO_BINARY_TYPE__JAVA_JIT: + case DSO_BINARY_TYPE__DEBUGLINK: + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: - snprintf(file, size, "%s/usr/lib/debug%s.debug", - symbol_conf.symfs, dso->long_name); - break; - case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: - snprintf(file, size, "%s/usr/lib/debug%s", - symbol_conf.symfs, dso->long_name); - break; - case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: - if (!dso->has_build_id) { - ret = -1; - break; - } + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + return !kmod && dso->kernel == DSO_TYPE_USER; - build_id__sprintf(dso->build_id, - sizeof(dso->build_id), - build_id_hex); - snprintf(file, size, - "%s/usr/lib/debug/.build-id/%.2s/%s.debug", - symbol_conf.symfs, build_id_hex, build_id_hex + 2); - break; + case DSO_BINARY_TYPE__KALLSYMS: + case DSO_BINARY_TYPE__VMLINUX: + case DSO_BINARY_TYPE__KCORE: + return dso->kernel == DSO_TYPE_KERNEL; - case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: - snprintf(file, size, "%s%s", - symbol_conf.symfs, dso->long_name); - break; + case DSO_BINARY_TYPE__GUEST_KALLSYMS: + case DSO_BINARY_TYPE__GUEST_VMLINUX: + case DSO_BINARY_TYPE__GUEST_KCORE: + return dso->kernel == DSO_TYPE_GUEST_KERNEL; case DSO_BINARY_TYPE__GUEST_KMODULE: - snprintf(file, size, "%s%s%s", symbol_conf.symfs, - root_dir, dso->long_name); - break; - case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: - snprintf(file, size, "%s%s", symbol_conf.symfs, - dso->long_name); - break; + /* + * kernel modules know their symtab type - it's set when + * creating a module dso in machine__new_module(). 
+ */ + return kmod && dso->symtab_type == type; + + case DSO_BINARY_TYPE__BUILD_ID_CACHE: + return true; - default: - case DSO_BINARY_TYPE__KALLSYMS: - case DSO_BINARY_TYPE__VMLINUX: - case DSO_BINARY_TYPE__GUEST_KALLSYMS: - case DSO_BINARY_TYPE__GUEST_VMLINUX: - case DSO_BINARY_TYPE__JAVA_JIT: case DSO_BINARY_TYPE__NOT_FOUND: - ret = -1; - break; + default: + return false; } - - return ret; } int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) @@ -1032,6 +1302,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) int ss_pos = 0; struct symsrc ss_[2]; struct symsrc *syms_ss = NULL, *runtime_ss = NULL; + bool kmod; dso__set_loaded(dso, map->type); @@ -1045,10 +1316,6 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) else machine = NULL; - name = malloc(PATH_MAX); - if (!name) - return -1; - dso->adjust_symbols = 0; if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { @@ -1072,7 +1339,15 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (machine) root_dir = machine->root_dir; - /* Iterate over candidate debug images. + name = malloc(PATH_MAX); + if (!name) + return -1; + + kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || + dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; + + /* + * Iterate over candidate debug images. * Keep track of "interesting" ones (those which have a symtab, dynsym, * and/or opd section) for processing. */ @@ -1082,8 +1357,11 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) enum dso_binary_type symtab_type = binary_type_symtab[i]; - if (dso__binary_type_file(dso, symtab_type, - root_dir, name, PATH_MAX)) + if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) + continue; + + if (dso__read_binary_type_filename(dso, symtab_type, + root_dir, name, PATH_MAX)) continue; /* Name is now the name of the next image to try */ @@ -1093,6 +1371,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (!syms_ss && symsrc__has_symtab(ss)) { syms_ss = ss; next_slot = true; + if (!dso->symsrc_filename) + dso->symsrc_filename = strdup(name); } if (!runtime_ss && symsrc__possibly_runtime(ss)) { @@ -1105,6 +1385,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (syms_ss && runtime_ss) break; + } else { + symsrc__destroy(ss); } } @@ -1121,7 +1403,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) runtime_ss = syms_ss; if (syms_ss) - ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0); + ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod); else ret = -1; @@ -1157,231 +1439,20 @@ struct map *map_groups__find_by_name(struct map_groups *mg, return NULL; } -static int dso__kernel_module_get_build_id(struct dso *dso, - const char *root_dir) -{ - char filename[PATH_MAX]; - /* - * kernel module short names are of the form "[module]" and - * we need just "module" here. 
- */ - const char *name = dso->short_name + 1; - - snprintf(filename, sizeof(filename), - "%s/sys/module/%.*s/notes/.note.gnu.build-id", - root_dir, (int)strlen(name) - 1, name); - - if (sysfs__read_build_id(filename, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; - - return 0; -} - -static int map_groups__set_modules_path_dir(struct map_groups *mg, - const char *dir_name) -{ - struct dirent *dent; - DIR *dir = opendir(dir_name); - int ret = 0; - - if (!dir) { - pr_debug("%s: cannot open %s dir\n", __func__, dir_name); - return -1; - } - - while ((dent = readdir(dir)) != NULL) { - char path[PATH_MAX]; - struct stat st; - - /*sshfs might return bad dent->d_type, so we have to stat*/ - snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); - if (stat(path, &st)) - continue; - - if (S_ISDIR(st.st_mode)) { - if (!strcmp(dent->d_name, ".") || - !strcmp(dent->d_name, "..")) - continue; - - ret = map_groups__set_modules_path_dir(mg, path); - if (ret < 0) - goto out; - } else { - char *dot = strrchr(dent->d_name, '.'), - dso_name[PATH_MAX]; - struct map *map; - char *long_name; - - if (dot == NULL || strcmp(dot, ".ko")) - continue; - snprintf(dso_name, sizeof(dso_name), "[%.*s]", - (int)(dot - dent->d_name), dent->d_name); - - strxfrchar(dso_name, '-', '_'); - map = map_groups__find_by_name(mg, MAP__FUNCTION, - dso_name); - if (map == NULL) - continue; - - long_name = strdup(path); - if (long_name == NULL) { - ret = -1; - goto out; - } - dso__set_long_name(map->dso, long_name); - map->dso->lname_alloc = 1; - dso__kernel_module_get_build_id(map->dso, ""); - } - } - -out: - closedir(dir); - return ret; -} - -static char *get_kernel_version(const char *root_dir) -{ - char version[PATH_MAX]; - FILE *file; - char *name, *tmp; - const char *prefix = "Linux version "; - - sprintf(version, "%s/proc/version", root_dir); - file = fopen(version, "r"); - if (!file) - return NULL; - - version[0] = '\0'; - tmp = fgets(version, sizeof(version), file); - fclose(file); - - name = strstr(version, prefix); - if (!name) - return NULL; - name += strlen(prefix); - tmp = strchr(name, ' '); - if (tmp) - *tmp = '\0'; - - return strdup(name); -} - -static int machine__set_modules_path(struct machine *machine) -{ - char *version; - char modules_path[PATH_MAX]; - - version = get_kernel_version(machine->root_dir); - if (!version) - return -1; - - snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", - machine->root_dir, version); - free(version); - - return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); -} - -struct map *machine__new_module(struct machine *machine, u64 start, - const char *filename) -{ - struct map *map; - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); - - if (dso == NULL) - return NULL; - - map = map__new2(start, dso, MAP__FUNCTION); - if (map == NULL) - return NULL; - - if (machine__is_host(machine)) - dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; - else - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; - map_groups__insert(&machine->kmaps, map); - return map; -} - -static int machine__create_modules(struct machine *machine) -{ - char *line = NULL; - size_t n; - FILE *file; - struct map *map; - const char *modules; - char path[PATH_MAX]; - - if (machine__is_default_guest(machine)) - modules = symbol_conf.default_guest_modules; - else { - sprintf(path, "%s/proc/modules", machine->root_dir); - modules = path; - } - - if (symbol__restricted_filename(path, "/proc/modules")) - return -1; - - file = 
fopen(modules, "r"); - if (file == NULL) - return -1; - - while (!feof(file)) { - char name[PATH_MAX]; - u64 start; - char *sep; - int line_len; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - goto out_failure; - - line[--line_len] = '\0'; /* \n */ - - sep = strrchr(line, 'x'); - if (sep == NULL) - continue; - - hex2u64(sep + 1, &start); - - sep = strchr(line, ' '); - if (sep == NULL) - continue; - - *sep = '\0'; - - snprintf(name, sizeof(name), "[%s]", line); - map = machine__new_module(machine, start, name); - if (map == NULL) - goto out_delete_line; - dso__kernel_module_get_build_id(map->dso, machine->root_dir); - } - - free(line); - fclose(file); - - return machine__set_modules_path(machine); - -out_delete_line: - free(line); -out_failure: - return -1; -} - int dso__load_vmlinux(struct dso *dso, struct map *map, - const char *vmlinux, symbol_filter_t filter) + const char *vmlinux, bool vmlinux_allocated, + symbol_filter_t filter) { int err = -1; struct symsrc ss; char symfs_vmlinux[PATH_MAX]; enum dso_binary_type symtab_type; - snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", - symbol_conf.symfs, vmlinux); + if (vmlinux[0] == '/') + snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); + else + snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", + symbol_conf.symfs, vmlinux); if (dso->kernel == DSO_TYPE_GUEST_KERNEL) symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; @@ -1395,7 +1466,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, symsrc__destroy(&ss); if (err > 0) { - dso__set_long_name(dso, (char *)vmlinux); + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; + else + dso->binary_type = DSO_BINARY_TYPE__VMLINUX; + dso__set_long_name(dso, vmlinux, vmlinux_allocated); dso__set_loaded(dso, map->type); pr_debug("Using %s for symbols\n", symfs_vmlinux); } @@ -1414,23 +1489,125 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, filename = dso__build_id_filename(dso, NULL, 0); if (filename != NULL) { - err = dso__load_vmlinux(dso, map, filename, filter); + err = dso__load_vmlinux(dso, map, filename, true, filter); if (err > 0) goto out; free(filename); } for (i = 0; i < vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); - if (err > 0) { - dso__set_long_name(dso, strdup(vmlinux_path[i])); + err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter); + if (err > 0) break; - } } out: return err; } +static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) +{ + char kallsyms_filename[PATH_MAX]; + struct dirent *dent; + int ret = -1; + DIR *d; + + d = opendir(dir); + if (!d) + return -1; + + while (1) { + dent = readdir(d); + if (!dent) + break; + if (dent->d_type != DT_DIR) + continue; + scnprintf(kallsyms_filename, sizeof(kallsyms_filename), + "%s/%s/kallsyms", dir, dent->d_name); + if (!validate_kcore_addresses(kallsyms_filename, map)) { + strlcpy(dir, kallsyms_filename, dir_sz); + ret = 0; + break; + } + } + + closedir(d); + + return ret; +} + +static char *dso__find_kallsyms(struct dso *dso, struct map *map) +{ + u8 host_build_id[BUILD_ID_SIZE]; + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + bool is_host = false; + char path[PATH_MAX]; + + if (!dso->has_build_id) { + /* + * Last resort, if we don't have a build-id and couldn't find + * any vmlinux file, try the running kernel kallsyms table. 
+ */ + goto proc_kallsyms; + } + + if (sysfs__read_build_id("/sys/kernel/notes", host_build_id, + sizeof(host_build_id)) == 0) + is_host = dso__build_id_equal(dso, host_build_id); + + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); + + scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir, + sbuild_id); + + /* Use /proc/kallsyms if possible */ + if (is_host) { + DIR *d; + int fd; + + /* If no cached kcore go with /proc/kallsyms */ + d = opendir(path); + if (!d) + goto proc_kallsyms; + closedir(d); + + /* + * Do not check the build-id cache, until we know we cannot use + * /proc/kcore. + */ + fd = open("/proc/kcore", O_RDONLY); + if (fd != -1) { + close(fd); + /* If module maps match go with /proc/kallsyms */ + if (!validate_kcore_addresses("/proc/kallsyms", map)) + goto proc_kallsyms; + } + + /* Find kallsyms in build-id cache with kcore */ + if (!find_matching_kcore(map, path, sizeof(path))) + return strdup(path); + + goto proc_kallsyms; + } + + /* Find kallsyms in build-id cache with kcore */ + if (!find_matching_kcore(map, path, sizeof(path))) + return strdup(path); + + scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s", + buildid_dir, sbuild_id); + + if (access(path, F_OK)) { + pr_err("No kallsyms or vmlinux with build-id %s was found\n", + sbuild_id); + return NULL; + } + + return strdup(path); + +proc_kallsyms: + return strdup("/proc/kallsyms"); +} + static int dso__load_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter) { @@ -1457,72 +1634,26 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, goto do_kallsyms; } - if (symbol_conf.vmlinux_name != NULL) { - err = dso__load_vmlinux(dso, map, - symbol_conf.vmlinux_name, filter); - if (err > 0) { - dso__set_long_name(dso, - strdup(symbol_conf.vmlinux_name)); - goto out_fixup; - } - return err; + if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { + return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, + false, filter); } - if (vmlinux_path != NULL) { + if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { err = dso__load_vmlinux_path(dso, map, filter); if (err > 0) - goto out_fixup; + return err; } /* do not try local files if a symfs was given */ if (symbol_conf.symfs[0] != 0) return -1; - /* - * Say the kernel DSO was created when processing the build-id header table, - * we have a build-id, so check if it is the same as the running kernel, - * using it if it is. - */ - if (dso->has_build_id) { - u8 kallsyms_build_id[BUILD_ID_SIZE]; - char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - - if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, - sizeof(kallsyms_build_id)) == 0) { - if (dso__build_id_equal(dso, kallsyms_build_id)) { - kallsyms_filename = "/proc/kallsyms"; - goto do_kallsyms; - } - } - /* - * Now look if we have it on the build-id cache in - * $HOME/.debug/[kernel.kallsyms]. 
- */ - build_id__sprintf(dso->build_id, sizeof(dso->build_id), - sbuild_id); - - if (asprintf(&kallsyms_allocated_filename, - "%s/.debug/[kernel.kallsyms]/%s", - getenv("HOME"), sbuild_id) == -1) { - pr_err("Not enough memory for kallsyms file lookup\n"); - return -1; - } - - kallsyms_filename = kallsyms_allocated_filename; + kallsyms_allocated_filename = dso__find_kallsyms(dso, map); + if (!kallsyms_allocated_filename) + return -1; - if (access(kallsyms_filename, F_OK)) { - pr_err("No kallsyms or vmlinux with build-id %s " - "was found\n", sbuild_id); - free(kallsyms_allocated_filename); - return -1; - } - } else { - /* - * Last resort, if we don't have a build-id and couldn't find - * any vmlinux file, try the running kernel kallsyms table. - */ - kallsyms_filename = "/proc/kallsyms"; - } + kallsyms_filename = kallsyms_allocated_filename; do_kallsyms: err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); @@ -1530,9 +1661,8 @@ do_kallsyms: pr_debug("Using %s for symbols\n", kallsyms_filename); free(kallsyms_allocated_filename); - if (err > 0) { - dso__set_long_name(dso, strdup("[kernel.kallsyms]")); -out_fixup: + if (err > 0 && !dso__is_kcore(dso)) { + dso__set_long_name(dso, "[kernel.kallsyms]", false); map__fixup_start(map); map__fixup_end(map); } @@ -1562,8 +1692,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, */ if (symbol_conf.default_guest_vmlinux_name != NULL) { err = dso__load_vmlinux(dso, map, - symbol_conf.default_guest_vmlinux_name, filter); - goto out_try_fixup; + symbol_conf.default_guest_vmlinux_name, + false, filter); + return err; } kallsyms_filename = symbol_conf.default_guest_kallsyms; @@ -1577,13 +1708,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); - -out_try_fixup: - if (err > 0) { - if (kallsyms_filename != NULL) { - machine__mmap_name(machine, path, sizeof(path)); - dso__set_long_name(dso, strdup(path)); - } + if (err > 0 && !dso__is_kcore(dso)) { + machine__mmap_name(machine, path, sizeof(path)); + dso__set_long_name(dso, strdup(path), true); map__fixup_start(map); map__fixup_end(map); } @@ -1591,296 +1718,12 @@ out_try_fixup: return err; } -void dsos__add(struct list_head *head, struct dso *dso) -{ - list_add_tail(&dso->node, head); -} - -struct dso *dsos__find(struct list_head *head, const char *name) -{ - struct dso *pos; - - list_for_each_entry(pos, head, node) - if (strcmp(pos->long_name, name) == 0) - return pos; - return NULL; -} - -struct dso *__dsos__findnew(struct list_head *head, const char *name) -{ - struct dso *dso = dsos__find(head, name); - - if (!dso) { - dso = dso__new(name); - if (dso != NULL) { - dsos__add(head, dso); - dso__set_basename(dso); - } - } - - return dso; -} - -size_t __dsos__fprintf(struct list_head *head, FILE *fp) -{ - struct dso *pos; - size_t ret = 0; - - list_for_each_entry(pos, head, node) { - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - ret += dso__fprintf(pos, i, fp); - } - - return ret; -} - -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) -{ - struct rb_node *nd; - size_t ret = 0; - - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += __dsos__fprintf(&pos->kernel_dsos, fp); - ret += __dsos__fprintf(&pos->user_dsos, fp); - } - - return ret; -} - -static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, - bool with_hits) 
-{ - struct dso *pos; - size_t ret = 0; - - list_for_each_entry(pos, head, node) { - if (with_hits && !pos->hit) - continue; - ret += dso__fprintf_buildid(pos, fp); - ret += fprintf(fp, " %s\n", pos->long_name); - } - return ret; -} - -size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, - bool with_hits) -{ - return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + - __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); -} - -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits) -{ - struct rb_node *nd; - size_t ret = 0; - - for (nd = rb_first(machines); nd; nd = rb_next(nd)) { - struct machine *pos = rb_entry(nd, struct machine, rb_node); - ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); - } - return ret; -} - -static struct dso* -dso__kernel_findnew(struct machine *machine, const char *name, - const char *short_name, int dso_type) -{ - /* - * The kernel dso could be created by build_id processing. - */ - struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name); - - /* - * We need to run this in all cases, since during the build_id - * processing we had no idea this was the kernel dso. - */ - if (dso != NULL) { - dso__set_short_name(dso, short_name); - dso->kernel = dso_type; - } - - return dso; -} - -void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) -{ - char path[PATH_MAX]; - - if (machine__is_default_guest(machine)) - return; - sprintf(path, "%s/sys/kernel/notes", machine->root_dir); - if (sysfs__read_build_id(path, dso->build_id, - sizeof(dso->build_id)) == 0) - dso->has_build_id = true; -} - -static struct dso *machine__get_kernel(struct machine *machine) -{ - const char *vmlinux_name = NULL; - struct dso *kernel; - - if (machine__is_host(machine)) { - vmlinux_name = symbol_conf.vmlinux_name; - if (!vmlinux_name) - vmlinux_name = "[kernel.kallsyms]"; - - kernel = dso__kernel_findnew(machine, vmlinux_name, - "[kernel]", - DSO_TYPE_KERNEL); - } else { - char bf[PATH_MAX]; - - if (machine__is_default_guest(machine)) - vmlinux_name = symbol_conf.default_guest_vmlinux_name; - if (!vmlinux_name) - vmlinux_name = machine__mmap_name(machine, bf, - sizeof(bf)); - - kernel = dso__kernel_findnew(machine, vmlinux_name, - "[guest.kernel]", - DSO_TYPE_GUEST_KERNEL); - } - - if (kernel != NULL && (!kernel->has_build_id)) - dso__read_running_kernel_build_id(kernel, machine); - - return kernel; -} - -struct process_args { - u64 start; -}; - -static int symbol__in_kernel(void *arg, const char *name, - char type __maybe_unused, u64 start) -{ - struct process_args *args = arg; - - if (strchr(name, '[')) - return 0; - - args->start = start; - return 1; -} - -/* Figure out the start address of kernel map from /proc/kallsyms */ -static u64 machine__get_kernel_start_addr(struct machine *machine) -{ - const char *filename; - char path[PATH_MAX]; - struct process_args args; - - if (machine__is_host(machine)) { - filename = "/proc/kallsyms"; - } else { - if (machine__is_default_guest(machine)) - filename = (char *)symbol_conf.default_guest_kallsyms; - else { - sprintf(path, "%s/proc/kallsyms", machine->root_dir); - filename = path; - } - } - - if (symbol__restricted_filename(filename, "/proc/kallsyms")) - return 0; - - if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) - return 0; - - return args.start; -} - -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) -{ - enum map_type type; - u64 start = machine__get_kernel_start_addr(machine); - - for 
(type = 0; type < MAP__NR_TYPES; ++type) { - struct kmap *kmap; - - machine->vmlinux_maps[type] = map__new2(start, kernel, type); - if (machine->vmlinux_maps[type] == NULL) - return -1; - - machine->vmlinux_maps[type]->map_ip = - machine->vmlinux_maps[type]->unmap_ip = - identity__map_ip; - kmap = map__kmap(machine->vmlinux_maps[type]); - kmap->kmaps = &machine->kmaps; - map_groups__insert(&machine->kmaps, - machine->vmlinux_maps[type]); - } - - return 0; -} - -void machine__destroy_kernel_maps(struct machine *machine) -{ - enum map_type type; - - for (type = 0; type < MAP__NR_TYPES; ++type) { - struct kmap *kmap; - - if (machine->vmlinux_maps[type] == NULL) - continue; - - kmap = map__kmap(machine->vmlinux_maps[type]); - map_groups__remove(&machine->kmaps, - machine->vmlinux_maps[type]); - if (kmap->ref_reloc_sym) { - /* - * ref_reloc_sym is shared among all maps, so free just - * on one of them. - */ - if (type == MAP__FUNCTION) { - free((char *)kmap->ref_reloc_sym->name); - kmap->ref_reloc_sym->name = NULL; - free(kmap->ref_reloc_sym); - } - kmap->ref_reloc_sym = NULL; - } - - map__delete(machine->vmlinux_maps[type]); - machine->vmlinux_maps[type] = NULL; - } -} - -int machine__create_kernel_maps(struct machine *machine) -{ - struct dso *kernel = machine__get_kernel(machine); - - if (kernel == NULL || - __machine__create_kernel_maps(machine, kernel) < 0) - return -1; - - if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { - if (machine__is_host(machine)) - pr_debug("Problems creating module maps, " - "continuing anyway...\n"); - else - pr_debug("Problems creating module maps for guest %d, " - "continuing anyway...\n", machine->pid); - } - - /* - * Now that we have all the maps created, just set the ->end of them: - */ - map_groups__fixup_end(&machine->kmaps); - return 0; -} - static void vmlinux_path__exit(void) { - while (--vmlinux_path__nr_entries >= 0) { - free(vmlinux_path[vmlinux_path__nr_entries]); - vmlinux_path[vmlinux_path__nr_entries] = NULL; - } + while (--vmlinux_path__nr_entries >= 0) + zfree(&vmlinux_path[vmlinux_path__nr_entries]); - free(vmlinux_path); - vmlinux_path = NULL; + zfree(&vmlinux_path); } static int vmlinux_path__init(void) @@ -1932,26 +1775,7 @@ out_fail: return -1; } -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) -{ - int i; - size_t printed = 0; - struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; - - if (kdso->has_build_id) { - char filename[PATH_MAX]; - if (dso__build_id_filename(kdso, filename, sizeof(filename))) - printed += fprintf(fp, "[0] %s\n", filename); - } - - for (i = 0; i < vmlinux_path__nr_entries; ++i) - printed += fprintf(fp, "[%d] %s\n", - i + kdso->has_build_id, vmlinux_path[i]); - - return printed; -} - -static int setup_list(struct strlist **list, const char *list_str, +int setup_list(struct strlist **list, const char *list_str, const char *list_name) { if (list_str == NULL) @@ -2054,377 +1878,3 @@ void symbol__exit(void) symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; symbol_conf.initialized = false; } - -int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) -{ - struct machine *machine = machines__findnew(machines, pid); - - if (machine == NULL) - return -1; - - return machine__create_kernel_maps(machine); -} - -static int hex(char ch) -{ - if ((ch >= '0') && (ch <= '9')) - return ch - '0'; - if ((ch >= 'a') && (ch <= 'f')) - return ch - 'a' + 10; - if ((ch >= 'A') && (ch <= 'F')) - return ch - 'A' + 10; - return -1; -} - -/* - * While 
we find nice hex chars, build a long_val. - * Return number of chars processed. - */ -int hex2u64(const char *ptr, u64 *long_val) -{ - const char *p = ptr; - *long_val = 0; - - while (*p) { - const int hex_val = hex(*p); - - if (hex_val < 0) - break; - - *long_val = (*long_val << 4) | hex_val; - p++; - } - - return p - ptr; -} - -char *strxfrchar(char *s, char from, char to) -{ - char *p = s; - - while ((p = strchr(p, from)) != NULL) - *p++ = to; - - return s; -} - -int machines__create_guest_kernel_maps(struct rb_root *machines) -{ - int ret = 0; - struct dirent **namelist = NULL; - int i, items = 0; - char path[PATH_MAX]; - pid_t pid; - char *endp; - - if (symbol_conf.default_guest_vmlinux_name || - symbol_conf.default_guest_modules || - symbol_conf.default_guest_kallsyms) { - machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); - } - - if (symbol_conf.guestmount) { - items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); - if (items <= 0) - return -ENOENT; - for (i = 0; i < items; i++) { - if (!isdigit(namelist[i]->d_name[0])) { - /* Filter out . and .. */ - continue; - } - pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); - if ((*endp != '\0') || - (endp == namelist[i]->d_name) || - (errno == ERANGE)) { - pr_debug("invalid directory (%s). Skipping.\n", - namelist[i]->d_name); - continue; - } - sprintf(path, "%s/%s/proc/kallsyms", - symbol_conf.guestmount, - namelist[i]->d_name); - ret = access(path, R_OK); - if (ret) { - pr_debug("Can't access file %s\n", path); - goto failure; - } - machines__create_kernel_maps(machines, pid); - } -failure: - free(namelist); - } - - return ret; -} - -void machines__destroy_guest_kernel_maps(struct rb_root *machines) -{ - struct rb_node *next = rb_first(machines); - - while (next) { - struct machine *pos = rb_entry(next, struct machine, rb_node); - - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, machines); - machine__delete(pos); - } -} - -int machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type, symbol_filter_t filter) -{ - struct map *map = machine->vmlinux_maps[type]; - int ret = dso__load_kallsyms(map->dso, filename, map, filter); - - if (ret > 0) { - dso__set_loaded(map->dso, type); - /* - * Since /proc/kallsyms will have multiple sessions for the - * kernel, with modules between them, fixup the end of all - * sections. 
- */ - __map_groups__fixup_end(&machine->kmaps, type); - } - - return ret; -} - -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, - symbol_filter_t filter) -{ - struct map *map = machine->vmlinux_maps[type]; - int ret = dso__load_vmlinux_path(map->dso, map, filter); - - if (ret > 0) { - dso__set_loaded(map->dso, type); - map__reloc_vmlinux(map); - } - - return ret; -} - -struct map *dso__new_map(const char *name) -{ - struct map *map = NULL; - struct dso *dso = dso__new(name); - - if (dso) - map = map__new2(0, dso, MAP__FUNCTION); - - return map; -} - -static int open_dso(struct dso *dso, struct machine *machine) -{ - char *root_dir = (char *) ""; - char *name; - int fd; - - name = malloc(PATH_MAX); - if (!name) - return -ENOMEM; - - if (machine) - root_dir = machine->root_dir; - - if (dso__binary_type_file(dso, dso->data_type, - root_dir, name, PATH_MAX)) { - free(name); - return -EINVAL; - } - - fd = open(name, O_RDONLY); - free(name); - return fd; -} - -int dso__data_fd(struct dso *dso, struct machine *machine) -{ - int i = 0; - - if (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND) - return open_dso(dso, machine); - - do { - int fd; - - dso->data_type = binary_type_data[i++]; - - fd = open_dso(dso, machine); - if (fd >= 0) - return fd; - - } while (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND); - - return -EINVAL; -} - -static void -dso_cache__free(struct rb_root *root) -{ - struct rb_node *next = rb_first(root); - - while (next) { - struct dso_cache *cache; - - cache = rb_entry(next, struct dso_cache, rb_node); - next = rb_next(&cache->rb_node); - rb_erase(&cache->rb_node, root); - free(cache); - } -} - -static struct dso_cache* -dso_cache__find(struct rb_root *root, u64 offset) -{ - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct dso_cache *cache; - - while (*p != NULL) { - u64 end; - - parent = *p; - cache = rb_entry(parent, struct dso_cache, rb_node); - end = cache->offset + DSO__DATA_CACHE_SIZE; - - if (offset < cache->offset) - p = &(*p)->rb_left; - else if (offset >= end) - p = &(*p)->rb_right; - else - return cache; - } - return NULL; -} - -static void -dso_cache__insert(struct rb_root *root, struct dso_cache *new) -{ - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - struct dso_cache *cache; - u64 offset = new->offset; - - while (*p != NULL) { - u64 end; - - parent = *p; - cache = rb_entry(parent, struct dso_cache, rb_node); - end = cache->offset + DSO__DATA_CACHE_SIZE; - - if (offset < cache->offset) - p = &(*p)->rb_left; - else if (offset >= end) - p = &(*p)->rb_right; - } - - rb_link_node(&new->rb_node, parent, p); - rb_insert_color(&new->rb_node, root); -} - -static ssize_t -dso_cache__memcpy(struct dso_cache *cache, u64 offset, - u8 *data, u64 size) -{ - u64 cache_offset = offset - cache->offset; - u64 cache_size = min(cache->size - cache_offset, size); - - memcpy(data, cache->data + cache_offset, cache_size); - return cache_size; -} - -static ssize_t -dso_cache__read(struct dso *dso, struct machine *machine, - u64 offset, u8 *data, ssize_t size) -{ - struct dso_cache *cache; - ssize_t ret; - int fd; - - fd = dso__data_fd(dso, machine); - if (fd < 0) - return -1; - - do { - u64 cache_offset; - - ret = -ENOMEM; - - cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); - if (!cache) - break; - - cache_offset = offset & DSO__DATA_CACHE_MASK; - ret = -EINVAL; - - if (-1 == lseek(fd, cache_offset, SEEK_SET)) - break; - - ret = read(fd, cache->data, DSO__DATA_CACHE_SIZE); - if (ret <= 0) - break; - - 
cache->offset = cache_offset; - cache->size = ret; - dso_cache__insert(&dso->cache, cache); - - ret = dso_cache__memcpy(cache, offset, data, size); - - } while (0); - - if (ret <= 0) - free(cache); - - close(fd); - return ret; -} - -static ssize_t dso_cache_read(struct dso *dso, struct machine *machine, - u64 offset, u8 *data, ssize_t size) -{ - struct dso_cache *cache; - - cache = dso_cache__find(&dso->cache, offset); - if (cache) - return dso_cache__memcpy(cache, offset, data, size); - else - return dso_cache__read(dso, machine, offset, data, size); -} - -ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, - u64 offset, u8 *data, ssize_t size) -{ - ssize_t r = 0; - u8 *p = data; - - do { - ssize_t ret; - - ret = dso_cache_read(dso, machine, offset, p, size); - if (ret < 0) - return ret; - - /* Reached EOF, return what we have. */ - if (!ret) - break; - - BUG_ON(ret > size); - - r += ret; - p += ret; - offset += ret; - size -= ret; - - } while (size); - - return r; -} - -ssize_t dso__data_read_addr(struct dso *dso, struct map *map, - struct machine *machine, u64 addr, - u8 *data, ssize_t size) -{ - u64 offset = map->map_ip(map, addr); - return dso__data_read_offset(dso, machine, offset, data, size); -} diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 8b6ef7fac74..615c752dd76 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -11,14 +11,18 @@ #include <stdio.h> #include <byteswap.h> #include <libgen.h> +#include "build-id.h" +#include "event.h" -#ifdef LIBELF_SUPPORT +#ifdef HAVE_LIBELF_SUPPORT #include <libelf.h> #include <gelf.h> -#include <elf.h> #endif +#include <elf.h> + +#include "dso.h" -#ifdef HAVE_CPLUS_DEMANGLE +#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT extern char *cplus_demangle(const char *, int); static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) @@ -39,26 +43,26 @@ static inline char *bfd_demangle(void __maybe_unused *v, #endif #endif -int hex2u64(const char *ptr, u64 *val); -char *strxfrchar(char *s, char from, char to); - /* * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP; * for newer versions we can use mmap to reduce memory usage: */ -#ifdef LIBELF_MMAP +#ifdef HAVE_LIBELF_MMAP_SUPPORT # define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP #else # define PERF_ELF_C_READ_MMAP ELF_C_READ #endif +#ifdef HAVE_LIBELF_SUPPORT +extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, size_t *idx); +#endif + #ifndef DMGL_PARAMS #define DMGL_PARAMS (1 << 0) /* Include function args */ #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ #endif -#define BUILD_ID_SIZE 20 - /** struct symbol - symtab entry * * @ignore - resolvable but tools ignore it (e.g. 
idle routines) @@ -74,6 +78,18 @@ struct symbol { }; void symbol__delete(struct symbol *sym); +void symbols__delete(struct rb_root *symbols); + +/* symbols__for_each_entry - iterate over symbols (rb_root) + * + * @symbols: the rb_root of symbols + * @pos: the 'struct symbol *' to use as a loop cursor + * @nd: the 'struct rb_node *' to use as a temporary storage + */ +#define symbols__for_each_entry(symbols, pos, nd) \ + for (nd = rb_first(symbols); \ + nd && (pos = rb_entry(nd, struct symbol, rb_node)); \ + nd = rb_next(nd)) static inline size_t symbol__size(const struct symbol *sym) { @@ -86,18 +102,23 @@ struct symbol_conf { unsigned short priv_size; unsigned short nr_events; bool try_vmlinux_path, + ignore_vmlinux, show_kernel_path, use_modules, sort_by_name, show_nr_samples, show_total_period, use_callchain, + cumulate_callchain, exclude_other, show_cpu_utilization, initialized, kptr_restrict, annotate_asm_raw, - annotate_src; + annotate_src, + event_group, + demangle, + filter_relative; const char *vmlinux_name, *kallsyms_name, *source_prefix, @@ -121,6 +142,8 @@ struct symbol_conf { }; extern struct symbol_conf symbol_conf; +extern int vmlinux_path__nr_entries; +extern char **vmlinux_path; static inline void *symbol__priv(struct symbol *sym) { @@ -153,87 +176,30 @@ struct branch_info { struct branch_flags flags; }; +struct mem_info { + struct addr_map_symbol iaddr; + struct addr_map_symbol daddr; + union perf_mem_data_src data_src; +}; + struct addr_location { + struct machine *machine; struct thread *thread; struct map *map; struct symbol *sym; u64 addr; char level; - bool filtered; + u8 filtered; u8 cpumode; s32 cpu; }; -enum dso_binary_type { - DSO_BINARY_TYPE__KALLSYMS = 0, - DSO_BINARY_TYPE__GUEST_KALLSYMS, - DSO_BINARY_TYPE__VMLINUX, - DSO_BINARY_TYPE__GUEST_VMLINUX, - DSO_BINARY_TYPE__JAVA_JIT, - DSO_BINARY_TYPE__DEBUGLINK, - DSO_BINARY_TYPE__BUILD_ID_CACHE, - DSO_BINARY_TYPE__FEDORA_DEBUGINFO, - DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, - DSO_BINARY_TYPE__BUILDID_DEBUGINFO, - DSO_BINARY_TYPE__SYSTEM_PATH_DSO, - DSO_BINARY_TYPE__GUEST_KMODULE, - DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, - DSO_BINARY_TYPE__NOT_FOUND, -}; - -enum dso_kernel_type { - DSO_TYPE_USER = 0, - DSO_TYPE_KERNEL, - DSO_TYPE_GUEST_KERNEL -}; - -enum dso_swap_type { - DSO_SWAP__UNSET, - DSO_SWAP__NO, - DSO_SWAP__YES, -}; - -#define DSO__DATA_CACHE_SIZE 4096 -#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1) - -struct dso_cache { - struct rb_node rb_node; - u64 offset; - u64 size; - char data[0]; -}; - -struct dso { - struct list_head node; - struct rb_root symbols[MAP__NR_TYPES]; - struct rb_root symbol_names[MAP__NR_TYPES]; - struct rb_root cache; - enum dso_kernel_type kernel; - enum dso_swap_type needs_swap; - enum dso_binary_type symtab_type; - enum dso_binary_type data_type; - u8 adjust_symbols:1; - u8 has_build_id:1; - u8 hit:1; - u8 annotate_warned:1; - u8 sname_alloc:1; - u8 lname_alloc:1; - u8 sorted_by_name; - u8 loaded; - u8 build_id[BUILD_ID_SIZE]; - const char *short_name; - char *long_name; - u16 long_name_len; - u16 short_name_len; - char name[0]; -}; - struct symsrc { char *name; int fd; enum dso_binary_type type; -#ifdef LIBELF_SUPPORT +#ifdef HAVE_LIBELF_SUPPORT Elf *elf; GElf_Ehdr ehdr; @@ -258,78 +224,15 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, bool symsrc__has_symtab(struct symsrc *ss); bool symsrc__possibly_runtime(struct symsrc *ss); -#define DSO__SWAP(dso, type, val) \ -({ \ - type ____r = val; \ - BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ - if 
(dso->needs_swap == DSO_SWAP__YES) { \ - switch (sizeof(____r)) { \ - case 2: \ - ____r = bswap_16(val); \ - break; \ - case 4: \ - ____r = bswap_32(val); \ - break; \ - case 8: \ - ____r = bswap_64(val); \ - break; \ - default: \ - BUG_ON(1); \ - } \ - } \ - ____r; \ -}) - -struct dso *dso__new(const char *name); -void dso__delete(struct dso *dso); - -int dso__name_len(const struct dso *dso); - -bool dso__loaded(const struct dso *dso, enum map_type type); -bool dso__sorted_by_name(const struct dso *dso, enum map_type type); - -static inline void dso__set_loaded(struct dso *dso, enum map_type type) -{ - dso->loaded |= (1 << type); -} - -void dso__sort_by_name(struct dso *dso, enum map_type type); - -void dsos__add(struct list_head *head, struct dso *dso); -struct dso *dsos__find(struct list_head *head, const char *name); -struct dso *__dsos__findnew(struct list_head *head, const char *name); - int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); int dso__load_vmlinux(struct dso *dso, struct map *map, - const char *vmlinux, symbol_filter_t filter); + const char *vmlinux, bool vmlinux_allocated, + symbol_filter_t filter); int dso__load_vmlinux_path(struct dso *dso, struct map *map, symbol_filter_t filter); int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, symbol_filter_t filter); -int machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type, symbol_filter_t filter); -int machine__load_vmlinux_path(struct machine *machine, enum map_type type, - symbol_filter_t filter); - -size_t __dsos__fprintf(struct list_head *head, FILE *fp); - -size_t machine__fprintf_dsos_buildid(struct machine *machine, - FILE *fp, bool with_hits); -size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); -size_t machines__fprintf_dsos_buildid(struct rb_root *machines, - FILE *fp, bool with_hits); -size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); -size_t dso__fprintf_symbols_by_name(struct dso *dso, - enum map_type type, FILE *fp); -size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); - -char dso__symtab_origin(const struct dso *dso); -void dso__set_long_name(struct dso *dso, char *name); -void dso__set_build_id(struct dso *dso, void *build_id); -bool dso__build_id_equal(const struct dso *dso, u8 *build_id); -void dso__read_running_kernel_build_id(struct dso *dso, - struct machine *machine); -struct map *dso__new_map(const char *name); + struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, u64 addr); struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, @@ -337,22 +240,12 @@ struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); -bool __dsos__read_build_ids(struct list_head *head, bool with_hits); -int build_id__sprintf(const u8 *build_id, int len, char *bf); -int kallsyms__parse(const char *filename, void *arg, - int (*process_symbol)(void *arg, const char *name, - char type, u64 start)); +int modules__parse(const char *filename, void *arg, + int (*process_module)(void *arg, const char *name, + u64 start)); int filename__read_debuglink(const char *filename, char *debuglink, size_t size); -void machine__destroy_kernel_maps(struct machine *machine); -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); -int machine__create_kernel_maps(struct machine *machine); - -int 
machines__create_kernel_maps(struct rb_root *machines, pid_t pid); -int machines__create_guest_kernel_maps(struct rb_root *machines); -void machines__destroy_guest_kernel_maps(struct rb_root *machines); - int symbol__init(void); void symbol__exit(void); void symbol__elf_init(void); @@ -360,20 +253,12 @@ struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); size_t symbol__fprintf_symname_offs(const struct symbol *sym, const struct addr_location *al, FILE *fp); size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); +size_t symbol__fprintf(struct symbol *sym, FILE *fp); bool symbol_type__is_a(char symbol_type, enum map_type map_type); +bool symbol__restricted_filename(const char *filename, + const char *restricted_filename); +bool symbol__is_idle(struct symbol *sym); -size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); - -int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, - char *root_dir, char *file, size_t size); - -int dso__data_fd(struct dso *dso, struct machine *machine); -ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, - u64 offset, u8 *data, ssize_t size); -ssize_t dso__data_read_addr(struct dso *dso, struct map *map, - struct machine *machine, u64 addr, - u8 *data, ssize_t size); -int dso__test_data(void); int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, struct symsrc *runtime_ss, symbol_filter_t filter, int kmodule); @@ -385,4 +270,28 @@ void symbols__fixup_duplicate(struct rb_root *symbols); void symbols__fixup_end(struct rb_root *symbols); void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); +typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); +int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, + bool *is_64_bit); + +#define PERF_KCORE_EXTRACT "/tmp/perf-kcore-XXXXXX" + +struct kcore_extract { + char *kcore_filename; + u64 addr; + u64 offs; + u64 len; + char extract_filename[sizeof(PERF_KCORE_EXTRACT)]; + int fd; +}; + +int kcore_extract__create(struct kcore_extract *kce); +void kcore_extract__delete(struct kcore_extract *kce); + +int kcore_copy(const char *from_dir, const char *to_dir); +int compare_proc_modules(const char *from, const char *to); + +int setup_list(struct strlist **list, const char *list_str, + const char *list_name); + #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/sysfs.c b/tools/perf/util/sysfs.c deleted file mode 100644 index 48c6902e749..00000000000 --- a/tools/perf/util/sysfs.c +++ /dev/null @@ -1,60 +0,0 @@ - -#include "util.h" -#include "sysfs.h" - -static const char * const sysfs_known_mountpoints[] = { - "/sys", - 0, -}; - -static int sysfs_found; -char sysfs_mountpoint[PATH_MAX]; - -static int sysfs_valid_mountpoint(const char *sysfs) -{ - struct statfs st_fs; - - if (statfs(sysfs, &st_fs) < 0) - return -ENOENT; - else if (st_fs.f_type != (long) SYSFS_MAGIC) - return -ENOENT; - - return 0; -} - -const char *sysfs_find_mountpoint(void) -{ - const char * const *ptr; - char type[100]; - FILE *fp; - - if (sysfs_found) - return (const char *) sysfs_mountpoint; - - ptr = sysfs_known_mountpoints; - while (*ptr) { - if (sysfs_valid_mountpoint(*ptr) == 0) { - sysfs_found = 1; - strcpy(sysfs_mountpoint, *ptr); - return sysfs_mountpoint; - } - ptr++; - } - - /* give up and parse /proc/mounts */ - fp = fopen("/proc/mounts", "r"); - if (fp == NULL) - return NULL; - - while (!sysfs_found && - fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", - sysfs_mountpoint, type) == 2) { - - 
if (strcmp(type, "sysfs") == 0) - sysfs_found = 1; - } - - fclose(fp); - - return sysfs_found ? sysfs_mountpoint : NULL; -} diff --git a/tools/perf/util/sysfs.h b/tools/perf/util/sysfs.h deleted file mode 100644 index a813b720393..00000000000 --- a/tools/perf/util/sysfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SYSFS_H__ -#define __SYSFS_H__ - -const char *sysfs_find_mountpoint(void); - -#endif /* __DEBUGFS_H__ */ diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c index 065528b7563..e74c5963dc7 100644 --- a/tools/perf/util/target.c +++ b/tools/perf/util/target.c @@ -13,9 +13,9 @@ #include <string.h> -enum perf_target_errno perf_target__validate(struct perf_target *target) +enum target_errno target__validate(struct target *target) { - enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS; + enum target_errno ret = TARGET_ERRNO__SUCCESS; if (target->pid) target->tid = target->pid; @@ -23,42 +23,49 @@ enum perf_target_errno perf_target__validate(struct perf_target *target) /* CPU and PID are mutually exclusive */ if (target->tid && target->cpu_list) { target->cpu_list = NULL; - if (ret == PERF_ERRNO_TARGET__SUCCESS) - ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__PID_OVERRIDE_CPU; } /* UID and PID are mutually exclusive */ if (target->tid && target->uid_str) { target->uid_str = NULL; - if (ret == PERF_ERRNO_TARGET__SUCCESS) - ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__PID_OVERRIDE_UID; } /* UID and CPU are mutually exclusive */ if (target->uid_str && target->cpu_list) { target->cpu_list = NULL; - if (ret == PERF_ERRNO_TARGET__SUCCESS) - ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__UID_OVERRIDE_CPU; } /* PID and SYSTEM are mutually exclusive */ if (target->tid && target->system_wide) { target->system_wide = false; - if (ret == PERF_ERRNO_TARGET__SUCCESS) - ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM; } /* UID and SYSTEM are mutually exclusive */ if (target->uid_str && target->system_wide) { target->system_wide = false; - if (ret == PERF_ERRNO_TARGET__SUCCESS) - ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM; + } + + /* THREAD and SYSTEM/CPU are mutually exclusive */ + if (target->per_thread && (target->system_wide || target->cpu_list)) { + target->per_thread = false; + if (ret == TARGET_ERRNO__SUCCESS) + ret = TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD; } return ret; } -enum perf_target_errno perf_target__parse_uid(struct perf_target *target) +enum target_errno target__parse_uid(struct target *target) { struct passwd pwd, *result; char buf[1024]; @@ -66,7 +73,7 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target) target->uid = UINT_MAX; if (str == NULL) - return PERF_ERRNO_TARGET__SUCCESS; + return TARGET_ERRNO__SUCCESS; /* Try user name first */ getpwnam_r(str, &pwd, buf, sizeof(buf), &result); @@ -79,32 +86,33 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target) int uid = strtol(str, &endptr, 10); if (*endptr != '\0') - return PERF_ERRNO_TARGET__INVALID_UID; + return TARGET_ERRNO__INVALID_UID; getpwuid_r(uid, &pwd, buf, sizeof(buf), &result); if (result == NULL) - return PERF_ERRNO_TARGET__USER_NOT_FOUND; + return TARGET_ERRNO__USER_NOT_FOUND; } target->uid = result->pw_uid; - return PERF_ERRNO_TARGET__SUCCESS; + 
return TARGET_ERRNO__SUCCESS; } /* - * This must have a same ordering as the enum perf_target_errno. + * This must have a same ordering as the enum target_errno. */ -static const char *perf_target__error_str[] = { +static const char *target__error_str[] = { "PID/TID switch overriding CPU", "PID/TID switch overriding UID", "UID switch overriding CPU", "PID/TID switch overriding SYSTEM", "UID switch overriding SYSTEM", + "SYSTEM/CPU switch overriding PER-THREAD", "Invalid User: %s", "Problems obtaining information for user %s", }; -int perf_target__strerror(struct perf_target *target, int errnum, +int target__strerror(struct target *target, int errnum, char *buf, size_t buflen) { int idx; @@ -124,21 +132,20 @@ int perf_target__strerror(struct perf_target *target, int errnum, return 0; } - if (errnum < __PERF_ERRNO_TARGET__START || - errnum >= __PERF_ERRNO_TARGET__END) + if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END) return -1; - idx = errnum - __PERF_ERRNO_TARGET__START; - msg = perf_target__error_str[idx]; + idx = errnum - __TARGET_ERRNO__START; + msg = target__error_str[idx]; switch (errnum) { - case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU - ... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM: + case TARGET_ERRNO__PID_OVERRIDE_CPU ... + TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD: snprintf(buf, buflen, "%s", msg); break; - case PERF_ERRNO_TARGET__INVALID_UID: - case PERF_ERRNO_TARGET__USER_NOT_FOUND: + case TARGET_ERRNO__INVALID_UID: + case TARGET_ERRNO__USER_NOT_FOUND: snprintf(buf, buflen, msg, target->uid_str); break; diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h index a4be8575fda..7381b1ca404 100644 --- a/tools/perf/util/target.h +++ b/tools/perf/util/target.h @@ -4,7 +4,7 @@ #include <stdbool.h> #include <sys/types.h> -struct perf_target { +struct target { const char *pid; const char *tid; const char *cpu_list; @@ -12,10 +12,12 @@ struct perf_target { uid_t uid; bool system_wide; bool uses_mmap; + bool default_per_cpu; + bool per_thread; }; -enum perf_target_errno { - PERF_ERRNO_TARGET__SUCCESS = 0, +enum target_errno { + TARGET_ERRNO__SUCCESS = 0, /* * Choose an arbitrary negative big number not to clash with standard @@ -24,42 +26,54 @@ enum perf_target_errno { * * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html */ - __PERF_ERRNO_TARGET__START = -10000, + __TARGET_ERRNO__START = -10000, + /* for target__validate() */ + TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START, + TARGET_ERRNO__PID_OVERRIDE_UID, + TARGET_ERRNO__UID_OVERRIDE_CPU, + TARGET_ERRNO__PID_OVERRIDE_SYSTEM, + TARGET_ERRNO__UID_OVERRIDE_SYSTEM, + TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD, - /* for perf_target__validate() */ - PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START, - PERF_ERRNO_TARGET__PID_OVERRIDE_UID, - PERF_ERRNO_TARGET__UID_OVERRIDE_CPU, - PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM, - PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM, + /* for target__parse_uid() */ + TARGET_ERRNO__INVALID_UID, + TARGET_ERRNO__USER_NOT_FOUND, - /* for perf_target__parse_uid() */ - PERF_ERRNO_TARGET__INVALID_UID, - PERF_ERRNO_TARGET__USER_NOT_FOUND, - - __PERF_ERRNO_TARGET__END, + __TARGET_ERRNO__END, }; -enum perf_target_errno perf_target__validate(struct perf_target *target); -enum perf_target_errno perf_target__parse_uid(struct perf_target *target); +enum target_errno target__validate(struct target *target); +enum target_errno target__parse_uid(struct target *target); -int perf_target__strerror(struct perf_target *target, int errnum, char *buf, - size_t buflen); +int 
target__strerror(struct target *target, int errnum, char *buf, size_t buflen); -static inline bool perf_target__has_task(struct perf_target *target) +static inline bool target__has_task(struct target *target) { return target->tid || target->pid || target->uid_str; } -static inline bool perf_target__has_cpu(struct perf_target *target) +static inline bool target__has_cpu(struct target *target) { return target->system_wide || target->cpu_list; } -static inline bool perf_target__none(struct perf_target *target) +static inline bool target__none(struct target *target) +{ + return !target__has_task(target) && !target__has_cpu(target); +} + +static inline bool target__uses_dummy_map(struct target *target) { - return !perf_target__has_task(target) && !perf_target__has_cpu(target); + bool use_dummy = false; + + if (target->default_per_cpu) + use_dummy = target->per_thread ? true : false; + else if (target__has_task(target) || + (!target__has_cpu(target) && !target->uses_mmap)) + use_dummy = true; + + return use_dummy; } #endif /* _PERF_TARGET_H */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8b3e5939afb..2fde0d5e40b 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,134 +6,188 @@ #include "thread.h" #include "util.h" #include "debug.h" +#include "comm.h" -static struct thread *thread__new(pid_t pid) +int thread__init_map_groups(struct thread *thread, struct machine *machine) { - struct thread *self = zalloc(sizeof(*self)); - - if (self != NULL) { - map_groups__init(&self->mg); - self->pid = pid; - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); + struct thread *leader; + pid_t pid = thread->pid_; + + if (pid == thread->tid) { + thread->mg = map_groups__new(); + } else { + leader = machine__findnew_thread(machine, pid, pid); + if (leader) + thread->mg = map_groups__get(leader->mg); } - return self; + return thread->mg ? 0 : -1; } -void thread__delete(struct thread *self) +struct thread *thread__new(pid_t pid, pid_t tid) { - map_groups__exit(&self->mg); - free(self->comm); - free(self); + char *comm_str; + struct comm *comm; + struct thread *thread = zalloc(sizeof(*thread)); + + if (thread != NULL) { + thread->pid_ = pid; + thread->tid = tid; + thread->ppid = -1; + INIT_LIST_HEAD(&thread->comm_list); + + comm_str = malloc(32); + if (!comm_str) + goto err_thread; + + snprintf(comm_str, 32, ":%d", tid); + comm = comm__new(comm_str, 0); + free(comm_str); + if (!comm) + goto err_thread; + + list_add(&comm->list, &thread->comm_list); + } + + return thread; + +err_thread: + free(thread); + return NULL; } -int thread__set_comm(struct thread *self, const char *comm) +void thread__delete(struct thread *thread) { - int err; + struct comm *comm, *tmp; - if (self->comm) - free(self->comm); - self->comm = strdup(comm); - err = self->comm == NULL ? 
-ENOMEM : 0; - if (!err) { - self->comm_set = true; + map_groups__put(thread->mg); + thread->mg = NULL; + list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) { + list_del(&comm->list); + comm__free(comm); } - return err; + + free(thread); } -int thread__comm_len(struct thread *self) +struct comm *thread__comm(const struct thread *thread) { - if (!self->comm_len) { - if (!self->comm) - return 0; - self->comm_len = strlen(self->comm); + if (list_empty(&thread->comm_list)) + return NULL; + + return list_first_entry(&thread->comm_list, struct comm, list); +} + +/* CHECKME: time should always be 0 if event aren't ordered */ +int thread__set_comm(struct thread *thread, const char *str, u64 timestamp) +{ + struct comm *new, *curr = thread__comm(thread); + int err; + + /* Override latest entry if it had no specific time coverage */ + if (!curr->start) { + err = comm__override(curr, str, timestamp); + if (err) + return err; + } else { + new = comm__new(str, timestamp); + if (!new) + return -ENOMEM; + list_add(&new->list, &thread->comm_list); } - return self->comm_len; + thread->comm_set = true; + + return 0; } -static size_t thread__fprintf(struct thread *self, FILE *fp) +const char *thread__comm_str(const struct thread *thread) { - return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) + - map_groups__fprintf(&self->mg, verbose, fp); + const struct comm *comm = thread__comm(thread); + + if (!comm) + return NULL; + + return comm__str(comm); } -struct thread *machine__findnew_thread(struct machine *self, pid_t pid) +/* CHECKME: it should probably better return the max comm len from its comm list */ +int thread__comm_len(struct thread *thread) { - struct rb_node **p = &self->threads.rb_node; - struct rb_node *parent = NULL; - struct thread *th; - - /* - * Font-end cache - PID lookups come in blocks, - * so most of the time we dont have to look up - * the full rbtree: - */ - if (self->last_match && self->last_match->pid == pid) - return self->last_match; - - while (*p != NULL) { - parent = *p; - th = rb_entry(parent, struct thread, rb_node); - - if (th->pid == pid) { - self->last_match = th; - return th; - } - - if (pid < th->pid) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; + if (!thread->comm_len) { + const char *comm = thread__comm_str(thread); + if (!comm) + return 0; + thread->comm_len = strlen(comm); } - th = thread__new(pid); - if (th != NULL) { - rb_link_node(&th->rb_node, parent, p); - rb_insert_color(&th->rb_node, &self->threads); - self->last_match = th; - } + return thread->comm_len; +} - return th; +size_t thread__fprintf(struct thread *thread, FILE *fp) +{ + return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) + + map_groups__fprintf(thread->mg, verbose, fp); } -void thread__insert_map(struct thread *self, struct map *map) +void thread__insert_map(struct thread *thread, struct map *map) { - map_groups__fixup_overlappings(&self->mg, map, verbose, stderr); - map_groups__insert(&self->mg, map); + map_groups__fixup_overlappings(thread->mg, map, verbose, stderr); + map_groups__insert(thread->mg, map); } -int thread__fork(struct thread *self, struct thread *parent) +static int thread__clone_map_groups(struct thread *thread, + struct thread *parent) { int i; - if (parent->comm_set) { - if (self->comm) - free(self->comm); - self->comm = strdup(parent->comm); - if (!self->comm) - return -ENOMEM; - self->comm_set = true; - } + /* This is new thread, we share map groups for process. 
*/ + if (thread->pid_ == parent->pid_) + return 0; + /* But this one is new process, copy maps. */ for (i = 0; i < MAP__NR_TYPES; ++i) - if (map_groups__clone(&self->mg, &parent->mg, i) < 0) + if (map_groups__clone(thread->mg, parent->mg, i) < 0) return -ENOMEM; + return 0; } -size_t machine__fprintf(struct machine *machine, FILE *fp) +int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) { - size_t ret = 0; - struct rb_node *nd; - - for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread, rb_node); + int err; - ret += thread__fprintf(pos, fp); + if (parent->comm_set) { + const char *comm = thread__comm_str(parent); + if (!comm) + return -ENOMEM; + err = thread__set_comm(thread, comm, timestamp); + if (err) + return err; + thread->comm_set = true; } - return ret; + thread->ppid = parent->tid; + return thread__clone_map_groups(thread, parent); +} + +void thread__find_cpumode_addr_location(struct thread *thread, + struct machine *machine, + enum map_type type, u64 addr, + struct addr_location *al) +{ + size_t i; + const u8 const cpumodes[] = { + PERF_RECORD_MISC_USER, + PERF_RECORD_MISC_KERNEL, + PERF_RECORD_MISC_GUEST_USER, + PERF_RECORD_MISC_GUEST_KERNEL + }; + + for (i = 0; i < ARRAY_SIZE(cpumodes); i++) { + thread__find_addr_location(thread, machine, cpumodes[i], type, + addr, al); + if (al->map) + break; + } } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index f66610b7bac..3c0c2724f82 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -2,45 +2,80 @@ #define __PERF_THREAD_H #include <linux/rbtree.h> +#include <linux/list.h> #include <unistd.h> +#include <sys/types.h> #include "symbol.h" +#include <strlist.h> struct thread { union { struct rb_node rb_node; struct list_head node; }; - struct map_groups mg; - pid_t pid; + struct map_groups *mg; + pid_t pid_; /* Not all tools update this */ + pid_t tid; + pid_t ppid; char shortname[3]; bool comm_set; - char *comm; + bool dead; /* if set thread has exited */ + struct list_head comm_list; int comm_len; void *priv; }; struct machine; +struct comm; -void thread__delete(struct thread *self); - -int thread__set_comm(struct thread *self, const char *comm); -int thread__comm_len(struct thread *self); -void thread__insert_map(struct thread *self, struct map *map); -int thread__fork(struct thread *self, struct thread *parent); - -static inline struct map *thread__find_map(struct thread *self, - enum map_type type, u64 addr) +struct thread *thread__new(pid_t pid, pid_t tid); +int thread__init_map_groups(struct thread *thread, struct machine *machine); +void thread__delete(struct thread *thread); +static inline void thread__exited(struct thread *thread) { - return self ? 
map_groups__find(&self->mg, type, addr) : NULL; + thread->dead = true; } +int thread__set_comm(struct thread *thread, const char *comm, u64 timestamp); +int thread__comm_len(struct thread *thread); +struct comm *thread__comm(const struct thread *thread); +const char *thread__comm_str(const struct thread *thread); +void thread__insert_map(struct thread *thread, struct map *map); +int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp); +size_t thread__fprintf(struct thread *thread, FILE *fp); + void thread__find_addr_map(struct thread *thread, struct machine *machine, u8 cpumode, enum map_type type, u64 addr, struct addr_location *al); void thread__find_addr_location(struct thread *thread, struct machine *machine, u8 cpumode, enum map_type type, u64 addr, - struct addr_location *al, - symbol_filter_t filter); + struct addr_location *al); + +void thread__find_cpumode_addr_location(struct thread *thread, + struct machine *machine, + enum map_type type, u64 addr, + struct addr_location *al); + +static inline void *thread__priv(struct thread *thread) +{ + return thread->priv; +} + +static inline void thread__set_priv(struct thread *thread, void *p) +{ + thread->priv = p; +} + +static inline bool thread__is_filtered(struct thread *thread) +{ + if (symbol_conf.comm_list && + !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) { + return true; + } + + return false; +} + #endif /* __PERF_THREAD_H */ diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index 9b5f856cc28..5d321591210 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -9,6 +9,7 @@ #include "strlist.h" #include <string.h> #include "thread_map.h" +#include "util.h" /* Skip "." and ".." directories */ static int filter(const struct dirent *dir) @@ -40,7 +41,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid) } for (i=0; i<items; i++) - free(namelist[i]); + zfree(&namelist[i]); free(namelist); return threads; @@ -117,7 +118,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) threads->map[threads->nr + i] = atoi(namelist[i]->d_name); for (i = 0; i < items; i++) - free(namelist[i]); + zfree(&namelist[i]); free(namelist); threads->nr += items; @@ -134,12 +135,11 @@ out_free_threads: out_free_namelist: for (i = 0; i < items; i++) - free(namelist[i]); + zfree(&namelist[i]); free(namelist); out_free_closedir: - free(threads); - threads = NULL; + zfree(&threads); goto out_closedir; } @@ -194,7 +194,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) for (i = 0; i < items; i++) { threads->map[j++] = atoi(namelist[i]->d_name); - free(namelist[i]); + zfree(&namelist[i]); } threads->nr = total_tasks; free(namelist); @@ -206,12 +206,11 @@ out: out_free_namelist: for (i = 0; i < items; i++) - free(namelist[i]); + zfree(&namelist[i]); free(namelist); out_free_threads: - free(threads); - threads = NULL; + zfree(&threads); goto out; } @@ -262,8 +261,7 @@ out: return threads; out_free_threads: - free(threads); - threads = NULL; + zfree(&threads); goto out; } diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h index f718df8a3c5..0cd8b310808 100644 --- a/tools/perf/util/thread_map.h +++ b/tools/perf/util/thread_map.h @@ -21,4 +21,9 @@ void thread_map__delete(struct thread_map *threads); size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); +static inline int thread_map__nr(struct thread_map *threads) +{ + return threads ? 
threads->nr : 1; +} + #endif /* __PERF_THREAD_MAP_H */ diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h index b0e1aadba8d..4385816d3d4 100644 --- a/tools/perf/util/tool.h +++ b/tools/perf/util/tool.h @@ -18,12 +18,9 @@ typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event, typedef int (*event_op)(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine); -typedef int (*event_attr_op)(union perf_event *event, +typedef int (*event_attr_op)(struct perf_tool *tool, + union perf_event *event, struct perf_evlist **pevlist); -typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event); - -typedef int (*event_synth_op)(union perf_event *event, - struct perf_session *session); typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event, struct perf_session *session); @@ -32,6 +29,7 @@ struct perf_tool { event_sample sample, read; event_op mmap, + mmap2, comm, fork, exit, @@ -39,8 +37,7 @@ struct perf_tool { throttle, unthrottle; event_attr_op attr; - event_synth_op tracing_data; - event_simple_op event_type; + event_op2 tracing_data; event_op2 finished_round, build_id; bool ordered_samples; diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index 884dde9b9bc..8e517def925 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c @@ -23,18 +23,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) { - float samples_per_sec = top->samples / top->delay_secs; - float ksamples_per_sec = top->kernel_samples / top->delay_secs; - float esamples_percent = (100.0 * top->exact_samples) / top->samples; + float samples_per_sec; + float ksamples_per_sec; + float esamples_percent; + struct record_opts *opts = &top->record_opts; + struct target *target = &opts->target; size_t ret = 0; + if (top->samples) { + samples_per_sec = top->samples / top->delay_secs; + ksamples_per_sec = top->kernel_samples / top->delay_secs; + esamples_percent = (100.0 * top->exact_samples) / top->samples; + } else { + samples_per_sec = ksamples_per_sec = esamples_percent = 0.0; + } + if (!perf_guest) { + float ksamples_percent = 0.0; + + if (samples_per_sec) + ksamples_percent = (100.0 * ksamples_per_sec) / + samples_per_sec; ret = SNPRINTF(bf, size, " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" " exact: %4.1f%% [", samples_per_sec, - 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) / - samples_per_sec)), - esamples_percent); + ksamples_percent, esamples_percent); } else { float us_samples_per_sec = top->us_samples / top->delay_secs; float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; @@ -61,31 +74,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) struct perf_evsel *first = perf_evlist__first(top->evlist); ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", (uint64_t)first->attr.sample_period, - top->freq ? "Hz" : ""); + opts->freq ? 
"Hz" : ""); } ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel)); ret += SNPRINTF(bf + ret, size - ret, "], "); - if (top->target.pid) + if (target->pid) ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s", - top->target.pid); - else if (top->target.tid) + target->pid); + else if (target->tid) ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s", - top->target.tid); - else if (top->target.uid_str != NULL) + target->tid); + else if (target->uid_str != NULL) ret += SNPRINTF(bf + ret, size - ret, " (uid: %s", - top->target.uid_str); + target->uid_str); else ret += SNPRINTF(bf + ret, size - ret, " (all"); - if (top->target.cpu_list) + if (target->cpu_list) ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", top->evlist->cpus->nr > 1 ? "s" : "", - top->target.cpu_list); + target->cpu_list); else { - if (top->target.tid) + if (target->tid) ret += SNPRINTF(bf + ret, size - ret, ")"); else ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 86ff1b15059..f92c37abb0a 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -2,7 +2,7 @@ #define __PERF_TOP_H 1 #include "tool.h" -#include "types.h" +#include <linux/types.h> #include <stddef.h> #include <stdbool.h> #include <termios.h> @@ -14,7 +14,7 @@ struct perf_session; struct perf_top { struct perf_tool tool; struct perf_evlist *evlist; - struct perf_target target; + struct record_opts record_opts; /* * Symbols will be added here in perf_event__process_sample and will * get out after decayed. @@ -24,29 +24,24 @@ struct perf_top { u64 exact_samples; u64 guest_us_samples, guest_kernel_samples; int print_entries, count_filter, delay_secs; - int freq; + int max_stack; bool hide_kernel_symbols, hide_user_symbols, zero; bool use_tui, use_stdio; - bool sort_has_symbols; - bool dont_use_callchains; bool kptr_restrict_warned; bool vmlinux_warned; - bool inherit; - bool group; - bool sample_id_all_missing; - bool exclude_guest_missing; bool dump_symtab; struct hist_entry *sym_filter_entry; struct perf_evsel *sym_evsel; struct perf_session *session; struct winsize winsize; - unsigned int mmap_pages; - int default_interval; int realtime_prio; int sym_pcnt_filter; const char *sym_filter; + float min_percent; }; +#define CONSOLE_CLEAR "[H[2J" + size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); void perf_top__reset_sample_counters(struct perf_top *top); #endif /* __PERF_TOP_H */ diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index a8d81c35ef6..7e6fcfe8b43 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -38,116 +38,13 @@ #include "../perf.h" #include "trace-event.h" -#include "debugfs.h" +#include <api/fs/debugfs.h> #include "evsel.h" #define VERSION "0.5" -#define TRACE_CTRL "tracing_on" -#define TRACE "trace" -#define AVAILABLE "available_tracers" -#define CURRENT "current_tracer" -#define ITER_CTRL "trace_options" -#define MAX_LATENCY "tracing_max_latency" - -unsigned int page_size; - -static const char *output_file = "trace.info"; static int output_fd; -struct event_list { - struct event_list *next; - const char *event; -}; - -struct events { - struct events *sibling; - struct events *children; - struct events *next; - char *name; -}; - - -static void *malloc_or_die(unsigned int size) -{ - void *data; - - data = malloc(size); - if (!data) - die("malloc"); - return data; -} - -static const char *find_debugfs(void) -{ - const char *path = 
debugfs_mount(NULL); - - if (!path) - die("Your kernel not support debugfs filesystem"); - - return path; -} - -/* - * Finds the path to the debugfs/tracing - * Allocates the string and stores it. - */ -static const char *find_tracing_dir(void) -{ - static char *tracing; - static int tracing_found; - const char *debugfs; - - if (tracing_found) - return tracing; - - debugfs = find_debugfs(); - - tracing = malloc_or_die(strlen(debugfs) + 9); - - sprintf(tracing, "%s/tracing", debugfs); - - tracing_found = 1; - return tracing; -} - -static char *get_tracing_file(const char *name) -{ - const char *tracing; - char *file; - - tracing = find_tracing_dir(); - if (!tracing) - return NULL; - - file = malloc_or_die(strlen(tracing) + strlen(name) + 2); - - sprintf(file, "%s/%s", tracing, name); - return file; -} - -static void put_tracing_file(char *file) -{ - free(file); -} - -static ssize_t calc_data_size; - -static ssize_t write_or_die(const void *buf, size_t len) -{ - int ret; - - if (calc_data_size) { - calc_data_size += len; - return len; - } - - ret = write(output_fd, buf, len); - if (ret < 0) - die("writing to '%s'", output_file); - - return ret; -} int bigendian(void) { @@ -159,59 +56,106 @@ int bigendian(void) } /* unfortunately, you can not stat debugfs or proc files for size */ -static void record_file(const char *file, size_t hdr_sz) +static int record_file(const char *file, ssize_t hdr_sz) { unsigned long long size = 0; char buf[BUFSIZ], *sizep; off_t hdr_pos = lseek(output_fd, 0, SEEK_CUR); int r, fd; + int err = -EIO; fd = open(file, O_RDONLY); - if (fd < 0) - die("Can't read '%s'", file); + if (fd < 0) { + pr_debug("Can't read '%s'", file); + return -errno; + } /* put in zeros for file size, then fill true size later */ - if (hdr_sz) - write_or_die(&size, hdr_sz); + if (hdr_sz) { + if (write(output_fd, &size, hdr_sz) != hdr_sz) + goto out; + } do { r = read(fd, buf, BUFSIZ); if (r > 0) { size += r; - write_or_die(buf, r); + if (write(output_fd, buf, r) != r) + goto out; } } while (r > 0); - close(fd); /* ugh, handle big-endian hdr_size == 4 */ sizep = (char*)&size; if (bigendian()) sizep += sizeof(u64) - hdr_sz; - if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) - die("writing to %s", output_file); + if (hdr_sz && pwrite(output_fd, sizep, hdr_sz, hdr_pos) < 0) { + pr_debug("writing file size failed\n"); + goto out; + } + + err = 0; +out: + close(fd); + return err; } -static void read_header_files(void) +static int record_header_files(void) { char *path; struct stat st; + int err = -EIO; path = get_tracing_file("events/header_page"); - if (stat(path, &st) < 0) - die("can't read '%s'", path); + if (!path) { + pr_debug("can't get tracing/events/header_page"); + return -ENOMEM; + } + + if (stat(path, &st) < 0) { + pr_debug("can't read '%s'", path); + goto out; + } + + if (write(output_fd, "header_page", 12) != 12) { + pr_debug("can't write header_page\n"); + goto out; + } + + if (record_file(path, 8) < 0) { + pr_debug("can't record header_page file\n"); + goto out; + } - write_or_die("header_page", 12); - record_file(path, 8); put_tracing_file(path); path = get_tracing_file("events/header_event"); - if (stat(path, &st) < 0) - die("can't read '%s'", path); + if (!path) { + pr_debug("can't get tracing/events/header_event"); + err = -ENOMEM; + goto out; + } + + if (stat(path, &st) < 0) { + pr_debug("can't read '%s'", path); + goto out; + } + + if (write(output_fd, "header_event", 13) != 13) { + pr_debug("can't write header_event\n"); + goto out; + } - write_or_die("header_event", 
13); - record_file(path, 8); + if (record_file(path, 8) < 0) { + pr_debug("can't record header_event file\n"); + goto out; + } + + err = 0; +out: put_tracing_file(path); + return err; } static bool name_in_tp_list(char *sys, struct tracepoint_path *tps) @@ -225,7 +169,7 @@ static bool name_in_tp_list(char *sys, struct tracepoint_path *tps) return false; } -static void copy_event_system(const char *sys, struct tracepoint_path *tps) +static int copy_event_system(const char *sys, struct tracepoint_path *tps) { struct dirent *dent; struct stat st; @@ -233,10 +177,13 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) DIR *dir; int count = 0; int ret; + int err; dir = opendir(sys); - if (!dir) - die("can't read directory '%s'", sys); + if (!dir) { + pr_debug("can't read directory '%s'", sys); + return -errno; + } while ((dent = readdir(dir))) { if (dent->d_type != DT_DIR || @@ -244,7 +191,11 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) strcmp(dent->d_name, "..") == 0 || !name_in_tp_list(dent->d_name, tps)) continue; - format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10); + format = malloc(strlen(sys) + strlen(dent->d_name) + 10); + if (!format) { + err = -ENOMEM; + goto out; + } sprintf(format, "%s/%s/format", sys, dent->d_name); ret = stat(format, &st); free(format); @@ -253,7 +204,11 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) count++; } - write_or_die(&count, 4); + if (write(output_fd, &count, 4) != 4) { + err = -EIO; + pr_debug("can't write count\n"); + goto out; + } rewinddir(dir); while ((dent = readdir(dir))) { @@ -262,27 +217,45 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps) strcmp(dent->d_name, "..") == 0 || !name_in_tp_list(dent->d_name, tps)) continue; - format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10); + format = malloc(strlen(sys) + strlen(dent->d_name) + 10); + if (!format) { + err = -ENOMEM; + goto out; + } sprintf(format, "%s/%s/format", sys, dent->d_name); ret = stat(format, &st); - if (ret >= 0) - record_file(format, 8); - + if (ret >= 0) { + err = record_file(format, 8); + if (err) { + free(format); + goto out; + } + } free(format); } + err = 0; +out: closedir(dir); + return err; } -static void read_ftrace_files(struct tracepoint_path *tps) +static int record_ftrace_files(struct tracepoint_path *tps) { char *path; + int ret; path = get_tracing_file("events/ftrace"); + if (!path) { + pr_debug("can't get tracing/events/ftrace"); + return -ENOMEM; + } - copy_event_system(path, tps); + ret = copy_event_system(path, tps); put_tracing_file(path); + + return ret; } static bool system_in_tp_list(char *sys, struct tracepoint_path *tps) @@ -296,7 +269,7 @@ static bool system_in_tp_list(char *sys, struct tracepoint_path *tps) return false; } -static void read_event_files(struct tracepoint_path *tps) +static int record_event_files(struct tracepoint_path *tps) { struct dirent *dent; struct stat st; @@ -305,12 +278,20 @@ static void read_event_files(struct tracepoint_path *tps) DIR *dir; int count = 0; int ret; + int err; path = get_tracing_file("events"); + if (!path) { + pr_debug("can't get tracing/events"); + return -ENOMEM; + } dir = opendir(path); - if (!dir) - die("can't read directory '%s'", path); + if (!dir) { + err = -errno; + pr_debug("can't read directory '%s'", path); + goto out; + } while ((dent = readdir(dir))) { if (dent->d_type != DT_DIR || @@ -322,7 +303,11 @@ static void read_event_files(struct tracepoint_path *tps) 
count++; } - write_or_die(&count, 4); + if (write(output_fd, &count, 4) != 4) { + err = -EIO; + pr_debug("can't write count\n"); + goto out; + } rewinddir(dir); while ((dent = readdir(dir))) { @@ -332,56 +317,90 @@ static void read_event_files(struct tracepoint_path *tps) strcmp(dent->d_name, "ftrace") == 0 || !system_in_tp_list(dent->d_name, tps)) continue; - sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2); + sys = malloc(strlen(path) + strlen(dent->d_name) + 2); + if (!sys) { + err = -ENOMEM; + goto out; + } sprintf(sys, "%s/%s", path, dent->d_name); ret = stat(sys, &st); if (ret >= 0) { - write_or_die(dent->d_name, strlen(dent->d_name) + 1); - copy_event_system(sys, tps); + ssize_t size = strlen(dent->d_name) + 1; + + if (write(output_fd, dent->d_name, size) != size || + copy_event_system(sys, tps) < 0) { + err = -EIO; + free(sys); + goto out; + } } free(sys); } - + err = 0; +out: closedir(dir); put_tracing_file(path); + + return err; } -static void read_proc_kallsyms(void) +static int record_proc_kallsyms(void) { unsigned int size; const char *path = "/proc/kallsyms"; struct stat st; - int ret; + int ret, err = 0; ret = stat(path, &st); if (ret < 0) { /* not found */ size = 0; - write_or_die(&size, 4); - return; + if (write(output_fd, &size, 4) != 4) + err = -EIO; + return err; } - record_file(path, 4); + return record_file(path, 4); } -static void read_ftrace_printk(void) +static int record_ftrace_printk(void) { unsigned int size; char *path; struct stat st; - int ret; + int ret, err = 0; path = get_tracing_file("printk_formats"); + if (!path) { + pr_debug("can't get tracing/printk_formats"); + return -ENOMEM; + } + ret = stat(path, &st); if (ret < 0) { /* not found */ size = 0; - write_or_die(&size, 4); + if (write(output_fd, &size, 4) != 4) + err = -EIO; goto out; } - record_file(path, 4); + err = record_file(path, 4); out: put_tracing_file(path); + return err; +} + +static void +put_tracepoints_path(struct tracepoint_path *tps) +{ + while (tps) { + struct tracepoint_path *t = tps; + + tps = tps->next; + zfree(&t->name); + zfree(&t->system); + free(t); + } } static struct tracepoint_path * @@ -395,28 +414,33 @@ get_tracepoints_path(struct list_head *pattrs) if (pos->attr.type != PERF_TYPE_TRACEPOINT) continue; ++nr_tracepoints; + + if (pos->name) { + ppath->next = tracepoint_name_to_path(pos->name); + if (ppath->next) + goto next; + + if (strchr(pos->name, ':') == NULL) + goto try_id; + + goto error; + } + +try_id: ppath->next = tracepoint_id_to_path(pos->attr.config); - if (!ppath->next) - die("%s\n", "No memory to alloc tracepoints list"); + if (!ppath->next) { +error: + pr_debug("No memory to alloc tracepoints list\n"); + put_tracepoints_path(&path); + return NULL; + } +next: ppath = ppath->next; } return nr_tracepoints > 0 ? path.next : NULL; } -static void -put_tracepoints_path(struct tracepoint_path *tps) -{ - while (tps) { - struct tracepoint_path *t = tps; - - tps = tps->next; - free(t->name); - free(t->system); - free(t); - } -} - bool have_tracepoints(struct list_head *pattrs) { struct perf_evsel *pos; @@ -428,9 +452,10 @@ bool have_tracepoints(struct list_head *pattrs) return false; } -static void tracing_data_header(void) +static int tracing_data_header(void) { char buf[20]; + ssize_t size; /* just guessing this is someone's birthday.. 
;) */ buf[0] = 23; @@ -438,9 +463,12 @@ static void tracing_data_header(void) buf[2] = 68; memcpy(buf + 3, "tracing", 7); - write_or_die(buf, 10); + if (write(output_fd, buf, 10) != 10) + return -1; - write_or_die(VERSION, strlen(VERSION) + 1); + size = strlen(VERSION) + 1; + if (write(output_fd, VERSION, size) != size) + return -1; /* save endian */ if (bigendian()) @@ -448,17 +476,19 @@ static void tracing_data_header(void) else buf[0] = 0; - read_trace_init(buf[0], buf[0]); - - write_or_die(buf, 1); + if (write(output_fd, buf, 1) != 1) + return -1; /* save size of long */ buf[0] = sizeof(long); - write_or_die(buf, 1); + if (write(output_fd, buf, 1) != 1) + return -1; /* save page_size */ - page_size = sysconf(_SC_PAGESIZE); - write_or_die(&page_size, 4); + if (write(output_fd, &page_size, 4) != 4) + return -1; + + return 0; } struct tracing_data *tracing_data_get(struct list_head *pattrs, @@ -466,6 +496,7 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, { struct tracepoint_path *tps; struct tracing_data *tdata; + int err; output_fd = fd; @@ -473,7 +504,10 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, if (!tps) return NULL; - tdata = malloc_or_die(sizeof(*tdata)); + tdata = malloc(sizeof(*tdata)); + if (!tdata) + return NULL; + tdata->temp = temp; tdata->size = 0; @@ -482,12 +516,16 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, snprintf(tdata->temp_file, sizeof(tdata->temp_file), "/tmp/perf-XXXXXX"); - if (!mkstemp(tdata->temp_file)) - die("Can't make temp file"); + if (!mkstemp(tdata->temp_file)) { + pr_debug("Can't make temp file"); + return NULL; + } temp_fd = open(tdata->temp_file, O_RDWR); - if (temp_fd < 0) - die("Can't read '%s'", tdata->temp_file); + if (temp_fd < 0) { + pr_debug("Can't read '%s'", tdata->temp_file); + return NULL; + } /* * Set the temp file the default output, so all the @@ -496,13 +534,24 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, output_fd = temp_fd; } - tracing_data_header(); - read_header_files(); - read_ftrace_files(tps); - read_event_files(tps); - read_proc_kallsyms(); - read_ftrace_printk(); + err = tracing_data_header(); + if (err) + goto out; + err = record_header_files(); + if (err) + goto out; + err = record_ftrace_files(tps); + if (err) + goto out; + err = record_event_files(tps); + if (err) + goto out; + err = record_proc_kallsyms(); + if (err) + goto out; + err = record_ftrace_printk(); +out: /* * All tracing data are stored by now, we can restore * the default output file in case we used temp file. 
@@ -513,22 +562,29 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, output_fd = fd; } + if (err) + zfree(&tdata); + put_tracepoints_path(tps); return tdata; } -void tracing_data_put(struct tracing_data *tdata) +int tracing_data_put(struct tracing_data *tdata) { + int err = 0; + if (tdata->temp) { - record_file(tdata->temp_file, 0); + err = record_file(tdata->temp_file, 0); unlink(tdata->temp_file); } free(tdata); + return err; } int read_tracing_data(int fd, struct list_head *pattrs) { + int err; struct tracing_data *tdata; /* @@ -539,6 +595,6 @@ int read_tracing_data(int fd, struct list_head *pattrs) if (!tdata) return -ENOMEM; - tracing_data_put(tdata); - return 0; + err = tracing_data_put(tdata); + return err; } diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 3aabcd687cd..c36636fd825 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -28,25 +28,6 @@ #include "util.h" #include "trace-event.h" -int header_page_size_size; -int header_page_ts_size; -int header_page_data_offset; - -bool latency_format; - -struct pevent *read_trace_init(int file_bigendian, int host_bigendian) -{ - struct pevent *pevent = pevent_alloc(); - - if (pevent != NULL) { - pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); - pevent_set_file_bigendian(pevent, file_bigendian); - pevent_set_host_bigendian(pevent, host_bigendian); - } - - return pevent; -} - static int get_common_field(struct scripting_context *context, int *offset, int *size, const char *type) { @@ -126,42 +107,6 @@ raw_field_value(struct event_format *event, const char *name, void *data) return val; } -void *raw_field_ptr(struct event_format *event, const char *name, void *data) -{ - struct format_field *field; - - field = pevent_find_any_field(event, name); - if (!field) - return NULL; - - if (field->flags & FIELD_IS_DYNAMIC) { - int offset; - - offset = *(int *)(data + field->offset); - offset &= 0xffff; - - return data + offset; - } - - return data + field->offset; -} - -int trace_parse_common_type(struct pevent *pevent, void *data) -{ - struct pevent_record record; - - record.data = data; - return pevent_data_type(pevent, &record); -} - -int trace_parse_common_pid(struct pevent *pevent, void *data) -{ - struct pevent_record record; - - record.data = data; - return pevent_data_pid(pevent, &record); -} - unsigned long long read_size(struct event_format *event, void *ptr, int size) { return pevent_read_number(event->pevent, ptr, size); @@ -181,43 +126,7 @@ void event_format__print(struct event_format *event, trace_seq_init(&s); pevent_event_info(&s, event, &record); trace_seq_do_printf(&s); -} - -void print_trace_event(struct pevent *pevent, int cpu, void *data, int size) -{ - int type = trace_parse_common_type(pevent, data); - struct event_format *event = pevent_find_event(pevent, type); - - if (!event) { - warning("ug! 
no event found for type %d", type); - return; - } - - event_format__print(event, cpu, data, size); -} - -void print_event(struct pevent *pevent, int cpu, void *data, int size, - unsigned long long nsecs, char *comm) -{ - struct pevent_record record; - struct trace_seq s; - int pid; - - pevent->latency_format = latency_format; - - record.ts = nsecs; - record.cpu = cpu; - record.size = size; - record.data = data; - pid = pevent_data_pid(pevent, &record); - - if (!pevent_pid_is_registered(pevent, pid)) - pevent_register_comm(pevent, comm, pid); - - trace_seq_init(&s); - pevent_print_event(pevent, &s, &record); - trace_seq_do_printf(&s); - printf("\n"); + trace_seq_destroy(&s); } void parse_proc_kallsyms(struct pevent *pevent, @@ -229,7 +138,7 @@ void parse_proc_kallsyms(struct pevent *pevent, char *next = NULL; char *addr_str; char *mod; - char *fmt; + char *fmt = NULL; line = strtok_r(file, "\n", &next); while (line) { diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 719ed74a856..e113e180c48 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -18,8 +18,6 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define _FILE_OFFSET_BITS 64 - #include <dirent.h> #include <stdio.h> #include <stdlib.h> @@ -41,28 +39,10 @@ static int input_fd; -static int read_page; - -int file_bigendian; -int host_bigendian; -static int long_size; - -static unsigned long page_size; - -static ssize_t calc_data_size; +static ssize_t trace_data_size; static bool repipe; -static void *malloc_or_die(int size) -{ - void *ret; - - ret = malloc(size); - if (!ret) - die("malloc"); - return ret; -} - -static int do_read(int fd, void *buf, int size) +static int __do_read(int fd, void *buf, int size) { int rsize = size; @@ -75,8 +55,10 @@ static int do_read(int fd, void *buf, int size) if (repipe) { int retw = write(STDOUT_FILENO, buf, ret); - if (retw <= 0 || retw != ret) - die("repiping input file"); + if (retw <= 0 || retw != ret) { + pr_debug("repiping input file"); + return -1; + } } size -= ret; @@ -86,17 +68,18 @@ static int do_read(int fd, void *buf, int size) return rsize; } -static int read_or_die(void *data, int size) +static int do_read(void *data, int size) { int r; - r = do_read(input_fd, data, size); - if (r <= 0) - die("reading input file (size expected=%d received=%d)", - size, r); + r = __do_read(input_fd, data, size); + if (r <= 0) { + pr_debug("reading input file (size expected=%d received=%d)", + size, r); + return -1; + } - if (calc_data_size) - calc_data_size += r; + trace_data_size += r; return r; } @@ -109,7 +92,7 @@ static void skip(int size) while (size) { r = size > BUFSIZ ? 
BUFSIZ : size; - read_or_die(buf, r); + do_read(buf, r); size -= r; }; } @@ -118,7 +101,8 @@ static unsigned int read4(struct pevent *pevent) { unsigned int data; - read_or_die(&data, 4); + if (do_read(&data, 4) < 0) + return 0; return __data2host4(pevent, data); } @@ -126,7 +110,8 @@ static unsigned long long read8(struct pevent *pevent) { unsigned long long data; - read_or_die(&data, 8); + if (do_read(&data, 8) < 0) + return 0; return __data2host8(pevent, data); } @@ -140,17 +125,23 @@ static char *read_string(void) for (;;) { r = read(input_fd, &c, 1); - if (r < 0) - die("reading input file"); + if (r < 0) { + pr_debug("reading input file"); + goto out; + } - if (!r) - die("no data"); + if (!r) { + pr_debug("no data"); + goto out; + } if (repipe) { int retw = write(STDOUT_FILENO, &c, 1); - if (retw <= 0 || retw != r) - die("repiping input file string"); + if (retw <= 0 || retw != r) { + pr_debug("repiping input file string"); + goto out; + } } buf[size++] = c; @@ -159,336 +150,200 @@ static char *read_string(void) break; } - if (calc_data_size) - calc_data_size += size; - - str = malloc_or_die(size); - memcpy(str, buf, size); + trace_data_size += size; + str = malloc(size); + if (str) + memcpy(str, buf, size); +out: return str; } -static void read_proc_kallsyms(struct pevent *pevent) +static int read_proc_kallsyms(struct pevent *pevent) { unsigned int size; char *buf; size = read4(pevent); if (!size) - return; + return 0; - buf = malloc_or_die(size + 1); - read_or_die(buf, size); + buf = malloc(size + 1); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } buf[size] = '\0'; parse_proc_kallsyms(pevent, buf, size); free(buf); + return 0; } -static void read_ftrace_printk(struct pevent *pevent) +static int read_ftrace_printk(struct pevent *pevent) { unsigned int size; char *buf; + /* it can have 0 size */ size = read4(pevent); if (!size) - return; + return 0; + + buf = malloc(size); + if (buf == NULL) + return -1; - buf = malloc_or_die(size); - read_or_die(buf, size); + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } parse_ftrace_printk(pevent, buf, size); free(buf); + return 0; } -static void read_header_files(struct pevent *pevent) +static int read_header_files(struct pevent *pevent) { unsigned long long size; - char *header_event; + char *header_page; char buf[BUFSIZ]; + int ret = 0; - read_or_die(buf, 12); + if (do_read(buf, 12) < 0) + return -1; - if (memcmp(buf, "header_page", 12) != 0) - die("did not read header page"); + if (memcmp(buf, "header_page", 12) != 0) { + pr_debug("did not read header page"); + return -1; + } size = read8(pevent); - skip(size); - /* - * The size field in the page is of type long, - * use that instead, since it represents the kernel. - */ - long_size = header_page_size_size; + header_page = malloc(size); + if (header_page == NULL) + return -1; - read_or_die(buf, 13); - if (memcmp(buf, "header_event", 13) != 0) - die("did not read header event"); + if (do_read(header_page, size) < 0) { + pr_debug("did not read header page"); + free(header_page); + return -1; + } + + if (!pevent_parse_header_page(pevent, header_page, size, + pevent_get_long_size(pevent))) { + /* + * The commit field in the page is of type long, + * use that instead, since it represents the kernel. 
+ */ + pevent_set_long_size(pevent, pevent->header_page_size_size); + } + free(header_page); + + if (do_read(buf, 13) < 0) + return -1; + + if (memcmp(buf, "header_event", 13) != 0) { + pr_debug("did not read header event"); + return -1; + } size = read8(pevent); - header_event = malloc_or_die(size); - read_or_die(header_event, size); - free(header_event); + skip(size); + + return ret; } -static void read_ftrace_file(struct pevent *pevent, unsigned long long size) +static int read_ftrace_file(struct pevent *pevent, unsigned long long size) { char *buf; - buf = malloc_or_die(size); - read_or_die(buf, size); + buf = malloc(size); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } + parse_ftrace_file(pevent, buf, size); free(buf); + return 0; } -static void read_event_file(struct pevent *pevent, char *sys, +static int read_event_file(struct pevent *pevent, char *sys, unsigned long long size) { char *buf; - buf = malloc_or_die(size); - read_or_die(buf, size); + buf = malloc(size); + if (buf == NULL) + return -1; + + if (do_read(buf, size) < 0) { + free(buf); + return -1; + } + parse_event_file(pevent, buf, size, sys); free(buf); + return 0; } -static void read_ftrace_files(struct pevent *pevent) +static int read_ftrace_files(struct pevent *pevent) { unsigned long long size; int count; int i; + int ret; count = read4(pevent); for (i = 0; i < count; i++) { size = read8(pevent); - read_ftrace_file(pevent, size); + ret = read_ftrace_file(pevent, size); + if (ret) + return ret; } + return 0; } -static void read_event_files(struct pevent *pevent) +static int read_event_files(struct pevent *pevent) { unsigned long long size; char *sys; int systems; int count; int i,x; + int ret; systems = read4(pevent); for (i = 0; i < systems; i++) { sys = read_string(); + if (sys == NULL) + return -1; count = read4(pevent); + for (x=0; x < count; x++) { size = read8(pevent); - read_event_file(pevent, sys, size); - } - } -} - -struct cpu_data { - unsigned long long offset; - unsigned long long size; - unsigned long long timestamp; - struct pevent_record *next; - char *page; - int cpu; - int index; - int page_size; -}; - -static struct cpu_data *cpu_data; - -static void update_cpu_data_index(int cpu) -{ - cpu_data[cpu].offset += page_size; - cpu_data[cpu].size -= page_size; - cpu_data[cpu].index = 0; -} - -static void get_next_page(int cpu) -{ - off_t save_seek; - off_t ret; - - if (!cpu_data[cpu].page) - return; - - if (read_page) { - if (cpu_data[cpu].size <= page_size) { - free(cpu_data[cpu].page); - cpu_data[cpu].page = NULL; - return; - } - - update_cpu_data_index(cpu); - - /* other parts of the code may expect the pointer to not move */ - save_seek = lseek(input_fd, 0, SEEK_CUR); - - ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET); - if (ret == (off_t)-1) - die("failed to lseek"); - ret = read(input_fd, cpu_data[cpu].page, page_size); - if (ret < 0) - die("failed to read page"); - - /* reset the file pointer back */ - lseek(input_fd, save_seek, SEEK_SET); - - return; - } - - munmap(cpu_data[cpu].page, page_size); - cpu_data[cpu].page = NULL; - - if (cpu_data[cpu].size <= page_size) - return; - - update_cpu_data_index(cpu); - - cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE, - input_fd, cpu_data[cpu].offset); - if (cpu_data[cpu].page == MAP_FAILED) - die("failed to mmap cpu %d at offset 0x%llx", - cpu, cpu_data[cpu].offset); -} - -static unsigned int type_len4host(unsigned int type_len_ts) -{ - if (file_bigendian) - return (type_len_ts >> 27) 
& ((1 << 5) - 1); - else - return type_len_ts & ((1 << 5) - 1); -} - -static unsigned int ts4host(unsigned int type_len_ts) -{ - if (file_bigendian) - return type_len_ts & ((1 << 27) - 1); - else - return type_len_ts >> 5; -} - -static int calc_index(void *ptr, int cpu) -{ - return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page; -} - -struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu) -{ - struct pevent_record *data; - void *page = cpu_data[cpu].page; - int idx = cpu_data[cpu].index; - void *ptr = page + idx; - unsigned long long extend; - unsigned int type_len_ts; - unsigned int type_len; - unsigned int delta; - unsigned int length = 0; - - if (cpu_data[cpu].next) - return cpu_data[cpu].next; - - if (!page) - return NULL; - - if (!idx) { - /* FIXME: handle header page */ - if (header_page_ts_size != 8) - die("expected a long long type for timestamp"); - cpu_data[cpu].timestamp = data2host8(pevent, ptr); - ptr += 8; - switch (header_page_size_size) { - case 4: - cpu_data[cpu].page_size = data2host4(pevent, ptr); - ptr += 4; - break; - case 8: - cpu_data[cpu].page_size = data2host8(pevent, ptr); - ptr += 8; - break; - default: - die("bad long size"); + ret = read_event_file(pevent, sys, size); + if (ret) + return ret; } - ptr = cpu_data[cpu].page + header_page_data_offset; } - -read_again: - idx = calc_index(ptr, cpu); - - if (idx >= cpu_data[cpu].page_size) { - get_next_page(cpu); - return trace_peek_data(pevent, cpu); - } - - type_len_ts = data2host4(pevent, ptr); - ptr += 4; - - type_len = type_len4host(type_len_ts); - delta = ts4host(type_len_ts); - - switch (type_len) { - case RINGBUF_TYPE_PADDING: - if (!delta) - die("error, hit unexpected end of page"); - length = data2host4(pevent, ptr); - ptr += 4; - length *= 4; - ptr += length; - goto read_again; - - case RINGBUF_TYPE_TIME_EXTEND: - extend = data2host4(pevent, ptr); - ptr += 4; - extend <<= TS_SHIFT; - extend += delta; - cpu_data[cpu].timestamp += extend; - goto read_again; - - case RINGBUF_TYPE_TIME_STAMP: - ptr += 12; - break; - case 0: - length = data2host4(pevent, ptr); - ptr += 4; - die("here! 
length=%d", length); - break; - default: - length = type_len * 4; - break; - } - - cpu_data[cpu].timestamp += delta; - - data = malloc_or_die(sizeof(*data)); - memset(data, 0, sizeof(*data)); - - data->ts = cpu_data[cpu].timestamp; - data->size = length; - data->data = ptr; - ptr += length; - - cpu_data[cpu].index = calc_index(ptr, cpu); - cpu_data[cpu].next = data; - - return data; -} - -struct pevent_record *trace_read_data(struct pevent *pevent, int cpu) -{ - struct pevent_record *data; - - data = trace_peek_data(pevent, cpu); - cpu_data[cpu].next = NULL; - - return data; + return 0; } -ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) +ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) { char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; @@ -496,58 +351,94 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe) int show_version = 0; int show_funcs = 0; int show_printk = 0; - ssize_t size; + ssize_t size = -1; + int file_bigendian; + int host_bigendian; + int file_long_size; + int file_page_size; + struct pevent *pevent = NULL; + int err; - calc_data_size = 1; repipe = __repipe; - input_fd = fd; - read_or_die(buf, 3); - if (memcmp(buf, test, 3) != 0) - die("no trace data in the file"); + if (do_read(buf, 3) < 0) + return -1; + if (memcmp(buf, test, 3) != 0) { + pr_debug("no trace data in the file"); + return -1; + } - read_or_die(buf, 7); - if (memcmp(buf, "tracing", 7) != 0) - die("not a trace file (missing 'tracing' tag)"); + if (do_read(buf, 7) < 0) + return -1; + if (memcmp(buf, "tracing", 7) != 0) { + pr_debug("not a trace file (missing 'tracing' tag)"); + return -1; + } version = read_string(); + if (version == NULL) + return -1; if (show_version) printf("version = %s\n", version); free(version); - read_or_die(buf, 1); + if (do_read(buf, 1) < 0) + return -1; file_bigendian = buf[0]; host_bigendian = bigendian(); - *ppevent = read_trace_init(file_bigendian, host_bigendian); - if (*ppevent == NULL) - die("read_trace_init failed"); - - read_or_die(buf, 1); - long_size = buf[0]; - - page_size = read4(*ppevent); - - read_header_files(*ppevent); - - read_ftrace_files(*ppevent); - read_event_files(*ppevent); - read_proc_kallsyms(*ppevent); - read_ftrace_printk(*ppevent); + if (trace_event__init(tevent)) { + pr_debug("trace_event__init failed"); + goto out; + } - size = calc_data_size - 1; - calc_data_size = 0; + pevent = tevent->pevent; + + pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); + pevent_set_file_bigendian(pevent, file_bigendian); + pevent_set_host_bigendian(pevent, host_bigendian); + + if (do_read(buf, 1) < 0) + goto out; + file_long_size = buf[0]; + + file_page_size = read4(pevent); + if (!file_page_size) + goto out; + + pevent_set_long_size(pevent, file_long_size); + pevent_set_page_size(pevent, file_page_size); + + err = read_header_files(pevent); + if (err) + goto out; + err = read_ftrace_files(pevent); + if (err) + goto out; + err = read_event_files(pevent); + if (err) + goto out; + err = read_proc_kallsyms(pevent); + if (err) + goto out; + err = read_ftrace_printk(pevent); + if (err) + goto out; + + size = trace_data_size; repipe = false; if (show_funcs) { - pevent_print_funcs(*ppevent); - return size; - } - if (show_printk) { - pevent_print_printk(*ppevent); - return size; + pevent_print_funcs(pevent); + } else if (show_printk) { + pevent_print_printk(pevent); } + pevent = NULL; + +out: + if (pevent) + trace_event__cleanup(tevent); return size; } diff --git a/tools/perf/util/trace-event-scripting.c 
b/tools/perf/util/trace-event-scripting.c index 8715a1006d0..57aaccc1692 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -38,7 +38,7 @@ static int stop_script_unsupported(void) static void process_event_unsupported(union perf_event *event __maybe_unused, struct perf_sample *sample __maybe_unused, struct perf_evsel *evsel __maybe_unused, - struct machine *machine __maybe_unused, + struct thread *thread __maybe_unused, struct addr_location *al __maybe_unused) { } diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c new file mode 100644 index 00000000000..6322d37164c --- /dev/null +++ b/tools/perf/util/trace-event.c @@ -0,0 +1,82 @@ + +#include <stdio.h> +#include <unistd.h> +#include <stdlib.h> +#include <errno.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <linux/kernel.h> +#include <traceevent/event-parse.h> +#include "trace-event.h" +#include "util.h" + +/* + * global trace_event object used by trace_event__tp_format + * + * TODO There's no cleanup call for this. Add some sort of + * __exit function support and call trace_event__cleanup + * there. + */ +static struct trace_event tevent; + +int trace_event__init(struct trace_event *t) +{ + struct pevent *pevent = pevent_alloc(); + + if (pevent) { + t->plugin_list = traceevent_load_plugins(pevent); + t->pevent = pevent; + } + + return pevent ? 0 : -1; +} + +void trace_event__cleanup(struct trace_event *t) +{ + traceevent_unload_plugins(t->plugin_list, t->pevent); + pevent_free(t->pevent); +} + +static struct event_format* +tp_format(const char *sys, const char *name) +{ + struct pevent *pevent = tevent.pevent; + struct event_format *event = NULL; + char path[PATH_MAX]; + size_t size; + char *data; + + scnprintf(path, PATH_MAX, "%s/%s/%s/format", + tracing_events_path, sys, name); + + if (filename__read_str(path, &data, &size)) + return NULL; + + pevent_parse_format(pevent, &event, data, size, sys); + + free(data); + return event; +} + +struct event_format* +trace_event__tp_format(const char *sys, const char *name) +{ + static bool initialized; + + if (!initialized) { + int be = traceevent_host_bigendian(); + struct pevent *pevent; + + if (trace_event__init(&tevent)) + return NULL; + + pevent = tevent.pevent; + pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); + pevent_set_file_bigendian(pevent, be); + pevent_set_host_bigendian(pevent, be); + initialized = true; + } + + return tp_format(sys, name); +} diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index a55fd37ffea..7b6d6868832 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -1,66 +1,48 @@ #ifndef _PERF_UTIL_TRACE_EVENT_H #define _PERF_UTIL_TRACE_EVENT_H +#include <traceevent/event-parse.h> #include "parse-events.h" -#include "event-parse.h" -#include "session.h" struct machine; struct perf_sample; union perf_event; struct perf_tool; +struct thread; +struct plugin_list; -extern int header_page_size_size; -extern int header_page_ts_size; -extern int header_page_data_offset; - -extern bool latency_format; -extern struct pevent *perf_pevent; - -enum { - RINGBUF_TYPE_PADDING = 29, - RINGBUF_TYPE_TIME_EXTEND = 30, - RINGBUF_TYPE_TIME_STAMP = 31, +struct trace_event { + struct pevent *pevent; + struct plugin_list *plugin_list; }; -#ifndef TS_SHIFT -#define TS_SHIFT 27 -#endif +int trace_event__init(struct trace_event *t); +void trace_event__cleanup(struct trace_event *t); +struct event_format* +trace_event__tp_format(const char *sys, 
const char *name); int bigendian(void); -struct pevent *read_trace_init(int file_bigendian, int host_bigendian); -void print_trace_event(struct pevent *pevent, int cpu, void *data, int size); void event_format__print(struct event_format *event, int cpu, void *data, int size); -void print_event(struct pevent *pevent, int cpu, void *data, int size, - unsigned long long nsecs, char *comm); - int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size); int parse_event_file(struct pevent *pevent, char *buf, unsigned long size, char *sys); -struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu); - unsigned long long raw_field_value(struct event_format *event, const char *name, void *data); -void *raw_field_ptr(struct event_format *event, const char *name, void *data); void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size); void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size); -ssize_t trace_report(int fd, struct pevent **pevent, bool repipe); - -int trace_parse_common_type(struct pevent *pevent, void *data); -int trace_parse_common_pid(struct pevent *pevent, void *data); +ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); struct event_format *trace_find_next_event(struct pevent *pevent, struct event_format *event); unsigned long long read_size(struct event_format *event, void *ptr, int size); unsigned long long eval_flag(const char *flag); -struct pevent_record *trace_read_data(struct pevent *pevent, int cpu); int read_tracing_data(int fd, struct list_head *pattrs); struct tracing_data { @@ -72,7 +54,7 @@ struct tracing_data { struct tracing_data *tracing_data_get(struct list_head *pattrs, int fd, bool temp); -void tracing_data_put(struct tracing_data *tdata); +int tracing_data_put(struct tracing_data *tdata); struct addr_location; @@ -86,8 +68,8 @@ struct scripting_ops { void (*process_event) (union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine, - struct addr_location *al); + struct thread *thread, + struct addr_location *al); int (*generate_script) (struct pevent *pevent, const char *outfile); }; diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h deleted file mode 100644 index c51fa6b70a2..00000000000 --- a/tools/perf/util/types.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __PERF_TYPES_H -#define __PERF_TYPES_H - -#include <stdint.h> - -/* - * We define u64 as uint64_t for every architecture - * so that we can print it with "%"PRIx64 without getting warnings. 
- */ -typedef uint64_t u64; -typedef int64_t s64; -typedef unsigned int u32; -typedef signed int s32; -typedef unsigned short u16; -typedef signed short s16; -typedef unsigned char u8; -typedef signed char s8; - -union u64_swap { - u64 val64; - u32 val32[2]; -}; - -#endif /* __PERF_TYPES_H */ diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c new file mode 100644 index 00000000000..5ec80a575b5 --- /dev/null +++ b/tools/perf/util/unwind-libdw.c @@ -0,0 +1,210 @@ +#include <linux/compiler.h> +#include <elfutils/libdw.h> +#include <elfutils/libdwfl.h> +#include <inttypes.h> +#include <errno.h> +#include "unwind.h" +#include "unwind-libdw.h" +#include "machine.h" +#include "thread.h" +#include <linux/types.h> +#include "event.h" +#include "perf_regs.h" + +static char *debuginfo_path; + +static const Dwfl_Callbacks offline_callbacks = { + .find_debuginfo = dwfl_standard_find_debuginfo, + .debuginfo_path = &debuginfo_path, + .section_address = dwfl_offline_section_address, +}; + +static int __report_module(struct addr_location *al, u64 ip, + struct unwind_info *ui) +{ + Dwfl_Module *mod; + struct dso *dso = NULL; + + thread__find_addr_location(ui->thread, ui->machine, + PERF_RECORD_MISC_USER, + MAP__FUNCTION, ip, al); + + if (al->map) + dso = al->map->dso; + + if (!dso) + return 0; + + mod = dwfl_addrmodule(ui->dwfl, ip); + if (!mod) + mod = dwfl_report_elf(ui->dwfl, dso->short_name, + dso->long_name, -1, al->map->start, + false); + + return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1; +} + +static int report_module(u64 ip, struct unwind_info *ui) +{ + struct addr_location al; + + return __report_module(&al, ip, ui); +} + +static int entry(u64 ip, struct unwind_info *ui) + +{ + struct unwind_entry e; + struct addr_location al; + + if (__report_module(&al, ip, ui)) + return -1; + + e.ip = ip; + e.map = al.map; + e.sym = al.sym; + + pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", + al.sym ? al.sym->name : "''", + ip, + al.map ? al.map->map_ip(al.map, ip) : (u64) 0); + + return ui->cb(&e, ui->arg); +} + +static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp) +{ + /* We want only single thread to be processed. */ + if (*thread_argp != NULL) + return 0; + + *thread_argp = arg; + return dwfl_pid(dwfl); +} + +static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr, + Dwarf_Word *data) +{ + struct addr_location al; + ssize_t size; + + thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, + MAP__FUNCTION, addr, &al); + if (!al.map) { + pr_debug("unwind: no map for %lx\n", (unsigned long)addr); + return -1; + } + + if (!al.map->dso) + return -1; + + size = dso__data_read_addr(al.map->dso, al.map, ui->machine, + addr, (u8 *) data, sizeof(*data)); + + return !(size == sizeof(*data)); +} + +static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *result, + void *arg) +{ + struct unwind_info *ui = arg; + struct stack_dump *stack = &ui->sample->user_stack; + u64 start, end; + int offset; + int ret; + + ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); + if (ret) + return false; + + end = start + stack->size; + + /* Check overflow. 
*/ + if (addr + sizeof(Dwarf_Word) < addr) + return false; + + if (addr < start || addr + sizeof(Dwarf_Word) > end) { + ret = access_dso_mem(ui, addr, result); + if (ret) { + pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range" + " 0x%" PRIx64 "-0x%" PRIx64 "\n", + addr, start, end); + return false; + } + return true; + } + + offset = addr - start; + *result = *(Dwarf_Word *)&stack->data[offset]; + pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n", + addr, (unsigned long)*result, offset); + return true; +} + +static const Dwfl_Thread_Callbacks callbacks = { + .next_thread = next_thread, + .memory_read = memory_read, + .set_initial_registers = libdw__arch_set_initial_registers, +}; + +static int +frame_callback(Dwfl_Frame *state, void *arg) +{ + struct unwind_info *ui = arg; + Dwarf_Addr pc; + + if (!dwfl_frame_pc(state, &pc, NULL)) { + pr_err("%s", dwfl_errmsg(-1)); + return DWARF_CB_ABORT; + } + + return entry(pc, ui) || !(--ui->max_stack) ? + DWARF_CB_ABORT : DWARF_CB_OK; +} + +int unwind__get_entries(unwind_entry_cb_t cb, void *arg, + struct machine *machine, struct thread *thread, + struct perf_sample *data, + int max_stack) +{ + struct unwind_info ui = { + .sample = data, + .thread = thread, + .machine = machine, + .cb = cb, + .arg = arg, + .max_stack = max_stack, + }; + Dwarf_Word ip; + int err = -EINVAL; + + if (!data->user_regs.regs) + return -EINVAL; + + ui.dwfl = dwfl_begin(&offline_callbacks); + if (!ui.dwfl) + goto out; + + err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP); + if (err) + goto out; + + err = report_module(ip, &ui); + if (err) + goto out; + + if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui)) + goto out; + + err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui); + + if (err && !ui.max_stack) + err = 0; + + out: + if (err) + pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1)); + + dwfl_end(ui.dwfl); + return 0; +} diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h new file mode 100644 index 00000000000..417a1426f3a --- /dev/null +++ b/tools/perf/util/unwind-libdw.h @@ -0,0 +1,21 @@ +#ifndef __PERF_UNWIND_LIBDW_H +#define __PERF_UNWIND_LIBDW_H + +#include <elfutils/libdwfl.h> +#include "event.h" +#include "thread.h" +#include "unwind.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg); + +struct unwind_info { + Dwfl *dwfl; + struct perf_sample *sample; + struct machine *machine; + struct thread *thread; + unwind_entry_cb_t cb; + void *arg; + int max_stack; +}; + +#endif /* __PERF_UNWIND_LIBDW_H */ diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind-libunwind.c index 958723ba3d2..25578b98f5c 100644 --- a/tools/perf/util/unwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -28,6 +28,7 @@ #include "session.h" #include "perf_regs.h" #include "unwind.h" +#include "symbol.h" #include "util.h" extern int @@ -39,6 +40,15 @@ UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) +extern int +UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug, + unw_word_t ip, + unw_word_t segbase, + const char *obj_name, unw_word_t start, + unw_word_t end); + +#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame) + #define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */ #define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */ @@ -76,7 +86,6 @@ struct unwind_info { struct perf_sample *sample; struct machine *machine; struct thread 
*thread; - u64 sample_uregs; }; #define dw_read(ptr, type, end) ({ \ @@ -149,23 +158,6 @@ static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val, __v; \ }) -static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, - GElf_Shdr *shp, const char *name) -{ - Elf_Scn *sec = NULL; - - while ((sec = elf_nextscn(elf, sec)) != NULL) { - char *str; - - gelf_getshdr(sec, shp); - str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); - if (!strcmp(name, str)) - break; - } - - return sec; -} - static u64 elf_section_offset(int fd, const char *name) { Elf *elf; @@ -181,7 +173,7 @@ static u64 elf_section_offset(int fd, const char *name) if (gelf_getehdr(elf, &ehdr) == NULL) break; - if (!elf_section_by_name(elf, &ehdr, &shdr, name)) + if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL)) break; offset = shdr.sh_offset; @@ -245,8 +237,9 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine, return 0; } -static int read_unwind_spec(struct dso *dso, struct machine *machine, - u64 *table_data, u64 *segbase, u64 *fde_count) +static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine, + u64 *table_data, u64 *segbase, + u64 *fde_count) { int ret = -EINVAL, fd; u64 offset; @@ -255,18 +248,36 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine, if (fd < 0) return -EINVAL; + /* Check the .eh_frame section for unwinding info */ offset = elf_section_offset(fd, ".eh_frame_hdr"); - close(fd); if (offset) ret = unwind_spec_ehframe(dso, machine, offset, table_data, segbase, fde_count); - /* TODO .debug_frame check if eh_frame_hdr fails */ return ret; } +#ifndef NO_LIBUNWIND_DEBUG_FRAME +static int read_unwind_spec_debug_frame(struct dso *dso, + struct machine *machine, u64 *offset) +{ + int fd = dso__data_fd(dso, machine); + + if (fd < 0) + return -EINVAL; + + /* Check the .debug_frame section for unwinding info */ + *offset = elf_section_offset(fd, ".debug_frame"); + + if (*offset) + return 0; + + return -EINVAL; +} +#endif + static struct map *find_map(unw_word_t ip, struct unwind_info *ui) { struct addr_location al; @@ -291,20 +302,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, pr_debug("unwind: find_proc_info dso %s\n", map->dso->name); - if (read_unwind_spec(map->dso, ui->machine, - &table_data, &segbase, &fde_count)) - return -EINVAL; + /* Check the .eh_frame section for unwinding info */ + if (!read_unwind_spec_eh_frame(map->dso, ui->machine, + &table_data, &segbase, &fde_count)) { + memset(&di, 0, sizeof(di)); + di.format = UNW_INFO_FORMAT_REMOTE_TABLE; + di.start_ip = map->start; + di.end_ip = map->end; + di.u.rti.segbase = map->start + segbase; + di.u.rti.table_data = map->start + table_data; + di.u.rti.table_len = fde_count * sizeof(struct table_entry) + / sizeof(unw_word_t); + return dwarf_search_unwind_table(as, ip, &di, pi, + need_unwind_info, arg); + } + +#ifndef NO_LIBUNWIND_DEBUG_FRAME + /* Check the .debug_frame section for unwinding info */ + if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { + memset(&di, 0, sizeof(di)); + if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name, + map->start, map->end)) + return dwarf_search_unwind_table(as, ip, &di, pi, + need_unwind_info, arg); + } +#endif - memset(&di, 0, sizeof(di)); - di.format = UNW_INFO_FORMAT_REMOTE_TABLE; - di.start_ip = map->start; - di.end_ip = map->end; - di.u.rti.segbase = map->start + segbase; - di.u.rti.table_data = map->start + table_data; - di.u.rti.table_len = fde_count * sizeof(struct table_entry) - / 
sizeof(unw_word_t); - return dwarf_search_unwind_table(as, ip, &di, pi, - need_unwind_info, arg); + return -EINVAL; } static int access_fpreg(unw_addr_space_t __maybe_unused as, @@ -364,30 +388,13 @@ static int access_dso_mem(struct unwind_info *ui, unw_word_t addr, return !(size == sizeof(*data)); } -static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id, - u64 sample_regs) -{ - int i, idx = 0; - - if (!(sample_regs & (1 << id))) - return -EINVAL; - - for (i = 0; i < id; i++) { - if (sample_regs & (1 << i)) - idx++; - } - - *valp = regs->regs[idx]; - return 0; -} - static int access_mem(unw_addr_space_t __maybe_unused as, unw_word_t addr, unw_word_t *valp, int __write, void *arg) { struct unwind_info *ui = arg; struct stack_dump *stack = &ui->sample->user_stack; - unw_word_t start, end; + u64 start, end; int offset; int ret; @@ -397,8 +404,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as, return 0; } - ret = reg_value(&start, &ui->sample->user_regs, PERF_REG_SP, - ui->sample_uregs); + ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP); if (ret) return ret; @@ -411,8 +417,9 @@ static int access_mem(unw_addr_space_t __maybe_unused as, if (addr < start || addr + sizeof(unw_word_t) >= end) { ret = access_dso_mem(ui, addr, valp); if (ret) { - pr_debug("unwind: access_mem %p not inside range %p-%p\n", - (void *)addr, (void *)start, (void *)end); + pr_debug("unwind: access_mem %p not inside range" + " 0x%" PRIx64 "-0x%" PRIx64 "\n", + (void *) addr, start, end); *valp = 0; return ret; } @@ -421,8 +428,8 @@ static int access_mem(unw_addr_space_t __maybe_unused as, offset = addr - start; *valp = *(unw_word_t *)&stack->data[offset]; - pr_debug("unwind: access_mem addr %p, val %lx, offset %d\n", - (void *)addr, (unsigned long)*valp, offset); + pr_debug("unwind: access_mem addr %p val %lx, offset %d\n", + (void *) addr, (unsigned long)*valp, offset); return 0; } @@ -432,6 +439,7 @@ static int access_reg(unw_addr_space_t __maybe_unused as, { struct unwind_info *ui = arg; int id, ret; + u64 val; /* Don't support write, I suspect we don't need it. */ if (__write) { @@ -444,16 +452,17 @@ static int access_reg(unw_addr_space_t __maybe_unused as, return 0; } - id = unwind__arch_reg_id(regnum); + id = libunwind__arch_reg_id(regnum); if (id < 0) return -EINVAL; - ret = reg_value(valp, &ui->sample->user_regs, id, ui->sample_uregs); + ret = perf_reg_value(&val, &ui->sample->user_regs, id); if (ret) { pr_err("unwind: can't read reg %d\n", regnum); return ret; } + *valp = (unw_word_t) val; pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp); return 0; } @@ -473,7 +482,7 @@ static int entry(u64 ip, struct thread *thread, struct machine *machine, thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER, - MAP__FUNCTION, ip, &al, NULL); + MAP__FUNCTION, ip, &al); e.ip = ip; e.map = al.map; @@ -516,7 +525,7 @@ static unw_accessors_t accessors = { }; static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, - void *arg) + void *arg, int max_stack) { unw_addr_space_t addr_space; unw_cursor_t c; @@ -532,11 +541,11 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, if (ret) display_error(ret); - while (!ret && (unw_step(&c) > 0)) { + while (!ret && (unw_step(&c) > 0) && max_stack--) { unw_word_t ip; unw_get_reg(&c, UNW_REG_IP, &ip); - ret = entry(ip, ui->thread, ui->machine, cb, arg); + ret = ip ? 
entry(ip, ui->thread, ui->machine, cb, arg) : 0; } unw_destroy_addr_space(addr_space); @@ -545,12 +554,11 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, int unwind__get_entries(unwind_entry_cb_t cb, void *arg, struct machine *machine, struct thread *thread, - u64 sample_uregs, struct perf_sample *data) + struct perf_sample *data, int max_stack) { - unw_word_t ip; + u64 ip; struct unwind_info ui = { .sample = data, - .sample_uregs = sample_uregs, .thread = thread, .machine = machine, }; @@ -559,7 +567,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, if (!data->user_regs.regs) return -EINVAL; - ret = reg_value(&ip, &data->user_regs, PERF_REG_IP, sample_uregs); + ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP); if (ret) return ret; @@ -567,5 +575,5 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, if (ret) return -ENOMEM; - return get_entries(&ui, cb, arg); + return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0; } diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h index cb6bc503a79..f03061260b4 100644 --- a/tools/perf/util/unwind.h +++ b/tools/perf/util/unwind.h @@ -1,7 +1,7 @@ #ifndef __UNWIND_H #define __UNWIND_H -#include "types.h" +#include <linux/types.h> #include "event.h" #include "symbol.h" @@ -13,23 +13,25 @@ struct unwind_entry { typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg); -#ifdef LIBUNWIND_SUPPORT +#ifdef HAVE_DWARF_UNWIND_SUPPORT int unwind__get_entries(unwind_entry_cb_t cb, void *arg, struct machine *machine, struct thread *thread, - u64 sample_uregs, - struct perf_sample *data); -int unwind__arch_reg_id(int regnum); + struct perf_sample *data, int max_stack); +/* libunwind specific */ +#ifdef HAVE_LIBUNWIND_SUPPORT +int libunwind__arch_reg_id(int regnum); +#endif #else static inline int unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, void *arg __maybe_unused, struct machine *machine __maybe_unused, struct thread *thread __maybe_unused, - u64 sample_uregs __maybe_unused, - struct perf_sample *data __maybe_unused) + struct perf_sample *data __maybe_unused, + int max_stack __maybe_unused) { return 0; } -#endif /* LIBUNWIND_SUPPORT */ +#endif /* HAVE_DWARF_UNWIND_SUPPORT */ #endif /* __UNWIND_H */ diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 99664598bc1..95aefa78bb0 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -1,18 +1,31 @@ #include "../perf.h" #include "util.h" +#include <api/fs/fs.h> #include <sys/mman.h> -#ifdef BACKTRACE_SUPPORT +#ifdef HAVE_BACKTRACE_SUPPORT #include <execinfo.h> #endif #include <stdio.h> #include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <limits.h> +#include <byteswap.h> +#include <linux/kernel.h> /* * XXX We need to find a better place for these things... */ +unsigned int page_size; +int cacheline_size; + +bool test_attr__enabled; + bool perf_host = true; bool perf_guest = false; +char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events"; + void event_attr_init(struct perf_event_attr *attr) { if (!perf_host) @@ -49,17 +62,20 @@ int mkdir_p(char *path, mode_t mode) return (stat(path, &st) && mkdir(path, mode)) ? 
-1 : 0; } -static int slow_copyfile(const char *from, const char *to) +static int slow_copyfile(const char *from, const char *to, mode_t mode) { - int err = 0; + int err = -1; char *line = NULL; size_t n; FILE *from_fp = fopen(from, "r"), *to_fp; + mode_t old_umask; if (from_fp == NULL) goto out; + old_umask = umask(mode ^ 0777); to_fp = fopen(to, "w"); + umask(old_umask); if (to_fp == NULL) goto out_fclose_from; @@ -76,7 +92,7 @@ out: return err; } -int copyfile(const char *from, const char *to) +int copyfile_mode(const char *from, const char *to, mode_t mode) { int fromfd, tofd; struct stat st; @@ -87,13 +103,13 @@ int copyfile(const char *from, const char *to) goto out; if (st.st_size == 0) /* /proc? do it slowly... */ - return slow_copyfile(from, to); + return slow_copyfile(from, to, mode); fromfd = open(from, O_RDONLY); if (fromfd < 0) goto out; - tofd = creat(to, 0755); + tofd = creat(to, mode); if (tofd < 0) goto out_close_from; @@ -115,6 +131,11 @@ out: return err; } +int copyfile(const char *from, const char *to) +{ + return copyfile_mode(from, to, 0755); +} + unsigned long convert_unit(unsigned long value, char *unit) { *unit = ' '; @@ -137,21 +158,42 @@ unsigned long convert_unit(unsigned long value, char *unit) return value; } -int readn(int fd, void *buf, size_t n) +static ssize_t ion(bool is_read, int fd, void *buf, size_t n) { void *buf_start = buf; + size_t left = n; - while (n) { - int ret = read(fd, buf, n); + while (left) { + ssize_t ret = is_read ? read(fd, buf, left) : + write(fd, buf, left); + if (ret < 0 && errno == EINTR) + continue; if (ret <= 0) return ret; - n -= ret; - buf += ret; + left -= ret; + buf += ret; } - return buf - buf_start; + BUG_ON((size_t)(buf - buf_start) != n); + return n; +} + +/* + * Read exactly 'n' bytes or return an error. + */ +ssize_t readn(int fd, void *buf, size_t n) +{ + return ion(true, fd, buf, n); +} + +/* + * Write exactly 'n' bytes or return an error. + */ +ssize_t writen(int fd, void *buf, size_t n) +{ + return ion(false, fd, buf, n); } size_t hex_width(u64 v) @@ -164,8 +206,41 @@ size_t hex_width(u64 v) return n; } +static int hex(char ch) +{ + if ((ch >= '0') && (ch <= '9')) + return ch - '0'; + if ((ch >= 'a') && (ch <= 'f')) + return ch - 'a' + 10; + if ((ch >= 'A') && (ch <= 'F')) + return ch - 'A' + 10; + return -1; +} + +/* + * While we find nice hex chars, build a long_val. + * Return number of chars processed. + */ +int hex2u64(const char *ptr, u64 *long_val) +{ + const char *p = ptr; + *long_val = 0; + + while (*p) { + const int hex_val = hex(*p); + + if (hex_val < 0) + break; + + *long_val = (*long_val << 4) | hex_val; + p++; + } + + return p - ptr; +} + /* Obtain a backtrace and print it to stdout. 
*/ -#ifdef BACKTRACE_SUPPORT +#ifdef HAVE_BACKTRACE_SUPPORT void dump_stack(void) { void *array[16]; @@ -183,3 +258,285 @@ void dump_stack(void) #else void dump_stack(void) {} #endif + +void get_term_dimensions(struct winsize *ws) +{ + char *s = getenv("LINES"); + + if (s != NULL) { + ws->ws_row = atoi(s); + s = getenv("COLUMNS"); + if (s != NULL) { + ws->ws_col = atoi(s); + if (ws->ws_row && ws->ws_col) + return; + } + } +#ifdef TIOCGWINSZ + if (ioctl(1, TIOCGWINSZ, ws) == 0 && + ws->ws_row && ws->ws_col) + return; +#endif + ws->ws_row = 25; + ws->ws_col = 80; +} + +static void set_tracing_events_path(const char *mountpoint) +{ + snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s", + mountpoint, "tracing/events"); +} + +const char *perf_debugfs_mount(const char *mountpoint) +{ + const char *mnt; + + mnt = debugfs_mount(mountpoint); + if (!mnt) + return NULL; + + set_tracing_events_path(mnt); + + return mnt; +} + +void perf_debugfs_set_path(const char *mntpt) +{ + snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt); + set_tracing_events_path(mntpt); +} + +static const char *find_debugfs(void) +{ + const char *path = perf_debugfs_mount(NULL); + + if (!path) + fprintf(stderr, "Your kernel does not support the debugfs filesystem"); + + return path; +} + +/* + * Finds the path to the debugfs/tracing + * Allocates the string and stores it. + */ +const char *find_tracing_dir(void) +{ + static char *tracing; + static int tracing_found; + const char *debugfs; + + if (tracing_found) + return tracing; + + debugfs = find_debugfs(); + if (!debugfs) + return NULL; + + tracing = malloc(strlen(debugfs) + 9); + if (!tracing) + return NULL; + + sprintf(tracing, "%s/tracing", debugfs); + + tracing_found = 1; + return tracing; +} + +char *get_tracing_file(const char *name) +{ + const char *tracing; + char *file; + + tracing = find_tracing_dir(); + if (!tracing) + return NULL; + + file = malloc(strlen(tracing) + strlen(name) + 2); + if (!file) + return NULL; + + sprintf(file, "%s/%s", tracing, name); + return file; +} + +void put_tracing_file(char *file) +{ + free(file); +} + +int parse_nsec_time(const char *str, u64 *ptime) +{ + u64 time_sec, time_nsec; + char *end; + + time_sec = strtoul(str, &end, 10); + if (*end != '.' 
&& *end != '\0') + return -1; + + if (*end == '.') { + int i; + char nsec_buf[10]; + + if (strlen(++end) > 9) + return -1; + + strncpy(nsec_buf, end, 9); + nsec_buf[9] = '\0'; + + /* make it nsec precision */ + for (i = strlen(nsec_buf); i < 9; i++) + nsec_buf[i] = '0'; + + time_nsec = strtoul(nsec_buf, &end, 10); + if (*end != '\0') + return -1; + } else + time_nsec = 0; + + *ptime = time_sec * NSEC_PER_SEC + time_nsec; + return 0; +} + +unsigned long parse_tag_value(const char *str, struct parse_tag *tags) +{ + struct parse_tag *i = tags; + + while (i->tag) { + char *s; + + s = strchr(str, i->tag); + if (s) { + unsigned long int value; + char *endptr; + + value = strtoul(str, &endptr, 10); + if (s != endptr) + break; + + if (value > ULONG_MAX / i->mult) + break; + value *= i->mult; + return value; + } + i++; + } + + return (unsigned long) -1; +} + +int filename__read_int(const char *filename, int *value) +{ + char line[64]; + int fd = open(filename, O_RDONLY), err = -1; + + if (fd < 0) + return -1; + + if (read(fd, line, sizeof(line)) > 0) { + *value = atoi(line); + err = 0; + } + + close(fd); + return err; +} + +int filename__read_str(const char *filename, char **buf, size_t *sizep) +{ + size_t size = 0, alloc_size = 0; + void *bf = NULL, *nbf; + int fd, n, err = 0; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return -errno; + + do { + if (size == alloc_size) { + alloc_size += BUFSIZ; + nbf = realloc(bf, alloc_size); + if (!nbf) { + err = -ENOMEM; + break; + } + + bf = nbf; + } + + n = read(fd, bf + size, alloc_size - size); + if (n < 0) { + if (size) { + pr_warning("read failed %d: %s\n", + errno, strerror(errno)); + err = 0; + } else + err = -errno; + + break; + } + + size += n; + } while (n > 0); + + if (!err) { + *sizep = size; + *buf = bf; + } else + free(bf); + + close(fd); + return err; +} + +const char *get_filename_for_perf_kvm(void) +{ + const char *filename; + + if (perf_host && !perf_guest) + filename = strdup("perf.data.host"); + else if (!perf_host && perf_guest) + filename = strdup("perf.data.guest"); + else + filename = strdup("perf.data.kvm"); + + return filename; +} + +int perf_event_paranoid(void) +{ + char path[PATH_MAX]; + const char *procfs = procfs__mountpoint(); + int value; + + if (!procfs) + return INT_MAX; + + scnprintf(path, PATH_MAX, "%s/sys/kernel/perf_event_paranoid", procfs); + + if (filename__read_int(path, &value)) + return INT_MAX; + + return value; +} + +void mem_bswap_32(void *src, int byte_size) +{ + u32 *m = src; + while (byte_size > 0) { + *m = bswap_32(*m); + byte_size -= sizeof(u32); + ++m; + } +} + +void mem_bswap_64(void *src, int byte_size) +{ + u64 *m = src; + + while (byte_size > 0) { + *m = bswap_64(*m); + byte_size -= sizeof(u64); + ++m; + } +} diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 70fa70b535b..66864364ccb 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -1,8 +1,6 @@ #ifndef GIT_COMPAT_UTIL_H #define GIT_COMPAT_UTIL_H -#define _FILE_OFFSET_BITS 64 - #ifndef FLEX_ARRAY /* * See if our compiler is known to support flexible array members. 
@@ -71,12 +69,21 @@ #include <sys/ioctl.h> #include <inttypes.h> #include <linux/magic.h> -#include "types.h" +#include <linux/types.h> #include <sys/ttydefaults.h> +#include <api/fs/debugfs.h> +#include <termios.h> +#include <linux/bitops.h> extern const char *graph_line; extern const char *graph_dotted_line; extern char buildid_dir[]; +extern char tracing_events_path[]; +extern void perf_debugfs_set_path(const char *mountpoint); +const char *perf_debugfs_mount(const char *mountpoint); +const char *find_tracing_dir(void); +char *get_tracing_file(const char *name); +void put_tracing_file(char *file); /* On most systems <limits.h> would have given us this, but * not on some systems (e.g. GNU/Hurd). @@ -122,6 +129,8 @@ extern char buildid_dir[]; #endif #endif +#define PERF_GTK_DSO "libperf-gtk.so" + /* General helper functions */ extern void usage(const char *err) NORETURN; extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); @@ -177,6 +186,8 @@ static inline void *zalloc(size_t size) return calloc(1, size); } +#define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) + static inline int has_extension(const char *filename, const char *ext) { size_t len = strlen(filename); @@ -198,6 +209,12 @@ static inline int has_extension(const char *filename, const char *ext) #undef tolower #undef toupper +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif + +int parse_nsec_time(const char *str, u64 *ptime); + extern unsigned char sane_ctype[256]; #define GIT_SPACE 0x01 #define GIT_DIGIT 0x02 @@ -215,8 +232,8 @@ extern unsigned char sane_ctype[256]; #define isalpha(x) sane_istest(x,GIT_ALPHA) #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) #define isprint(x) sane_istest(x,GIT_PRINT) -#define islower(x) (sane_istest(x,GIT_ALPHA) && sane_istest(x,0x20)) -#define isupper(x) (sane_istest(x,GIT_ALPHA) && !sane_istest(x,0x20)) +#define islower(x) (sane_istest(x,GIT_ALPHA) && (x & 0x20)) +#define isupper(x) (sane_istest(x,GIT_ALPHA) && !(x & 0x20)) #define tolower(x) sane_case((unsigned char)(x), 0x20) #define toupper(x) sane_case((unsigned char)(x), 0) @@ -229,6 +246,7 @@ static inline int sane_case(int x, int high) int mkdir_p(char *path, mode_t mode); int copyfile(const char *from, const char *to); +int copyfile_mode(const char *from, const char *to, mode_t mode); s64 perf_atoll(const char *str); char **argv_split(const char *str, int *argcp); @@ -236,8 +254,10 @@ void argv_free(char **argv); bool strglobmatch(const char *str, const char *pat); bool strlazymatch(const char *str, const char *pat); int strtailcmp(const char *s1, const char *s2); +char *strxfrchar(char *s, char from, char to); unsigned long convert_unit(unsigned long value, char *unit); -int readn(int fd, void *buf, size_t size); +ssize_t readn(int fd, void *buf, size_t n); +ssize_t writen(int fd, void *buf, size_t n); struct perf_event_attr; @@ -257,10 +277,57 @@ bool is_power_of_2(unsigned long n) return (n != 0 && ((n & (n - 1)) == 0)); } +static inline unsigned next_pow2(unsigned x) +{ + if (!x) + return 1; + return 1ULL << (32 - __builtin_clz(x - 1)); +} + +static inline unsigned long next_pow2_l(unsigned long x) +{ +#if BITS_PER_LONG == 64 + if (x <= (1UL << 31)) + return next_pow2(x); + return (unsigned long)next_pow2(x >> 32) << 32; +#else + return next_pow2(x); +#endif +} + size_t hex_width(u64 v); +int hex2u64(const char *ptr, u64 *val); +char *ltrim(char *s); char *rtrim(char *s); void dump_stack(void); -#endif +extern unsigned int page_size; +extern int cacheline_size; + +void 
get_term_dimensions(struct winsize *ws); + +struct parse_tag { + char tag; + int mult; +}; + +unsigned long parse_tag_value(const char *str, struct parse_tag *tags); + +#define SRCLINE_UNKNOWN ((char *) "??:0") + +struct dso; + +char *get_srcline(struct dso *dso, unsigned long addr); +void free_srcline(char *srcline); + +int filename__read_int(const char *filename, int *value); +int filename__read_str(const char *filename, char **buf, size_t *sizep); +int perf_event_paranoid(void); + +void mem_bswap_64(void *src, int byte_size); +void mem_bswap_32(void *src, int byte_size); + +const char *get_filename_for_perf_kvm(void); +#endif /* GIT_COMPAT_UTIL_H */ diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c index 697c8b4e59c..0fb3c1fcd3e 100644 --- a/tools/perf/util/values.c +++ b/tools/perf/util/values.c @@ -31,14 +31,14 @@ void perf_read_values_destroy(struct perf_read_values *values) return; for (i = 0; i < values->threads; i++) - free(values->value[i]); - free(values->value); - free(values->pid); - free(values->tid); - free(values->counterrawid); + zfree(&values->value[i]); + zfree(&values->value); + zfree(&values->pid); + zfree(&values->tid); + zfree(&values->counterrawid); for (i = 0; i < values->counters; i++) - free(values->countername[i]); - free(values->countername); + zfree(&values->countername[i]); + zfree(&values->countername); } static void perf_read_values__enlarge_threads(struct perf_read_values *values) diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h index 2fa967e1a88..b21a80c6cf8 100644 --- a/tools/perf/util/values.h +++ b/tools/perf/util/values.h @@ -1,7 +1,7 @@ #ifndef __PERF_VALUES_H #define __PERF_VALUES_H -#include "types.h" +#include <linux/types.h> struct perf_read_values { int threads; diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c index e60951fcdb1..0ddb3b8a89e 100644 --- a/tools/perf/util/vdso.c +++ b/tools/perf/util/vdso.c @@ -91,7 +91,7 @@ void vdso__exit(void) struct dso *vdso__dso_findnew(struct list_head *head) { - struct dso *dso = dsos__find(head, VDSO__MAP_NAME); + struct dso *dso = dsos__find(head, VDSO__MAP_NAME, true); if (!dso) { char *file; @@ -103,7 +103,7 @@ struct dso *vdso__dso_findnew(struct list_head *head) dso = dso__new(VDSO__MAP_NAME); if (dso != NULL) { dsos__add(head, dso); - dso__set_long_name(dso, file); + dso__set_long_name(dso, file, false); } } diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile index 6b9cf7a987c..e5a3c4be2a1 100644 --- a/tools/power/acpi/Makefile +++ b/tools/power/acpi/Makefile @@ -1,18 +1,156 @@ -PROG= acpidump -SRCS= acpidump.c +# tools/power/acpi/Makefile - ACPI tool Makefile +# +# Copyright (c) 2013, Intel Corporation +# Author: Lv Zheng <lv.zheng@intel.com> +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 +# of the License. + +OUTPUT=./ +ifeq ("$(origin O)", "command line") + OUTPUT := $(O)/ +endif + +ifneq ($(OUTPUT),) +# check that the output directory actually exists +OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) +$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) +endif + +SUBDIRS = tools/ec + +# --- CONFIGURATION BEGIN --- + +# Set the following to `true' to make a unstripped, unoptimized +# binary. Leave this set to `false' for production use. +DEBUG ?= true + +# make the build silent. Set this to something else to make it noisy again. 
+V ?= false + +# Prefix to the directories we're installing to +DESTDIR ?= + +# --- CONFIGURATION END --- + +# Directory definitions. These are default and most probably +# do not need to be changed. Please note that DESTDIR is +# added in front of any of them + +bindir ?= /usr/bin +sbindir ?= /usr/sbin +mandir ?= /usr/man + +# Toolchain: what tools do we use, and what options do they need: + +INSTALL = /usr/bin/install -c +INSTALL_PROGRAM = ${INSTALL} +INSTALL_DATA = ${INSTALL} -m 644 +INSTALL_SCRIPT = ${INSTALL_PROGRAM} + +# If you are running a cross compiler, you may want to set this +# to something more interesting, like "arm-linux-". If you want +# to compile vs uClibc, that can be done here as well. +CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc- +CC = $(CROSS)gcc +LD = $(CROSS)gcc +STRIP = $(CROSS)strip +HOSTCC = gcc + +# check if compiler option is supported +cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;} + +# use '-Os' optimization if available, else use -O2 +OPTIMIZATION := $(call cc-supports,-Os,-O2) + +WARNINGS := -Wall +WARNINGS += $(call cc-supports,-Wstrict-prototypes) +WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) + KERNEL_INCLUDE := ../../../include -CFLAGS += -Wall -Wstrict-prototypes -Wdeclaration-after-statement -Os -s -D_LINUX -DDEFINE_ALTERNATE_TYPES -I$(KERNEL_INCLUDE) +ACPICA_INCLUDE := ../../../drivers/acpi/acpica +CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) +CFLAGS += $(WARNINGS) + +ifeq ($(strip $(V)),false) + QUIET=@ + ECHO=@echo +else + QUIET= + ECHO=@\# +endif +export QUIET ECHO + +# if DEBUG is enabled, then we do not strip or optimize +ifeq ($(strip $(DEBUG)),true) + CFLAGS += -O1 -g -DDEBUG + STRIPCMD = /bin/true -Since_we_are_debugging +else + CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer + STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment +endif + +# --- ACPIDUMP BEGIN --- + +vpath %.c \ + ../../../drivers/acpi/acpica\ + tools/acpidump\ + common\ + os_specific/service_layers + +CFLAGS += -DACPI_DUMP_APP -Itools/acpidump + +DUMP_OBJS = \ + apdump.o\ + apfiles.o\ + apmain.o\ + osunixdir.o\ + osunixmap.o\ + tbprint.o\ + tbxfroot.o\ + utbuffer.o\ + utexcep.o\ + utmath.o\ + utstring.o\ + utxferror.o\ + oslinuxtbl.o\ + cmfsize.o\ + getopt.o + +DUMP_OBJS := $(addprefix $(OUTPUT)tools/acpidump/,$(DUMP_OBJS)) + +$(OUTPUT)acpidump: $(DUMP_OBJS) + $(ECHO) " LD " $@ + $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(DUMP_OBJS) -L$(OUTPUT) -o $@ + $(QUIET) $(STRIPCMD) $@ + +$(OUTPUT)tools/acpidump/%.o: %.c + $(ECHO) " CC " $@ + $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< + +# --- ACPIDUMP END --- + +all: $(OUTPUT)acpidump + echo $(OUTPUT) + +clean: + -find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \ + | xargs rm -f + -rm -f $(OUTPUT)acpidump -all: acpidump -$(PROG) : $(SRCS) - $(CC) $(CFLAGS) $(SRCS) -o $(PROG) +install-tools: + $(INSTALL) -d $(DESTDIR)${sbindir} + $(INSTALL_PROGRAM) $(OUTPUT)acpidump $(DESTDIR)${sbindir} -CLEANFILES= $(PROG) +install-man: + $(INSTALL_DATA) -D man/acpidump.8 $(DESTDIR)${mandir}/man8/acpidump.8 -clean : - rm -f $(CLEANFILES) $(patsubst %.c,%.o, $(SRCS)) *~ +install: all install-tools install-man -install : - install acpidump /usr/bin/acpidump - install acpidump.8 /usr/share/man/man8 +uninstall: + - rm -f $(DESTDIR)${sbindir}/acpidump + - rm -f $(DESTDIR)${mandir}/man8/acpidump.8 +.PHONY: all utils install-tools install-man install uninstall clean diff --git a/tools/power/acpi/acpidump.8 
b/tools/power/acpi/acpidump.8 deleted file mode 100644 index adfa99166e5..00000000000 --- a/tools/power/acpi/acpidump.8 +++ /dev/null @@ -1,59 +0,0 @@ -.TH ACPIDUMP 8 -.SH NAME -acpidump \- Dump system's ACPI tables to an ASCII file. -.SH SYNOPSIS -.ft B -.B acpidump > acpidump.out -.SH DESCRIPTION -\fBacpidump \fP dumps the systems ACPI tables to an ASCII file -appropriate for attaching to a bug report. - -Subsequently, they can be processed by utilities in the ACPICA package. -.SS Options -no options worth worrying about. -.PP -.SH EXAMPLE - -.nf -# acpidump > acpidump.out - -$ acpixtract -a acpidump.out - Acpi table [DSDT] - 15974 bytes written to DSDT.dat - Acpi table [FACS] - 64 bytes written to FACS.dat - Acpi table [FACP] - 116 bytes written to FACP.dat - Acpi table [APIC] - 120 bytes written to APIC.dat - Acpi table [MCFG] - 60 bytes written to MCFG.dat - Acpi table [SSDT] - 444 bytes written to SSDT1.dat - Acpi table [SSDT] - 439 bytes written to SSDT2.dat - Acpi table [SSDT] - 439 bytes written to SSDT3.dat - Acpi table [SSDT] - 439 bytes written to SSDT4.dat - Acpi table [SSDT] - 439 bytes written to SSDT5.dat - Acpi table [RSDT] - 76 bytes written to RSDT.dat - Acpi table [RSDP] - 20 bytes written to RSDP.dat - -$ iasl -d *.dat -... -.fi -creates *.dsl, a human readable form which can be edited -and compiled using iasl. - - -.SH NOTES - -.B "acpidump " -must be run as root. - -.SH REFERENCES -ACPICA: https://acpica.org/ - -.SH FILES -.ta -.nf -/dev/mem -/sys/firmware/acpi/tables/dynamic/* -.fi - -.PP -.SH AUTHOR -.nf -Written by Len Brown <len.brown@intel.com> diff --git a/tools/power/acpi/acpidump.c b/tools/power/acpi/acpidump.c deleted file mode 100644 index a84553a0e0d..00000000000 --- a/tools/power/acpi/acpidump.c +++ /dev/null @@ -1,559 +0,0 @@ -/* - * (c) Alexey Starikovskiy, Intel, 2005-2006. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification. - * 2. Redistributions in binary form must reproduce at minimum a disclaimer - * substantially similar to the "NO WARRANTY" disclaimer below - * ("Disclaimer") and any redistribution must be conditioned upon - * including a substantially similar Disclaimer requirement for further - * binary redistribution. - * 3. Neither the names of the above-listed copyright holders nor the names - * of any contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * NO WARRANTY - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING - * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGES. - */ - -#ifdef DEFINE_ALTERNATE_TYPES -/* hack to enable building old application with new headers -lenb */ -#define acpi_fadt_descriptor acpi_table_fadt -#define acpi_rsdp_descriptor acpi_table_rsdp -#define DSDT_SIG ACPI_SIG_DSDT -#define FACS_SIG ACPI_SIG_FACS -#define FADT_SIG ACPI_SIG_FADT -#define xfirmware_ctrl Xfacs -#define firmware_ctrl facs - -typedef int s32; -typedef unsigned char u8; -typedef unsigned short u16; -typedef unsigned int u32; -typedef unsigned long long u64; -typedef long long s64; -#endif - -#include <sys/mman.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <stdio.h> -#include <string.h> -#include <unistd.h> -#include <getopt.h> - -#include <dirent.h> - -#include <acpi/acconfig.h> -#include <acpi/platform/acenv.h> -#include <acpi/actypes.h> -#include <acpi/actbl.h> - -static inline u8 checksum(u8 * buffer, u32 length) -{ - u8 sum = 0, *i = buffer; - buffer += length; - for (; i < buffer; sum += *(i++)); - return sum; -} - -static unsigned long psz, addr, length; -static int print, connect, skip; -static u8 select_sig[4]; - -static unsigned long read_efi_systab( void ) -{ - char buffer[80]; - unsigned long addr; - FILE *f = fopen("/sys/firmware/efi/systab", "r"); - if (f) { - while (fgets(buffer, 80, f)) { - if (sscanf(buffer, "ACPI20=0x%lx", &addr) == 1) - return addr; - } - fclose(f); - } - return 0; -} - -static u8 *acpi_map_memory(unsigned long where, unsigned length) -{ - unsigned long offset; - u8 *there; - int fd = open("/dev/mem", O_RDONLY); - if (fd < 0) { - fprintf(stderr, "acpi_os_map_memory: cannot open /dev/mem\n"); - exit(1); - } - offset = where % psz; - there = mmap(NULL, length + offset, PROT_READ, MAP_PRIVATE, - fd, where - offset); - close(fd); - if (there == MAP_FAILED) return 0; - return (there + offset); -} - -static void acpi_unmap_memory(u8 * there, unsigned length) -{ - unsigned long offset = (unsigned long)there % psz; - munmap(there - offset, length + offset); -} - -static struct acpi_table_header *acpi_map_table(unsigned long where, char *sig) -{ - unsigned size; - struct acpi_table_header *tbl = (struct acpi_table_header *) - acpi_map_memory(where, sizeof(struct acpi_table_header)); - if (!tbl || (sig && memcmp(sig, tbl->signature, 4))) return 0; - size = tbl->length; - acpi_unmap_memory((u8 *) tbl, sizeof(struct acpi_table_header)); - return (struct acpi_table_header *)acpi_map_memory(where, size); -} - -static void acpi_unmap_table(struct acpi_table_header *tbl) -{ - acpi_unmap_memory((u8 *)tbl, tbl->length); -} - -static struct acpi_rsdp_descriptor *acpi_scan_for_rsdp(u8 *begin, u32 length) -{ - struct acpi_rsdp_descriptor *rsdp; - u8 *i, *end = begin + length; - /* Search from given start address for the requested length */ - for (i = begin; i < end; i += ACPI_RSDP_SCAN_STEP) { - /* The signature and checksum must both be correct */ - if (memcmp((char *)i, "RSD PTR ", 8)) continue; - rsdp = (struct acpi_rsdp_descriptor *)i; - /* Signature matches, check the appropriate checksum */ - if 
(!checksum((u8 *) rsdp, (rsdp->revision < 2) ? - ACPI_RSDP_CHECKSUM_LENGTH : - ACPI_RSDP_XCHECKSUM_LENGTH)) - /* Checksum valid, we have found a valid RSDP */ - return rsdp; - } - /* Searched entire block, no RSDP was found */ - return 0; -} - -/* - * Output data - */ -static void acpi_show_data(int fd, u8 * data, int size) -{ - char buffer[256]; - int len; - int i, remain = size; - while (remain > 0) { - len = snprintf(buffer, 256, " %04x:", size - remain); - for (i = 0; i < 16 && i < remain; i++) { - len += - snprintf(&buffer[len], 256 - len, " %02x", data[i]); - } - for (; i < 16; i++) { - len += snprintf(&buffer[len], 256 - len, " "); - } - len += snprintf(&buffer[len], 256 - len, " "); - for (i = 0; i < 16 && i < remain; i++) { - buffer[len++] = (isprint(data[i])) ? data[i] : '.'; - } - buffer[len++] = '\n'; - write(fd, buffer, len); - data += 16; - remain -= 16; - } -} - -/* - * Output ACPI table - */ -static void acpi_show_table(int fd, struct acpi_table_header *table, unsigned long addr) -{ - char buff[80]; - int len = snprintf(buff, 80, "%.4s @ %p\n", table->signature, (void *)addr); - write(fd, buff, len); - acpi_show_data(fd, (u8 *) table, table->length); - buff[0] = '\n'; - write(fd, buff, 1); -} - -static void write_table(int fd, struct acpi_table_header *tbl, unsigned long addr) -{ - static int select_done = 0; - if (!select_sig[0]) { - if (print) { - acpi_show_table(fd, tbl, addr); - } else { - write(fd, tbl, tbl->length); - } - } else if (!select_done && !memcmp(select_sig, tbl->signature, 4)) { - if (skip > 0) { - --skip; - return; - } - if (print) { - acpi_show_table(fd, tbl, addr); - } else { - write(fd, tbl, tbl->length); - } - select_done = 1; - } -} - -static void acpi_dump_FADT(int fd, struct acpi_table_header *tbl, unsigned long xaddr) { - struct acpi_fadt_descriptor x; - unsigned long addr; - size_t len = sizeof(struct acpi_fadt_descriptor); - if (len > tbl->length) len = tbl->length; - memcpy(&x, tbl, len); - x.header.length = len; - if (checksum((u8 *)tbl, len)) { - fprintf(stderr, "Wrong checksum for FADT!\n"); - } - if (x.header.length >= 148 && x.Xdsdt) { - addr = (unsigned long)x.Xdsdt; - if (connect) { - x.Xdsdt = lseek(fd, 0, SEEK_CUR); - } - } else if (x.header.length >= 44 && x.dsdt) { - addr = (unsigned long)x.dsdt; - if (connect) { - x.dsdt = lseek(fd, 0, SEEK_CUR); - } - } else { - fprintf(stderr, "No DSDT in FADT!\n"); - goto no_dsdt; - } - tbl = acpi_map_table(addr, DSDT_SIG); - if (!tbl) goto no_dsdt; - if (checksum((u8 *)tbl, tbl->length)) - fprintf(stderr, "Wrong checksum for DSDT!\n"); - write_table(fd, tbl, addr); - acpi_unmap_table(tbl); -no_dsdt: - if (x.header.length >= 140 && x.xfirmware_ctrl) { - addr = (unsigned long)x.xfirmware_ctrl; - if (connect) { - x.xfirmware_ctrl = lseek(fd, 0, SEEK_CUR); - } - } else if (x.header.length >= 40 && x.firmware_ctrl) { - addr = (unsigned long)x.firmware_ctrl; - if (connect) { - x.firmware_ctrl = lseek(fd, 0, SEEK_CUR); - } - } else { - fprintf(stderr, "No FACS in FADT!\n"); - goto no_facs; - } - tbl = acpi_map_table(addr, FACS_SIG); - if (!tbl) goto no_facs; - /* do not checksum FACS */ - write_table(fd, tbl, addr); - acpi_unmap_table(tbl); -no_facs: - write_table(fd, (struct acpi_table_header *)&x, xaddr); -} - -static int acpi_dump_SDT(int fd, struct acpi_rsdp_descriptor *rsdp) -{ - struct acpi_table_header *sdt, *tbl = 0; - int xsdt = 1, i, num; - char *offset; - unsigned long addr; - if (rsdp->revision > 1 && rsdp->xsdt_physical_address) { - tbl = acpi_map_table(rsdp->xsdt_physical_address, "XSDT"); 
- } - if (!tbl && rsdp->rsdt_physical_address) { - xsdt = 0; - tbl = acpi_map_table(rsdp->rsdt_physical_address, "RSDT"); - } - if (!tbl) return 0; - sdt = malloc(tbl->length); - memcpy(sdt, tbl, tbl->length); - acpi_unmap_table(tbl); - if (checksum((u8 *)sdt, sdt->length)) - fprintf(stderr, "Wrong checksum for %s!\n", (xsdt)?"XSDT":"RSDT"); - num = (sdt->length - sizeof(struct acpi_table_header))/((xsdt)?sizeof(u64):sizeof(u32)); - offset = (char *)sdt + sizeof(struct acpi_table_header); - for (i = 0; i < num; ++i, offset += ((xsdt) ? sizeof(u64) : sizeof(u32))) { - addr = (xsdt) ? (unsigned long)(*(u64 *)offset): - (unsigned long)(*(u32 *)offset); - if (!addr) continue; - tbl = acpi_map_table(addr, 0); - if (!tbl) continue; - if (!memcmp(tbl->signature, FADT_SIG, 4)) { - acpi_dump_FADT(fd, tbl, addr); - } else { - if (checksum((u8 *)tbl, tbl->length)) - fprintf(stderr, "Wrong checksum for generic table!\n"); - write_table(fd, tbl, addr); - } - acpi_unmap_table(tbl); - if (connect) { - if (xsdt) - (*(u64*)offset) = lseek(fd, 0, SEEK_CUR); - else - (*(u32*)offset) = lseek(fd, 0, SEEK_CUR); - } - } - if (xsdt) { - addr = (unsigned long)rsdp->xsdt_physical_address; - if (connect) { - rsdp->xsdt_physical_address = lseek(fd, 0, SEEK_CUR); - } - } else { - addr = (unsigned long)rsdp->rsdt_physical_address; - if (connect) { - rsdp->rsdt_physical_address = lseek(fd, 0, SEEK_CUR); - } - } - write_table(fd, sdt, addr); - free (sdt); - return 1; -} - -#define DYNAMIC_SSDT "/sys/firmware/acpi/tables/dynamic" - -static void acpi_dump_dynamic_SSDT(int fd) -{ - struct stat file_stat; - char filename[256], *ptr; - DIR *tabledir; - struct dirent *entry; - FILE *fp; - int count, readcount, length; - struct acpi_table_header table_header, *ptable; - - if (stat(DYNAMIC_SSDT, &file_stat) == -1) { - /* The directory doesn't exist */ - return; - } - tabledir = opendir(DYNAMIC_SSDT); - if(!tabledir){ - /*can't open the directory */ - return; - } - - while ((entry = readdir(tabledir)) != 0){ - /* skip the file of . /.. */ - if (entry->d_name[0] == '.') - continue; - - sprintf(filename, "%s/%s", DYNAMIC_SSDT, entry->d_name); - fp = fopen(filename, "r"); - if (fp == NULL) { - fprintf(stderr, "Can't open the file of %s\n", - filename); - continue; - } - /* Read the Table header to parse the table length */ - count = fread(&table_header, 1, sizeof(struct acpi_table_header), fp); - if (count < sizeof(table_header)) { - /* the length is lessn than ACPI table header. 
skip it */ - fclose(fp); - continue; - } - length = table_header.length; - ptr = malloc(table_header.length); - fseek(fp, 0, SEEK_SET); - readcount = 0; - while(!feof(fp) && readcount < length) { - count = fread(ptr + readcount, 1, 256, fp); - readcount += count; - } - fclose(fp); - ptable = (struct acpi_table_header *) ptr; - if (checksum((u8 *) ptable, ptable->length)) - fprintf(stderr, "Wrong checksum " - "for dynamic SSDT table!\n"); - write_table(fd, ptable, 0); - free(ptr); - } - closedir(tabledir); - return; -} - -static void usage(const char *progname) -{ - puts("Usage:"); - printf("%s [--addr 0x1234][--table DSDT][--output filename]" - "[--binary][--length 0x456][--help]\n", progname); - puts("\t--addr 0x1234 or -a 0x1234 -- look for tables at this physical address"); - puts("\t--table DSDT or -t DSDT -- only dump table with DSDT signature"); - puts("\t--output filename or -o filename -- redirect output from stdin to filename"); - puts("\t--binary or -b -- dump data in binary form rather than in hex-dump format"); - puts("\t--length 0x456 or -l 0x456 -- works only with --addr, dump physical memory" - "\n\t\tregion without trying to understand it's contents"); - puts("\t--skip 2 or -s 2 -- skip 2 tables of the given name and output only 3rd one"); - puts("\t--help or -h -- this help message"); - exit(0); -} - -static struct option long_options[] = { - {"addr", 1, 0, 0}, - {"table", 1, 0, 0}, - {"output", 1, 0, 0}, - {"binary", 0, 0, 0}, - {"length", 1, 0, 0}, - {"skip", 1, 0, 0}, - {"help", 0, 0, 0}, - {0, 0, 0, 0} -}; -int main(int argc, char **argv) -{ - int option_index, c, fd; - u8 *raw; - struct acpi_rsdp_descriptor rsdpx, *x = 0; - char *filename = 0; - char buff[80]; - memset(select_sig, 0, 4); - print = 1; - connect = 0; - addr = length = 0; - skip = 0; - while (1) { - option_index = 0; - c = getopt_long(argc, argv, "a:t:o:bl:s:h", - long_options, &option_index); - if (c == -1) - break; - - switch (c) { - case 0: - switch (option_index) { - case 0: - addr = strtoul(optarg, (char **)NULL, 16); - break; - case 1: - memcpy(select_sig, optarg, 4); - break; - case 2: - filename = optarg; - break; - case 3: - print = 0; - break; - case 4: - length = strtoul(optarg, (char **)NULL, 16); - break; - case 5: - skip = strtoul(optarg, (char **)NULL, 10); - break; - case 6: - usage(argv[0]); - exit(0); - } - break; - case 'a': - addr = strtoul(optarg, (char **)NULL, 16); - break; - case 't': - memcpy(select_sig, optarg, 4); - break; - case 'o': - filename = optarg; - break; - case 'b': - print = 0; - break; - case 'l': - length = strtoul(optarg, (char **)NULL, 16); - break; - case 's': - skip = strtoul(optarg, (char **)NULL, 10); - break; - case 'h': - usage(argv[0]); - exit(0); - default: - printf("Unknown option!\n"); - usage(argv[0]); - exit(0); - } - } - - fd = STDOUT_FILENO; - if (filename) { - fd = creat(filename, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); - if (fd < 0) - return fd; - } - - if (!select_sig[0] && !print) { - connect = 1; - } - - psz = sysconf(_SC_PAGESIZE); - if (length && addr) { - /* We know length and address, it means we just want a memory dump */ - if (!(raw = acpi_map_memory(addr, length))) - goto not_found; - write(fd, raw, length); - acpi_unmap_memory(raw, length); - close(fd); - return 0; - } - - length = sizeof(struct acpi_rsdp_descriptor); - if (!addr) { - addr = read_efi_systab(); - if (!addr) { - addr = ACPI_HI_RSDP_WINDOW_BASE; - length = ACPI_HI_RSDP_WINDOW_SIZE; - } - } - - if (!(raw = acpi_map_memory(addr, length)) || - !(x = acpi_scan_for_rsdp(raw, 
length))) - goto not_found; - - /* Find RSDP and print all found tables */ - memcpy(&rsdpx, x, sizeof(struct acpi_rsdp_descriptor)); - acpi_unmap_memory(raw, length); - if (connect) { - lseek(fd, sizeof(struct acpi_rsdp_descriptor), SEEK_SET); - } - if (!acpi_dump_SDT(fd, &rsdpx)) - goto not_found; - if (connect) { - lseek(fd, 0, SEEK_SET); - write(fd, x, (rsdpx.revision < 2) ? - ACPI_RSDP_CHECKSUM_LENGTH : ACPI_RSDP_XCHECKSUM_LENGTH); - } else if (!select_sig[0] || !memcmp("RSD PTR ", select_sig, 4)) { - addr += (long)x - (long)raw; - length = snprintf(buff, 80, "RSD PTR @ %p\n", (void *)addr); - write(fd, buff, length); - acpi_show_data(fd, (u8 *) & rsdpx, (rsdpx.revision < 2) ? - ACPI_RSDP_CHECKSUM_LENGTH : ACPI_RSDP_XCHECKSUM_LENGTH); - buff[0] = '\n'; - write(fd, buff, 1); - } - acpi_dump_dynamic_SSDT(fd); - close(fd); - return 0; -not_found: - close(fd); - fprintf(stderr, "ACPI tables were not found. If you know location " - "of RSD PTR table (from dmesg, etc), " - "supply it with either --addr or -a option\n"); - return 1; -} diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c new file mode 100644 index 00000000000..5140e5edae1 --- /dev/null +++ b/tools/power/acpi/common/cmfsize.c @@ -0,0 +1,101 @@ +/****************************************************************************** + * + * Module Name: cfsize - Common get file size function + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#include <acpi/acpi.h> +#include "accommon.h" +#include "acapps.h" +#include <stdio.h> + +#define _COMPONENT ACPI_TOOLS +ACPI_MODULE_NAME("cmfsize") + +/******************************************************************************* + * + * FUNCTION: cm_get_file_size + * + * PARAMETERS: file - Open file descriptor + * + * RETURN: File Size. On error, -1 (ACPI_UINT32_MAX) + * + * DESCRIPTION: Get the size of a file. Uses seek-to-EOF. File must be open. + * Does not disturb the current file pointer. Uses perror for + * error messages. + * + ******************************************************************************/ +u32 cm_get_file_size(FILE * file) +{ + long file_size; + long current_offset; + + /* Save the current file pointer, seek to EOF to obtain file size */ + + current_offset = ftell(file); + if (current_offset < 0) { + goto offset_error; + } + + if (fseek(file, 0, SEEK_END)) { + goto seek_error; + } + + file_size = ftell(file); + if (file_size < 0) { + goto offset_error; + } + + /* Restore original file pointer */ + + if (fseek(file, current_offset, SEEK_SET)) { + goto seek_error; + } + + return ((u32)file_size); + +offset_error: + perror("Could not get file offset"); + return (ACPI_UINT32_MAX); + +seek_error: + perror("Could not seek file"); + return (ACPI_UINT32_MAX); +} diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c new file mode 100644 index 00000000000..a302f52e4fd --- /dev/null +++ b/tools/power/acpi/common/getopt.c @@ -0,0 +1,239 @@ +/****************************************************************************** + * + * Module Name: getopt + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +/* + * ACPICA getopt() implementation + * + * Option strings: + * "f" - Option has no arguments + * "f:" - Option requires an argument + * "f^" - Option has optional single-char sub-options + * "f|" - Option has required single-char sub-options + */ + +#include <stdio.h> +#include <string.h> +#include <acpi/acpi.h> +#include "accommon.h" +#include "acapps.h" + +#define ACPI_OPTION_ERROR(msg, badchar) \ + if (acpi_gbl_opterr) {fprintf (stderr, "%s%c\n", msg, badchar);} + +int acpi_gbl_opterr = 1; +int acpi_gbl_optind = 1; +int acpi_gbl_sub_opt_char = 0; +char *acpi_gbl_optarg; + +static int current_char_ptr = 1; + +/******************************************************************************* + * + * FUNCTION: acpi_getopt_argument + * + * PARAMETERS: argc, argv - from main + * + * RETURN: 0 if an argument was found, -1 otherwise. Sets acpi_gbl_Optarg + * to point to the next argument. + * + * DESCRIPTION: Get the next argument. Used to obtain arguments for the + * two-character options after the original call to acpi_getopt. + * Note: Either the argument starts at the next character after + * the option, or it is pointed to by the next argv entry. + * (After call to acpi_getopt, we need to backup to the previous + * argv entry). + * + ******************************************************************************/ + +int acpi_getopt_argument(int argc, char **argv) +{ + acpi_gbl_optind--; + current_char_ptr++; + + if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') { + acpi_gbl_optarg = + &argv[acpi_gbl_optind++][(int)(current_char_ptr + 1)]; + } else if (++acpi_gbl_optind >= argc) { + ACPI_OPTION_ERROR("Option requires an argument: -", 'v'); + + current_char_ptr = 1; + return (-1); + } else { + acpi_gbl_optarg = argv[acpi_gbl_optind++]; + } + + current_char_ptr = 1; + return (0); +} + +/******************************************************************************* + * + * FUNCTION: acpi_getopt + * + * PARAMETERS: argc, argv - from main + * opts - options info list + * + * RETURN: Option character or EOF + * + * DESCRIPTION: Get the next option + * + ******************************************************************************/ + +int acpi_getopt(int argc, char **argv, char *opts) +{ + int current_char; + char *opts_ptr; + + if (current_char_ptr == 1) { + if (acpi_gbl_optind >= argc || + argv[acpi_gbl_optind][0] != '-' || + argv[acpi_gbl_optind][1] == '\0') { + return (EOF); + } else if (strcmp(argv[acpi_gbl_optind], "--") == 0) { + acpi_gbl_optind++; + return (EOF); + } + } + + /* Get the option */ + + current_char = argv[acpi_gbl_optind][current_char_ptr]; + + /* Make sure that the option is legal */ + + if (current_char == ':' || + (opts_ptr = strchr(opts, current_char)) == NULL) { + ACPI_OPTION_ERROR("Illegal option: -", current_char); + + if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') { + acpi_gbl_optind++; + current_char_ptr = 1; + } + + return ('?'); + } + + /* Option requires an argument? 
*/ + + if (*++opts_ptr == ':') { + if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') { + acpi_gbl_optarg = + &argv[acpi_gbl_optind++][(int) + (current_char_ptr + 1)]; + } else if (++acpi_gbl_optind >= argc) { + ACPI_OPTION_ERROR("Option requires an argument: -", + current_char); + + current_char_ptr = 1; + return ('?'); + } else { + acpi_gbl_optarg = argv[acpi_gbl_optind++]; + } + + current_char_ptr = 1; + } + + /* Option has an optional argument? */ + + else if (*opts_ptr == '+') { + if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') { + acpi_gbl_optarg = + &argv[acpi_gbl_optind++][(int) + (current_char_ptr + 1)]; + } else if (++acpi_gbl_optind >= argc) { + acpi_gbl_optarg = NULL; + } else { + acpi_gbl_optarg = argv[acpi_gbl_optind++]; + } + + current_char_ptr = 1; + } + + /* Option has optional single-char arguments? */ + + else if (*opts_ptr == '^') { + if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') { + acpi_gbl_optarg = + &argv[acpi_gbl_optind][(int)(current_char_ptr + 1)]; + } else { + acpi_gbl_optarg = "^"; + } + + acpi_gbl_sub_opt_char = acpi_gbl_optarg[0]; + acpi_gbl_optind++; + current_char_ptr = 1; + } + + /* Option has a required single-char argument? */ + + else if (*opts_ptr == '|') { + if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') { + acpi_gbl_optarg = + &argv[acpi_gbl_optind][(int)(current_char_ptr + 1)]; + } else { + ACPI_OPTION_ERROR + ("Option requires a single-character suboption: -", + current_char); + + current_char_ptr = 1; + return ('?'); + } + + acpi_gbl_sub_opt_char = acpi_gbl_optarg[0]; + acpi_gbl_optind++; + current_char_ptr = 1; + } + + /* Option with no arguments */ + + else { + if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') { + current_char_ptr = 1; + acpi_gbl_optind++; + } + + acpi_gbl_optarg = NULL; + } + + return (current_char); +} diff --git a/tools/power/acpi/man/acpidump.8 b/tools/power/acpi/man/acpidump.8 new file mode 100644 index 00000000000..38f095d86b5 --- /dev/null +++ b/tools/power/acpi/man/acpidump.8 @@ -0,0 +1,120 @@ +.TH ACPIDUMP 8 +.SH NAME +acpidump \- dump a system's ACPI tables to an ASCII file + +.SH SYNOPSIS +.B acpidump +.RI [ options ] +.br + +.SH DESCRIPTION +.B acpidump +dumps the systems ACPI tables to an ASCII file appropriate for +attaching to a bug report. + +Subsequently, they can be processed by utilities in the ACPICA package. + +.SH OPTIONS +acpidump options are as follow: +.TP +.B Options +.TP +.B \-b +Dump tables to binary files +.TP +.B \-c +Dump customized tables +.TP +.B \-h \-? 
+This help message +.TP +.B \-o <File> +Redirect output to file +.TP +.B \-r <Address> +Dump tables from specified RSDP +.TP +.B \-s +Print table summaries only +.TP +.B \-v +Display version information +.TP +.B \-z +Verbose mode +.TP +.B Table Options +.TP +.B \-a <Address> +Get table via a physical address +.TP +.B \-f <BinaryFile> +Get table via a binary file +.TP +.B \-n <Signature> +Get table via a name/signature +.TP +Invocation without parameters dumps all available tables +.TP +Multiple mixed instances of -a, -f, and -n are supported + +.SH EXAMPLES + +.nf +# acpidump > acpidump.out + +$ acpixtract -a acpidump.out + Acpi table [DSDT] - 15974 bytes written to DSDT.dat + Acpi table [FACS] - 64 bytes written to FACS.dat + Acpi table [FACP] - 116 bytes written to FACP.dat + Acpi table [APIC] - 120 bytes written to APIC.dat + Acpi table [MCFG] - 60 bytes written to MCFG.dat + Acpi table [SSDT] - 444 bytes written to SSDT1.dat + Acpi table [SSDT] - 439 bytes written to SSDT2.dat + Acpi table [SSDT] - 439 bytes written to SSDT3.dat + Acpi table [SSDT] - 439 bytes written to SSDT4.dat + Acpi table [SSDT] - 439 bytes written to SSDT5.dat + Acpi table [RSDT] - 76 bytes written to RSDT.dat + Acpi table [RSDP] - 20 bytes written to RSDP.dat + +$ iasl -d *.dat +... +.fi +creates *.dsl, a human readable form which can be edited +and compiled using iasl. + + +.SH NOTES + +.B "acpidump " +must be run as root. + +.SH REFERENCES +ACPICA: https://acpica.org/ + +.SH FILES +.ta +.nf +/dev/mem +/sys/firmware/acpi/tables/* +/sys/firmware/acpi/tables/dynamic/* +/sys/firmware/efi/systab +.fi + +.SH AUTHOR +.TP +Original by: + Len Brown <len.brown@intel.com> +.TP +Written by: + Chao Guan <chao.guan@intel.com> +.TP +Updated by: + Bob Moore <robert.moore@intel.com> + Lv Zheng <lv.zheng@intel.com> + +.SH SEE ALSO +\&\fIacpixtract\fR\|(8), \fIiasl\fR\|(8). + +.SH COPYRIGHT +COPYRIGHT (c) 2013, Intel Corporation. diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c new file mode 100644 index 00000000000..28c52008e85 --- /dev/null +++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c @@ -0,0 +1,1329 @@ +/****************************************************************************** + * + * Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include "acpidump.h" + +#define _COMPONENT ACPI_OS_SERVICES +ACPI_MODULE_NAME("oslinuxtbl") + +#ifndef PATH_MAX +#define PATH_MAX 256 +#endif +/* List of information about obtained ACPI tables */ +typedef struct osl_table_info { + struct osl_table_info *next; + u32 instance; + char signature[ACPI_NAME_SIZE]; + +} osl_table_info; + +/* Local prototypes */ + +static acpi_status osl_table_initialize(void); + +static acpi_status +osl_table_name_from_file(char *filename, char *signature, u32 *instance); + +static acpi_status osl_add_table_to_list(char *signature, u32 instance); + +static acpi_status +osl_read_table_from_file(char *filename, + acpi_size file_offset, + char *signature, struct acpi_table_header **table); + +static acpi_status +osl_map_table(acpi_size address, + char *signature, struct acpi_table_header **table); + +static void osl_unmap_table(struct acpi_table_header *table); + +static acpi_physical_address osl_find_rsdp_via_efi(void); + +static acpi_status osl_load_rsdp(void); + +static acpi_status osl_list_customized_tables(char *directory); + +static acpi_status +osl_get_customized_table(char *pathname, + char *signature, + u32 instance, + struct acpi_table_header **table, + acpi_physical_address * address); + +static acpi_status osl_list_bios_tables(void); + +static acpi_status +osl_get_bios_table(char *signature, + u32 instance, + struct acpi_table_header **table, + acpi_physical_address * address); + +static acpi_status osl_get_last_status(acpi_status default_status); + +/* File locations */ + +#define DYNAMIC_TABLE_DIR "/sys/firmware/acpi/tables/dynamic" +#define STATIC_TABLE_DIR "/sys/firmware/acpi/tables" +#define EFI_SYSTAB "/sys/firmware/efi/systab" + +/* Should we get dynamically loaded SSDTs from DYNAMIC_TABLE_DIR? */ + +u8 gbl_dump_dynamic_tables = TRUE; + +/* Initialization flags */ + +u8 gbl_table_list_initialized = FALSE; + +/* Local copies of main ACPI tables */ + +struct acpi_table_rsdp gbl_rsdp; +struct acpi_table_fadt *gbl_fadt = NULL; +struct acpi_table_rsdt *gbl_rsdt = NULL; +struct acpi_table_xsdt *gbl_xsdt = NULL; + +/* Table addresses */ + +acpi_physical_address gbl_fadt_address = 0; +acpi_physical_address gbl_rsdp_address = 0; + +/* Revision of RSD PTR */ + +u8 gbl_revision = 0; + +struct osl_table_info *gbl_table_list_head = NULL; +u32 gbl_table_count = 0; + +/****************************************************************************** + * + * FUNCTION: osl_get_last_status + * + * PARAMETERS: default_status - Default error status to return + * + * RETURN: Status; Converted from errno. + * + * DESCRIPTION: Get last errno and conver it to acpi_status. 
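 *              For example, a failed open(2) that sets errno to ENOENT is
 *              reported to the caller as AE_NOT_FOUND, EACCES and EPERM map
 *              to AE_ACCESS, and ENOMEM maps to AE_NO_MEMORY; any other
 *              errno value falls back to the default_status argument.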
+ * + *****************************************************************************/ + +static acpi_status osl_get_last_status(acpi_status default_status) +{ + + switch (errno) { + case EACCES: + case EPERM: + + return (AE_ACCESS); + + case ENOENT: + + return (AE_NOT_FOUND); + + case ENOMEM: + + return (AE_NO_MEMORY); + + default: + + return (default_status); + } +} + +/****************************************************************************** + * + * FUNCTION: acpi_os_get_table_by_address + * + * PARAMETERS: address - Physical address of the ACPI table + * table - Where a pointer to the table is returned + * + * RETURN: Status; Table buffer is returned if AE_OK. + * AE_NOT_FOUND: A valid table was not found at the address + * + * DESCRIPTION: Get an ACPI table via a physical memory address. + * + *****************************************************************************/ + +acpi_status +acpi_os_get_table_by_address(acpi_physical_address address, + struct acpi_table_header ** table) +{ + u32 table_length; + struct acpi_table_header *mapped_table; + struct acpi_table_header *local_table = NULL; + acpi_status status = AE_OK; + + /* Get main ACPI tables from memory on first invocation of this function */ + + status = osl_table_initialize(); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Map the table and validate it */ + + status = osl_map_table(address, NULL, &mapped_table); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Copy table to local buffer and return it */ + + table_length = ap_get_table_length(mapped_table); + if (table_length == 0) { + status = AE_BAD_HEADER; + goto exit; + } + + local_table = calloc(1, table_length); + if (!local_table) { + status = AE_NO_MEMORY; + goto exit; + } + + ACPI_MEMCPY(local_table, mapped_table, table_length); + +exit: + osl_unmap_table(mapped_table); + *table = local_table; + return (status); +} + +/****************************************************************************** + * + * FUNCTION: acpi_os_get_table_by_name + * + * PARAMETERS: signature - ACPI Signature for desired table. Must be + * a null terminated 4-character string. + * instance - Multiple table support for SSDT/UEFI (0...n) + * Must be 0 for other tables. + * table - Where a pointer to the table is returned + * address - Where the table physical address is returned + * + * RETURN: Status; Table buffer and physical address returned if AE_OK. + * AE_LIMIT: Instance is beyond valid limit + * AE_NOT_FOUND: A table with the signature was not found + * + * NOTE: Assumes the input signature is uppercase. 
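 *
 * An illustrative call (a sketch, not part of this patch):
 *
 *     struct acpi_table_header *table;
 *     acpi_physical_address address;
 *     acpi_status status;
 *
 *     status = acpi_os_get_table_by_name("SSDT", 2, &table, &address);
 *
 * This requests SSDT instance 2 (instances count from 0) and returns
 * AE_LIMIT if that many SSDTs are not present; on success the caller owns
 * the calloc()ed copy and is expected to free() it.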
+ * + *****************************************************************************/ + +acpi_status +acpi_os_get_table_by_name(char *signature, + u32 instance, + struct acpi_table_header ** table, + acpi_physical_address * address) +{ + acpi_status status; + + /* Get main ACPI tables from memory on first invocation of this function */ + + status = osl_table_initialize(); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Not a main ACPI table, attempt to extract it from the RSDT/XSDT */ + + if (!gbl_dump_customized_tables) { + + /* Attempt to get the table from the memory */ + + status = + osl_get_bios_table(signature, instance, table, address); + } else { + /* Attempt to get the table from the static directory */ + + status = osl_get_customized_table(STATIC_TABLE_DIR, signature, + instance, table, address); + } + + if (ACPI_FAILURE(status) && status == AE_LIMIT) { + if (gbl_dump_dynamic_tables) { + + /* Attempt to get a dynamic table */ + + status = + osl_get_customized_table(DYNAMIC_TABLE_DIR, + signature, instance, table, + address); + } + } + + return (status); +} + +/****************************************************************************** + * + * FUNCTION: osl_add_table_to_list + * + * PARAMETERS: signature - Table signature + * instance - Table instance + * + * RETURN: Status; Successfully added if AE_OK. + * AE_NO_MEMORY: Memory allocation error + * + * DESCRIPTION: Insert a table structure into OSL table list. + * + *****************************************************************************/ + +static acpi_status osl_add_table_to_list(char *signature, u32 instance) +{ + struct osl_table_info *new_info; + struct osl_table_info *next; + u32 next_instance = 0; + u8 found = FALSE; + + new_info = calloc(1, sizeof(struct osl_table_info)); + if (!new_info) { + return (AE_NO_MEMORY); + } + + ACPI_MOVE_NAME(new_info->signature, signature); + + if (!gbl_table_list_head) { + gbl_table_list_head = new_info; + } else { + next = gbl_table_list_head; + while (1) { + if (ACPI_COMPARE_NAME(next->signature, signature)) { + if (next->instance == instance) { + found = TRUE; + } + if (next->instance >= next_instance) { + next_instance = next->instance + 1; + } + } + + if (!next->next) { + break; + } + next = next->next; + } + next->next = new_info; + } + + if (found) { + if (instance) { + fprintf(stderr, + "%4.4s: Warning unmatched table instance %d, expected %d\n", + signature, instance, next_instance); + } + instance = next_instance; + } + + new_info->instance = instance; + gbl_table_count++; + + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: acpi_os_get_table_by_index + * + * PARAMETERS: index - Which table to get + * table - Where a pointer to the table is returned + * instance - Where a pointer to the table instance no. is + * returned + * address - Where the table physical address is returned + * + * RETURN: Status; Table buffer and physical address returned if AE_OK. + * AE_LIMIT: Index is beyond valid limit + * + * DESCRIPTION: Get an ACPI table via an index value (0 through n). Returns + * AE_LIMIT when an invalid index is reached. Index is not + * necessarily an index into the RSDT/XSDT. 
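 *
 * Typical iteration over all tables (a sketch, not part of this patch,
 * with the obvious declarations), where AE_LIMIT marks the end of the list:
 *
 *     for (i = 0; ; i++) {
 *         status = acpi_os_get_table_by_index(i, &table, &instance, &address);
 *         if (status == AE_LIMIT) {
 *             break;
 *         }
 *         ...
 *     }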
+ * + *****************************************************************************/ + +acpi_status +acpi_os_get_table_by_index(u32 index, + struct acpi_table_header ** table, + u32 *instance, acpi_physical_address * address) +{ + struct osl_table_info *info; + acpi_status status; + u32 i; + + /* Get main ACPI tables from memory on first invocation of this function */ + + status = osl_table_initialize(); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Validate Index */ + + if (index >= gbl_table_count) { + return (AE_LIMIT); + } + + /* Point to the table list entry specified by the Index argument */ + + info = gbl_table_list_head; + for (i = 0; i < index; i++) { + info = info->next; + } + + /* Now we can just get the table via the signature */ + + status = acpi_os_get_table_by_name(info->signature, info->instance, + table, address); + + if (ACPI_SUCCESS(status)) { + *instance = info->instance; + } + return (status); +} + +/****************************************************************************** + * + * FUNCTION: osl_find_rsdp_via_efi + * + * PARAMETERS: None + * + * RETURN: RSDP address if found + * + * DESCRIPTION: Find RSDP address via EFI. + * + *****************************************************************************/ + +static acpi_physical_address osl_find_rsdp_via_efi(void) +{ + FILE *file; + char buffer[80]; + unsigned long address = 0; + + file = fopen(EFI_SYSTAB, "r"); + if (file) { + while (fgets(buffer, 80, file)) { + if (sscanf(buffer, "ACPI20=0x%lx", &address) == 1) { + break; + } + } + fclose(file); + } + + return ((acpi_physical_address) (address)); +} + +/****************************************************************************** + * + * FUNCTION: osl_load_rsdp + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Scan and load RSDP. + * + *****************************************************************************/ + +static acpi_status osl_load_rsdp(void) +{ + struct acpi_table_header *mapped_table; + u8 *rsdp_address; + acpi_physical_address rsdp_base; + acpi_size rsdp_size; + + /* Get RSDP from memory */ + + rsdp_size = sizeof(struct acpi_table_rsdp); + if (gbl_rsdp_base) { + rsdp_base = gbl_rsdp_base; + } else { + rsdp_base = osl_find_rsdp_via_efi(); + } + + if (!rsdp_base) { + rsdp_base = ACPI_HI_RSDP_WINDOW_BASE; + rsdp_size = ACPI_HI_RSDP_WINDOW_SIZE; + } + + rsdp_address = acpi_os_map_memory(rsdp_base, rsdp_size); + if (!rsdp_address) { + return (osl_get_last_status(AE_BAD_ADDRESS)); + } + + /* Search low memory for the RSDP */ + + mapped_table = ACPI_CAST_PTR(struct acpi_table_header, + acpi_tb_scan_memory_for_rsdp(rsdp_address, + rsdp_size)); + if (!mapped_table) { + acpi_os_unmap_memory(rsdp_address, rsdp_size); + return (AE_NOT_FOUND); + } + + gbl_rsdp_address = + rsdp_base + (ACPI_CAST8(mapped_table) - rsdp_address); + + ACPI_MEMCPY(&gbl_rsdp, mapped_table, sizeof(struct acpi_table_rsdp)); + acpi_os_unmap_memory(rsdp_address, rsdp_size); + + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: osl_can_use_xsdt + * + * PARAMETERS: None + * + * RETURN: TRUE if XSDT is allowed to be used. + * + * DESCRIPTION: This function collects logic that can be used to determine if + * XSDT should be used instead of RSDT. 
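 *              In practice this returns TRUE only when the RSDP revision
 *              indicates ACPI 2.0 or later (gbl_revision is nonzero) and
 *              the user has not forced RSDT-only operation through
 *              acpi_gbl_do_not_use_xsdt.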
+ * + *****************************************************************************/ + +static u8 osl_can_use_xsdt(void) +{ + if (gbl_revision && !acpi_gbl_do_not_use_xsdt) { + return (TRUE); + } else { + return (FALSE); + } +} + +/****************************************************************************** + * + * FUNCTION: osl_table_initialize + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Initialize ACPI table data. Get and store main ACPI tables to + * local variables. Main ACPI tables include RSDT, FADT, RSDT, + * and/or XSDT. + * + *****************************************************************************/ + +static acpi_status osl_table_initialize(void) +{ + acpi_status status; + acpi_physical_address address; + + if (gbl_table_list_initialized) { + return (AE_OK); + } + + /* Get RSDP from memory */ + + status = osl_load_rsdp(); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Get XSDT from memory */ + + if (gbl_rsdp.revision && !gbl_do_not_dump_xsdt) { + if (gbl_xsdt) { + free(gbl_xsdt); + gbl_xsdt = NULL; + } + + gbl_revision = 2; + status = osl_get_bios_table(ACPI_SIG_XSDT, 0, + ACPI_CAST_PTR(struct + acpi_table_header *, + &gbl_xsdt), &address); + if (ACPI_FAILURE(status)) { + return (status); + } + } + + /* Get RSDT from memory */ + + if (gbl_rsdp.rsdt_physical_address) { + if (gbl_rsdt) { + free(gbl_rsdt); + gbl_rsdt = NULL; + } + + status = osl_get_bios_table(ACPI_SIG_RSDT, 0, + ACPI_CAST_PTR(struct + acpi_table_header *, + &gbl_rsdt), &address); + if (ACPI_FAILURE(status)) { + return (status); + } + } + + /* Get FADT from memory */ + + if (gbl_fadt) { + free(gbl_fadt); + gbl_fadt = NULL; + } + + status = osl_get_bios_table(ACPI_SIG_FADT, 0, + ACPI_CAST_PTR(struct acpi_table_header *, + &gbl_fadt), + &gbl_fadt_address); + if (ACPI_FAILURE(status)) { + return (status); + } + + if (!gbl_dump_customized_tables) { + + /* Add mandatory tables to global table list first */ + + status = osl_add_table_to_list(ACPI_RSDP_NAME, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + + status = osl_add_table_to_list(ACPI_SIG_RSDT, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + + if (gbl_revision == 2) { + status = osl_add_table_to_list(ACPI_SIG_XSDT, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + } + + status = osl_add_table_to_list(ACPI_SIG_DSDT, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + + status = osl_add_table_to_list(ACPI_SIG_FACS, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Add all tables found in the memory */ + + status = osl_list_bios_tables(); + if (ACPI_FAILURE(status)) { + return (status); + } + } else { + /* Add all tables found in the static directory */ + + status = osl_list_customized_tables(STATIC_TABLE_DIR); + if (ACPI_FAILURE(status)) { + return (status); + } + } + + if (gbl_dump_dynamic_tables) { + + /* Add all dynamically loaded tables in the dynamic directory */ + + status = osl_list_customized_tables(DYNAMIC_TABLE_DIR); + if (ACPI_FAILURE(status)) { + return (status); + } + } + + gbl_table_list_initialized = TRUE; + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: osl_list_bios_tables + * + * PARAMETERS: None + * + * RETURN: Status; Table list is initialized if AE_OK. + * + * DESCRIPTION: Add ACPI tables to the table list from memory. + * + * NOTE: This works on Linux as table customization does not modify the + * addresses stored in RSDP/RSDT/XSDT/FADT. 
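 *
 * The entry count is derived directly from the SDT header: an XSDT of
 * length 36 + (8 * N) bytes carries N 64-bit table addresses, and an RSDT
 * of length 36 + (4 * N) bytes carries N 32-bit addresses, 36 being
 * sizeof(struct acpi_table_header).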
+ * + *****************************************************************************/ + +static acpi_status osl_list_bios_tables(void) +{ + struct acpi_table_header *mapped_table = NULL; + u8 *table_data; + u8 number_of_tables; + u8 item_size; + acpi_physical_address table_address = 0; + acpi_status status = AE_OK; + u32 i; + + if (osl_can_use_xsdt()) { + item_size = sizeof(u64); + table_data = + ACPI_CAST8(gbl_xsdt) + sizeof(struct acpi_table_header); + number_of_tables = + (u8)((gbl_xsdt->header.length - + sizeof(struct acpi_table_header)) + / item_size); + } else { /* Use RSDT if XSDT is not available */ + + item_size = sizeof(u32); + table_data = + ACPI_CAST8(gbl_rsdt) + sizeof(struct acpi_table_header); + number_of_tables = + (u8)((gbl_rsdt->header.length - + sizeof(struct acpi_table_header)) + / item_size); + } + + /* Search RSDT/XSDT for the requested table */ + + for (i = 0; i < number_of_tables; ++i, table_data += item_size) { + if (osl_can_use_xsdt()) { + table_address = + (acpi_physical_address) (*ACPI_CAST64(table_data)); + } else { + table_address = + (acpi_physical_address) (*ACPI_CAST32(table_data)); + } + + /* Skip NULL entries in RSDT/XSDT */ + + if (!table_address) { + continue; + } + + status = osl_map_table(table_address, NULL, &mapped_table); + if (ACPI_FAILURE(status)) { + return (status); + } + + osl_add_table_to_list(mapped_table->signature, 0); + osl_unmap_table(mapped_table); + } + + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: osl_get_bios_table + * + * PARAMETERS: signature - ACPI Signature for common table. Must be + * a null terminated 4-character string. + * instance - Multiple table support for SSDT/UEFI (0...n) + * Must be 0 for other tables. + * table - Where a pointer to the table is returned + * address - Where the table physical address is returned + * + * RETURN: Status; Table buffer and physical address returned if AE_OK. + * AE_LIMIT: Instance is beyond valid limit + * AE_NOT_FOUND: A table with the signature was not found + * + * DESCRIPTION: Get a BIOS provided ACPI table + * + * NOTE: Assumes the input signature is uppercase. + * + *****************************************************************************/ + +static acpi_status +osl_get_bios_table(char *signature, + u32 instance, + struct acpi_table_header **table, + acpi_physical_address * address) +{ + struct acpi_table_header *local_table = NULL; + struct acpi_table_header *mapped_table = NULL; + u8 *table_data; + u8 number_of_tables; + u8 item_size; + u32 current_instance = 0; + acpi_physical_address table_address = 0; + u32 table_length = 0; + acpi_status status = AE_OK; + u32 i; + + /* Handle special tables whose addresses are not in RSDT/XSDT */ + + if (ACPI_COMPARE_NAME(signature, ACPI_RSDP_NAME) || + ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT) || + ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT) || + ACPI_COMPARE_NAME(signature, ACPI_SIG_DSDT) || + ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) { + if (instance > 0) { + return (AE_LIMIT); + } + + /* + * Get the appropriate address, either 32-bit or 64-bit. Be very + * careful about the FADT length and validate table addresses. + * Note: The 64-bit addresses have priority. 
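	 * For the DSDT, for instance, the 64-bit Xdsdt field is used only
	 * when the FADT is long enough to actually contain that field
	 * (header.length >= MIN_FADT_FOR_XDSDT) and the field is nonzero;
	 * otherwise the code falls back to the 32-bit dsdt field. The FACS
	 * address is selected the same way through Xfacs/facs.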
+ */ + if (ACPI_COMPARE_NAME(signature, ACPI_SIG_DSDT)) { + if ((gbl_fadt->header.length >= MIN_FADT_FOR_XDSDT) && + gbl_fadt->Xdsdt) { + table_address = + (acpi_physical_address) gbl_fadt->Xdsdt; + } else + if ((gbl_fadt->header.length >= MIN_FADT_FOR_DSDT) + && gbl_fadt->dsdt) { + table_address = + (acpi_physical_address) gbl_fadt->dsdt; + } + } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) { + if ((gbl_fadt->header.length >= MIN_FADT_FOR_XFACS) && + gbl_fadt->Xfacs) { + table_address = + (acpi_physical_address) gbl_fadt->Xfacs; + } else + if ((gbl_fadt->header.length >= MIN_FADT_FOR_FACS) + && gbl_fadt->facs) { + table_address = + (acpi_physical_address) gbl_fadt->facs; + } + } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT)) { + if (!gbl_revision) { + return (AE_BAD_SIGNATURE); + } + table_address = + (acpi_physical_address) gbl_rsdp. + xsdt_physical_address; + } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT)) { + table_address = + (acpi_physical_address) gbl_rsdp. + rsdt_physical_address; + } else { + table_address = + (acpi_physical_address) gbl_rsdp_address; + signature = ACPI_SIG_RSDP; + } + + /* Now we can get the requested special table */ + + status = osl_map_table(table_address, signature, &mapped_table); + if (ACPI_FAILURE(status)) { + return (status); + } + + table_length = ap_get_table_length(mapped_table); + } else { /* Case for a normal ACPI table */ + + if (osl_can_use_xsdt()) { + item_size = sizeof(u64); + table_data = + ACPI_CAST8(gbl_xsdt) + + sizeof(struct acpi_table_header); + number_of_tables = + (u8)((gbl_xsdt->header.length - + sizeof(struct acpi_table_header)) + / item_size); + } else { /* Use RSDT if XSDT is not available */ + + item_size = sizeof(u32); + table_data = + ACPI_CAST8(gbl_rsdt) + + sizeof(struct acpi_table_header); + number_of_tables = + (u8)((gbl_rsdt->header.length - + sizeof(struct acpi_table_header)) + / item_size); + } + + /* Search RSDT/XSDT for the requested table */ + + for (i = 0; i < number_of_tables; ++i, table_data += item_size) { + if (osl_can_use_xsdt()) { + table_address = + (acpi_physical_address) (*ACPI_CAST64 + (table_data)); + } else { + table_address = + (acpi_physical_address) (*ACPI_CAST32 + (table_data)); + } + + /* Skip NULL entries in RSDT/XSDT */ + + if (!table_address) { + continue; + } + + status = + osl_map_table(table_address, NULL, &mapped_table); + if (ACPI_FAILURE(status)) { + return (status); + } + table_length = mapped_table->length; + + /* Does this table match the requested signature? 
*/ + + if (!ACPI_COMPARE_NAME + (mapped_table->signature, signature)) { + osl_unmap_table(mapped_table); + mapped_table = NULL; + continue; + } + + /* Match table instance (for SSDT/UEFI tables) */ + + if (current_instance != instance) { + osl_unmap_table(mapped_table); + mapped_table = NULL; + current_instance++; + continue; + } + + break; + } + } + + if (!mapped_table) { + return (AE_LIMIT); + } + + if (table_length == 0) { + status = AE_BAD_HEADER; + goto exit; + } + + /* Copy table to local buffer and return it */ + + local_table = calloc(1, table_length); + if (!local_table) { + status = AE_NO_MEMORY; + goto exit; + } + + ACPI_MEMCPY(local_table, mapped_table, table_length); + *address = table_address; + *table = local_table; + +exit: + osl_unmap_table(mapped_table); + return (status); +} + +/****************************************************************************** + * + * FUNCTION: osl_list_customized_tables + * + * PARAMETERS: directory - Directory that contains the tables + * + * RETURN: Status; Table list is initialized if AE_OK. + * + * DESCRIPTION: Add ACPI tables to the table list from a directory. + * + *****************************************************************************/ + +static acpi_status osl_list_customized_tables(char *directory) +{ + void *table_dir; + u32 instance; + char temp_name[ACPI_NAME_SIZE]; + char *filename; + acpi_status status = AE_OK; + + /* Open the requested directory */ + + table_dir = acpi_os_open_directory(directory, "*", REQUEST_FILE_ONLY); + if (!table_dir) { + return (osl_get_last_status(AE_NOT_FOUND)); + } + + /* Examine all entries in this directory */ + + while ((filename = acpi_os_get_next_filename(table_dir))) { + + /* Extract table name and instance number */ + + status = + osl_table_name_from_file(filename, temp_name, &instance); + + /* Ignore meaningless files */ + + if (ACPI_FAILURE(status)) { + continue; + } + + /* Add new info node to global table list */ + + status = osl_add_table_to_list(temp_name, instance); + if (ACPI_FAILURE(status)) { + break; + } + } + + acpi_os_close_directory(table_dir); + return (status); +} + +/****************************************************************************** + * + * FUNCTION: osl_map_table + * + * PARAMETERS: address - Address of the table in memory + * signature - Optional ACPI Signature for desired table. + * Null terminated 4-character string. + * table - Where a pointer to the mapped table is + * returned + * + * RETURN: Status; Mapped table is returned if AE_OK. + * AE_NOT_FOUND: A valid table was not found at the address + * + * DESCRIPTION: Map entire ACPI table into caller's address space. + * + *****************************************************************************/ + +static acpi_status +osl_map_table(acpi_size address, + char *signature, struct acpi_table_header **table) +{ + struct acpi_table_header *mapped_table; + u32 length; + + if (!address) { + return (AE_BAD_ADDRESS); + } + + /* + * Map the header so we can get the table length. + * Use sizeof (struct acpi_table_header) as: + * 1. it is bigger than 24 to include RSDP->Length + * 2. 
it is smaller than sizeof (struct acpi_table_rsdp) + */ + mapped_table = + acpi_os_map_memory(address, sizeof(struct acpi_table_header)); + if (!mapped_table) { + fprintf(stderr, "Could not map table header at 0x%8.8X%8.8X\n", + ACPI_FORMAT_UINT64(address)); + return (osl_get_last_status(AE_BAD_ADDRESS)); + } + + /* If specified, signature must match */ + + if (signature) { + if (ACPI_VALIDATE_RSDP_SIG(signature)) { + if (!ACPI_VALIDATE_RSDP_SIG(mapped_table->signature)) { + acpi_os_unmap_memory(mapped_table, + sizeof(struct + acpi_table_header)); + return (AE_BAD_SIGNATURE); + } + } else + if (!ACPI_COMPARE_NAME(signature, mapped_table->signature)) + { + acpi_os_unmap_memory(mapped_table, + sizeof(struct acpi_table_header)); + return (AE_BAD_SIGNATURE); + } + } + + /* Map the entire table */ + + length = ap_get_table_length(mapped_table); + acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header)); + if (length == 0) { + return (AE_BAD_HEADER); + } + + mapped_table = acpi_os_map_memory(address, length); + if (!mapped_table) { + fprintf(stderr, + "Could not map table at 0x%8.8X%8.8X length %8.8X\n", + ACPI_FORMAT_UINT64(address), length); + return (osl_get_last_status(AE_INVALID_TABLE_LENGTH)); + } + + (void)ap_is_valid_checksum(mapped_table); + + *table = mapped_table; + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: osl_unmap_table + * + * PARAMETERS: table - A pointer to the mapped table + * + * RETURN: None + * + * DESCRIPTION: Unmap entire ACPI table. + * + *****************************************************************************/ + +static void osl_unmap_table(struct acpi_table_header *table) +{ + if (table) { + acpi_os_unmap_memory(table, ap_get_table_length(table)); + } +} + +/****************************************************************************** + * + * FUNCTION: osl_table_name_from_file + * + * PARAMETERS: filename - File that contains the desired table + * signature - Pointer to 4-character buffer to store + * extracted table signature. + * instance - Pointer to integer to store extracted + * table instance number. + * + * RETURN: Status; Table name is extracted if AE_OK. + * + * DESCRIPTION: Extract table signature and instance number from a table file + * name. + * + *****************************************************************************/ + +static acpi_status +osl_table_name_from_file(char *filename, char *signature, u32 *instance) +{ + + /* Ignore meaningless files */ + + if (strlen(filename) < ACPI_NAME_SIZE) { + return (AE_BAD_SIGNATURE); + } + + /* Extract instance number */ + + if (isdigit((int)filename[ACPI_NAME_SIZE])) { + sscanf(&filename[ACPI_NAME_SIZE], "%d", instance); + } else if (strlen(filename) != ACPI_NAME_SIZE) { + return (AE_BAD_SIGNATURE); + } else { + *instance = 0; + } + + /* Extract signature */ + + ACPI_MOVE_NAME(signature, filename); + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: osl_read_table_from_file + * + * PARAMETERS: filename - File that contains the desired table + * file_offset - Offset of the table in file + * signature - Optional ACPI Signature for desired table. + * A null terminated 4-character string. + * table - Where a pointer to the table is returned + * + * RETURN: Status; Table buffer is returned if AE_OK. + * + * DESCRIPTION: Read a ACPI table from a file. 
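 *              The file is read in two passes: first just the fixed-size
 *              acpi_table_header to obtain and validate the full table
 *              length, then a second read of that many bytes into a
 *              calloc()ed buffer. The checksum is verified with
 *              ap_is_valid_checksum(), but a mismatch does not fail the call.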
+ * + *****************************************************************************/ + +static acpi_status +osl_read_table_from_file(char *filename, + acpi_size file_offset, + char *signature, struct acpi_table_header **table) +{ + FILE *table_file; + struct acpi_table_header header; + struct acpi_table_header *local_table = NULL; + u32 table_length; + s32 count; + acpi_status status = AE_OK; + + /* Open the file */ + + table_file = fopen(filename, "rb"); + if (table_file == NULL) { + fprintf(stderr, "Could not open table file: %s\n", filename); + return (osl_get_last_status(AE_NOT_FOUND)); + } + + fseek(table_file, file_offset, SEEK_SET); + + /* Read the Table header to get the table length */ + + count = fread(&header, 1, sizeof(struct acpi_table_header), table_file); + if (count != sizeof(struct acpi_table_header)) { + fprintf(stderr, "Could not read table header: %s\n", filename); + status = AE_BAD_HEADER; + goto exit; + } + + /* If signature is specified, it must match the table */ + + if (signature) { + if (ACPI_VALIDATE_RSDP_SIG(signature)) { + if (!ACPI_VALIDATE_RSDP_SIG(header.signature)) { + fprintf(stderr, + "Incorrect RSDP signature: found %8.8s\n", + header.signature); + status = AE_BAD_SIGNATURE; + goto exit; + } + } else if (!ACPI_COMPARE_NAME(signature, header.signature)) { + fprintf(stderr, + "Incorrect signature: Expecting %4.4s, found %4.4s\n", + signature, header.signature); + status = AE_BAD_SIGNATURE; + goto exit; + } + } + + table_length = ap_get_table_length(&header); + if (table_length == 0) { + status = AE_BAD_HEADER; + goto exit; + } + + /* Read the entire table into a local buffer */ + + local_table = calloc(1, table_length); + if (!local_table) { + fprintf(stderr, + "%4.4s: Could not allocate buffer for table of length %X\n", + header.signature, table_length); + status = AE_NO_MEMORY; + goto exit; + } + + fseek(table_file, file_offset, SEEK_SET); + + count = fread(local_table, 1, table_length, table_file); + if (count != table_length) { + fprintf(stderr, "%4.4s: Could not read table content\n", + header.signature); + status = AE_INVALID_TABLE_LENGTH; + goto exit; + } + + /* Validate checksum */ + + (void)ap_is_valid_checksum(local_table); + +exit: + fclose(table_file); + *table = local_table; + return (status); +} + +/****************************************************************************** + * + * FUNCTION: osl_get_customized_table + * + * PARAMETERS: pathname - Directory to find Linux customized table + * signature - ACPI Signature for desired table. Must be + * a null terminated 4-character string. + * instance - Multiple table support for SSDT/UEFI (0...n) + * Must be 0 for other tables. + * table - Where a pointer to the table is returned + * address - Where the table physical address is returned + * + * RETURN: Status; Table buffer is returned if AE_OK. + * AE_LIMIT: Instance is beyond valid limit + * AE_NOT_FOUND: A table with the signature was not found + * + * DESCRIPTION: Get an OS customized table. 
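 *              File names follow the sysfs layout used by the kernel:
 *              instance 0 of a table is "<pathname>/<SIG>" and later
 *              instances are "<pathname>/<SIG><n>", so SSDT instance 2 is
 *              read from, e.g., /sys/firmware/acpi/tables/dynamic/SSDT2.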
+ * + *****************************************************************************/ + +static acpi_status +osl_get_customized_table(char *pathname, + char *signature, + u32 instance, + struct acpi_table_header **table, + acpi_physical_address * address) +{ + void *table_dir; + u32 current_instance = 0; + char temp_name[ACPI_NAME_SIZE]; + char table_filename[PATH_MAX]; + char *filename; + acpi_status status; + + /* Open the directory for customized tables */ + + table_dir = acpi_os_open_directory(pathname, "*", REQUEST_FILE_ONLY); + if (!table_dir) { + return (osl_get_last_status(AE_NOT_FOUND)); + } + + /* Attempt to find the table in the directory */ + + while ((filename = acpi_os_get_next_filename(table_dir))) { + + /* Ignore meaningless files */ + + if (!ACPI_COMPARE_NAME(filename, signature)) { + continue; + } + + /* Extract table name and instance number */ + + status = + osl_table_name_from_file(filename, temp_name, + ¤t_instance); + + /* Ignore meaningless files */ + + if (ACPI_FAILURE(status) || current_instance != instance) { + continue; + } + + /* Create the table pathname */ + + if (instance != 0) { + sprintf(table_filename, "%s/%4.4s%d", pathname, + temp_name, instance); + } else { + sprintf(table_filename, "%s/%4.4s", pathname, + temp_name); + } + break; + } + + acpi_os_close_directory(table_dir); + + if (!filename) { + return (AE_LIMIT); + } + + /* There is no physical address saved for customized tables, use zero */ + + *address = 0; + status = osl_read_table_from_file(table_filename, 0, NULL, table); + + return (status); +} diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c new file mode 100644 index 00000000000..733f9e490fc --- /dev/null +++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c @@ -0,0 +1,204 @@ +/****************************************************************************** + * + * Module Name: osunixdir - Unix directory access interfaces + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <dirent.h> +#include <fnmatch.h> +#include <ctype.h> +#include <sys/stat.h> + +/* + * Allocated structure returned from os_open_directory + */ +typedef struct external_find_info { + char *dir_pathname; + DIR *dir_ptr; + char temp_buffer[256]; + char *wildcard_spec; + char requested_file_type; + +} external_find_info; + +/******************************************************************************* + * + * FUNCTION: acpi_os_open_directory + * + * PARAMETERS: dir_pathname - Full pathname to the directory + * wildcard_spec - string of the form "*.c", etc. + * + * RETURN: A directory "handle" to be used in subsequent search operations. + * NULL returned on failure. + * + * DESCRIPTION: Open a directory in preparation for a wildcard search + * + ******************************************************************************/ + +void *acpi_os_open_directory(char *dir_pathname, + char *wildcard_spec, char requested_file_type) +{ + struct external_find_info *external_info; + DIR *dir; + + /* Allocate the info struct that will be returned to the caller */ + + external_info = calloc(1, sizeof(struct external_find_info)); + if (!external_info) { + return (NULL); + } + + /* Get the directory stream */ + + dir = opendir(dir_pathname); + if (!dir) { + fprintf(stderr, "Cannot open directory - %s\n", dir_pathname); + free(external_info); + return (NULL); + } + + /* Save the info in the return structure */ + + external_info->wildcard_spec = wildcard_spec; + external_info->requested_file_type = requested_file_type; + external_info->dir_pathname = dir_pathname; + external_info->dir_ptr = dir; + return (external_info); +} + +/******************************************************************************* + * + * FUNCTION: acpi_os_get_next_filename + * + * PARAMETERS: dir_handle - Created via acpi_os_open_directory + * + * RETURN: Next filename matched. NULL if no more matches. + * + * DESCRIPTION: Get the next file in the directory that matches the wildcard + * specification. 
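 *
 * Typical usage (a sketch, not part of this patch), after checking that
 * the directory handle is not NULL:
 *
 *     void *dir = acpi_os_open_directory(pathname, "*", REQUEST_FILE_ONLY);
 *     char *filename;
 *
 *     while ((filename = acpi_os_get_next_filename(dir))) {
 *         ...     the returned name lives in a buffer inside the
 *                 directory handle and is only valid until the next call
 *     }
 *     acpi_os_close_directory(dir);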
+ * + ******************************************************************************/ + +char *acpi_os_get_next_filename(void *dir_handle) +{ + struct external_find_info *external_info = dir_handle; + struct dirent *dir_entry; + char *temp_str; + int str_len; + struct stat temp_stat; + int err; + + while ((dir_entry = readdir(external_info->dir_ptr))) { + if (!fnmatch + (external_info->wildcard_spec, dir_entry->d_name, 0)) { + if (dir_entry->d_name[0] == '.') { + continue; + } + + str_len = strlen(dir_entry->d_name) + + strlen(external_info->dir_pathname) + 2; + + temp_str = calloc(str_len, 1); + if (!temp_str) { + fprintf(stderr, + "Could not allocate buffer for temporary string\n"); + return (NULL); + } + + strcpy(temp_str, external_info->dir_pathname); + strcat(temp_str, "/"); + strcat(temp_str, dir_entry->d_name); + + err = stat(temp_str, &temp_stat); + if (err == -1) { + fprintf(stderr, + "Cannot stat file (should not happen) - %s\n", + temp_str); + free(temp_str); + return (NULL); + } + + free(temp_str); + + if ((S_ISDIR(temp_stat.st_mode) + && (external_info->requested_file_type == + REQUEST_DIR_ONLY)) + || ((!S_ISDIR(temp_stat.st_mode) + && external_info->requested_file_type == + REQUEST_FILE_ONLY))) { + + /* copy to a temp buffer because dir_entry struct is on the stack */ + + strcpy(external_info->temp_buffer, + dir_entry->d_name); + return (external_info->temp_buffer); + } + } + } + + return (NULL); +} + +/******************************************************************************* + * + * FUNCTION: acpi_os_close_directory + * + * PARAMETERS: dir_handle - Created via acpi_os_open_directory + * + * RETURN: None. + * + * DESCRIPTION: Close the open directory and cleanup. + * + ******************************************************************************/ + +void acpi_os_close_directory(void *dir_handle) +{ + struct external_find_info *external_info = dir_handle; + + /* Close the directory and free allocations */ + + closedir(external_info->dir_ptr); + free(dir_handle); +} diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c new file mode 100644 index 00000000000..99b47b6194a --- /dev/null +++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c @@ -0,0 +1,151 @@ +/****************************************************************************** + * + * Module Name: osunixmap - Unix OSL for file mappings + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include "acpidump.h" +#include <unistd.h> +#include <sys/mman.h> +#ifdef _free_BSD +#include <sys/param.h> +#endif + +#define _COMPONENT ACPI_OS_SERVICES +ACPI_MODULE_NAME("osunixmap") + +#ifndef O_BINARY +#define O_BINARY 0 +#endif +#ifdef _free_BSD +#define MMAP_FLAGS MAP_SHARED +#else +#define MMAP_FLAGS MAP_PRIVATE +#endif +#define SYSTEM_MEMORY "/dev/mem" +/******************************************************************************* + * + * FUNCTION: acpi_os_get_page_size + * + * PARAMETERS: None + * + * RETURN: Page size of the platform. + * + * DESCRIPTION: Obtain page size of the platform. + * + ******************************************************************************/ +static acpi_size acpi_os_get_page_size(void) +{ + +#ifdef PAGE_SIZE + return PAGE_SIZE; +#else + return sysconf(_SC_PAGESIZE); +#endif +} + +/****************************************************************************** + * + * FUNCTION: acpi_os_map_memory + * + * PARAMETERS: where - Physical address of memory to be mapped + * length - How much memory to map + * + * RETURN: Pointer to mapped memory. Null on error. + * + * DESCRIPTION: Map physical memory into local address space. + * + *****************************************************************************/ + +void *acpi_os_map_memory(acpi_physical_address where, acpi_size length) +{ + u8 *mapped_memory; + acpi_physical_address offset; + acpi_size page_size; + int fd; + + fd = open(SYSTEM_MEMORY, O_RDONLY | O_BINARY); + if (fd < 0) { + fprintf(stderr, "Cannot open %s\n", SYSTEM_MEMORY); + return (NULL); + } + + /* Align the offset to use mmap */ + + page_size = acpi_os_get_page_size(); + offset = where % page_size; + + /* Map the table header to get the length of the full table */ + + mapped_memory = mmap(NULL, (length + offset), PROT_READ, MMAP_FLAGS, + fd, (where - offset)); + if (mapped_memory == MAP_FAILED) { + fprintf(stderr, "Cannot map %s\n", SYSTEM_MEMORY); + close(fd); + return (NULL); + } + + close(fd); + return (ACPI_CAST8(mapped_memory + offset)); +} + +/****************************************************************************** + * + * FUNCTION: acpi_os_unmap_memory + * + * PARAMETERS: where - Logical address of memory to be unmapped + * length - How much memory to unmap + * + * RETURN: None. + * + * DESCRIPTION: Delete a previously created mapping. Where and Length must + * correspond to a previous mapping exactly. 
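acpi_os_map_memory() above cannot hand an arbitrary physical address to mmap(), so it rounds the address down to a page boundary, grows the length by the remainder, and returns a pointer advanced past that remainder; acpi_os_unmap_memory() repeats the same arithmetic to recover the original mapping. A standalone sketch of just that alignment math (the address and length are made-up example values):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t where = 0x7fe53084;            /* example physical address */
        size_t length = 0x24;                   /* bytes the caller asked for */
        size_t page_size = sysconf(_SC_PAGESIZE);

        uint64_t offset = where % page_size;    /* distance into the page */
        uint64_t map_base = where - offset;     /* page-aligned offset for mmap */
        size_t map_length = length + offset;    /* still covers the whole range */

        printf("mmap at 0x%llx for 0x%zx bytes, caller pointer = mapping + 0x%llx\n",
               (unsigned long long)map_base, map_length,
               (unsigned long long)offset);
        return 0;
}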
+ * + *****************************************************************************/ + +void acpi_os_unmap_memory(void *where, acpi_size length) +{ + acpi_physical_address offset; + acpi_size page_size; + + page_size = acpi_os_get_page_size(); + offset = (acpi_physical_address) where % page_size; + munmap((u8 *)where - offset, (length + offset)); +} diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h new file mode 100644 index 00000000000..46f519597fe --- /dev/null +++ b/tools/power/acpi/tools/acpidump/acpidump.h @@ -0,0 +1,130 @@ +/****************************************************************************** + * + * Module Name: acpidump.h - Include file for acpi_dump utility + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +/* + * Global variables. 
Defined in main.c only, externed in all other files + */ +#ifdef _DECLARE_GLOBALS +#define EXTERN +#define INIT_GLOBAL(a,b) a=b +#define DEFINE_ACPI_GLOBALS 1 +#else +#define EXTERN extern +#define INIT_GLOBAL(a,b) a +#endif + +#include <acpi/acpi.h> +#include "accommon.h" +#include "actables.h" + +#include <stdio.h> +#include <fcntl.h> +#include <errno.h> +#include <sys/stat.h> + +/* Globals */ + +EXTERN u8 INIT_GLOBAL(gbl_summary_mode, FALSE); +EXTERN u8 INIT_GLOBAL(gbl_verbose_mode, FALSE); +EXTERN u8 INIT_GLOBAL(gbl_binary_mode, FALSE); +EXTERN u8 INIT_GLOBAL(gbl_dump_customized_tables, FALSE); +EXTERN u8 INIT_GLOBAL(gbl_do_not_dump_xsdt, FALSE); +EXTERN FILE INIT_GLOBAL(*gbl_output_file, NULL); +EXTERN char INIT_GLOBAL(*gbl_output_filename, NULL); +EXTERN u64 INIT_GLOBAL(gbl_rsdp_base, 0); + +/* Globals required for use with ACPICA modules */ + +#ifdef _DECLARE_GLOBALS +u8 acpi_gbl_integer_byte_width = 8; +#endif + +/* Action table used to defer requested options */ + +struct ap_dump_action { + char *argument; + u32 to_be_done; +}; + +#define AP_MAX_ACTIONS 32 + +#define AP_DUMP_ALL_TABLES 0 +#define AP_DUMP_TABLE_BY_ADDRESS 1 +#define AP_DUMP_TABLE_BY_NAME 2 +#define AP_DUMP_TABLE_BY_FILE 3 + +#define AP_MAX_ACPI_FILES 256 /* Prevent infinite loops */ + +/* Minimum FADT sizes for various table addresses */ + +#define MIN_FADT_FOR_DSDT (ACPI_FADT_OFFSET (dsdt) + sizeof (u32)) +#define MIN_FADT_FOR_FACS (ACPI_FADT_OFFSET (facs) + sizeof (u32)) +#define MIN_FADT_FOR_XDSDT (ACPI_FADT_OFFSET (Xdsdt) + sizeof (u64)) +#define MIN_FADT_FOR_XFACS (ACPI_FADT_OFFSET (Xfacs) + sizeof (u64)) + +/* + * apdump - Table get/dump routines + */ +int ap_dump_table_from_file(char *pathname); + +int ap_dump_table_by_name(char *signature); + +int ap_dump_table_by_address(char *ascii_address); + +int ap_dump_all_tables(void); + +u8 ap_is_valid_header(struct acpi_table_header *table); + +u8 ap_is_valid_checksum(struct acpi_table_header *table); + +u32 ap_get_table_length(struct acpi_table_header *table); + +/* + * apfiles - File I/O utilities + */ +int ap_open_output_file(char *pathname); + +int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance); + +struct acpi_table_header *ap_get_table_from_file(char *pathname, + u32 *file_size); diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c new file mode 100644 index 00000000000..3cac1237836 --- /dev/null +++ b/tools/power/acpi/tools/acpidump/apdump.c @@ -0,0 +1,451 @@ +/****************************************************************************** + * + * Module Name: apdump - Dump routines for ACPI tables (acpidump) + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include "acpidump.h" + +/* Local prototypes */ + +static int +ap_dump_table_buffer(struct acpi_table_header *table, + u32 instance, acpi_physical_address address); + +/****************************************************************************** + * + * FUNCTION: ap_is_valid_header + * + * PARAMETERS: table - Pointer to table to be validated + * + * RETURN: TRUE if the header appears to be valid. FALSE otherwise + * + * DESCRIPTION: Check for a valid ACPI table header + * + ******************************************************************************/ + +u8 ap_is_valid_header(struct acpi_table_header *table) +{ + + if (!ACPI_VALIDATE_RSDP_SIG(table->signature)) { + + /* Make sure signature is all ASCII and a valid ACPI name */ + + if (!acpi_ut_valid_acpi_name(table->signature)) { + fprintf(stderr, + "Table signature (0x%8.8X) is invalid\n", + *(u32 *)table->signature); + return (FALSE); + } + + /* Check for minimum table length */ + + if (table->length < sizeof(struct acpi_table_header)) { + fprintf(stderr, "Table length (0x%8.8X) is invalid\n", + table->length); + return (FALSE); + } + } + + return (TRUE); +} + +/****************************************************************************** + * + * FUNCTION: ap_is_valid_checksum + * + * PARAMETERS: table - Pointer to table to be validated + * + * RETURN: TRUE if the checksum appears to be valid. FALSE otherwise. + * + * DESCRIPTION: Check for a valid ACPI table checksum. + * + ******************************************************************************/ + +u8 ap_is_valid_checksum(struct acpi_table_header *table) +{ + acpi_status status; + struct acpi_table_rsdp *rsdp; + + if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { + /* + * Checksum for RSDP. + * Note: Other checksums are computed during the table dump. 
+ */ + rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table); + status = acpi_tb_validate_rsdp(rsdp); + } else { + status = acpi_tb_verify_checksum(table, table->length); + } + + if (ACPI_FAILURE(status)) { + fprintf(stderr, "%4.4s: Warning: wrong checksum in table\n", + table->signature); + } + + return (AE_OK); +} + +/****************************************************************************** + * + * FUNCTION: ap_get_table_length + * + * PARAMETERS: table - Pointer to the table + * + * RETURN: Table length + * + * DESCRIPTION: Obtain table length according to table signature. + * + ******************************************************************************/ + +u32 ap_get_table_length(struct acpi_table_header *table) +{ + struct acpi_table_rsdp *rsdp; + + /* Check if table is valid */ + + if (!ap_is_valid_header(table)) { + return (0); + } + + if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { + rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table); + return (rsdp->length); + } + + /* Normal ACPI table */ + + return (table->length); +} + +/****************************************************************************** + * + * FUNCTION: ap_dump_table_buffer + * + * PARAMETERS: table - ACPI table to be dumped + * instance - ACPI table instance no. to be dumped + * address - Physical address of the table + * + * RETURN: None + * + * DESCRIPTION: Dump an ACPI table in standard ASCII hex format, with a + * header that is compatible with the acpi_xtract utility. + * + ******************************************************************************/ + +static int +ap_dump_table_buffer(struct acpi_table_header *table, + u32 instance, acpi_physical_address address) +{ + u32 table_length; + + table_length = ap_get_table_length(table); + + /* Print only the header if requested */ + + if (gbl_summary_mode) { + acpi_tb_print_table_header(address, table); + return (0); + } + + /* Dump to binary file if requested */ + + if (gbl_binary_mode) { + return (ap_write_to_binary_file(table, instance)); + } + + /* + * Dump the table with header for use with acpixtract utility. + * Note: simplest to just always emit a 64-bit address. acpi_xtract + * utility can handle this. + */ + printf("%4.4s @ 0x%8.8X%8.8X\n", table->signature, + ACPI_FORMAT_UINT64(address)); + + acpi_ut_dump_buffer(ACPI_CAST_PTR(u8, table), table_length, + DB_BYTE_DISPLAY, 0); + printf("\n"); + return (0); +} + +/****************************************************************************** + * + * FUNCTION: ap_dump_all_tables + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Get all tables from the RSDT/XSDT (or at least all of the + * tables that we can possibly get). 
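ap_is_valid_checksum() above delegates the real work to the ACPICA table manager; the underlying convention is simply that every byte of a system description table, including the checksum field itself, must sum to zero modulo 256 (the RSDP additionally carries an extended checksum over its full length). A generic illustration of that rule, not the ACPICA implementation:

#include <stdint.h>
#include <stddef.h>

/*
 * Returns 1 if the buffer obeys the ACPI checksum rule: all bytes,
 * including the checksum field itself, sum to zero modulo 256.
 */
static int acpi_checksum_ok(const uint8_t *table, size_t length)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < length; i++)
                sum = (uint8_t)(sum + table[i]);

        return (sum == 0);
}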
+ * + ******************************************************************************/ + +int ap_dump_all_tables(void) +{ + struct acpi_table_header *table; + u32 instance = 0; + acpi_physical_address address; + acpi_status status; + int table_status; + u32 i; + + /* Get and dump all available ACPI tables */ + + for (i = 0; i < AP_MAX_ACPI_FILES; i++) { + status = + acpi_os_get_table_by_index(i, &table, &instance, &address); + if (ACPI_FAILURE(status)) { + + /* AE_LIMIT means that no more tables are available */ + + if (status == AE_LIMIT) { + return (0); + } else if (i == 0) { + fprintf(stderr, + "Could not get ACPI tables, %s\n", + acpi_format_exception(status)); + return (-1); + } else { + fprintf(stderr, + "Could not get ACPI table at index %u, %s\n", + i, acpi_format_exception(status)); + continue; + } + } + + table_status = ap_dump_table_buffer(table, instance, address); + free(table); + + if (table_status) { + break; + } + } + + /* Something seriously bad happened if the loop terminates here */ + + return (-1); +} + +/****************************************************************************** + * + * FUNCTION: ap_dump_table_by_address + * + * PARAMETERS: ascii_address - Address for requested ACPI table + * + * RETURN: Status + * + * DESCRIPTION: Get an ACPI table via a physical address and dump it. + * + ******************************************************************************/ + +int ap_dump_table_by_address(char *ascii_address) +{ + acpi_physical_address address; + struct acpi_table_header *table; + acpi_status status; + int table_status; + u64 long_address; + + /* Convert argument to an integer physical address */ + + status = acpi_ut_strtoul64(ascii_address, 0, &long_address); + if (ACPI_FAILURE(status)) { + fprintf(stderr, "%s: Could not convert to a physical address\n", + ascii_address); + return (-1); + } + + address = (acpi_physical_address) long_address; + status = acpi_os_get_table_by_address(address, &table); + if (ACPI_FAILURE(status)) { + fprintf(stderr, "Could not get table at 0x%8.8X%8.8X, %s\n", + ACPI_FORMAT_UINT64(address), + acpi_format_exception(status)); + return (-1); + } + + table_status = ap_dump_table_buffer(table, 0, address); + free(table); + return (table_status); +} + +/****************************************************************************** + * + * FUNCTION: ap_dump_table_by_name + * + * PARAMETERS: signature - Requested ACPI table signature + * + * RETURN: Status + * + * DESCRIPTION: Get an ACPI table via a signature and dump it. Handles + * multiple tables with the same signature (SSDTs). 
+ * + ******************************************************************************/ + +int ap_dump_table_by_name(char *signature) +{ + char local_signature[ACPI_NAME_SIZE + 1]; + u32 instance; + struct acpi_table_header *table; + acpi_physical_address address; + acpi_status status; + int table_status; + + if (strlen(signature) != ACPI_NAME_SIZE) { + fprintf(stderr, + "Invalid table signature [%s]: must be exactly 4 characters\n", + signature); + return (-1); + } + + /* Table signatures are expected to be uppercase */ + + strcpy(local_signature, signature); + acpi_ut_strupr(local_signature); + + /* To be friendly, handle tables whose signatures do not match the name */ + + if (ACPI_COMPARE_NAME(local_signature, "FADT")) { + strcpy(local_signature, ACPI_SIG_FADT); + } else if (ACPI_COMPARE_NAME(local_signature, "MADT")) { + strcpy(local_signature, ACPI_SIG_MADT); + } + + /* Dump all instances of this signature (to handle multiple SSDTs) */ + + for (instance = 0; instance < AP_MAX_ACPI_FILES; instance++) { + status = acpi_os_get_table_by_name(local_signature, instance, + &table, &address); + if (ACPI_FAILURE(status)) { + + /* AE_LIMIT means that no more tables are available */ + + if (status == AE_LIMIT) { + return (0); + } + + fprintf(stderr, + "Could not get ACPI table with signature [%s], %s\n", + local_signature, acpi_format_exception(status)); + return (-1); + } + + table_status = ap_dump_table_buffer(table, instance, address); + free(table); + + if (table_status) { + break; + } + } + + /* Something seriously bad happened if the loop terminates here */ + + return (-1); +} + +/****************************************************************************** + * + * FUNCTION: ap_dump_table_from_file + * + * PARAMETERS: pathname - File containing the binary ACPI table + * + * RETURN: Status + * + * DESCRIPTION: Dump an ACPI table from a binary file + * + ******************************************************************************/ + +int ap_dump_table_from_file(char *pathname) +{ + struct acpi_table_header *table; + u32 file_size = 0; + int table_status = -1; + + /* Get the entire ACPI table from the file */ + + table = ap_get_table_from_file(pathname, &file_size); + if (!table) { + return (-1); + } + + /* File must be at least as long as the table length */ + + if (table->length > file_size) { + fprintf(stderr, + "Table length (0x%X) is too large for input file (0x%X) %s\n", + table->length, file_size, pathname); + goto exit; + } + + if (gbl_verbose_mode) { + fprintf(stderr, + "Input file: %s contains table [%4.4s], 0x%X (%u) bytes\n", + pathname, table->signature, file_size, file_size); + } + + table_status = ap_dump_table_buffer(table, 0, 0); + +exit: + free(table); + return (table_status); +} + +/****************************************************************************** + * + * FUNCTION: acpi_os* print functions + * + * DESCRIPTION: Used for linkage with ACPICA modules + * + ******************************************************************************/ + +void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *fmt, ...) 
+{ + va_list args; + + va_start(args, fmt); + vfprintf(stdout, fmt, args); + va_end(args); +} + +void acpi_os_vprintf(const char *fmt, va_list args) +{ + vfprintf(stdout, fmt, args); +} diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c new file mode 100644 index 00000000000..4488accc010 --- /dev/null +++ b/tools/power/acpi/tools/acpidump/apfiles.c @@ -0,0 +1,228 @@ +/****************************************************************************** + * + * Module Name: apfiles - File-related functions for acpidump utility + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include "acpidump.h" +#include "acapps.h" + +/****************************************************************************** + * + * FUNCTION: ap_open_output_file + * + * PARAMETERS: pathname - Output filename + * + * RETURN: Open file handle + * + * DESCRIPTION: Open a text output file for acpidump. Checks if file already + * exists. + * + ******************************************************************************/ + +int ap_open_output_file(char *pathname) +{ + struct stat stat_info; + FILE *file; + + /* If file exists, prompt for overwrite */ + + if (!stat(pathname, &stat_info)) { + fprintf(stderr, + "Target path already exists, overwrite? 
[y|n] "); + + if (getchar() != 'y') { + return (-1); + } + } + + /* Point stdout to the file */ + + file = freopen(pathname, "w", stdout); + if (!file) { + perror("Could not open output file"); + return (-1); + } + + /* Save the file and path */ + + gbl_output_file = file; + gbl_output_filename = pathname; + return (0); +} + +/****************************************************************************** + * + * FUNCTION: ap_write_to_binary_file + * + * PARAMETERS: table - ACPI table to be written + * instance - ACPI table instance no. to be written + * + * RETURN: Status + * + * DESCRIPTION: Write an ACPI table to a binary file. Builds the output + * filename from the table signature. + * + ******************************************************************************/ + +int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance) +{ + char filename[ACPI_NAME_SIZE + 16]; + char instance_str[16]; + FILE *file; + size_t actual; + u32 table_length; + + /* Obtain table length */ + + table_length = ap_get_table_length(table); + + /* Construct lower-case filename from the table local signature */ + + if (ACPI_VALIDATE_RSDP_SIG(table->signature)) { + ACPI_MOVE_NAME(filename, ACPI_RSDP_NAME); + } else { + ACPI_MOVE_NAME(filename, table->signature); + } + filename[0] = (char)ACPI_TOLOWER(filename[0]); + filename[1] = (char)ACPI_TOLOWER(filename[1]); + filename[2] = (char)ACPI_TOLOWER(filename[2]); + filename[3] = (char)ACPI_TOLOWER(filename[3]); + filename[ACPI_NAME_SIZE] = 0; + + /* Handle multiple SSDts - create different filenames for each */ + + if (instance > 0) { + sprintf(instance_str, "%u", instance); + strcat(filename, instance_str); + } + + strcat(filename, ACPI_TABLE_FILE_SUFFIX); + + if (gbl_verbose_mode) { + fprintf(stderr, + "Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n", + table->signature, filename, table->length, + table->length); + } + + /* Open the file and dump the entire table in binary mode */ + + file = fopen(filename, "wb"); + if (!file) { + perror("Could not open output file"); + return (-1); + } + + actual = fwrite(table, 1, table_length, file); + if (actual != table_length) { + perror("Error writing binary output file"); + fclose(file); + return (-1); + } + + fclose(file); + return (0); +} + +/****************************************************************************** + * + * FUNCTION: ap_get_table_from_file + * + * PARAMETERS: pathname - File containing the binary ACPI table + * out_file_size - Where the file size is returned + * + * RETURN: Buffer containing the ACPI table. NULL on error. 
+ * + * DESCRIPTION: Open a file and read it entirely into a new buffer + * + ******************************************************************************/ + +struct acpi_table_header *ap_get_table_from_file(char *pathname, + u32 *out_file_size) +{ + struct acpi_table_header *buffer = NULL; + FILE *file; + u32 file_size; + size_t actual; + + /* Must use binary mode */ + + file = fopen(pathname, "rb"); + if (!file) { + perror("Could not open input file"); + return (NULL); + } + + /* Need file size to allocate a buffer */ + + file_size = cm_get_file_size(file); + if (file_size == ACPI_UINT32_MAX) { + fprintf(stderr, + "Could not get input file size: %s\n", pathname); + goto cleanup; + } + + /* Allocate a buffer for the entire file */ + + buffer = calloc(1, file_size); + if (!buffer) { + fprintf(stderr, + "Could not allocate file buffer of size: %u\n", + file_size); + goto cleanup; + } + + /* Read the entire file */ + + actual = fread(buffer, 1, file_size, file); + if (actual != file_size) { + fprintf(stderr, "Could not read input file: %s\n", pathname); + free(buffer); + buffer = NULL; + goto cleanup; + } + + *out_file_size = file_size; + +cleanup: + fclose(file); + return (buffer); +} diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c new file mode 100644 index 00000000000..51e8d638db1 --- /dev/null +++ b/tools/power/acpi/tools/acpidump/apmain.c @@ -0,0 +1,351 @@ +/****************************************************************************** + * + * Module Name: apmain - Main module for the acpidump utility + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2014, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ */ + +#define _DECLARE_GLOBALS +#include "acpidump.h" +#include "acapps.h" + +/* + * acpidump - A portable utility for obtaining system ACPI tables and dumping + * them in an ASCII hex format suitable for binary extraction via acpixtract. + * + * Obtaining the system ACPI tables is an OS-specific operation. + * + * This utility can be ported to any host operating system by providing a + * module containing system-specific versions of these interfaces: + * + * acpi_os_get_table_by_address + * acpi_os_get_table_by_index + * acpi_os_get_table_by_name + * + * See the ACPICA Reference Guide for the exact definitions of these + * interfaces. Also, see these ACPICA source code modules for example + * implementations: + * + * source/os_specific/service_layers/oswintbl.c + * source/os_specific/service_layers/oslinuxtbl.c + */ + +/* Local prototypes */ + +static void ap_display_usage(void); + +static int ap_do_options(int argc, char **argv); + +static void ap_insert_action(char *argument, u32 to_be_done); + +/* Table for deferred actions from command line options */ + +struct ap_dump_action action_table[AP_MAX_ACTIONS]; +u32 current_action = 0; + +#define AP_UTILITY_NAME "ACPI Binary Table Dump Utility" +#define AP_SUPPORTED_OPTIONS "?a:bcf:hn:o:r:svxz" + +/****************************************************************************** + * + * FUNCTION: ap_display_usage + * + * DESCRIPTION: Usage message for the acpi_dump utility + * + ******************************************************************************/ + +static void ap_display_usage(void) +{ + + ACPI_USAGE_HEADER("acpidump [options]"); + + ACPI_OPTION("-b", "Dump tables to binary files"); + ACPI_OPTION("-c", "Dump customized tables"); + ACPI_OPTION("-h -?", "This help message"); + ACPI_OPTION("-o <File>", "Redirect output to file"); + ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP"); + ACPI_OPTION("-s", "Print table summaries only"); + ACPI_OPTION("-v", "Display version information"); + ACPI_OPTION("-z", "Verbose mode"); + + printf("\nTable Options:\n"); + + ACPI_OPTION("-a <Address>", "Get table via a physical address"); + ACPI_OPTION("-f <BinaryFile>", "Get table via a binary file"); + ACPI_OPTION("-n <Signature>", "Get table via a name/signature"); + ACPI_OPTION("-x", "Do not use but dump XSDT"); + ACPI_OPTION("-x -x", "Do not use or dump XSDT"); + + printf("\n" + "Invocation without parameters dumps all available tables\n" + "Multiple mixed instances of -a, -f, and -n are supported\n\n"); +} + +/****************************************************************************** + * + * FUNCTION: ap_insert_action + * + * PARAMETERS: argument - Pointer to the argument for this action + * to_be_done - What to do to process this action + * + * RETURN: None. Exits program if action table becomes full. 
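The porting note above only names the three OS-specific table interfaces; their shapes can be inferred from the apdump.c call sites earlier in this patch. A hypothetical new port could start from stubs like these, where AE_SUPPORT is the ACPICA "not supported" status:

#include <acpi/acpi.h>

/*
 * Stub OSL table interfaces for a port that has not implemented them
 * yet; signatures inferred from the apdump.c call sites above.
 */
acpi_status
acpi_os_get_table_by_address(acpi_physical_address address,
                             struct acpi_table_header **table)
{
        return (AE_SUPPORT);
}

acpi_status
acpi_os_get_table_by_index(u32 index, struct acpi_table_header **table,
                           u32 *instance, acpi_physical_address *address)
{
        return (AE_SUPPORT);
}

acpi_status
acpi_os_get_table_by_name(char *signature, u32 instance,
                          struct acpi_table_header **table,
                          acpi_physical_address *address)
{
        return (AE_SUPPORT);
}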
+ * + * DESCRIPTION: Add an action item to the action table + * + ******************************************************************************/ + +static void ap_insert_action(char *argument, u32 to_be_done) +{ + + /* Insert action and check for table overflow */ + + action_table[current_action].argument = argument; + action_table[current_action].to_be_done = to_be_done; + + current_action++; + if (current_action > AP_MAX_ACTIONS) { + fprintf(stderr, "Too many table options (max %u)\n", + AP_MAX_ACTIONS); + exit(-1); + } +} + +/****************************************************************************** + * + * FUNCTION: ap_do_options + * + * PARAMETERS: argc/argv - Standard argc/argv + * + * RETURN: Status + * + * DESCRIPTION: Command line option processing. The main actions for getting + * and dumping tables are deferred via the action table. + * + *****************************************************************************/ + +static int ap_do_options(int argc, char **argv) +{ + int j; + acpi_status status; + + /* Command line options */ + + while ((j = acpi_getopt(argc, argv, AP_SUPPORTED_OPTIONS)) != EOF) + switch (j) { + /* + * Global options + */ + case 'b': /* Dump all input tables to binary files */ + + gbl_binary_mode = TRUE; + continue; + + case 'c': /* Dump customized tables */ + + gbl_dump_customized_tables = TRUE; + continue; + + case 'h': + case '?': + + ap_display_usage(); + exit(0); + + case 'o': /* Redirect output to a single file */ + + if (ap_open_output_file(acpi_gbl_optarg)) { + exit(-1); + } + continue; + + case 'r': /* Dump tables from specified RSDP */ + + status = + acpi_ut_strtoul64(acpi_gbl_optarg, 0, + &gbl_rsdp_base); + if (ACPI_FAILURE(status)) { + fprintf(stderr, + "%s: Could not convert to a physical address\n", + acpi_gbl_optarg); + exit(-1); + } + continue; + + case 's': /* Print table summaries only */ + + gbl_summary_mode = TRUE; + continue; + + case 'x': /* Do not use XSDT */ + + if (!acpi_gbl_do_not_use_xsdt) { + acpi_gbl_do_not_use_xsdt = TRUE; + } else { + gbl_do_not_dump_xsdt = TRUE; + } + continue; + + case 'v': /* Revision/version */ + + printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME)); + exit(0); + + case 'z': /* Verbose mode */ + + gbl_verbose_mode = TRUE; + fprintf(stderr, ACPI_COMMON_SIGNON(AP_UTILITY_NAME)); + continue; + + /* + * Table options + */ + case 'a': /* Get table by physical address */ + + ap_insert_action(acpi_gbl_optarg, + AP_DUMP_TABLE_BY_ADDRESS); + break; + + case 'f': /* Get table from a file */ + + ap_insert_action(acpi_gbl_optarg, + AP_DUMP_TABLE_BY_FILE); + break; + + case 'n': /* Get table by input name (signature) */ + + ap_insert_action(acpi_gbl_optarg, + AP_DUMP_TABLE_BY_NAME); + break; + + default: + + ap_display_usage(); + exit(-1); + } + + /* If there are no actions, this means "get/dump all tables" */ + + if (current_action == 0) { + ap_insert_action(NULL, AP_DUMP_ALL_TABLES); + } + + return (0); +} + +/****************************************************************************** + * + * FUNCTION: main + * + * PARAMETERS: argc/argv - Standard argc/argv + * + * RETURN: Status + * + * DESCRIPTION: C main function for acpidump utility + * + ******************************************************************************/ + +int ACPI_SYSTEM_XFACE main(int argc, char *argv[]) +{ + int status = 0; + struct ap_dump_action *action; + u32 file_size; + u32 i; + + ACPI_DEBUG_INITIALIZE(); /* For debug version only */ + + /* Process command line options */ + + if (ap_do_options(argc, argv)) { + return (-1); + } + + /* Get/dump 
ACPI table(s) as requested */ + + for (i = 0; i < current_action; i++) { + action = &action_table[i]; + switch (action->to_be_done) { + case AP_DUMP_ALL_TABLES: + + status = ap_dump_all_tables(); + break; + + case AP_DUMP_TABLE_BY_ADDRESS: + + status = ap_dump_table_by_address(action->argument); + break; + + case AP_DUMP_TABLE_BY_NAME: + + status = ap_dump_table_by_name(action->argument); + break; + + case AP_DUMP_TABLE_BY_FILE: + + status = ap_dump_table_from_file(action->argument); + break; + + default: + + fprintf(stderr, + "Internal error, invalid action: 0x%X\n", + action->to_be_done); + return (-1); + } + + if (status) { + return (status); + } + } + + if (gbl_output_file) { + if (gbl_verbose_mode) { + + /* Summary for the output file */ + + file_size = cm_get_file_size(gbl_output_file); + fprintf(stderr, + "Output file %s contains 0x%X (%u) bytes\n\n", + gbl_output_filename, file_size, file_size); + } + + fclose(gbl_output_file); + } + + return (status); +} diff --git a/tools/power/acpi/tools/ec/Makefile b/tools/power/acpi/tools/ec/Makefile new file mode 100644 index 00000000000..b7b0b929bd3 --- /dev/null +++ b/tools/power/acpi/tools/ec/Makefile @@ -0,0 +1,22 @@ +ec_access: ec_access.o + $(ECHO) " LD " $@ + $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $< -o $@ + $(QUIET) $(STRIPCMD) $@ + +%.o: %.c + $(ECHO) " CC " $@ + $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< + +all: ec_access + +install: + $(INSTALL) -d $(DESTDIR)${sbindir} + $(INSTALL_PROGRAM) ec_access $(DESTDIR)${sbindir} + +uninstall: + - rm -f $(DESTDIR)${sbindir}/ec_access + +clean: + -rm -f $(OUTPUT)ec_access + +.PHONY: all install uninstall diff --git a/tools/power/acpi/tools/ec/ec_access.c b/tools/power/acpi/tools/ec/ec_access.c new file mode 100644 index 00000000000..6b8aaed44f2 --- /dev/null +++ b/tools/power/acpi/tools/ec/ec_access.c @@ -0,0 +1,238 @@ +/* + * ec_access.c + * + * Copyright (C) 2010 SUSE Linux Products GmbH + * Author: + * Thomas Renninger <trenn@suse.de> + * + * This work is licensed under the terms of the GNU GPL, version 2. 
+ */ + +#include <fcntl.h> +#include <err.h> +#include <stdio.h> +#include <stdlib.h> +#include <libgen.h> +#include <unistd.h> +#include <getopt.h> +#include <stdint.h> +#include <sys/types.h> +#include <sys/stat.h> + + +#define EC_SPACE_SIZE 256 +#define SYSFS_PATH "/sys/kernel/debug/ec/ec0/io" + +/* TBD/Enhancements: + - Provide param for accessing different ECs (not supported by kernel yet) +*/ + +static int read_mode = -1; +static int sleep_time; +static int write_byte_offset = -1; +static int read_byte_offset = -1; +static uint8_t write_value = -1; + +void usage(char progname[], int exit_status) +{ + printf("Usage:\n"); + printf("1) %s -r [-s sleep]\n", basename(progname)); + printf("2) %s -b byte_offset\n", basename(progname)); + printf("3) %s -w byte_offset -v value\n\n", basename(progname)); + + puts("\t-r [-s sleep] : Dump EC registers"); + puts("\t If sleep is given, sleep x seconds,"); + puts("\t re-read EC registers and show changes"); + puts("\t-b offset : Read value at byte_offset (in hex)"); + puts("\t-w offset -v value : Write value at byte_offset"); + puts("\t-h : Print this help\n\n"); + puts("Offsets and values are in hexadecimal number sytem."); + puts("The offset and value must be between 0 and 0xff."); + exit(exit_status); +} + +void parse_opts(int argc, char *argv[]) +{ + int c; + + while ((c = getopt(argc, argv, "rs:b:w:v:h")) != -1) { + + switch (c) { + case 'r': + if (read_mode != -1) + usage(argv[0], EXIT_FAILURE); + read_mode = 1; + break; + case 's': + if (read_mode != -1 && read_mode != 1) + usage(argv[0], EXIT_FAILURE); + + sleep_time = atoi(optarg); + if (sleep_time <= 0) { + sleep_time = 0; + usage(argv[0], EXIT_FAILURE); + printf("Bad sleep time: %s\n", optarg); + } + break; + case 'b': + if (read_mode != -1) + usage(argv[0], EXIT_FAILURE); + read_mode = 1; + read_byte_offset = strtoul(optarg, NULL, 16); + break; + case 'w': + if (read_mode != -1) + usage(argv[0], EXIT_FAILURE); + read_mode = 0; + write_byte_offset = strtoul(optarg, NULL, 16); + break; + case 'v': + write_value = strtoul(optarg, NULL, 16); + break; + case 'h': + usage(argv[0], EXIT_SUCCESS); + default: + fprintf(stderr, "Unknown option!\n"); + usage(argv[0], EXIT_FAILURE); + } + } + if (read_mode == 0) { + if (write_byte_offset < 0 || + write_byte_offset >= EC_SPACE_SIZE) { + fprintf(stderr, "Wrong byte offset 0x%.2x, valid: " + "[0-0x%.2x]\n", + write_byte_offset, EC_SPACE_SIZE - 1); + usage(argv[0], EXIT_FAILURE); + } + if (write_value < 0 || + write_value >= 255) { + fprintf(stderr, "Wrong byte offset 0x%.2x, valid:" + "[0-0xff]\n", write_byte_offset); + usage(argv[0], EXIT_FAILURE); + } + } + if (read_mode == 1 && read_byte_offset != -1) { + if (read_byte_offset < -1 || + read_byte_offset >= EC_SPACE_SIZE) { + fprintf(stderr, "Wrong byte offset 0x%.2x, valid: " + "[0-0x%.2x]\n", + read_byte_offset, EC_SPACE_SIZE - 1); + usage(argv[0], EXIT_FAILURE); + } + } + /* Add additional parameter checks here */ +} + +void dump_ec(int fd) +{ + char buf[EC_SPACE_SIZE]; + char buf2[EC_SPACE_SIZE]; + int byte_off, bytes_read; + + bytes_read = read(fd, buf, EC_SPACE_SIZE); + + if (bytes_read == -1) + err(EXIT_FAILURE, "Could not read from %s\n", SYSFS_PATH); + + if (bytes_read != EC_SPACE_SIZE) + fprintf(stderr, "Could only read %d bytes\n", bytes_read); + + printf(" 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F"); + for (byte_off = 0; byte_off < bytes_read; byte_off++) { + if ((byte_off % 16) == 0) + printf("\n%.2X: ", byte_off); + printf(" %.2x ", (uint8_t)buf[byte_off]); + } + printf("\n"); + + 
if (!sleep_time) + return; + + printf("\n"); + lseek(fd, 0, SEEK_SET); + sleep(sleep_time); + + bytes_read = read(fd, buf2, EC_SPACE_SIZE); + + if (bytes_read == -1) + err(EXIT_FAILURE, "Could not read from %s\n", SYSFS_PATH); + + if (bytes_read != EC_SPACE_SIZE) + fprintf(stderr, "Could only read %d bytes\n", bytes_read); + + printf(" 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F"); + for (byte_off = 0; byte_off < bytes_read; byte_off++) { + if ((byte_off % 16) == 0) + printf("\n%.2X: ", byte_off); + + if (buf[byte_off] == buf2[byte_off]) + printf(" %.2x ", (uint8_t)buf2[byte_off]); + else + printf("*%.2x ", (uint8_t)buf2[byte_off]); + } + printf("\n"); +} + +void read_ec_val(int fd, int byte_offset) +{ + uint8_t buf; + int error; + + error = lseek(fd, byte_offset, SEEK_SET); + if (error != byte_offset) + err(EXIT_FAILURE, "Cannot set offset to 0x%.2x", byte_offset); + + error = read(fd, &buf, 1); + if (error != 1) + err(EXIT_FAILURE, "Could not read byte 0x%.2x from %s\n", + byte_offset, SYSFS_PATH); + printf("0x%.2x\n", buf); + return; +} + +void write_ec_val(int fd, int byte_offset, uint8_t value) +{ + int error; + + error = lseek(fd, byte_offset, SEEK_SET); + if (error != byte_offset) + err(EXIT_FAILURE, "Cannot set offset to 0x%.2x", byte_offset); + + error = write(fd, &value, 1); + if (error != 1) + err(EXIT_FAILURE, "Cannot write value 0x%.2x to offset 0x%.2x", + value, byte_offset); +} + +int main(int argc, char *argv[]) +{ + int file_mode = O_RDONLY; + int fd; + + parse_opts(argc, argv); + + if (read_mode == 0) + file_mode = O_WRONLY; + else if (read_mode == 1) + file_mode = O_RDONLY; + else + usage(argv[0], EXIT_FAILURE); + + fd = open(SYSFS_PATH, file_mode); + if (fd == -1) + err(EXIT_FAILURE, "%s", SYSFS_PATH); + + if (read_mode) + if (read_byte_offset == -1) + dump_ec(fd); + else if (read_byte_offset < 0 || + read_byte_offset >= EC_SPACE_SIZE) + usage(argv[0], EXIT_FAILURE); + else + read_ec_val(fd, read_byte_offset); + else + write_ec_val(fd, write_byte_offset, write_value); + close(fd); + + exit(EXIT_SUCCESS); +} diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore index 8a83dd2ffc1..d42073f1260 100644 --- a/tools/power/cpupower/.gitignore +++ b/tools/power/cpupower/.gitignore @@ -20,3 +20,10 @@ utils/cpufreq-set.o utils/cpufreq-aperf.o cpupower bench/cpufreq-bench +debug/kernel/Module.symvers +debug/i386/centrino-decode +debug/i386/dump_psb +debug/i386/intel_gsic +debug/i386/powernow-k8-decode +debug/x86_64/centrino-decode +debug/x86_64/powernow-k8-decode diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index cf397bd26d0..2e2ba2efa0d 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -62,7 +62,7 @@ LIB_MAJ= 0.0.0 LIB_MIN= 0 PACKAGE = cpupower -PACKAGE_BUGREPORT = cpufreq@vger.kernel.org +PACKAGE_BUGREPORT = linux-pm@vger.kernel.org LANGUAGES = de fr it cs pt @@ -128,10 +128,12 @@ UTIL_OBJS = utils/helpers/amd.o utils/helpers/topology.o utils/helpers/msr.o \ utils/helpers/sysfs.o utils/helpers/misc.o utils/helpers/cpuid.o \ utils/helpers/pci.o utils/helpers/bitmask.o \ utils/idle_monitor/nhm_idle.o utils/idle_monitor/snb_idle.o \ + utils/idle_monitor/hsw_ext_idle.o \ utils/idle_monitor/amd_fam14h_idle.o utils/idle_monitor/cpuidle_sysfs.o \ utils/idle_monitor/mperf_monitor.o utils/idle_monitor/cpupower-monitor.o \ utils/cpupower.o utils/cpufreq-info.o utils/cpufreq-set.o \ - utils/cpupower-set.o utils/cpupower-info.o utils/cpuidle-info.o + utils/cpupower-set.o utils/cpupower-info.o 
utils/cpuidle-info.o \ + utils/cpuidle-set.o UTIL_SRC := $(UTIL_OBJS:.o=.c) @@ -253,7 +255,8 @@ clean: | xargs rm -f -rm -f $(OUTPUT)cpupower -rm -f $(OUTPUT)libcpupower.so* - -rm -rf $(OUTPUT)po/*.{gmo,pot} + -rm -rf $(OUTPUT)po/*.gmo + -rm -rf $(OUTPUT)po/*.pot $(MAKE) -C bench O=$(OUTPUT) clean @@ -271,6 +274,8 @@ install-man: $(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1 $(INSTALL_DATA) -D man/cpupower-frequency-set.1 $(DESTDIR)${mandir}/man1/cpupower-frequency-set.1 $(INSTALL_DATA) -D man/cpupower-frequency-info.1 $(DESTDIR)${mandir}/man1/cpupower-frequency-info.1 + $(INSTALL_DATA) -D man/cpupower-idle-set.1 $(DESTDIR)${mandir}/man1/cpupower-idle-set.1 + $(INSTALL_DATA) -D man/cpupower-idle-info.1 $(DESTDIR)${mandir}/man1/cpupower-idle-info.1 $(INSTALL_DATA) -D man/cpupower-set.1 $(DESTDIR)${mandir}/man1/cpupower-set.1 $(INSTALL_DATA) -D man/cpupower-info.1 $(DESTDIR)${mandir}/man1/cpupower-info.1 $(INSTALL_DATA) -D man/cpupower-monitor.1 $(DESTDIR)${mandir}/man1/cpupower-monitor.1 @@ -292,8 +297,12 @@ uninstall: - rm -f $(DESTDIR)${libdir}/libcpupower.* - rm -f $(DESTDIR)${includedir}/cpufreq.h - rm -f $(DESTDIR)${bindir}/utils/cpupower - - rm -f $(DESTDIR)${mandir}/man1/cpufreq-set.1 - - rm -f $(DESTDIR)${mandir}/man1/cpufreq-info.1 + - rm -f $(DESTDIR)${mandir}/man1/cpupower.1 + - rm -f $(DESTDIR)${mandir}/man1/cpupower-frequency-set.1 + - rm -f $(DESTDIR)${mandir}/man1/cpupower-frequency-info.1 + - rm -f $(DESTDIR)${mandir}/man1/cpupower-set.1 + - rm -f $(DESTDIR)${mandir}/man1/cpupower-info.1 + - rm -f $(DESTDIR)${mandir}/man1/cpupower-monitor.1 - for HLANG in $(LANGUAGES); do \ rm -f $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \ done; diff --git a/tools/power/cpupower/README b/tools/power/cpupower/README index fd9d4c0d668..1c68f47663b 100644 --- a/tools/power/cpupower/README +++ b/tools/power/cpupower/README @@ -1,6 +1,4 @@ -The cpufrequtils package (homepage: -http://www.kernel.org/pub/linux/utils/kernel/cpufreq/cpufrequtils.html ) -consists of the following elements: +The cpupower package consists of the following elements: requirements ------------ @@ -11,10 +9,10 @@ providing cpuid.h is needed. For both it's not explicitly checked for (yet). -libcpufreq +libcpupower ---------- -"libcpufreq" is a library which offers a unified access method for userspace +"libcpupower" is a library which offers a unified access method for userspace tools and programs to the cpufreq core and drivers in the Linux kernel. This allows for code reduction in userspace tools, a clean implementation of the interaction to the cpufreq core, and support for both the sysfs and proc @@ -28,22 +26,22 @@ make su make install -should suffice on most systems. It builds default libcpufreq, -cpufreq-set and cpufreq-info files and installs them in /usr/lib and -/usr/bin, respectively. If you want to set up the paths differently and/or -want to configure the package to your specific needs, you need to open -"Makefile" with an editor of your choice and edit the block marked -CONFIGURATION. +should suffice on most systems. It builds libcpupower to put in +/usr/lib; cpupower, cpufreq-bench_plot.sh to put in /usr/bin; and +cpufreq-bench to put in /usr/sbin. If you want to set up the paths +differently and/or want to configure the package to your specific +needs, you need to open "Makefile" with an editor of your choice and +edit the block marked CONFIGURATION. 
THANKS ------ Many thanks to Mattia Dongili who wrote the autotoolization and -libtoolization, the manpages and the italian language file for cpufrequtils; +libtoolization, the manpages and the italian language file for cpupower; to Dave Jones for his feedback and his dump_psb tool; to Bruno Ducrot for his powernow-k8-decode and intel_gsic tools as well as the french language file; and to various others commenting on the previous (pre-)releases of -cpufrequtils. +cpupower. Dominik Brodowski diff --git a/tools/power/cpupower/ToDo b/tools/power/cpupower/ToDo index 874b78b586e..6e8b89f282e 100644 --- a/tools/power/cpupower/ToDo +++ b/tools/power/cpupower/ToDo @@ -3,7 +3,6 @@ ToDos sorted by priority: - Use bitmask functions to parse CPU topology more robust (current implementation has issues on AMD) - Try to read out boost states and frequencies on Intel -- Adjust README - Somewhere saw the ability to read power consumption of RAM from HW on Intel SandyBridge -> another monitor? - Add another c1e debug idle monitor diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile index 3ba158f0e28..c05cc0ac80c 100644 --- a/tools/power/cpupower/debug/i386/Makefile +++ b/tools/power/cpupower/debug/i386/Makefile @@ -26,7 +26,10 @@ $(OUTPUT)powernow-k8-decode: powernow-k8-decode.c all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode clean: - rm -rf $(OUTPUT){centrino-decode,dump_psb,intel_gsic,powernow-k8-decode} + rm -rf $(OUTPUT)centrino-decode + rm -rf $(OUTPUT)dump_psb + rm -rf $(OUTPUT)intel_gsic + rm -rf $(OUTPUT)powernow-k8-decode install: $(INSTALL) -d $(DESTDIR)${bindir} diff --git a/tools/power/cpupower/debug/i386/intel_gsic.c b/tools/power/cpupower/debug/i386/intel_gsic.c index 53f5293c9c9..d032c826d42 100644 --- a/tools/power/cpupower/debug/i386/intel_gsic.c +++ b/tools/power/cpupower/debug/i386/intel_gsic.c @@ -66,7 +66,7 @@ int main (void) printf("ecx = 0x%.8x\n", r.ecx); printf("edx = 0x%.8x\n", r.edx); printf("Note also that some BIOS do not support the initial " - "GSIC call, but the newer\nspeeedstep-smi driver may " + "GSIC call, but the newer\nspeedstep-smi driver may " "work.\nFor this, you need to pass some arguments to " "the speedstep-smi driver:\n"); printf("\tsmi_cmd=0x?? smi_port=0x?? smi_sig=1\n"); diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c index 66cace601e5..5224ee5b392 100644 --- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c +++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c @@ -18,19 +18,16 @@ * 5.) if the third value, "diff_pmtmr", changes between 2. and 4., the * TSC-based delay routine on the Linux kernel does not correctly * handle the cpufreq transition. Please report this to - * cpufreq@vger.kernel.org + * linux-pm@vger.kernel.org */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> - +#include <linux/acpi.h> #include <asm/io.h> -#include <acpi/acpi_bus.h> -#include <acpi/acpi_drivers.h> - static int pm_tmr_ioport = 0; /*helper function to safely read acpi pm timesource*/ diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1 index 4a1918ea8f9..9c85a382e35 100644 --- a/tools/power/cpupower/man/cpupower-frequency-info.1 +++ b/tools/power/cpupower/man/cpupower-frequency-info.1 @@ -50,6 +50,9 @@ Prints out information like provided by the /proc/cpufreq interface in 2.4. 
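The -n / --no-rounding option added just below is backed by a no_rounding flag in cpufreq-info.c further down in this patch; the difference between the two print_speed() forms is easiest to see on one concrete value (2417894 kHz, picked only for illustration):

#include <stdio.h>

int main(void)
{
        unsigned long speed = 2417894;  /* kHz; example value only */
        unsigned long rounded = speed;

        /* Default output, as in the patched print_speed(): round to 10 MHz */
        if (rounded % 10000 >= 5000)
                rounded += 10000;
        printf("default:     %u.%02u GHz\n",
               (unsigned int)(rounded / 1000000),
               (unsigned int)((rounded % 1000000) / 10000));

        /* With -n / --no-rounding the raw kHz value is kept */
        printf("no-rounding: %u.%06u GHz\n",
               (unsigned int)(speed / 1000000),
               (unsigned int)(speed % 1000000));

        return 0;
}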
and \fB\-m\fR \fB\-\-human\fR human\-readable output for the \-f, \-w, \-s and \-y parameters. .TP +\fB\-n\fR \fB\-\-no-rounding\fR +Output frequencies and latencies without rounding off values. +.TP .SH "REMARKS" .LP By default only values of core zero are displayed. How to display settings of diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1 index 4178effd9e9..7b3646adb92 100644 --- a/tools/power/cpupower/man/cpupower-idle-info.1 +++ b/tools/power/cpupower/man/cpupower-idle-info.1 @@ -87,4 +87,5 @@ Thomas Renninger <trenn@suse.de> .fi .SH "SEE ALSO" .LP -cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1) +cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1), +cpupower\-idle\-set(1) diff --git a/tools/power/cpupower/man/cpupower-idle-set.1 b/tools/power/cpupower/man/cpupower-idle-set.1 new file mode 100644 index 00000000000..3e6799d7a79 --- /dev/null +++ b/tools/power/cpupower/man/cpupower-idle-set.1 @@ -0,0 +1,77 @@ +.TH "CPUPOWER-IDLE-SET" "1" "0.1" "" "cpupower Manual" +.SH "NAME" +.LP +cpupower idle\-set \- Utility to set cpu idle state specific kernel options +.SH "SYNTAX" +.LP +cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP] +.SH "DESCRIPTION" +.LP +The cpupower idle\-set subcommand allows to set cpu idle, also called cpu +sleep state, specific options offered by the kernel. One example is disabling +sleep states. This can be handy for power vs performance tuning. +.SH "OPTIONS" +.LP +.TP +\fB\-d\fR \fB\-\-disable\fR <STATE_NO> +Disable a specific processor sleep state. +.TP +\fB\-e\fR \fB\-\-enable\fR <STATE_NO> +Enable a specific processor sleep state. +.TP +\fB\-D\fR \fB\-\-disable-by-latency\fR <LATENCY> +Disable all idle states with a equal or higher latency than <LATENCY> +.TP +\fB\-E\fR \fB\-\-enable-all\fR +Enable all idle states if not enabled already. + +.SH "REMARKS" +.LP +Cpuidle Governors Policy on Disabling Sleep States + +.RS 4 +Depending on the used cpuidle governor, implementing the kernel policy +how to choose sleep states, subsequent sleep states on this core, might get +disabled as well. + +There are two cpuidle governors ladder and menu. While the ladder +governor is always available, if CONFIG_CPU_IDLE is selected, the +menu governor additionally requires CONFIG_NO_HZ. + +The behavior and the effect of the disable variable depends on the +implementation of a particular governor. In the ladder governor, for +example, it is not coherent, i.e. if one is disabling a light state, +then all deeper states are disabled as well. Likewise, if one enables a +deep state but a lighter state still is disabled, then this has no effect. +.RE +.LP +Disabling the Lightest Sleep State may not have any Affect + +.RS 4 +If criteria are not met to enter deeper sleep states and the lightest sleep +state is chosen when idle, the kernel may still enter this sleep state, +irrespective of whether it is disabled or not. This is also reflected in +the usage count of the disabled sleep state when using the cpupower idle-info +command. +.RE +.LP +Selecting specific CPU Cores + +.RS 4 +By default processor sleep states of all CPU cores are set. Please refer +to the cpupower(1) manpage in the \-\-cpu option section how to disable +C-states of specific cores. 
+.RE +.SH "FILES" +.nf +\fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP +\fI/sys/devices/system/cpu/cpuidle/*\fP +.fi +.SH "AUTHORS" +.nf +Thomas Renninger <trenn@suse.de> +.fi +.SH "SEE ALSO" +.LP +cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1), +cpupower\-idle\-info(1) diff --git a/tools/power/cpupower/man/cpupower-info.1 b/tools/power/cpupower/man/cpupower-info.1 index 58e21196f17..340bcd0be7d 100644 --- a/tools/power/cpupower/man/cpupower-info.1 +++ b/tools/power/cpupower/man/cpupower-info.1 @@ -3,7 +3,7 @@ cpupower\-info \- Shows processor power related kernel or hardware configurations .SH SYNOPSIS .ft B -.B cpupower info [ \-b ] [ \-s ] [ \-m ] +.B cpupower info [ \-b ] .SH DESCRIPTION \fBcpupower info \fP shows kernel configurations or processor hardware diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1 index 1141c207371..914cbb9d9cd 100644 --- a/tools/power/cpupower/man/cpupower-monitor.1 +++ b/tools/power/cpupower/man/cpupower-monitor.1 @@ -7,11 +7,11 @@ cpupower\-monitor \- Report processor frequency and idle statistics .RB "\-l" .B cpupower monitor -.RB [ "\-m <mon1>," [ "<mon2>,..." ] ] +.RB [ -c ] [ "\-m <mon1>," [ "<mon2>,..." ] ] .RB [ "\-i seconds" ] .br .B cpupower monitor -.RB [ "\-m <mon1>," [ "<mon2>,..." ] ] +.RB [ -c ][ "\-m <mon1>," [ "<mon2>,..." ] ] .RB command .br .SH DESCRIPTION @@ -64,6 +64,17 @@ Only display specific monitors. Use the monitor string(s) provided by \-l option Measure intervall. .RE .PP +\-c +.RS 4 +Schedule the process on every core before starting and ending measuring. +This could be needed for the Idle_Stats monitor when no other MSR based +monitor (has to be run on the core that is measured) is run in parallel. +This is to wake up the processors from deeper sleep states and let the +kernel re +-account its cpuidle (C-state) information before reading the +cpuidle timings from sysfs. +.RE +.PP command .RS 4 Measure idle and frequency characteristics of an arbitrary command/workload. @@ -99,13 +110,21 @@ May work poorly on Linux-2.6.20 through 2.6.29, as the \fBacpi-cpufreq \fP kernel frequency driver periodically cleared aperf/mperf registers in those kernels. -.SS "Nehalem" "SandyBridge" +.SS "Nehalem" "SandyBridge" "HaswellExtended" Intel Core and Package sleep state counters. Threads (hyperthreaded cores) may not be able to enter deeper core states if its sibling is utilized. Deepest package sleep states may in reality show up as machine/platform wide sleep states and can only be entered if all cores are idle. Look up Intel manuals (some are provided in the References section) for further details. +The monitors are named after the CPU family where the sleep state capabilities +got introduced and may not match exactly the CPU name of the platform. +For example an IvyBridge processor has sleep state capabilities which got +introduced in Nehalem and SandyBridge processor families. +Thus on an IvyBridge processor one will get Nehalem and SandyBridge sleep +state monitors. +HaswellExtended extra package sleep state capabilities are available only in a +specific Haswell (family 0x45) and probably also other future processors. .SS "Fam_12h" "Fam_14h" AMD laptop and desktop processor (family 12h and 14h) sleep state counters. 
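The -c option described above works by briefly scheduling the measuring process onto every core before and after the measurement, so that sleeping cores wake up and the kernel refreshes its cpuidle accounting. A minimal, hypothetical sketch of that "touch every CPU" step (not cpupower's actual implementation):

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

/*
 * Briefly run on each online CPU so that idle cores wake up and the
 * kernel re-accounts their cpuidle statistics.
 */
static void touch_all_cpus(void)
{
        long cpu, ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        cpu_set_t set;

        for (cpu = 0; cpu < ncpus; cpu++) {
                CPU_ZERO(&set);
                CPU_SET(cpu, &set);
                if (sched_setaffinity(0, sizeof(set), &set) == 0)
                        sched_yield();  /* actually run on that CPU */
        }

        /* Restore: allow running on any CPU again */
        CPU_ZERO(&set);
        for (cpu = 0; cpu < ncpus; cpu++)
                CPU_SET(cpu, &set);
        sched_setaffinity(0, sizeof(set), &set);
}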
diff --git a/tools/power/cpupower/man/cpupower-set.1 b/tools/power/cpupower/man/cpupower-set.1 index 9dbd536518a..2bcc696f449 100644 --- a/tools/power/cpupower/man/cpupower-set.1 +++ b/tools/power/cpupower/man/cpupower-set.1 @@ -3,7 +3,7 @@ cpupower\-set \- Set processor power related kernel or hardware configurations .SH SYNOPSIS .ft B -.B cpupower set [ \-b VAL ] [ \-s VAL ] [ \-m VAL ] +.B cpupower set [ \-b VAL ] .SH DESCRIPTION @@ -55,35 +55,6 @@ Use \fBcpupower -c all info -b\fP to verify. This options needs the msr kernel driver (CONFIG_X86_MSR) loaded. .RE -.PP -\-\-sched\-mc, \-m [ VAL ] -.RE -\-\-sched\-smt, \-s [ VAL ] -.RS 4 -\-\-sched\-mc utilizes cores in one processor package/socket first before -processes are scheduled to other processor packages/sockets. - -\-\-sched\-smt utilizes thread siblings of one processor core first before -processes are scheduled to other cores. - -The impact on power consumption and performance (positiv or negativ) heavily -depends on processor support for deep sleep states, frequency scaling and -frequency boost modes and their dependencies between other thread siblings -and processor cores. - -Taken over from kernel documentation: - -Adjust the kernel's multi-core scheduler support. - -Possible values are: -.RS 2 -0 - No power saving load balance (default value) - -1 - Fill one thread/core/package first for long running threads - -2 - Also bias task wakeups to semi-idle cpu package for power -savings -.RE .SH "SEE ALSO" cpupower-info(1), cpupower-monitor(1), powertop(1) diff --git a/tools/power/cpupower/utils/builtin.h b/tools/power/cpupower/utils/builtin.h index c10496fbe3c..2284c8ea4e2 100644 --- a/tools/power/cpupower/utils/builtin.h +++ b/tools/power/cpupower/utils/builtin.h @@ -5,6 +5,7 @@ extern int cmd_set(int argc, const char **argv); extern int cmd_info(int argc, const char **argv); extern int cmd_freq_set(int argc, const char **argv); extern int cmd_freq_info(int argc, const char **argv); +extern int cmd_idle_set(int argc, const char **argv); extern int cmd_idle_info(int argc, const char **argv); extern int cmd_monitor(int argc, const char **argv); diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c index 28953c9a7bd..b4b90a97662 100644 --- a/tools/power/cpupower/utils/cpufreq-info.c +++ b/tools/power/cpupower/utils/cpufreq-info.c @@ -82,29 +82,42 @@ static void proc_cpufreq_output(void) } } +static int no_rounding; static void print_speed(unsigned long speed) { unsigned long tmp; - if (speed > 1000000) { - tmp = speed % 10000; - if (tmp >= 5000) - speed += 10000; - printf("%u.%02u GHz", ((unsigned int) speed/1000000), - ((unsigned int) (speed%1000000)/10000)); - } else if (speed > 100000) { - tmp = speed % 1000; - if (tmp >= 500) - speed += 1000; - printf("%u MHz", ((unsigned int) speed / 1000)); - } else if (speed > 1000) { - tmp = speed % 100; - if (tmp >= 50) - speed += 100; - printf("%u.%01u MHz", ((unsigned int) speed/1000), - ((unsigned int) (speed%1000)/100)); - } else - printf("%lu kHz", speed); + if (no_rounding) { + if (speed > 1000000) + printf("%u.%06u GHz", ((unsigned int) speed/1000000), + ((unsigned int) speed%1000000)); + else if (speed > 100000) + printf("%u MHz", (unsigned int) speed); + else if (speed > 1000) + printf("%u.%03u MHz", ((unsigned int) speed/1000), + (unsigned int) (speed%1000)); + else + printf("%lu kHz", speed); + } else { + if (speed > 1000000) { + tmp = speed%10000; + if (tmp >= 5000) + speed += 10000; + printf("%u.%02u GHz", ((unsigned int) 
speed/1000000), + ((unsigned int) (speed%1000000)/10000)); + } else if (speed > 100000) { + tmp = speed%1000; + if (tmp >= 500) + speed += 1000; + printf("%u MHz", ((unsigned int) speed/1000)); + } else if (speed > 1000) { + tmp = speed%100; + if (tmp >= 50) + speed += 100; + printf("%u.%01u MHz", ((unsigned int) speed/1000), + ((unsigned int) (speed%1000)/100)); + } + } return; } @@ -113,26 +126,38 @@ static void print_duration(unsigned long duration) { unsigned long tmp; - if (duration > 1000000) { - tmp = duration % 10000; - if (tmp >= 5000) - duration += 10000; - printf("%u.%02u ms", ((unsigned int) duration/1000000), - ((unsigned int) (duration%1000000)/10000)); - } else if (duration > 100000) { - tmp = duration % 1000; - if (tmp >= 500) - duration += 1000; - printf("%u us", ((unsigned int) duration / 1000)); - } else if (duration > 1000) { - tmp = duration % 100; - if (tmp >= 50) - duration += 100; - printf("%u.%01u us", ((unsigned int) duration/1000), - ((unsigned int) (duration%1000)/100)); - } else - printf("%lu ns", duration); - + if (no_rounding) { + if (duration > 1000000) + printf("%u.%06u ms", ((unsigned int) duration/1000000), + ((unsigned int) duration%1000000)); + else if (duration > 100000) + printf("%u us", ((unsigned int) duration/1000)); + else if (duration > 1000) + printf("%u.%03u us", ((unsigned int) duration/1000), + ((unsigned int) duration%1000)); + else + printf("%lu ns", duration); + } else { + if (duration > 1000000) { + tmp = duration%10000; + if (tmp >= 5000) + duration += 10000; + printf("%u.%02u ms", ((unsigned int) duration/1000000), + ((unsigned int) (duration%1000000)/10000)); + } else if (duration > 100000) { + tmp = duration%1000; + if (tmp >= 500) + duration += 1000; + printf("%u us", ((unsigned int) duration / 1000)); + } else if (duration > 1000) { + tmp = duration%100; + if (tmp >= 50) + duration += 100; + printf("%u.%01u us", ((unsigned int) duration/1000), + ((unsigned int) (duration%1000)/100)); + } else + printf("%lu ns", duration); + } return; } @@ -525,6 +550,7 @@ static struct option info_opts[] = { { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, + { .name = "no-rounding", .has_arg = no_argument, .flag = NULL, .val = 'n'}, { }, }; @@ -538,7 +564,8 @@ int cmd_freq_info(int argc, char **argv) int output_param = 0; do { - ret = getopt_long(argc, argv, "oefwldpgrasmyb", info_opts, NULL); + ret = getopt_long(argc, argv, "oefwldpgrasmybn", info_opts, + NULL); switch (ret) { case '?': output_param = '?'; @@ -575,6 +602,9 @@ int cmd_freq_info(int argc, char **argv) } human = 1; break; + case 'n': + no_rounding = 1; + break; default: fprintf(stderr, "invalid or unknown argument\n"); return EXIT_FAILURE; diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c index dd1539eb8c6..a416de80c55 100644 --- a/tools/power/cpupower/utils/cpufreq-set.c +++ b/tools/power/cpupower/utils/cpufreq-set.c @@ -257,7 +257,7 @@ int cmd_freq_set(int argc, char **argv) print_unknown_arg(); return -EINVAL; } - if ((sscanf(optarg, "%s", gov)) != 1) { + if ((sscanf(optarg, "%19s", gov)) != 1) { print_unknown_arg(); return -EINVAL; } diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c index 8145af5f93a..75e66de7e7a 100644 --- a/tools/power/cpupower/utils/cpuidle-info.c +++ b/tools/power/cpupower/utils/cpuidle-info.c @@ -22,7 
+22,7 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose) { - int idlestates, idlestate; + unsigned int idlestates, idlestate; char *tmp; printf(_ ("Analyzing CPU %d:\n"), cpu); @@ -31,10 +31,8 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose) if (idlestates == 0) { printf(_("CPU %u: No idle states\n"), cpu); return; - } else if (idlestates <= 0) { - printf(_("CPU %u: Can't read idle state info\n"), cpu); - return; } + printf(_("Number of idle states: %d\n"), idlestates); printf(_("Available idle states:")); for (idlestate = 0; idlestate < idlestates; idlestate++) { @@ -50,10 +48,14 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose) return; for (idlestate = 0; idlestate < idlestates; idlestate++) { + int disabled = sysfs_is_idlestate_disabled(cpu, idlestate); + /* Disabled interface not supported on older kernels */ + if (disabled < 0) + disabled = 0; tmp = sysfs_get_idlestate_name(cpu, idlestate); if (!tmp) continue; - printf("%s:\n", tmp); + printf("%s%s:\n", tmp, (disabled) ? " (DISABLED) " : ""); free(tmp); tmp = sysfs_get_idlestate_desc(cpu, idlestate); @@ -98,21 +100,13 @@ static void cpuidle_general_output(void) static void proc_cpuidle_cpu_output(unsigned int cpu) { long max_allowed_cstate = 2000000000; - int cstates, cstate; + unsigned int cstate, cstates; cstates = sysfs_get_idlestate_count(cpu); if (cstates == 0) { - /* - * Go on and print same useless info as you'd see with - * cat /proc/acpi/processor/../power - * printf(_("CPU %u: No C-states available\n"), cpu); - * return; - */ - } else if (cstates <= 0) { - printf(_("CPU %u: Can't read C-state info\n"), cpu); + printf(_("CPU %u: No C-states info\n"), cpu); return; } - /* printf("Cstates: %d\n", cstates); */ printf(_("active state: C0\n")); printf(_("max_cstate: C%u\n"), cstates-1); diff --git a/tools/power/cpupower/utils/cpuidle-set.c b/tools/power/cpupower/utils/cpuidle-set.c new file mode 100644 index 00000000000..d45d8d775c0 --- /dev/null +++ b/tools/power/cpupower/utils/cpuidle-set.c @@ -0,0 +1,181 @@ +#include <unistd.h> +#include <stdio.h> +#include <errno.h> +#include <stdlib.h> +#include <limits.h> +#include <string.h> +#include <ctype.h> + +#include <getopt.h> + +#include "cpufreq.h" +#include "helpers/helpers.h" +#include "helpers/sysfs.h" + +static struct option info_opts[] = { + { .name = "disable", + .has_arg = required_argument, .flag = NULL, .val = 'd'}, + { .name = "enable", + .has_arg = required_argument, .flag = NULL, .val = 'e'}, + { .name = "disable-by-latency", + .has_arg = required_argument, .flag = NULL, .val = 'D'}, + { .name = "enable-all", + .has_arg = no_argument, .flag = NULL, .val = 'E'}, + { }, +}; + + +int cmd_idle_set(int argc, char **argv) +{ + extern char *optarg; + extern int optind, opterr, optopt; + int ret = 0, cont = 1, param = 0, disabled; + unsigned long long latency = 0, state_latency; + unsigned int cpu = 0, idlestate = 0, idlestates = 0; + char *endptr; + + do { + ret = getopt_long(argc, argv, "d:e:ED:", info_opts, NULL); + if (ret == -1) + break; + switch (ret) { + case '?': + param = '?'; + cont = 0; + break; + case 'd': + if (param) { + param = -1; + cont = 0; + break; + } + param = ret; + idlestate = atoi(optarg); + break; + case 'e': + if (param) { + param = -1; + cont = 0; + break; + } + param = ret; + idlestate = atoi(optarg); + break; + case 'D': + if (param) { + param = -1; + cont = 0; + break; + } + param = ret; + latency = strtoull(optarg, &endptr, 10); + if (*endptr != '\0') { + printf(_("Bad latency value: %s\n"), optarg); + 
exit(EXIT_FAILURE); + } + break; + case 'E': + if (param) { + param = -1; + cont = 0; + break; + } + param = ret; + break; + case -1: + cont = 0; + break; + } + } while (cont); + + switch (param) { + case -1: + printf(_("You can't specify more than one " + "output-specific argument\n")); + exit(EXIT_FAILURE); + case '?': + printf(_("invalid or unknown argument\n")); + exit(EXIT_FAILURE); + } + + /* Default is: set all CPUs */ + if (bitmask_isallclear(cpus_chosen)) + bitmask_setall(cpus_chosen); + + for (cpu = bitmask_first(cpus_chosen); + cpu <= bitmask_last(cpus_chosen); cpu++) { + + if (!bitmask_isbitset(cpus_chosen, cpu)) + continue; + + if (sysfs_is_cpu_online(cpu) != 1) + continue; + + idlestates = sysfs_get_idlestate_count(cpu); + if (idlestates <= 0) + continue; + + switch (param) { + case 'd': + ret = sysfs_idlestate_disable(cpu, idlestate, 1); + if (ret == 0) + printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); + else if (ret == -1) + printf(_("Idlestate %u not available on CPU %u\n"), + idlestate, cpu); + else if (ret == -2) + printf(_("Idlestate disabling not supported by kernel\n")); + else + printf(_("Idlestate %u not disabled on CPU %u\n"), + idlestate, cpu); + break; + case 'e': + ret = sysfs_idlestate_disable(cpu, idlestate, 0); + if (ret == 0) + printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); + else if (ret == -1) + printf(_("Idlestate %u not available on CPU %u\n"), + idlestate, cpu); + else if (ret == -2) + printf(_("Idlestate enabling not supported by kernel\n")); + else + printf(_("Idlestate %u not enabled on CPU %u\n"), + idlestate, cpu); + break; + case 'D': + for (idlestate = 0; idlestate < idlestates; idlestate++) { + disabled = sysfs_is_idlestate_disabled + (cpu, idlestate); + state_latency = sysfs_get_idlestate_latency + (cpu, idlestate); + printf("CPU: %u - idlestate %u - state_latency: %llu - latency: %llu\n", + cpu, idlestate, state_latency, latency); + if (disabled == 1 || latency > state_latency) + continue; + ret = sysfs_idlestate_disable + (cpu, idlestate, 1); + if (ret == 0) + printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); + } + break; + case 'E': + for (idlestate = 0; idlestate < idlestates; idlestate++) { + disabled = sysfs_is_idlestate_disabled + (cpu, idlestate); + if (disabled == 1) { + ret = sysfs_idlestate_disable + (cpu, idlestate, 0); + if (ret == 0) + printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); + } + } + break; + default: + /* Not reachable with proper args checking */ + printf(_("Invalid or unknown argument\n")); + exit(EXIT_FAILURE); + break; + } + } + return EXIT_SUCCESS; +} diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c index 3f68632c28c..136d979e958 100644 --- a/tools/power/cpupower/utils/cpupower-info.c +++ b/tools/power/cpupower/utils/cpupower-info.c @@ -18,8 +18,6 @@ static struct option set_opts[] = { { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, - { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, - { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, { }, }; @@ -37,8 +35,6 @@ int cmd_info(int argc, char **argv) union { struct { - int sched_mc:1; - int sched_smt:1; int perf_bias:1; }; int params; @@ -49,23 +45,13 @@ int cmd_info(int argc, char **argv) textdomain(PACKAGE); /* parameter parsing */ - while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) { + while ((ret = getopt_long(argc, argv, "b", set_opts, NULL)) != 
-1) { switch (ret) { case 'b': if (params.perf_bias) print_wrong_arg_exit(); params.perf_bias = 1; break; - case 'm': - if (params.sched_mc) - print_wrong_arg_exit(); - params.sched_mc = 1; - break; - case 's': - if (params.sched_smt) - print_wrong_arg_exit(); - params.sched_smt = 1; - break; default: print_wrong_arg_exit(); } @@ -78,25 +64,6 @@ int cmd_info(int argc, char **argv) if (bitmask_isallclear(cpus_chosen)) bitmask_setbit(cpus_chosen, 0); - if (params.sched_mc) { - ret = sysfs_get_sched("mc"); - printf(_("System's multi core scheduler setting: ")); - if (ret < 0) - /* if sysfs file is missing it's: errno == ENOENT */ - printf(_("not supported\n")); - else - printf("%d\n", ret); - } - if (params.sched_smt) { - ret = sysfs_get_sched("smt"); - printf(_("System's thread sibling scheduler setting: ")); - if (ret < 0) - /* if sysfs file is missing it's: errno == ENOENT */ - printf(_("not supported\n")); - else - printf("%d\n", ret); - } - /* Add more per cpu options here */ if (!params.perf_bias) return ret; @@ -125,11 +92,12 @@ int cmd_info(int argc, char **argv) if (params.perf_bias) { ret = msr_intel_get_perf_bias(cpu); if (ret < 0) { - printf(_("Could not read perf-bias value\n")); - break; + fprintf(stderr, + _("Could not read perf-bias value[%d]\n"), ret); + exit(EXIT_FAILURE); } else printf(_("perf-bias: %d\n"), ret); } } - return ret; + return 0; } diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index dc4de376211..573c75f8e3f 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ b/tools/power/cpupower/utils/cpupower-set.c @@ -18,9 +18,7 @@ #include "helpers/bitmask.h" static struct option set_opts[] = { - { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, - { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, - { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, + { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, { }, }; @@ -38,13 +36,11 @@ int cmd_set(int argc, char **argv) union { struct { - int sched_mc:1; - int sched_smt:1; int perf_bias:1; }; int params; } params; - int sched_mc = 0, sched_smt = 0, perf_bias = 0; + int perf_bias = 0; int ret = 0; setlocale(LC_ALL, ""); @@ -52,7 +48,7 @@ int cmd_set(int argc, char **argv) params.params = 0; /* parameter parsing */ - while ((ret = getopt_long(argc, argv, "m:s:b:", + while ((ret = getopt_long(argc, argv, "b:", set_opts, NULL)) != -1) { switch (ret) { case 'b': @@ -66,28 +62,6 @@ int cmd_set(int argc, char **argv) } params.perf_bias = 1; break; - case 'm': - if (params.sched_mc) - print_wrong_arg_exit(); - sched_mc = atoi(optarg); - if (sched_mc < 0 || sched_mc > 2) { - printf(_("--sched-mc param out " - "of range [0-%d]\n"), 2); - print_wrong_arg_exit(); - } - params.sched_mc = 1; - break; - case 's': - if (params.sched_smt) - print_wrong_arg_exit(); - sched_smt = atoi(optarg); - if (sched_smt < 0 || sched_smt > 2) { - printf(_("--sched-smt param out " - "of range [0-%d]\n"), 2); - print_wrong_arg_exit(); - } - params.sched_smt = 1; - break; default: print_wrong_arg_exit(); } @@ -96,19 +70,6 @@ int cmd_set(int argc, char **argv) if (!params.params) print_wrong_arg_exit(); - if (params.sched_mc) { - ret = sysfs_set_sched("mc", sched_mc); - if (ret) - fprintf(stderr, _("Error setting sched-mc %s\n"), - (ret == -ENODEV) ? 
"not supported" : ""); - } - if (params.sched_smt) { - ret = sysfs_set_sched("smt", sched_smt); - if (ret) - fprintf(stderr, _("Error setting sched-smt %s\n"), - (ret == -ENODEV) ? "not supported" : ""); - } - /* Default is: set all CPUs */ if (bitmask_isallclear(cpus_chosen)) bitmask_setall(cpus_chosen); diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c index 52bee591c1c..7cdcf88659c 100644 --- a/tools/power/cpupower/utils/cpupower.c +++ b/tools/power/cpupower/utils/cpupower.c @@ -12,17 +12,14 @@ #include <string.h> #include <unistd.h> #include <errno.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/utsname.h> #include "builtin.h" #include "helpers/helpers.h" #include "helpers/bitmask.h" -struct cmd_struct { - const char *cmd; - int (*main)(int, const char **); - int needs_root; -}; - #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) static int cmd_help(int argc, const char **argv); @@ -43,10 +40,17 @@ int be_verbose; static void print_help(void); +struct cmd_struct { + const char *cmd; + int (*main)(int, const char **); + int needs_root; +}; + static struct cmd_struct commands[] = { { "frequency-info", cmd_freq_info, 0 }, { "frequency-set", cmd_freq_set, 1 }, { "idle-info", cmd_idle_info, 0 }, + { "idle-set", cmd_idle_set, 1 }, { "set", cmd_set, 1 }, { "info", cmd_info, 0 }, { "monitor", cmd_monitor, 0 }, @@ -168,6 +172,8 @@ int main(int argc, const char *argv[]) { const char *cmd; unsigned int i, ret; + struct stat statbuf; + struct utsname uts; cpus_chosen = bitmask_alloc(sysconf(_SC_NPROCESSORS_CONF)); @@ -194,6 +200,15 @@ int main(int argc, const char *argv[]) get_cpu_info(0, &cpupower_cpu_info); run_as_root = !getuid(); + if (run_as_root) { + ret = uname(&uts); + if (!ret && !strcmp(uts.machine, "x86_64") && + stat("/dev/cpu/0/msr", &statbuf) != 0) { + if (system("modprobe msr") == -1) + fprintf(stderr, _("MSR access not available.\n")); + } + } + for (i = 0; i < ARRAY_SIZE(commands); i++) { struct cmd_struct *p = commands + i; diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c index 906895d21cc..93b0aa74ca0 100644 --- a/tools/power/cpupower/utils/helpers/cpuid.c +++ b/tools/power/cpupower/utils/helpers/cpuid.c @@ -158,6 +158,8 @@ out: cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; case 0x2A: /* SNB */ case 0x2D: /* SNB Xeon */ + case 0x3A: /* IVB */ + case 0x3E: /* IVB Xeon */ cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; cpu_info->caps |= CPUPOWER_CAP_IS_SNB; break; diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 2eb584cf2f5..aa9e95486a2 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h @@ -92,6 +92,14 @@ extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info); extern struct cpupower_cpu_info cpupower_cpu_info; /* cpuid and cpuinfo helpers **************************/ +struct cpuid_core_info { + int pkg; + int core; + int cpu; + + /* flags */ + unsigned int is_online:1; +}; /* CPU topology/hierarchy parsing ******************/ struct cpupower_topology { @@ -101,18 +109,12 @@ struct cpupower_topology { unsigned int threads; /* per core */ /* Array gets mallocated with cores entries, holding per core info */ - struct { - int pkg; - int core; - int cpu; - - /* flags */ - unsigned int is_online:1; - } *core_info; + struct cpuid_core_info *core_info; }; extern int get_cpu_topology(struct cpupower_topology *cpu_top); extern void 
cpu_topology_release(struct cpupower_topology cpu_top); + /* CPU topology/hierarchy parsing ******************/ /* X86 ONLY ****************************************/ diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c index 96e28c124b5..851c7a16ca4 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.c +++ b/tools/power/cpupower/utils/helpers/sysfs.c @@ -37,25 +37,6 @@ unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) return (unsigned int) numread; } -static unsigned int sysfs_write_file(const char *path, - const char *value, size_t len) -{ - int fd; - ssize_t numwrite; - - fd = open(path, O_WRONLY); - if (fd == -1) - return 0; - - numwrite = write(fd, value, len); - if (numwrite < 1) { - close(fd); - return 0; - } - close(fd); - return (unsigned int) numwrite; -} - /* * Detect whether a CPU is online * @@ -108,6 +89,33 @@ int sysfs_is_cpu_online(unsigned int cpu) /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ + +/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ + +/* + * helper function to check whether a file under "../cpuX/cpuidle/stateX/" dir + * exists. + * For example the functionality to disable c-states was introduced in later + * kernel versions, this function can be used to explicitly check for this + * feature. + * + * returns 1 if the file exists, 0 otherwise. + */ +unsigned int sysfs_idlestate_file_exists(unsigned int cpu, + unsigned int idlestate, + const char *fname) +{ + char path[SYSFS_PATH_MAX]; + struct stat statbuf; + + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", + cpu, idlestate, fname); + if (stat(path, &statbuf) != 0) + return 0; + return 1; +} + /* * helper function to read file from /sys into given buffer * fname is a relative path under "cpuX/cpuidle/stateX/" dir @@ -140,6 +148,40 @@ unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate, return (unsigned int) numread; } +/* + * helper function to write a new value to a /sys file + * fname is a relative path under "../cpuX/cpuidle/cstateY/" dir + * + * Returns the number of bytes written or 0 on error + */ +static +unsigned int sysfs_idlestate_write_file(unsigned int cpu, + unsigned int idlestate, + const char *fname, + const char *value, size_t len) +{ + char path[SYSFS_PATH_MAX]; + int fd; + ssize_t numwrite; + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", + cpu, idlestate, fname); + + fd = open(path, O_WRONLY); + if (fd == -1) + return 0; + + numwrite = write(fd, value, len); + if (numwrite < 1) { + close(fd); + return 0; + } + + close(fd); + + return (unsigned int) numwrite; +} + /* read access to files which contain one numeric value */ enum idlestate_value { @@ -147,6 +189,7 @@ enum idlestate_value { IDLESTATE_POWER, IDLESTATE_LATENCY, IDLESTATE_TIME, + IDLESTATE_DISABLE, MAX_IDLESTATE_VALUE_FILES }; @@ -155,6 +198,7 @@ static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = { [IDLESTATE_POWER] = "power", [IDLESTATE_LATENCY] = "latency", [IDLESTATE_TIME] = "time", + [IDLESTATE_DISABLE] = "disable", }; static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu, @@ -224,8 +268,59 @@ static char *sysfs_idlestate_get_one_string(unsigned int cpu, return result; } +/* + * Returns: + * 1 if disabled + * 0 if enabled + * -1 if idlestate is not available + * -2 if disabling is not supported by the kernel + */ +int sysfs_is_idlestate_disabled(unsigned int cpu, + unsigned int idlestate) 
+{ + if (sysfs_get_idlestate_count(cpu) <= idlestate) + return -1; + + if (!sysfs_idlestate_file_exists(cpu, idlestate, + idlestate_value_files[IDLESTATE_DISABLE])) + return -2; + return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_DISABLE); +} + +/* + * Pass 1 as last argument to disable or 0 to enable the state + * Returns: + * 0 on success + * negative values on error, for example: + * -1 if idlestate is not available + * -2 if disabling is not supported by the kernel + * -3 No write access to disable/enable C-states + */ +int sysfs_idlestate_disable(unsigned int cpu, + unsigned int idlestate, + unsigned int disable) +{ + char value[SYSFS_PATH_MAX]; + int bytes_written; + + if (sysfs_get_idlestate_count(cpu) <= idlestate) + return -1; + + if (!sysfs_idlestate_file_exists(cpu, idlestate, + idlestate_value_files[IDLESTATE_DISABLE])) + return -2; + + snprintf(value, SYSFS_PATH_MAX, "%u", disable); + + bytes_written = sysfs_idlestate_write_file(cpu, idlestate, "disable", + value, sizeof(disable)); + if (bytes_written) + return 0; + return -3; +} + unsigned long sysfs_get_idlestate_latency(unsigned int cpu, - unsigned int idlestate) + unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY); } @@ -257,7 +352,7 @@ char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate) * Negativ in error case * Zero if cpuidle does not export any C-states */ -int sysfs_get_idlestate_count(unsigned int cpu) +unsigned int sysfs_get_idlestate_count(unsigned int cpu) { char file[SYSFS_PATH_MAX]; struct stat statbuf; diff --git a/tools/power/cpupower/utils/helpers/sysfs.h b/tools/power/cpupower/utils/helpers/sysfs.h index 8cb797bbceb..d28f11fedbd 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.h +++ b/tools/power/cpupower/utils/helpers/sysfs.h @@ -7,8 +7,16 @@ extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); +extern unsigned int sysfs_idlestate_file_exists(unsigned int cpu, + unsigned int idlestate, + const char *fname); + extern int sysfs_is_cpu_online(unsigned int cpu); +extern int sysfs_is_idlestate_disabled(unsigned int cpu, + unsigned int idlestate); +extern int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate, + unsigned int disable); extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu, unsigned int idlestate); extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu, @@ -19,7 +27,7 @@ extern char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate); extern char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate); -extern int sysfs_get_idlestate_count(unsigned int cpu); +extern unsigned int sysfs_get_idlestate_count(unsigned int cpu); extern char *sysfs_get_cpuidle_governor(void); extern char *sysfs_get_cpuidle_driver(void); diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c index 4eae2c47ba4..c13120af519 100644 --- a/tools/power/cpupower/utils/helpers/topology.c +++ b/tools/power/cpupower/utils/helpers/topology.c @@ -20,9 +20,8 @@ #include <helpers/sysfs.h> /* returns -1 on failure, 0 on success */ -int sysfs_topology_read_file(unsigned int cpu, const char *fname) +static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result) { - unsigned long value; char linebuf[MAX_LINE_LEN]; char *endp; char path[SYSFS_PATH_MAX]; @@ -31,20 +30,12 @@ int sysfs_topology_read_file(unsigned int cpu, const char *fname) cpu, fname); if (sysfs_read_file(path, linebuf, 
MAX_LINE_LEN) == 0) return -1; - value = strtoul(linebuf, &endp, 0); + *result = strtol(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; - return value; + return 0; } -struct cpuid_core_info { - unsigned int pkg; - unsigned int thread; - unsigned int cpu; - /* flags */ - unsigned int is_online:1; -}; - static int __compare(const void *t1, const void *t2) { struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; @@ -53,9 +44,9 @@ static int __compare(const void *t1, const void *t2) return -1; else if (top1->pkg > top2->pkg) return 1; - else if (top1->thread < top2->thread) + else if (top1->core < top2->core) return -1; - else if (top1->thread > top2->thread) + else if (top1->core > top2->core) return 1; else if (top1->cpu < top2->cpu) return -1; @@ -73,28 +64,42 @@ static int __compare(const void *t1, const void *t2) */ int get_cpu_topology(struct cpupower_topology *cpu_top) { - int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF); + int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); - cpu_top->core_info = malloc(sizeof(struct cpupower_topology) * cpus); + cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus); if (cpu_top->core_info == NULL) return -ENOMEM; cpu_top->pkgs = cpu_top->cores = 0; for (cpu = 0; cpu < cpus; cpu++) { cpu_top->core_info[cpu].cpu = cpu; cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); - cpu_top->core_info[cpu].pkg = - sysfs_topology_read_file(cpu, "physical_package_id"); - if ((int)cpu_top->core_info[cpu].pkg != -1 && - cpu_top->core_info[cpu].pkg > cpu_top->pkgs) - cpu_top->pkgs = cpu_top->core_info[cpu].pkg; - cpu_top->core_info[cpu].core = - sysfs_topology_read_file(cpu, "core_id"); + if(sysfs_topology_read_file( + cpu, + "physical_package_id", + &(cpu_top->core_info[cpu].pkg)) < 0) + return -1; + if(sysfs_topology_read_file( + cpu, + "core_id", + &(cpu_top->core_info[cpu].core)) < 0) + return -1; } - cpu_top->pkgs++; qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), __compare); + /* Count the number of distinct pkgs values. This works + because the primary sort of the core_info struct was just + done by pkg value. */ + last_pkg = cpu_top->core_info[0].pkg; + for(cpu = 1; cpu < cpus; cpu++) { + if(cpu_top->core_info[cpu].pkg != last_pkg) { + last_pkg = cpu_top->core_info[cpu].pkg; + cpu_top->pkgs++; + } + } + cpu_top->pkgs++; + /* Intel's cores count is not consecutively numbered, there may * be a core_id of 3, but none of 2. 
Assume there always is 0 * Get amount of cores by counting duplicates in a package diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index 0d6571e418d..c4bae9203a6 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c @@ -39,6 +39,7 @@ static int mode; static int interval = 1; static char *show_monitors_param; static struct cpupower_topology cpu_top; +static unsigned int wake_cpus; /* ToDo: Document this in the manpage */ static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; @@ -84,7 +85,7 @@ int fill_string_with_spaces(char *s, int n) void print_header(int topology_depth) { int unsigned mon; - int state, need_len, pr_mon_len; + int state, need_len; cstate_t s; char buf[128] = ""; int percent_width = 4; @@ -93,7 +94,6 @@ void print_header(int topology_depth) printf("%s|", buf); for (mon = 0; mon < avail_monitors; mon++) { - pr_mon_len = 0; need_len = monitors[mon]->hw_states_num * (percent_width + 3) - 1; if (mon != 0) { @@ -315,16 +315,28 @@ int fork_it(char **argv) int do_interval_measure(int i) { unsigned int num; + int cpu; + + if (wake_cpus) + for (cpu = 0; cpu < cpu_count; cpu++) + bind_cpu(cpu); for (num = 0; num < avail_monitors; num++) { dprint("HW C-state residency monitor: %s - States: %d\n", monitors[num]->name, monitors[num]->hw_states_num); monitors[num]->start(); } + sleep(i); + + if (wake_cpus) + for (cpu = 0; cpu < cpu_count; cpu++) + bind_cpu(cpu); + for (num = 0; num < avail_monitors; num++) monitors[num]->stop(); + return 0; } @@ -333,7 +345,7 @@ static void cmdline(int argc, char *argv[]) int opt; progname = basename(argv[0]); - while ((opt = getopt(argc, argv, "+li:m:")) != -1) { + while ((opt = getopt(argc, argv, "+lci:m:")) != -1) { switch (opt) { case 'l': if (mode) @@ -352,6 +364,9 @@ static void cmdline(int argc, char *argv[]) mode = show; show_monitors_param = optarg; break; + case 'c': + wake_cpus = 1; + break; default: print_wrong_arg_exit(); } diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h index 9312ee1f2db..9e43f3371fb 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h @@ -65,4 +65,21 @@ extern long long timespec_diff_us(struct timespec start, struct timespec end); "could be inaccurate\n"), mes, ov); \ } + +/* Taken over from x86info project sources -> return 0 on success */ +#include <sched.h> +#include <sys/types.h> +#include <unistd.h> +static inline int bind_cpu(int cpu) +{ + cpu_set_t set; + + if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) { + CPU_ZERO(&set); + CPU_SET(cpu, &set); + return sched_setaffinity(getpid(), sizeof(set), &set); + } + return 1; +} + #endif /* __CPUIDLE_INFO_HW__ */ diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c new file mode 100644 index 00000000000..ebeaba6571a --- /dev/null +++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c @@ -0,0 +1,196 @@ +/* + * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * Based on SandyBridge monitor. Implements the new package C-states + * (PC8, PC9, PC10) coming with a specific Haswell (family 0x45) CPU. 
+ */ + +#if defined(__i386__) || defined(__x86_64__) + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include "helpers/helpers.h" +#include "idle_monitor/cpupower-monitor.h" + +#define MSR_PKG_C8_RESIDENCY 0x00000630 +#define MSR_PKG_C9_RESIDENCY 0x00000631 +#define MSR_PKG_C10_RESIDENCY 0x00000632 + +#define MSR_TSC 0x10 + +enum intel_hsw_ext_id { PC8 = 0, PC9, PC10, HSW_EXT_CSTATE_COUNT, + TSC = 0xFFFF }; + +static int hsw_ext_get_count_percent(unsigned int self_id, double *percent, + unsigned int cpu); + +static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = { + { + .name = "PC8", + .desc = N_("Processor Package C8"), + .id = PC8, + .range = RANGE_PACKAGE, + .get_count_percent = hsw_ext_get_count_percent, + }, + { + .name = "PC9", + .desc = N_("Processor Package C9"), + .desc = N_("Processor Package C2"), + .id = PC9, + .range = RANGE_PACKAGE, + .get_count_percent = hsw_ext_get_count_percent, + }, + { + .name = "PC10", + .desc = N_("Processor Package C10"), + .id = PC10, + .range = RANGE_PACKAGE, + .get_count_percent = hsw_ext_get_count_percent, + }, +}; + +static unsigned long long tsc_at_measure_start; +static unsigned long long tsc_at_measure_end; +static unsigned long long *previous_count[HSW_EXT_CSTATE_COUNT]; +static unsigned long long *current_count[HSW_EXT_CSTATE_COUNT]; +/* valid flag for all CPUs. If a MSR read failed it will be zero */ +static int *is_valid; + +static int hsw_ext_get_count(enum intel_hsw_ext_id id, unsigned long long *val, + unsigned int cpu) +{ + int msr; + + switch (id) { + case PC8: + msr = MSR_PKG_C8_RESIDENCY; + break; + case PC9: + msr = MSR_PKG_C9_RESIDENCY; + break; + case PC10: + msr = MSR_PKG_C10_RESIDENCY; + break; + case TSC: + msr = MSR_TSC; + break; + default: + return -1; + }; + if (read_msr(cpu, msr, val)) + return -1; + return 0; +} + +static int hsw_ext_get_count_percent(unsigned int id, double *percent, + unsigned int cpu) +{ + *percent = 0.0; + + if (!is_valid[cpu]) + return -1; + + *percent = (100.0 * + (current_count[id][cpu] - previous_count[id][cpu])) / + (tsc_at_measure_end - tsc_at_measure_start); + + dprint("%s: previous: %llu - current: %llu - (%u)\n", + hsw_ext_cstates[id].name, previous_count[id][cpu], + current_count[id][cpu], cpu); + + dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n", + hsw_ext_cstates[id].name, + (unsigned long long) tsc_at_measure_end - tsc_at_measure_start, + current_count[id][cpu] - previous_count[id][cpu], + *percent, cpu); + + return 0; +} + +static int hsw_ext_start(void) +{ + int num, cpu; + unsigned long long val; + + for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { + for (cpu = 0; cpu < cpu_count; cpu++) { + hsw_ext_get_count(num, &val, cpu); + previous_count[num][cpu] = val; + } + } + hsw_ext_get_count(TSC, &tsc_at_measure_start, 0); + return 0; +} + +static int hsw_ext_stop(void) +{ + unsigned long long val; + int num, cpu; + + hsw_ext_get_count(TSC, &tsc_at_measure_end, 0); + + for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { + for (cpu = 0; cpu < cpu_count; cpu++) { + is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu); + current_count[num][cpu] = val; + } + } + return 0; +} + +struct cpuidle_monitor intel_hsw_ext_monitor; + +static struct cpuidle_monitor *hsw_ext_register(void) +{ + int num; + + if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL + || cpupower_cpu_info.family != 6) + return NULL; + + switch (cpupower_cpu_info.model) { + case 0x45: /* HSW */ + break; + default: + return NULL; + } + + is_valid = calloc(cpu_count, 
sizeof(int)); + for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { + previous_count[num] = calloc(cpu_count, + sizeof(unsigned long long)); + current_count[num] = calloc(cpu_count, + sizeof(unsigned long long)); + } + intel_hsw_ext_monitor.name_len = strlen(intel_hsw_ext_monitor.name); + return &intel_hsw_ext_monitor; +} + +void hsw_ext_unregister(void) +{ + int num; + free(is_valid); + for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { + free(previous_count[num]); + free(current_count[num]); + } +} + +struct cpuidle_monitor intel_hsw_ext_monitor = { + .name = "HaswellExtended", + .hw_states = hsw_ext_cstates, + .hw_states_num = HSW_EXT_CSTATE_COUNT, + .start = hsw_ext_start, + .stop = hsw_ext_stop, + .do_register = hsw_ext_register, + .unregister = hsw_ext_unregister, + .needs_root = 1, + .overflow_s = 922000000 /* 922337203 seconds TSC overflow + at 20GHz */ +}; +#endif /* defined(__i386__) || defined(__x86_64__) */ diff --git a/tools/power/cpupower/utils/idle_monitor/idle_monitors.def b/tools/power/cpupower/utils/idle_monitor/idle_monitors.def index e3f8d9b2b18..0d6ba4dbb9c 100644 --- a/tools/power/cpupower/utils/idle_monitor/idle_monitors.def +++ b/tools/power/cpupower/utils/idle_monitor/idle_monitors.def @@ -2,6 +2,7 @@ DEF(amd_fam14h) DEF(intel_nhm) DEF(intel_snb) +DEF(intel_hsw_ext) DEF(mperf) #endif DEF(cpuidle_sysfs) diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c index a1bc07cd53e..efc8a69c9ab 100644 --- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c +++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c @@ -150,9 +150,19 @@ static struct cpuidle_monitor *snb_register(void) || cpupower_cpu_info.family != 6) return NULL; - if (cpupower_cpu_info.model != 0x2A - && cpupower_cpu_info.model != 0x2D) + switch (cpupower_cpu_info.model) { + case 0x2A: /* SNB */ + case 0x2D: /* SNB Xeon */ + case 0x3A: /* IVB */ + case 0x3E: /* IVB Xeon */ + case 0x3C: /* HSW */ + case 0x3F: /* HSW */ + case 0x45: /* HSW */ + case 0x46: /* HSW */ + break; + default: return NULL; + } is_valid = calloc(cpu_count, sizeof(int)); for (num = 0; num < SNB_CSTATE_COUNT; num++) { diff --git a/tools/power/x86/turbostat/.gitignore b/tools/power/x86/turbostat/.gitignore new file mode 100644 index 00000000000..7521370d356 --- /dev/null +++ b/tools/power/x86/turbostat/.gitignore @@ -0,0 +1 @@ +turbostat diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile index f8564955419..d1b3a361e52 100644 --- a/tools/power/x86/turbostat/Makefile +++ b/tools/power/x86/turbostat/Makefile @@ -1,9 +1,22 @@ +CC = $(CROSS_COMPILE)gcc +BUILD_OUTPUT := $(PWD) +PREFIX := /usr +DESTDIR := + turbostat : turbostat.c CFLAGS += -Wall +CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"' + +%: %.c + @mkdir -p $(BUILD_OUTPUT) + $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ +.PHONY : clean clean : - rm -f turbostat + @rm -f $(BUILD_OUTPUT)/turbostat -install : - install turbostat /usr/bin/turbostat - install turbostat.8 /usr/share/man/man8 +install : turbostat + install -d $(DESTDIR)$(PREFIX)/bin + install $(BUILD_OUTPUT)/turbostat $(DESTDIR)$(PREFIX)/bin/turbostat + install -d $(DESTDIR)$(PREFIX)/share/man/man8 + install turbostat.8 $(DESTDIR)$(PREFIX)/share/man/man8 diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index e4d0690cccf..56bfb523c5b 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -11,16 +11,16 @@ turbostat \- Report 
processor frequency and idle statistics .RB [ Options ] .RB [ "\-i interval_sec" ] .SH DESCRIPTION -\fBturbostat \fP reports processor topology, frequency -and idle power state statistics on modern X86 processors. +\fBturbostat \fP reports processor topology, frequency, +idle power-state statistics, temperature and power on modern X86 processors. Either \fBcommand\fP is forked and statistics are printed upon its completion, or statistics are printed periodically. \fBturbostat \fP -requires that the processor +must be run on root, and +minimally requires that the processor supports an "invariant" TSC, plus the APERF and MPERF MSRs. -\fBturbostat \fP will report idle cpu power state residency -on processors that additionally support C-state residency counters. +Additional information is reported depending on hardware counter support. .SS Options The \fB-p\fP option limits output to the 1st thread in 1st core of each package. @@ -31,8 +31,6 @@ The \fB-S\fP option limits output to a 1-line System Summary for each interval. .PP The \fB-v\fP option increases verbosity. .PP -The \fB-s\fP option prints the SMI counter, equivalent to "-c 0x34" -.PP The \fB-c MSR#\fP option includes the delta of the specified 32-bit MSR counter. .PP The \fB-C MSR#\fP option includes the delta of the specified 64-bit MSR counter. @@ -49,15 +47,24 @@ displays the statistics gathered since it was forked. .PP .SH FIELD DESCRIPTIONS .nf -\fBpk\fP processor package number. -\fBcor\fP processor core number. +\fBPackage\fP processor package number. +\fBCore\fP processor core number. \fBCPU\fP Linux CPU (logical processor) number. Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology. -\fB%c0\fP percent of the interval that the CPU retired instructions. -\fBGHz\fP average clock rate while the CPU was in c0 state. -\fBTSC\fP average GHz that the TSC ran during the entire interval. -\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states. -\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states. +\fBAVG_MHz\fP number of cycles executed divided by time elapsed. +\fB%Buzy\fP percent of the interval that the CPU retired instructions, aka. % of time in "C0" state. +\fBBzy_MHz\fP average clock rate while the CPU was busy (in "c0" state). +\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval. +\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. +\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. +\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. +\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. +\fBPkgWatt\fP Watts consumed by the whole package. +\fBCorWatt\fP Watts consumed by the core part of the package. +\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. +\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors. +\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. +\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. .fi .PP .SH EXAMPLE @@ -66,50 +73,61 @@ Without any parameters, turbostat prints out counters ever 5 seconds. for turbostat to fork). The first row of statistics is a summary for the entire system. -Note that the summary is a weighted average. +For residency % columns, the summary is a weighted average. 
+For Temperature columns, the summary is the column maximum. +For Watts columns, the summary is a system total. Subsequent rows show per-CPU statistics. .nf -[root@x980]# ./turbostat -cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 - 0.09 1.62 3.38 1.83 0.32 97.76 1.26 83.61 - 0 0 0.15 1.62 3.38 10.23 0.05 89.56 1.26 83.61 - 0 6 0.05 1.62 3.38 10.34 - 1 2 0.03 1.62 3.38 0.07 0.05 99.86 - 1 8 0.03 1.62 3.38 0.06 - 2 4 0.21 1.62 3.38 0.10 1.49 98.21 - 2 10 0.02 1.62 3.38 0.29 - 8 1 0.04 1.62 3.38 0.04 0.08 99.84 - 8 7 0.01 1.62 3.38 0.06 - 9 3 0.53 1.62 3.38 0.10 0.20 99.17 - 9 9 0.02 1.62 3.38 0.60 - 10 5 0.01 1.62 3.38 0.02 0.04 99.92 - 10 11 0.02 1.62 3.38 0.02 -.fi -.SH SUMMARY EXAMPLE -The "-s" option prints the column headers just once, -and then the one line system summary for each sample interval. - -.nf -[root@x980]# ./turbostat -s - %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 - 0.23 1.67 3.38 2.00 0.30 97.47 1.07 82.12 - 0.10 1.62 3.38 1.87 2.25 95.77 12.02 72.60 - 0.20 1.64 3.38 1.98 0.11 97.72 0.30 83.36 - 0.11 1.70 3.38 1.86 1.81 96.22 9.71 74.90 +[root@ivy]# ./turbostat + Core CPU Avg_MHz %Busy Bzy_MHz TSC_MHz SMI CPU%c1 CPU%c3 CPU%c6 CPU%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt + - - 6 0.36 1596 3492 0 0.59 0.01 99.04 0.00 23 24 23.82 0.01 72.47 0.00 6.40 1.01 0.00 + 0 0 9 0.58 1596 3492 0 0.28 0.01 99.13 0.00 23 24 23.82 0.01 72.47 0.00 6.40 1.01 0.00 + 0 4 1 0.07 1596 3492 0 0.79 + 1 1 10 0.65 1596 3492 0 0.59 0.00 98.76 0.00 23 + 1 5 5 0.28 1596 3492 0 0.95 + 2 2 10 0.66 1596 3492 0 0.41 0.01 98.92 0.00 23 + 2 6 2 0.10 1597 3492 0 0.97 + 3 3 3 0.20 1596 3492 0 0.44 0.00 99.37 0.00 23 + 3 7 5 0.31 1596 3492 0 0.33 .fi .SH VERBOSE EXAMPLE The "-v" option adds verbosity to the output: .nf -GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2) -12 * 133 = 1600 MHz max efficiency -25 * 133 = 3333 MHz TSC frequency -26 * 133 = 3467 MHz max turbo 4 active cores -26 * 133 = 3467 MHz max turbo 3 active cores -27 * 133 = 3600 MHz max turbo 2 active cores -27 * 133 = 3600 MHz max turbo 1 active cores - +[root@ivy]# turbostat -v +turbostat v3.0 November 23, 2012 - Len Brown <lenb@kernel.org> +CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3a:9 (6:58:9) +CPUID(6): APERF, DTS, PTM, EPB +RAPL: 851 sec. Joule Counter Range +cpu0: MSR_NHM_PLATFORM_INFO: 0x81010f0012300 +16 * 100 = 1600 MHz max efficiency +35 * 100 = 3500 MHz TSC frequency +cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x1e008402 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked: pkg-cstate-limit=2: pc6-noret) +cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x25262727 +37 * 100 = 3700 MHz max turbo 4 active cores +38 * 100 = 3800 MHz max turbo 3 active cores +39 * 100 = 3900 MHz max turbo 2 active cores +39 * 100 = 3900 MHz max turbo 1 active cores +cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced) +cpu0: MSR_RAPL_POWER_UNIT: 0x000a1003 (0.125000 Watts, 0.000015 Joules, 0.000977 sec.) +cpu0: MSR_PKG_POWER_INFO: 0x01e00268 (77 W TDP, RAPL 60 - 0 W, 0.000000 sec.) 
+cpu0: MSR_PKG_POWER_LIMIT: 0x830000148268 (UNlocked) +cpu0: PKG Limit #1: ENabled (77.000000 Watts, 1.000000 sec, clamp DISabled) +cpu0: PKG Limit #2: ENabled (96.000000 Watts, 0.000977* sec, clamp DISabled) +cpu0: MSR_PP0_POLICY: 0 +cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked) +cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled) +cpu0: MSR_PP1_POLICY: 0 +cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked) +cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled) +cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00691400 (105 C) +cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884e0000 (27 C) +cpu0: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1) +cpu1: MSR_IA32_THERM_STATUS: 0x88560000 (19 C +/- 1) +cpu2: MSR_IA32_THERM_STATUS: 0x88540000 (21 C +/- 1) +cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1) + ... .fi The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency available at the minimum package voltage. The \fBTSC frequency\fP is the nominal @@ -125,57 +143,35 @@ eg. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds until ^C while the other CPUs are mostly idle: .nf -[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null +root@ivy: turbostat cat /dev/zero > /dev/null ^C -cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 - 8.86 3.61 3.38 15.06 31.19 44.89 0.00 0.00 - 0 0 1.46 3.22 3.38 16.84 29.48 52.22 0.00 0.00 - 0 6 0.21 3.06 3.38 18.09 - 1 2 0.53 3.33 3.38 2.80 46.40 50.27 - 1 8 0.89 3.47 3.38 2.44 - 2 4 1.36 3.43 3.38 9.04 23.71 65.89 - 2 10 0.18 2.86 3.38 10.22 - 8 1 0.04 2.87 3.38 99.96 0.01 0.00 - 8 7 99.72 3.63 3.38 0.27 - 9 3 0.31 3.21 3.38 7.64 56.55 35.50 - 9 9 0.08 2.95 3.38 7.88 - 10 5 1.42 3.43 3.38 2.14 30.99 65.44 - 10 11 0.16 2.88 3.38 3.40 + Core CPU Avg_MHz %Busy Bzy_MHz TSC_MHz SMI CPU%c1 CPU%c3 CPU%c6 CPU%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt + - - 496 12.75 3886 3492 0 13.16 0.04 74.04 0.00 36 36 0.00 0.00 0.00 0.00 23.15 17.65 0.00 + 0 0 22 0.57 3830 3492 0 0.83 0.02 98.59 0.00 27 36 0.00 0.00 0.00 0.00 23.15 17.65 0.00 + 0 4 9 0.24 3829 3492 0 1.15 + 1 1 4 0.09 3783 3492 0 99.91 0.00 0.00 0.00 36 + 1 5 3880 99.82 3888 3492 0 0.18 + 2 2 17 0.44 3813 3492 0 0.77 0.04 98.75 0.00 28 + 2 6 12 0.32 3823 3492 0 0.89 + 3 3 16 0.43 3844 3492 0 0.63 0.11 98.84 0.00 30 + 3 7 4 0.11 3827 3492 0 0.94 +30.372243 sec + .fi -Above the cycle soaker drives cpu7 up its 3.6 Ghz turbo limit +Above the cycle soaker drives cpu5 up its 3.8 GHz turbo limit while the other processors are generally in various states of idle. -Note that cpu1 and cpu7 are HT siblings within core8. -As cpu7 is very busy, it prevents its sibling, cpu1, +Note that cpu1 and cpu5 are HT siblings within core1. +As cpu5 is very busy, it prevents its sibling, cpu1, from entering a c-state deeper than c1. -Note that turbostat reports average GHz of 3.63, while -the arithmetic average of the GHz column above is lower. -This is a weighted average, where the weight is %c0. ie. it is the total number of -un-halted cycles elapsed per time divided by the number of CPUs. -.SH SMI COUNTING EXAMPLE -On Intel Nehalem and newer processors, MSR 0x34 is a System Management Mode Interrupt (SMI) counter. -Using the -m option, you can display how many SMIs have fired since reset, or if there -are SMIs during the measurement interval, you can display the delta using the -d option. 
-.nf -[root@x980 ~]# turbostat -m 0x34 -cor CPU %c0 GHz TSC MSR 0x034 %c1 %c3 %c6 %pc3 %pc6 - 1.41 1.82 3.38 0x00000000 8.92 37.82 51.85 17.37 0.55 - 0 0 3.73 2.03 3.38 0x00000055 1.72 48.25 46.31 17.38 0.55 - 0 6 0.14 1.63 3.38 0x00000056 5.30 - 1 2 2.51 1.80 3.38 0x00000056 15.65 29.33 52.52 - 1 8 0.10 1.65 3.38 0x00000056 18.05 - 2 4 1.16 1.68 3.38 0x00000056 5.87 24.47 68.50 - 2 10 0.10 1.63 3.38 0x00000056 6.93 - 8 1 3.84 1.91 3.38 0x00000056 1.36 50.65 44.16 - 8 7 0.08 1.64 3.38 0x00000056 5.12 - 9 3 1.82 1.73 3.38 0x00000056 7.59 24.21 66.38 - 9 9 0.09 1.68 3.38 0x00000056 9.32 - 10 5 1.66 1.65 3.38 0x00000056 15.10 50.00 33.23 - 10 11 1.72 1.65 3.38 0x00000056 15.05 -^C -[root@x980 ~]# -.fi +Note that the Avg_MHz column reflects the total number of cycles executed +divided by the measurement interval. If the %Busy column is 100%, +then the processor was running at that speed the entire interval. +The Avg_MHz multiplied by the %Busy results in the Bzy_MHz -- +which is the average frequency while the processor was executing -- +not including any non-busy idle time. + .SH NOTES .B "turbostat " diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 2655ae9a3ad..d0396af99fa 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -2,7 +2,7 @@ * turbostat -- show CPU frequency and C-state residency * on modern Intel turbo-capable processors. * - * Copyright (c) 2012 Intel Corporation. + * Copyright (c) 2013 Intel Corporation. * Len Brown <len.brown@intel.com> * * This program is free software; you can redistribute it and/or modify it @@ -20,7 +20,10 @@ */ #define _GNU_SOURCE +#include MSRHEADER +#include <stdarg.h> #include <stdio.h> +#include <err.h> #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> @@ -34,30 +37,26 @@ #include <string.h> #include <ctype.h> #include <sched.h> - -#define MSR_NEHALEM_PLATFORM_INFO 0xCE -#define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1AD -#define MSR_IVT_TURBO_RATIO_LIMIT 0x1AE -#define MSR_APERF 0xE8 -#define MSR_MPERF 0xE7 -#define MSR_PKG_C2_RESIDENCY 0x60D /* SNB only */ -#define MSR_PKG_C3_RESIDENCY 0x3F8 -#define MSR_PKG_C6_RESIDENCY 0x3F9 -#define MSR_PKG_C7_RESIDENCY 0x3FA /* SNB only */ -#define MSR_CORE_C3_RESIDENCY 0x3FC -#define MSR_CORE_C6_RESIDENCY 0x3FD -#define MSR_CORE_C7_RESIDENCY 0x3FE /* SNB only */ +#include <cpuid.h> char *proc_stat = "/proc/stat"; unsigned int interval_sec = 5; /* set with -i interval_sec */ unsigned int verbose; /* set with -v */ -unsigned int summary_only; /* set with -s */ +unsigned int rapl_verbose; /* set with -R */ +unsigned int rapl_joules; /* set with -J */ +unsigned int thermal_verbose; /* set with -T */ +unsigned int summary_only; /* set with -S */ +unsigned int dump_only; /* set with -s */ unsigned int skip_c0; unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; +unsigned int do_c8_c9_c10; +unsigned int do_slm_cstates; +unsigned int use_c1_residency_msr; unsigned int has_aperf; -unsigned int units = 1000000000; /* Ghz etc */ +unsigned int has_epb; +unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int has_invariant_tsc; unsigned int do_nehalem_platform_info; @@ -67,6 +66,7 @@ unsigned int extra_msr_offset32; unsigned int extra_msr_offset64; unsigned int extra_delta_offset32; unsigned int extra_delta_offset64; +int do_smi; double bclk; unsigned int show_pkg; unsigned int show_core; @@ -74,6 +74,43 @@ unsigned int show_cpu; unsigned int show_pkg_only; unsigned 
int show_core_only; char *output_buffer, *outp; +unsigned int do_rapl; +unsigned int do_dts; +unsigned int do_ptm; +unsigned int tcc_activation_temp; +unsigned int tcc_activation_temp_override; +double rapl_power_units, rapl_energy_units, rapl_time_units; +double rapl_joule_counter_range; + +#define RAPL_PKG (1 << 0) + /* 0x610 MSR_PKG_POWER_LIMIT */ + /* 0x611 MSR_PKG_ENERGY_STATUS */ +#define RAPL_PKG_PERF_STATUS (1 << 1) + /* 0x613 MSR_PKG_PERF_STATUS */ +#define RAPL_PKG_POWER_INFO (1 << 2) + /* 0x614 MSR_PKG_POWER_INFO */ + +#define RAPL_DRAM (1 << 3) + /* 0x618 MSR_DRAM_POWER_LIMIT */ + /* 0x619 MSR_DRAM_ENERGY_STATUS */ + /* 0x61c MSR_DRAM_POWER_INFO */ +#define RAPL_DRAM_PERF_STATUS (1 << 4) + /* 0x61b MSR_DRAM_PERF_STATUS */ + +#define RAPL_CORES (1 << 5) + /* 0x638 MSR_PP0_POWER_LIMIT */ + /* 0x639 MSR_PP0_ENERGY_STATUS */ +#define RAPL_CORE_POLICY (1 << 6) + /* 0x63a MSR_PP0_POLICY */ + + +#define RAPL_GFX (1 << 7) + /* 0x640 MSR_PP1_POWER_LIMIT */ + /* 0x641 MSR_PP1_ENERGY_STATUS */ + /* 0x642 MSR_PP1_POLICY */ +#define TJMAX_DEFAULT 100 + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) int aperf_mperf_unstable; int backwards_count; @@ -86,11 +123,12 @@ struct thread_data { unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; - unsigned long long c1; /* derived */ + unsigned long long c1; unsigned long long extra_msr64; unsigned long long extra_delta64; unsigned long long extra_msr32; unsigned long long extra_delta32; + unsigned int smi_count; unsigned int cpu_id; unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 @@ -101,6 +139,7 @@ struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; + unsigned int core_temp_c; unsigned int core_id; } *core_even, *core_odd; @@ -109,7 +148,18 @@ struct pkg_data { unsigned long long pc3; unsigned long long pc6; unsigned long long pc7; + unsigned long long pc8; + unsigned long long pc9; + unsigned long long pc10; unsigned int package_id; + unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ + unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ + unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */ + unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */ + unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ + unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ + unsigned int pkg_temp_c; + } *package_even, *package_odd; #define ODD_COUNTERS thread_odd, core_odd, package_odd @@ -206,108 +256,171 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) retval = pread(fd, msr, sizeof *msr, offset); close(fd); - if (retval != sizeof *msr) + if (retval != sizeof *msr) { + fprintf(stderr, "%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset); return -1; + } return 0; } +/* + * Example Format w/ field column widths: + * + * Package Core CPU Avg_MHz Bzy_MHz TSC_MHz SMI %Busy CPU_%c1 CPU_%c3 CPU_%c6 CPU_%c7 CoreTmp PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt + * 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 + */ + void print_header(void) { if (show_pkg) - outp += sprintf(outp, "pk"); - if (show_pkg) - outp += sprintf(outp, " "); + outp += sprintf(outp, "Package "); if (show_core) - outp += sprintf(outp, "cor"); + outp += sprintf(outp, " Core "); if (show_cpu) - outp += sprintf(outp, " CPU"); - if (show_pkg || show_core || show_cpu) - outp += sprintf(outp, " "); + outp += sprintf(outp, " CPU "); + if 
(has_aperf) + outp += sprintf(outp, "Avg_MHz "); if (do_nhm_cstates) - outp += sprintf(outp, " %%c0"); + outp += sprintf(outp, " %%Busy "); if (has_aperf) - outp += sprintf(outp, " GHz"); - outp += sprintf(outp, " TSC"); + outp += sprintf(outp, "Bzy_MHz "); + outp += sprintf(outp, "TSC_MHz "); + if (do_smi) + outp += sprintf(outp, " SMI "); if (extra_delta_offset32) - outp += sprintf(outp, " count 0x%03X", extra_delta_offset32); + outp += sprintf(outp, " count 0x%03X ", extra_delta_offset32); if (extra_delta_offset64) - outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64); + outp += sprintf(outp, " COUNT 0x%03X ", extra_delta_offset64); if (extra_msr_offset32) - outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32); + outp += sprintf(outp, " MSR 0x%03X ", extra_msr_offset32); if (extra_msr_offset64) - outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64); + outp += sprintf(outp, " MSR 0x%03X ", extra_msr_offset64); if (do_nhm_cstates) - outp += sprintf(outp, " %%c1"); + outp += sprintf(outp, " CPU%%c1 "); + if (do_nhm_cstates && !do_slm_cstates) + outp += sprintf(outp, " CPU%%c3 "); if (do_nhm_cstates) - outp += sprintf(outp, " %%c3"); - if (do_nhm_cstates) - outp += sprintf(outp, " %%c6"); + outp += sprintf(outp, " CPU%%c6 "); if (do_snb_cstates) - outp += sprintf(outp, " %%c7"); + outp += sprintf(outp, " CPU%%c7 "); + + if (do_dts) + outp += sprintf(outp, "CoreTmp "); + if (do_ptm) + outp += sprintf(outp, " PkgTmp "); + if (do_snb_cstates) - outp += sprintf(outp, " %%pc2"); - if (do_nhm_cstates) - outp += sprintf(outp, " %%pc3"); - if (do_nhm_cstates) - outp += sprintf(outp, " %%pc6"); + outp += sprintf(outp, "Pkg%%pc2 "); + if (do_nhm_cstates && !do_slm_cstates) + outp += sprintf(outp, "Pkg%%pc3 "); + if (do_nhm_cstates && !do_slm_cstates) + outp += sprintf(outp, "Pkg%%pc6 "); if (do_snb_cstates) - outp += sprintf(outp, " %%pc7"); + outp += sprintf(outp, "Pkg%%pc7 "); + if (do_c8_c9_c10) { + outp += sprintf(outp, "Pkg%%pc8 "); + outp += sprintf(outp, "Pkg%%pc9 "); + outp += sprintf(outp, "Pk%%pc10 "); + } + if (do_rapl && !rapl_joules) { + if (do_rapl & RAPL_PKG) + outp += sprintf(outp, "PkgWatt "); + if (do_rapl & RAPL_CORES) + outp += sprintf(outp, "CorWatt "); + if (do_rapl & RAPL_GFX) + outp += sprintf(outp, "GFXWatt "); + if (do_rapl & RAPL_DRAM) + outp += sprintf(outp, "RAMWatt "); + if (do_rapl & RAPL_PKG_PERF_STATUS) + outp += sprintf(outp, " PKG_%% "); + if (do_rapl & RAPL_DRAM_PERF_STATUS) + outp += sprintf(outp, " RAM_%% "); + } else { + if (do_rapl & RAPL_PKG) + outp += sprintf(outp, " Pkg_J "); + if (do_rapl & RAPL_CORES) + outp += sprintf(outp, " Cor_J "); + if (do_rapl & RAPL_GFX) + outp += sprintf(outp, " GFX_J "); + if (do_rapl & RAPL_DRAM) + outp += sprintf(outp, " RAM_W "); + if (do_rapl & RAPL_PKG_PERF_STATUS) + outp += sprintf(outp, " PKG_%% "); + if (do_rapl & RAPL_DRAM_PERF_STATUS) + outp += sprintf(outp, " RAM_%% "); + outp += sprintf(outp, " time "); + + } outp += sprintf(outp, "\n"); } int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { - fprintf(stderr, "t %p, c %p, p %p\n", t, c, p); + outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); if (t) { - fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags); - fprintf(stderr, "TSC: %016llX\n", t->tsc); - fprintf(stderr, "aperf: %016llX\n", t->aperf); - fprintf(stderr, "mperf: %016llX\n", t->mperf); - fprintf(stderr, "c1: %016llX\n", t->c1); - fprintf(stderr, "msr0x%x: %08llX\n", + outp += sprintf(outp, "CPU: %d flags 0x%x\n", + t->cpu_id, t->flags); + outp += 
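dump_counters() now appends into the shared output buffer rather than writing straight to stderr, using the same pointer-advancing sprintf idiom as print_header(). A self-contained sketch of that idiom (the buffer size here is arbitrary):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *output_buffer = calloc(1, 4096);
        char *outp = output_buffer;

        if (!output_buffer)
                return 1;
        outp += sprintf(outp, "Package    Core     CPU\n");     /* header */
        outp += sprintf(outp, "%8d%8d%8d\n", 0, 1, 2);          /* one row */

        fputs(output_buffer, stdout);   /* single flush, as flush_stdout() does */
        free(output_buffer);
        return 0;
}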
sprintf(outp, "TSC: %016llX\n", t->tsc); + outp += sprintf(outp, "aperf: %016llX\n", t->aperf); + outp += sprintf(outp, "mperf: %016llX\n", t->mperf); + outp += sprintf(outp, "c1: %016llX\n", t->c1); + outp += sprintf(outp, "msr0x%x: %08llX\n", extra_delta_offset32, t->extra_delta32); - fprintf(stderr, "msr0x%x: %016llX\n", + outp += sprintf(outp, "msr0x%x: %016llX\n", extra_delta_offset64, t->extra_delta64); - fprintf(stderr, "msr0x%x: %08llX\n", + outp += sprintf(outp, "msr0x%x: %08llX\n", extra_msr_offset32, t->extra_msr32); - fprintf(stderr, "msr0x%x: %016llX\n", + outp += sprintf(outp, "msr0x%x: %016llX\n", extra_msr_offset64, t->extra_msr64); + if (do_smi) + outp += sprintf(outp, "SMI: %08X\n", t->smi_count); } if (c) { - fprintf(stderr, "core: %d\n", c->core_id); - fprintf(stderr, "c3: %016llX\n", c->c3); - fprintf(stderr, "c6: %016llX\n", c->c6); - fprintf(stderr, "c7: %016llX\n", c->c7); + outp += sprintf(outp, "core: %d\n", c->core_id); + outp += sprintf(outp, "c3: %016llX\n", c->c3); + outp += sprintf(outp, "c6: %016llX\n", c->c6); + outp += sprintf(outp, "c7: %016llX\n", c->c7); + outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); } if (p) { - fprintf(stderr, "package: %d\n", p->package_id); - fprintf(stderr, "pc2: %016llX\n", p->pc2); - fprintf(stderr, "pc3: %016llX\n", p->pc3); - fprintf(stderr, "pc6: %016llX\n", p->pc6); - fprintf(stderr, "pc7: %016llX\n", p->pc7); + outp += sprintf(outp, "package: %d\n", p->package_id); + outp += sprintf(outp, "pc2: %016llX\n", p->pc2); + outp += sprintf(outp, "pc3: %016llX\n", p->pc3); + outp += sprintf(outp, "pc6: %016llX\n", p->pc6); + outp += sprintf(outp, "pc7: %016llX\n", p->pc7); + outp += sprintf(outp, "pc8: %016llX\n", p->pc8); + outp += sprintf(outp, "pc9: %016llX\n", p->pc9); + outp += sprintf(outp, "pc10: %016llX\n", p->pc10); + outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); + outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores); + outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx); + outp += sprintf(outp, "Joules RAM: %0X\n", p->energy_dram); + outp += sprintf(outp, "Throttle PKG: %0X\n", + p->rapl_pkg_perf_status); + outp += sprintf(outp, "Throttle RAM: %0X\n", + p->rapl_dram_perf_status); + outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); } + + outp += sprintf(outp, "\n"); + return 0; } /* * column formatting convention & formats - * package: "pk" 2 columns %2d - * core: "cor" 3 columns %3d - * CPU: "CPU" 3 columns %3d - * GHz: "GHz" 3 columns %3.2 - * TSC: "TSC" 3 columns %3.2 - * percentage " %pc3" %6.2 */ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { double interval_float; + char *fmt8; /* if showing only 1st thread in core and this isn't one, bail out */ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) @@ -322,62 +435,52 @@ int format_counters(struct thread_data *t, struct core_data *c, /* topo columns, print blanks on 1st (average) line */ if (t == &average.threads) { if (show_pkg) - outp += sprintf(outp, " "); - if (show_pkg && show_core) - outp += sprintf(outp, " "); + outp += sprintf(outp, " -"); if (show_core) - outp += sprintf(outp, " "); + outp += sprintf(outp, " -"); if (show_cpu) - outp += sprintf(outp, " " " "); + outp += sprintf(outp, " -"); } else { if (show_pkg) { if (p) - outp += sprintf(outp, "%2d", p->package_id); + outp += sprintf(outp, "%8d", p->package_id); else - outp += sprintf(outp, " "); + outp += sprintf(outp, " -"); } - if (show_pkg && show_core) - outp += sprintf(outp, " "); if (show_core) { if (c) - outp += 
sprintf(outp, "%3d", c->core_id); + outp += sprintf(outp, "%8d", c->core_id); else - outp += sprintf(outp, " "); + outp += sprintf(outp, " -"); } if (show_cpu) - outp += sprintf(outp, " %3d", t->cpu_id); + outp += sprintf(outp, "%8d", t->cpu_id); } + /* AvgMHz */ + if (has_aperf) + outp += sprintf(outp, "%8.0f", + 1.0 / units * t->aperf / interval_float); + /* %c0 */ if (do_nhm_cstates) { - if (show_pkg || show_core || show_cpu) - outp += sprintf(outp, " "); if (!skip_c0) - outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); else - outp += sprintf(outp, " ****"); + outp += sprintf(outp, "********"); } - /* GHz */ - if (has_aperf) { - if (!aperf_mperf_unstable) { - outp += sprintf(outp, " %3.2f", - 1.0 * t->tsc / units * t->aperf / - t->mperf / interval_float); - } else { - if (t->aperf > t->tsc || t->mperf > t->tsc) { - outp += sprintf(outp, " ***"); - } else { - outp += sprintf(outp, "%3.1f*", - 1.0 * t->tsc / - units * t->aperf / - t->mperf / interval_float); - } - } - } + /* BzyMHz */ + if (has_aperf) + outp += sprintf(outp, "%8.0f", + 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); /* TSC */ - outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float); + outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); + + /* SMI */ + if (do_smi) + outp += sprintf(outp, "%8d", t->smi_count); /* delta */ if (extra_delta_offset32) @@ -396,34 +499,88 @@ int format_counters(struct thread_data *t, struct core_data *c, if (do_nhm_cstates) { if (!skip_c1) - outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc); else - outp += sprintf(outp, " ****"); + outp += sprintf(outp, "********"); } /* print per-core data only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; + if (do_nhm_cstates && !do_slm_cstates) + outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); if (do_nhm_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc); - if (do_nhm_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); if (do_snb_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc); + + if (do_dts) + outp += sprintf(outp, "%8d", c->core_temp_c); /* print per-package data only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) goto done; + if (do_ptm) + outp += sprintf(outp, "%8d", p->pkg_temp_c); + if (do_snb_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc); - if (do_nhm_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc); - if (do_nhm_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc); + if (do_nhm_cstates && !do_slm_cstates) + outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc); + if (do_nhm_cstates && !do_slm_cstates) + outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc); if (do_snb_cstates) - outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc); + if (do_c8_c9_c10) { + outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc); + } + + /* + * If measurement interval exceeds minimum RAPL Joule Counter range, + * indicate that results are suspect by printing "**" in fraction place. 
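format_counters() emits per-core and per-package columns only once per core and per package, gated on the thread flags. A reduced sketch of that gating is below; CPU_IS_FIRST_THREAD_IN_CORE is 0x2 as defined earlier, and the 0x4 value for CPU_IS_FIRST_CORE_IN_PACKAGE is an assumption of this sketch.

/* Compilable fragment illustrating the topology gating, not the real printer. */
#define CPU_IS_FIRST_THREAD_IN_CORE     0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE    0x4     /* assumed value */

void emit_row(unsigned int flags)
{
        /* per-thread columns (Avg_MHz, %Busy, Bzy_MHz, TSC_MHz, SMI, CPU%c1) */

        if (!(flags & CPU_IS_FIRST_THREAD_IN_CORE))
                return;         /* HT sibling: no per-core columns */

        /* per-core columns (CPU%c3/c6/c7, CoreTmp) */

        if (!(flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                return;         /* other cores: no per-package columns */

        /* per-package columns (PkgTmp, Pkg%pc*, RAPL watts or joules) */
}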
+ */ + if (interval_float < rapl_joule_counter_range) + fmt8 = "%8.2f"; + else + fmt8 = " %6.0f**"; + + if (do_rapl && !rapl_joules) { + if (do_rapl & RAPL_PKG) + outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float); + if (do_rapl & RAPL_CORES) + outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float); + if (do_rapl & RAPL_GFX) + outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float); + if (do_rapl & RAPL_DRAM) + outp += sprintf(outp, fmt8, p->energy_dram * rapl_energy_units / interval_float); + if (do_rapl & RAPL_PKG_PERF_STATUS) + outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); + if (do_rapl & RAPL_DRAM_PERF_STATUS) + outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); + } else { + if (do_rapl & RAPL_PKG) + outp += sprintf(outp, fmt8, + p->energy_pkg * rapl_energy_units); + if (do_rapl & RAPL_CORES) + outp += sprintf(outp, fmt8, + p->energy_cores * rapl_energy_units); + if (do_rapl & RAPL_GFX) + outp += sprintf(outp, fmt8, + p->energy_gfx * rapl_energy_units); + if (do_rapl & RAPL_DRAM) + outp += sprintf(outp, fmt8, + p->energy_dram * rapl_energy_units); + if (do_rapl & RAPL_PKG_PERF_STATUS) + outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); + if (do_rapl & RAPL_DRAM_PERF_STATUS) + outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); + outp += sprintf(outp, fmt8, interval_float); + + } done: outp += sprintf(outp, "\n"); @@ -433,6 +590,7 @@ done: void flush_stdout() { fputs(output_buffer, stdout); + fflush(stdout); outp = output_buffer; } void flush_stderr() @@ -459,6 +617,13 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_ for_all_cpus(format_counters, t, c, p); } +#define DELTA_WRAP32(new, old) \ + if (new > old) { \ + old = new - old; \ + } else { \ + old = 0x100000000 + new - old; \ + } + void delta_package(struct pkg_data *new, struct pkg_data *old) { @@ -466,6 +631,17 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pc3 = new->pc3 - old->pc3; old->pc6 = new->pc6 - old->pc6; old->pc7 = new->pc7 - old->pc7; + old->pc8 = new->pc8 - old->pc8; + old->pc9 = new->pc9 - old->pc9; + old->pc10 = new->pc10 - old->pc10; + old->pkg_temp_c = new->pkg_temp_c; + + DELTA_WRAP32(new->energy_pkg, old->energy_pkg); + DELTA_WRAP32(new->energy_cores, old->energy_cores); + DELTA_WRAP32(new->energy_gfx, old->energy_gfx); + DELTA_WRAP32(new->energy_dram, old->energy_dram); + DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status); + DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status); } void @@ -474,6 +650,7 @@ delta_core(struct core_data *new, struct core_data *old) old->c3 = new->c3 - old->c3; old->c6 = new->c6 - old->c6; old->c7 = new->c7 - old->c7; + old->core_temp_c = new->core_temp_c; } /* @@ -486,12 +663,10 @@ delta_thread(struct thread_data *new, struct thread_data *old, old->tsc = new->tsc - old->tsc; /* check for TSC < 1 Mcycles over interval */ - if (old->tsc < (1000 * 1000)) { - fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n"); - fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n"); - fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n"); - exit(-3); - } + if (old->tsc < (1000 * 1000)) + errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n" + "You can disable all c-states by booting with 
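The RAPL energy-status registers are 32-bit counters that wrap, and DELTA_WRAP32 above compensates when the new sample is numerically smaller than the old one. A standalone sketch with a made-up wrapped sample:

#include <stdio.h>

/* Same idea as DELTA_WRAP32, written as a function for clarity. */
static unsigned long long delta32(unsigned long long new, unsigned long long old)
{
        if (new >= old)
                return new - old;
        return 0x100000000ULL + new - old;      /* counter wrapped past 2^32 */
}

int main(void)
{
        /* old sample near the top of the 32-bit range, new sample after a wrap */
        printf("%llu\n", delta32(0x00000010ULL, 0xFFFFFFF0ULL));        /* prints 32 */
        return 0;
}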
\"idle=poll\"\n" + "or just the deep ones with \"processor.max_cstate=1\""); old->c1 = new->c1 - old->c1; @@ -516,17 +691,24 @@ delta_thread(struct thread_data *new, struct thread_data *old, } - /* - * As counter collection is not atomic, - * it is possible for mperf's non-halted cycles + idle states - * to exceed TSC's all cycles: show c1 = 0% in that case. - */ - if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) - old->c1 = 0; - else { - /* normal case, derive c1 */ - old->c1 = old->tsc - old->mperf - core_delta->c3 + if (use_c1_residency_msr) { + /* + * Some models have a dedicated C1 residency MSR, + * which should be more accurate than the derivation below. + */ + } else { + /* + * As counter collection is not atomic, + * it is possible for mperf's non-halted cycles + idle states + * to exceed TSC's all cycles: show c1 = 0% in that case. + */ + if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) + old->c1 = 0; + else { + /* normal case, derive c1 */ + old->c1 = old->tsc - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7; + } } if (old->mperf == 0) { @@ -544,6 +726,9 @@ delta_thread(struct thread_data *new, struct thread_data *old, */ old->extra_msr32 = new->extra_msr32; old->extra_msr64 = new->extra_msr64; + + if (do_smi) + old->smi_count = new->smi_count - old->smi_count; } int delta_cpu(struct thread_data *t, struct core_data *c, @@ -571,6 +756,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data t->mperf = 0; t->c1 = 0; + t->smi_count = 0; t->extra_delta32 = 0; t->extra_delta64 = 0; @@ -580,11 +766,23 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data c->c3 = 0; c->c6 = 0; c->c7 = 0; + c->core_temp_c = 0; p->pc2 = 0; p->pc3 = 0; p->pc6 = 0; p->pc7 = 0; + p->pc8 = 0; + p->pc9 = 0; + p->pc10 = 0; + + p->energy_pkg = 0; + p->energy_dram = 0; + p->energy_cores = 0; + p->energy_gfx = 0; + p->rapl_pkg_perf_status = 0; + p->rapl_dram_perf_status = 0; + p->pkg_temp_c = 0; } int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) @@ -605,6 +803,8 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.cores.c6 += c->c6; average.cores.c7 += c->c7; + average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); + /* sum per-pkg values only for 1st core in pkg */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; @@ -613,7 +813,19 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.packages.pc3 += p->pc3; average.packages.pc6 += p->pc6; average.packages.pc7 += p->pc7; + average.packages.pc8 += p->pc8; + average.packages.pc9 += p->pc9; + average.packages.pc10 += p->pc10; + average.packages.energy_pkg += p->energy_pkg; + average.packages.energy_dram += p->energy_dram; + average.packages.energy_cores += p->energy_cores; + average.packages.energy_gfx += p->energy_gfx; + + average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c); + + average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status; + average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; return 0; } /* @@ -645,6 +857,10 @@ void compute_average(struct thread_data *t, struct core_data *c, average.packages.pc3 /= topo.num_packages; average.packages.pc6 /= topo.num_packages; average.packages.pc7 /= topo.num_packages; + + average.packages.pc8 /= topo.num_packages; + average.packages.pc9 /= topo.num_packages; + average.packages.pc10 /= topo.num_packages; } static unsigned long long 
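On models without a dedicated C1 residency MSR, C1 time is still derived as whatever part of the TSC interval is not accounted for by mperf and the deeper core C-states, clamped to zero when the non-atomic samples overshoot. A worked sketch with illustrative tick counts:

#include <stdio.h>

int main(void)
{
        /* made-up per-interval deltas, all in TSC ticks */
        unsigned long long tsc = 17000, mperf = 8000, c3 = 2000, c6 = 4000, c7 = 1000;
        unsigned long long c1;

        if (mperf + c3 + c6 + c7 > tsc)
                c1 = 0;                         /* samples overshot the interval */
        else
                c1 = tsc - mperf - c3 - c6 - c7;

        printf("derived c1 = %llu ticks (%.1f%% of the interval)\n",
               c1, 100.0 * c1 / tsc);           /* 2000 ticks, 11.8% here */
        return 0;
}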
rdtsc(void) @@ -665,23 +881,31 @@ static unsigned long long rdtsc(void) int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int cpu = t->cpu_id; + unsigned long long msr; - if (cpu_migrate(cpu)) + if (cpu_migrate(cpu)) { + fprintf(stderr, "Could not migrate to CPU %d\n", cpu); return -1; + } t->tsc = rdtsc(); /* we are running on local CPU of interest */ if (has_aperf) { - if (get_msr(cpu, MSR_APERF, &t->aperf)) + if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) return -3; - if (get_msr(cpu, MSR_MPERF, &t->mperf)) + if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) return -4; } + if (do_smi) { + if (get_msr(cpu, MSR_SMI_COUNT, &msr)) + return -5; + t->smi_count = msr & 0xFFFFFFFF; + } if (extra_delta_offset32) { - if (get_msr(cpu, extra_delta_offset32, &t->extra_delta32)) + if (get_msr(cpu, extra_delta_offset32, &msr)) return -5; - t->extra_delta32 &= 0xFFFFFFFF; + t->extra_delta32 = msr & 0xFFFFFFFF; } if (extra_delta_offset64) @@ -689,22 +913,30 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -5; if (extra_msr_offset32) { - if (get_msr(cpu, extra_msr_offset32, &t->extra_msr32)) + if (get_msr(cpu, extra_msr_offset32, &msr)) return -5; - t->extra_msr32 &= 0xFFFFFFFF; + t->extra_msr32 = msr & 0xFFFFFFFF; } if (extra_msr_offset64) if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64)) return -5; + if (use_c1_residency_msr) { + if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) + return -6; + } + /* collect core counters only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - if (do_nhm_cstates) { + if (do_nhm_cstates && !do_slm_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; + } + + if (do_nhm_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; } @@ -713,11 +945,18 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; + if (do_dts) { + if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) + return -9; + c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); + } + + /* collect package counters only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; - if (do_nhm_cstates) { + if (do_nhm_cstates && !do_slm_cstates) { if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) @@ -729,6 +968,49 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) return -12; } + if (do_c8_c9_c10) { + if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) + return -13; + if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) + return -13; + if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) + return -13; + } + if (do_rapl & RAPL_PKG) { + if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) + return -13; + p->energy_pkg = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_CORES) { + if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr)) + return -14; + p->energy_cores = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_DRAM) { + if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) + return -15; + p->energy_dram = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_GFX) { + if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr)) + return -16; + p->energy_gfx = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_PKG_PERF_STATUS) { + if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr)) + return -16; + p->rapl_pkg_perf_status = msr & 0xFFFFFFFF; + } + if (do_rapl & RAPL_DRAM_PERF_STATUS) { + if (get_msr(cpu, 
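All of the reads in get_counters() go through get_msr(), which is a pread() at the MSR offset on /dev/cpu/<cpu>/msr. Below is a minimal standalone read, assuming root and a loaded msr module; 0xE7 is the IA32_MPERF offset listed in the removed defines earlier in this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned long long msr;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;               /* try "modprobe msr" */
        if (pread(fd, &msr, sizeof(msr), 0xE7) == sizeof(msr))
                printf("cpu0 MPERF: 0x%016llx\n", msr);
        close(fd);
        return 0;
}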
MSR_DRAM_PERF_STATUS, &msr)) + return -16; + p->rapl_dram_perf_status = msr & 0xFFFFFFFF; + } + if (do_ptm) { + if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) + return -17; + p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); + } return 0; } @@ -740,10 +1022,9 @@ void print_verbose_header(void) if (!do_nehalem_platform_info) return; - get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr); + get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); - if (verbose > 1) - fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr); + fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); ratio = (msr >> 40) & 0xFF; fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n", @@ -753,13 +1034,16 @@ void print_verbose_header(void) fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n", ratio, bclk, ratio * bclk); + get_msr(0, MSR_IA32_POWER_CTL, &msr); + fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", + msr, msr & 0x2 ? "EN" : "DIS"); + if (!do_ivt_turbo_ratio_limit) goto print_nhm_turbo_ratio_limits; get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr); - if (verbose > 1) - fprintf(stderr, "MSR_IVT_TURBO_RATIO_LIMIT: 0x%llx\n", msr); + fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); ratio = (msr >> 56) & 0xFF; if (ratio) @@ -802,14 +1086,58 @@ void print_verbose_header(void) ratio, bclk, ratio * bclk); print_nhm_turbo_ratio_limits: + get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + +#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) +#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) + + fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr); + + fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ", + (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", + (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", + (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", + (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "", + (msr & (1 << 15)) ? "" : "UN", + (unsigned int)msr & 7); + + + switch(msr & 0x7) { + case 0: + fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0"); + break; + case 1: + fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0"); + break; + case 2: + fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3"); + break; + case 3: + fprintf(stderr, do_slm_cstates ? "invalid" : "pc6"); + break; + case 4: + fprintf(stderr, do_slm_cstates ? "pc4" : "pc7"); + break; + case 5: + fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid"); + break; + case 6: + fprintf(stderr, do_slm_cstates ? "pc6" : "invalid"); + break; + case 7: + fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited"); + break; + default: + fprintf(stderr, "invalid"); + } + fprintf(stderr, ")\n"); if (!do_nehalem_turbo_ratio_limit) return; - get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr); + get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr); - if (verbose > 1) - fprintf(stderr, "MSR_NEHALEM_TURBO_RATIO_LIMIT: 0x%llx\n", msr); + fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); ratio = (msr >> 56) & 0xFF; if (ratio) @@ -884,24 +1212,43 @@ void free_all_buffers(void) } /* + * Open a file, and exit on failure + */ +FILE *fopen_or_die(const char *path, const char *mode) +{ + FILE *filep = fopen(path, "r"); + if (!filep) + err(1, "%s: open failed", path); + return filep; +} + +/* + * Parse a file containing a single int. + */ +int parse_int_file(const char *fmt, ...) 
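print_verbose_header() decodes a couple of ratio fields out of MSR_NHM_PLATFORM_INFO and multiplies them by the bus clock. The sketch below reproduces that arithmetic on a constructed register value; the max-efficiency ratio sits in bits 47:40 as shown above, and bits 15:8 are assumed here to hold the base (non-turbo) ratio.

#include <stdio.h>

int main(void)
{
        double bclk = 100.0;    /* MHz on SNB and later, per discover_bclk() */
        unsigned long long msr = (0x0cULL << 40) | (0x22ULL << 8);      /* illustrative */
        unsigned int max_eff = (msr >> 40) & 0xFF;      /* 12 */
        unsigned int base    = (msr >> 8) & 0xFF;       /* 34 */

        printf("%d * %.0f = %.0f MHz max efficiency\n", max_eff, bclk, max_eff * bclk);
        printf("%d * %.0f = %.0f MHz TSC frequency\n", base, bclk, base * bclk);
        return 0;
}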
+{ + va_list args; + char path[PATH_MAX]; + FILE *filep; + int value; + + va_start(args, fmt); + vsnprintf(path, sizeof(path), fmt, args); + va_end(args); + filep = fopen_or_die(path, "r"); + if (fscanf(filep, "%d", &value) != 1) + err(1, "%s: failed to parse number from file", path); + fclose(filep); + return value; +} + +/* * cpu_is_first_sibling_in_core(cpu) * return 1 if given CPU is 1st HT sibling in the core */ int cpu_is_first_sibling_in_core(int cpu) { - char path[64]; - FILE *filep; - int first_cpu; - - sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); - filep = fopen(path, "r"); - if (filep == NULL) { - perror(path); - exit(1); - } - fscanf(filep, "%d", &first_cpu); - fclose(filep); - return (cpu == first_cpu); + return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); } /* @@ -910,53 +1257,17 @@ int cpu_is_first_sibling_in_core(int cpu) */ int cpu_is_first_core_in_package(int cpu) { - char path[64]; - FILE *filep; - int first_cpu; - - sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); - filep = fopen(path, "r"); - if (filep == NULL) { - perror(path); - exit(1); - } - fscanf(filep, "%d", &first_cpu); - fclose(filep); - return (cpu == first_cpu); + return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu); } int get_physical_package_id(int cpu) { - char path[80]; - FILE *filep; - int pkg; - - sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); - filep = fopen(path, "r"); - if (filep == NULL) { - perror(path); - exit(1); - } - fscanf(filep, "%d", &pkg); - fclose(filep); - return pkg; + return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); } int get_core_id(int cpu) { - char path[80]; - FILE *filep; - int core; - - sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); - filep = fopen(path, "r"); - if (filep == NULL) { - perror(path); - exit(1); - } - fscanf(filep, "%d", &core); - fclose(filep); - return core; + return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); } int get_num_ht_siblings(int cpu) @@ -968,11 +1279,7 @@ int get_num_ht_siblings(int cpu) char character; sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); - filep = fopen(path, "r"); - if (filep == NULL) { - perror(path); - exit(1); - } + filep = fopen_or_die(path, "r"); /* * file format: * if a pair of number with a character between: 2 siblings (eg. 
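parse_int_file() folds the four copy-pasted sysfs readers that follow into one varargs helper. Below is a standalone sketch of the same vsnprintf-a-path-then-fscanf pattern, returning -1 instead of exiting on error; the path and CPU number in main() are just examples.

#include <limits.h>
#include <stdarg.h>
#include <stdio.h>

static int read_sysfs_int(const char *fmt, ...)
{
        char path[PATH_MAX];
        va_list args;
        FILE *fp;
        int val = -1;

        va_start(args, fmt);
        vsnprintf(path, sizeof(path), fmt, args);
        va_end(args);

        fp = fopen(path, "r");
        if (!fp)
                return -1;
        if (fscanf(fp, "%d", &val) != 1)
                val = -1;
        fclose(fp);
        return val;
}

int main(void)
{
        printf("cpu0 core_id: %d\n",
               read_sysfs_int("/sys/devices/system/cpu/cpu%d/topology/core_id", 0));
        return 0;
}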
1-2, or 1,4) @@ -1042,17 +1349,11 @@ int for_all_proc_cpus(int (func)(int)) int cpu_num; int retval; - fp = fopen(proc_stat, "r"); - if (fp == NULL) { - perror(proc_stat); - exit(1); - } + fp = fopen_or_die(proc_stat, "r"); retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); - if (retval != 0) { - perror("/proc/stat format"); - exit(1); - } + if (retval != 0) + err(1, "%s: failed to parse format", proc_stat); while (1) { retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); @@ -1098,13 +1399,22 @@ int mark_cpu_present(int cpu) void turbostat_loop() { int retval; + int restarted = 0; restart: + restarted++; + retval = for_all_cpus(get_counters, EVEN_COUNTERS); - if (retval) { + if (retval < -1) { + exit(retval); + } else if (retval == -1) { + if (restarted > 1) { + exit(retval); + } re_initialize(); goto restart; } + restarted = 0; gettimeofday(&tv_even, (struct timezone *)NULL); while (1) { @@ -1114,7 +1424,9 @@ restart: } sleep(interval_sec); retval = for_all_cpus(get_counters, ODD_COUNTERS); - if (retval) { + if (retval < -1) { + exit(retval); + } else if (retval == -1) { re_initialize(); goto restart; } @@ -1126,7 +1438,9 @@ restart: flush_stdout(); sleep(interval_sec); retval = for_all_cpus(get_counters, EVEN_COUNTERS); - if (retval) { + if (retval < -1) { + exit(retval); + } else if (retval == -1) { re_initialize(); goto restart; } @@ -1143,19 +1457,15 @@ void check_dev_msr() { struct stat sb; - if (stat("/dev/cpu/0/msr", &sb)) { - fprintf(stderr, "no /dev/cpu/0/msr\n"); - fprintf(stderr, "Try \"# modprobe msr\"\n"); - exit(-5); - } + if (stat("/dev/cpu/0/msr", &sb)) + err(-5, "no /dev/cpu/0/msr\n" + "Try \"# modprobe msr\""); } void check_super_user() { - if (getuid() != 0) { - fprintf(stderr, "must be root\n"); - exit(-6); - } + if (getuid() != 0) + errx(-6, "must be root"); } int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model) @@ -1176,6 +1486,15 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model) case 0x2D: /* SNB Xeon */ case 0x3A: /* IVB */ case 0x3E: /* IVB Xeon */ + case 0x3C: /* HSW */ + case 0x3F: /* HSX */ + case 0x45: /* HSW */ + case 0x46: /* HSW */ + case 0x37: /* BYT */ + case 0x4D: /* AVN */ + case 0x3D: /* BDW */ + case 0x4F: /* BDX */ + case 0x56: /* BDX-DE */ return 1; case 0x2E: /* Nehalem-EX Xeon - Beckton */ case 0x2F: /* Westmere-EX Xeon - Eagleton */ @@ -1199,6 +1518,330 @@ int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model) } } +/* + * print_epb() + * Decode the ENERGY_PERF_BIAS MSR + */ +int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned long long msr; + char *epb_string; + int cpu; + + if (!has_epb) + return 0; + + cpu = t->cpu_id; + + /* EPB is per-package */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + return 0; + + if (cpu_migrate(cpu)) { + fprintf(stderr, "Could not migrate to CPU %d\n", cpu); + return -1; + } + + if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) + return 0; + + switch (msr & 0x7) { + case ENERGY_PERF_BIAS_PERFORMANCE: + epb_string = "performance"; + break; + case ENERGY_PERF_BIAS_NORMAL: + epb_string = "balanced"; + break; + case ENERGY_PERF_BIAS_POWERSAVE: + epb_string = "powersave"; + break; + default: + epb_string = "custom"; + break; + } + fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string); + + return 0; +} + +#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ +#define 
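for_all_proc_cpus() enumerates CPUs by scanning the per-CPU lines of /proc/stat after skipping the aggregate line. A trimmed standalone version of that scan:

#include <stdio.h>

int main(void)
{
        FILE *fp = fopen("/proc/stat", "r");
        unsigned int cpu;

        if (!fp)
                return 1;
        /* discard the aggregate "cpu ..." line */
        if (fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n") != 0)
                return 1;       /* unexpected format */
        /* then one "cpuN ..." line per online CPU */
        while (fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu) == 1)
                printf("found cpu%u\n", cpu);
        fclose(fp);
        return 0;
}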
RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ + +double get_tdp(model) +{ + unsigned long long msr; + + if (do_rapl & RAPL_PKG_POWER_INFO) + if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) + return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; + + switch (model) { + case 0x37: + case 0x4D: + return 30.0; + default: + return 135.0; + } +} + + +/* + * rapl_probe() + * + * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units + */ +void rapl_probe(unsigned int family, unsigned int model) +{ + unsigned long long msr; + unsigned int time_unit; + double tdp; + + if (!genuine_intel) + return; + + if (family != 6) + return; + + switch (model) { + case 0x2A: + case 0x3A: + case 0x3C: /* HSW */ + case 0x45: /* HSW */ + case 0x46: /* HSW */ + case 0x3D: /* BDW */ + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; + break; + case 0x3F: /* HSX */ + case 0x4F: /* BDX */ + case 0x56: /* BDX-DE */ + do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + break; + case 0x2D: + case 0x3E: + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; + break; + case 0x37: /* BYT */ + case 0x4D: /* AVN */ + do_rapl = RAPL_PKG | RAPL_CORES ; + break; + default: + return; + } + + /* units on package 0, verify later other packages match */ + if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) + return; + + rapl_power_units = 1.0 / (1 << (msr & 0xF)); + if (model == 0x37) + rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; + else + rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); + + time_unit = msr >> 16 & 0xF; + if (time_unit == 0) + time_unit = 0xA; + + rapl_time_units = 1.0 / (1 << (time_unit)); + + tdp = get_tdp(model); + + rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; + if (verbose) + fprintf(stderr, "RAPL: %.0f sec. 
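rapl_probe() turns MSR_RAPL_POWER_UNIT into scale factors and from them derives how long the 32-bit energy counter can run at the package TDP before wrapping. A worked sketch with a typical-looking register value; the value and the 135 W TDP are illustrative, and note the separate Silvermont energy-unit formula in the model 0x37 branch above.

#include <stdio.h>

int main(void)
{
        unsigned long long msr = 0x000a0e03;    /* power=3, energy=14, time=10 */
        double power_units  = 1.0 / (1 << (msr & 0xF));         /* 0.125 W */
        double energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));   /* ~61 uJ  */
        double time_units   = 1.0 / (1 << (msr >> 16 & 0xF));   /* ~0.98 ms */
        double tdp = 135.0;                                     /* get_tdp() fallback */
        double range = 0xFFFFFFFF * energy_units / tdp;

        printf("units: %g W, %g J, %g s\n", power_units, energy_units, time_units);
        printf("joule counter range: ~%.0f sec at %.0f W\n", range, tdp);
        return 0;
}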
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); + + return; +} + +int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned long long msr; + unsigned int dts; + int cpu; + + if (!(do_dts || do_ptm)) + return 0; + + cpu = t->cpu_id; + + /* DTS is per-core, no need to print for each thread */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + return 0; + + if (cpu_migrate(cpu)) { + fprintf(stderr, "Could not migrate to CPU %d\n", cpu); + return -1; + } + + if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) { + if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", + cpu, msr, tcc_activation_temp - dts); + +#ifdef THERM_DEBUG + if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + dts2 = (msr >> 8) & 0x7F; + fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", + cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); +#endif + } + + + if (do_dts) { + unsigned int resolution; + + if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + resolution = (msr >> 27) & 0xF; + fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", + cpu, msr, tcc_activation_temp - dts, resolution); + +#ifdef THERM_DEBUG + if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) + return 0; + + dts = (msr >> 16) & 0x7F; + dts2 = (msr >> 8) & 0x7F; + fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", + cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); +#endif + } + + return 0; +} + +void print_power_limit_msr(int cpu, unsigned long long msr, char *label) +{ + fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n", + cpu, label, + ((msr >> 15) & 1) ? "EN" : "DIS", + ((msr >> 0) & 0x7FFF) * rapl_power_units, + (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, + (((msr >> 16) & 1) ? "EN" : "DIS")); + + return; +} + +int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned long long msr; + int cpu; + + if (!do_rapl) + return 0; + + /* RAPL counters are per package, so print only for 1st thread/package */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + return 0; + + cpu = t->cpu_id; + if (cpu_migrate(cpu)) { + fprintf(stderr, "Could not migrate to CPU %d\n", cpu); + return -1; + } + + if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) + return -1; + + if (verbose) { + fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx " + "(%f Watts, %f Joules, %f sec.)\n", cpu, msr, + rapl_power_units, rapl_energy_units, rapl_time_units); + } + if (do_rapl & RAPL_PKG_POWER_INFO) { + + if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) + return -5; + + + fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", + cpu, msr, + ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); + + } + if (do_rapl & RAPL_PKG) { + + if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) + return -9; + + fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 63) & 1 ? 
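The thermal readouts are all relative: the DTS field counts degrees Celsius below the TCC activation temperature (tjMax), so the displayed value is tjMax minus the field. A small decode with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long msr = 0x88340800;    /* pretend IA32_THERM_STATUS value */
        unsigned int tcc_activation_temp = 100; /* tjMax, see set_temperature_target() */
        unsigned int dts = (msr >> 16) & 0x7F;  /* 52 degrees below TCC */

        printf("core temperature: %u C\n", tcc_activation_temp - dts); /* 48 C */
        return 0;
}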
"": "UN"); + + print_power_limit_msr(cpu, msr, "PKG Limit #1"); + fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n", + cpu, + ((msr >> 47) & 1) ? "EN" : "DIS", + ((msr >> 32) & 0x7FFF) * rapl_power_units, + (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, + ((msr >> 48) & 1) ? "EN" : "DIS"); + } + + if (do_rapl & RAPL_DRAM) { + if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) + return -6; + + + fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", + cpu, msr, + ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, + ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); + + + if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) + return -9; + fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? "": "UN"); + + print_power_limit_msr(cpu, msr, "DRAM Limit"); + } + if (do_rapl & RAPL_CORE_POLICY) { + if (verbose) { + if (get_msr(cpu, MSR_PP0_POLICY, &msr)) + return -7; + + fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); + } + } + if (do_rapl & RAPL_CORES) { + if (verbose) { + + if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) + return -9; + fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? "": "UN"); + print_power_limit_msr(cpu, msr, "Cores Limit"); + } + } + if (do_rapl & RAPL_GFX) { + if (verbose) { + if (get_msr(cpu, MSR_PP1_POLICY, &msr)) + return -8; + + fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); + + if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) + return -9; + fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", + cpu, msr, (msr >> 31) & 1 ? "": "UN"); + print_power_limit_msr(cpu, msr, "GFX Limit"); + } + } + return 0; +} + int is_snb(unsigned int family, unsigned int model) { @@ -1210,19 +1853,144 @@ int is_snb(unsigned int family, unsigned int model) case 0x2D: case 0x3A: /* IVB */ case 0x3E: /* IVB Xeon */ + case 0x3C: /* HSW */ + case 0x3F: /* HSW */ + case 0x45: /* HSW */ + case 0x46: /* HSW */ + case 0x3D: /* BDW */ + case 0x4F: /* BDX */ + case 0x56: /* BDX-DE */ return 1; } return 0; } +int has_c8_c9_c10(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + switch (model) { + case 0x45: /* HSW */ + case 0x3D: /* BDW */ + return 1; + } + return 0; +} + + +int is_slm(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + switch (model) { + case 0x37: /* BYT */ + case 0x4D: /* AVN */ + return 1; + } + return 0; +} + +#define SLM_BCLK_FREQS 5 +double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; + +double slm_bclk(void) +{ + unsigned long long msr = 3; + unsigned int i; + double freq; + + if (get_msr(0, MSR_FSB_FREQ, &msr)) + fprintf(stderr, "SLM BCLK: unknown\n"); + + i = msr & 0xf; + if (i >= SLM_BCLK_FREQS) { + fprintf(stderr, "SLM BCLK[%d] invalid\n", i); + msr = 3; + } + freq = slm_freq_table[i]; + + fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq); + + return freq; +} + double discover_bclk(unsigned int family, unsigned int model) { if (is_snb(family, model)) return 100.00; + else if (is_slm(family, model)) + return slm_bclk(); else return 133.33; } +/* + * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where + * the Thermal Control Circuit (TCC) activates. + * This is usually equal to tjMax. 
+ * + * Older processors do not have this MSR, so there we guess, + * but also allow cmdline over-ride with -T. + * + * Several MSR temperature values are in units of degrees-C + * below this value, including the Digital Thermal Sensor (DTS), + * Package Thermal Management Sensor (PTM), and thermal event thresholds. + */ +int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) +{ + unsigned long long msr; + unsigned int target_c_local; + int cpu; + + /* tcc_activation_temp is used only for dts or ptm */ + if (!(do_dts || do_ptm)) + return 0; + + /* this is a per-package concept */ + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) + return 0; + + cpu = t->cpu_id; + if (cpu_migrate(cpu)) { + fprintf(stderr, "Could not migrate to CPU %d\n", cpu); + return -1; + } + + if (tcc_activation_temp_override != 0) { + tcc_activation_temp = tcc_activation_temp_override; + fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n", + cpu, tcc_activation_temp); + return 0; + } + + /* Temperature Target MSR is Nehalem and newer only */ + if (!do_nehalem_platform_info) + goto guess; + + if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) + goto guess; + + target_c_local = (msr >> 16) & 0xFF; + + if (verbose) + fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", + cpu, msr, target_c_local); + + if (!target_c_local) + goto guess; + + tcc_activation_temp = target_c_local; + + return 0; + +guess: + tcc_activation_temp = TJMAX_DEFAULT; + fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", + cpu, tcc_activation_temp); + + return 0; +} void check_cpuid() { unsigned int eax, ebx, ecx, edx, max_level; @@ -1230,16 +1998,16 @@ void check_cpuid() eax = ebx = ecx = edx = 0; - asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0)); + __get_cpuid(0, &max_level, &ebx, &ecx, &edx); if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) genuine_intel = 1; if (verbose) - fprintf(stderr, "%.4s%.4s%.4s ", + fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx); - asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx"); + __get_cpuid(1, &fms, &ebx, &ecx, &edx); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; @@ -1250,10 +2018,8 @@ void check_cpuid() fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping); - if (!(edx & (1 << 5))) { - fprintf(stderr, "CPUID: no MSR\n"); - exit(1); - } + if (!(edx & (1 << 5))) + errx(1, "CPUID: no MSR"); /* * check max extended function levels of CPUID. @@ -1261,52 +2027,62 @@ void check_cpuid() * This check is valid for both Intel and AMD. 
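check_cpuid() now uses the compiler-provided __get_cpuid() helper instead of inline asm. The fragment below shows the same family/model/stepping decode standalone; the extended-model adjustment is not visible in the hunk above and follows the usual CPUID convention, so treat that line as an assumption.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int fms, ebx, ecx, edx, family, model, stepping;

        __get_cpuid(1, &fms, &ebx, &ecx, &edx);
        family = (fms >> 8) & 0xf;
        model = (fms >> 4) & 0xf;
        stepping = fms & 0xf;
        if (family == 0x6 || family == 0xf)
                model += ((fms >> 16) & 0xf) << 4;      /* extended model bits */

        printf("family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
               family, model, stepping, family, model, stepping);
        return 0;
}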
*/ ebx = ecx = edx = 0; - asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000)); + __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx); - if (max_level < 0x80000007) { - fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level); - exit(1); - } + if (max_level < 0x80000007) + errx(1, "CPUID: no invariant TSC (max_level 0x%x)", max_level); /* * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 * this check is valid for both Intel and AMD */ - asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); + __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx); has_invariant_tsc = edx & (1 << 8); - if (!has_invariant_tsc) { - fprintf(stderr, "No invariant TSC\n"); - exit(1); - } + if (!has_invariant_tsc) + errx(1, "No invariant TSC"); /* * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 * this check is valid for both Intel and AMD */ - asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); + __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); has_aperf = ecx & (1 << 0); - if (!has_aperf) { - fprintf(stderr, "No APERF MSR\n"); - exit(1); - } + do_dts = eax & (1 << 0); + do_ptm = eax & (1 << 6); + has_epb = ecx & (1 << 3); + + if (verbose) + fprintf(stderr, "CPUID(6): %s%s%s%s\n", + has_aperf ? "APERF" : "No APERF!", + do_dts ? ", DTS" : "", + do_ptm ? ", PTM": "", + has_epb ? ", EPB": ""); + + if (!has_aperf) + errx(-1, "No APERF"); do_nehalem_platform_info = genuine_intel && has_invariant_tsc; do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */ + do_smi = do_nhm_cstates; do_snb_cstates = is_snb(family, model); + do_c8_c9_c10 = has_c8_c9_c10(family, model); + do_slm_cstates = is_slm(family, model); bclk = discover_bclk(family, model); do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model); + rapl_probe(family, model); + + return; } void usage() { - fprintf(stderr, "%s: [-v][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n", - progname); - exit(1); + errx(1, "%s: [-v][-R][-T][-p|-P|-S][-c MSR#][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n", + progname); } @@ -1349,19 +2125,15 @@ void topology_probe() fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num); cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology)); - if (cpus == NULL) { - perror("calloc cpus"); - exit(1); - } + if (cpus == NULL) + err(1, "calloc cpus"); /* * Allocate and initialize cpu_present_set */ cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1)); - if (cpu_present_set == NULL) { - perror("CPU_ALLOC"); - exit(3); - } + if (cpu_present_set == NULL) + err(3, "CPU_ALLOC"); cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_present_setsize, cpu_present_set); for_all_proc_cpus(mark_cpu_present); @@ -1370,10 +2142,8 @@ void topology_probe() * Allocate and initialize cpu_affinity_set */ cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1)); - if (cpu_affinity_set == NULL) { - perror("CPU_ALLOC"); - exit(3); - } + if (cpu_affinity_set == NULL) + err(3, "CPU_ALLOC"); cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); @@ -1457,8 +2227,7 @@ allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data return; error: - perror("calloc counters"); - exit(1); + err(1, "calloc counters"); } /* * init_counter() @@ -1513,12 +2282,10 @@ int 
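topology_probe() and cpu_migrate() work with dynamically sized CPU sets so the tool is not limited by sizeof(cpu_set_t). A minimal sketch of that glibc API, pinning the current thread to CPU 0 the way cpu_migrate(0) would; the CPU count is illustrative.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int max_cpu_num = 63;                   /* would come from topology_probe() */
        cpu_set_t *set = CPU_ALLOC(max_cpu_num + 1);
        size_t setsize = CPU_ALLOC_SIZE(max_cpu_num + 1);

        if (!set)
                return 3;
        CPU_ZERO_S(setsize, set);
        CPU_SET_S(0, setsize, set);             /* target CPU 0 */
        if (sched_setaffinity(0, setsize, set))
                perror("sched_setaffinity");
        CPU_FREE(set);
        return 0;
}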
initialize_counters(int cpu_id) void allocate_output_buffer() { - output_buffer = calloc(1, (1 + topo.num_cpus) * 128); + output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); outp = output_buffer; - if (outp == NULL) { - perror("calloc"); - exit(-1); - } + if (outp == NULL) + err(-1, "calloc output buffer"); } void setup_all_buffers(void) @@ -1529,6 +2296,7 @@ void setup_all_buffers(void) allocate_output_buffer(); for_all_proc_cpus(initialize_counters); } + void turbostat_init() { check_cpuid(); @@ -1540,13 +2308,27 @@ void turbostat_init() if (verbose) print_verbose_header(); + + if (verbose) + for_all_cpus(print_epb, ODD_COUNTERS); + + if (verbose) + for_all_cpus(print_rapl, ODD_COUNTERS); + + for_all_cpus(set_temperature_target, ODD_COUNTERS); + + if (verbose) + for_all_cpus(print_thermal, ODD_COUNTERS); } int fork_it(char **argv) { pid_t child_pid; + int status; - for_all_cpus(get_counters, EVEN_COUNTERS); + status = for_all_cpus(get_counters, EVEN_COUNTERS); + if (status) + exit(status); /* clear affinity side-effect of get_counters() */ sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); @@ -1556,20 +2338,15 @@ int fork_it(char **argv) /* child */ execvp(argv[0], argv); } else { - int status; /* parent */ - if (child_pid == -1) { - perror("fork"); - exit(1); - } + if (child_pid == -1) + err(1, "fork"); signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); - if (waitpid(child_pid, &status, 0) == -1) { - perror("wait"); - exit(1); - } + if (waitpid(child_pid, &status, 0) == -1) + err(status, "waitpid"); } /* * n.b. fork_it() does not check for errors from for_all_cpus() @@ -1585,7 +2362,24 @@ int fork_it(char **argv) fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); - return 0; + return status; +} + +int get_and_dump_counters(void) +{ + int status; + + status = for_all_cpus(get_counters, ODD_COUNTERS); + if (status) + return status; + + status = for_all_cpus(dump_counters, ODD_COUNTERS); + if (status) + return status; + + flush_stdout(); + + return status; } void cmdline(int argc, char **argv) @@ -1594,7 +2388,7 @@ void cmdline(int argc, char **argv) progname = argv[0]; - while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) { + while ((opt = getopt(argc, argv, "+pPsSvi:c:C:m:M:RJT:")) != -1) { switch (opt) { case 'p': show_core_only++; @@ -1602,6 +2396,9 @@ void cmdline(int argc, char **argv) case 'P': show_pkg_only++; break; + case 's': + dump_only++; + break; case 'S': summary_only++; break; @@ -1614,9 +2411,6 @@ void cmdline(int argc, char **argv) case 'c': sscanf(optarg, "%x", &extra_delta_offset32); break; - case 's': - extra_delta_offset32 = 0x34; /* SMI counter */ - break; case 'C': sscanf(optarg, "%x", &extra_delta_offset64); break; @@ -1626,6 +2420,16 @@ void cmdline(int argc, char **argv) case 'M': sscanf(optarg, "%x", &extra_msr_offset64); break; + case 'R': + rapl_verbose++; + break; + case 'T': + tcc_activation_temp_override = atoi(optarg); + break; + case 'J': + rapl_joules++; + break; + default: usage(); } @@ -1636,12 +2440,16 @@ int main(int argc, char **argv) { cmdline(argc, argv); - if (verbose > 1) - fprintf(stderr, "turbostat v2.1 October 6, 2012" + if (verbose) + fprintf(stderr, "turbostat v3.7 Feb 6, 2014" " - Len Brown <lenb@kernel.org>\n"); turbostat_init(); + /* dump counters and exit */ + if (dump_only) + return get_and_dump_counters(); + /* * if any params left, it must be a command to fork */ diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile 
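fork_it() samples the counters, runs the given command under fork/execvp, waits for it, samples again and reports the elapsed time, now also propagating the child's exit status. A stripped-down sketch of that flow, with the counter sampling reduced to comments:

#define _GNU_SOURCE
#include <err.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        struct timeval t0, t1, dt;
        int status = 0;
        pid_t pid;

        if (argc < 2)
                return 0;
        /* sample the "even" counters here */
        gettimeofday(&t0, NULL);

        pid = fork();
        if (pid == 0) {
                execvp(argv[1], argv + 1);
                _exit(127);                     /* exec failed */
        }
        if (pid == -1)
                err(1, "fork");
        if (waitpid(pid, &status, 0) == -1)
                err(1, "waitpid");

        gettimeofday(&t1, NULL);
        /* sample the "odd" counters here, compute and print the deltas */
        timersub(&t1, &t0, &dt);
        fprintf(stderr, "%.6f sec\n", dt.tv_sec + dt.tv_usec / 1000000.0);
        return status;
}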
b/tools/power/x86/x86_energy_perf_policy/Makefile index f458237fdd7..971c9ffdcb5 100644 --- a/tools/power/x86/x86_energy_perf_policy/Makefile +++ b/tools/power/x86/x86_energy_perf_policy/Makefile @@ -1,8 +1,10 @@ +DESTDIR ?= + x86_energy_perf_policy : x86_energy_perf_policy.c clean : rm -f x86_energy_perf_policy install : - install x86_energy_perf_policy /usr/bin/ - install x86_energy_perf_policy.8 /usr/share/man/man8/ + install x86_energy_perf_policy ${DESTDIR}/usr/bin/ + install x86_energy_perf_policy.8 ${DESTDIR}/usr/share/man/man8/ diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c index 33c5c7ee148..40b3e5482f8 100644 --- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c +++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c @@ -289,7 +289,7 @@ void for_every_cpu(void (func)(int)) "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu); if (retval != 1) - return; + break; func(cpu); } diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 96ce80a3743..8abbef164b4 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -1,12 +1,17 @@ -ifeq ("$(origin O)", "command line") +ifneq ($(O),) +ifeq ($(origin O), command line) dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) ABSOLUTE_O := $(shell cd $(O) ; pwd) - OUTPUT := $(ABSOLUTE_O)/ + OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) COMMAND_O := O=$(ABSOLUTE_O) +ifeq ($(objtree),) + objtree := $(O) +endif +endif endif -ifneq ($(OUTPUT),) # check that the output directory actually exists +ifneq ($(OUTPUT),) OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) endif @@ -41,20 +46,39 @@ else NO_SUBDIR = : endif -QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir +# +# Define a callable command for descending to a new directory +# +# Call by doing: $(call descend,directory[,target]) +# +descend = \ + +mkdir -p $(OUTPUT)$(1) && \ + $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2) + +QUIET_SUBDIR0 = +$(MAKE) $(COMMAND_O) -C # space to separate -C and subdir QUIET_SUBDIR1 = ifneq ($(findstring $(MAKEFLAGS),s),s) -ifndef V - QUIET_CC = @echo ' ' CC $@; - QUIET_AR = @echo ' ' AR $@; - QUIET_LINK = @echo ' ' LINK $@; - QUIET_MKDIR = @echo ' ' MKDIR $@; - QUIET_GEN = @echo ' ' GEN $@; + ifneq ($(V),1) + QUIET_CC = @echo ' CC '$@; + QUIET_CC_FPIC = @echo ' CC FPIC '$@; + QUIET_AR = @echo ' AR '$@; + QUIET_LINK = @echo ' LINK '$@; + QUIET_MKDIR = @echo ' MKDIR '$@; + QUIET_GEN = @echo ' GEN '$@; QUIET_SUBDIR0 = +@subdir= - QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \ + QUIET_SUBDIR1 = ;$(NO_SUBDIR) \ + echo ' SUBDIR '$$subdir; \ $(MAKE) $(PRINT_DIR) -C $$subdir - QUIET_FLEX = @echo ' ' FLEX $@; - QUIET_BISON = @echo ' ' BISON $@; -endif + QUIET_FLEX = @echo ' FLEX '$@; + QUIET_BISON = @echo ' BISON '$@; + + descend = \ + +@echo ' DESCEND '$(1); \ + mkdir -p $(OUTPUT)$(1) && \ + $(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2) + + QUIET_CLEAN = @printf ' CLEAN %s\n' $1; + QUIET_INSTALL = @printf ' INSTALL %s\n' $1; + endif endif diff --git a/tools/testing/ktest/examples/crosstests.conf b/tools/testing/ktest/examples/crosstests.conf index 46736604c26..a1203148dfa 100644 --- a/tools/testing/ktest/examples/crosstests.conf +++ b/tools/testing/ktest/examples/crosstests.conf @@ 
-133,12 +133,6 @@ CROSS = frv-linux ARCH = frv GCC_VER = 4.5.1 -# h8300 - failed make defconfig?? -TEST_START IF ${RUN} == h8300 || ${DO_FAILED} -CROSS = h8300-elf -ARCH = h8300 -GCC_VER = 4.5.1 - # m68k fails with error? TEST_START IF ${RUN} == m68k || ${DO_DEFAULT} CROSS = m68k-linux diff --git a/tools/testing/ktest/examples/include/patchcheck.conf b/tools/testing/ktest/examples/include/patchcheck.conf index 339d3e1700f..0eb0a5ac77d 100644 --- a/tools/testing/ktest/examples/include/patchcheck.conf +++ b/tools/testing/ktest/examples/include/patchcheck.conf @@ -14,6 +14,16 @@ PATCH_START := HEAD~3 PATCH_END := HEAD +# Use the oldconfig if build_type wasn't defined +DEFAULTS IF NOT DEFINED BUILD_TYPE +DO_BUILD_TYPE := oldconfig + +DEFAULTS ELSE +DO_BUILD_TYPE := ${BUILD_TYPE} + +DEFAULTS + + # Change PATCH_CHECKOUT to be the branch you want to test. The test will # do a git checkout of this branch before starting. Obviously both # PATCH_START and PATCH_END must be in this branch (and PATCH_START must @@ -43,6 +53,31 @@ PATCH_TEST_TYPE := boot # (space delimited) #IGNORE_WARNINGS = 39eaf7ef884dcc44f7ff1bac803ca2a1dcf43544 6edb2a8a385f0cdef51dae37ff23e74d76d8a6ce +# Instead of just checking for warnings to files that are changed +# it can be advantageous to check for any new warnings. If a +# header file is changed, it could cause a warning in a file not +# touched by the commit. To detect these kinds of warnings, you +# can use the WARNINGS_FILE option. +# +# If the variable CREATE_WARNINGS_FILE is set, this config will +# enable the WARNINGS_FILE during the patchcheck test. Also, +# before running the patchcheck test, it will create the +# warnings file. +# +DEFAULTS IF DEFINED CREATE_WARNINGS_FILE +WARNINGS_FILE = ${OUTPUT_DIR}/warnings_file + +TEST_START IF DEFINED CREATE_WARNINGS_FILE +# WARNINGS_FILE is already set by the DEFAULTS above +TEST_TYPE = make_warnings_file +# Checkout the commit before the patches to test, +# and record all the warnings that exist before the patches +# to test are added +CHECKOUT = ${PATCHCHECK_START}~1 +# Force a full build +BUILD_NOCLEAN = 0 +BUILD_TYPE = ${DO_BUILD_TYPE} + # If you are running a multi test, and the test failed on the first # test but on, say the 5th patch. If you want to restart on the # fifth patch, set PATCH_START1. This will make the first test start @@ -61,6 +96,7 @@ PATCHCHECK_TYPE = ${PATCH_TEST_TYPE} PATCHCHECK_START = ${PATCH_START1} PATCHCHECK_END = ${PATCH_END} CHECKOUT = ${PATCH_CHECKOUT} +BUILD_TYPE = ${DO_BUILD_TYPE} TEST_START IF ${TEST} == patchcheck && ${MULTI} TEST_TYPE = patchcheck @@ -72,3 +108,4 @@ PATCHCHECK_END = ${PATCH_END} CHECKOUT = ${PATCH_CHECKOUT} # Use multi to test different compilers? MAKE_CMD = CC=gcc-4.5.1 make +BUILD_TYPE = ${DO_BUILD_TYPE} diff --git a/tools/testing/ktest/examples/kvm.conf b/tools/testing/ktest/examples/kvm.conf index 831c7c5395f..fbc134f9ac6 100644 --- a/tools/testing/ktest/examples/kvm.conf +++ b/tools/testing/ktest/examples/kvm.conf @@ -10,6 +10,10 @@ MACHINE = Guest # Use virsh to read the serial console of the guest CONSOLE = virsh console ${MACHINE} +# Use SIGKILL to terminate virsh console. We can't kill virsh console +# by the default signal, SIGINT. 
+CLOSE_CONSOLE_SIGNAL = KILL + #*************************************# # This part is the same as test.conf # #*************************************# diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index c7ba7614061..40631569a0f 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -18,6 +18,7 @@ $| = 1; my %opt; my %repeat_tests; my %repeats; +my %evals; #default opts my %default = ( @@ -25,6 +26,7 @@ my %default = ( "TEST_TYPE" => "build", "BUILD_TYPE" => "randconfig", "MAKE_CMD" => "make", + "CLOSE_CONSOLE_SIGNAL" => "INT", "TIMEOUT" => 120, "TMP_DIR" => "/tmp/ktest/\${MACHINE}", "SLEEP_TIME" => 60, # sleep time between tests @@ -39,6 +41,7 @@ my %default = ( "CLEAR_LOG" => 0, "BISECT_MANUAL" => 0, "BISECT_SKIP" => 1, + "BISECT_TRIES" => 1, "MIN_CONFIG_TYPE" => "boot", "SUCCESS_LINE" => "login:", "DETECT_TRIPLE_FAULT" => 1, @@ -53,6 +56,9 @@ my %default = ( "STOP_AFTER_FAILURE" => 60, "STOP_TEST_AFTER" => 600, "MAX_MONITOR_WAIT" => 1800, + "GRUB_REBOOT" => "grub2-reboot", + "SYSLINUX" => "extlinux", + "SYSLINUX_PATH" => "/boot/extlinux", # required, and we will ask users if they don't have them but we keep the default # value something that is common. @@ -70,6 +76,7 @@ my $ktest_config; my $version; my $have_version = 0; my $machine; +my $last_machine; my $ssh_user; my $tmpdir; my $builddir; @@ -105,7 +112,13 @@ my $scp_to_target; my $scp_to_target_install; my $power_off; my $grub_menu; +my $last_grub_menu; +my $grub_file; my $grub_number; +my $grub_reboot; +my $syslinux; +my $syslinux_path; +my $syslinux_label; my $target; my $make; my $pre_install; @@ -118,6 +131,7 @@ my $start_minconfig_defined; my $output_minconfig; my $minconfig_type; my $use_output_minconfig; +my $warnings_file; my $ignore_config; my $ignore_errors; my $addconfig; @@ -126,6 +140,7 @@ my $bisect_bad_commit = ""; my $reverse_bisect; my $bisect_manual; my $bisect_skip; +my $bisect_tries; my $config_bisect_good; my $bisect_ret_good; my $bisect_ret_bad; @@ -152,6 +167,7 @@ my $timeout; my $booted_timeout; my $detect_triplefault; my $console; +my $close_console_signal; my $reboot_success_line; my $success_line; my $stop_after_success; @@ -185,6 +201,9 @@ my $patchcheck_end; # which would require more options. 
my $buildonly = 1; +# tell build not to worry about warnings, even when WARNINGS_FILE is set +my $warnings_ok = 0; + # set when creating a new config my $newconfig = 0; @@ -227,11 +246,17 @@ my %option_map = ( "START_MIN_CONFIG" => \$start_minconfig, "MIN_CONFIG_TYPE" => \$minconfig_type, "USE_OUTPUT_MIN_CONFIG" => \$use_output_minconfig, + "WARNINGS_FILE" => \$warnings_file, "IGNORE_CONFIG" => \$ignore_config, "TEST" => \$run_test, "ADD_CONFIG" => \$addconfig, "REBOOT_TYPE" => \$reboot_type, "GRUB_MENU" => \$grub_menu, + "GRUB_FILE" => \$grub_file, + "GRUB_REBOOT" => \$grub_reboot, + "SYSLINUX" => \$syslinux, + "SYSLINUX_PATH" => \$syslinux_path, + "SYSLINUX_LABEL" => \$syslinux_label, "PRE_INSTALL" => \$pre_install, "POST_INSTALL" => \$post_install, "NO_INSTALL" => \$no_install, @@ -253,6 +278,7 @@ my %option_map = ( "IGNORE_ERRORS" => \$ignore_errors, "BISECT_MANUAL" => \$bisect_manual, "BISECT_SKIP" => \$bisect_skip, + "BISECT_TRIES" => \$bisect_tries, "CONFIG_BISECT_GOOD" => \$config_bisect_good, "BISECT_RET_GOOD" => \$bisect_ret_good, "BISECT_RET_BAD" => \$bisect_ret_bad, @@ -265,6 +291,7 @@ my %option_map = ( "TIMEOUT" => \$timeout, "BOOTED_TIMEOUT" => \$booted_timeout, "CONSOLE" => \$console, + "CLOSE_CONSOLE_SIGNAL" => \$close_console_signal, "DETECT_TRIPLE_FAULT" => \$detect_triplefault, "SUCCESS_LINE" => \$success_line, "REBOOT_SUCCESS_LINE" => \$reboot_success_line, @@ -368,7 +395,7 @@ EOF ; $config_help{"REBOOT_TYPE"} = << "EOF" Way to reboot the box to the test kernel. - Only valid options so far are "grub" and "script". + Only valid options so far are "grub", "grub2", "syslinux", and "script". If you specify grub, it will assume grub version 1 and will search in /boot/grub/menu.lst for the title \$GRUB_MENU @@ -378,11 +405,19 @@ $config_help{"REBOOT_TYPE"} = << "EOF" The entry in /boot/grub/menu.lst must be entered in manually. The test will not modify that file. + + If you specify grub2, then you also need to specify both \$GRUB_MENU + and \$GRUB_FILE. + + If you specify syslinux, then you may use SYSLINUX to define the syslinux + command (defaults to extlinux), and SYSLINUX_PATH to specify the path to + the syslinux install (defaults to /boot/extlinux). But you have to specify + SYSLINUX_LABEL to define the label to boot to for the test kernel. EOF ; $config_help{"GRUB_MENU"} = << "EOF" The grub title name for the test kernel to boot - (Only mandatory if REBOOT_TYPE = grub) + (Only mandatory if REBOOT_TYPE = grub or grub2) Note, ktest.pl will not update the grub menu.lst, you need to manually add an option for the test. ktest.pl will search @@ -393,6 +428,22 @@ $config_help{"GRUB_MENU"} = << "EOF" title Test Kernel kernel vmlinuz-test GRUB_MENU = Test Kernel + + For grub2, a search of \$GRUB_FILE is performed for the lines + that begin with "menuentry". It will not detect submenus. The + menu must be a non-nested menu. Add the quotes used in the menu + to guarantee your selection, as the first menuentry with the content + of \$GRUB_MENU that is found will be used. +EOF + ; +$config_help{"GRUB_FILE"} = << "EOF" + If grub2 is used, the full path for the grub.cfg file is placed + here. Use something like /boot/grub2/grub.cfg to search. +EOF + ; +$config_help{"SYSLINUX_LABEL"} = << "EOF" + If syslinux is used, the label that boots the target kernel must + be specified with SYSLINUX_LABEL. 
EOF ; $config_help{"REBOOT_SCRIPT"} = << "EOF" @@ -401,6 +452,27 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF" EOF ; +sub _logit { + if (defined($opt{"LOG_FILE"})) { + open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; + print OUT @_; + close(OUT); + } +} + +sub logit { + if (defined($opt{"LOG_FILE"})) { + _logit @_; + } else { + print @_; + } +} + +sub doprint { + print @_; + _logit @_; +} + sub read_prompt { my ($cancel, $prompt) = @_; @@ -521,6 +593,15 @@ sub get_ktest_configs { if ($rtype eq "grub") { get_ktest_config("GRUB_MENU"); } + + if ($rtype eq "grub2") { + get_ktest_config("GRUB_MENU"); + get_ktest_config("GRUB_FILE"); + } + + if ($rtype eq "syslinux") { + get_ktest_config("SYSLINUX_LABEL"); + } } sub process_variables { @@ -573,6 +654,18 @@ sub set_value { # Note if a test is something other than build, then we # will need other manditory options. if ($prvalue ne "install") { + # for bisect, we need to check BISECT_TYPE + if ($prvalue ne "bisect") { + $buildonly = 0; + } + } else { + # install still limits some manditory options. + $buildonly = 2; + } + } + + if ($buildonly && $lvalue =~ /^BISECT_TYPE(\[.*\])?$/ && $prvalue ne "build") { + if ($prvalue ne "install") { $buildonly = 0; } else { # install still limits some manditory options. @@ -597,6 +690,22 @@ sub set_value { } } +sub set_eval { + my ($lvalue, $rvalue, $name) = @_; + + my $prvalue = process_variables($rvalue); + my $arr; + + if (defined($evals{$lvalue})) { + $arr = $evals{$lvalue}; + } else { + $arr = []; + $evals{$lvalue} = $arr; + } + + push @{$arr}, $rvalue; +} + sub set_variable { my ($lvalue, $rvalue) = @_; @@ -882,6 +991,20 @@ sub __read_config { $test_case = 1; } + } elsif (/^\s*([A-Z_\[\]\d]+)\s*=~\s*(.*?)\s*$/) { + + next if ($skip); + + my $lvalue = $1; + my $rvalue = $2; + + if ($default || $lvalue =~ /\[\d+\]$/) { + set_eval($lvalue, $rvalue, $name); + } else { + my $val = "$lvalue\[$test_num\]"; + set_eval($val, $rvalue, $name); + } + } elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) { next if ($skip); @@ -1016,7 +1139,7 @@ sub read_config { } sub __eval_option { - my ($option, $i) = @_; + my ($name, $option, $i) = @_; # Add space to evaluate the character before $ $option = " $option"; @@ -1048,7 +1171,11 @@ sub __eval_option { my $o = "$var\[$i\]"; my $parento = "$var\[$parent\]"; - if (defined($opt{$o})) { + # If a variable contains itself, use the default var + if (($var eq $name) && defined($opt{$var})) { + $o = $opt{$var}; + $retval = "$retval$o"; + } elsif (defined($opt{$o})) { $o = $opt{$o}; $retval = "$retval$o"; } elsif ($repeated && defined($opt{$parento})) { @@ -1057,6 +1184,10 @@ sub __eval_option { } elsif (defined($opt{$var})) { $o = $opt{$var}; $retval = "$retval$o"; + } elsif ($var eq "KERNEL_VERSION" && defined($make)) { + # special option KERNEL_VERSION uses kernel version + get_version(); + $retval = "$retval$version"; } else { $retval = "$retval\$\{$var\}"; } @@ -1071,8 +1202,35 @@ sub __eval_option { return $retval; } +sub process_evals { + my ($name, $option, $i) = @_; + + my $option_name = "$name\[$i\]"; + my $ev; + + my $old_option = $option; + + if (defined($evals{$option_name})) { + $ev = $evals{$option_name}; + } elsif (defined($evals{$name})) { + $ev = $evals{$name}; + } else { + return $option; + } + + for my $e (@{$ev}) { + eval "\$option =~ $e"; + } + + if ($option ne $old_option) { + doprint("$name changed from '$old_option' to '$option'\n"); + } + + return $option; +} + sub eval_option { - my ($option, $i) = @_; + my ($name, $option, $i) = @_; my 
$prev = ""; @@ -1088,31 +1246,12 @@ sub eval_option { "Check for recursive variables\n"; } $prev = $option; - $option = __eval_option($option, $i); + $option = __eval_option($name, $option, $i); } - return $option; -} - -sub _logit { - if (defined($opt{"LOG_FILE"})) { - open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; - print OUT @_; - close(OUT); - } -} + $option = process_evals($name, $option, $i); -sub logit { - if (defined($opt{"LOG_FILE"})) { - _logit @_; - } else { - print @_; - } -} - -sub doprint { - print @_; - _logit @_; + return $option; } sub run_command; @@ -1123,6 +1262,9 @@ sub wait_for_monitor; sub reboot { my ($time) = @_; + # Make sure everything has been written to disk + run_ssh("sync"); + if (defined($time)) { start_monitor; # flush out current monitor @@ -1142,11 +1284,24 @@ sub reboot { } if (defined($time)) { - if (wait_for_monitor($time, $reboot_success_line)) { + + # We only want to get to the new kernel, don't fail + # if we stumble over a call trace. + my $save_ignore_errors = $ignore_errors; + $ignore_errors = 1; + + # Look for the good kernel to boot + if (wait_for_monitor($time, "Linux version")) { # reboot got stuck? doprint "Reboot did not finish. Forcing power cycle\n"; run_command "$power_cycle"; } + + $ignore_errors = $save_ignore_errors; + + # Still need to wait for the reboot to finish + wait_for_monitor($time, $reboot_success_line); + end_monitor; } } @@ -1211,7 +1366,7 @@ sub close_console { my ($fp, $pid) = @_; doprint "kill child process $pid\n"; - kill 2, $pid; + kill $close_console_signal, $pid; print "closing!\n"; close($fp); @@ -1230,6 +1385,7 @@ sub start_monitor { } sub end_monitor { + return if (!defined $console); if (--$monitor_cnt) { return; } @@ -1452,12 +1608,54 @@ sub run_scp_mod { return run_scp($src, $dst, $cp_scp); } +sub get_grub2_index { + + return if (defined($grub_number) && defined($last_grub_menu) && + $last_grub_menu eq $grub_menu && defined($last_machine) && + $last_machine eq $machine); + + doprint "Find grub2 menu ... "; + $grub_number = -1; + + my $ssh_grub = $ssh_exec; + $ssh_grub =~ s,\$SSH_COMMAND,cat $grub_file,g; + + open(IN, "$ssh_grub |") + or die "unable to get $grub_file"; + + my $found = 0; + + while (<IN>) { + if (/^menuentry.*$grub_menu/) { + $grub_number++; + $found = 1; + last; + } elsif (/^menuentry\s/) { + $grub_number++; + } + } + close(IN); + + die "Could not find '$grub_menu' in $grub_file on $machine" + if (!$found); + doprint "$grub_number\n"; + $last_grub_menu = $grub_menu; + $last_machine = $machine; +} + sub get_grub_index { + if ($reboot_type eq "grub2") { + get_grub2_index; + return; + } + if ($reboot_type ne "grub") { return; } - return if (defined($grub_number)); + return if (defined($grub_number) && defined($last_grub_menu) && + $last_grub_menu eq $grub_menu && defined($last_machine) && + $last_machine eq $machine); doprint "Find grub menu ... 
"; $grub_number = -1; @@ -1484,6 +1682,8 @@ sub get_grub_index { die "Could not find '$grub_menu' in /boot/grub/menu on $machine" if (!$found); doprint "$grub_number\n"; + $last_grub_menu = $grub_menu; + $last_machine = $machine; } sub wait_for_input @@ -1500,7 +1700,7 @@ sub wait_for_input $rin = ''; vec($rin, fileno($fp), 1) = 1; - $ready = select($rin, undef, undef, $time); + ($ready, $time) = select($rin, undef, undef, $time); $line = ""; @@ -1524,6 +1724,10 @@ sub reboot_to { if ($reboot_type eq "grub") { run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; + } elsif ($reboot_type eq "grub2") { + run_ssh "$grub_reboot $grub_number"; + } elsif ($reboot_type eq "syslinux") { + run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path"; } elsif (defined $reboot_script) { run_command "$reboot_script"; } @@ -1662,7 +1866,7 @@ sub monitor { # We already booted into the kernel we are testing, # but now we booted into another kernel? # Consider this a triple fault. - doprint "Aleady booted in Linux kernel $version, but now\n"; + doprint "Already booted in Linux kernel $version, but now\n"; doprint "we booted into Linux kernel $1.\n"; doprint "Assuming that this is a triple fault.\n"; doprint "To disable this: set DETECT_TRIPLE_FAULT to 0\n"; @@ -1718,6 +1922,14 @@ sub do_post_install { dodie "Failed to run post install"; } +# Sometimes the reboot fails, and will hang. We try to ssh to the box +# and if we fail, we force another reboot, that should powercycle it. +sub test_booted { + if (!run_ssh "echo testing connection") { + reboot $sleep_time; + } +} + sub install { return if ($no_install); @@ -1730,6 +1942,8 @@ sub install { my $cp_target = eval_kernel_version $target_image; + test_booted; + run_scp_install "$outputdir/$build_target", "$cp_target" or dodie "failed to copy image"; @@ -1792,23 +2006,102 @@ sub get_version { sub start_monitor_and_boot { # Make sure the stable kernel has finished booting - start_monitor; - wait_for_monitor 5; - end_monitor; + + # Install bisects, don't need console + if (defined $console) { + start_monitor; + wait_for_monitor 5; + end_monitor; + } get_grub_index; get_version; install; - start_monitor; + start_monitor if (defined $console); return monitor; } +my $check_build_re = ".*:.*(warning|error|Error):.*"; +my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})"; + +sub process_warning_line { + my ($line) = @_; + + chomp $line; + + # for distcc heterogeneous systems, some compilers + # do things differently causing warning lines + # to be slightly different. This makes an attempt + # to fixe those issues. + + # chop off the index into the line + # using distcc, some compilers give different indexes + # depending on white space + $line =~ s/^(\s*\S+:\d+:)\d+/$1/; + + # Some compilers use UTF-8 extended for quotes and some don't. + $line =~ s/$utf8_quote/'/g; + + return $line; +} + +# Read buildlog and check against warnings file for any +# new warnings. +# +# Returns 1 if OK +# 0 otherwise sub check_buildlog { + return 1 if (!defined $warnings_file); + + my %warnings_list; + + # Failed builds should not reboot the target + my $save_no_reboot = $no_reboot; + $no_reboot = 1; + + if (-f $warnings_file) { + open(IN, $warnings_file) or + dodie "Error opening $warnings_file"; + + while (<IN>) { + if (/$check_build_re/) { + my $warning = process_warning_line $_; + + $warnings_list{$warning} = 1; + } + } + close(IN); + } + + # If warnings file didn't exist, and WARNINGS_FILE exist, + # then we fail on any warning! 
+ + open(IN, $buildlog) or dodie "Can't open $buildlog"; + while (<IN>) { + if (/$check_build_re/) { + my $warning = process_warning_line $_; + + if (!defined $warnings_list{$warning}) { + fail "New warning found (not in $warnings_file)\n$_\n"; + $no_reboot = $save_no_reboot; + return 0; + } + } + } + $no_reboot = $save_no_reboot; + close(IN); +} + +sub check_patch_buildlog { my ($patch) = @_; my @files = `git show $patch | diffstat -l`; + foreach my $file (@files) { + chomp $file; + } + open(IN, "git show $patch |") or dodie "failed to show $patch"; while (<IN>) { @@ -1877,10 +2170,14 @@ sub make_oldconfig { if (!run_command "$make olddefconfig") { # Perhaps olddefconfig doesn't exist in this version of the kernel - # try a yes '' | oldconfig - doprint "olddefconfig failed, trying yes '' | make oldconfig\n"; - run_command "yes '' | $make oldconfig" or - dodie "failed make config oldconfig"; + # try oldnoconfig + doprint "olddefconfig failed, trying make oldnoconfig\n"; + if (!run_command "$make oldnoconfig") { + doprint "oldnoconfig failed, trying yes '' | make oldconfig\n"; + # try a yes '' | oldconfig + run_command "yes '' | $make oldconfig" or + dodie "failed make config oldconfig"; + } } } @@ -2290,12 +2587,29 @@ sub run_bisect { $buildtype = "useconfig:$minconfig"; } - my $ret = run_bisect_test $type, $buildtype; + # If the user sets bisect_tries to less than 1, then no tries + # is a success. + my $ret = 1; - if ($bisect_manual) { + # Still let the user manually decide that though. + if ($bisect_tries < 1 && $bisect_manual) { $ret = answer_bisect; } + for (my $i = 0; $i < $bisect_tries; $i++) { + if ($bisect_tries > 1) { + my $t = $i + 1; + doprint("Running bisect trial $t of $bisect_tries:\n"); + } + $ret = run_bisect_test $type, $buildtype; + + if ($bisect_manual) { + $ret = answer_bisect; + } + + last if (!$ret); + } + # Are we looking for where it worked, not failed? if ($reverse_bisect && $ret >= 0) { $ret = !$ret; @@ -2952,11 +3266,13 @@ sub patchcheck { build "oldconfig" or return 0; } - - if (!defined($ignored_warnings{$sha1})) { - check_buildlog $sha1 or return 0; + # No need to do per patch checking if warnings file exists + if (!defined($warnings_file) && !defined($ignored_warnings{$sha1})) { + check_patch_buildlog $sha1 or return 0; } + check_buildlog or return 0; + next if ($type eq "build"); my $failed = 0; @@ -3514,6 +3830,39 @@ sub make_min_config { return 1; } +sub make_warnings_file { + my ($i) = @_; + + if (!defined($warnings_file)) { + dodie "Must define WARNINGS_FILE for make_warnings_file test"; + } + + if ($build_type eq "nobuild") { + dodie "BUILD_TYPE can not be 'nobuild' for make_warnings_file test"; + } + + build $build_type or dodie "Failed to build"; + + open(OUT, ">$warnings_file") or dodie "Can't create $warnings_file"; + + open(IN, $buildlog) or dodie "Can't open $buildlog"; + while (<IN>) { + + # Some compilers use UTF-8 extended for quotes + # for distcc heterogeneous systems, this causes issues + s/$utf8_quote/'/g; + + if (/$check_build_re/) { + print OUT; + } + } + close(IN); + + close(OUT); + + success $i; +} + $#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl config-file\n"; if ($#ARGV == 0) { @@ -3559,7 +3908,7 @@ EOF read_config $ktest_config; if (defined($opt{"LOG_FILE"})) { - $opt{"LOG_FILE"} = eval_option($opt{"LOG_FILE"}, -1); + $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1); } # Append any configs entered in manually to the config file. 
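The WARNINGS_FILE machinery added in the hunks above amounts to a baseline-and-compare step: make_warnings_file records every build-log line that matches the warning/error pattern, and check_buildlog later fails the build if it sees a matching line that is not in that baseline (after stripping column numbers and normalizing UTF-8 quotes). A rough shell analogue of that flow, assuming a build log named "buildlog" and a baseline named "warnings_file" (placeholder names, and without the normalization ktest.pl applies):

    # baseline: what TEST_TYPE = make_warnings_file effectively records
    grep -E '.*:.*(warning|error|Error):.*' buildlog > warnings_file

    # later builds: any matching line not already in the baseline is a new warning
    grep -E '.*:.*(warning|error|Error):.*' buildlog \
        | grep -vxFf warnings_file && echo "new warning found"
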
@@ -3636,7 +3985,7 @@ sub set_test_option { my $option = __set_test_option($name, $i); return $option if (!defined($option)); - return eval_option($option, $i); + return eval_option($name, $option, $i); } # First we need to do is the builds @@ -3654,6 +4003,18 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { my $makecmd = set_test_option("MAKE_CMD", $i); + $outputdir = set_test_option("OUTPUT_DIR", $i); + $builddir = set_test_option("BUILD_DIR", $i); + + chdir $builddir || die "can't change directory to $builddir"; + + if (!-d $outputdir) { + mkpath($outputdir) or + die "can't create $outputdir"; + } + + $make = "$makecmd O=$outputdir"; + # Load all the options into their mapped variable names foreach my $opt (keys %option_map) { ${$option_map{$opt}} = set_test_option($opt, $i); @@ -3678,13 +4039,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $start_minconfig = $minconfig; } - chdir $builddir || die "can't change directory to $builddir"; - - foreach my $dir ($tmpdir, $outputdir) { - if (!-d $dir) { - mkpath($dir) or - die "can't create $dir"; - } + if (!-d $tmpdir) { + mkpath($tmpdir) or + die "can't create $tmpdir"; } $ENV{"SSH_USER"} = $ssh_user; @@ -3693,13 +4050,17 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $buildlog = "$tmpdir/buildlog-$machine"; $testlog = "$tmpdir/testlog-$machine"; $dmesg = "$tmpdir/dmesg-$machine"; - $make = "$makecmd O=$outputdir"; $output_config = "$outputdir/.config"; if (!$buildonly) { $target = "$ssh_user\@$machine"; if ($reboot_type eq "grub") { dodie "GRUB_MENU not defined" if (!defined($grub_menu)); + } elsif ($reboot_type eq "grub2") { + dodie "GRUB_MENU not defined" if (!defined($grub_menu)); + dodie "GRUB_FILE not defined" if (!defined($grub_file)); + } elsif ($reboot_type eq "syslinux") { + dodie "SYSLINUX_LABEL not defined" if (!defined($syslinux_label)); } } @@ -3710,9 +4071,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $run_type = $bisect_type; } elsif ($test_type eq "config_bisect") { $run_type = $config_bisect_type; - } - - if ($test_type eq "make_min_config") { + } elsif ($test_type eq "make_min_config") { + $run_type = ""; + } elsif ($test_type eq "make_warnings_file") { $run_type = ""; } @@ -3769,10 +4130,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { } elsif ($test_type eq "make_min_config") { make_min_config $i; next; + } elsif ($test_type eq "make_warnings_file") { + $no_reboot = 1; + make_warnings_file $i; + next; } if ($build_type ne "nobuild") { build $build_type or next; + check_buildlog or next; } if ($test_type eq "install") { diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index de28a0a3b8f..172eec4517f 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -328,12 +328,29 @@ # For a virtual machine with guest name "Guest". #CONSOLE = virsh console Guest +# Signal to send to kill console. +# ktest.pl will create a child process to monitor the console. +# When the console is finished, ktest will kill the child process +# with this signal. +# (default INT) +#CLOSE_CONSOLE_SIGNAL = HUP + # Required version ending to differentiate the test # from other linux builds on the system. #LOCALVERSION = -test +# For REBOOT_TYPE = grub2, you must specify where the grub.cfg +# file is. This is the file that is searched to find the menu +# option to boot to with GRUB_REBOOT +#GRUB_FILE = /boot/grub2/grub.cfg + +# The tool for REBOOT_TYPE = grub2 to set the next reboot kernel +# to boot into (one shot mode). 
+# (default grub2_reboot) +#GRUB_REBOOT = grub2_reboot + # The grub title name for the test kernel to boot -# (Only mandatory if REBOOT_TYPE = grub) +# (Only mandatory if REBOOT_TYPE = grub or grub2) # # Note, ktest.pl will not update the grub menu.lst, you need to # manually add an option for the test. ktest.pl will search @@ -343,8 +360,33 @@ # For example, if in the /boot/grub/menu.lst the test kernel title has: # title Test Kernel # kernel vmlinuz-test +# +# For grub2, a search of top level "menuentry"s are done. No +# submenu is searched. The menu is found by searching for the +# contents of GRUB_MENU in the line that starts with "menuentry". +# You may want to include the quotes around the option. For example: +# for: menuentry 'Test Kernel' +# do a: GRUB_MENU = 'Test Kernel' +# For customizing, add your entry in /etc/grub.d/40_custom. +# #GRUB_MENU = Test Kernel +# For REBOOT_TYPE = syslinux, the name of the syslinux executable +# (on the target) to use to set up the next reboot to boot the +# test kernel. +# (default extlinux) +#SYSLINUX = syslinux + +# For REBOOT_TYPE = syslinux, the path that is passed to to the +# syslinux command where syslinux is installed. +# (default /boot/extlinux) +#SYSLINUX_PATH = /boot/syslinux + +# For REBOOT_TYPE = syslinux, the syslinux label that references the +# test kernel in the syslinux config file. +# (default undefined) +#SYSLINUX_LABEL = "test-kernel" + # A script to reboot the target into the test kernel # This and SWITCH_TO_TEST are about the same, except # SWITCH_TO_TEST is run even for REBOOT_TYPE = grub. @@ -497,7 +539,7 @@ #POST_BUILD_DIE = 1 # Way to reboot the box to the test kernel. -# Only valid options so far are "grub" and "script" +# Only valid options so far are "grub", "grub2", "syslinux" and "script" # (default grub) # If you specify grub, it will assume grub version 1 # and will search in /boot/grub/menu.lst for the title $GRUB_MENU @@ -505,6 +547,13 @@ # your setup, then specify "script" and have a command or script # specified in REBOOT_SCRIPT to boot to the target. # +# For REBOOT_TYPE = grub2, you must define both GRUB_MENU and +# GRUB_FILE. +# +# For REBOOT_TYPE = syslinux, you must define SYSLINUX_LABEL, and +# perhaps modify SYSLINUX (default extlinux) and SYSLINUX_PATH +# (default /boot/extlinux) +# # The entry in /boot/grub/menu.lst must be entered in manually. # The test will not modify that file. #REBOOT_TYPE = grub @@ -751,6 +800,20 @@ # Example for a virtual guest call "Guest". #POWER_OFF = virsh destroy Guest +# To have the build fail on "new" warnings, create a file that +# contains a list of all known warnings (they must match exactly +# to the line with 'warning:', 'error:' or 'Error:'. If the option +# WARNINGS_FILE is set, then that file will be read, and if the +# build detects a warning, it will examine this file and if the +# warning does not exist in it, it will fail the build. +# +# Note, if this option is defined to a file that does not exist +# then any warning will fail the build. +# (see make_warnings_file below) +# +# (optional, default undefined) +#WARNINGS_FILE = ${OUTPUT_DIR}/warnings_file + # The way to execute a command on the target # (default ssh $SSH_USER@$MACHINE $SSH_COMMAND";) # The variables SSH_USER, MACHINE and SSH_COMMAND are defined @@ -965,6 +1028,20 @@ # BISECT_BAD with BISECT_CHECK = good or # BISECT_CHECK = bad, respectively. # +# BISECT_TRIES = 5 (optional, default 1) +# +# For those cases that it takes several tries to hit a bug, +# the BISECT_TRIES is useful. 
It is the number of times the +# test is ran before it says the kernel is good. The first failure +# will stop trying and mark the current SHA1 as bad. +# +# Note, as with all race bugs, there's no guarantee that if +# it succeeds, it is really a good bisect. But it helps in case +# the bug is some what reliable. +# +# You can set BISECT_TRIES to zero, and all tests will be considered +# good, unless you also set BISECT_MANUAL. +# # BISECT_RET_GOOD = 0 (optional, default undefined) # # In case the specificed test returns something other than just @@ -1180,3 +1257,33 @@ # MIN_CONFIG_TYPE = test # TEST = ssh ${USER}@${MACHINE} echo hi # +# +# +# +# For TEST_TYPE = make_warnings_file +# +# If you want the build to fail when a new warning is discovered +# you set the WARNINGS_FILE to point to a file of known warnings. +# +# The test "make_warnings_file" will let you create a new warnings +# file before you run other tests, like patchcheck. +# +# What this test does is to run just a build, you still need to +# specify BUILD_TYPE to tell the test what type of config to use. +# A BUILD_TYPE of nobuild will fail this test. +# +# The test will do the build and scan for all warnings. Any warning +# it discovers will be saved in the WARNINGS_FILE (required) option. +# +# It is recommended (but not necessary) to make sure BUILD_NOCLEAN is +# off, so that a full build is done (make mrproper is performed). +# That way, all warnings will be captured. +# +# Example: +# +# TEST_TYPE = make_warnings_file +# WARNINGS_FILE = ${OUTPUT_DIR} +# BUILD_TYPE = useconfig:oldconfig +# CHECKOUT = v3.8 +# BUILD_NOCLEAN = 0 +# diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 43480149119..e66e710cc59 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -1,4 +1,16 @@ -TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug epoll +TARGETS = breakpoints +TARGETS += cpu-hotplug +TARGETS += efivarfs +TARGETS += kcmp +TARGETS += memory-hotplug +TARGETS += mqueue +TARGETS += net +TARGETS += ptrace +TARGETS += timers +TARGETS += vm +TARGETS += powerpc +TARGETS += user +TARGETS += sysctl all: for TARGET in $(TARGETS); do \ diff --git a/tools/testing/selftests/README.txt b/tools/testing/selftests/README.txt new file mode 100644 index 00000000000..5e2faf9c55d --- /dev/null +++ b/tools/testing/selftests/README.txt @@ -0,0 +1,42 @@ +Linux Kernel Selftests + +The kernel contains a set of "self tests" under the tools/testing/selftests/ +directory. These are intended to be small unit tests to exercise individual +code paths in the kernel. + +Running the selftests +===================== + +To build the tests: + + $ make -C tools/testing/selftests + + +To run the tests: + + $ make -C tools/testing/selftests run_tests + +- note that some tests will require root privileges. + + +To run only tests targetted for a single subsystem: + + $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests + +See the top-level tools/testing/selftests/Makefile for the list of all possible +targets. + + +Contributing new tests +====================== + +In general, the rules for for selftests are + + * Do as much as you can if you're not root; + + * Don't take too long; + + * Don't break the build on any architecture, and + + * Don't cause the top-level "make run_tests" to fail if your feature is + unconfigured. 
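The selftest Makefiles below converge on one run_tests convention: run the test quietly and print a bracketed marker on failure rather than letting the non-zero exit status abort the recipe. Each recipe effectively executes something like the following (a sketch only; "foo" is a placeholder test name, not one from this patch):

    ./foo_test         || echo "foo selftests: [FAIL]"
    /bin/bash ./foo.sh || echo "foo selftests: [FAIL]"

A failing or unconfigured test therefore reports [FAIL] on the console while the make target itself still succeeds, which keeps the top-level "make run_tests" walking through the remaining TARGETS, in line with the last rule in the README above.
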
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile index 931278035f5..e18b42b254a 100644 --- a/tools/testing/selftests/breakpoints/Makefile +++ b/tools/testing/selftests/breakpoints/Makefile @@ -17,7 +17,7 @@ else endif run_tests: - ./breakpoint_test + @./breakpoint_test || echo "breakpoints selftests: [FAIL]" clean: rm -fr breakpoint_test diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile index 7c9c20ff578..790c23a9db4 100644 --- a/tools/testing/selftests/cpu-hotplug/Makefile +++ b/tools/testing/selftests/cpu-hotplug/Makefile @@ -1,6 +1,6 @@ all: run_tests: - ./on-off-test.sh + @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" clean: diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile new file mode 100644 index 00000000000..29e8c6bc81b --- /dev/null +++ b/tools/testing/selftests/efivarfs/Makefile @@ -0,0 +1,12 @@ +CC = $(CROSS_COMPILE)gcc +CFLAGS = -Wall + +test_objs = open-unlink create-read + +all: $(test_objs) + +run_tests: all + @/bin/bash ./efivarfs.sh || echo "efivarfs selftests: [FAIL]" + +clean: + rm -f $(test_objs) diff --git a/tools/testing/selftests/efivarfs/create-read.c b/tools/testing/selftests/efivarfs/create-read.c new file mode 100644 index 00000000000..7feef188096 --- /dev/null +++ b/tools/testing/selftests/efivarfs/create-read.c @@ -0,0 +1,38 @@ +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <errno.h> +#include <string.h> + +int main(int argc, char **argv) +{ + const char *path; + char buf[4]; + int fd, rc; + + if (argc < 2) { + fprintf(stderr, "usage: %s <path>\n", argv[0]); + return EXIT_FAILURE; + } + + path = argv[1]; + + /* create a test variable */ + fd = open(path, O_RDWR | O_CREAT, 0600); + if (fd < 0) { + perror("open(O_WRONLY)"); + return EXIT_FAILURE; + } + + rc = read(fd, buf, sizeof(buf)); + if (rc != 0) { + fprintf(stderr, "Reading a new var should return EOF\n"); + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh new file mode 100644 index 00000000000..77edcdcc016 --- /dev/null +++ b/tools/testing/selftests/efivarfs/efivarfs.sh @@ -0,0 +1,198 @@ +#!/bin/bash + +efivarfs_mount=/sys/firmware/efi/efivars +test_guid=210be57c-9849-4fc7-a635-e6382d1aec27 + +check_prereqs() +{ + local msg="skip all tests:" + + if [ $UID != 0 ]; then + echo $msg must be run as root >&2 + exit 0 + fi + + if ! grep -q "^\S\+ $efivarfs_mount efivarfs" /proc/mounts; then + echo $msg efivarfs is not mounted on $efivarfs_mount >&2 + exit 0 + fi +} + +run_test() +{ + local test="$1" + + echo "--------------------" + echo "running $test" + echo "--------------------" + + if [ "$(type -t $test)" = 'function' ]; then + ( $test ) + else + ( ./$test ) + fi + + if [ $? -ne 0 ]; then + echo " [FAIL]" + rc=1 + else + echo " [PASS]" + fi +} + +test_create() +{ + local attrs='\x07\x00\x00\x00' + local file=$efivarfs_mount/$FUNCNAME-$test_guid + + printf "$attrs\x00" > $file + + if [ ! -e $file ]; then + echo "$file couldn't be created" >&2 + exit 1 + fi + + if [ $(stat -c %s $file) -ne 5 ]; then + echo "$file has invalid size" >&2 + exit 1 + fi +} + +test_create_empty() +{ + local file=$efivarfs_mount/$FUNCNAME-$test_guid + + : > $file + + if [ ! 
-e $file ]; then + echo "$file can not be created without writing" >&2 + exit 1 + fi +} + +test_create_read() +{ + local file=$efivarfs_mount/$FUNCNAME-$test_guid + ./create-read $file +} + +test_delete() +{ + local attrs='\x07\x00\x00\x00' + local file=$efivarfs_mount/$FUNCNAME-$test_guid + + printf "$attrs\x00" > $file + + if [ ! -e $file ]; then + echo "$file couldn't be created" >&2 + exit 1 + fi + + rm $file + + if [ -e $file ]; then + echo "$file couldn't be deleted" >&2 + exit 1 + fi + +} + +# test that we can remove a variable by issuing a write with only +# attributes specified +test_zero_size_delete() +{ + local attrs='\x07\x00\x00\x00' + local file=$efivarfs_mount/$FUNCNAME-$test_guid + + printf "$attrs\x00" > $file + + if [ ! -e $file ]; then + echo "$file does not exist" >&2 + exit 1 + fi + + printf "$attrs" > $file + + if [ -e $file ]; then + echo "$file should have been deleted" >&2 + exit 1 + fi +} + +test_open_unlink() +{ + local file=$efivarfs_mount/$FUNCNAME-$test_guid + ./open-unlink $file +} + +# test that we can create a range of filenames +test_valid_filenames() +{ + local attrs='\x07\x00\x00\x00' + local ret=0 + + local file_list="abc dump-type0-11-1-1362436005 1234 -" + for f in $file_list; do + local file=$efivarfs_mount/$f-$test_guid + + printf "$attrs\x00" > $file + + if [ ! -e $file ]; then + echo "$file could not be created" >&2 + ret=1 + else + rm $file + fi + done + + exit $ret +} + +test_invalid_filenames() +{ + local attrs='\x07\x00\x00\x00' + local ret=0 + + local file_list=" + -1234-1234-1234-123456789abc + foo + foo-bar + -foo- + foo-barbazba-foob-foob-foob-foobarbazfoo + foo------------------------------------- + -12345678-1234-1234-1234-123456789abc + a-12345678=1234-1234-1234-123456789abc + a-12345678-1234=1234-1234-123456789abc + a-12345678-1234-1234=1234-123456789abc + a-12345678-1234-1234-1234=123456789abc + 1112345678-1234-1234-1234-123456789abc" + + for f in $file_list; do + local file=$efivarfs_mount/$f + + printf "$attrs\x00" 2>/dev/null > $file + + if [ -e $file ]; then + echo "Creating $file should have failed" >&2 + rm $file + ret=1 + fi + done + + exit $ret +} + +check_prereqs + +rc=0 + +run_test test_create +run_test test_create_empty +run_test test_create_read +run_test test_delete +run_test test_zero_size_delete +run_test test_open_unlink +run_test test_valid_filenames +run_test test_invalid_filenames + +exit $rc diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c new file mode 100644 index 00000000000..8c0764407b3 --- /dev/null +++ b/tools/testing/selftests/efivarfs/open-unlink.c @@ -0,0 +1,63 @@ +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> + +int main(int argc, char **argv) +{ + const char *path; + char buf[5]; + int fd, rc; + + if (argc < 2) { + fprintf(stderr, "usage: %s <path>\n", argv[0]); + return EXIT_FAILURE; + } + + path = argv[1]; + + /* attributes: EFI_VARIABLE_NON_VOLATILE | + * EFI_VARIABLE_BOOTSERVICE_ACCESS | + * EFI_VARIABLE_RUNTIME_ACCESS + */ + *(uint32_t *)buf = 0x7; + buf[4] = 0; + + /* create a test variable */ + fd = open(path, O_WRONLY | O_CREAT); + if (fd < 0) { + perror("open(O_WRONLY)"); + return EXIT_FAILURE; + } + + rc = write(fd, buf, sizeof(buf)); + if (rc != sizeof(buf)) { + perror("write"); + return EXIT_FAILURE; + } + + close(fd); + + fd = open(path, O_RDONLY); + if (fd < 0) { + perror("open"); + return EXIT_FAILURE; + } + + if (unlink(path) < 
0) { + perror("unlink"); + return EXIT_FAILURE; + } + + rc = read(fd, buf, sizeof(buf)); + if (rc > 0) { + fprintf(stderr, "reading from an unlinked variable " + "shouldn't be possible\n"); + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} diff --git a/tools/testing/selftests/epoll/Makefile b/tools/testing/selftests/epoll/Makefile deleted file mode 100644 index 19806ed62f5..00000000000 --- a/tools/testing/selftests/epoll/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# Makefile for epoll selftests - -all: test_epoll -%: %.c - gcc -pthread -g -o $@ $^ - -run_tests: all - ./test_epoll - -clean: - $(RM) test_epoll diff --git a/tools/testing/selftests/epoll/test_epoll.c b/tools/testing/selftests/epoll/test_epoll.c deleted file mode 100644 index f7525392ce8..00000000000 --- a/tools/testing/selftests/epoll/test_epoll.c +++ /dev/null @@ -1,344 +0,0 @@ -/* - * tools/testing/selftests/epoll/test_epoll.c - * - * Copyright 2012 Adobe Systems Incorporated - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * Paton J. Lewis <palewis@adobe.com> - * - */ - -#include <errno.h> -#include <fcntl.h> -#include <pthread.h> -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> -#include <sys/epoll.h> -#include <sys/socket.h> - -/* - * A pointer to an epoll_item_private structure will be stored in the epoll - * item's event structure so that we can get access to the epoll_item_private - * data after calling epoll_wait: - */ -struct epoll_item_private { - int index; /* Position of this struct within the epoll_items array. */ - int fd; - uint32_t events; - pthread_mutex_t mutex; /* Guards the following variables... */ - int stop; - int status; /* Stores any error encountered while handling item. */ - /* The following variable allows us to test whether we have encountered - a problem while attempting to cancel and delete the associated - event. When the test program exits, 'deleted' should be exactly - one. If it is greater than one, then the failed test reflects a real - world situation where we would have tried to access the epoll item's - private data after deleting it: */ - int deleted; -}; - -struct epoll_item_private *epoll_items; - -/* - * Delete the specified item from the epoll set. In a real-world secneario this - * is where we would free the associated data structure, but in this testing - * environment we retain the structure so that we can test for double-deletion: - */ -void delete_item(int index) -{ - __sync_fetch_and_add(&epoll_items[index].deleted, 1); -} - -/* - * A pointer to a read_thread_data structure will be passed as the argument to - * each read thread: - */ -struct read_thread_data { - int stop; - int status; /* Indicates any error encountered by the read thread. */ - int epoll_set; -}; - -/* - * The function executed by the read threads: - */ -void *read_thread_function(void *function_data) -{ - struct read_thread_data *thread_data = - (struct read_thread_data *)function_data; - struct epoll_event event_data; - struct epoll_item_private *item_data; - char socket_data; - - /* Handle events until we encounter an error or this thread's 'stop' - condition is set: */ - while (1) { - int result = epoll_wait(thread_data->epoll_set, - &event_data, - 1, /* Number of desired events */ - 1000); /* Timeout in ms */ - if (result < 0) { - /* Breakpoints signal all threads. 
Ignore that while - debugging: */ - if (errno == EINTR) - continue; - thread_data->status = errno; - return 0; - } else if (thread_data->stop) - return 0; - else if (result == 0) /* Timeout */ - continue; - - /* We need the mutex here because checking for the stop - condition and re-enabling the epoll item need to be done - together as one atomic operation when EPOLL_CTL_DISABLE is - available: */ - item_data = (struct epoll_item_private *)event_data.data.ptr; - pthread_mutex_lock(&item_data->mutex); - - /* Remove the item from the epoll set if we want to stop - handling that event: */ - if (item_data->stop) - delete_item(item_data->index); - else { - /* Clear the data that was written to the other end of - our non-blocking socket: */ - do { - if (read(item_data->fd, &socket_data, 1) < 1) { - if ((errno == EAGAIN) || - (errno == EWOULDBLOCK)) - break; - else - goto error_unlock; - } - } while (item_data->events & EPOLLET); - - /* The item was one-shot, so re-enable it: */ - event_data.events = item_data->events; - if (epoll_ctl(thread_data->epoll_set, - EPOLL_CTL_MOD, - item_data->fd, - &event_data) < 0) - goto error_unlock; - } - - pthread_mutex_unlock(&item_data->mutex); - } - -error_unlock: - thread_data->status = item_data->status = errno; - pthread_mutex_unlock(&item_data->mutex); - return 0; -} - -/* - * A pointer to a write_thread_data structure will be passed as the argument to - * the write thread: - */ -struct write_thread_data { - int stop; - int status; /* Indicates any error encountered by the write thread. */ - int n_fds; - int *fds; -}; - -/* - * The function executed by the write thread. It writes a single byte to each - * socket in turn until the stop condition for this thread is set. If writing to - * a socket would block (i.e. errno was EAGAIN), we leave that socket alone for - * the moment and just move on to the next socket in the list. We don't care - * about the order in which we deliver events to the epoll set. 
In fact we don't - * care about the data we're writing to the pipes at all; we just want to - * trigger epoll events: - */ -void *write_thread_function(void *function_data) -{ - const char data = 'X'; - int index; - struct write_thread_data *thread_data = - (struct write_thread_data *)function_data; - while (!thread_data->stop) - for (index = 0; - !thread_data->stop && (index < thread_data->n_fds); - ++index) - if ((write(thread_data->fds[index], &data, 1) < 1) && - (errno != EAGAIN) && - (errno != EWOULDBLOCK)) { - thread_data->status = errno; - return; - } -} - -/* - * Arguments are currently ignored: - */ -int main(int argc, char **argv) -{ - const int n_read_threads = 100; - const int n_epoll_items = 500; - int index; - int epoll_set = epoll_create1(0); - struct write_thread_data write_thread_data = { - 0, 0, n_epoll_items, malloc(n_epoll_items * sizeof(int)) - }; - struct read_thread_data *read_thread_data = - malloc(n_read_threads * sizeof(struct read_thread_data)); - pthread_t *read_threads = malloc(n_read_threads * sizeof(pthread_t)); - pthread_t write_thread; - - printf("-----------------\n"); - printf("Runing test_epoll\n"); - printf("-----------------\n"); - - epoll_items = malloc(n_epoll_items * sizeof(struct epoll_item_private)); - - if (epoll_set < 0 || epoll_items == 0 || write_thread_data.fds == 0 || - read_thread_data == 0 || read_threads == 0) - goto error; - - if (sysconf(_SC_NPROCESSORS_ONLN) < 2) { - printf("Error: please run this test on a multi-core system.\n"); - goto error; - } - - /* Create the socket pairs and epoll items: */ - for (index = 0; index < n_epoll_items; ++index) { - int socket_pair[2]; - struct epoll_event event_data; - if (socketpair(AF_UNIX, - SOCK_STREAM | SOCK_NONBLOCK, - 0, - socket_pair) < 0) - goto error; - write_thread_data.fds[index] = socket_pair[0]; - epoll_items[index].index = index; - epoll_items[index].fd = socket_pair[1]; - if (pthread_mutex_init(&epoll_items[index].mutex, NULL) != 0) - goto error; - /* We always use EPOLLONESHOT because this test is currently - structured to demonstrate the need for EPOLL_CTL_DISABLE, - which only produces useful information in the EPOLLONESHOT - case (without EPOLLONESHOT, calling epoll_ctl with - EPOLL_CTL_DISABLE will never return EBUSY). If support for - testing events without EPOLLONESHOT is desired, it should - probably be implemented in a separate unit test. 
*/ - epoll_items[index].events = EPOLLIN | EPOLLONESHOT; - if (index < n_epoll_items / 2) - epoll_items[index].events |= EPOLLET; - epoll_items[index].stop = 0; - epoll_items[index].status = 0; - epoll_items[index].deleted = 0; - event_data.events = epoll_items[index].events; - event_data.data.ptr = &epoll_items[index]; - if (epoll_ctl(epoll_set, - EPOLL_CTL_ADD, - epoll_items[index].fd, - &event_data) < 0) - goto error; - } - - /* Create and start the read threads: */ - for (index = 0; index < n_read_threads; ++index) { - read_thread_data[index].stop = 0; - read_thread_data[index].status = 0; - read_thread_data[index].epoll_set = epoll_set; - if (pthread_create(&read_threads[index], - NULL, - read_thread_function, - &read_thread_data[index]) != 0) - goto error; - } - - if (pthread_create(&write_thread, - NULL, - write_thread_function, - &write_thread_data) != 0) - goto error; - - /* Cancel all event pollers: */ -#ifdef EPOLL_CTL_DISABLE - for (index = 0; index < n_epoll_items; ++index) { - pthread_mutex_lock(&epoll_items[index].mutex); - ++epoll_items[index].stop; - if (epoll_ctl(epoll_set, - EPOLL_CTL_DISABLE, - epoll_items[index].fd, - NULL) == 0) - delete_item(index); - else if (errno != EBUSY) { - pthread_mutex_unlock(&epoll_items[index].mutex); - goto error; - } - /* EBUSY means events were being handled; allow the other thread - to delete the item. */ - pthread_mutex_unlock(&epoll_items[index].mutex); - } -#else - for (index = 0; index < n_epoll_items; ++index) { - pthread_mutex_lock(&epoll_items[index].mutex); - ++epoll_items[index].stop; - pthread_mutex_unlock(&epoll_items[index].mutex); - /* Wait in case a thread running read_thread_function is - currently executing code between epoll_wait and - pthread_mutex_lock with this item. Note that a longer delay - would make double-deletion less likely (at the expense of - performance), but there is no guarantee that any delay would - ever be sufficient. Note also that we delete all event - pollers at once for testing purposes, but in a real-world - environment we are likely to want to be able to cancel event - pollers at arbitrary times. Therefore we can't improve this - situation by just splitting this loop into two loops - (i.e. signal 'stop' for all items, sleep, and then delete all - items). 
We also can't fix the problem via EPOLL_CTL_DEL - because that command can't prevent the case where some other - thread is executing read_thread_function within the region - mentioned above: */ - usleep(1); - pthread_mutex_lock(&epoll_items[index].mutex); - if (!epoll_items[index].deleted) - delete_item(index); - pthread_mutex_unlock(&epoll_items[index].mutex); - } -#endif - - /* Shut down the read threads: */ - for (index = 0; index < n_read_threads; ++index) - __sync_fetch_and_add(&read_thread_data[index].stop, 1); - for (index = 0; index < n_read_threads; ++index) { - if (pthread_join(read_threads[index], NULL) != 0) - goto error; - if (read_thread_data[index].status) - goto error; - } - - /* Shut down the write thread: */ - __sync_fetch_and_add(&write_thread_data.stop, 1); - if ((pthread_join(write_thread, NULL) != 0) || write_thread_data.status) - goto error; - - /* Check for final error conditions: */ - for (index = 0; index < n_epoll_items; ++index) { - if (epoll_items[index].status != 0) - goto error; - if (pthread_mutex_destroy(&epoll_items[index].mutex) < 0) - goto error; - } - for (index = 0; index < n_epoll_items; ++index) - if (epoll_items[index].deleted != 1) { - printf("Error: item data deleted %1d times.\n", - epoll_items[index].deleted); - goto error; - } - - printf("[PASS]\n"); - return 0; - - error: - printf("[FAIL]\n"); - return errno; -} diff --git a/tools/testing/selftests/ipc/Makefile b/tools/testing/selftests/ipc/Makefile new file mode 100644 index 00000000000..5386fd7c43a --- /dev/null +++ b/tools/testing/selftests/ipc/Makefile @@ -0,0 +1,25 @@ +uname_M := $(shell uname -m 2>/dev/null || echo not) +ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/) +ifeq ($(ARCH),i386) + ARCH := X86 + CFLAGS := -DCONFIG_X86_32 -D__i386__ +endif +ifeq ($(ARCH),x86_64) + ARCH := X86 + CFLAGS := -DCONFIG_X86_64 -D__x86_64__ +endif + +CFLAGS += -I../../../../usr/include/ + +all: +ifeq ($(ARCH),X86) + gcc $(CFLAGS) msgque.c -o msgque_test +else + echo "Not an x86 target, can't build msgque selftest" +endif + +run_tests: all + ./msgque_test + +clean: + rm -fr ./msgque_test diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c new file mode 100644 index 00000000000..552f0810bff --- /dev/null +++ b/tools/testing/selftests/ipc/msgque.c @@ -0,0 +1,252 @@ +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> +#include <linux/msg.h> +#include <fcntl.h> + +#define MAX_MSG_SIZE 32 + +struct msg1 { + int msize; + long mtype; + char mtext[MAX_MSG_SIZE]; +}; + +#define TEST_STRING "Test sysv5 msg" +#define MSG_TYPE 1 + +#define ANOTHER_TEST_STRING "Yet another test sysv5 msg" +#define ANOTHER_MSG_TYPE 26538 + +struct msgque_data { + key_t key; + int msq_id; + int qbytes; + int qnum; + int mode; + struct msg1 *messages; +}; + +int restore_queue(struct msgque_data *msgque) +{ + int fd, ret, id, i; + char buf[32]; + + fd = open("/proc/sys/kernel/msg_next_id", O_WRONLY); + if (fd == -1) { + printf("Failed to open /proc/sys/kernel/msg_next_id\n"); + return -errno; + } + sprintf(buf, "%d", msgque->msq_id); + + ret = write(fd, buf, strlen(buf)); + if (ret != strlen(buf)) { + printf("Failed to write to /proc/sys/kernel/msg_next_id\n"); + return -errno; + } + + id = msgget(msgque->key, msgque->mode | IPC_CREAT | IPC_EXCL); + if (id == -1) { + printf("Failed to create queue\n"); + return -errno; + } + + if (id != msgque->msq_id) { + printf("Restored queue has wrong id (%d instead of %d)\n", + id, msgque->msq_id); + ret = -EFAULT; + goto 
destroy; + } + + for (i = 0; i < msgque->qnum; i++) { + if (msgsnd(msgque->msq_id, &msgque->messages[i].mtype, + msgque->messages[i].msize, IPC_NOWAIT) != 0) { + printf("msgsnd failed (%m)\n"); + ret = -errno; + goto destroy; + }; + } + return 0; + +destroy: + if (msgctl(id, IPC_RMID, 0)) + printf("Failed to destroy queue: %d\n", -errno); + return ret; +} + +int check_and_destroy_queue(struct msgque_data *msgque) +{ + struct msg1 message; + int cnt = 0, ret; + + while (1) { + ret = msgrcv(msgque->msq_id, &message.mtype, MAX_MSG_SIZE, + 0, IPC_NOWAIT); + if (ret < 0) { + if (errno == ENOMSG) + break; + printf("Failed to read IPC message: %m\n"); + ret = -errno; + goto err; + } + if (ret != msgque->messages[cnt].msize) { + printf("Wrong message size: %d (expected %d)\n", ret, + msgque->messages[cnt].msize); + ret = -EINVAL; + goto err; + } + if (message.mtype != msgque->messages[cnt].mtype) { + printf("Wrong message type\n"); + ret = -EINVAL; + goto err; + } + if (memcmp(message.mtext, msgque->messages[cnt].mtext, ret)) { + printf("Wrong message content\n"); + ret = -EINVAL; + goto err; + } + cnt++; + } + + if (cnt != msgque->qnum) { + printf("Wrong message number\n"); + ret = -EINVAL; + goto err; + } + + ret = 0; +err: + if (msgctl(msgque->msq_id, IPC_RMID, 0)) { + printf("Failed to destroy queue: %d\n", -errno); + return -errno; + } + return ret; +} + +int dump_queue(struct msgque_data *msgque) +{ + struct msqid64_ds ds; + int kern_id; + int i, ret; + + for (kern_id = 0; kern_id < 256; kern_id++) { + ret = msgctl(kern_id, MSG_STAT, &ds); + if (ret < 0) { + if (errno == -EINVAL) + continue; + printf("Failed to get stats for IPC queue with id %d\n", + kern_id); + return -errno; + } + + if (ret == msgque->msq_id) + break; + } + + msgque->messages = malloc(sizeof(struct msg1) * ds.msg_qnum); + if (msgque->messages == NULL) { + printf("Failed to get stats for IPC queue\n"); + return -ENOMEM; + } + + msgque->qnum = ds.msg_qnum; + msgque->mode = ds.msg_perm.mode; + msgque->qbytes = ds.msg_qbytes; + + for (i = 0; i < msgque->qnum; i++) { + ret = msgrcv(msgque->msq_id, &msgque->messages[i].mtype, + MAX_MSG_SIZE, i, IPC_NOWAIT | MSG_COPY); + if (ret < 0) { + printf("Failed to copy IPC message: %m (%d)\n", errno); + return -errno; + } + msgque->messages[i].msize = ret; + } + return 0; +} + +int fill_msgque(struct msgque_data *msgque) +{ + struct msg1 msgbuf; + + msgbuf.mtype = MSG_TYPE; + memcpy(msgbuf.mtext, TEST_STRING, sizeof(TEST_STRING)); + if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(TEST_STRING), + IPC_NOWAIT) != 0) { + printf("First message send failed (%m)\n"); + return -errno; + }; + + msgbuf.mtype = ANOTHER_MSG_TYPE; + memcpy(msgbuf.mtext, ANOTHER_TEST_STRING, sizeof(ANOTHER_TEST_STRING)); + if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(ANOTHER_TEST_STRING), + IPC_NOWAIT) != 0) { + printf("Second message send failed (%m)\n"); + return -errno; + }; + return 0; +} + +int main(int argc, char **argv) +{ + int msg, pid, err; + struct msgque_data msgque; + + if (getuid() != 0) { + printf("Please run the test as root - Exiting.\n"); + exit(1); + } + + msgque.key = ftok(argv[0], 822155650); + if (msgque.key == -1) { + printf("Can't make key\n"); + return -errno; + } + + msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); + if (msgque.msq_id == -1) { + err = -errno; + printf("Can't create queue\n"); + goto err_out; + } + + err = fill_msgque(&msgque); + if (err) { + printf("Failed to fill queue\n"); + goto err_destroy; + } + + err = dump_queue(&msgque); + if (err) { + 
printf("Failed to dump queue\n"); + goto err_destroy; + } + + err = check_and_destroy_queue(&msgque); + if (err) { + printf("Failed to check and destroy queue\n"); + goto err_out; + } + + err = restore_queue(&msgque); + if (err) { + printf("Failed to restore queue\n"); + goto err_destroy; + } + + err = check_and_destroy_queue(&msgque); + if (err) { + printf("Failed to test queue\n"); + goto err_out; + } + return 0; + +err_destroy: + if (msgctl(msgque.msq_id, IPC_RMID, 0)) { + printf("Failed to destroy queue: %d\n", -errno); + return -errno; + } +err_out: + return err; +} diff --git a/tools/testing/selftests/kcmp/.gitignore b/tools/testing/selftests/kcmp/.gitignore new file mode 100644 index 00000000000..5a9b3732b2d --- /dev/null +++ b/tools/testing/selftests/kcmp/.gitignore @@ -0,0 +1,2 @@ +kcmp_test +kcmp-test-file diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile index dc79b86ea65..d7d6bbeeff2 100644 --- a/tools/testing/selftests/kcmp/Makefile +++ b/tools/testing/selftests/kcmp/Makefile @@ -16,14 +16,13 @@ CFLAGS += -I../../../../arch/x86/include/ all: ifeq ($(ARCH),X86) - gcc $(CFLAGS) kcmp_test.c -o run_test + gcc $(CFLAGS) kcmp_test.c -o kcmp_test else echo "Not an x86 target, can't build kcmp selftest" endif -run-tests: all - ./kcmp_test +run_tests: all + @./kcmp_test || echo "kcmp_test: [FAIL]" clean: - rm -fr ./run_test - rm -fr ./test-file + $(RM) kcmp_test kcmp-test-file diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c index 358cc6bfa35..fa4f1b37e04 100644 --- a/tools/testing/selftests/kcmp/kcmp_test.c +++ b/tools/testing/selftests/kcmp/kcmp_test.c @@ -72,7 +72,8 @@ int main(int argc, char **argv) /* This one should return same fd */ ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1); if (ret) { - printf("FAIL: 0 expected but %d returned\n", ret); + printf("FAIL: 0 expected but %d returned (%s)\n", + ret, strerror(errno)); ret = -1; } else printf("PASS: 0 returned as expected\n"); @@ -80,7 +81,8 @@ int main(int argc, char **argv) /* Compare with self */ ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0); if (ret) { - printf("FAIL: 0 expected but %li returned\n", ret); + printf("FAIL: 0 expected but %li returned (%s)\n", + ret, strerror(errno)); ret = -1; } else printf("PASS: 0 returned as expected\n"); diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile index 7c9c20ff578..058c76f5d10 100644 --- a/tools/testing/selftests/memory-hotplug/Makefile +++ b/tools/testing/selftests/memory-hotplug/Makefile @@ -1,6 +1,6 @@ all: run_tests: - ./on-off-test.sh + @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" clean: diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 54c0aad2b47..218a122c795 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile @@ -3,8 +3,8 @@ all: gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c run_tests: - ./mq_open_tests /test1 - ./mq_perf_tests + @./mq_open_tests /test1 || echo "mq_open_tests: [FAIL]" + @./mq_perf_tests || echo "mq_perf_tests: [FAIL]" clean: rm -f mq_open_tests mq_perf_tests diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore new file mode 100644 index 00000000000..00326629d4a --- /dev/null +++ b/tools/testing/selftests/net/.gitignore @@ -0,0 +1,3 @@ +socket +psock_fanout +psock_tpacket diff --git a/tools/testing/selftests/net/Makefile 
b/tools/testing/selftests/net/Makefile new file mode 100644 index 00000000000..c7493b8f9b0 --- /dev/null +++ b/tools/testing/selftests/net/Makefile @@ -0,0 +1,25 @@ +# Makefile for net selftests + +CC = $(CROSS_COMPILE)gcc +CFLAGS = -Wall -O2 -g + +CFLAGS += -I../../../../usr/include/ + +NET_PROGS = socket psock_fanout psock_tpacket + +all: $(NET_PROGS) +%: %.c + $(CC) $(CFLAGS) -o $@ $^ + +run_tests: all + @/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]" + @/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]" + @if /sbin/modprobe test_bpf ; then \ + /sbin/rmmod test_bpf; \ + echo "test_bpf: ok"; \ + else \ + echo "test_bpf: [FAIL]"; \ + exit 1; \ + fi +clean: + $(RM) $(NET_PROGS) diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c new file mode 100644 index 00000000000..57b9c2b7c4f --- /dev/null +++ b/tools/testing/selftests/net/psock_fanout.c @@ -0,0 +1,312 @@ +/* + * Copyright 2013 Google Inc. + * Author: Willem de Bruijn (willemb@google.com) + * + * A basic test of packet socket fanout behavior. + * + * Control: + * - create fanout fails as expected with illegal flag combinations + * - join fanout fails as expected with diverging types or flags + * + * Datapath: + * Open a pair of packet sockets and a pair of INET sockets, send a known + * number of packets across the two INET sockets and count the number of + * packets enqueued onto the two packet sockets. + * + * The test currently runs for + * - PACKET_FANOUT_HASH + * - PACKET_FANOUT_HASH with PACKET_FANOUT_FLAG_ROLLOVER + * - PACKET_FANOUT_LB + * - PACKET_FANOUT_CPU + * - PACKET_FANOUT_ROLLOVER + * + * Todo: + * - functionality: PACKET_FANOUT_FLAG_DEFRAG + * + * License (GPLv2): + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#define _GNU_SOURCE /* for sched_setaffinity */ + +#include <arpa/inet.h> +#include <errno.h> +#include <fcntl.h> +#include <linux/filter.h> +#include <linux/if_packet.h> +#include <net/ethernet.h> +#include <netinet/ip.h> +#include <netinet/udp.h> +#include <poll.h> +#include <sched.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> + +#include "psock_lib.h" + +#define RING_NUM_FRAMES 20 + +/* Open a socket in a given fanout mode. 
+ * @return -1 if mode is bad, a valid socket otherwise */ +static int sock_fanout_open(uint16_t typeflags, int num_packets) +{ + int fd, val; + + fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP)); + if (fd < 0) { + perror("socket packet"); + exit(1); + } + + /* fanout group ID is always 0: tests whether old groups are deleted */ + val = ((int) typeflags) << 16; + if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) { + if (close(fd)) { + perror("close packet"); + exit(1); + } + return -1; + } + + pair_udp_setfilter(fd); + return fd; +} + +static char *sock_fanout_open_ring(int fd) +{ + struct tpacket_req req = { + .tp_block_size = getpagesize(), + .tp_frame_size = getpagesize(), + .tp_block_nr = RING_NUM_FRAMES, + .tp_frame_nr = RING_NUM_FRAMES, + }; + char *ring; + int val = TPACKET_V2; + + if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, (void *) &val, + sizeof(val))) { + perror("packetsock ring setsockopt version"); + exit(1); + } + if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, (void *) &req, + sizeof(req))) { + perror("packetsock ring setsockopt"); + exit(1); + } + + ring = mmap(0, req.tp_block_size * req.tp_block_nr, + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (!ring) { + fprintf(stderr, "packetsock ring mmap\n"); + exit(1); + } + + return ring; +} + +static int sock_fanout_read_ring(int fd, void *ring) +{ + struct tpacket2_hdr *header = ring; + int count = 0; + + while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) { + count++; + header = ring + (count * getpagesize()); + } + + return count; +} + +static int sock_fanout_read(int fds[], char *rings[], const int expect[]) +{ + int ret[2]; + + ret[0] = sock_fanout_read_ring(fds[0], rings[0]); + ret[1] = sock_fanout_read_ring(fds[1], rings[1]); + + fprintf(stderr, "info: count=%d,%d, expect=%d,%d\n", + ret[0], ret[1], expect[0], expect[1]); + + if ((!(ret[0] == expect[0] && ret[1] == expect[1])) && + (!(ret[0] == expect[1] && ret[1] == expect[0]))) { + fprintf(stderr, "ERROR: incorrect queue lengths\n"); + return 1; + } + + return 0; +} + +/* Test illegal mode + flag combination */ +static void test_control_single(void) +{ + fprintf(stderr, "test: control single socket\n"); + + if (sock_fanout_open(PACKET_FANOUT_ROLLOVER | + PACKET_FANOUT_FLAG_ROLLOVER, 0) != -1) { + fprintf(stderr, "ERROR: opened socket with dual rollover\n"); + exit(1); + } +} + +/* Test illegal group with different modes or flags */ +static void test_control_group(void) +{ + int fds[2]; + + fprintf(stderr, "test: control multiple sockets\n"); + + fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 20); + if (fds[0] == -1) { + fprintf(stderr, "ERROR: failed to open HASH socket\n"); + exit(1); + } + if (sock_fanout_open(PACKET_FANOUT_HASH | + PACKET_FANOUT_FLAG_DEFRAG, 10) != -1) { + fprintf(stderr, "ERROR: joined group with wrong flag defrag\n"); + exit(1); + } + if (sock_fanout_open(PACKET_FANOUT_HASH | + PACKET_FANOUT_FLAG_ROLLOVER, 10) != -1) { + fprintf(stderr, "ERROR: joined group with wrong flag ro\n"); + exit(1); + } + if (sock_fanout_open(PACKET_FANOUT_CPU, 10) != -1) { + fprintf(stderr, "ERROR: joined group with wrong mode\n"); + exit(1); + } + fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 20); + if (fds[1] == -1) { + fprintf(stderr, "ERROR: failed to join group\n"); + exit(1); + } + if (close(fds[1]) || close(fds[0])) { + fprintf(stderr, "ERROR: closing sockets\n"); + exit(1); + } +} + +static int test_datapath(uint16_t typeflags, int port_off, + const int expect1[], const int expect2[]) +{ + const int expect0[] = { 0, 0 }; + 
char *rings[2]; + int fds[2], fds_udp[2][2], ret; + + fprintf(stderr, "test: datapath 0x%hx\n", typeflags); + + fds[0] = sock_fanout_open(typeflags, 20); + fds[1] = sock_fanout_open(typeflags, 20); + if (fds[0] == -1 || fds[1] == -1) { + fprintf(stderr, "ERROR: failed open\n"); + exit(1); + } + rings[0] = sock_fanout_open_ring(fds[0]); + rings[1] = sock_fanout_open_ring(fds[1]); + pair_udp_open(fds_udp[0], PORT_BASE); + pair_udp_open(fds_udp[1], PORT_BASE + port_off); + sock_fanout_read(fds, rings, expect0); + + /* Send data, but not enough to overflow a queue */ + pair_udp_send(fds_udp[0], 15); + pair_udp_send(fds_udp[1], 5); + ret = sock_fanout_read(fds, rings, expect1); + + /* Send more data, overflow the queue */ + pair_udp_send(fds_udp[0], 15); + /* TODO: ensure consistent order between expect1 and expect2 */ + ret |= sock_fanout_read(fds, rings, expect2); + + if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) || + munmap(rings[0], RING_NUM_FRAMES * getpagesize())) { + fprintf(stderr, "close rings\n"); + exit(1); + } + if (close(fds_udp[1][1]) || close(fds_udp[1][0]) || + close(fds_udp[0][1]) || close(fds_udp[0][0]) || + close(fds[1]) || close(fds[0])) { + fprintf(stderr, "close datapath\n"); + exit(1); + } + + return ret; +} + +static int set_cpuaffinity(int cpuid) +{ + cpu_set_t mask; + + CPU_ZERO(&mask); + CPU_SET(cpuid, &mask); + if (sched_setaffinity(0, sizeof(mask), &mask)) { + if (errno != EINVAL) { + fprintf(stderr, "setaffinity %d\n", cpuid); + exit(1); + } + return 1; + } + + return 0; +} + +int main(int argc, char **argv) +{ + const int expect_hash[2][2] = { { 15, 5 }, { 20, 5 } }; + const int expect_hash_rb[2][2] = { { 15, 5 }, { 20, 15 } }; + const int expect_lb[2][2] = { { 10, 10 }, { 18, 17 } }; + const int expect_rb[2][2] = { { 20, 0 }, { 20, 15 } }; + const int expect_cpu0[2][2] = { { 20, 0 }, { 20, 0 } }; + const int expect_cpu1[2][2] = { { 0, 20 }, { 0, 20 } }; + int port_off = 2, tries = 5, ret; + + test_control_single(); + test_control_group(); + + /* find a set of ports that do not collide onto the same socket */ + ret = test_datapath(PACKET_FANOUT_HASH, port_off, + expect_hash[0], expect_hash[1]); + while (ret && tries--) { + fprintf(stderr, "info: trying alternate ports (%d)\n", tries); + ret = test_datapath(PACKET_FANOUT_HASH, ++port_off, + expect_hash[0], expect_hash[1]); + } + + ret |= test_datapath(PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER, + port_off, expect_hash_rb[0], expect_hash_rb[1]); + ret |= test_datapath(PACKET_FANOUT_LB, + port_off, expect_lb[0], expect_lb[1]); + ret |= test_datapath(PACKET_FANOUT_ROLLOVER, + port_off, expect_rb[0], expect_rb[1]); + + set_cpuaffinity(0); + ret |= test_datapath(PACKET_FANOUT_CPU, port_off, + expect_cpu0[0], expect_cpu0[1]); + if (!set_cpuaffinity(1)) + /* TODO: test that choice alternates with previous */ + ret |= test_datapath(PACKET_FANOUT_CPU, port_off, + expect_cpu1[0], expect_cpu1[1]); + + if (ret) + return 1; + + printf("OK. All tests passed\n"); + return 0; +} diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h new file mode 100644 index 00000000000..37da54ac85a --- /dev/null +++ b/tools/testing/selftests/net/psock_lib.h @@ -0,0 +1,127 @@ +/* + * Copyright 2013 Google Inc. 
+ * Author: Willem de Bruijn <willemb@google.com> + * Daniel Borkmann <dborkman@redhat.com> + * + * License (GPLv2): + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef PSOCK_LIB_H +#define PSOCK_LIB_H + +#include <sys/types.h> +#include <sys/socket.h> +#include <string.h> +#include <arpa/inet.h> +#include <unistd.h> + +#define DATA_LEN 100 +#define DATA_CHAR 'a' + +#define PORT_BASE 8000 + +#ifndef __maybe_unused +# define __maybe_unused __attribute__ ((__unused__)) +#endif + +static __maybe_unused void pair_udp_setfilter(int fd) +{ + struct sock_filter bpf_filter[] = { + { 0x80, 0, 0, 0x00000000 }, /* LD pktlen */ + { 0x35, 0, 5, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/ + { 0x30, 0, 0, 0x00000050 }, /* LD ip[80] */ + { 0x15, 0, 3, DATA_CHAR }, /* JEQ DATA_CHAR [f goto nomatch]*/ + { 0x30, 0, 0, 0x00000051 }, /* LD ip[81] */ + { 0x15, 0, 1, DATA_CHAR }, /* JEQ DATA_CHAR [f goto nomatch]*/ + { 0x06, 0, 0, 0x00000060 }, /* RET match */ + { 0x06, 0, 0, 0x00000000 }, /* RET no match */ + }; + struct sock_fprog bpf_prog; + + bpf_prog.filter = bpf_filter; + bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter); + if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog, + sizeof(bpf_prog))) { + perror("setsockopt SO_ATTACH_FILTER"); + exit(1); + } +} + +static __maybe_unused void pair_udp_open(int fds[], uint16_t port) +{ + struct sockaddr_in saddr, daddr; + + fds[0] = socket(PF_INET, SOCK_DGRAM, 0); + fds[1] = socket(PF_INET, SOCK_DGRAM, 0); + if (fds[0] == -1 || fds[1] == -1) { + fprintf(stderr, "ERROR: socket dgram\n"); + exit(1); + } + + memset(&saddr, 0, sizeof(saddr)); + saddr.sin_family = AF_INET; + saddr.sin_port = htons(port); + saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + + memset(&daddr, 0, sizeof(daddr)); + daddr.sin_family = AF_INET; + daddr.sin_port = htons(port + 1); + daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + + /* must bind both to get consistent hash result */ + if (bind(fds[1], (void *) &daddr, sizeof(daddr))) { + perror("bind"); + exit(1); + } + if (bind(fds[0], (void *) &saddr, sizeof(saddr))) { + perror("bind"); + exit(1); + } + if (connect(fds[0], (void *) &daddr, sizeof(daddr))) { + perror("connect"); + exit(1); + } +} + +static __maybe_unused void pair_udp_send(int fds[], int num) +{ + char buf[DATA_LEN], rbuf[DATA_LEN]; + + memset(buf, DATA_CHAR, sizeof(buf)); + while (num--) { + /* Should really handle EINTR and EAGAIN */ + if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) { + fprintf(stderr, "ERROR: send failed left=%d\n", num); + exit(1); + } + if (read(fds[1], rbuf, sizeof(rbuf)) != sizeof(rbuf)) { + fprintf(stderr, "ERROR: recv failed left=%d\n", num); + exit(1); + } + if (memcmp(buf, rbuf, sizeof(buf))) { + fprintf(stderr, "ERROR: data failed left=%d\n", num); + exit(1); + } + } +} + +static __maybe_unused void pair_udp_close(int fds[]) +{ + close(fds[0]); + close(fds[1]); +} + +#endif /* PSOCK_LIB_H */ 
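The raw sock_filter opcodes in pair_udp_setfilter() above are easier to read when written with the BPF_STMT()/BPF_JUMP() helper macros from <linux/filter.h>, which expand to the same struct sock_filter initializers. The following is a minimal sketch, not part of the patch: the helper name attach_pair_udp_filter() is illustrative only, and the DATA_LEN/DATA_CHAR constants simply mirror the values defined in psock_lib.h.

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

#define DATA_LEN  100   /* mirrors psock_lib.h */
#define DATA_CHAR 'a'   /* mirrors psock_lib.h */

/* Same classic BPF program as pair_udp_setfilter(), spelled with macros:
 * accept the packet only if it is at least DATA_LEN bytes long and the
 * two payload bytes at offsets 0x50 and 0x51 both equal DATA_CHAR.
 */
static void attach_pair_udp_filter(int fd)
{
	struct sock_filter flt[] = {
		BPF_STMT(BPF_LD  | BPF_W | BPF_LEN, 0),              /* A = packet length */
		BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, DATA_LEN, 0, 5), /* len < DATA_LEN -> no match */
		BPF_STMT(BPF_LD  | BPF_B | BPF_ABS, 0x50),           /* A = byte at offset 0x50 */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, DATA_CHAR, 0, 3),
		BPF_STMT(BPF_LD  | BPF_B | BPF_ABS, 0x51),           /* A = byte at offset 0x51 */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, DATA_CHAR, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0x60),                     /* match: accept up to 0x60 bytes */
		BPF_STMT(BPF_RET | BPF_K, 0),                        /* no match: drop */
	};
	struct sock_fprog prog = {
		.len    = sizeof(flt) / sizeof(flt[0]),
		.filter = flt,
	};

	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog))) {
		perror("setsockopt SO_ATTACH_FILTER");
		exit(1);
	}
}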
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c new file mode 100644 index 00000000000..24adf709bd9 --- /dev/null +++ b/tools/testing/selftests/net/psock_tpacket.c @@ -0,0 +1,805 @@ +/* + * Copyright 2013 Red Hat, Inc. + * Author: Daniel Borkmann <dborkman@redhat.com> + * Chetan Loke <loke.chetan@gmail.com> (TPACKET_V3 usage example) + * + * A basic test of packet socket's TPACKET_V1/TPACKET_V2/TPACKET_V3 behavior. + * + * Control: + * Test the setup of the TPACKET socket with different patterns that are + * known to fail (TODO) or to succeed (OK). + * + * Datapath: + * Open a pair of packet sockets, send and receive an a priori known + * packet pattern across the sockets, and check that it was sent and + * received correctly. Fanout in combination with RX_RING is currently not + * tested here. + * + * The test currently runs for + * - TPACKET_V1: RX_RING, TX_RING + * - TPACKET_V2: RX_RING, TX_RING + * - TPACKET_V3: RX_RING + * + * License (GPLv2): + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/socket.h> +#include <sys/mman.h> +#include <linux/if_packet.h> +#include <linux/filter.h> +#include <ctype.h> +#include <fcntl.h> +#include <unistd.h> +#include <bits/wordsize.h> +#include <net/ethernet.h> +#include <netinet/ip.h> +#include <arpa/inet.h> +#include <stdint.h> +#include <string.h> +#include <assert.h> +#include <net/if.h> +#include <inttypes.h> +#include <poll.h> + +#include "psock_lib.h" + +#ifndef bug_on +# define bug_on(cond) assert(!(cond)) +#endif + +#ifndef __aligned_tpacket +# define __aligned_tpacket __attribute__((aligned(TPACKET_ALIGNMENT))) +#endif + +#ifndef __align_tpacket +# define __align_tpacket(x) __attribute__((aligned(TPACKET_ALIGN(x)))) +#endif + +#define NUM_PACKETS 100 +#define ALIGN_8(x) (((x) + 8 - 1) & ~(8 - 1)) + +struct ring { + struct iovec *rd; + uint8_t *mm_space; + size_t mm_len, rd_len; + struct sockaddr_ll ll; + void (*walk)(int sock, struct ring *ring); + int type, rd_num, flen, version; + union { + struct tpacket_req req; + struct tpacket_req3 req3; + }; +}; + +struct block_desc { + uint32_t version; + uint32_t offset_to_priv; + struct tpacket_hdr_v1 h1; +}; + +union frame_map { + struct { + struct tpacket_hdr tp_h __aligned_tpacket; + struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket_hdr)); + } *v1; + struct { + struct tpacket2_hdr tp_h __aligned_tpacket; + struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket2_hdr)); + } *v2; + void *raw; +}; + +static unsigned int total_packets, total_bytes; + +static int pfsocket(int ver) +{ + int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (sock == -1) { + perror("socket"); + exit(1); + } + + ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)); + if (ret == -1) { +
perror("setsockopt"); + exit(1); + } + + return sock; +} + +static void status_bar_update(void) +{ + if (total_packets % 10 == 0) { + fprintf(stderr, "."); + fflush(stderr); + } +} + +static void test_payload(void *pay, size_t len) +{ + struct ethhdr *eth = pay; + + if (len < sizeof(struct ethhdr)) { + fprintf(stderr, "test_payload: packet too " + "small: %zu bytes!\n", len); + exit(1); + } + + if (eth->h_proto != htons(ETH_P_IP)) { + fprintf(stderr, "test_payload: wrong ethernet " + "type: 0x%x!\n", ntohs(eth->h_proto)); + exit(1); + } +} + +static void create_payload(void *pay, size_t *len) +{ + int i; + struct ethhdr *eth = pay; + struct iphdr *ip = pay + sizeof(*eth); + + /* Lets create some broken crap, that still passes + * our BPF filter. + */ + + *len = DATA_LEN + 42; + + memset(pay, 0xff, ETH_ALEN * 2); + eth->h_proto = htons(ETH_P_IP); + + for (i = 0; i < sizeof(*ip); ++i) + ((uint8_t *) pay)[i + sizeof(*eth)] = (uint8_t) rand(); + + ip->ihl = 5; + ip->version = 4; + ip->protocol = 0x11; + ip->frag_off = 0; + ip->ttl = 64; + ip->tot_len = htons((uint16_t) *len - sizeof(*eth)); + + ip->saddr = htonl(INADDR_LOOPBACK); + ip->daddr = htonl(INADDR_LOOPBACK); + + memset(pay + sizeof(*eth) + sizeof(*ip), + DATA_CHAR, DATA_LEN); +} + +static inline int __v1_rx_kernel_ready(struct tpacket_hdr *hdr) +{ + return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); +} + +static inline void __v1_rx_user_ready(struct tpacket_hdr *hdr) +{ + hdr->tp_status = TP_STATUS_KERNEL; + __sync_synchronize(); +} + +static inline int __v2_rx_kernel_ready(struct tpacket2_hdr *hdr) +{ + return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); +} + +static inline void __v2_rx_user_ready(struct tpacket2_hdr *hdr) +{ + hdr->tp_status = TP_STATUS_KERNEL; + __sync_synchronize(); +} + +static inline int __v1_v2_rx_kernel_ready(void *base, int version) +{ + switch (version) { + case TPACKET_V1: + return __v1_rx_kernel_ready(base); + case TPACKET_V2: + return __v2_rx_kernel_ready(base); + default: + bug_on(1); + return 0; + } +} + +static inline void __v1_v2_rx_user_ready(void *base, int version) +{ + switch (version) { + case TPACKET_V1: + __v1_rx_user_ready(base); + break; + case TPACKET_V2: + __v2_rx_user_ready(base); + break; + } +} + +static void walk_v1_v2_rx(int sock, struct ring *ring) +{ + struct pollfd pfd; + int udp_sock[2]; + union frame_map ppd; + unsigned int frame_num = 0; + + bug_on(ring->type != PACKET_RX_RING); + + pair_udp_open(udp_sock, PORT_BASE); + pair_udp_setfilter(sock); + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = sock; + pfd.events = POLLIN | POLLERR; + pfd.revents = 0; + + pair_udp_send(udp_sock, NUM_PACKETS); + + while (total_packets < NUM_PACKETS * 2) { + while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base, + ring->version)) { + ppd.raw = ring->rd[frame_num].iov_base; + + switch (ring->version) { + case TPACKET_V1: + test_payload((uint8_t *) ppd.raw + ppd.v1->tp_h.tp_mac, + ppd.v1->tp_h.tp_snaplen); + total_bytes += ppd.v1->tp_h.tp_snaplen; + break; + + case TPACKET_V2: + test_payload((uint8_t *) ppd.raw + ppd.v2->tp_h.tp_mac, + ppd.v2->tp_h.tp_snaplen); + total_bytes += ppd.v2->tp_h.tp_snaplen; + break; + } + + status_bar_update(); + total_packets++; + + __v1_v2_rx_user_ready(ppd.raw, ring->version); + + frame_num = (frame_num + 1) % ring->rd_num; + } + + poll(&pfd, 1, 1); + } + + pair_udp_close(udp_sock); + + if (total_packets != 2 * NUM_PACKETS) { + fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", + ring->version, total_packets, NUM_PACKETS); + exit(1); + } 
+ + fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); +} + +static inline int __v1_tx_kernel_ready(struct tpacket_hdr *hdr) +{ + return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); +} + +static inline void __v1_tx_user_ready(struct tpacket_hdr *hdr) +{ + hdr->tp_status = TP_STATUS_SEND_REQUEST; + __sync_synchronize(); +} + +static inline int __v2_tx_kernel_ready(struct tpacket2_hdr *hdr) +{ + return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); +} + +static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr) +{ + hdr->tp_status = TP_STATUS_SEND_REQUEST; + __sync_synchronize(); +} + +static inline int __v1_v2_tx_kernel_ready(void *base, int version) +{ + switch (version) { + case TPACKET_V1: + return __v1_tx_kernel_ready(base); + case TPACKET_V2: + return __v2_tx_kernel_ready(base); + default: + bug_on(1); + return 0; + } +} + +static inline void __v1_v2_tx_user_ready(void *base, int version) +{ + switch (version) { + case TPACKET_V1: + __v1_tx_user_ready(base); + break; + case TPACKET_V2: + __v2_tx_user_ready(base); + break; + } +} + +static void __v1_v2_set_packet_loss_discard(int sock) +{ + int ret, discard = 1; + + ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS, (void *) &discard, + sizeof(discard)); + if (ret == -1) { + perror("setsockopt"); + exit(1); + } +} + +static void walk_v1_v2_tx(int sock, struct ring *ring) +{ + struct pollfd pfd; + int rcv_sock, ret; + size_t packet_len; + union frame_map ppd; + char packet[1024]; + unsigned int frame_num = 0, got = 0; + struct sockaddr_ll ll = { + .sll_family = PF_PACKET, + .sll_halen = ETH_ALEN, + }; + + bug_on(ring->type != PACKET_TX_RING); + bug_on(ring->rd_num < NUM_PACKETS); + + rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); + if (rcv_sock == -1) { + perror("socket"); + exit(1); + } + + pair_udp_setfilter(rcv_sock); + + ll.sll_ifindex = if_nametoindex("lo"); + ret = bind(rcv_sock, (struct sockaddr *) &ll, sizeof(ll)); + if (ret == -1) { + perror("bind"); + exit(1); + } + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = sock; + pfd.events = POLLOUT | POLLERR; + pfd.revents = 0; + + total_packets = NUM_PACKETS; + create_payload(packet, &packet_len); + + while (total_packets > 0) { + while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base, + ring->version) && + total_packets > 0) { + ppd.raw = ring->rd[frame_num].iov_base; + + switch (ring->version) { + case TPACKET_V1: + ppd.v1->tp_h.tp_snaplen = packet_len; + ppd.v1->tp_h.tp_len = packet_len; + + memcpy((uint8_t *) ppd.raw + TPACKET_HDRLEN - + sizeof(struct sockaddr_ll), packet, + packet_len); + total_bytes += ppd.v1->tp_h.tp_snaplen; + break; + + case TPACKET_V2: + ppd.v2->tp_h.tp_snaplen = packet_len; + ppd.v2->tp_h.tp_len = packet_len; + + memcpy((uint8_t *) ppd.raw + TPACKET2_HDRLEN - + sizeof(struct sockaddr_ll), packet, + packet_len); + total_bytes += ppd.v2->tp_h.tp_snaplen; + break; + } + + status_bar_update(); + total_packets--; + + __v1_v2_tx_user_ready(ppd.raw, ring->version); + + frame_num = (frame_num + 1) % ring->rd_num; + } + + poll(&pfd, 1, 1); + } + + bug_on(total_packets != 0); + + ret = sendto(sock, NULL, 0, 0, NULL, 0); + if (ret == -1) { + perror("sendto"); + exit(1); + } + + while ((ret = recvfrom(rcv_sock, packet, sizeof(packet), + 0, NULL, NULL)) > 0 && + total_packets < NUM_PACKETS) { + got += ret; + test_payload(packet, ret); + + status_bar_update(); + total_packets++; + } + + close(rcv_sock); + + if (total_packets != NUM_PACKETS) { + fprintf(stderr, "walk_v%d_rx: received %u out of %u 
pkts\n", + ring->version, total_packets, NUM_PACKETS); + exit(1); + } + + fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, got); +} + +static void walk_v1_v2(int sock, struct ring *ring) +{ + if (ring->type == PACKET_RX_RING) + walk_v1_v2_rx(sock, ring); + else + walk_v1_v2_tx(sock, ring); +} + +static uint64_t __v3_prev_block_seq_num = 0; + +void __v3_test_block_seq_num(struct block_desc *pbd) +{ + if (__v3_prev_block_seq_num + 1 != pbd->h1.seq_num) { + fprintf(stderr, "\nprev_block_seq_num:%"PRIu64", expected " + "seq:%"PRIu64" != actual seq:%"PRIu64"\n", + __v3_prev_block_seq_num, __v3_prev_block_seq_num + 1, + (uint64_t) pbd->h1.seq_num); + exit(1); + } + + __v3_prev_block_seq_num = pbd->h1.seq_num; +} + +static void __v3_test_block_len(struct block_desc *pbd, uint32_t bytes, int block_num) +{ + if (pbd->h1.num_pkts && bytes != pbd->h1.blk_len) { + fprintf(stderr, "\nblock:%u with %upackets, expected " + "len:%u != actual len:%u\n", block_num, + pbd->h1.num_pkts, bytes, pbd->h1.blk_len); + exit(1); + } +} + +static void __v3_test_block_header(struct block_desc *pbd, const int block_num) +{ + if ((pbd->h1.block_status & TP_STATUS_USER) == 0) { + fprintf(stderr, "\nblock %u: not in TP_STATUS_USER\n", block_num); + exit(1); + } + + __v3_test_block_seq_num(pbd); +} + +static void __v3_walk_block(struct block_desc *pbd, const int block_num) +{ + int num_pkts = pbd->h1.num_pkts, i; + unsigned long bytes = 0, bytes_with_padding = ALIGN_8(sizeof(*pbd)); + struct tpacket3_hdr *ppd; + + __v3_test_block_header(pbd, block_num); + + ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + + pbd->h1.offset_to_first_pkt); + + for (i = 0; i < num_pkts; ++i) { + bytes += ppd->tp_snaplen; + + if (ppd->tp_next_offset) + bytes_with_padding += ppd->tp_next_offset; + else + bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac); + + test_payload((uint8_t *) ppd + ppd->tp_mac, ppd->tp_snaplen); + + status_bar_update(); + total_packets++; + + ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset); + __sync_synchronize(); + } + + __v3_test_block_len(pbd, bytes_with_padding, block_num); + total_bytes += bytes; +} + +void __v3_flush_block(struct block_desc *pbd) +{ + pbd->h1.block_status = TP_STATUS_KERNEL; + __sync_synchronize(); +} + +static void walk_v3_rx(int sock, struct ring *ring) +{ + unsigned int block_num = 0; + struct pollfd pfd; + struct block_desc *pbd; + int udp_sock[2]; + + bug_on(ring->type != PACKET_RX_RING); + + pair_udp_open(udp_sock, PORT_BASE); + pair_udp_setfilter(sock); + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = sock; + pfd.events = POLLIN | POLLERR; + pfd.revents = 0; + + pair_udp_send(udp_sock, NUM_PACKETS); + + while (total_packets < NUM_PACKETS * 2) { + pbd = (struct block_desc *) ring->rd[block_num].iov_base; + + while ((pbd->h1.block_status & TP_STATUS_USER) == 0) + poll(&pfd, 1, 1); + + __v3_walk_block(pbd, block_num); + __v3_flush_block(pbd); + + block_num = (block_num + 1) % ring->rd_num; + } + + pair_udp_close(udp_sock); + + if (total_packets != 2 * NUM_PACKETS) { + fprintf(stderr, "walk_v3_rx: received %u out of %u pkts\n", + total_packets, NUM_PACKETS); + exit(1); + } + + fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); +} + +static void walk_v3(int sock, struct ring *ring) +{ + if (ring->type == PACKET_RX_RING) + walk_v3_rx(sock, ring); + else + bug_on(1); +} + +static void __v1_v2_fill(struct ring *ring, unsigned int blocks) +{ + ring->req.tp_block_size = getpagesize() << 2; + ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; + 
ring->req.tp_block_nr = blocks; + + ring->req.tp_frame_nr = ring->req.tp_block_size / + ring->req.tp_frame_size * + ring->req.tp_block_nr; + + ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr; + ring->walk = walk_v1_v2; + ring->rd_num = ring->req.tp_frame_nr; + ring->flen = ring->req.tp_frame_size; +} + +static void __v3_fill(struct ring *ring, unsigned int blocks) +{ + ring->req3.tp_retire_blk_tov = 64; + ring->req3.tp_sizeof_priv = 0; + ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; + + ring->req3.tp_block_size = getpagesize() << 2; + ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7; + ring->req3.tp_block_nr = blocks; + + ring->req3.tp_frame_nr = ring->req3.tp_block_size / + ring->req3.tp_frame_size * + ring->req3.tp_block_nr; + + ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr; + ring->walk = walk_v3; + ring->rd_num = ring->req3.tp_block_nr; + ring->flen = ring->req3.tp_block_size; +} + +static void setup_ring(int sock, struct ring *ring, int version, int type) +{ + int ret = 0; + unsigned int blocks = 256; + + ring->type = type; + ring->version = version; + + switch (version) { + case TPACKET_V1: + case TPACKET_V2: + if (type == PACKET_TX_RING) + __v1_v2_set_packet_loss_discard(sock); + __v1_v2_fill(ring, blocks); + ret = setsockopt(sock, SOL_PACKET, type, &ring->req, + sizeof(ring->req)); + break; + + case TPACKET_V3: + __v3_fill(ring, blocks); + ret = setsockopt(sock, SOL_PACKET, type, &ring->req3, + sizeof(ring->req3)); + break; + } + + if (ret == -1) { + perror("setsockopt"); + exit(1); + } + + ring->rd_len = ring->rd_num * sizeof(*ring->rd); + ring->rd = malloc(ring->rd_len); + if (ring->rd == NULL) { + perror("malloc"); + exit(1); + } + + total_packets = 0; + total_bytes = 0; +} + +static void mmap_ring(int sock, struct ring *ring) +{ + int i; + + ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0); + if (ring->mm_space == MAP_FAILED) { + perror("mmap"); + exit(1); + } + + memset(ring->rd, 0, ring->rd_len); + for (i = 0; i < ring->rd_num; ++i) { + ring->rd[i].iov_base = ring->mm_space + (i * ring->flen); + ring->rd[i].iov_len = ring->flen; + } +} + +static void bind_ring(int sock, struct ring *ring) +{ + int ret; + + ring->ll.sll_family = PF_PACKET; + ring->ll.sll_protocol = htons(ETH_P_ALL); + ring->ll.sll_ifindex = if_nametoindex("lo"); + ring->ll.sll_hatype = 0; + ring->ll.sll_pkttype = 0; + ring->ll.sll_halen = 0; + + ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll)); + if (ret == -1) { + perror("bind"); + exit(1); + } +} + +static void walk_ring(int sock, struct ring *ring) +{ + ring->walk(sock, ring); +} + +static void unmap_ring(int sock, struct ring *ring) +{ + munmap(ring->mm_space, ring->mm_len); + free(ring->rd); +} + +static int test_kernel_bit_width(void) +{ + char in[512], *ptr; + int num = 0, fd; + ssize_t ret; + + fd = open("/proc/kallsyms", O_RDONLY); + if (fd == -1) { + perror("open"); + exit(1); + } + + ret = read(fd, in, sizeof(in)); + if (ret <= 0) { + perror("read"); + exit(1); + } + + close(fd); + + ptr = in; + while(!isspace(*ptr)) { + num++; + ptr++; + } + + return num * 4; +} + +static int test_user_bit_width(void) +{ + return __WORDSIZE; +} + +static const char *tpacket_str[] = { + [TPACKET_V1] = "TPACKET_V1", + [TPACKET_V2] = "TPACKET_V2", + [TPACKET_V3] = "TPACKET_V3", +}; + +static const char *type_str[] = { + [PACKET_RX_RING] = "PACKET_RX_RING", + [PACKET_TX_RING] = "PACKET_TX_RING", +}; + +static int test_tpacket(int version, int 
type) +{ + int sock; + struct ring ring; + + fprintf(stderr, "test: %s with %s ", tpacket_str[version], + type_str[type]); + fflush(stderr); + + if (version == TPACKET_V1 && + test_kernel_bit_width() != test_user_bit_width()) { + fprintf(stderr, "test: skip %s %s since user and kernel " + "space have different bit width\n", + tpacket_str[version], type_str[type]); + return 0; + } + + sock = pfsocket(version); + memset(&ring, 0, sizeof(ring)); + setup_ring(sock, &ring, version, type); + mmap_ring(sock, &ring); + bind_ring(sock, &ring); + walk_ring(sock, &ring); + unmap_ring(sock, &ring); + close(sock); + + fprintf(stderr, "\n"); + return 0; +} + +int main(void) +{ + int ret = 0; + + ret |= test_tpacket(TPACKET_V1, PACKET_RX_RING); + ret |= test_tpacket(TPACKET_V1, PACKET_TX_RING); + + ret |= test_tpacket(TPACKET_V2, PACKET_RX_RING); + ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING); + + ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING); + + if (ret) + return 1; + + printf("OK. All tests passed\n"); + return 0; +} diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests new file mode 100644 index 00000000000..5246e782d6e --- /dev/null +++ b/tools/testing/selftests/net/run_afpackettests @@ -0,0 +1,26 @@ +#!/bin/sh + +if [ $(id -u) != 0 ]; then + echo $msg must be run as root >&2 + exit 0 +fi + +echo "--------------------" +echo "running psock_fanout test" +echo "--------------------" +./psock_fanout +if [ $? -ne 0 ]; then + echo "[FAIL]" +else + echo "[PASS]" +fi + +echo "--------------------" +echo "running psock_tpacket test" +echo "--------------------" +./psock_tpacket +if [ $? -ne 0 ]; then + echo "[FAIL]" +else + echo "[PASS]" +fi diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests new file mode 100644 index 00000000000..c09a682df56 --- /dev/null +++ b/tools/testing/selftests/net/run_netsocktests @@ -0,0 +1,12 @@ +#!/bin/bash + +echo "--------------------" +echo "running socket test" +echo "--------------------" +./socket +if [ $? -ne 0 ]; then + echo "[FAIL]" +else + echo "[PASS]" +fi + diff --git a/tools/testing/selftests/net/socket.c b/tools/testing/selftests/net/socket.c new file mode 100644 index 00000000000..0f227f2f9be --- /dev/null +++ b/tools/testing/selftests/net/socket.c @@ -0,0 +1,92 @@ +#include <stdio.h> +#include <errno.h> +#include <unistd.h> +#include <string.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> + +struct socket_testcase { + int domain; + int type; + int protocol; + + /* 0 = valid file descriptor + * -foo = error foo + */ + int expect; + + /* If non-zero, accept EAFNOSUPPORT to handle the case + * of the protocol not being configured into the kernel. 
+ */ + int nosupport_ok; +}; + +static struct socket_testcase tests[] = { + { AF_MAX, 0, 0, -EAFNOSUPPORT, 0 }, + { AF_INET, SOCK_STREAM, IPPROTO_TCP, 0, 1 }, + { AF_INET, SOCK_DGRAM, IPPROTO_TCP, -EPROTONOSUPPORT, 1 }, + { AF_INET, SOCK_DGRAM, IPPROTO_UDP, 0, 1 }, + { AF_INET, SOCK_STREAM, IPPROTO_UDP, -EPROTONOSUPPORT, 1 }, +}; + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#define ERR_STRING_SZ 64 + +static int run_tests(void) +{ + char err_string1[ERR_STRING_SZ]; + char err_string2[ERR_STRING_SZ]; + int i, err; + + err = 0; + for (i = 0; i < ARRAY_SIZE(tests); i++) { + struct socket_testcase *s = &tests[i]; + int fd; + + fd = socket(s->domain, s->type, s->protocol); + if (fd < 0) { + if (s->nosupport_ok && + errno == EAFNOSUPPORT) + continue; + + if (s->expect < 0 && + errno == -s->expect) + continue; + + strerror_r(-s->expect, err_string1, ERR_STRING_SZ); + strerror_r(errno, err_string2, ERR_STRING_SZ); + + fprintf(stderr, "socket(%d, %d, %d) expected " + "err (%s) got (%s)\n", + s->domain, s->type, s->protocol, + err_string1, err_string2); + + err = -1; + break; + } else { + close(fd); + + if (s->expect < 0) { + strerror_r(errno, err_string1, ERR_STRING_SZ); + + fprintf(stderr, "socket(%d, %d, %d) expected " + "success got err (%s)\n", + s->domain, s->type, s->protocol, + err_string1); + + err = -1; + break; + } + } + } + + return err; +} + +int main(void) +{ + int err = run_tests(); + + return err; +} diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile new file mode 100644 index 00000000000..54833a791a4 --- /dev/null +++ b/tools/testing/selftests/powerpc/Makefile @@ -0,0 +1,39 @@ +# Makefile for powerpc selftests + +# ARCH can be overridden by the user for cross compiling +ARCH ?= $(shell uname -m) +ARCH := $(shell echo $(ARCH) | sed -e s/ppc.*/powerpc/) + +ifeq ($(ARCH),powerpc) + +GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown") + +CC := $(CROSS_COMPILE)$(CC) +CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS) + +export CC CFLAGS + +TARGETS = pmu copyloops mm tm + +endif + +all: + @for TARGET in $(TARGETS); do \ + $(MAKE) -C $$TARGET all; \ + done; + +run_tests: all + @for TARGET in $(TARGETS); do \ + $(MAKE) -C $$TARGET run_tests; \ + done; + +clean: + @for TARGET in $(TARGETS); do \ + $(MAKE) -C $$TARGET clean; \ + done; + rm -f tags + +tags: + find . 
-name '*.c' -o -name '*.h' | xargs ctags + +.PHONY: all run_tests clean tags diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile new file mode 100644 index 00000000000..6f2d3be227f --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/Makefile @@ -0,0 +1,29 @@ +# The loops are all 64-bit code +CFLAGS += -m64 +CFLAGS += -I$(CURDIR) +CFLAGS += -D SELFTEST + +# Use our CFLAGS for the implicit .S rule +ASFLAGS = $(CFLAGS) + +PROGS := copyuser_64 copyuser_power7 memcpy_64 memcpy_power7 +EXTRA_SOURCES := validate.c ../harness.c + +all: $(PROGS) + +copyuser_64: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_base +copyuser_power7: CPPFLAGS += -D COPY_LOOP=test___copy_tofrom_user_power7 +memcpy_64: CPPFLAGS += -D COPY_LOOP=test_memcpy +memcpy_power7: CPPFLAGS += -D COPY_LOOP=test_memcpy_power7 + +$(PROGS): $(EXTRA_SOURCES) + +run_tests: all + @-for PROG in $(PROGS); do \ + ./$$PROG; \ + done; + +clean: + rm -f $(PROGS) *.o + +.PHONY: all run_tests clean diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h new file mode 100644 index 00000000000..d1dc3742551 --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h @@ -0,0 +1,89 @@ +#include <ppc-asm.h> + +#define CONFIG_ALTIVEC + +#define r1 1 + +#define vr0 0 +#define vr1 1 +#define vr2 2 +#define vr3 3 +#define vr4 4 +#define vr5 5 +#define vr6 6 +#define vr7 7 +#define vr8 8 +#define vr9 9 +#define vr10 10 +#define vr11 11 +#define vr12 12 +#define vr13 13 +#define vr14 14 +#define vr15 15 +#define vr16 16 +#define vr17 17 +#define vr18 18 +#define vr19 19 +#define vr20 20 +#define vr21 21 +#define vr22 22 +#define vr23 23 +#define vr24 24 +#define vr25 25 +#define vr26 26 +#define vr27 27 +#define vr28 28 +#define vr29 29 +#define vr30 30 +#define vr31 31 + +#define R14 r14 +#define R15 r15 +#define R16 r16 +#define R17 r17 +#define R18 r18 +#define R19 r19 +#define R20 r20 +#define R21 r21 +#define R22 r22 +#define R29 r29 +#define R30 r30 +#define R31 r31 + +#define STACKFRAMESIZE 256 +#define STK_REG(i) (112 + ((i)-14)*8) + +#define _GLOBAL(A) FUNC_START(test_ ## A) +#define _GLOBAL_TOC(A) _GLOBAL(A) + +#define PPC_MTOCRF(A, B) mtocrf A, B + +FUNC_START(enter_vmx_usercopy) + li r3,1 + blr + +FUNC_START(exit_vmx_usercopy) + li r3,0 + blr + +FUNC_START(enter_vmx_copy) + li r3,1 + blr + +FUNC_START(exit_vmx_copy) + blr + +FUNC_START(memcpy_power7) + blr + +FUNC_START(__copy_tofrom_user_power7) + blr + +FUNC_START(__copy_tofrom_user_base) + blr + +#define BEGIN_FTR_SECTION +#define FTR_SECTION_ELSE +#define ALT_FTR_SECTION_END_IFCLR(x) +#define ALT_FTR_SECTION_END(x, y) +#define END_FTR_SECTION_IFCLR(x) diff --git a/tools/testing/selftests/powerpc/copyloops/asm/processor.h b/tools/testing/selftests/powerpc/copyloops/asm/processor.h new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/asm/processor.h diff --git a/tools/testing/selftests/powerpc/copyloops/copyuser_64.S b/tools/testing/selftests/powerpc/copyloops/copyuser_64.S new file mode 120000 index 00000000000..f1c418a2521 --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/copyuser_64.S @@ -0,0 +1 @@ +../../../../../arch/powerpc/lib/copyuser_64.S
\ No newline at end of file diff --git a/tools/testing/selftests/powerpc/copyloops/copyuser_power7.S b/tools/testing/selftests/powerpc/copyloops/copyuser_power7.S new file mode 120000 index 00000000000..47868959829 --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/copyuser_power7.S @@ -0,0 +1 @@ +../../../../../arch/powerpc/lib/copyuser_power7.S
\ No newline at end of file diff --git a/tools/testing/selftests/powerpc/copyloops/memcpy_64.S b/tools/testing/selftests/powerpc/copyloops/memcpy_64.S new file mode 120000 index 00000000000..cce33fb6f9d --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/memcpy_64.S @@ -0,0 +1 @@ +../../../../../arch/powerpc/lib/memcpy_64.S
\ No newline at end of file diff --git a/tools/testing/selftests/powerpc/copyloops/memcpy_power7.S b/tools/testing/selftests/powerpc/copyloops/memcpy_power7.S new file mode 120000 index 00000000000..0d6fbfaf3d5 --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/memcpy_power7.S @@ -0,0 +1 @@ +../../../../../arch/powerpc/lib/memcpy_power7.S
\ No newline at end of file diff --git a/tools/testing/selftests/powerpc/copyloops/validate.c b/tools/testing/selftests/powerpc/copyloops/validate.c new file mode 100644 index 00000000000..1750ff57ee5 --- /dev/null +++ b/tools/testing/selftests/powerpc/copyloops/validate.c @@ -0,0 +1,99 @@ +#include <malloc.h> +#include <string.h> +#include <stdlib.h> +#include <stdbool.h> + +#include "../utils.h" + +#define MAX_LEN 8192 +#define MAX_OFFSET 16 +#define MIN_REDZONE 128 +#define BUFLEN (MAX_LEN+MAX_OFFSET+2*MIN_REDZONE) +#define POISON 0xa5 + +unsigned long COPY_LOOP(void *to, const void *from, unsigned long size); + +static void do_one(char *src, char *dst, unsigned long src_off, + unsigned long dst_off, unsigned long len, void *redzone, + void *fill) +{ + char *srcp, *dstp; + unsigned long ret; + unsigned long i; + + srcp = src + MIN_REDZONE + src_off; + dstp = dst + MIN_REDZONE + dst_off; + + memset(src, POISON, BUFLEN); + memset(dst, POISON, BUFLEN); + memcpy(srcp, fill, len); + + ret = COPY_LOOP(dstp, srcp, len); + if (ret && ret != (unsigned long)dstp) { + printf("(%p,%p,%ld) returned %ld\n", dstp, srcp, len, ret); + abort(); + } + + if (memcmp(dstp, srcp, len)) { + printf("(%p,%p,%ld) miscompare\n", dstp, srcp, len); + printf("src: "); + for (i = 0; i < len; i++) + printf("%02x ", srcp[i]); + printf("\ndst: "); + for (i = 0; i < len; i++) + printf("%02x ", dstp[i]); + printf("\n"); + abort(); + } + + if (memcmp(dst, redzone, dstp - dst)) { + printf("(%p,%p,%ld) redzone before corrupted\n", + dstp, srcp, len); + abort(); + } + + if (memcmp(dstp+len, redzone, dst+BUFLEN-(dstp+len))) { + printf("(%p,%p,%ld) redzone after corrupted\n", + dstp, srcp, len); + abort(); + } +} + +int test_copy_loop(void) +{ + char *src, *dst, *redzone, *fill; + unsigned long len, src_off, dst_off; + unsigned long i; + + src = memalign(BUFLEN, BUFLEN); + dst = memalign(BUFLEN, BUFLEN); + redzone = malloc(BUFLEN); + fill = malloc(BUFLEN); + + if (!src || !dst || !redzone || !fill) { + fprintf(stderr, "malloc failed\n"); + exit(1); + } + + memset(redzone, POISON, BUFLEN); + + /* Fill with sequential bytes */ + for (i = 0; i < BUFLEN; i++) + fill[i] = i & 0xff; + + for (len = 1; len < MAX_LEN; len++) { + for (src_off = 0; src_off < MAX_OFFSET; src_off++) { + for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) { + do_one(src, dst, src_off, dst_off, len, + redzone, fill); + } + } + } + + return 0; +} + +int main(void) +{ + return test_harness(test_copy_loop, str(COPY_LOOP)); +} diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c new file mode 100644 index 00000000000..8ebc58a0931 --- /dev/null +++ b/tools/testing/selftests/powerpc/harness.c @@ -0,0 +1,114 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#include <errno.h> +#include <signal.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "subunit.h" +#include "utils.h" + +#define TIMEOUT 120 +#define KILL_TIMEOUT 5 + + +int run_test(int (test_function)(void), char *name) +{ + bool terminated; + int rc, status; + pid_t pid; + + /* Make sure output is flushed before forking */ + fflush(stdout); + + pid = fork(); + if (pid == 0) { + setpgid(0, 0); + exit(test_function()); + } else if (pid == -1) { + perror("fork"); + return 1; + } + + setpgid(pid, pid); + + /* Wake us up in timeout seconds */ + alarm(TIMEOUT); + terminated = false; + +wait: + rc = waitpid(pid, &status, 0); + if (rc == -1) { + if (errno != EINTR) { + printf("unknown error from waitpid\n"); + return 1; + } + + if (terminated) { + printf("!! force killing %s\n", name); + kill(-pid, SIGKILL); + return 1; + } else { + printf("!! killing %s\n", name); + kill(-pid, SIGTERM); + terminated = true; + alarm(KILL_TIMEOUT); + goto wait; + } + } + + /* Kill anything else in the process group that is still running */ + kill(-pid, SIGTERM); + + if (WIFEXITED(status)) + status = WEXITSTATUS(status); + else { + if (WIFSIGNALED(status)) + printf("!! child died by signal %d\n", WTERMSIG(status)); + else + printf("!! child died by unknown cause\n"); + + status = 1; /* Signal or other */ + } + + return status; +} + +static void alarm_handler(int signum) +{ + /* Just wake us up from waitpid */ +} + +static struct sigaction alarm_action = { + .sa_handler = alarm_handler, +}; + +int test_harness(int (test_function)(void), char *name) +{ + int rc; + + test_start(name); + test_set_git_version(GIT_VERSION); + + if (sigaction(SIGALRM, &alarm_action, NULL)) { + perror("sigaction"); + test_error(name); + return 1; + } + + rc = run_test(test_function, name); + + if (rc == MAGIC_SKIP_RETURN_VALUE) + test_skip(name); + else + test_finish(name, rc); + + return rc; +} diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile new file mode 100644 index 00000000000..357ccbd6bad --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -0,0 +1,18 @@ +noarg: + $(MAKE) -C ../ + +PROGS := hugetlb_vs_thp_test + +all: $(PROGS) + +$(PROGS): ../harness.c + +run_tests: all + @-for PROG in $(PROGS); do \ + ./$$PROG; \ + done; + +clean: + rm -f $(PROGS) + +.PHONY: all run_tests clean diff --git a/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c b/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c new file mode 100644 index 00000000000..3d8e5b033e1 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c @@ -0,0 +1,72 @@ +#include <stdio.h> +#include <sys/mman.h> +#include <unistd.h> + +#include "utils.h" + +/* This must match the huge page & THP size */ +#define SIZE (16 * 1024 * 1024) + +static int test_body(void) +{ + void *addr; + char *p; + + addr = (void *)0xa0000000; + + p = mmap(addr, SIZE, PROT_READ | PROT_WRITE, + MAP_HUGETLB | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (p != MAP_FAILED) { + /* + * Typically the mmap will fail because no huge pages are + * allocated on the system. But if there are huge pages + * allocated the mmap will succeed. That's fine too, we just + * munmap here before continuing.
+ */ + munmap(addr, SIZE); + } + + p = mmap(addr, SIZE, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (p == MAP_FAILED) { + printf("Mapping failed @ %p\n", addr); + perror("mmap"); + return 1; + } + + /* + * Either a user or kernel access is sufficient to trigger the bug. + * A kernel access is easier to spot & debug, as it will trigger the + * softlockup or RCU stall detectors, and when the system is kicked + * into xmon we get a backtrace in the kernel. + * + * A good option is: + * getcwd(p, SIZE); + * + * For the purposes of this testcase it's preferable to spin in + * userspace, so the harness can kill us if we get stuck. That way we + * see a test failure rather than a dead system. + */ + *p = 0xf; + + munmap(addr, SIZE); + + return 0; +} + +static int test_main(void) +{ + int i; + + /* 10,000 because it's a "bunch", and completes reasonably quickly */ + for (i = 0; i < 10000; i++) + if (test_body()) + return 1; + + return 0; +} + +int main(void) +{ + return test_harness(test_main, "hugetlb_vs_thp"); +} diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile new file mode 100644 index 00000000000..b9ff0db42c7 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/Makefile @@ -0,0 +1,41 @@ +noarg: + $(MAKE) -C ../ + +PROGS := count_instructions +EXTRA_SOURCES := ../harness.c event.c + +all: $(PROGS) sub_all + +$(PROGS): $(EXTRA_SOURCES) + +# loop.S can only be built 64-bit +count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES) + $(CC) $(CFLAGS) -m64 -o $@ $^ + +run_tests: all sub_run_tests + @-for PROG in $(PROGS); do \ + ./$$PROG; \ + done; + +clean: sub_clean + rm -f $(PROGS) loop.o + + +SUB_TARGETS = ebb + +sub_all: + @for TARGET in $(SUB_TARGETS); do \ + $(MAKE) -C $$TARGET all; \ + done; + +sub_run_tests: all + @for TARGET in $(SUB_TARGETS); do \ + $(MAKE) -C $$TARGET run_tests; \ + done; + +sub_clean: + @for TARGET in $(SUB_TARGETS); do \ + $(MAKE) -C $$TARGET clean; \ + done; + +.PHONY: all run_tests clean sub_all sub_run_tests sub_clean diff --git a/tools/testing/selftests/powerpc/pmu/count_instructions.c b/tools/testing/selftests/powerpc/pmu/count_instructions.c new file mode 100644 index 00000000000..312b4f0fd27 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/count_instructions.c @@ -0,0 +1,135 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#define _GNU_SOURCE + +#include <stdio.h> +#include <stdbool.h> +#include <string.h> +#include <sys/prctl.h> + +#include "event.h" +#include "utils.h" + +extern void thirty_two_instruction_loop(u64 loops); + +static void setup_event(struct event *e, u64 config, char *name) +{ + event_init_opts(e, config, PERF_TYPE_HARDWARE, name); + + e->attr.disabled = 1; + e->attr.exclude_kernel = 1; + e->attr.exclude_hv = 1; + e->attr.exclude_idle = 1; +} + +static int do_count_loop(struct event *events, u64 instructions, + u64 overhead, bool report) +{ + s64 difference, expected; + double percentage; + + prctl(PR_TASK_PERF_EVENTS_ENABLE); + + /* Run for 1M instructions */ + thirty_two_instruction_loop(instructions >> 5); + + prctl(PR_TASK_PERF_EVENTS_DISABLE); + + event_read(&events[0]); + event_read(&events[1]); + + expected = instructions + overhead; + difference = events[0].result.value - expected; + percentage = (double)difference / events[0].result.value * 100; + + if (report) { + event_report(&events[0]); + event_report(&events[1]); + + printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead); + printf("Expected %llu\n", expected); + printf("Actual %llu\n", events[0].result.value); + printf("Delta %lld, %f%%\n", difference, percentage); + } + + event_reset(&events[0]); + event_reset(&events[1]); + + if (difference < 0) + difference = -difference; + + /* Tolerate a difference below 0.0001 % */ + difference *= 10000 * 100; + if (difference / events[0].result.value) + return -1; + + return 0; +} + +/* Count how many instructions it takes to do a null loop */ +static u64 determine_overhead(struct event *events) +{ + u64 current, overhead; + int i; + + do_count_loop(events, 0, 0, false); + overhead = events[0].result.value; + + for (i = 0; i < 100; i++) { + do_count_loop(events, 0, 0, false); + current = events[0].result.value; + if (current < overhead) { + printf("Replacing overhead %llu with %llu\n", overhead, current); + overhead = current; + } + } + + return overhead; +} + +static int count_instructions(void) +{ + struct event events[2]; + u64 overhead; + + setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, "cycles"); + + if (event_open(&events[0])) { + perror("perf_event_open"); + return -1; + } + + if (event_open_with_group(&events[1], events[0].fd)) { + perror("perf_event_open"); + return -1; + } + + overhead = determine_overhead(events); + printf("Overhead of null loop: %llu instructions\n", overhead); + + /* Run for 1M instructions */ + FAIL_IF(do_count_loop(events, 0x100000, overhead, true)); + + /* Run for 10M instructions */ + FAIL_IF(do_count_loop(events, 0xa00000, overhead, true)); + + /* Run for 100M instructions */ + FAIL_IF(do_count_loop(events, 0x6400000, overhead, true)); + + /* Run for 1G instructions */ + FAIL_IF(do_count_loop(events, 0x40000000, overhead, true)); + + event_close(&events[0]); + event_close(&events[1]); + + return 0; +} + +int main(void) +{ + return test_harness(count_instructions, "count_instructions"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile new file mode 100644 index 00000000000..edbba2affc2 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile @@ -0,0 +1,32 @@ +noarg: + $(MAKE) -C ../../ + +# The EBB handler is 64-bit code and everything links against it +CFLAGS += -m64 + +PROGS := reg_access_test event_attributes_test cycles_test \ + cycles_with_freeze_test 
pmc56_overflow_test \ + ebb_vs_cpu_event_test cpu_event_vs_ebb_test \ + cpu_event_pinned_vs_ebb_test task_event_vs_ebb_test \ + task_event_pinned_vs_ebb_test multi_ebb_procs_test \ + multi_counter_test pmae_handling_test \ + close_clears_pmcc_test instruction_count_test \ + fork_cleanup_test ebb_on_child_test \ + ebb_on_willing_child_test back_to_back_ebbs_test \ + lost_exception_test no_handler_test + +all: $(PROGS) + +$(PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c + +instruction_count_test: ../loop.S + +lost_exception_test: ../lib.c + +run_tests: all + @-for PROG in $(PROGS); do \ + ./$$PROG; \ + done; + +clean: + rm -f $(PROGS) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c new file mode 100644 index 00000000000..66ea765c0e7 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c @@ -0,0 +1,106 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> + +#include "ebb.h" + + +#define NUMBER_OF_EBBS 50 + +/* + * Test that if we overflow the counter while in the EBB handler, we take + * another EBB on exiting from the handler. + * + * We do this by counting with a stupidly low sample period, causing us to + * overflow the PMU while we're still in the EBB handler, leading to another + * EBB. + * + * We get out of what would otherwise be an infinite loop by leaving the + * counter frozen once we've taken enough EBBs. + */ + +static void ebb_callee(void) +{ + uint64_t siar, val; + + val = mfspr(SPRN_BESCR); + if (!(val & BESCR_PMEO)) { + ebb_state.stats.spurious++; + goto out; + } + + ebb_state.stats.ebb_count++; + trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count); + + /* Resets the PMC */ + count_pmc(1, sample_period); + +out: + if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS) + /* Reset but leave counters frozen */ + reset_ebb_with_clear_mask(MMCR0_PMAO); + else + /* Unfreezes */ + reset_ebb(); + + /* Do some stuff to chew some cycles and pop the counter */ + siar = mfspr(SPRN_SIAR); + trace_log_reg(ebb_state.trace, SPRN_SIAR, siar); + + val = mfspr(SPRN_PMC1); + trace_log_reg(ebb_state.trace, SPRN_PMC1, val); + + val = mfspr(SPRN_MMCR0); + trace_log_reg(ebb_state.trace, SPRN_MMCR0, val); +} + +int back_to_back_ebbs(void) +{ + struct event event; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + setup_ebb_handler(ebb_callee); + + FAIL_IF(ebb_event_enable(&event)); + + sample_period = 5; + + ebb_freeze_pmcs(); + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + ebb_global_enable(); + ebb_unfreeze_pmcs(); + + while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS) + FAIL_IF(core_busy_loop()); + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_ebb_state(); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS); + + return 0; +} + +int main(void) +{ + return test_harness(back_to_back_ebbs, "back_to_back_ebbs"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c b/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c new file mode 100644 index 00000000000..0f0423dba18 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c @@ -0,0 +1,59 @@ +/* + * 
Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <setjmp.h> +#include <signal.h> + +#include "ebb.h" + + +/* + * Test that closing the EBB event clears MMCR0_PMCC, preventing further access + * by userspace to the PMU hardware. + */ + +int close_clears_pmcc(void) +{ + struct event event; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + FAIL_IF(event_open(&event)); + + ebb_enable_pmc_counting(1); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + while (ebb_state.stats.ebb_count < 1) + FAIL_IF(core_busy_loop()); + + ebb_global_disable(); + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + + /* The real test is here, do we take a SIGILL when writing PMU regs now + * that we have closed the event. We expect that we will. */ + + FAIL_IF(catch_sigill(write_pmc1)); + + /* We should still be able to read EBB regs though */ + mfspr(SPRN_EBBHR); + mfspr(SPRN_EBBRR); + mfspr(SPRN_BESCR); + + return 0; +} + +int main(void) +{ + return test_harness(close_clears_pmcc, "close_clears_pmcc"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c new file mode 100644 index 00000000000..d3ed64d5d6c --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c @@ -0,0 +1,93 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event + * should remain and the EBB event should fail to enable. 
+ */ + +static int setup_cpu_event(struct event *event, int cpu) +{ + event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); + + event->attr.pinned = 1; + + event->attr.exclude_kernel = 1; + event->attr.exclude_hv = 1; + event->attr.exclude_idle = 1; + + SKIP_IF(require_paranoia_below(1)); + FAIL_IF(event_open_with_cpu(event, cpu)); + FAIL_IF(event_enable(event)); + + return 0; +} + +int cpu_event_pinned_vs_ebb(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + int cpu, rc; + pid_t pid; + + cpu = pick_online_cpu(); + FAIL_IF(cpu < 0); + FAIL_IF(bind_to_cpu(cpu)); + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(ebb_child(write_pipe, read_pipe)); + } + + /* We setup the cpu event first */ + rc = setup_cpu_event(&event, cpu); + if (rc) { + kill_child_and_wait(pid); + return rc; + } + + /* Signal the child to install its EBB event and wait */ + if (sync_with_child(read_pipe, write_pipe)) + /* If it fails, wait for it to exit */ + goto wait; + + /* Signal the child to run */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + +wait: + /* We expect it to fail to read the event */ + FAIL_IF(wait_for_child(pid) != 2); + + FAIL_IF(event_disable(&event)); + FAIL_IF(event_read(&event)); + + event_report(&event); + + /* The cpu event should have run */ + FAIL_IF(event.result.value == 0); + FAIL_IF(event.result.enabled != event.result.running); + + return 0; +} + +int main(void) +{ + return test_harness(cpu_event_pinned_vs_ebb, "cpu_event_pinned_vs_ebb"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c new file mode 100644 index 00000000000..8b972c2aa39 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c @@ -0,0 +1,89 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests a cpu event vs an EBB - in that order. The EBB should force the cpu + * event off the PMU. 
+ */ + +static int setup_cpu_event(struct event *event, int cpu) +{ + event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); + + event->attr.exclude_kernel = 1; + event->attr.exclude_hv = 1; + event->attr.exclude_idle = 1; + + SKIP_IF(require_paranoia_below(1)); + FAIL_IF(event_open_with_cpu(event, cpu)); + FAIL_IF(event_enable(event)); + + return 0; +} + +int cpu_event_vs_ebb(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + int cpu, rc; + pid_t pid; + + cpu = pick_online_cpu(); + FAIL_IF(cpu < 0); + FAIL_IF(bind_to_cpu(cpu)); + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(ebb_child(write_pipe, read_pipe)); + } + + /* We setup the cpu event first */ + rc = setup_cpu_event(&event, cpu); + if (rc) { + kill_child_and_wait(pid); + return rc; + } + + /* Signal the child to install its EBB event and wait */ + if (sync_with_child(read_pipe, write_pipe)) + /* If it fails, wait for it to exit */ + goto wait; + + /* Signal the child to run */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + +wait: + /* We expect the child to succeed */ + FAIL_IF(wait_for_child(pid)); + + FAIL_IF(event_disable(&event)); + FAIL_IF(event_read(&event)); + + event_report(&event); + + /* The cpu event may have run */ + + return 0; +} + +int main(void) +{ + return test_harness(cpu_event_vs_ebb, "cpu_event_vs_ebb"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c new file mode 100644 index 00000000000..8590fc1bfc0 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c @@ -0,0 +1,58 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> + +#include "ebb.h" + + +/* + * Basic test that counts user cycles and takes EBBs. + */ +int cycles(void) +{ + struct event event; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + ebb_enable_pmc_counting(1); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + while (ebb_state.stats.ebb_count < 10) { + FAIL_IF(core_busy_loop()); + FAIL_IF(ebb_check_mmcr0()); + } + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_ebb_state(); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + FAIL_IF(!ebb_check_count(1, sample_period, 100)); + + return 0; +} + +int main(void) +{ + return test_harness(cycles, "cycles"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c new file mode 100644 index 00000000000..754b3f2008d --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c @@ -0,0 +1,117 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> + +#include "ebb.h" + + +/* + * Test of counting cycles while using MMCR0_FC (freeze counters) to only count + * parts of the code. This is complicated by the fact that FC is set by the + * hardware when the event overflows. 
We may take the EBB after we have set FC, + * so we have to be careful about whether we clear FC at the end of the EBB + * handler or not. + */ + +static bool counters_frozen = false; +static int ebbs_while_frozen = 0; + +static void ebb_callee(void) +{ + uint64_t mask, val; + + mask = MMCR0_PMAO | MMCR0_FC; + + val = mfspr(SPRN_BESCR); + if (!(val & BESCR_PMEO)) { + ebb_state.stats.spurious++; + goto out; + } + + ebb_state.stats.ebb_count++; + trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count); + + val = mfspr(SPRN_MMCR0); + trace_log_reg(ebb_state.trace, SPRN_MMCR0, val); + + if (counters_frozen) { + trace_log_string(ebb_state.trace, "frozen"); + ebbs_while_frozen++; + mask &= ~MMCR0_FC; + } + + count_pmc(1, sample_period); +out: + reset_ebb_with_clear_mask(mask); +} + +int cycles_with_freeze(void) +{ + struct event event; + uint64_t val; + bool fc_cleared; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + setup_ebb_handler(ebb_callee); + ebb_global_enable(); + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + fc_cleared = false; + + /* Make sure we loop until we take at least one EBB */ + while ((ebb_state.stats.ebb_count < 20 && !fc_cleared) || + ebb_state.stats.ebb_count < 1) + { + counters_frozen = false; + mb(); + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); + + FAIL_IF(core_busy_loop()); + + counters_frozen = true; + mb(); + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); + + val = mfspr(SPRN_MMCR0); + if (! (val & MMCR0_FC)) { + printf("Outside of loop, FC NOT set MMCR0 0x%lx\n", val); + fc_cleared = true; + } + } + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_ebb_state(); + + printf("EBBs while frozen %d\n", ebbs_while_frozen); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + FAIL_IF(fc_cleared); + + return 0; +} + +int main(void) +{ + return test_harness(cycles_with_freeze, "cycles_with_freeze"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c new file mode 100644 index 00000000000..1b46be94b64 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c @@ -0,0 +1,727 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#define _GNU_SOURCE /* For CPU_ZERO etc. 
*/ + +#include <sched.h> +#include <sys/wait.h> +#include <setjmp.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> + +#include "trace.h" +#include "reg.h" +#include "ebb.h" + + +void (*ebb_user_func)(void); + +void ebb_hook(void) +{ + if (ebb_user_func) + ebb_user_func(); +} + +struct ebb_state ebb_state; + +u64 sample_period = 0x40000000ull; + +void reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask) +{ + u64 val; + + /* 2) clear MMCR0[PMAO] - docs say BESCR[PMEO] should do this */ + /* 3) set MMCR0[PMAE] - docs say BESCR[PME] should do this */ + val = mfspr(SPRN_MMCR0); + mtspr(SPRN_MMCR0, (val & ~mmcr0_clear_mask) | MMCR0_PMAE); + + /* 4) clear BESCR[PMEO] */ + mtspr(SPRN_BESCRR, BESCR_PMEO); + + /* 5) set BESCR[PME] */ + mtspr(SPRN_BESCRS, BESCR_PME); + + /* 6) rfebb 1 - done in our caller */ +} + +void reset_ebb(void) +{ + reset_ebb_with_clear_mask(MMCR0_PMAO | MMCR0_FC); +} + +/* Called outside of the EBB handler to check MMCR0 is sane */ +int ebb_check_mmcr0(void) +{ + u64 val; + + val = mfspr(SPRN_MMCR0); + if ((val & (MMCR0_FC | MMCR0_PMAO)) == MMCR0_FC) { + /* It's OK if we see FC & PMAO, but not FC by itself */ + printf("Outside of loop, only FC set 0x%llx\n", val); + return 1; + } + + return 0; +} + +bool ebb_check_count(int pmc, u64 sample_period, int fudge) +{ + u64 count, upper, lower; + + count = ebb_state.stats.pmc_count[PMC_INDEX(pmc)]; + + lower = ebb_state.stats.ebb_count * (sample_period - fudge); + + if (count < lower) { + printf("PMC%d count (0x%llx) below lower limit 0x%llx (-0x%llx)\n", + pmc, count, lower, lower - count); + return false; + } + + upper = ebb_state.stats.ebb_count * (sample_period + fudge); + + if (count > upper) { + printf("PMC%d count (0x%llx) above upper limit 0x%llx (+0x%llx)\n", + pmc, count, upper, count - upper); + return false; + } + + printf("PMC%d count (0x%llx) is between 0x%llx and 0x%llx delta +0x%llx/-0x%llx\n", + pmc, count, lower, upper, count - lower, upper - count); + + return true; +} + +void standard_ebb_callee(void) +{ + int found, i; + u64 val; + + val = mfspr(SPRN_BESCR); + if (!(val & BESCR_PMEO)) { + ebb_state.stats.spurious++; + goto out; + } + + ebb_state.stats.ebb_count++; + trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count); + + val = mfspr(SPRN_MMCR0); + trace_log_reg(ebb_state.trace, SPRN_MMCR0, val); + + found = 0; + for (i = 1; i <= 6; i++) { + if (ebb_state.pmc_enable[PMC_INDEX(i)]) + found += count_pmc(i, sample_period); + } + + if (!found) + ebb_state.stats.no_overflow++; + +out: + reset_ebb(); +} + +extern void ebb_handler(void); + +void setup_ebb_handler(void (*callee)(void)) +{ + u64 entry; + +#if defined(_CALL_ELF) && _CALL_ELF == 2 + entry = (u64)ebb_handler; +#else + struct opd + { + u64 entry; + u64 toc; + } *opd; + + opd = (struct opd *)ebb_handler; + entry = opd->entry; +#endif + printf("EBB Handler is at %#llx\n", entry); + + ebb_user_func = callee; + + /* Ensure ebb_user_func is set before we set the handler */ + mb(); + mtspr(SPRN_EBBHR, entry); + + /* Make sure the handler is set before we return */ + mb(); +} + +void clear_ebb_stats(void) +{ + memset(&ebb_state.stats, 0, sizeof(ebb_state.stats)); +} + +void dump_summary_ebb_state(void) +{ + printf("ebb_state:\n" \ + " ebb_count = %d\n" \ + " spurious = %d\n" \ + " negative = %d\n" \ + " no_overflow = %d\n" \ + " pmc[1] count = 0x%llx\n" \ + " pmc[2] count = 0x%llx\n" \ + " pmc[3] count = 0x%llx\n" \ + " pmc[4] count = 0x%llx\n" \ + " pmc[5] count = 0x%llx\n" \ + " pmc[6] 
count = 0x%llx\n", + ebb_state.stats.ebb_count, ebb_state.stats.spurious, + ebb_state.stats.negative, ebb_state.stats.no_overflow, + ebb_state.stats.pmc_count[0], ebb_state.stats.pmc_count[1], + ebb_state.stats.pmc_count[2], ebb_state.stats.pmc_count[3], + ebb_state.stats.pmc_count[4], ebb_state.stats.pmc_count[5]); +} + +static char *decode_mmcr0(u32 value) +{ + static char buf[16]; + + buf[0] = '\0'; + + if (value & (1 << 31)) + strcat(buf, "FC "); + if (value & (1 << 26)) + strcat(buf, "PMAE "); + if (value & (1 << 7)) + strcat(buf, "PMAO "); + + return buf; +} + +static char *decode_bescr(u64 value) +{ + static char buf[16]; + + buf[0] = '\0'; + + if (value & (1ull << 63)) + strcat(buf, "GE "); + if (value & (1ull << 32)) + strcat(buf, "PMAE "); + if (value & 1) + strcat(buf, "PMAO "); + + return buf; +} + +void dump_ebb_hw_state(void) +{ + u64 bescr; + u32 mmcr0; + + mmcr0 = mfspr(SPRN_MMCR0); + bescr = mfspr(SPRN_BESCR); + + printf("HW state:\n" \ + "MMCR0 0x%016x %s\n" \ + "EBBHR 0x%016lx\n" \ + "BESCR 0x%016llx %s\n" \ + "PMC1 0x%016lx\n" \ + "PMC2 0x%016lx\n" \ + "PMC3 0x%016lx\n" \ + "PMC4 0x%016lx\n" \ + "PMC5 0x%016lx\n" \ + "PMC6 0x%016lx\n" \ + "SIAR 0x%016lx\n", + mmcr0, decode_mmcr0(mmcr0), mfspr(SPRN_EBBHR), bescr, + decode_bescr(bescr), mfspr(SPRN_PMC1), mfspr(SPRN_PMC2), + mfspr(SPRN_PMC3), mfspr(SPRN_PMC4), mfspr(SPRN_PMC5), + mfspr(SPRN_PMC6), mfspr(SPRN_SIAR)); +} + +void dump_ebb_state(void) +{ + dump_summary_ebb_state(); + + dump_ebb_hw_state(); + + trace_buffer_print(ebb_state.trace); +} + +int count_pmc(int pmc, uint32_t sample_period) +{ + uint32_t start_value; + u64 val; + + /* 0) Read PMC */ + start_value = pmc_sample_period(sample_period); + + val = read_pmc(pmc); + if (val < start_value) + ebb_state.stats.negative++; + else + ebb_state.stats.pmc_count[PMC_INDEX(pmc)] += val - start_value; + + trace_log_reg(ebb_state.trace, SPRN_PMC1 + pmc - 1, val); + + /* 1) Reset PMC */ + write_pmc(pmc, start_value); + + /* Report if we overflowed */ + return val >= COUNTER_OVERFLOW; +} + +int ebb_event_enable(struct event *e) +{ + int rc; + + /* Ensure any SPR writes are ordered vs us */ + mb(); + + rc = ioctl(e->fd, PERF_EVENT_IOC_ENABLE); + if (rc) + return rc; + + rc = event_read(e); + + /* Ditto */ + mb(); + + return rc; +} + +void ebb_freeze_pmcs(void) +{ + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); + mb(); +} + +void ebb_unfreeze_pmcs(void) +{ + /* Unfreeze counters */ + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); + mb(); +} + +void ebb_global_enable(void) +{ + /* Enable EBBs globally and PMU EBBs */ + mtspr(SPRN_BESCR, 0x8000000100000000ull); + mb(); +} + +void ebb_global_disable(void) +{ + /* Disable EBBs & freeze counters, events are still scheduled */ + mtspr(SPRN_BESCRR, BESCR_PME); + mb(); +} + +void event_ebb_init(struct event *e) +{ + e->attr.config |= (1ull << 63); +} + +void event_bhrb_init(struct event *e, unsigned ifm) +{ + e->attr.config |= (1ull << 62) | ((u64)ifm << 60); +} + +void event_leader_ebb_init(struct event *e) +{ + event_ebb_init(e); + + e->attr.exclusive = 1; + e->attr.pinned = 1; +} + +int core_busy_loop(void) +{ + int rc; + + asm volatile ( + "li 3, 0x3030\n" + "std 3, -96(1)\n" + "li 4, 0x4040\n" + "std 4, -104(1)\n" + "li 5, 0x5050\n" + "std 5, -112(1)\n" + "li 6, 0x6060\n" + "std 6, -120(1)\n" + "li 7, 0x7070\n" + "std 7, -128(1)\n" + "li 8, 0x0808\n" + "std 8, -136(1)\n" + "li 9, 0x0909\n" + "std 9, -144(1)\n" + "li 10, 0x1010\n" + "std 10, -152(1)\n" + "li 11, 0x1111\n" + "std 11, -160(1)\n" + "li 14, 0x1414\n" + "std 14, 
-168(1)\n" + "li 15, 0x1515\n" + "std 15, -176(1)\n" + "li 16, 0x1616\n" + "std 16, -184(1)\n" + "li 17, 0x1717\n" + "std 17, -192(1)\n" + "li 18, 0x1818\n" + "std 18, -200(1)\n" + "li 19, 0x1919\n" + "std 19, -208(1)\n" + "li 20, 0x2020\n" + "std 20, -216(1)\n" + "li 21, 0x2121\n" + "std 21, -224(1)\n" + "li 22, 0x2222\n" + "std 22, -232(1)\n" + "li 23, 0x2323\n" + "std 23, -240(1)\n" + "li 24, 0x2424\n" + "std 24, -248(1)\n" + "li 25, 0x2525\n" + "std 25, -256(1)\n" + "li 26, 0x2626\n" + "std 26, -264(1)\n" + "li 27, 0x2727\n" + "std 27, -272(1)\n" + "li 28, 0x2828\n" + "std 28, -280(1)\n" + "li 29, 0x2929\n" + "std 29, -288(1)\n" + "li 30, 0x3030\n" + "li 31, 0x3131\n" + + "li 3, 0\n" + "0: " + "addi 3, 3, 1\n" + "cmpwi 3, 100\n" + "blt 0b\n" + + /* Return 1 (fail) unless we get through all the checks */ + "li 0, 1\n" + + /* Check none of our registers have been corrupted */ + "cmpwi 4, 0x4040\n" + "bne 1f\n" + "cmpwi 5, 0x5050\n" + "bne 1f\n" + "cmpwi 6, 0x6060\n" + "bne 1f\n" + "cmpwi 7, 0x7070\n" + "bne 1f\n" + "cmpwi 8, 0x0808\n" + "bne 1f\n" + "cmpwi 9, 0x0909\n" + "bne 1f\n" + "cmpwi 10, 0x1010\n" + "bne 1f\n" + "cmpwi 11, 0x1111\n" + "bne 1f\n" + "cmpwi 14, 0x1414\n" + "bne 1f\n" + "cmpwi 15, 0x1515\n" + "bne 1f\n" + "cmpwi 16, 0x1616\n" + "bne 1f\n" + "cmpwi 17, 0x1717\n" + "bne 1f\n" + "cmpwi 18, 0x1818\n" + "bne 1f\n" + "cmpwi 19, 0x1919\n" + "bne 1f\n" + "cmpwi 20, 0x2020\n" + "bne 1f\n" + "cmpwi 21, 0x2121\n" + "bne 1f\n" + "cmpwi 22, 0x2222\n" + "bne 1f\n" + "cmpwi 23, 0x2323\n" + "bne 1f\n" + "cmpwi 24, 0x2424\n" + "bne 1f\n" + "cmpwi 25, 0x2525\n" + "bne 1f\n" + "cmpwi 26, 0x2626\n" + "bne 1f\n" + "cmpwi 27, 0x2727\n" + "bne 1f\n" + "cmpwi 28, 0x2828\n" + "bne 1f\n" + "cmpwi 29, 0x2929\n" + "bne 1f\n" + "cmpwi 30, 0x3030\n" + "bne 1f\n" + "cmpwi 31, 0x3131\n" + "bne 1f\n" + + /* Load junk into all our registers before we reload them from the stack. 
*/ + "li 3, 0xde\n" + "li 4, 0xad\n" + "li 5, 0xbe\n" + "li 6, 0xef\n" + "li 7, 0xde\n" + "li 8, 0xad\n" + "li 9, 0xbe\n" + "li 10, 0xef\n" + "li 11, 0xde\n" + "li 14, 0xad\n" + "li 15, 0xbe\n" + "li 16, 0xef\n" + "li 17, 0xde\n" + "li 18, 0xad\n" + "li 19, 0xbe\n" + "li 20, 0xef\n" + "li 21, 0xde\n" + "li 22, 0xad\n" + "li 23, 0xbe\n" + "li 24, 0xef\n" + "li 25, 0xde\n" + "li 26, 0xad\n" + "li 27, 0xbe\n" + "li 28, 0xef\n" + "li 29, 0xdd\n" + + "ld 3, -96(1)\n" + "cmpwi 3, 0x3030\n" + "bne 1f\n" + "ld 4, -104(1)\n" + "cmpwi 4, 0x4040\n" + "bne 1f\n" + "ld 5, -112(1)\n" + "cmpwi 5, 0x5050\n" + "bne 1f\n" + "ld 6, -120(1)\n" + "cmpwi 6, 0x6060\n" + "bne 1f\n" + "ld 7, -128(1)\n" + "cmpwi 7, 0x7070\n" + "bne 1f\n" + "ld 8, -136(1)\n" + "cmpwi 8, 0x0808\n" + "bne 1f\n" + "ld 9, -144(1)\n" + "cmpwi 9, 0x0909\n" + "bne 1f\n" + "ld 10, -152(1)\n" + "cmpwi 10, 0x1010\n" + "bne 1f\n" + "ld 11, -160(1)\n" + "cmpwi 11, 0x1111\n" + "bne 1f\n" + "ld 14, -168(1)\n" + "cmpwi 14, 0x1414\n" + "bne 1f\n" + "ld 15, -176(1)\n" + "cmpwi 15, 0x1515\n" + "bne 1f\n" + "ld 16, -184(1)\n" + "cmpwi 16, 0x1616\n" + "bne 1f\n" + "ld 17, -192(1)\n" + "cmpwi 17, 0x1717\n" + "bne 1f\n" + "ld 18, -200(1)\n" + "cmpwi 18, 0x1818\n" + "bne 1f\n" + "ld 19, -208(1)\n" + "cmpwi 19, 0x1919\n" + "bne 1f\n" + "ld 20, -216(1)\n" + "cmpwi 20, 0x2020\n" + "bne 1f\n" + "ld 21, -224(1)\n" + "cmpwi 21, 0x2121\n" + "bne 1f\n" + "ld 22, -232(1)\n" + "cmpwi 22, 0x2222\n" + "bne 1f\n" + "ld 23, -240(1)\n" + "cmpwi 23, 0x2323\n" + "bne 1f\n" + "ld 24, -248(1)\n" + "cmpwi 24, 0x2424\n" + "bne 1f\n" + "ld 25, -256(1)\n" + "cmpwi 25, 0x2525\n" + "bne 1f\n" + "ld 26, -264(1)\n" + "cmpwi 26, 0x2626\n" + "bne 1f\n" + "ld 27, -272(1)\n" + "cmpwi 27, 0x2727\n" + "bne 1f\n" + "ld 28, -280(1)\n" + "cmpwi 28, 0x2828\n" + "bne 1f\n" + "ld 29, -288(1)\n" + "cmpwi 29, 0x2929\n" + "bne 1f\n" + + /* Load 0 (success) to return */ + "li 0, 0\n" + + "1: mr %0, 0\n" + + : "=r" (rc) + : /* no inputs */ + : "3", "4", "5", "6", "7", "8", "9", "10", "11", "14", + "15", "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + "memory" + ); + + return rc; +} + +int core_busy_loop_with_freeze(void) +{ + int rc; + + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); + rc = core_busy_loop(); + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); + + return rc; +} + +int ebb_child(union pipe read_pipe, union pipe write_pipe) +{ + struct event event; + uint64_t val; + + FAIL_IF(wait_for_parent(read_pipe)); + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + ebb_enable_pmc_counting(1); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + + FAIL_IF(event_enable(&event)); + + if (event_read(&event)) { + /* + * Some tests expect to fail here, so don't report an error on + * this line, and return a distinguisable error code. Tell the + * parent an error happened. 
+ */ + notify_parent_of_error(write_pipe); + return 2; + } + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + FAIL_IF(notify_parent(write_pipe)); + FAIL_IF(wait_for_parent(read_pipe)); + FAIL_IF(notify_parent(write_pipe)); + + while (ebb_state.stats.ebb_count < 20) { + FAIL_IF(core_busy_loop()); + + /* To try and hit SIGILL case */ + val = mfspr(SPRN_MMCRA); + val |= mfspr(SPRN_MMCR2); + val |= mfspr(SPRN_MMCR0); + } + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_ebb_state(); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + + return 0; +} + +static jmp_buf setjmp_env; + +static void sigill_handler(int signal) +{ + printf("Took sigill\n"); + longjmp(setjmp_env, 1); +} + +static struct sigaction sigill_action = { + .sa_handler = sigill_handler, +}; + +int catch_sigill(void (*func)(void)) +{ + if (sigaction(SIGILL, &sigill_action, NULL)) { + perror("sigaction"); + return 1; + } + + if (setjmp(setjmp_env) == 0) { + func(); + return 1; + } + + return 0; +} + +void write_pmc1(void) +{ + mtspr(SPRN_PMC1, 0); +} + +void write_pmc(int pmc, u64 value) +{ + switch (pmc) { + case 1: mtspr(SPRN_PMC1, value); break; + case 2: mtspr(SPRN_PMC2, value); break; + case 3: mtspr(SPRN_PMC3, value); break; + case 4: mtspr(SPRN_PMC4, value); break; + case 5: mtspr(SPRN_PMC5, value); break; + case 6: mtspr(SPRN_PMC6, value); break; + } +} + +u64 read_pmc(int pmc) +{ + switch (pmc) { + case 1: return mfspr(SPRN_PMC1); + case 2: return mfspr(SPRN_PMC2); + case 3: return mfspr(SPRN_PMC3); + case 4: return mfspr(SPRN_PMC4); + case 5: return mfspr(SPRN_PMC5); + case 6: return mfspr(SPRN_PMC6); + } + + return 0; +} + +static void term_handler(int signal) +{ + dump_summary_ebb_state(); + dump_ebb_hw_state(); + abort(); +} + +struct sigaction term_action = { + .sa_handler = term_handler, +}; + +static void __attribute__((constructor)) ebb_init(void) +{ + clear_ebb_stats(); + + if (sigaction(SIGTERM, &term_action, NULL)) + perror("sigaction"); + + ebb_state.trace = trace_buffer_allocate(1 * 1024 * 1024); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h new file mode 100644 index 00000000000..e62bde05bf7 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h @@ -0,0 +1,78 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#ifndef _SELFTESTS_POWERPC_PMU_EBB_EBB_H +#define _SELFTESTS_POWERPC_PMU_EBB_EBB_H + +#include "../event.h" +#include "../lib.h" +#include "trace.h" +#include "reg.h" + +#define PMC_INDEX(pmc) ((pmc)-1) + +#define NUM_PMC_VALUES 128 + +struct ebb_state +{ + struct { + u64 pmc_count[6]; + volatile int ebb_count; + int spurious; + int negative; + int no_overflow; + } stats; + + bool pmc_enable[6]; + struct trace_buffer *trace; +}; + +extern struct ebb_state ebb_state; + +#define COUNTER_OVERFLOW 0x80000000ull + +static inline uint32_t pmc_sample_period(uint32_t value) +{ + return COUNTER_OVERFLOW - value; +} + +static inline void ebb_enable_pmc_counting(int pmc) +{ + ebb_state.pmc_enable[PMC_INDEX(pmc)] = true; +} + +bool ebb_check_count(int pmc, u64 sample_period, int fudge); +void event_leader_ebb_init(struct event *e); +void event_ebb_init(struct event *e); +void event_bhrb_init(struct event *e, unsigned ifm); +void setup_ebb_handler(void (*callee)(void)); +void standard_ebb_callee(void); +int ebb_event_enable(struct event *e); +void ebb_global_enable(void); +void ebb_global_disable(void); +void ebb_freeze_pmcs(void); +void ebb_unfreeze_pmcs(void); +void event_ebb_init(struct event *e); +void event_leader_ebb_init(struct event *e); +int count_pmc(int pmc, uint32_t sample_period); +void dump_ebb_state(void); +void dump_summary_ebb_state(void); +void dump_ebb_hw_state(void); +void clear_ebb_stats(void); +void write_pmc(int pmc, u64 value); +u64 read_pmc(int pmc); +void reset_ebb_with_clear_mask(unsigned long mmcr0_clear_mask); +void reset_ebb(void); +int ebb_check_mmcr0(void); + +extern u64 sample_period; + +int core_busy_loop(void); +int core_busy_loop_with_freeze(void); +int ebb_child(union pipe read_pipe, union pipe write_pipe); +int catch_sigill(void (*func)(void)); +void write_pmc1(void); + +#endif /* _SELFTESTS_POWERPC_PMU_EBB_EBB_H */ diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S b/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S new file mode 100644 index 00000000000..14274ea206e --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S @@ -0,0 +1,365 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <ppc-asm.h> +#include "reg.h" + + +/* ppc-asm.h defines most of the reg aliases, but not r1/r2. */ +#define r1 1 +#define r2 2 + +#define RFEBB .long 0x4c000924 + +/* Stack layout: + * + * ^ + * User stack | + * Back chain ------+ <- r1 <-------+ + * ... | + * Red zone / ABI Gap | + * ... | + * vr63 <+ | + * vr0 | | + * VSCR | | + * FSCR | | + * r31 | Save area | + * r0 | | + * XER | | + * CTR | | + * LR | | + * CCR <+ | + * ... 
<+ | + * LR | Caller frame | + * CCR | | + * Back chain <+ <- updated r1 --------+ + * + */ + +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define ABIGAP 512 +#else +#define ABIGAP 288 +#endif + +#define NR_GPR 32 +#define NR_SPR 6 +#define NR_VSR 64 + +#define SAVE_AREA ((NR_GPR + NR_SPR) * 8 + (NR_VSR * 16)) +#define CALLER_FRAME 112 + +#define STACK_FRAME (ABIGAP + SAVE_AREA + CALLER_FRAME) + +#define CCR_SAVE (CALLER_FRAME) +#define LR_SAVE (CCR_SAVE + 8) +#define CTR_SAVE (LR_SAVE + 8) +#define XER_SAVE (CTR_SAVE + 8) +#define GPR_SAVE(n) (XER_SAVE + 8 + (8 * n)) +#define FSCR_SAVE (GPR_SAVE(31) + 8) +#define VSCR_SAVE (FSCR_SAVE + 8) +#define VSR_SAVE(n) (VSCR_SAVE + 8 + (16 * n)) + +#define SAVE_GPR(n) std n,GPR_SAVE(n)(r1) +#define REST_GPR(n) ld n,GPR_SAVE(n)(r1) +#define TRASH_GPR(n) lis n,0xaaaa + +#define SAVE_VSR(n, b) li b, VSR_SAVE(n); stxvd2x n,b,r1 +#define LOAD_VSR(n, b) li b, VSR_SAVE(n); lxvd2x n,b,r1 + +#define LOAD_REG_IMMEDIATE(reg,expr) \ + lis reg,(expr)@highest; \ + ori reg,reg,(expr)@higher; \ + rldicr reg,reg,32,31; \ + oris reg,reg,(expr)@h; \ + ori reg,reg,(expr)@l; + + +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define ENTRY_POINT(name) \ + .type FUNC_NAME(name),@function; \ + .globl FUNC_NAME(name); \ + FUNC_NAME(name): + +#define RESTORE_TOC(name) \ + /* Restore our TOC pointer using our entry point */ \ + LOAD_REG_IMMEDIATE(r12, name) \ +0: addis r2,r12,(.TOC.-0b)@ha; \ + addi r2,r2,(.TOC.-0b)@l; + +#else +#define ENTRY_POINT(name) FUNC_START(name) +#define RESTORE_TOC(name) \ + /* Restore our TOC pointer via our opd entry */ \ + LOAD_REG_IMMEDIATE(r2, name) \ + ld r2,8(r2); +#endif + + .text + +ENTRY_POINT(ebb_handler) + stdu r1,-STACK_FRAME(r1) + SAVE_GPR(0) + mflr r0 + std r0,LR_SAVE(r1) + mfcr r0 + std r0,CCR_SAVE(r1) + mfctr r0 + std r0,CTR_SAVE(r1) + mfxer r0 + std r0,XER_SAVE(r1) + SAVE_GPR(2) + SAVE_GPR(3) + SAVE_GPR(4) + SAVE_GPR(5) + SAVE_GPR(6) + SAVE_GPR(7) + SAVE_GPR(8) + SAVE_GPR(9) + SAVE_GPR(10) + SAVE_GPR(11) + SAVE_GPR(12) + SAVE_GPR(13) + SAVE_GPR(14) + SAVE_GPR(15) + SAVE_GPR(16) + SAVE_GPR(17) + SAVE_GPR(18) + SAVE_GPR(19) + SAVE_GPR(20) + SAVE_GPR(21) + SAVE_GPR(22) + SAVE_GPR(23) + SAVE_GPR(24) + SAVE_GPR(25) + SAVE_GPR(26) + SAVE_GPR(27) + SAVE_GPR(28) + SAVE_GPR(29) + SAVE_GPR(30) + SAVE_GPR(31) + SAVE_VSR(0, r3) + mffs f0 + stfd f0, FSCR_SAVE(r1) + mfvscr f0 + stfd f0, VSCR_SAVE(r1) + SAVE_VSR(1, r3) + SAVE_VSR(2, r3) + SAVE_VSR(3, r3) + SAVE_VSR(4, r3) + SAVE_VSR(5, r3) + SAVE_VSR(6, r3) + SAVE_VSR(7, r3) + SAVE_VSR(8, r3) + SAVE_VSR(9, r3) + SAVE_VSR(10, r3) + SAVE_VSR(11, r3) + SAVE_VSR(12, r3) + SAVE_VSR(13, r3) + SAVE_VSR(14, r3) + SAVE_VSR(15, r3) + SAVE_VSR(16, r3) + SAVE_VSR(17, r3) + SAVE_VSR(18, r3) + SAVE_VSR(19, r3) + SAVE_VSR(20, r3) + SAVE_VSR(21, r3) + SAVE_VSR(22, r3) + SAVE_VSR(23, r3) + SAVE_VSR(24, r3) + SAVE_VSR(25, r3) + SAVE_VSR(26, r3) + SAVE_VSR(27, r3) + SAVE_VSR(28, r3) + SAVE_VSR(29, r3) + SAVE_VSR(30, r3) + SAVE_VSR(31, r3) + SAVE_VSR(32, r3) + SAVE_VSR(33, r3) + SAVE_VSR(34, r3) + SAVE_VSR(35, r3) + SAVE_VSR(36, r3) + SAVE_VSR(37, r3) + SAVE_VSR(38, r3) + SAVE_VSR(39, r3) + SAVE_VSR(40, r3) + SAVE_VSR(41, r3) + SAVE_VSR(42, r3) + SAVE_VSR(43, r3) + SAVE_VSR(44, r3) + SAVE_VSR(45, r3) + SAVE_VSR(46, r3) + SAVE_VSR(47, r3) + SAVE_VSR(48, r3) + SAVE_VSR(49, r3) + SAVE_VSR(50, r3) + SAVE_VSR(51, r3) + SAVE_VSR(52, r3) + SAVE_VSR(53, r3) + SAVE_VSR(54, r3) + SAVE_VSR(55, r3) + SAVE_VSR(56, r3) + SAVE_VSR(57, r3) + SAVE_VSR(58, r3) + SAVE_VSR(59, r3) + SAVE_VSR(60, r3) + SAVE_VSR(61, r3) + SAVE_VSR(62, r3) + 
SAVE_VSR(63, r3) + + TRASH_GPR(2) + TRASH_GPR(3) + TRASH_GPR(4) + TRASH_GPR(5) + TRASH_GPR(6) + TRASH_GPR(7) + TRASH_GPR(8) + TRASH_GPR(9) + TRASH_GPR(10) + TRASH_GPR(11) + TRASH_GPR(12) + TRASH_GPR(14) + TRASH_GPR(15) + TRASH_GPR(16) + TRASH_GPR(17) + TRASH_GPR(18) + TRASH_GPR(19) + TRASH_GPR(20) + TRASH_GPR(21) + TRASH_GPR(22) + TRASH_GPR(23) + TRASH_GPR(24) + TRASH_GPR(25) + TRASH_GPR(26) + TRASH_GPR(27) + TRASH_GPR(28) + TRASH_GPR(29) + TRASH_GPR(30) + TRASH_GPR(31) + + RESTORE_TOC(ebb_handler) + + /* + * r13 is our TLS pointer. We leave whatever value was in there when the + * EBB fired. That seems to be OK because once set the TLS pointer is not + * changed - but presumably that could change in future. + */ + + bl ebb_hook + nop + + /* r2 may be changed here but we don't care */ + + lfd f0, FSCR_SAVE(r1) + mtfsf 0xff,f0 + lfd f0, VSCR_SAVE(r1) + mtvscr f0 + LOAD_VSR(0, r3) + LOAD_VSR(1, r3) + LOAD_VSR(2, r3) + LOAD_VSR(3, r3) + LOAD_VSR(4, r3) + LOAD_VSR(5, r3) + LOAD_VSR(6, r3) + LOAD_VSR(7, r3) + LOAD_VSR(8, r3) + LOAD_VSR(9, r3) + LOAD_VSR(10, r3) + LOAD_VSR(11, r3) + LOAD_VSR(12, r3) + LOAD_VSR(13, r3) + LOAD_VSR(14, r3) + LOAD_VSR(15, r3) + LOAD_VSR(16, r3) + LOAD_VSR(17, r3) + LOAD_VSR(18, r3) + LOAD_VSR(19, r3) + LOAD_VSR(20, r3) + LOAD_VSR(21, r3) + LOAD_VSR(22, r3) + LOAD_VSR(23, r3) + LOAD_VSR(24, r3) + LOAD_VSR(25, r3) + LOAD_VSR(26, r3) + LOAD_VSR(27, r3) + LOAD_VSR(28, r3) + LOAD_VSR(29, r3) + LOAD_VSR(30, r3) + LOAD_VSR(31, r3) + LOAD_VSR(32, r3) + LOAD_VSR(33, r3) + LOAD_VSR(34, r3) + LOAD_VSR(35, r3) + LOAD_VSR(36, r3) + LOAD_VSR(37, r3) + LOAD_VSR(38, r3) + LOAD_VSR(39, r3) + LOAD_VSR(40, r3) + LOAD_VSR(41, r3) + LOAD_VSR(42, r3) + LOAD_VSR(43, r3) + LOAD_VSR(44, r3) + LOAD_VSR(45, r3) + LOAD_VSR(46, r3) + LOAD_VSR(47, r3) + LOAD_VSR(48, r3) + LOAD_VSR(49, r3) + LOAD_VSR(50, r3) + LOAD_VSR(51, r3) + LOAD_VSR(52, r3) + LOAD_VSR(53, r3) + LOAD_VSR(54, r3) + LOAD_VSR(55, r3) + LOAD_VSR(56, r3) + LOAD_VSR(57, r3) + LOAD_VSR(58, r3) + LOAD_VSR(59, r3) + LOAD_VSR(60, r3) + LOAD_VSR(61, r3) + LOAD_VSR(62, r3) + LOAD_VSR(63, r3) + + ld r0,XER_SAVE(r1) + mtxer r0 + ld r0,CTR_SAVE(r1) + mtctr r0 + ld r0,LR_SAVE(r1) + mtlr r0 + ld r0,CCR_SAVE(r1) + mtcr r0 + REST_GPR(0) + REST_GPR(2) + REST_GPR(3) + REST_GPR(4) + REST_GPR(5) + REST_GPR(6) + REST_GPR(7) + REST_GPR(8) + REST_GPR(9) + REST_GPR(10) + REST_GPR(11) + REST_GPR(12) + REST_GPR(13) + REST_GPR(14) + REST_GPR(15) + REST_GPR(16) + REST_GPR(17) + REST_GPR(18) + REST_GPR(19) + REST_GPR(20) + REST_GPR(21) + REST_GPR(22) + REST_GPR(23) + REST_GPR(24) + REST_GPR(25) + REST_GPR(26) + REST_GPR(27) + REST_GPR(28) + REST_GPR(29) + REST_GPR(30) + REST_GPR(31) + addi r1,r1,STACK_FRAME + RFEBB +FUNC_END(ebb_handler) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c new file mode 100644 index 00000000000..c45f948148e --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c @@ -0,0 +1,86 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests we can setup an EBB on our child. Nothing interesting happens, because + * even though the event is enabled and running the child hasn't enabled the + * actual delivery of the EBBs. 
+ */ + +static int victim_child(union pipe read_pipe, union pipe write_pipe) +{ + int i; + + FAIL_IF(wait_for_parent(read_pipe)); + FAIL_IF(notify_parent(write_pipe)); + + /* Parent creates EBB event */ + + FAIL_IF(wait_for_parent(read_pipe)); + FAIL_IF(notify_parent(write_pipe)); + + /* Check the EBB is enabled by writing PMC1 */ + write_pmc1(); + + /* EBB event is enabled here */ + for (i = 0; i < 1000000; i++) ; + + return 0; +} + +int ebb_on_child(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + pid_t pid; + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(victim_child(write_pipe, read_pipe)); + } + + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + /* Child is running now */ + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open_with_pid(&event, pid)); + FAIL_IF(ebb_event_enable(&event)); + + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + /* Child should just exit happily */ + FAIL_IF(wait_for_child(pid)); + + event_close(&event); + + return 0; +} + +int main(void) +{ + return test_harness(ebb_on_child, "ebb_on_child"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c new file mode 100644 index 00000000000..11acf1d55f8 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c @@ -0,0 +1,92 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests we can setup an EBB on our child. The child expects this and enables + * EBBs, which are then delivered to the child, even though the event is + * created by the parent. 
+ */ + +static int victim_child(union pipe read_pipe, union pipe write_pipe) +{ + FAIL_IF(wait_for_parent(read_pipe)); + + /* Setup our EBB handler, before the EBB event is created */ + ebb_enable_pmc_counting(1); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + + FAIL_IF(notify_parent(write_pipe)); + + while (ebb_state.stats.ebb_count < 20) { + FAIL_IF(core_busy_loop()); + } + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_ebb_state(); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + + return 0; +} + +/* Tests we can setup an EBB on our child - if it's expecting it */ +int ebb_on_willing_child(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + pid_t pid; + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(victim_child(write_pipe, read_pipe)); + } + + /* Signal the child to setup its EBB handler */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + /* Child is running now */ + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open_with_pid(&event, pid)); + FAIL_IF(ebb_event_enable(&event)); + + /* Child should now take EBBs and then exit */ + FAIL_IF(wait_for_child(pid)); + + event_close(&event); + + return 0; +} + +int main(void) +{ + return test_harness(ebb_on_willing_child, "ebb_on_willing_child"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c new file mode 100644 index 00000000000..be4dd5a4e98 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c @@ -0,0 +1,86 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests an EBB vs a cpu event - in that order. The EBB should force the cpu + * event off the PMU. + */ + +static int setup_cpu_event(struct event *event, int cpu) +{ + event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); + + event->attr.exclude_kernel = 1; + event->attr.exclude_hv = 1; + event->attr.exclude_idle = 1; + + SKIP_IF(require_paranoia_below(1)); + FAIL_IF(event_open_with_cpu(event, cpu)); + FAIL_IF(event_enable(event)); + + return 0; +} + +int ebb_vs_cpu_event(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + int cpu, rc; + pid_t pid; + + cpu = pick_online_cpu(); + FAIL_IF(cpu < 0); + FAIL_IF(bind_to_cpu(cpu)); + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(ebb_child(write_pipe, read_pipe)); + } + + /* Signal the child to install its EBB event and wait */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + /* Now try to install our CPU event */ + rc = setup_cpu_event(&event, cpu); + if (rc) { + kill_child_and_wait(pid); + return rc; + } + + /* Signal the child to run */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + /* ..
and wait for it to complete */ + FAIL_IF(wait_for_child(pid)); + FAIL_IF(event_disable(&event)); + FAIL_IF(event_read(&event)); + + event_report(&event); + + /* The cpu event may have run, but we don't expect 100% */ + FAIL_IF(event.result.enabled >= event.result.running); + + return 0; +} + +int main(void) +{ + return test_harness(ebb_vs_cpu_event, "ebb_vs_cpu_event"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c b/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c new file mode 100644 index 00000000000..7e78153f08e --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c @@ -0,0 +1,131 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> + +#include "ebb.h" + + +/* + * Test various attributes of the EBB event are enforced. + */ +int event_attributes(void) +{ + struct event event, leader; + + event_init(&event, 0x1001e); + event_leader_ebb_init(&event); + /* Expected to succeed */ + FAIL_IF(event_open(&event)); + event_close(&event); + + + event_init(&event, 0x001e); /* CYCLES - no PMC specified */ + event_leader_ebb_init(&event); + /* Expected to fail, no PMC specified */ + FAIL_IF(event_open(&event) == 0); + + + event_init(&event, 0x2001e); + event_leader_ebb_init(&event); + event.attr.exclusive = 0; + /* Expected to fail, not exclusive */ + FAIL_IF(event_open(&event) == 0); + + + event_init(&event, 0x3001e); + event_leader_ebb_init(&event); + event.attr.freq = 1; + /* Expected to fail, sets freq */ + FAIL_IF(event_open(&event) == 0); + + + event_init(&event, 0x4001e); + event_leader_ebb_init(&event); + event.attr.sample_period = 1; + /* Expected to fail, sets sample_period */ + FAIL_IF(event_open(&event) == 0); + + + event_init(&event, 0x1001e); + event_leader_ebb_init(&event); + event.attr.enable_on_exec = 1; + /* Expected to fail, sets enable_on_exec */ + FAIL_IF(event_open(&event) == 0); + + + event_init(&event, 0x1001e); + event_leader_ebb_init(&event); + event.attr.inherit = 1; + /* Expected to fail, sets inherit */ + FAIL_IF(event_open(&event) == 0); + + + event_init(&leader, 0x1001e); + event_leader_ebb_init(&leader); + FAIL_IF(event_open(&leader)); + + event_init(&event, 0x20002); + event_ebb_init(&event); + + /* Expected to succeed */ + FAIL_IF(event_open_with_group(&event, leader.fd)); + event_close(&leader); + event_close(&event); + + + event_init(&leader, 0x1001e); + event_leader_ebb_init(&leader); + FAIL_IF(event_open(&leader)); + + event_init(&event, 0x20002); + + /* Expected to fail, event doesn't request EBB, leader does */ + FAIL_IF(event_open_with_group(&event, leader.fd) == 0); + event_close(&leader); + + + event_init(&leader, 0x1001e); + event_leader_ebb_init(&leader); + /* Clear the EBB flag */ + leader.attr.config &= ~(1ull << 63); + + FAIL_IF(event_open(&leader)); + + event_init(&event, 0x20002); + event_ebb_init(&event); + + /* Expected to fail, leader doesn't request EBB */ + FAIL_IF(event_open_with_group(&event, leader.fd) == 0); + event_close(&leader); + + + event_init(&leader, 0x1001e); + event_leader_ebb_init(&leader); + leader.attr.exclusive = 0; + /* Expected to fail, leader isn't exclusive */ + FAIL_IF(event_open(&leader) == 0); + + + event_init(&leader, 0x1001e); + event_leader_ebb_init(&leader); + leader.attr.pinned = 0; + /* Expected to fail, leader isn't pinned */ + FAIL_IF(event_open(&leader) == 0); + + event_init(&event, 0x1001e); + event_leader_ebb_init(&event); + /* Expected to fail, not a task 
event */ + SKIP_IF(require_paranoia_below(1)); + FAIL_IF(event_open_with_cpu(&event, 0) == 0); + + return 0; +} + +int main(void) +{ + return test_harness(event_attributes, "event_attributes"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S new file mode 100644 index 00000000000..b866a0581d3 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S @@ -0,0 +1,43 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <ppc-asm.h> + + .text + +FUNC_START(thirty_two_instruction_loop) + cmpwi r3,0 + beqlr + addi r4,r3,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 # 28 addi's + subi r3,r3,1 + b FUNC_NAME(thirty_two_instruction_loop) +FUNC_END(thirty_two_instruction_loop) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c new file mode 100644 index 00000000000..9e7af6e7662 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c @@ -0,0 +1,79 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> +#include <setjmp.h> +#include <signal.h> + +#include "ebb.h" + + +/* + * Test that a fork clears the PMU state of the child. eg. BESCR/EBBHR/EBBRR + * are cleared, and MMCR0_PMCC is reset, preventing the child from accessing + * the PMU. + */ + +static struct event event; + +static int child(void) +{ + /* Even though we have EBE=0 we can still see the EBB regs */ + FAIL_IF(mfspr(SPRN_BESCR) != 0); + FAIL_IF(mfspr(SPRN_EBBHR) != 0); + FAIL_IF(mfspr(SPRN_EBBRR) != 0); + + FAIL_IF(catch_sigill(write_pmc1)); + + /* We can still read from the event, though it is on our parent */ + FAIL_IF(event_read(&event)); + + return 0; +} + +/* Tests that fork clears EBB state */ +int fork_cleanup(void) +{ + pid_t pid; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + FAIL_IF(event_open(&event)); + + ebb_enable_pmc_counting(1); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_MMCR0, MMCR0_FC); + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + /* Don't need to actually take any EBBs */ + + pid = fork(); + if (pid == 0) + exit(child()); + + /* Child does the actual testing */ + FAIL_IF(wait_for_child(pid)); + + /* After fork */ + event_close(&event); + + return 0; +} + +int main(void) +{ + return test_harness(fork_cleanup, "fork_cleanup"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c new file mode 100644 index 00000000000..f8190fa2959 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c @@ -0,0 +1,164 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#define _GNU_SOURCE + +#include <stdio.h> +#include <stdbool.h> +#include <string.h> +#include <sys/prctl.h> + +#include "ebb.h" + + +/* + * Run a calibrated instruction loop and count instructions executed using + * EBBs. Make sure the counts look right. + */ + +extern void thirty_two_instruction_loop(uint64_t loops); + +static bool counters_frozen = true; + +static int do_count_loop(struct event *event, uint64_t instructions, + uint64_t overhead, bool report) +{ + int64_t difference, expected; + double percentage; + + clear_ebb_stats(); + + counters_frozen = false; + mb(); + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); + + thirty_two_instruction_loop(instructions >> 5); + + counters_frozen = true; + mb(); + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC); + + count_pmc(4, sample_period); + + event->result.value = ebb_state.stats.pmc_count[4-1]; + expected = instructions + overhead; + difference = event->result.value - expected; + percentage = (double)difference / event->result.value * 100; + + if (report) { + printf("Looped for %lu instructions, overhead %lu\n", instructions, overhead); + printf("Expected %lu\n", expected); + printf("Actual %llu\n", event->result.value); + printf("Error %ld, %f%%\n", difference, percentage); + printf("Took %d EBBs\n", ebb_state.stats.ebb_count); + } + + if (difference < 0) + difference = -difference; + + /* Tolerate a difference of up to 0.0001 % */ + difference *= 10000 * 100; + if (difference / event->result.value) + return -1; + + return 0; +} + +/* Count how many instructions it takes to do a null loop */ +static uint64_t determine_overhead(struct event *event) +{ + uint64_t current, overhead; + int i; + + do_count_loop(event, 0, 0, false); + overhead = event->result.value; + + for (i = 0; i < 100; i++) { + do_count_loop(event, 0, 0, false); + current = event->result.value; + if (current < overhead) { + printf("Replacing overhead %lu with %lu\n", overhead, current); + overhead = current; + } + } + + return overhead; +} + +static void pmc4_ebb_callee(void) +{ + uint64_t val; + + val = mfspr(SPRN_BESCR); + if (!(val & BESCR_PMEO)) { + ebb_state.stats.spurious++; + goto out; + } + + ebb_state.stats.ebb_count++; + count_pmc(4, sample_period); +out: + if (counters_frozen) + reset_ebb_with_clear_mask(MMCR0_PMAO); + else + reset_ebb(); +} + +int instruction_count(void) +{ + struct event event; + uint64_t overhead; + + event_init_named(&event, 0x400FA, "PM_RUN_INST_CMPL"); + event_leader_ebb_init(&event); + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + FAIL_IF(ebb_event_enable(&event)); + + sample_period = COUNTER_OVERFLOW; + + setup_ebb_handler(pmc4_ebb_callee); + mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC); + ebb_global_enable(); + + overhead = determine_overhead(&event); + printf("Overhead of null loop: %lu instructions\n", overhead); + + /* Run for 1M instructions */ + FAIL_IF(do_count_loop(&event, 0x100000, overhead, true)); + + /* Run for 10M instructions */ + FAIL_IF(do_count_loop(&event, 0xa00000, overhead, true)); + + /* Run for 100M instructions */ + FAIL_IF(do_count_loop(&event, 0x6400000, overhead, true)); + + /* Run for 1G instructions */ + FAIL_IF(do_count_loop(&event, 0x40000000, overhead, true)); + + /* Run for 16G instructions */ + FAIL_IF(do_count_loop(&event, 0x400000000, overhead, true)); + + /* Run for 64G instructions */ + FAIL_IF(do_count_loop(&event, 0x1000000000, overhead, true)); + + /* Run for 128G instructions */ + 
FAIL_IF(do_count_loop(&event, 0x2000000000, overhead, true)); + + ebb_global_disable(); + event_close(&event); + + printf("Finished OK\n"); + + return 0; +} + +int main(void) +{ + return test_harness(instruction_count, "instruction_count"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c new file mode 100644 index 00000000000..0c9dd9b2e39 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c @@ -0,0 +1,100 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <sched.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> + +#include "ebb.h" + + +/* + * Test that tries to trigger CPU_FTR_PMAO_BUG. Which is a hardware defect + * where an exception triggers but we context switch before it is delivered and + * lose the exception. + */ + +static int test_body(void) +{ + int i, orig_period, max_period; + struct event event; + + /* We use PMC4 to make sure the kernel switches all counters correctly */ + event_init_named(&event, 0x40002, "instructions"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + ebb_enable_pmc_counting(4); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + FAIL_IF(ebb_event_enable(&event)); + + /* + * We want a low sample period, but we also want to get out of the EBB + * handler without tripping up again. + * + * This value picked after much experimentation. + */ + orig_period = max_period = sample_period = 400; + + mtspr(SPRN_PMC4, pmc_sample_period(sample_period)); + + while (ebb_state.stats.ebb_count < 1000000) { + /* + * We are trying to get the EBB exception to race exactly with + * us entering the kernel to do the syscall. We then need the + * kernel to decide our timeslice is up and context switch to + * the other thread. When we come back our EBB will have been + * lost and we'll spin in this while loop forever. + */ + + for (i = 0; i < 100000; i++) + sched_yield(); + + /* Change the sample period slightly to try and hit the race */ + if (sample_period >= (orig_period + 200)) + sample_period = orig_period; + else + sample_period++; + + if (sample_period > max_period) + max_period = sample_period; + } + + ebb_freeze_pmcs(); + ebb_global_disable(); + + count_pmc(4, sample_period); + mtspr(SPRN_PMC4, 0xdead); + + dump_summary_ebb_state(); + dump_ebb_hw_state(); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + + /* We vary our sample period so we need extra fudge here */ + FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period))); + + return 0; +} + +static int lost_exception(void) +{ + return eat_cpu(test_body); +} + +int main(void) +{ + return test_harness(lost_exception, "lost_exception"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c new file mode 100644 index 00000000000..67d78af3284 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c @@ -0,0 +1,91 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <sys/ioctl.h> + +#include "ebb.h" + + +/* + * Test counting multiple events using EBBs. 
+ */ +int multi_counter(void) +{ + struct event events[6]; + int i, group_fd; + + event_init_named(&events[0], 0x1001C, "PM_CMPLU_STALL_THRD"); + event_init_named(&events[1], 0x2D016, "PM_CMPLU_STALL_FXU"); + event_init_named(&events[2], 0x30006, "PM_CMPLU_STALL_OTHER_CMPL"); + event_init_named(&events[3], 0x4000A, "PM_CMPLU_STALL"); + event_init_named(&events[4], 0x600f4, "PM_RUN_CYC"); + event_init_named(&events[5], 0x500fa, "PM_RUN_INST_CMPL"); + + event_leader_ebb_init(&events[0]); + for (i = 1; i < 6; i++) + event_ebb_init(&events[i]); + + group_fd = -1; + for (i = 0; i < 6; i++) { + events[i].attr.exclude_kernel = 1; + events[i].attr.exclude_hv = 1; + events[i].attr.exclude_idle = 1; + + FAIL_IF(event_open_with_group(&events[i], group_fd)); + if (group_fd == -1) + group_fd = events[0].fd; + } + + ebb_enable_pmc_counting(1); + ebb_enable_pmc_counting(2); + ebb_enable_pmc_counting(3); + ebb_enable_pmc_counting(4); + ebb_enable_pmc_counting(5); + ebb_enable_pmc_counting(6); + setup_ebb_handler(standard_ebb_callee); + + FAIL_IF(ioctl(events[0].fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP)); + FAIL_IF(event_read(&events[0])); + + ebb_global_enable(); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC2, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC3, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC4, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC5, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC6, pmc_sample_period(sample_period)); + + while (ebb_state.stats.ebb_count < 50) { + FAIL_IF(core_busy_loop()); + FAIL_IF(ebb_check_mmcr0()); + } + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + count_pmc(2, sample_period); + count_pmc(3, sample_period); + count_pmc(4, sample_period); + count_pmc(5, sample_period); + count_pmc(6, sample_period); + + dump_ebb_state(); + + for (i = 0; i < 6; i++) + event_close(&events[i]); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + + return 0; +} + +int main(void) +{ + return test_harness(multi_counter, "multi_counter"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c new file mode 100644 index 00000000000..b8dc371f933 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c @@ -0,0 +1,109 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <signal.h> + +#include "ebb.h" + + +/* + * Test running multiple EBB using processes at once on a single CPU. They + * should all run happily without interfering with each other. 
+ */ + +static bool child_should_exit; + +static void sigint_handler(int signal) +{ + child_should_exit = true; +} + +struct sigaction sigint_action = { + .sa_handler = sigint_handler, +}; + +static int cycles_child(void) +{ + struct event event; + + if (sigaction(SIGINT, &sigint_action, NULL)) { + perror("sigaction"); + return 1; + } + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + ebb_enable_pmc_counting(1); + setup_ebb_handler(standard_ebb_callee); + ebb_global_enable(); + + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + while (!child_should_exit) { + FAIL_IF(core_busy_loop()); + FAIL_IF(ebb_check_mmcr0()); + } + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_summary_ebb_state(); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + + return 0; +} + +#define NR_CHILDREN 4 + +int multi_ebb_procs(void) +{ + pid_t pids[NR_CHILDREN]; + int cpu, rc, i; + + cpu = pick_online_cpu(); + FAIL_IF(cpu < 0); + FAIL_IF(bind_to_cpu(cpu)); + + for (i = 0; i < NR_CHILDREN; i++) { + pids[i] = fork(); + if (pids[i] == 0) + exit(cycles_child()); + } + + /* Have them all run for "a while" */ + sleep(10); + + rc = 0; + for (i = 0; i < NR_CHILDREN; i++) { + /* Tell them to stop */ + kill(pids[i], SIGINT); + /* And wait */ + rc |= wait_for_child(pids[i]); + } + + return rc; +} + +int main(void) +{ + return test_harness(multi_ebb_procs, "multi_ebb_procs"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c new file mode 100644 index 00000000000..2f9bf8edfa6 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c @@ -0,0 +1,61 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <setjmp.h> +#include <signal.h> + +#include "ebb.h" + + +/* Test that things work sanely if we have no handler */ + +static int no_handler_test(void) +{ + struct event event; + u64 val; + int i; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + FAIL_IF(ebb_event_enable(&event)); + + val = mfspr(SPRN_EBBHR); + FAIL_IF(val != 0); + + /* Make sure it overflows quickly */ + sample_period = 1000; + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + /* Spin to make sure the event has time to overflow */ + for (i = 0; i < 1000; i++) + mb(); + + dump_ebb_state(); + + /* We expect to see the PMU frozen & PMAO set */ + val = mfspr(SPRN_MMCR0); + FAIL_IF(val != 0x0000000080000080); + + event_close(&event); + + dump_ebb_state(); + + /* The real test is that we never took an EBB at 0x0 */ + + return 0; +} + +int main(void) +{ + return test_harness(no_handler_test,"no_handler_test"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c new file mode 100644 index 00000000000..986500fd213 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c @@ -0,0 +1,106 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#include <sched.h> +#include <signal.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> + +#include "ebb.h" + + +/* + * Test that the kernel properly handles PMAE across context switches. + * + * We test this by calling into the kernel inside our EBB handler, where PMAE + * is clear. A cpu eater companion thread is running on the same CPU as us to + * encourage the scheduler to switch us. + * + * The kernel must make sure that when it context switches us back in, it + * honours the fact that we had PMAE clear. + * + * Observed to hit the failing case on the first EBB with a broken kernel. + */ + +static bool mmcr0_mismatch; +static uint64_t before, after; + +static void syscall_ebb_callee(void) +{ + uint64_t val; + + val = mfspr(SPRN_BESCR); + if (!(val & BESCR_PMEO)) { + ebb_state.stats.spurious++; + goto out; + } + + ebb_state.stats.ebb_count++; + count_pmc(1, sample_period); + + before = mfspr(SPRN_MMCR0); + + /* Try and get ourselves scheduled, to force a PMU context switch */ + sched_yield(); + + after = mfspr(SPRN_MMCR0); + if (before != after) + mmcr0_mismatch = true; + +out: + reset_ebb(); +} + +static int test_body(void) +{ + struct event event; + + event_init_named(&event, 0x1001e, "cycles"); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + setup_ebb_handler(syscall_ebb_callee); + ebb_global_enable(); + + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + + while (ebb_state.stats.ebb_count < 20 && !mmcr0_mismatch) + FAIL_IF(core_busy_loop()); + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(1, sample_period); + + dump_ebb_state(); + + if (mmcr0_mismatch) + printf("Saw MMCR0 before 0x%lx after 0x%lx\n", before, after); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0); + FAIL_IF(mmcr0_mismatch); + + return 0; +} + +int pmae_handling(void) +{ + return eat_cpu(test_body); +} + +int main(void) +{ + return test_harness(pmae_handling, "pmae_handling"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c new file mode 100644 index 00000000000..a503fa70c95 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -0,0 +1,93 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <stdio.h> +#include <stdlib.h> + +#include "ebb.h" + + +/* + * Test that PMC5 & 6 are frozen (ie. don't overflow) when they are not being + * used. Tests the MMCR0_FC56 logic in the kernel. 
+ */ + +static int pmc56_overflowed; + +static void ebb_callee(void) +{ + uint64_t val; + + val = mfspr(SPRN_BESCR); + if (!(val & BESCR_PMEO)) { + ebb_state.stats.spurious++; + goto out; + } + + ebb_state.stats.ebb_count++; + count_pmc(2, sample_period); + + val = mfspr(SPRN_PMC5); + if (val >= COUNTER_OVERFLOW) + pmc56_overflowed++; + + count_pmc(5, COUNTER_OVERFLOW); + + val = mfspr(SPRN_PMC6); + if (val >= COUNTER_OVERFLOW) + pmc56_overflowed++; + + count_pmc(6, COUNTER_OVERFLOW); + +out: + reset_ebb(); +} + +int pmc56_overflow(void) +{ + struct event event; + + /* Use PMC2 so we set PMCjCE, which enables PMC5/6 */ + event_init(&event, 0x2001e); + event_leader_ebb_init(&event); + + event.attr.exclude_kernel = 1; + event.attr.exclude_hv = 1; + event.attr.exclude_idle = 1; + + FAIL_IF(event_open(&event)); + + setup_ebb_handler(ebb_callee); + ebb_global_enable(); + + FAIL_IF(ebb_event_enable(&event)); + + mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); + mtspr(SPRN_PMC5, 0); + mtspr(SPRN_PMC6, 0); + + while (ebb_state.stats.ebb_count < 10) + FAIL_IF(core_busy_loop()); + + ebb_global_disable(); + ebb_freeze_pmcs(); + + count_pmc(2, sample_period); + + dump_ebb_state(); + + printf("PMC5/6 overflow %d\n", pmc56_overflowed); + + event_close(&event); + + FAIL_IF(ebb_state.stats.ebb_count == 0 || pmc56_overflowed != 0); + + return 0; +} + +int main(void) +{ + return test_harness(pmc56_overflow, "pmc56_overflow"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/reg.h b/tools/testing/selftests/powerpc/pmu/ebb/reg.h new file mode 100644 index 00000000000..5921b0dfe2e --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/reg.h @@ -0,0 +1,49 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#ifndef _SELFTESTS_POWERPC_REG_H +#define _SELFTESTS_POWERPC_REG_H + +#define __stringify_1(x) #x +#define __stringify(x) __stringify_1(x) + +#define mfspr(rn) ({unsigned long rval; \ + asm volatile("mfspr %0," __stringify(rn) \ + : "=r" (rval)); rval; }) +#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \ + : "r" ((unsigned long)(v)) \ + : "memory") + +#define mb() asm volatile("sync" : : : "memory"); + +#define SPRN_MMCR2 769 +#define SPRN_MMCRA 770 +#define SPRN_MMCR0 779 +#define MMCR0_PMAO 0x00000080 +#define MMCR0_PMAE 0x04000000 +#define MMCR0_FC 0x80000000 +#define SPRN_EBBHR 804 +#define SPRN_EBBRR 805 +#define SPRN_BESCR 806 /* Branch event status & control register */ +#define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */ +#define SPRN_BESCRSU 801 /* Branch event status & control set upper */ +#define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */ +#define SPRN_BESCRRU 803 /* Branch event status & control REset upper */ + +#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */ +#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */ + +#define SPRN_PMC1 771 +#define SPRN_PMC2 772 +#define SPRN_PMC3 773 +#define SPRN_PMC4 774 +#define SPRN_PMC5 775 +#define SPRN_PMC6 776 + +#define SPRN_SIAR 780 +#define SPRN_SDAR 781 +#define SPRN_SIER 768 + +#endif /* _SELFTESTS_POWERPC_REG_H */ diff --git a/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c b/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c new file mode 100644 index 00000000000..0cae66f659a --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c @@ -0,0 +1,39 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
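
The mfspr()/mtspr() wrappers and the BESCR/PMC definitions above are what every EBB callee in this series is built from. The following is a minimal sketch, not part of the patch, of that common shape. It assumes the ebb.h the tests above include (for ebb_state and reset_ebb()), and EXAMPLE_PERIOD plus the callee name are purely illustrative.

#include <stdint.h>
#include "ebb.h"

#define EXAMPLE_PERIOD 1000000        /* arbitrary sample period for illustration */

static void example_ebb_callee(void)
{
        uint64_t bescr = mfspr(SPRN_BESCR);

        /* Only count EBBs the PMU actually raised, as the tests above do */
        if (bescr & BESCR_PMEO) {
                ebb_state.stats.ebb_count++;
                /* Re-arm PMC1: a 32-bit PMC raises the alert when it reaches 0x80000000 */
                mtspr(SPRN_PMC1, 0x80000000UL - EXAMPLE_PERIOD);
        }

        reset_ebb();        /* clear the EBB status so the next event can fire */
}

It would be installed with setup_ebb_handler(example_ebb_callee), just as the tests above install their callees.
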
+ */ + +#include <stdio.h> +#include <stdlib.h> + +#include "ebb.h" +#include "reg.h" + + +/* + * Test basic access to the EBB regs, they should be user accessible with no + * kernel interaction required. + */ +int reg_access(void) +{ + uint64_t val, expected; + + expected = 0x8000000100000000ull; + mtspr(SPRN_BESCR, expected); + val = mfspr(SPRN_BESCR); + + FAIL_IF(val != expected); + + expected = 0x0000000001000000ull; + mtspr(SPRN_EBBHR, expected); + val = mfspr(SPRN_EBBHR); + + FAIL_IF(val != expected); + + return 0; +} + +int main(void) +{ + return test_harness(reg_access, "reg_access"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c new file mode 100644 index 00000000000..d56607e4ffa --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c @@ -0,0 +1,91 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests a pinned per-task event vs an EBB - in that order. The pinned per-task + * event should prevent the EBB event from being enabled. + */ + +static int setup_child_event(struct event *event, pid_t child_pid) +{ + event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); + + event->attr.pinned = 1; + + event->attr.exclude_kernel = 1; + event->attr.exclude_hv = 1; + event->attr.exclude_idle = 1; + + FAIL_IF(event_open_with_pid(event, child_pid)); + FAIL_IF(event_enable(event)); + + return 0; +} + +int task_event_pinned_vs_ebb(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + pid_t pid; + int rc; + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(ebb_child(write_pipe, read_pipe)); + } + + /* We setup the task event first */ + rc = setup_child_event(&event, pid); + if (rc) { + kill_child_and_wait(pid); + return rc; + } + + /* Signal the child to install its EBB event and wait */ + if (sync_with_child(read_pipe, write_pipe)) + /* If it fails, wait for it to exit */ + goto wait; + + /* Signal the child to run */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + +wait: + /* We expect it to fail to read the event */ + FAIL_IF(wait_for_child(pid) != 2); + FAIL_IF(event_disable(&event)); + FAIL_IF(event_read(&event)); + + event_report(&event); + + FAIL_IF(event.result.value == 0); + /* + * For reasons I don't understand enabled is usually just slightly + * lower than running. Would be good to confirm why. + */ + FAIL_IF(event.result.enabled == 0); + FAIL_IF(event.result.running == 0); + + return 0; +} + +int main(void) +{ + return test_harness(task_event_pinned_vs_ebb, "task_event_pinned_vs_ebb"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c new file mode 100644 index 00000000000..eba32196dbb --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c @@ -0,0 +1,83 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
+ */ + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "ebb.h" + + +/* + * Tests a per-task event vs an EBB - in that order. The EBB should push the + * per-task event off the PMU. + */ + +static int setup_child_event(struct event *event, pid_t child_pid) +{ + event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL"); + + event->attr.exclude_kernel = 1; + event->attr.exclude_hv = 1; + event->attr.exclude_idle = 1; + + FAIL_IF(event_open_with_pid(event, child_pid)); + FAIL_IF(event_enable(event)); + + return 0; +} + +int task_event_vs_ebb(void) +{ + union pipe read_pipe, write_pipe; + struct event event; + pid_t pid; + int rc; + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + /* NB order of pipes looks reversed */ + exit(ebb_child(write_pipe, read_pipe)); + } + + /* We setup the task event first */ + rc = setup_child_event(&event, pid); + if (rc) { + kill_child_and_wait(pid); + return rc; + } + + /* Signal the child to install its EBB event and wait */ + if (sync_with_child(read_pipe, write_pipe)) + /* If it fails, wait for it to exit */ + goto wait; + + /* Signal the child to run */ + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + +wait: + /* The EBB event should push the task event off so the child should succeed */ + FAIL_IF(wait_for_child(pid)); + FAIL_IF(event_disable(&event)); + FAIL_IF(event_read(&event)); + + event_report(&event); + + /* The task event may have run, or not so we can't assert anything about it */ + + return 0; +} + +int main(void) +{ + return test_harness(task_event_vs_ebb, "task_event_vs_ebb"); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.c b/tools/testing/selftests/powerpc/pmu/ebb/trace.c new file mode 100644 index 00000000000..251e66ab2aa --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.c @@ -0,0 +1,300 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> + +#include "trace.h" + + +struct trace_buffer *trace_buffer_allocate(u64 size) +{ + struct trace_buffer *tb; + + if (size < sizeof(*tb)) { + fprintf(stderr, "Error: trace buffer too small\n"); + return NULL; + } + + tb = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (tb == MAP_FAILED) { + perror("mmap"); + return NULL; + } + + tb->size = size; + tb->tail = tb->data; + tb->overflow = false; + + return tb; +} + +static bool trace_check_bounds(struct trace_buffer *tb, void *p) +{ + return p < ((void *)tb + tb->size); +} + +static bool trace_check_alloc(struct trace_buffer *tb, void *p) +{ + /* + * If we ever overflowed don't allow any more input. This prevents us + * from dropping a large item and then later logging a small one. The + * buffer should just stop when overflow happened, not be patchy. If + * you're overflowing, make your buffer bigger. 
+ */ + if (tb->overflow) + return false; + + if (!trace_check_bounds(tb, p)) { + tb->overflow = true; + return false; + } + + return true; +} + +static void *trace_alloc(struct trace_buffer *tb, int bytes) +{ + void *p, *newtail; + + p = tb->tail; + newtail = tb->tail + bytes; + if (!trace_check_alloc(tb, newtail)) + return NULL; + + tb->tail = newtail; + + return p; +} + +static struct trace_entry *trace_alloc_entry(struct trace_buffer *tb, int payload_size) +{ + struct trace_entry *e; + + e = trace_alloc(tb, sizeof(*e) + payload_size); + if (e) + e->length = payload_size; + + return e; +} + +int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value) +{ + struct trace_entry *e; + u64 *p; + + e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value)); + if (!e) + return -ENOSPC; + + e->type = TRACE_TYPE_REG; + p = (u64 *)e->data; + *p++ = reg; + *p++ = value; + + return 0; +} + +int trace_log_counter(struct trace_buffer *tb, u64 value) +{ + struct trace_entry *e; + u64 *p; + + e = trace_alloc_entry(tb, sizeof(value)); + if (!e) + return -ENOSPC; + + e->type = TRACE_TYPE_COUNTER; + p = (u64 *)e->data; + *p++ = value; + + return 0; +} + +int trace_log_string(struct trace_buffer *tb, char *str) +{ + struct trace_entry *e; + char *p; + int len; + + len = strlen(str); + + /* We NULL terminate to make printing easier */ + e = trace_alloc_entry(tb, len + 1); + if (!e) + return -ENOSPC; + + e->type = TRACE_TYPE_STRING; + p = (char *)e->data; + memcpy(p, str, len); + p += len; + *p = '\0'; + + return 0; +} + +int trace_log_indent(struct trace_buffer *tb) +{ + struct trace_entry *e; + + e = trace_alloc_entry(tb, 0); + if (!e) + return -ENOSPC; + + e->type = TRACE_TYPE_INDENT; + + return 0; +} + +int trace_log_outdent(struct trace_buffer *tb) +{ + struct trace_entry *e; + + e = trace_alloc_entry(tb, 0); + if (!e) + return -ENOSPC; + + e->type = TRACE_TYPE_OUTDENT; + + return 0; +} + +static void trace_print_header(int seq, int prefix) +{ + printf("%*s[%d]: ", prefix, "", seq); +} + +static char *trace_decode_reg(int reg) +{ + switch (reg) { + case 769: return "SPRN_MMCR2"; break; + case 770: return "SPRN_MMCRA"; break; + case 779: return "SPRN_MMCR0"; break; + case 804: return "SPRN_EBBHR"; break; + case 805: return "SPRN_EBBRR"; break; + case 806: return "SPRN_BESCR"; break; + case 800: return "SPRN_BESCRS"; break; + case 801: return "SPRN_BESCRSU"; break; + case 802: return "SPRN_BESCRR"; break; + case 803: return "SPRN_BESCRRU"; break; + case 771: return "SPRN_PMC1"; break; + case 772: return "SPRN_PMC2"; break; + case 773: return "SPRN_PMC3"; break; + case 774: return "SPRN_PMC4"; break; + case 775: return "SPRN_PMC5"; break; + case 776: return "SPRN_PMC6"; break; + case 780: return "SPRN_SIAR"; break; + case 781: return "SPRN_SDAR"; break; + case 768: return "SPRN_SIER"; break; + } + + return NULL; +} + +static void trace_print_reg(struct trace_entry *e) +{ + u64 *p, *reg, *value; + char *name; + + p = (u64 *)e->data; + reg = p++; + value = p; + + name = trace_decode_reg(*reg); + if (name) + printf("register %-10s = 0x%016llx\n", name, *value); + else + printf("register %lld = 0x%016llx\n", *reg, *value); +} + +static void trace_print_counter(struct trace_entry *e) +{ + u64 *value; + + value = (u64 *)e->data; + printf("counter = %lld\n", *value); +} + +static void trace_print_string(struct trace_entry *e) +{ + char *str; + + str = (char *)e->data; + puts(str); +} + +#define BASE_PREFIX 2 +#define PREFIX_DELTA 8 + +static void trace_print_entry(struct trace_entry *e, int seq, int *prefix) +{ + 
switch (e->type) { + case TRACE_TYPE_REG: + trace_print_header(seq, *prefix); + trace_print_reg(e); + break; + case TRACE_TYPE_COUNTER: + trace_print_header(seq, *prefix); + trace_print_counter(e); + break; + case TRACE_TYPE_STRING: + trace_print_header(seq, *prefix); + trace_print_string(e); + break; + case TRACE_TYPE_INDENT: + trace_print_header(seq, *prefix); + puts("{"); + *prefix += PREFIX_DELTA; + break; + case TRACE_TYPE_OUTDENT: + *prefix -= PREFIX_DELTA; + if (*prefix < BASE_PREFIX) + *prefix = BASE_PREFIX; + trace_print_header(seq, *prefix); + puts("}"); + break; + default: + trace_print_header(seq, *prefix); + printf("entry @ %p type %d\n", e, e->type); + break; + } +} + +void trace_buffer_print(struct trace_buffer *tb) +{ + struct trace_entry *e; + int i, prefix; + void *p; + + printf("Trace buffer dump:\n"); + printf(" address %p \n", tb); + printf(" tail %p\n", tb->tail); + printf(" size %llu\n", tb->size); + printf(" overflow %s\n", tb->overflow ? "TRUE" : "false"); + printf(" Content:\n"); + + p = tb->data; + + i = 0; + prefix = BASE_PREFIX; + + while (trace_check_bounds(tb, p) && p < tb->tail) { + e = p; + + trace_print_entry(e, i, &prefix); + + i++; + p = (void *)e + sizeof(*e) + e->length; + } +} + +void trace_print_location(struct trace_buffer *tb) +{ + printf("Trace buffer 0x%llx bytes @ %p\n", tb->size, tb); +} diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.h b/tools/testing/selftests/powerpc/pmu/ebb/trace.h new file mode 100644 index 00000000000..926458e28c8 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.h @@ -0,0 +1,41 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#ifndef _SELFTESTS_POWERPC_PMU_EBB_TRACE_H +#define _SELFTESTS_POWERPC_PMU_EBB_TRACE_H + +#include "utils.h" + +#define TRACE_TYPE_REG 1 +#define TRACE_TYPE_COUNTER 2 +#define TRACE_TYPE_STRING 3 +#define TRACE_TYPE_INDENT 4 +#define TRACE_TYPE_OUTDENT 5 + +struct trace_entry +{ + u8 type; + u8 length; + u8 data[0]; +}; + +struct trace_buffer +{ + u64 size; + bool overflow; + void *tail; + u8 data[0]; +}; + +struct trace_buffer *trace_buffer_allocate(u64 size); +int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value); +int trace_log_counter(struct trace_buffer *tb, u64 value); +int trace_log_string(struct trace_buffer *tb, char *str); +int trace_log_indent(struct trace_buffer *tb); +int trace_log_outdent(struct trace_buffer *tb); +void trace_buffer_print(struct trace_buffer *tb); +void trace_print_location(struct trace_buffer *tb); + +#endif /* _SELFTESTS_POWERPC_PMU_EBB_TRACE_H */ diff --git a/tools/testing/selftests/powerpc/pmu/event.c b/tools/testing/selftests/powerpc/pmu/event.c new file mode 100644 index 00000000000..184b36807d4 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/event.c @@ -0,0 +1,131 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
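
The trace buffer added above is append-only and deliberately stops logging on overflow rather than wrapping. Here is a small sketch, not part of the patch, of the intended call sequence; the 1MB size and the logged values are illustrative, and it assumes the build resolves trace.h and utils.h as the ebb Makefile does.

#include <stdio.h>
#include "trace.h"

int main(void)
{
        struct trace_buffer *tb;

        tb = trace_buffer_allocate(1024 * 1024);        /* one anonymous mmap */
        if (!tb)
                return 1;

        trace_log_string(tb, "entering handler");
        trace_log_indent(tb);
        trace_log_reg(tb, 779, 0x80000080);        /* 779 decodes as SPRN_MMCR0 */
        trace_log_counter(tb, 42);
        trace_log_outdent(tb);

        trace_buffer_print(tb);        /* decoded, indented dump */
        trace_print_location(tb);

        return 0;
}

Each trace_log_* call returns -ENOSPC once the buffer has overflowed, so callers can log unconditionally and simply lose the tail.
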
+ */ + +#define _GNU_SOURCE +#include <unistd.h> +#include <sys/syscall.h> +#include <string.h> +#include <stdio.h> +#include <sys/ioctl.h> + +#include "event.h" + + +int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu, + int group_fd, unsigned long flags) +{ + return syscall(__NR_perf_event_open, attr, pid, cpu, + group_fd, flags); +} + +void event_init_opts(struct event *e, u64 config, int type, char *name) +{ + memset(e, 0, sizeof(*e)); + + e->name = name; + + e->attr.type = type; + e->attr.config = config; + e->attr.size = sizeof(e->attr); + /* This has to match the structure layout in the header */ + e->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | \ + PERF_FORMAT_TOTAL_TIME_RUNNING; +} + +void event_init_named(struct event *e, u64 config, char *name) +{ + event_init_opts(e, config, PERF_TYPE_RAW, name); +} + +void event_init(struct event *e, u64 config) +{ + event_init_opts(e, config, PERF_TYPE_RAW, "event"); +} + +#define PERF_CURRENT_PID 0 +#define PERF_NO_PID -1 +#define PERF_NO_CPU -1 +#define PERF_NO_GROUP -1 + +int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd) +{ + e->fd = perf_event_open(&e->attr, pid, cpu, group_fd, 0); + if (e->fd == -1) { + perror("perf_event_open"); + return -1; + } + + return 0; +} + +int event_open_with_group(struct event *e, int group_fd) +{ + return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, group_fd); +} + +int event_open_with_pid(struct event *e, pid_t pid) +{ + return event_open_with_options(e, pid, PERF_NO_CPU, PERF_NO_GROUP); +} + +int event_open_with_cpu(struct event *e, int cpu) +{ + return event_open_with_options(e, PERF_NO_PID, cpu, PERF_NO_GROUP); +} + +int event_open(struct event *e) +{ + return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, PERF_NO_GROUP); +} + +void event_close(struct event *e) +{ + close(e->fd); +} + +int event_enable(struct event *e) +{ + return ioctl(e->fd, PERF_EVENT_IOC_ENABLE); +} + +int event_disable(struct event *e) +{ + return ioctl(e->fd, PERF_EVENT_IOC_DISABLE); +} + +int event_reset(struct event *e) +{ + return ioctl(e->fd, PERF_EVENT_IOC_RESET); +} + +int event_read(struct event *e) +{ + int rc; + + rc = read(e->fd, &e->result, sizeof(e->result)); + if (rc != sizeof(e->result)) { + fprintf(stderr, "read error on event %p!\n", e); + return -1; + } + + return 0; +} + +void event_report_justified(struct event *e, int name_width, int result_width) +{ + printf("%*s: result %*llu ", name_width, e->name, result_width, + e->result.value); + + if (e->result.running == e->result.enabled) + printf("running/enabled %llu\n", e->result.running); + else + printf("running %llu enabled %llu\n", e->result.running, + e->result.enabled); +} + +void event_report(struct event *e) +{ + event_report_justified(e, 0, 0); +} diff --git a/tools/testing/selftests/powerpc/pmu/event.h b/tools/testing/selftests/powerpc/pmu/event.h new file mode 100644 index 00000000000..a0ea6b1eef7 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/event.h @@ -0,0 +1,43 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
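
event.c above is a thin wrapper around perf_event_open() plus the PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING read format. Below is a self-contained counting sketch, not part of the patch; the raw code 0x1001e is the cycles encoding the EBB tests above use, and the busy loop is just something to count.

#include <stdio.h>
#include "event.h"

int main(void)
{
        struct event e;
        volatile unsigned long i;

        event_init_named(&e, 0x1001e, "cycles");

        if (event_open(&e))                /* perf_event_open() on the current task */
                return 1;

        event_enable(&e);
        for (i = 0; i < 100000000; i++)        /* some work to count */
                ;
        event_disable(&e);

        if (event_read(&e))                /* fills e.result.{value,running,enabled} */
                return 1;

        event_report(&e);
        event_close(&e);
        return 0;
}

event_read() treats a short read as an error, so a report always reflects all three u64 fields of the result.
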
+ */ + +#ifndef _SELFTESTS_POWERPC_PMU_EVENT_H +#define _SELFTESTS_POWERPC_PMU_EVENT_H + +#include <unistd.h> +#include <linux/perf_event.h> + +#include "utils.h" + + +struct event { + struct perf_event_attr attr; + char *name; + int fd; + /* This must match the read_format we use */ + struct { + u64 value; + u64 running; + u64 enabled; + } result; +}; + +void event_init(struct event *e, u64 config); +void event_init_named(struct event *e, u64 config, char *name); +void event_init_opts(struct event *e, u64 config, int type, char *name); +int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd); +int event_open_with_group(struct event *e, int group_fd); +int event_open_with_pid(struct event *e, pid_t pid); +int event_open_with_cpu(struct event *e, int cpu); +int event_open(struct event *e); +void event_close(struct event *e); +int event_enable(struct event *e); +int event_disable(struct event *e); +int event_reset(struct event *e); +int event_read(struct event *e); +void event_report_justified(struct event *e, int name_width, int result_width); +void event_report(struct event *e); + +#endif /* _SELFTESTS_POWERPC_PMU_EVENT_H */ diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c new file mode 100644 index 00000000000..0f6a4731d54 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/lib.c @@ -0,0 +1,252 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#define _GNU_SOURCE /* For CPU_ZERO etc. */ + +#include <errno.h> +#include <sched.h> +#include <setjmp.h> +#include <stdlib.h> +#include <sys/wait.h> + +#include "utils.h" +#include "lib.h" + + +int pick_online_cpu(void) +{ + cpu_set_t mask; + int cpu; + + CPU_ZERO(&mask); + + if (sched_getaffinity(0, sizeof(mask), &mask)) { + perror("sched_getaffinity"); + return -1; + } + + /* We prefer a primary thread, but skip 0 */ + for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) + if (CPU_ISSET(cpu, &mask)) + return cpu; + + /* Search for anything, but in reverse */ + for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) + if (CPU_ISSET(cpu, &mask)) + return cpu; + + printf("No cpus in affinity mask?!\n"); + return -1; +} + +int bind_to_cpu(int cpu) +{ + cpu_set_t mask; + + printf("Binding to cpu %d\n", cpu); + + CPU_ZERO(&mask); + CPU_SET(cpu, &mask); + + return sched_setaffinity(0, sizeof(mask), &mask); +} + +#define PARENT_TOKEN 0xAA +#define CHILD_TOKEN 0x55 + +int sync_with_child(union pipe read_pipe, union pipe write_pipe) +{ + char c = PARENT_TOKEN; + + FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); + FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1); + if (c != CHILD_TOKEN) /* sometimes expected */ + return 1; + + return 0; +} + +int wait_for_parent(union pipe read_pipe) +{ + char c; + + FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1); + FAIL_IF(c != PARENT_TOKEN); + + return 0; +} + +int notify_parent(union pipe write_pipe) +{ + char c = CHILD_TOKEN; + + FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); + + return 0; +} + +int notify_parent_of_error(union pipe write_pipe) +{ + char c = ~CHILD_TOKEN; + + FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); + + return 0; +} + +int wait_for_child(pid_t child_pid) +{ + int rc; + + if (waitpid(child_pid, &rc, 0) == -1) { + perror("waitpid"); + return 1; + } + + if (WIFEXITED(rc)) + rc = WEXITSTATUS(rc); + else + rc = 1; /* Signal or other */ + + return rc; +} + +int kill_child_and_wait(pid_t child_pid) +{ + kill(child_pid, SIGTERM); + + return wait_for_child(child_pid); +} + +static int eat_cpu_child(union pipe 
read_pipe, union pipe write_pipe) +{ + volatile int i = 0; + + /* + * We are just here to eat cpu and die. So make sure we can be killed, + * and also don't do any custom SIGTERM handling. + */ + signal(SIGTERM, SIG_DFL); + + notify_parent(write_pipe); + wait_for_parent(read_pipe); + + /* Soak up cpu forever */ + while (1) i++; + + return 0; +} + +pid_t eat_cpu(int (test_function)(void)) +{ + union pipe read_pipe, write_pipe; + int cpu, rc; + pid_t pid; + + cpu = pick_online_cpu(); + FAIL_IF(cpu < 0); + FAIL_IF(bind_to_cpu(cpu)); + + if (pipe(read_pipe.fds) == -1) + return -1; + + if (pipe(write_pipe.fds) == -1) + return -1; + + pid = fork(); + if (pid == 0) + exit(eat_cpu_child(write_pipe, read_pipe)); + + if (sync_with_child(read_pipe, write_pipe)) { + rc = -1; + goto out; + } + + printf("main test running as pid %d\n", getpid()); + + rc = test_function(); +out: + kill(pid, SIGKILL); + + return rc; +} + +struct addr_range libc, vdso; + +int parse_proc_maps(void) +{ + char execute, name[128]; + uint64_t start, end; + FILE *f; + int rc; + + f = fopen("/proc/self/maps", "r"); + if (!f) { + perror("fopen"); + return -1; + } + + do { + /* This skips line with no executable which is what we want */ + rc = fscanf(f, "%lx-%lx %*c%*c%c%*c %*x %*d:%*d %*d %127s\n", + &start, &end, &execute, name); + if (rc <= 0) + break; + + if (execute != 'x') + continue; + + if (strstr(name, "libc")) { + libc.first = start; + libc.last = end - 1; + } else if (strstr(name, "[vdso]")) { + vdso.first = start; + vdso.last = end - 1; + } + } while(1); + + fclose(f); + + return 0; +} + +#define PARANOID_PATH "/proc/sys/kernel/perf_event_paranoid" + +bool require_paranoia_below(int level) +{ + unsigned long current; + char *end, buf[16]; + FILE *f; + int rc; + + rc = -1; + + f = fopen(PARANOID_PATH, "r"); + if (!f) { + perror("fopen"); + goto out; + } + + if (!fgets(buf, sizeof(buf), f)) { + printf("Couldn't read " PARANOID_PATH "?\n"); + goto out_close; + } + + current = strtoul(buf, &end, 10); + + if (end == buf) { + printf("Couldn't parse " PARANOID_PATH "?\n"); + goto out_close; + } + + if (current >= level) + goto out; + + rc = 0; +out_close: + fclose(f); +out: + return rc; +} diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h new file mode 100644 index 00000000000..ca5d72ae3be --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/lib.h @@ -0,0 +1,41 @@ +/* + * Copyright 2014, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
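
pick_online_cpu() and bind_to_cpu() above are how multi_ebb_procs() and eat_cpu() keep a test on one CPU so per-CPU PMU state is not disturbed by migration; pick_online_cpu() prefers a primary thread and skips CPU 0. A minimal sketch, not part of the patch, of that pinning step on its own:

#include <stdio.h>
#include "lib.h"
#include "utils.h"

int main(void)
{
        int cpu = pick_online_cpu();

        FAIL_IF(cpu < 0);
        FAIL_IF(bind_to_cpu(cpu));        /* prints "Binding to cpu N" itself */

        printf("now running only on cpu %d\n", cpu);
        return 0;
}
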
+ */ + +#ifndef __SELFTESTS_POWERPC_PMU_LIB_H +#define __SELFTESTS_POWERPC_PMU_LIB_H + +#include <stdio.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> + +union pipe { + struct { + int read_fd; + int write_fd; + }; + int fds[2]; +}; + +extern int pick_online_cpu(void); +extern int bind_to_cpu(int cpu); +extern int kill_child_and_wait(pid_t child_pid); +extern int wait_for_child(pid_t child_pid); +extern int sync_with_child(union pipe read_pipe, union pipe write_pipe); +extern int wait_for_parent(union pipe read_pipe); +extern int notify_parent(union pipe write_pipe); +extern int notify_parent_of_error(union pipe write_pipe); +extern pid_t eat_cpu(int (test_function)(void)); +extern bool require_paranoia_below(int level); + +struct addr_range { + uint64_t first, last; +}; + +extern struct addr_range libc, vdso; + +int parse_proc_maps(void); + +#endif /* __SELFTESTS_POWERPC_PMU_LIB_H */ diff --git a/tools/testing/selftests/powerpc/pmu/loop.S b/tools/testing/selftests/powerpc/pmu/loop.S new file mode 100644 index 00000000000..20c1f0876c4 --- /dev/null +++ b/tools/testing/selftests/powerpc/pmu/loop.S @@ -0,0 +1,43 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#include <ppc-asm.h> + + .text + +FUNC_START(thirty_two_instruction_loop) + cmpdi r3,0 + beqlr + addi r4,r3,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 + addi r4,r4,1 # 28 addi's + subi r3,r3,1 + b FUNC_NAME(thirty_two_instruction_loop) +FUNC_END(thirty_two_instruction_loop) diff --git a/tools/testing/selftests/powerpc/subunit.h b/tools/testing/selftests/powerpc/subunit.h new file mode 100644 index 00000000000..9c6c4e901ab --- /dev/null +++ b/tools/testing/selftests/powerpc/subunit.h @@ -0,0 +1,52 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. 
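
lib.h above wraps the two anonymous pipes and the handshake helpers that the *_vs_ebb tests rely on. The sketch below, not part of the patch, shows just that handshake with the test body elided; child_fn is an illustrative name, and the swapped pipe arguments in the fork follow the "NB order of pipes looks reversed" pattern in the tests.

#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "lib.h"

static int child_fn(union pipe read_pipe, union pipe write_pipe)
{
        if (notify_parent(write_pipe))        /* "I'm ready" */
                return 1;
        if (wait_for_parent(read_pipe))       /* block until the parent says go */
                return 1;
        /* ... the interesting work would go here ... */
        return 0;
}

int main(void)
{
        union pipe read_pipe, write_pipe;
        pid_t pid;

        if (pipe(read_pipe.fds) == -1 || pipe(write_pipe.fds) == -1)
                return 1;

        pid = fork();
        if (pid == 0)
                exit(child_fn(write_pipe, read_pipe));        /* NB: pipes reversed */

        if (sync_with_child(read_pipe, write_pipe))        /* release child, wait for ack */
                return 1;

        return wait_for_child(pid);
}

sync_with_child() returns nonzero when the child sends something other than its token, which is why the tests above treat that as "wait for the child to exit" rather than an immediate hard failure.
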
+ */ + +#ifndef _SELFTESTS_POWERPC_SUBUNIT_H +#define _SELFTESTS_POWERPC_SUBUNIT_H + +static inline void test_start(char *name) +{ + printf("test: %s\n", name); +} + +static inline void test_failure_detail(char *name, char *detail) +{ + printf("failure: %s [%s]\n", name, detail); +} + +static inline void test_failure(char *name) +{ + printf("failure: %s\n", name); +} + +static inline void test_error(char *name) +{ + printf("error: %s\n", name); +} + +static inline void test_skip(char *name) +{ + printf("skip: %s\n", name); +} + +static inline void test_success(char *name) +{ + printf("success: %s\n", name); +} + +static inline void test_finish(char *name, int status) +{ + if (status) + test_failure(name); + else + test_success(name); +} + +static inline void test_set_git_version(char *value) +{ + printf("tags: git_version:%s\n", value); +} + +#endif /* _SELFTESTS_POWERPC_SUBUNIT_H */ diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile new file mode 100644 index 00000000000..2cede239a07 --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -0,0 +1,15 @@ +PROGS := tm-resched-dscr + +all: $(PROGS) + +$(PROGS): ../harness.c + +run_tests: all + @-for PROG in $(PROGS); do \ + ./$$PROG; \ + done; + +clean: + rm -f $(PROGS) *.o + +.PHONY: all run_tests clean diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c new file mode 100644 index 00000000000..42d4c8caad8 --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c @@ -0,0 +1,98 @@ +/* Test context switching to see if the DSCR SPR is correctly preserved + * when within a transaction. + * + * Note: We assume that the DSCR has been left at the default value (0) + * for all CPUs. + * + * Method: + * + * Set a value into the DSCR. + * + * Start a transaction, and suspend it (*). + * + * Hard loop checking to see if the transaction has become doomed. + * + * Now that we *may* have been preempted, record the DSCR and TEXASR SPRS. + * + * If the abort was because of a context switch, check the DSCR value. + * Otherwise, try again. + * + * (*) If the transaction is not suspended we can't see the problem because + * the transaction abort handler will restore the DSCR to it's checkpointed + * value before we regain control. 
+ */ + +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <asm/tm.h> + +#include "utils.h" + +#define TBEGIN ".long 0x7C00051D ;" +#define TEND ".long 0x7C00055D ;" +#define TCHECK ".long 0x7C00059C ;" +#define TSUSPEND ".long 0x7C0005DD ;" +#define TRESUME ".long 0x7C2005DD ;" +#define SPRN_TEXASR 0x82 +#define SPRN_DSCR 0x03 + +int test_body(void) +{ + uint64_t rv, dscr1 = 1, dscr2, texasr; + + printf("Check DSCR TM context switch: "); + fflush(stdout); + for (;;) { + rv = 1; + asm __volatile__ ( + /* set a known value into the DSCR */ + "ld 3, %[dscr1];" + "mtspr %[sprn_dscr], 3;" + + /* start and suspend a transaction */ + TBEGIN + "beq 1f;" + TSUSPEND + + /* hard loop until the transaction becomes doomed */ + "2: ;" + TCHECK + "bc 4, 0, 2b;" + + /* record DSCR and TEXASR */ + "mfspr 3, %[sprn_dscr];" + "std 3, %[dscr2];" + "mfspr 3, %[sprn_texasr];" + "std 3, %[texasr];" + + TRESUME + TEND + "li %[rv], 0;" + "1: ;" + : [rv]"=r"(rv), [dscr2]"=m"(dscr2), [texasr]"=m"(texasr) + : [dscr1]"m"(dscr1) + , [sprn_dscr]"i"(SPRN_DSCR), [sprn_texasr]"i"(SPRN_TEXASR) + : "memory", "r3" + ); + assert(rv); /* make sure the transaction aborted */ + if ((texasr >> 56) != TM_CAUSE_RESCHED) { + putchar('.'); + fflush(stdout); + continue; + } + if (dscr2 != dscr1) { + printf(" FAIL\n"); + return 1; + } else { + printf(" OK\n"); + return 0; + } + } +} + +int main(void) +{ + return test_harness(test_body, "tm_resched_dscr"); +} diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h new file mode 100644 index 00000000000..a93777ae068 --- /dev/null +++ b/tools/testing/selftests/powerpc/utils.h @@ -0,0 +1,49 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corp. + * Licensed under GPLv2. + */ + +#ifndef _SELFTESTS_POWERPC_UTILS_H +#define _SELFTESTS_POWERPC_UTILS_H + +#include <stdint.h> +#include <stdbool.h> + +/* Avoid headaches with PRI?64 - just use %ll? 
always */ +typedef unsigned long long u64; +typedef signed long long s64; + +/* Just for familiarity */ +typedef uint32_t u32; +typedef uint8_t u8; + + +int test_harness(int (test_function)(void), char *name); + + +/* Yes, this is evil */ +#define FAIL_IF(x) \ +do { \ + if ((x)) { \ + fprintf(stderr, \ + "[FAIL] Test FAILED on line %d\n", __LINE__); \ + return 1; \ + } \ +} while (0) + +/* The test harness uses this, yes it's gross */ +#define MAGIC_SKIP_RETURN_VALUE 99 + +#define SKIP_IF(x) \ +do { \ + if ((x)) { \ + fprintf(stderr, \ + "[SKIP] Test skipped on line %d\n", __LINE__); \ + return MAGIC_SKIP_RETURN_VALUE; \ + } \ +} while (0) + +#define _str(s) #s +#define str(s) _str(s) + +#endif /* _SELFTESTS_POWERPC_UTILS_H */ diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile new file mode 100644 index 00000000000..47ae2d385ce --- /dev/null +++ b/tools/testing/selftests/ptrace/Makefile @@ -0,0 +1,10 @@ +CFLAGS += -iquote../../../../include/uapi -Wall +peeksiginfo: peeksiginfo.c + +all: peeksiginfo + +clean: + rm -f peeksiginfo + +run_tests: all + @./peeksiginfo || echo "peeksiginfo selftests: [FAIL]" diff --git a/tools/testing/selftests/ptrace/peeksiginfo.c b/tools/testing/selftests/ptrace/peeksiginfo.c new file mode 100644 index 00000000000..d46558b1f58 --- /dev/null +++ b/tools/testing/selftests/ptrace/peeksiginfo.c @@ -0,0 +1,214 @@ +#define _GNU_SOURCE +#include <stdio.h> +#include <signal.h> +#include <unistd.h> +#include <errno.h> +#include <linux/types.h> +#include <sys/wait.h> +#include <sys/syscall.h> +#include <sys/user.h> +#include <sys/mman.h> + +#include "linux/ptrace.h" + +static int sys_rt_sigqueueinfo(pid_t tgid, int sig, siginfo_t *uinfo) +{ + return syscall(SYS_rt_sigqueueinfo, tgid, sig, uinfo); +} + +static int sys_rt_tgsigqueueinfo(pid_t tgid, pid_t tid, + int sig, siginfo_t *uinfo) +{ + return syscall(SYS_rt_tgsigqueueinfo, tgid, tid, sig, uinfo); +} + +static int sys_ptrace(int request, pid_t pid, void *addr, void *data) +{ + return syscall(SYS_ptrace, request, pid, addr, data); +} + +#define SIGNR 10 +#define TEST_SICODE_PRIV -1 +#define TEST_SICODE_SHARE -2 + +#define err(fmt, ...) \ + fprintf(stderr, \ + "Error (%s:%d): " fmt, \ + __FILE__, __LINE__, ##__VA_ARGS__) + +static int check_error_paths(pid_t child) +{ + struct ptrace_peeksiginfo_args arg; + int ret, exit_code = -1; + void *addr_rw, *addr_ro; + + /* + * Allocate two contiguous pages. The first one is for read-write, + * another is for read-only. 
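
utils.h above is the whole contract between an individual test and the shared harness: return 0 for success, 1 via FAIL_IF(), or the magic skip value via SKIP_IF(). A minimal sketch of a new test built on it, not part of the patch; example_test and its dummy conditions are placeholders.

#include <stdio.h>
#include "utils.h"

static int example_test(void)
{
        SKIP_IF(0);                /* e.g. skip when required hardware is missing */
        FAIL_IF(1 + 1 != 2);       /* the actual assertions go here */
        return 0;
}

int main(void)
{
        return test_harness(example_test, "example_test");
}
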
+ */ + addr_rw = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (addr_rw == MAP_FAILED) { + err("mmap() failed: %m\n"); + return 1; + } + + addr_ro = mmap(addr_rw + PAGE_SIZE, PAGE_SIZE, PROT_READ, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (addr_ro == MAP_FAILED) { + err("mmap() failed: %m\n"); + goto out; + } + + arg.nr = SIGNR; + arg.off = 0; + + /* Unsupported flags */ + arg.flags = ~0; + ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, addr_rw); + if (ret != -1 || errno != EINVAL) { + err("sys_ptrace() returns %d (expected -1)," + " errno %d (expected %d): %m\n", + ret, errno, EINVAL); + goto out; + } + arg.flags = 0; + + /* A part of the buffer is read-only */ + ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, + addr_ro - sizeof(siginfo_t) * 2); + if (ret != 2) { + err("sys_ptrace() returns %d (expected 2): %m\n", ret); + goto out; + } + + /* Read-only buffer */ + ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, addr_ro); + if (ret != -1 && errno != EFAULT) { + err("sys_ptrace() returns %d (expected -1)," + " errno %d (expected %d): %m\n", + ret, errno, EFAULT); + goto out; + } + + exit_code = 0; +out: + munmap(addr_rw, 2 * PAGE_SIZE); + return exit_code; +} + +int check_direct_path(pid_t child, int shared, int nr) +{ + struct ptrace_peeksiginfo_args arg = {.flags = 0, .nr = nr, .off = 0}; + int i, j, ret, exit_code = -1; + siginfo_t siginfo[SIGNR]; + int si_code; + + if (shared == 1) { + arg.flags = PTRACE_PEEKSIGINFO_SHARED; + si_code = TEST_SICODE_SHARE; + } else { + arg.flags = 0; + si_code = TEST_SICODE_PRIV; + } + + for (i = 0; i < SIGNR; ) { + arg.off = i; + ret = sys_ptrace(PTRACE_PEEKSIGINFO, child, &arg, siginfo); + if (ret == -1) { + err("ptrace() failed: %m\n"); + goto out; + } + + if (ret == 0) + break; + + for (j = 0; j < ret; j++, i++) { + if (siginfo[j].si_code == si_code && + siginfo[j].si_int == i) + continue; + + err("%d: Wrong siginfo i=%d si_code=%d si_int=%d\n", + shared, i, siginfo[j].si_code, siginfo[j].si_int); + goto out; + } + } + + if (i != SIGNR) { + err("Only %d signals were read\n", i); + goto out; + } + + exit_code = 0; +out: + return exit_code; +} + +int main(int argc, char *argv[]) +{ + siginfo_t siginfo[SIGNR]; + int i, exit_code = 1; + sigset_t blockmask; + pid_t child; + + sigemptyset(&blockmask); + sigaddset(&blockmask, SIGRTMIN); + sigprocmask(SIG_BLOCK, &blockmask, NULL); + + child = fork(); + if (child == -1) { + err("fork() failed: %m"); + return 1; + } else if (child == 0) { + pid_t ppid = getppid(); + while (1) { + if (ppid != getppid()) + break; + sleep(1); + } + return 1; + } + + /* Send signals in process-wide and per-thread queues */ + for (i = 0; i < SIGNR; i++) { + siginfo->si_code = TEST_SICODE_SHARE; + siginfo->si_int = i; + sys_rt_sigqueueinfo(child, SIGRTMIN, siginfo); + + siginfo->si_code = TEST_SICODE_PRIV; + siginfo->si_int = i; + sys_rt_tgsigqueueinfo(child, child, SIGRTMIN, siginfo); + } + + if (sys_ptrace(PTRACE_ATTACH, child, NULL, NULL) == -1) + return 1; + + waitpid(child, NULL, 0); + + /* Dump signals one by one*/ + if (check_direct_path(child, 0, 1)) + goto out; + /* Dump all signals for one call */ + if (check_direct_path(child, 0, SIGNR)) + goto out; + + /* + * Dump signal from the process-wide queue. 
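
check_direct_path() above drains a queue in slices of at most arg.nr siginfos, advancing arg.off by however many were returned. Below is a reduced sketch, not part of the patch, of a single PTRACE_PEEKSIGINFO call against an already-attached, stopped tracee; peek_signals is an illustrative name and it assumes the same -iquote uapi include path the Makefile above sets up.

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#include "linux/ptrace.h"        /* struct ptrace_peeksiginfo_args, PTRACE_PEEKSIGINFO */

static int peek_signals(pid_t child)
{
        struct ptrace_peeksiginfo_args arg = { .off = 0, .flags = 0, .nr = 10 };
        siginfo_t info[10];
        long ret;
        int i;

        /* Raw syscall, as in the test above, rather than libc's ptrace() */
        ret = syscall(SYS_ptrace, PTRACE_PEEKSIGINFO, child, &arg, info);
        if (ret < 0)
                return -1;

        /* ret is how many siginfos were actually copied, possibly fewer than nr */
        for (i = 0; i < ret; i++)
                printf("signal %d si_code %d si_int %d\n",
                       info[i].si_signo, info[i].si_code, info[i].si_int);

        return 0;
}

With PTRACE_PEEKSIGINFO_SHARED in arg.flags the same call reads the process-wide queue instead, which is the distinction check_direct_path() is parameterised on.
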
+ * The number of signals is not multible to the buffer size + */ + if (check_direct_path(child, 1, 3)) + goto out; + + if (check_error_paths(child)) + goto out; + + printf("PASS\n"); + exit_code = 0; +out: + if (sys_ptrace(PTRACE_KILL, child, NULL, NULL) == -1) + return 1; + + waitpid(child, NULL, 0); + + return exit_code; +} diff --git a/tools/testing/selftests/rcutorture/.gitignore b/tools/testing/selftests/rcutorture/.gitignore new file mode 100644 index 00000000000..05838f6f2eb --- /dev/null +++ b/tools/testing/selftests/rcutorture/.gitignore @@ -0,0 +1,6 @@ +initrd +linux-2.6 +b[0-9]* +rcu-test-image +res +*.swp diff --git a/tools/testing/selftests/rcutorture/bin/config2frag.sh b/tools/testing/selftests/rcutorture/bin/config2frag.sh new file mode 100644 index 00000000000..9f9ffcd427d --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/config2frag.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Usage: sh config2frag.sh < .config > configfrag +# +# Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the +# resulting file becomes a legitimate Kconfig fragment. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +LANG=C sed -e 's/^# CONFIG_\([a-zA-Z0-9_]*\) is not set$/CONFIG_\1=n/' diff --git a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh new file mode 100755 index 00000000000..43540f1828c --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# +# Extract the number of CPUs expected from the specified Kconfig-file +# fragment by checking CONFIG_SMP and CONFIG_NR_CPUS. If the specified +# file gives no clue, base the number on the number of idle CPUs on +# the system. +# +# Usage: configNR_CPUS.sh config-frag +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +cf=$1 +if test ! 
-r $cf +then + echo Unreadable config fragment $cf 1>&2 + exit -1 +fi +if grep -q '^CONFIG_SMP=n$' $cf +then + echo 1 + exit 0 +fi +if grep -q '^CONFIG_NR_CPUS=' $cf +then + grep '^CONFIG_NR_CPUS=' $cf | + sed -e 's/^CONFIG_NR_CPUS=\([0-9]*\).*$/\1/' + exit 0 +fi +cpus2use.sh diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh new file mode 100755 index 00000000000..d686537dd55 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# Usage: sh configcheck.sh .config .config-template +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=/tmp/abat-chk-config.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +cat $1 > $T/.config + +cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' | +awk ' +BEGIN { + print "if grep -q \"" $0 "\" < '"$T/.config"'"; + print "then"; + print "\t:"; + print "else"; + if ($1 == "#") { + print "\tif grep -q \"" $2 "\" < '"$T/.config"'"; + print "\tthen"; + print "\t\tif test \"$firsttime\" = \"\"" + print "\t\tthen" + print "\t\t\tfirsttime=1" + print "\t\tfi" + print "\t\techo \":" $2 ": improperly set\""; + print "\telse"; + print "\t\t:"; + print "\tfi"; + } else { + print "\tif test \"$firsttime\" = \"\"" + print "\tthen" + print "\t\tfirsttime=1" + print "\tfi" + print "\techo \":" $0 ": improperly set\""; + } + print "fi"; + }' | sh diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh new file mode 100755 index 00000000000..9c3f3d39b93 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/configinit.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# sh configinit.sh config-spec-file [ build output dir ] +# +# Create a .config file from the spec file. Run from the kernel source tree. +# Exits with 0 if all went well, with 1 if all went well but the config +# did not match, and some other number for other failures. +# +# The first argument is the .config specification file, which contains +# desired settings, for example, "CONFIG_NO_HZ=y". For best results, +# this should be a full pathname. +# +# The second argument is a optional path to a build output directory, +# for example, "O=/tmp/foo". If this argument is omitted, the .config +# file will be generated directly in the current directory. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=/tmp/configinit.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +# Capture config spec file. + +c=$1 +buildloc=$2 +builddir= +if test -n $buildloc +then + if echo $buildloc | grep -q '^O=' + then + builddir=`echo $buildloc | sed -e 's/^O=//'` + if test ! -d $builddir + then + mkdir $builddir + fi + else + echo Bad build directory: \"$builddir\" + exit 2 + fi +fi + +sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh +sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh +grep '^grep' < $T/u.sh > $T/upd.sh +echo "cat - $c" >> $T/upd.sh +make mrproper +make $buildloc distclean > $builddir/Make.distclean 2>&1 +make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1 +mv $builddir/.config $builddir/.config.sav +sh $T/upd.sh < $builddir/.config.sav > $builddir/.config +cp $builddir/.config $builddir/.config.new +yes '' | make $buildloc oldconfig > $builddir/Make.modconfig.out 2>&1 + +# verify new config matches specification. +configcheck.sh $builddir/.config $c + +exit 0 diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh new file mode 100755 index 00000000000..abe14b7f36e --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Get an estimate of how CPU-hoggy to be. +# +# Usage: cpus2use.sh +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +ncpus=`grep '^processor' /proc/cpuinfo | wc -l` +idlecpus=`mpstat | tail -1 | \ + awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'` +awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null ' +BEGIN { + cpus2use = idlecpus; + if (cpus2use < 1) + cpus2use = 1; + if (cpus2use < ncpus / 10) + cpus2use = ncpus / 10; + if (cpus2use == int(cpus2use)) + cpus2use = int(cpus2use) + else + cpus2use = int(cpus2use) + 1 + print cpus2use; +}' + diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh new file mode 100644 index 00000000000..d01b865bb10 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/functions.sh @@ -0,0 +1,223 @@ +#!/bin/bash +# +# Shell functions for the rest of the scripts. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +# bootparam_hotplug_cpu bootparam-string +# +# Returns 1 if the specified boot-parameter string tells rcutorture to +# test CPU-hotplug operations. +bootparam_hotplug_cpu () { + echo "$1" | grep -q "rcutorture\.onoff_" +} + +# checkarg --argname argtype $# arg mustmatch cannotmatch +# +# Checks the specified argument "arg" against the mustmatch and cannotmatch +# patterns. +checkarg () { + if test $3 -le 1 + then + echo $1 needs argument $2 matching \"$5\" + usage + fi + if echo "$4" | grep -q -e "$5" + then + : + else + echo $1 $2 \"$4\" must match \"$5\" + usage + fi + if echo "$4" | grep -q -e "$6" + then + echo $1 $2 \"$4\" must not match \"$6\" + usage + fi +} + +# configfrag_boot_params bootparam-string config-fragment-file +# +# Adds boot parameters from the .boot file, if any. +configfrag_boot_params () { + if test -r "$2.boot" + then + echo $1 `grep -v '^#' "$2.boot" | tr '\012' ' '` + else + echo $1 + fi +} + +# configfrag_hotplug_cpu config-fragment-file +# +# Returns 1 if the config fragment specifies hotplug CPU. +configfrag_hotplug_cpu () { + if test ! -r "$1" + then + echo Unreadable config fragment "$1" 1>&2 + exit -1 + fi + grep -q '^CONFIG_HOTPLUG_CPU=y$' "$1" +} + +# identify_boot_image qemu-cmd +# +# Returns the relative path to the kernel build image. This will be +# arch/<arch>/boot/bzImage unless overridden with the TORTURE_BOOT_IMAGE +# environment variable. +identify_boot_image () { + if test -n "$TORTURE_BOOT_IMAGE" + then + echo $TORTURE_BOOT_IMAGE + else + case "$1" in + qemu-system-x86_64|qemu-system-i386) + echo arch/x86/boot/bzImage + ;; + qemu-system-ppc64) + echo arch/powerpc/boot/bzImage + ;; + *) + echo "" + ;; + esac + fi +} + +# identify_qemu builddir +# +# Returns our best guess as to which qemu command is appropriate for +# the kernel at hand. Override with the TORTURE_QEMU_CMD environment variable. +identify_qemu () { + local u="`file "$1"`" + if test -n "$TORTURE_QEMU_CMD" + then + echo $TORTURE_QEMU_CMD + elif echo $u | grep -q x86-64 + then + echo qemu-system-x86_64 + elif echo $u | grep -q "Intel 80386" + then + echo qemu-system-i386 + elif uname -a | grep -q ppc64 + then + echo qemu-system-ppc64 + else + echo Cannot figure out what qemu command to use! 1>&2 + echo file $1 output: $u + # Usually this will be one of /usr/bin/qemu-system-* + # Use TORTURE_QEMU_CMD environment variable or appropriate + # argument to top-level script. + exit 1 + fi +} + +# identify_qemu_append qemu-cmd +# +# Output arguments for the qemu "-append" string based on CPU type +# and the TORTURE_QEMU_INTERACTIVE environment variable. +identify_qemu_append () { + case "$1" in + qemu-system-x86_64|qemu-system-i386) + echo noapic selinux=0 initcall_debug debug + ;; + esac + if test -n "$TORTURE_QEMU_INTERACTIVE" + then + echo root=/dev/sda + else + echo console=ttyS0 + fi +} + +# identify_qemu_args qemu-cmd serial-file +# +# Output arguments for qemu arguments based on the TORTURE_QEMU_MAC +# and TORTURE_QEMU_INTERACTIVE environment variables. 
+identify_qemu_args () { + case "$1" in + qemu-system-x86_64|qemu-system-i386) + ;; + qemu-system-ppc64) + echo -enable-kvm -M pseries -cpu POWER7 -nodefaults + echo -device spapr-vscsi + if test -n "$TORTURE_QEMU_INTERACTIVE" -a -n "$TORTURE_QEMU_MAC" + then + echo -device spapr-vlan,netdev=net0,mac=$TORTURE_QEMU_MAC + echo -netdev bridge,br=br0,id=net0 + elif test -n "$TORTURE_QEMU_INTERACTIVE" + then + echo -net nic -net user + fi + ;; + esac + if test -n "$TORTURE_QEMU_INTERACTIVE" + then + echo -monitor stdio -serial pty -S + else + echo -serial file:$2 + fi +} + +# identify_qemu_vcpus +# +# Returns the number of virtual CPUs available to the aggregate of the +# guest OSes. +identify_qemu_vcpus () { + lscpu | grep '^CPU(s):' | sed -e 's/CPU(s)://' +} + +# print_bug +# +# Prints "BUG: " in red followed by remaining arguments +print_bug () { + printf '\033[031mBUG: \033[m' + echo $* +} + +# print_warning +# +# Prints "WARNING: " in yellow followed by remaining arguments +print_warning () { + printf '\033[033mWARNING: \033[m' + echo $* +} + +# specify_qemu_cpus qemu-cmd qemu-args #cpus +# +# Appends a string containing "-smp XXX" to qemu-args, unless the incoming +# qemu-args already contains "-smp". +specify_qemu_cpus () { + local nt; + + if echo $2 | grep -q -e -smp + then + echo $2 + else + case "$1" in + qemu-system-x86_64|qemu-system-i386) + echo $2 -smp $3 + ;; + qemu-system-ppc64) + nt="`lscpu | grep '^NUMA node0' | sed -e 's/^[^,]*,\([0-9]*\),.*$/\1/'`" + echo $2 -smp cores=`expr \( $3 + $nt - 1 \) / $nt`,threads=$nt + ;; + esac + fi +} diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh new file mode 100755 index 00000000000..7c1e56b46de --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Build a kvm-ready Linux kernel from the tree in the current directory. +# +# Usage: sh kvm-build.sh config-template build-dir more-configs +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +config_template=${1} +if test -z "$config_template" -o ! -f "$config_template" -o ! -r "$config_template" +then + echo "kvm-build.sh :$config_template: Not a readable file" + exit 1 +fi +builddir=${2} +if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir" +then + echo "kvm-build.sh :$builddir: Not a writable directory, cannot build into it" + exit 1 +fi +moreconfigs=${3} +if test -z "$moreconfigs" -o ! 
-r "$moreconfigs" +then + echo "kvm-build.sh :$moreconfigs: Not a readable file" + exit 1 +fi + +T=/tmp/test-linux.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +grep -v 'CONFIG_[A-Z]*_TORTURE_TEST' < ${config_template} > $T/config +cat << ___EOF___ >> $T/config +CONFIG_INITRAMFS_SOURCE="$TORTURE_INITRD" +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_CONSOLE=y +___EOF___ +cat $moreconfigs >> $T/config + +configinit.sh $T/config O=$builddir +retval=$? +if test $retval -gt 1 +then + exit 2 +fi +ncpus=`cpus2use.sh` +make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1 +retval=$? +if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out +then + echo Kernel build error + egrep "Stop|Error|error:|warning:" < $builddir/Make.out + echo Run aborted. + exit 3 +fi diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh new file mode 100755 index 00000000000..7f1ff1a8fc4 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Analyze a given results directory for locktorture progress. +# +# Usage: sh kvm-recheck-lock.sh resdir +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2014 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +i="$1" +if test -d $i +then + : +else + echo Unreadable results directory: $i + exit 1 +fi + +configfile=`echo $i | sed -e 's/^.*\///'` +ncs=`grep "Writes: Total:" $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* Total: //' -e 's/ .*$//'` +if test -z "$ncs" +then + echo "$configfile -------" +else + title="$configfile ------- $ncs acquisitions/releases" + dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null` + if test -z "$dur" + then + : + else + ncsps=`awk -v ncs=$ncs -v dur=$dur ' + BEGIN { print ncs / dur }' < /dev/null` + title="$title ($ncsps per second)" + fi + echo $title +fi diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh new file mode 100755 index 00000000000..307c4b95f32 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Analyze a given results directory for rcutorture progress. +# +# Usage: sh kvm-recheck-rcu.sh resdir +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2014 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +i="$1" +if test -d $i +then + : +else + echo Unreadable results directory: $i + exit 1 +fi + +configfile=`echo $i | sed -e 's/^.*\///'` +ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'` +if test -z "$ngps" +then + echo "$configfile -------" +else + title="$configfile ------- $ngps grace periods" + dur=`sed -e 's/^.* rcutorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null` + if test -z "$dur" + then + : + else + ngpsps=`awk -v ngps=$ngps -v dur=$dur ' + BEGIN { print ngps / dur }' < /dev/null` + title="$title ($ngpsps per second)" + fi + echo $title +fi diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh new file mode 100755 index 00000000000..ee1f6cae3d7 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# +# Given the results directories for previous KVM-based torture runs, +# check the build and console output for errors. Given a directory +# containing results directories, this recursively checks them all. +# +# Usage: sh kvm-recheck.sh resdir ... +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH +. tools/testing/selftests/rcutorture/bin/functions.sh +for rd in "$@" +do + firsttime=1 + dirs=`find $rd -name Make.defconfig.out -print | sort | sed -e 's,/[^/]*$,,' | sort -u` + for i in $dirs + do + if test -n "$firsttime" + then + firsttime="" + resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'` + head -1 $resdir/log + fi + TORTURE_SUITE="`cat $i/../TORTURE_SUITE`" + kvm-recheck-${TORTURE_SUITE}.sh $i + if test -f "$i/console.log" + then + configcheck.sh $i/.config $i/ConfigFragment + parse-build.sh $i/Make.out $configfile + parse-torture.sh $i/console.log $configfile + parse-console.sh $i/console.log $configfile + if test -r $i/Warnings + then + cat $i/Warnings + fi + else + if test -f "$i/qemu-cmd" + then + print_bug qemu failed + else + print_bug Build failed + fi + echo " $i" + fi + done +done diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh new file mode 100755 index 00000000000..27e544e2951 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh @@ -0,0 +1,222 @@ +#!/bin/bash +# +# Run a kvm-based test of the specified tree on the specified configs. 
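+# A rough sketch of the invocation kvm.sh generates (paths illustrative):
+#
+#	kvm-test-1-run.sh $CONFIGFRAG/TREE01 $KVM/b1 $resdir/$ds/TREE01 30 "" ""
+#
+# with the last two arguments carrying extra qemu and kernel-boot arguments.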
+# Fully automated run and error checking, no graphics console. +# +# Execute this in the source tree. Do not run it as a background task +# because qemu does not seem to like that much. +# +# Usage: sh kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args +# +# qemu-args defaults to "-nographic", along with arguments specifying the +# number of CPUs and other options generated from +# the underlying CPU architecture. +# boot_args defaults to value returned by the per_version_boot_params +# shell function. +# +# Anything you specify for either qemu-args or boot_args is appended to +# the default values. The "-smp" value is deduced from the contents of +# the config fragment. +# +# More sophisticated argument parsing is clearly needed. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +grace=120 + +T=/tmp/kvm-test-1-run.sh.$$ +trap 'rm -rf $T' 0 + +. $KVM/bin/functions.sh +. $KVPATH/ver_functions.sh + +config_template=${1} +config_dir=`echo $config_template | sed -e 's,/[^/]*$,,'` +title=`echo $config_template | sed -e 's/^.*\///'` +builddir=${2} +if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir" +then + echo "kvm-test-1-run.sh :$builddir: Not a writable directory, cannot build into it" + exit 1 +fi +resdir=${3} +if test -z "$resdir" -o ! -d "$resdir" -o ! -w "$resdir" +then + echo "kvm-test-1-run.sh :$resdir: Not a writable directory, cannot store results into it" + exit 1 +fi +cp $config_template $resdir/ConfigFragment +echo ' ---' `date`: Starting build +echo ' ---' Kconfig fragment at: $config_template >> $resdir/log +if test -r "$config_dir/CFcommon" +then + cat < $config_dir/CFcommon >> $T +fi +# Optimizations below this point +# CONFIG_USB=n +# CONFIG_SECURITY=n +# CONFIG_NFS_FS=n +# CONFIG_SOUND=n +# CONFIG_INPUT_JOYSTICK=n +# CONFIG_INPUT_TABLET=n +# CONFIG_INPUT_TOUCHSCREEN=n +# CONFIG_INPUT_MISC=n +# CONFIG_INPUT_MOUSE=n +# # CONFIG_NET=n # disables console access, so accept the slower build. +# CONFIG_SCSI=n +# CONFIG_ATA=n +# CONFIG_FAT_FS=n +# CONFIG_MSDOS_FS=n +# CONFIG_VFAT_FS=n +# CONFIG_ISO9660_FS=n +# CONFIG_QUOTA=n +# CONFIG_HID=n +# CONFIG_CRYPTO=n +# CONFIG_PCCARD=n +# CONFIG_PCMCIA=n +# CONFIG_CARDBUS=n +# CONFIG_YENTA=n +if kvm-build.sh $config_template $builddir $T +then + QEMU="`identify_qemu $builddir/vmlinux`" + BOOT_IMAGE="`identify_boot_image $QEMU`" + cp $builddir/Make*.out $resdir + cp $builddir/.config $resdir + if test -n "$BOOT_IMAGE" + then + cp $builddir/$BOOT_IMAGE $resdir + else + echo No identifiable boot image, not running KVM, see $resdir. + echo Do the torture scripts know about your architecture? 
+ fi + parse-build.sh $resdir/Make.out $title + if test -f $builddir.wait + then + mv $builddir.wait $builddir.ready + fi +else + cp $builddir/Make*.out $resdir + cp $builddir/.config $resdir || : + echo Build failed, not running KVM, see $resdir. + if test -f $builddir.wait + then + mv $builddir.wait $builddir.ready + fi + exit 1 +fi +while test -f $builddir.ready +do + sleep 1 +done +minutes=$4 +seconds=$(($minutes * 60)) +qemu_args=$5 +boot_args=$6 + +cd $KVM +kstarttime=`awk 'BEGIN { print systime() }' < /dev/null` +echo ' ---' `date`: Starting kernel + +# Generate -smp qemu argument. +qemu_args="-nographic $qemu_args" +cpu_count=`configNR_CPUS.sh $config_template` +vcpus=`identify_qemu_vcpus` +if test $cpu_count -gt $vcpus +then + echo CPU count limited from $cpu_count to $vcpus + touch $resdir/Warnings + echo CPU count limited from $cpu_count to $vcpus >> $resdir/Warnings + cpu_count=$vcpus +fi +qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`" + +# Generate architecture-specific and interaction-specific qemu arguments +qemu_args="$qemu_args `identify_qemu_args "$QEMU" "$builddir/console.log"`" + +# Generate qemu -append arguments +qemu_append="`identify_qemu_append "$QEMU"`" + +# Pull in Kconfig-fragment boot parameters +boot_args="`configfrag_boot_params "$boot_args" "$config_template"`" +# Generate kernel-version-specific boot parameters +boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`" + +echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd +if test -n "$TORTURE_BUILDONLY" +then + echo Build-only run specified, boot/test omitted. + exit 0 +fi +( $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) & +qemu_pid=$! +commandcompleted=0 +echo Monitoring qemu job at pid $qemu_pid +while : +do + kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null` + if kill -0 $qemu_pid > /dev/null 2>&1 + then + if test $kruntime -ge $seconds + then + break; + fi + sleep 1 + else + commandcompleted=1 + if test $kruntime -lt $seconds + then + echo Completed in $kruntime vs. $seconds >> $resdir/Warnings 2>&1 + grep "^(qemu) qemu:" $resdir/kvm-test-1-run.sh.out >> $resdir/Warnings 2>&1 + killpid="`sed -n "s/^(qemu) qemu: terminating on signal [0-9]* from pid \([0-9]*\).*$/\1/p" $resdir/Warnings`" + if test -n "$killpid" + then + echo "ps -fp $killpid" >> $resdir/Warnings 2>&1 + ps -fp $killpid >> $resdir/Warnings 2>&1 + fi + else + echo ' ---' `date`: Kernel done + fi + break + fi +done +if test $commandcompleted -eq 0 +then + echo Grace period for qemu job at pid $qemu_pid + while : + do + kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null` + if kill -0 $qemu_pid > /dev/null 2>&1 + then + : + else + break + fi + if test $kruntime -ge $((seconds + grace)) + then + echo "!!! Hang at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1 + kill -KILL $qemu_pid + break + fi + sleep 1 + done +fi + +cp $builddir/console.log $resdir +parse-torture.sh $resdir/console.log $title +parse-console.sh $resdir/console.log $title diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh new file mode 100644 index 00000000000..40285c58653 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -0,0 +1,410 @@ +#!/bin/bash +# +# Run a series of 14 tests under KVM. These are not particularly +# well-selected or well-tuned, but are the current set. 
Run from the +# top level of the source tree. +# +# Edit the definitions below to set the locations of the various directories, +# as well as the test duration. +# +# Usage: sh kvm.sh [ options ] +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +scriptname=$0 +args="$*" + +T=/tmp/kvm.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +dur=30 +dryrun="" +KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM +PATH=${KVM}/bin:$PATH; export PATH +TORTURE_DEFCONFIG=defconfig +TORTURE_BOOT_IMAGE="" +TORTURE_INITRD="$KVM/initrd"; export TORTURE_INITRD +TORTURE_KMAKE_ARG="" +TORTURE_SUITE=rcu +resdir="" +configs="" +cpus=0 +ds=`date +%Y.%m.%d-%H:%M:%S` +kversion="" + +. functions.sh + +usage () { + echo "Usage: $scriptname optional arguments:" + echo " --bootargs kernel-boot-arguments" + echo " --bootimage relative-path-to-kernel-boot-image" + echo " --buildonly" + echo " --configs \"config-file list\"" + echo " --cpus N" + echo " --datestamp string" + echo " --defconfig string" + echo " --dryrun sched|script" + echo " --duration minutes" + echo " --interactive" + echo " --kmake-arg kernel-make-arguments" + echo " --kversion vN.NN" + echo " --mac nn:nn:nn:nn:nn:nn" + echo " --no-initrd" + echo " --qemu-args qemu-system-..." + echo " --qemu-cmd qemu-system-..." 
+ echo " --results absolute-pathname" + echo " --torture rcu" + exit 1 +} + +while test $# -gt 0 +do + case "$1" in + --bootargs) + checkarg --bootargs "(list of kernel boot arguments)" "$#" "$2" '.*' '^--' + TORTURE_BOOTARGS="$2" + shift + ;; + --bootimage) + checkarg --bootimage "(relative path to kernel boot image)" "$#" "$2" '[a-zA-Z0-9][a-zA-Z0-9_]*' '^--' + TORTURE_BOOT_IMAGE="$2" + shift + ;; + --buildonly) + TORTURE_BUILDONLY=1 + ;; + --configs) + checkarg --configs "(list of config files)" "$#" "$2" '^[^/]*$' '^--' + configs="$2" + shift + ;; + --cpus) + checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--' + cpus=$2 + shift + ;; + --datestamp) + checkarg --datestamp "(relative pathname)" "$#" "$2" '^[^/]*$' '^--' + ds=$2 + shift + ;; + --defconfig) + checkarg --defconfig "defconfigtype" "$#" "$2" '^[^/][^/]*$' '^--' + TORTURE_DEFCONFIG=$2 + shift + ;; + --dryrun) + checkarg --dryrun "sched|script" $# "$2" 'sched\|script' '^--' + dryrun=$2 + shift + ;; + --duration) + checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error' + dur=$2 + shift + ;; + --interactive) + TORTURE_QEMU_INTERACTIVE=1; export TORTURE_QEMU_INTERACTIVE + ;; + --kmake-arg) + checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$' + TORTURE_KMAKE_ARG="$2" + shift + ;; + --kversion) + checkarg --kversion "(kernel version)" $# "$2" '^v[0-9.]*$' '^error' + kversion=$2 + shift + ;; + --mac) + checkarg --mac "(MAC address)" $# "$2" '^\([0-9a-fA-F]\{2\}:\)\{5\}[0-9a-fA-F]\{2\}$' error + TORTURE_QEMU_MAC=$2 + shift + ;; + --no-initrd) + TORTURE_INITRD=""; export TORTURE_INITRD + ;; + --qemu-args) + checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error' + TORTURE_QEMU_ARG="$2" + shift + ;; + --qemu-cmd) + checkarg --qemu-cmd "(qemu-system-...)" $# "$2" 'qemu-system-' '^--' + TORTURE_QEMU_CMD="$2" + shift + ;; + --results) + checkarg --results "(absolute pathname)" "$#" "$2" '^/' '^error' + resdir=$2 + shift + ;; + --torture) + checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--' + TORTURE_SUITE=$2 + shift + ;; + *) + echo Unknown argument $1 + usage + ;; + esac + shift +done + +CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG +KVPATH=${CONFIGFRAG}/$kversion; export KVPATH + +if test -z "$configs" +then + configs="`cat $CONFIGFRAG/$kversion/CFLIST`" +fi + +if test -z "$resdir" +then + resdir=$KVM/res +fi + +# Create a file of test-name/#cpus pairs, sorted by decreasing #cpus. +touch $T/cfgcpu +for CF in $configs +do + if test -f "$CONFIGFRAG/$kversion/$CF" + then + echo $CF `configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF` >> $T/cfgcpu + else + echo "The --configs file $CF does not exist, terminating." + exit 1 + fi +done +sort -k2nr $T/cfgcpu > $T/cfgcpu.sort + +# Use a greedy bin-packing algorithm, sorting the list accordingly. +awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus ' +BEGIN { + njobs = 0; +} + +{ + # Read file of tests and corresponding required numbers of CPUs. + cf[njobs] = $1; + cpus[njobs] = $2; + njobs++; +} + +END { + alldone = 0; + batch = 0; + nc = -1; + + # Each pass through the following loop creates on test batch + # that can be executed concurrently given ncpus. Note that a + # given test that requires more than the available CPUs will run in + # their own batch. Such tests just have to make do with what + # is available. + while (nc != ncpus) { + batch++; + nc = ncpus; + + # Each pass through the following loop considers one + # test for inclusion in the current batch. 
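+		# A test is accepted either because it fits in the CPUs still
+		# free in this batch (nc >= cpus[i]) or because the batch is
+		# still empty (nc == ncpus), which lets a test too big for
+		# ncpus occupy a batch by itself.  The outer while loop ends
+		# once a full pass adds nothing, leaving nc equal to ncpus.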
+ for (i = 0; i < njobs; i++) { + if (done[i]) + continue; # Already part of a batch. + if (nc >= cpus[i] || nc == ncpus) { + + # This test fits into the current batch. + done[i] = batch; + nc -= cpus[i]; + if (nc <= 0) + break; # Too-big test in its own batch. + } + } + } + + # Dump out the tests in batch order. + for (b = 1; b <= batch; b++) + for (i = 0; i < njobs; i++) + if (done[i] == b) + print cf[i], cpus[i]; +}' + +# Generate a script to execute the tests in appropriate batches. +cat << ___EOF___ > $T/script +CONFIGFRAG="$CONFIGFRAG"; export CONFIGFRAG +KVM="$KVM"; export KVM +KVPATH="$KVPATH"; export KVPATH +PATH="$PATH"; export PATH +TORTURE_BOOT_IMAGE="$TORTURE_BOOT_IMAGE"; export TORTURE_BOOT_IMAGE +TORTURE_BUILDONLY="$TORTURE_BUILDONLY"; export TORTURE_BUILDONLY +TORTURE_DEFCONFIG="$TORTURE_DEFCONFIG"; export TORTURE_DEFCONFIG +TORTURE_INITRD="$TORTURE_INITRD"; export TORTURE_INITRD +TORTURE_KMAKE_ARG="$TORTURE_KMAKE_ARG"; export TORTURE_KMAKE_ARG +TORTURE_QEMU_CMD="$TORTURE_QEMU_CMD"; export TORTURE_QEMU_CMD +TORTURE_QEMU_INTERACTIVE="$TORTURE_QEMU_INTERACTIVE"; export TORTURE_QEMU_INTERACTIVE +TORTURE_QEMU_MAC="$TORTURE_QEMU_MAC"; export TORTURE_QEMU_MAC +TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE +if ! test -e $resdir +then + mkdir -p "$resdir" || : +fi +mkdir $resdir/$ds +echo Results directory: $resdir/$ds +echo $scriptname $args +touch $resdir/$ds/log +echo $scriptname $args >> $resdir/$ds/log +echo ${TORTURE_SUITE} > $resdir/$ds/TORTURE_SUITE +pwd > $resdir/$ds/testid.txt +if test -d .git +then + git status >> $resdir/$ds/testid.txt + git rev-parse HEAD >> $resdir/$ds/testid.txt + if ! git diff HEAD > $T/git-diff 2>&1 + then + cp $T/git-diff $resdir/$ds + fi +fi +___EOF___ +awk < $T/cfgcpu.pack \ + -v CONFIGDIR="$CONFIGFRAG/$kversion/" \ + -v KVM="$KVM" \ + -v ncpus=$cpus \ + -v rd=$resdir/$ds/ \ + -v dur=$dur \ + -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \ + -v TORTURE_BOOTARGS="$TORTURE_BOOTARGS" \ +'BEGIN { + i = 0; +} + +{ + cf[i] = $1; + cpus[i] = $2; + i++; +} + +# Dump out the scripting required to run one test batch. +function dump(first, pastlast) +{ + print "echo ----Start batch: `date`"; + print "echo ----Start batch: `date` >> " rd "/log"; + jn=1 + for (j = first; j < pastlast; j++) { + builddir=KVM "/b" jn + cpusr[jn] = cpus[j]; + if (cfrep[cf[j]] == "") { + cfr[jn] = cf[j]; + cfrep[cf[j]] = 1; + } else { + cfrep[cf[j]]++; + cfr[jn] = cf[j] "." cfrep[cf[j]]; + } + if (cpusr[jn] > ncpus && ncpus != 0) + ovf = "(!)"; + else + ovf = ""; + print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`"; + print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log"; + print "rm -f " builddir ".*"; + print "touch " builddir ".wait"; + print "mkdir " builddir " > /dev/null 2>&1 || :"; + print "mkdir " rd cfr[jn] " || :"; + print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &" + print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`"; + print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log"; + print "while test -f " builddir ".wait" + print "do" + print "\tsleep 1" + print "done" + print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`"; + print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. 
`date` >> " rd "/log"; + jn++; + } + for (j = 1; j < jn; j++) { + builddir=KVM "/b" j + print "rm -f " builddir ".ready" + print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`"; + print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log"; + } + print "wait" + print "echo ---- All kernel runs complete. `date`"; + print "echo ---- All kernel runs complete. `date` >> " rd "/log"; + for (j = 1; j < jn; j++) { + builddir=KVM "/b" j + print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:"; + print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log"; + print "cat " rd cfr[j] "/kvm-test-1-run.sh.out"; + print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log"; + } +} + +END { + njobs = i; + nc = ncpus; + first = 0; + + # Each pass through the following loop considers one test. + for (i = 0; i < njobs; i++) { + if (ncpus == 0) { + # Sequential test specified, each test its own batch. + dump(i, i + 1); + first = i; + } else if (nc < cpus[i] && i != 0) { + # Out of CPUs, dump out a batch. + dump(first, i); + first = i; + nc = ncpus; + } + # Account for the CPUs needed by the current test. + nc -= cpus[i]; + } + # Dump the last batch. + if (ncpus != 0) + dump(first, i); +}' >> $T/script + +cat << ___EOF___ >> $T/script +echo +echo +echo " --- `date` Test summary:" +echo Results directory: $resdir/$ds +if test -z "$TORTURE_BUILDONLY" +then + kvm-recheck.sh $resdir/$ds +fi +___EOF___ + +if test "$dryrun" = script +then + cat $T/script + exit 0 +elif test "$dryrun" = sched +then + # Extract the test run schedule from the script. + egrep 'Start batch|Starting build\.' $T/script | + grep -v ">>" | + sed -e 's/:.*$//' -e 's/^echo //' + exit 0 +else + # Not a dryru, so run the script. + sh $T/script +fi + +# Tracing: trace_event=rcu:rcu_grace_period,rcu:rcu_future_grace_period,rcu:rcu_grace_period_init,rcu:rcu_nocb_wake,rcu:rcu_preempt_task,rcu:rcu_unlock_preempted_task,rcu:rcu_quiescent_state_report,rcu:rcu_fqs,rcu:rcu_callback,rcu:rcu_kfree_callback,rcu:rcu_batch_start,rcu:rcu_invoke_callback,rcu:rcu_invoke_kfree_callback,rcu:rcu_batch_end,rcu:rcu_torture_read,rcu:rcu_barrier diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh new file mode 100755 index 00000000000..543230951c3 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# +# Check the build output from an rcutorture run for goodness. +# The "file" is a pathname on the local system, and "title" is +# a text string for error-message purposes. +# +# The file must contain kernel build output. +# +# Usage: +# sh parse-build.sh file title +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=$1 +title=$2 + +. 
functions.sh + +if grep -q CC < $T +then + : +else + print_bug $title no build + exit 1 +fi + +if grep -q "error:" < $T +then + print_bug $title build errors: + grep "error:" < $T + exit 2 +fi +exit 0 + +if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T +then + print_warning $title build errors: + egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T + exit 2 +fi +exit 0 diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh new file mode 100755 index 00000000000..4185d4cab32 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh @@ -0,0 +1,41 @@ +#!/bin/sh +# +# Check the console output from an rcutorture run for oopses. +# The "file" is a pathname on the local system, and "title" is +# a text string for error-message purposes. +# +# Usage: +# sh parse-console.sh file title +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=/tmp/abat-chk-badness.sh.$$ +trap 'rm -f $T' 0 + +file="$1" +title="$2" + +. functions.sh + +egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T +if test -s $T +then + print_warning Assertion failure in $file $title + cat $T +fi diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh new file mode 100755 index 00000000000..3455560ab4e --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh @@ -0,0 +1,106 @@ +#!/bin/sh +# +# Check the console output from a torture run for goodness. +# The "file" is a pathname on the local system, and "title" is +# a text string for error-message purposes. +# +# The file must contain torture output, but can be interspersed +# with other dmesg text, as in console-log output. +# +# Usage: +# sh parse-torture.sh file title +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2011 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=/tmp/parse-torture.sh.$$ +file="$1" +title="$2" + +trap 'rm -f $T.seq' 0 + +. 
functions.sh + +# check for presence of torture output file. + +if test -f "$file" -a -r "$file" +then + : +else + echo $title unreadable torture output file: $file + exit 1 +fi + +# check for abject failure + +if grep -q FAILURE $file || grep -q -e '-torture.*!!!' $file +then + nerrs=`grep --binary-files=text '!!!' $file | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'` + print_bug $title FAILURE, $nerrs instances + echo " " $url + exit +fi + +grep --binary-files=text 'torture:.*ver:' $file | grep --binary-files=text -v '(null)' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' | +awk ' +BEGIN { + ver = 0; + badseq = 0; + } + + { + if (!badseq && ($5 + 0 != $5 || $5 <= ver)) { + badseqno1 = ver; + badseqno2 = $5; + badseqnr = NR; + badseq = 1; + } + ver = $5 + } + +END { + if (badseq) { + if (badseqno1 == badseqno2 && badseqno2 == ver) + print "GP HANG at " ver " torture stat " badseqnr; + else + print "BAD SEQ " badseqno1 ":" badseqno2 " last:" ver " version " badseqnr; + } + }' > $T.seq + +if grep -q SUCCESS $file +then + if test -s $T.seq + then + print_warning $title $title `cat $T.seq` + echo " " $file + exit 2 + fi +else + if grep -q "_HOTPLUG:" $file + then + print_warning HOTPLUG FAILURES $title `cat $T.seq` + echo " " $file + exit 3 + fi + echo $title no success message, `grep --binary-files=text 'ver:' $file | wc -l` successful version messages + if test -s $T.seq + then + print_warning $title `cat $T.seq` + fi + exit 2 +fi diff --git a/tools/testing/selftests/rcutorture/configs/lock/BUSTED b/tools/testing/selftests/rcutorture/configs/lock/BUSTED new file mode 100644 index 00000000000..1d1da1477fc --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/lock/BUSTED @@ -0,0 +1,6 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=4 +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y diff --git a/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot b/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot new file mode 100644 index 00000000000..6386c15e977 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot @@ -0,0 +1 @@ +locktorture.torture_type=lock_busted diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST new file mode 100644 index 00000000000..a061b22d189 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST @@ -0,0 +1 @@ +LOCK01 diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFcommon b/tools/testing/selftests/rcutorture/configs/lock/CFcommon new file mode 100644 index 00000000000..e372dc26925 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/lock/CFcommon @@ -0,0 +1,2 @@ +CONFIG_LOCK_TORTURE_TEST=y +CONFIG_PRINTK_TIME=y diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK01 b/tools/testing/selftests/rcutorture/configs/lock/LOCK01 new file mode 100644 index 00000000000..a9625e3d6cd --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK01 @@ -0,0 +1,6 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh new file mode 100644 index 00000000000..9746ea1cd6c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# +# Kernel-version-dependent shell functions for the rest 
of the scripts. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2014 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +# locktorture_param_onoff bootparam-string config-file +# +# Adds onoff locktorture module parameters to kernels having it. +locktorture_param_onoff () { + if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2" + then + echo CPU-hotplug kernel, adding locktorture onoff. 1>&2 + echo locktorture.onoff_interval=3 locktorture.onoff_holdoff=30 + fi +} + +# per_version_boot_params bootparam-string config-file seconds +# +# Adds per-version torture-module parameters to kernels supporting them. +per_version_boot_params () { + echo $1 `locktorture_param_onoff "$1" "$2"` \ + locktorture.stat_interval=15 \ + locktorture.shutdown_secs=$3 \ + locktorture.locktorture_runnable=1 \ + locktorture.verbose=1 +} diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED new file mode 100644 index 00000000000..48d8a245c7f --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED @@ -0,0 +1,7 @@ +CONFIG_RCU_TRACE=n +CONFIG_SMP=y +CONFIG_NR_CPUS=4 +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot new file mode 100644 index 00000000000..6804f9dcfc1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot @@ -0,0 +1 @@ +rcutorture.torture_type=rcu_busted diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST new file mode 100644 index 00000000000..cd3d29cb0a4 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST @@ -0,0 +1,13 @@ +TREE01 +TREE02 +TREE03 +TREE04 +TREE05 +TREE06 +TREE07 +TREE08 +TREE09 +SRCU-N +SRCU-P +TINY01 +TINY02 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon new file mode 100644 index 00000000000..d2d2a86139d --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon @@ -0,0 +1,2 @@ +CONFIG_RCU_TORTURE_TEST=y +CONFIG_PRINTK_TIME=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N new file mode 100644 index 00000000000..9fbb41b9b31 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N @@ -0,0 +1,7 @@ +CONFIG_RCU_TRACE=n +CONFIG_SMP=y +CONFIG_NR_CPUS=4 +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot new file mode 100644 index 00000000000..238bfe3bd0c --- /dev/null +++ 
b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot @@ -0,0 +1 @@ +rcutorture.torture_type=srcu diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P new file mode 100644 index 00000000000..4b6f272dba2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P @@ -0,0 +1,7 @@ +CONFIG_RCU_TRACE=n +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot new file mode 100644 index 00000000000..238bfe3bd0c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot @@ -0,0 +1 @@ +rcutorture.torture_type=srcu diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY01 b/tools/testing/selftests/rcutorture/configs/rcu/TINY01 new file mode 100644 index 00000000000..0a63e073a00 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY01 @@ -0,0 +1,12 @@ +CONFIG_SMP=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TINY_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_TRACE=n +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_PREEMPT_COUNT=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02 b/tools/testing/selftests/rcutorture/configs/rcu/TINY02 new file mode 100644 index 00000000000..f4feaee4077 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02 @@ -0,0 +1,12 @@ +CONFIG_SMP=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TINY_RCU=y +CONFIG_HZ_PERIODIC=y +CONFIG_NO_HZ_IDLE=n +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_PREEMPT_COUNT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 new file mode 100644 index 00000000000..9c827ec59a9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 @@ -0,0 +1,22 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_TRACE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_FANOUT=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ZERO=y +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_RCU_BOOST=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot new file mode 100644 index 00000000000..0fc8a342893 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot @@ -0,0 +1 @@ +rcutorture.torture_type=rcu_bh diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 new file mode 100644 index 00000000000..1a777b5f68b --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 @@ -0,0 +1,25 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=n +CONFIG_HOTPLUG_CPU=n 
+CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_FANOUT=3 +CONFIG_RCU_FANOUT_LEAF=3 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=y +CONFIG_RCU_BOOST=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T new file mode 100644 index 00000000000..61c8d9ce5bb --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T @@ -0,0 +1,25 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_FANOUT=3 +CONFIG_RCU_FANOUT_LEAF=3 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=y +CONFIG_RCU_BOOST=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 new file mode 100644 index 00000000000..c1f111c1561 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 @@ -0,0 +1,22 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=y +CONFIG_NO_HZ_IDLE=n +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_TRACE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_FANOUT=4 +CONFIG_RCU_FANOUT_LEAF=4 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 new file mode 100644 index 00000000000..7dbd27ce17a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -0,0 +1,24 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=n +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ_FULL_ALL=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_TRACE=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_FANOUT=2 +CONFIG_RCU_FANOUT_LEAF=2 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_RCU_CPU_STALL_VERBOSE=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot new file mode 100644 index 00000000000..0fc8a342893 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot @@ -0,0 +1 @@ +rcutorture.torture_type=rcu_bh diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 new file mode 100644 index 00000000000..d0f32e57474 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 @@ -0,0 +1,24 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_HZ_PERIODIC=n 
+CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=n +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_FANOUT=6 +CONFIG_RCU_FANOUT_LEAF=6 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_NONE=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_PROVE_RCU_DELAY=y +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot new file mode 100644 index 00000000000..3b42b8b033c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot @@ -0,0 +1 @@ +rcutorture.torture_type=sched diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 new file mode 100644 index 00000000000..2e477dfb9c5 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 @@ -0,0 +1,25 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_FANOUT=6 +CONFIG_RCU_FANOUT_LEAF=6 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 new file mode 100644 index 00000000000..042f86ef362 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 @@ -0,0 +1,23 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=16 +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=n +CONFIG_NO_HZ_FULL=y +CONFIG_NO_HZ_FULL_ALL=y +CONFIG_NO_HZ_FULL_SYSIDLE=y +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_FANOUT=2 +CONFIG_RCU_FANOUT_LEAF=2 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 new file mode 100644 index 00000000000..3438cee1e3c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 @@ -0,0 +1,25 @@ +CONFIG_SMP=y +CONFIG_NR_CPUS=16 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_FANOUT=3 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_RCU_FANOUT_LEAF=2 +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_RCU_BOOST=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T new file mode 100644 index 00000000000..bf4523d3e44 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T @@ -0,0 +1,25 @@ 
+CONFIG_SMP=y +CONFIG_NR_CPUS=16 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_FAST_NO_HZ=n +CONFIG_RCU_TRACE=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_FANOUT=3 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_RCU_FANOUT_LEAF=2 +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_RCU_BOOST=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot new file mode 100644 index 00000000000..3b42b8b033c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot @@ -0,0 +1 @@ +rcutorture.torture_type=sched diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09 new file mode 100644 index 00000000000..81e4f7c0bf0 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09 @@ -0,0 +1,20 @@ +CONFIG_SMP=n +CONFIG_NR_CPUS=1 +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_HZ_PERIODIC=n +CONFIG_NO_HZ_IDLE=y +CONFIG_NO_HZ_FULL=n +CONFIG_RCU_TRACE=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_RCU_NOCB_CPU=n +CONFIG_DEBUG_LOCK_ALLOC=n +CONFIG_PROVE_RCU_DELAY=n +CONFIG_RCU_CPU_STALL_INFO=n +CONFIG_RCU_CPU_STALL_VERBOSE=n +CONFIG_RCU_BOOST=n +CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/CFLIST new file mode 100644 index 00000000000..18223947bbc --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/CFLIST @@ -0,0 +1,14 @@ +P1-S-T-NH-SD-SMP-HP +P2-2-t-nh-sd-SMP-hp +P3-3-T-nh-SD-SMP-hp +P4-A-t-NH-sd-SMP-HP +P5-U-T-NH-sd-SMP-hp +N1-S-T-NH-SD-SMP-HP +N2-2-t-nh-sd-SMP-hp +N3-3-T-nh-SD-SMP-hp +N4-A-t-NH-sd-SMP-HP +N5-U-T-NH-sd-SMP-hp +PT1-nh +PT2-NH +NT1-nh +NT3-NH diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..d3ef873eb6e --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N1-S-T-NH-SD-SMP-HP @@ -0,0 +1,18 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..02e418572b1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y 
+CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b3100f69c8c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N3-3-T-nh-SD-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..c56b4453072 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N4-A-t-NH-sd-SMP-HP @@ -0,0 +1,18 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..90d924fea9e --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/N5-U-T-NH-sd-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_KERNEL=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT1-nh new file mode 100644 index 00000000000..023f312a931 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT1-nh @@ -0,0 +1,23 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT3-NH new file mode 100644 index 00000000000..6fd0235dae7 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT3-NH @@ -0,0 +1,20 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n 
+CONFIG_PREEMPT=n +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..f72402d7c13 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P1-S-T-NH-SD-SMP-HP @@ -0,0 +1,19 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..0f3b667d2a9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b035e141bf2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P3-3-T-nh-SD-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..3ccf6a9447f --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P4-A-t-NH-sd-SMP-HP @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..ef624ce73d8 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp @@ -0,0 +1,28 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y 
+CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_PROVE_RCU_DELAY=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT1-nh new file mode 100644 index 00000000000..e3361c3894a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT1-nh @@ -0,0 +1,23 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT2-NH new file mode 100644 index 00000000000..64abfc3b4d9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT2-NH @@ -0,0 +1,22 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh new file mode 100644 index 00000000000..5ace37a8978 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Kernel-version-dependent shell functions for the rest of the scripts. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +# per_version_boot_params bootparam-string config-file seconds +# +# Adds per-version torture-module parameters to kernels supporting them. +# Which old kernels do not. 
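+#
+# With the kvm.sh default of a 30-minute run (seconds=1800), the string
+# echoed below is of the form:
+#
+#	rcutorture.stat_interval=15 rcutorture.shutdown_secs=1800
+#	rcutorture.rcutorture_runnable=1 rcutorture.test_no_idle_hz=1
+#	rcutorture.verbose=1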
+per_version_boot_params () { + echo rcutorture.stat_interval=15 \ + rcutorture.shutdown_secs=$3 \ + rcutorture.rcutorture_runnable=1 \ + rcutorture.test_no_idle_hz=1 \ + rcutorture.verbose=1 +} diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/CFLIST new file mode 100644 index 00000000000..da4cbc668f2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/CFLIST @@ -0,0 +1,17 @@ +sysidleY.2013.06.19a +sysidleN.2013.06.19a +P1-S-T-NH-SD-SMP-HP +P2-2-t-nh-sd-SMP-hp +P3-3-T-nh-SD-SMP-hp +P4-A-t-NH-sd-SMP-HP +P5-U-T-NH-sd-SMP-hp +P6---t-nh-SD-smp-hp +N1-S-T-NH-SD-SMP-HP +N2-2-t-nh-sd-SMP-hp +N3-3-T-nh-SD-SMP-hp +N4-A-t-NH-sd-SMP-HP +N5-U-T-NH-sd-SMP-hp +PT1-nh +PT2-NH +NT1-nh +NT3-NH diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..d81e11d280a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N1-S-T-NH-SD-SMP-HP @@ -0,0 +1,19 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..02e418572b1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b3100f69c8c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N3-3-T-nh-SD-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..c56b4453072 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N4-A-t-NH-sd-SMP-HP @@ -0,0 +1,18 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y 
+CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..90d924fea9e --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N5-U-T-NH-sd-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_KERNEL=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N6---t-nh-SD-smp-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N6---t-nh-SD-smp-hp new file mode 100644 index 00000000000..0ccc36d7273 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N6---t-nh-SD-smp-hp @@ -0,0 +1,19 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_NR_CPUS=1 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N7-4-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N7-4-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..3f640cf8497 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N7-4-T-NH-SD-SMP-HP @@ -0,0 +1,26 @@ +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_KERNEL=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=16 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_NONE=y +CONFIG_RCU_NOCB_CPU_ZERO=n +CONFIG_RCU_NOCB_CPU_ALL=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N8-2-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N8-2-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..285da2dd8ac --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/N8-2-T-NH-SD-SMP-HP @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_KERNEL=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=14 +CONFIG_NR_CPUS=16 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT1-nh 
b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT1-nh new file mode 100644 index 00000000000..023f312a931 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT1-nh @@ -0,0 +1,23 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT3-NH new file mode 100644 index 00000000000..6fd0235dae7 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT3-NH @@ -0,0 +1,20 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..9647c44cf4b --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P1-S-T-NH-SD-SMP-HP @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..0f3b667d2a9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b035e141bf2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P3-3-T-nh-SD-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git 
a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..3ccf6a9447f --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P4-A-t-NH-sd-SMP-HP @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..ef624ce73d8 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp @@ -0,0 +1,28 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_PROVE_RCU_DELAY=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P6---t-nh-SD-smp-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P6---t-nh-SD-smp-hp new file mode 100644 index 00000000000..f4c9175828b --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P6---t-nh-SD-smp-hp @@ -0,0 +1,18 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=n +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..77a8c5b7576 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP @@ -0,0 +1,30 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=16 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_NONE=n +CONFIG_RCU_NOCB_CPU_ZERO=n +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_SLUB=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-all 
b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-all new file mode 100644 index 00000000000..0eecebc6e95 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-all @@ -0,0 +1,30 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=16 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_NONE=y +CONFIG_RCU_NOCB_CPU_ZERO=n +CONFIG_RCU_NOCB_CPU_ALL=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_SLUB=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-none b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-none new file mode 100644 index 00000000000..0eecebc6e95 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-none @@ -0,0 +1,30 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=16 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_NONE=y +CONFIG_RCU_NOCB_CPU_ZERO=n +CONFIG_RCU_NOCB_CPU_ALL=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_SLUB=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-hp new file mode 100644 index 00000000000..588bc70420c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-hp @@ -0,0 +1,30 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=16 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_NONE=n +CONFIG_RCU_NOCB_CPU_ZERO=y +CONFIG_RCU_NOCB_CPU_ALL=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_SLUB=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT1-nh new file mode 100644 index 00000000000..e3361c3894a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT1-nh @@ -0,0 +1,23 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y 
+CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT2-NH new file mode 100644 index 00000000000..64abfc3b4d9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT2-NH @@ -0,0 +1,22 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/CFLIST new file mode 100644 index 00000000000..18223947bbc --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/CFLIST @@ -0,0 +1,14 @@ +P1-S-T-NH-SD-SMP-HP +P2-2-t-nh-sd-SMP-hp +P3-3-T-nh-SD-SMP-hp +P4-A-t-NH-sd-SMP-HP +P5-U-T-NH-sd-SMP-hp +N1-S-T-NH-SD-SMP-HP +N2-2-t-nh-sd-SMP-hp +N3-3-T-nh-SD-SMP-hp +N4-A-t-NH-sd-SMP-HP +N5-U-T-NH-sd-SMP-hp +PT1-nh +PT2-NH +NT1-nh +NT3-NH diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..d81e11d280a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N1-S-T-NH-SD-SMP-HP @@ -0,0 +1,19 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..02e418572b1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b3100f69c8c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N3-3-T-nh-SD-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff 
--git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..c56b4453072 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N4-A-t-NH-sd-SMP-HP @@ -0,0 +1,18 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..90d924fea9e --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/N5-U-T-NH-sd-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_KERNEL=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT1-nh new file mode 100644 index 00000000000..023f312a931 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT1-nh @@ -0,0 +1,23 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT3-NH new file mode 100644 index 00000000000..6fd0235dae7 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT3-NH @@ -0,0 +1,20 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..9647c44cf4b --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P1-S-T-NH-SD-SMP-HP @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y 
+CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..0f3b667d2a9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b035e141bf2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P3-3-T-nh-SD-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..3ccf6a9447f --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P4-A-t-NH-sd-SMP-HP @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..ef624ce73d8 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp @@ -0,0 +1,28 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_PROVE_RCU_DELAY=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT1-nh new file mode 100644 index 00000000000..e3361c3894a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT1-nh @@ -0,0 +1,23 @@ 
+CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT2-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT2-NH new file mode 100644 index 00000000000..64abfc3b4d9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT2-NH @@ -0,0 +1,22 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh new file mode 100644 index 00000000000..bae55692ce6 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Kernel-version-dependent shell functions for the rest of the scripts. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +# rcutorture_param_onoff bootparam-string config-file +# +# Adds onoff rcutorture module parameters to kernels having it. +rcutorture_param_onoff () { + if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2" + then + echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2 + echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30 + fi +} + +# per_version_boot_params bootparam-string config-file seconds +# +# Adds per-version torture-module parameters to kernels supporting them. 
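To make the division of labor concrete: rcutorture_param_onoff above is written to contribute the two onoff parameters only for CPU-hotplug-capable configurations whose boot string does not already request them (the exact checks live in the bootparam_hotplug_cpu and configfrag_hotplug_cpu helpers defined elsewhere), while per_version_boot_params, defined next, prepends the caller's boot string and then appends the fixed rcutorture parameters. A rough sketch of the combined result; the kernel command line, the config-fragment name, and the 120-second run length are only examples:

------------------------------------------------------------------------
# Hypothetical inputs: an example command line, a fragment that sets
# CONFIG_HOTPLUG_CPU=y, and a 120-second run.
per_version_boot_params "root=/dev/vda console=ttyS0" CF-hotplug-example 120
# expected to print something like (as a single line):
#   root=/dev/vda console=ttyS0
#   rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
#   rcutorture.stat_interval=15 rcutorture.shutdown_secs=120
#   rcutorture.rcutorture_runnable=1 rcutorture.test_no_idle_hz=1
#   rcutorture.verbose=1
------------------------------------------------------------------------

The "CPU-hotplug kernel, adding rcutorture onoff" message goes to stderr, so it never ends up on the kernel command line.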
+per_version_boot_params () { + echo $1 `rcutorture_param_onoff "$1" "$2"` \ + rcutorture.stat_interval=15 \ + rcutorture.shutdown_secs=$3 \ + rcutorture.rcutorture_runnable=1 \ + rcutorture.test_no_idle_hz=1 \ + rcutorture.verbose=1 +} diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/CFLIST new file mode 100644 index 00000000000..18223947bbc --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/CFLIST @@ -0,0 +1,14 @@ +P1-S-T-NH-SD-SMP-HP +P2-2-t-nh-sd-SMP-hp +P3-3-T-nh-SD-SMP-hp +P4-A-t-NH-sd-SMP-HP +P5-U-T-NH-sd-SMP-hp +N1-S-T-NH-SD-SMP-HP +N2-2-t-nh-sd-SMP-hp +N3-3-T-nh-SD-SMP-hp +N4-A-t-NH-sd-SMP-HP +N5-U-T-NH-sd-SMP-hp +PT1-nh +PT2-NH +NT1-nh +NT3-NH diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..d81e11d280a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N1-S-T-NH-SD-SMP-HP @@ -0,0 +1,19 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..02e418572b1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b3100f69c8c --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N3-3-T-nh-SD-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..c56b4453072 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N4-A-t-NH-sd-SMP-HP @@ -0,0 +1,18 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n 
+#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..90d924fea9e --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/N5-U-T-NH-sd-SMP-hp @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=y +CONFIG_DEBUG_KERNEL=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +#CHECK#CONFIG_TREE_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT1-nh new file mode 100644 index 00000000000..023f312a931 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT1-nh @@ -0,0 +1,23 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT3-NH b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT3-NH new file mode 100644 index 00000000000..6fd0235dae7 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT3-NH @@ -0,0 +1,20 @@ +#CHECK#CONFIG_TINY_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=y +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=n +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P1-S-T-NH-SD-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P1-S-T-NH-SD-SMP-HP new file mode 100644 index 00000000000..9647c44cf4b --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P1-S-T-NH-SD-SMP-HP @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=8 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P2-2-t-nh-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P2-2-t-nh-sd-SMP-hp new file mode 100644 index 00000000000..0f3b667d2a9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P2-2-t-nh-sd-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=4 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n 
+CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P3-3-T-nh-SD-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P3-3-T-nh-SD-SMP-hp new file mode 100644 index 00000000000..b035e141bf2 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P3-3-T-nh-SD-SMP-hp @@ -0,0 +1,20 @@ +CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=n +CONFIG_SMP=y +CONFIG_RCU_FANOUT=2 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P4-A-t-NH-sd-SMP-HP b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P4-A-t-NH-sd-SMP-HP new file mode 100644 index 00000000000..3ccf6a9447f --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P4-A-t-NH-sd-SMP-HP @@ -0,0 +1,22 @@ +CONFIG_RCU_TRACE=n +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=n +CONFIG_HOTPLUG_CPU=y +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp new file mode 100644 index 00000000000..ef624ce73d8 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp @@ -0,0 +1,28 @@ +CONFIG_RCU_TRACE=y +CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_NO_HZ=y +CONFIG_SMP=y +CONFIG_RCU_FANOUT=6 +CONFIG_NR_CPUS=8 +CONFIG_RCU_FANOUT_EXACT=y +CONFIG_HOTPLUG_CPU=n +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +#CHECK#CONFIG_TREE_PREEMPT_RCU=y +CONFIG_DEBUG_KERNEL=y +CONFIG_PROVE_RCU_DELAY=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_RT_MUTEXES=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT1-nh b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT1-nh new file mode 100644 index 00000000000..e3361c3894a --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT1-nh @@ -0,0 +1,23 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=2 +CONFIG_RCU_TRACE=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=n +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT2-NH 
b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT2-NH new file mode 100644 index 00000000000..64abfc3b4d9 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT2-NH @@ -0,0 +1,22 @@ +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_MODULE_UNLOAD=y +CONFIG_SUSPEND=n +CONFIG_HIBERNATION=n +# +CONFIG_SMP=n +# +CONFIG_HOTPLUG_CPU=n +# +CONFIG_NO_HZ=y +# +CONFIG_PREEMPT_NONE=n +CONFIG_PREEMPT_VOLUNTARY=n +CONFIG_PREEMPT=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SYSFS_DEPRECATED_V2=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_PRINTK_TIME=y + diff --git a/tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh new file mode 100644 index 00000000000..8977d8d31b1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# +# Kernel-version-dependent shell functions for the rest of the scripts. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +# rcutorture_param_n_barrier_cbs bootparam-string +# +# Adds n_barrier_cbs rcutorture module parameter to kernels having it. +rcutorture_param_n_barrier_cbs () { + if echo $1 | grep -q "rcutorture\.n_barrier_cbs" + then + : + else + echo rcutorture.n_barrier_cbs=4 + fi +} + +# rcutorture_param_onoff bootparam-string config-file +# +# Adds onoff rcutorture module parameters to kernels having it. +rcutorture_param_onoff () { + if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2" + then + echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2 + echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30 + fi +} + +# per_version_boot_params bootparam-string config-file seconds +# +# Adds per-version torture-module parameters to kernels supporting them. +per_version_boot_params () { + echo $1 `rcutorture_param_onoff "$1" "$2"` \ + `rcutorture_param_n_barrier_cbs "$1"` \ + rcutorture.stat_interval=15 \ + rcutorture.shutdown_secs=$3 \ + rcutorture.rcutorture_runnable=1 \ + rcutorture.test_no_idle_hz=1 \ + rcutorture.verbose=1 +} diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh new file mode 100644 index 00000000000..8977d8d31b1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# +# Kernel-version-dependent shell functions for the rest of the scripts. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. +# +# Copyright (C) IBM Corporation, 2013 +# +# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +# rcutorture_param_n_barrier_cbs bootparam-string +# +# Adds n_barrier_cbs rcutorture module parameter to kernels having it. +rcutorture_param_n_barrier_cbs () { + if echo $1 | grep -q "rcutorture\.n_barrier_cbs" + then + : + else + echo rcutorture.n_barrier_cbs=4 + fi +} + +# rcutorture_param_onoff bootparam-string config-file +# +# Adds onoff rcutorture module parameters to kernels having it. +rcutorture_param_onoff () { + if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2" + then + echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2 + echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30 + fi +} + +# per_version_boot_params bootparam-string config-file seconds +# +# Adds per-version torture-module parameters to kernels supporting them. +per_version_boot_params () { + echo $1 `rcutorture_param_onoff "$1" "$2"` \ + `rcutorture_param_n_barrier_cbs "$1"` \ + rcutorture.stat_interval=15 \ + rcutorture.shutdown_secs=$3 \ + rcutorture.rcutorture_runnable=1 \ + rcutorture.test_no_idle_hz=1 \ + rcutorture.verbose=1 +} diff --git a/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt new file mode 100644 index 00000000000..28db67b54e5 --- /dev/null +++ b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt @@ -0,0 +1,40 @@ +This document gives a brief rationale for the TINY_RCU test cases. + + +Kconfig Parameters: + +CONFIG_DEBUG_LOCK_ALLOC -- Do all three and none of the three. +CONFIG_PREEMPT_COUNT +CONFIG_RCU_TRACE + +The theory here is that randconfig testing will hit the other six possible +combinations of these parameters. + + +Kconfig Parameters Ignored: + +CONFIG_DEBUG_OBJECTS_RCU_HEAD +CONFIG_PROVE_RCU + + In common code tested by TREE_RCU test cases. + +CONFIG_NO_HZ_FULL_SYSIDLE +CONFIG_RCU_NOCB_CPU +CONFIG_RCU_USER_QS + + Meaningless for TINY_RCU. + +CONFIG_RCU_STALL_COMMON +CONFIG_RCU_TORTURE_TEST + + Redundant with CONFIG_RCU_TRACE. + +CONFIG_HOTPLUG_CPU +CONFIG_PREEMPT +CONFIG_PREEMPT_RCU +CONFIG_SMP +CONFIG_TINY_RCU +CONFIG_TREE_PREEMPT_RCU +CONFIG_TREE_RCU + + All forced by CONFIG_TINY_RCU. diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt new file mode 100644 index 00000000000..adbb76cffb4 --- /dev/null +++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt @@ -0,0 +1,95 @@ +This document gives a brief rationale for the TREE_RCU-related test +cases, a group that includes TREE_PREEMPT_RCU. + + +Kconfig Parameters: + +CONFIG_DEBUG_LOCK_ALLOC -- Do three, covering CONFIG_PROVE_LOCKING & not. +CONFIG_DEBUG_OBJECTS_RCU_HEAD -- Do one. +CONFIG_HOTPLUG_CPU -- Do half. (Every second.) +CONFIG_HZ_PERIODIC -- Do one. +CONFIG_NO_HZ_IDLE -- Do those not otherwise specified. (Groups of two.) +CONFIG_NO_HZ_FULL -- Do two, one with CONFIG_NO_HZ_FULL_SYSIDLE. +CONFIG_NO_HZ_FULL_SYSIDLE -- Do one. +CONFIG_PREEMPT -- Do half. (First three and #8.) 
+CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not. +CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING. +CONFIG_PROVE_RCU_DELAY -- Do one. +CONFIG_RCU_BOOST -- one of TREE_PREEMPT_RCU. +CONFIG_RCU_BOOST_PRIO -- set to 2 for _BOOST testing. +CONFIG_RCU_CPU_STALL_INFO -- do one with and without _VERBOSE. +CONFIG_RCU_CPU_STALL_VERBOSE -- do one with and without _INFO. +CONFIG_RCU_FANOUT -- Cover hierarchy as currently, but overlap with others. +CONFIG_RCU_FANOUT_EXACT -- Do one. +CONFIG_RCU_FANOUT_LEAF -- Do one non-default. +CONFIG_RCU_FAST_NO_HZ -- Do one, but not with CONFIG_RCU_NOCB_CPU_ALL. +CONFIG_RCU_NOCB_CPU -- Do three, see below. +CONFIG_RCU_NOCB_CPU_ALL -- Do one. +CONFIG_RCU_NOCB_CPU_NONE -- Do one. +CONFIG_RCU_NOCB_CPU_ZERO -- Do one. +CONFIG_RCU_TRACE -- Do half. +CONFIG_SMP -- Need one !SMP for TREE_PREEMPT_RCU. +RCU-bh: Do one with PREEMPT and one with !PREEMPT. +RCU-sched: Do one with PREEMPT but not BOOST. + + +Hierarchy: + +TREE01. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=8, CONFIG_RCU_FANOUT_EXACT=n. +TREE02. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=n, + CONFIG_RCU_FANOUT_LEAF=3. +TREE03. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=4, CONFIG_RCU_FANOUT_EXACT=n, + CONFIG_RCU_FANOUT_LEAF=4. +TREE04. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n, + CONFIG_RCU_FANOUT_LEAF=2. +TREE05. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=n + CONFIG_RCU_FANOUT_LEAF=6. +TREE06. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=y + CONFIG_RCU_FANOUT_LEAF=6. +TREE07. CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n, + CONFIG_RCU_FANOUT_LEAF=2. +TREE08. CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=y, + CONFIG_RCU_FANOUT_LEAF=2. +TREE09. CONFIG_NR_CPUS=1. + + +Kconfig Parameters Ignored: + +CONFIG_64BIT + + Used only to check CONFIG_RCU_FANOUT value, inspection suffices. + +CONFIG_NO_HZ_FULL_SYSIDLE_SMALL + + Defer until Frederic uses this. + +CONFIG_PREEMPT_COUNT +CONFIG_PREEMPT_RCU + + Redundant with CONFIG_PREEMPT, ignore. + +CONFIG_RCU_BOOST_DELAY + + Inspection suffices, ignore. + +CONFIG_RCU_CPU_STALL_TIMEOUT + + Inspection suffices, ignore. + +CONFIG_RCU_STALL_COMMON + + Implied by TREE_RCU and TREE_PREEMPT_RCU. + +CONFIG_RCU_TORTURE_TEST +CONFIG_RCU_TORTURE_TEST_RUNNABLE + + Always used in KVM testing. + +CONFIG_RCU_USER_QS + + Redundant with CONFIG_NO_HZ_FULL. + +CONFIG_TREE_PREEMPT_RCU +CONFIG_TREE_RCU + + These are controlled by CONFIG_PREEMPT. diff --git a/tools/testing/selftests/rcutorture/doc/initrd.txt b/tools/testing/selftests/rcutorture/doc/initrd.txt new file mode 100644 index 00000000000..49d134c25c0 --- /dev/null +++ b/tools/testing/selftests/rcutorture/doc/initrd.txt @@ -0,0 +1,90 @@ +This document describes one way to create the initrd directory hierarchy +in order to allow an initrd to be built into your kernel. The trick +here is to steal the initrd file used on your Linux laptop, Ubuntu in +this case. There are probably much better ways of doing this. + +That said, here are the commands: + +------------------------------------------------------------------------ +zcat /initrd.img > /tmp/initrd.img.zcat +mkdir initrd +cd initrd +cpio -id < /tmp/initrd.img.zcat +------------------------------------------------------------------------ + +Interestingly enough, if you are running rcutorture, you don't really +need userspace in many cases. 
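One way to consume the unpacked tree, assuming the kernel is built with the initramfs linked in, is to point the build at that directory through the standard initramfs options. A sketch under that assumption; the path is only an example and should match wherever the cpio step above unpacked to:

------------------------------------------------------------------------
# Hypothetical paths; run from the top of the kernel source tree and
# adjust to wherever the "initrd" directory created above lives.
cat >> .config << EOF
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE="/home/user/rcutorture/initrd"
EOF
make olddefconfig
------------------------------------------------------------------------

With something along these lines in place, the build embeds the directory contents in the kernel image, so the resulting kernel can boot without a separate root filesystem, given an /init script such as the one shown below.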
Running without userspace has the +advantage of allowing you to test your kernel independently of the +distro in place, the root-filesystem layout, and so on. To make this +happen, put the following script in the initrd's tree's "/init" file, +with 0755 mode. + +------------------------------------------------------------------------ +#!/bin/sh + +[ -d /dev ] || mkdir -m 0755 /dev +[ -d /root ] || mkdir -m 0700 /root +[ -d /sys ] || mkdir /sys +[ -d /proc ] || mkdir /proc +[ -d /tmp ] || mkdir /tmp +mkdir -p /var/lock +mount -t sysfs -o nodev,noexec,nosuid sysfs /sys +mount -t proc -o nodev,noexec,nosuid proc /proc +# Some things don't work properly without /etc/mtab. +ln -sf /proc/mounts /etc/mtab + +# Note that this only becomes /dev on the real filesystem if udev's scripts +# are used; which they will be, but it's worth pointing out +if ! mount -t devtmpfs -o mode=0755 udev /dev; then + echo "W: devtmpfs not available, falling back to tmpfs for /dev" + mount -t tmpfs -o mode=0755 udev /dev + [ -e /dev/console ] || mknod --mode=600 /dev/console c 5 1 + [ -e /dev/kmsg ] || mknod --mode=644 /dev/kmsg c 1 11 + [ -e /dev/null ] || mknod --mode=666 /dev/null c 1 3 +fi + +mkdir /dev/pts +mount -t devpts -o noexec,nosuid,gid=5,mode=0620 devpts /dev/pts || true +mount -t tmpfs -o "nosuid,size=20%,mode=0755" tmpfs /run +mkdir /run/initramfs +# compatibility symlink for the pre-oneiric locations +ln -s /run/initramfs /dev/.initramfs + +# Export relevant variables +export ROOT= +export ROOTDELAY= +export ROOTFLAGS= +export ROOTFSTYPE= +export IP= +export BOOT= +export BOOTIF= +export UBIMTD= +export break= +export init=/sbin/init +export quiet=n +export readonly=y +export rootmnt=/root +export debug= +export panic= +export blacklist= +export resume= +export resume_offset= +export recovery= + +for i in /sys/devices/system/cpu/cpu*/online +do + case $i in + '/sys/devices/system/cpu/cpu0/online') + ;; + '/sys/devices/system/cpu/cpu*/online') + ;; + *) + echo 1 > $i + ;; + esac +done + +while : +do + sleep 10 +done diff --git a/tools/testing/selftests/rcutorture/doc/rcu-test-image.txt b/tools/testing/selftests/rcutorture/doc/rcu-test-image.txt new file mode 100644 index 00000000000..66efb59a1bd --- /dev/null +++ b/tools/testing/selftests/rcutorture/doc/rcu-test-image.txt @@ -0,0 +1,42 @@ +This document describes one way to created the rcu-test-image file +that contains the filesystem used by the guest-OS kernel. There are +probably much better ways of doing this, and this filesystem could no +doubt be smaller. It is probably also possible to simply download +an appropriate image from any number of places. + +That said, here are the commands: + +------------------------------------------------------------------------ +dd if=/dev/zero of=rcu-test-image bs=400M count=1 +mkfs.ext3 ./rcu-test-image +sudo mount -o loop ./rcu-test-image /mnt + +# Replace "precise" below with your favorite Ubuntu release. +# Empirical evidence says this image will work for 64-bit, but... +# Note that debootstrap does take a few minutes to run. Or longer. 
+sudo debootstrap --verbose --arch i386 precise /mnt http://archive.ubuntu.com/ubuntu +cat << '___EOF___' | sudo dd of=/mnt/etc/fstab +# UNCONFIGURED FSTAB FOR BASE SYSTEM +# +/dev/vda / ext3 defaults 1 1 +dev /dev tmpfs rw 0 0 +tmpfs /dev/shm tmpfs defaults 0 0 +devpts /dev/pts devpts gid=5,mode=620 0 0 +sysfs /sys sysfs defaults 0 0 +proc /proc proc defaults 0 0 +___EOF___ +sudo umount /mnt +------------------------------------------------------------------------ + + +References: + + http://sripathikodi.blogspot.com/2010/02/creating-kvm-bootable-fedora-system.html + https://help.ubuntu.com/community/KVM/CreateGuests + https://help.ubuntu.com/community/JeOSVMBuilder + http://wiki.libvirt.org/page/UbuntuKVMWalkthrough + http://www.moe.co.uk/2011/01/07/pci_add_option_rom-failed-to-find-romfile-pxe-rtl8139-bin/ -- "apt-get install kvm-pxe" + http://www.landley.net/writing/rootfs-howto.html + http://en.wikipedia.org/wiki/Initrd + http://en.wikipedia.org/wiki/Cpio + http://wiki.libvirt.org/page/UbuntuKVMWalkthrough diff --git a/tools/testing/selftests/sysctl/Makefile b/tools/testing/selftests/sysctl/Makefile new file mode 100644 index 00000000000..0a92adaf086 --- /dev/null +++ b/tools/testing/selftests/sysctl/Makefile @@ -0,0 +1,19 @@ +# Makefile for sysctl selftests. +# Expects kernel.sysctl_writes_strict=1. + +# No binaries, but make sure arg-less "make" doesn't trigger "run_tests". +all: + +# Allow specific tests to be selected. +test_num: + @/bin/sh ./run_numerictests + +test_string: + @/bin/sh ./run_stringtests + +run_tests: all test_num test_string + +# Nothing to clean up. +clean: + +.PHONY: all run_tests clean test_num test_string diff --git a/tools/testing/selftests/sysctl/common_tests b/tools/testing/selftests/sysctl/common_tests new file mode 100644 index 00000000000..17d534b1b7b --- /dev/null +++ b/tools/testing/selftests/sysctl/common_tests @@ -0,0 +1,109 @@ +#!/bin/sh + +TEST_FILE=$(mktemp) + +echo "== Testing sysctl behavior against ${TARGET} ==" + +set_orig() +{ + echo "${ORIG}" > "${TARGET}" +} + +set_test() +{ + echo "${TEST_STR}" > "${TARGET}" +} + +verify() +{ + local seen + seen=$(cat "$1") + if [ "${seen}" != "${TEST_STR}" ]; then + return 1 + fi + return 0 +} + +trap 'set_orig; rm -f "${TEST_FILE}"' EXIT + +rc=0 + +echo -n "Writing test file ... " +echo "${TEST_STR}" > "${TEST_FILE}" +if ! verify "${TEST_FILE}"; then + echo "FAIL" >&2 + exit 1 +else + echo "ok" +fi + +echo -n "Checking sysctl is not set to test value ... " +if verify "${TARGET}"; then + echo "FAIL" >&2 + exit 1 +else + echo "ok" +fi + +echo -n "Writing sysctl from shell ... " +set_test +if ! verify "${TARGET}"; then + echo "FAIL" >&2 + exit 1 +else + echo "ok" +fi + +echo -n "Resetting sysctl to original value ... " +set_orig +if verify "${TARGET}"; then + echo "FAIL" >&2 + exit 1 +else + echo "ok" +fi + +# Now that we've validated the sanity of "set_test" and "set_orig", +# we can use those functions to set starting states before running +# specific behavioral tests. + +echo -n "Writing entire sysctl in single write ... " +set_orig +dd if="${TEST_FILE}" of="${TARGET}" bs=4096 2>/dev/null +if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Writing middle of sysctl after synchronized seek ... " +set_test +dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 skip=1 2>/dev/null +if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Writing beyond end of sysctl ... 
" +set_orig +dd if="${TEST_FILE}" of="${TARGET}" bs=20 seek=2 2>/dev/null +if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Writing sysctl with multiple long writes ... " +set_orig +(perl -e 'print "A" x 50;'; echo "${TEST_STR}") | \ + dd of="${TARGET}" bs=50 2>/dev/null +if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests new file mode 100644 index 00000000000..8510f93f2d1 --- /dev/null +++ b/tools/testing/selftests/sysctl/run_numerictests @@ -0,0 +1,10 @@ +#!/bin/sh + +SYSCTL="/proc/sys" +TARGET="${SYSCTL}/vm/swappiness" +ORIG=$(cat "${TARGET}") +TEST_STR=$(( $ORIG + 1 )) + +. ./common_tests + +exit $rc diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests new file mode 100644 index 00000000000..90a9293d520 --- /dev/null +++ b/tools/testing/selftests/sysctl/run_stringtests @@ -0,0 +1,77 @@ +#!/bin/sh + +SYSCTL="/proc/sys" +TARGET="${SYSCTL}/kernel/domainname" +ORIG=$(cat "${TARGET}") +TEST_STR="Testing sysctl" + +. ./common_tests + +# Only string sysctls support seeking/appending. +MAXLEN=65 + +echo -n "Writing entire sysctl in short writes ... " +set_orig +dd if="${TEST_FILE}" of="${TARGET}" bs=1 2>/dev/null +if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Writing middle of sysctl after unsynchronized seek ... " +set_test +dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 2>/dev/null +if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Checking sysctl maxlen is at least $MAXLEN ... " +set_orig +perl -e 'print "A" x ('"${MAXLEN}"'-2), "B";' | \ + dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null +if ! grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Checking sysctl keeps original string on overflow append ... " +set_orig +perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \ + dd of="${TARGET}" bs=$(( MAXLEN - 1 )) 2>/dev/null +if grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Checking sysctl stays NULL terminated on write ... " +set_orig +perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \ + dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null +if grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +echo -n "Checking sysctl stays NULL terminated on overwrite ... " +set_orig +perl -e 'print "A" x ('"${MAXLEN}"'-1), "BB";' | \ + dd of="${TARGET}" bs=$(( $MAXLEN + 1 )) 2>/dev/null +if grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 +else + echo "ok" +fi + +exit $rc diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile new file mode 100644 index 00000000000..eb2859f4ad2 --- /dev/null +++ b/tools/testing/selftests/timers/Makefile @@ -0,0 +1,8 @@ +all: + gcc posix_timers.c -o posix_timers -lrt + +run_tests: all + ./posix_timers + +clean: + rm -f ./posix_timers diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c new file mode 100644 index 00000000000..41bd85559d4 --- /dev/null +++ b/tools/testing/selftests/timers/posix_timers.c @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2013 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com> + * + * Licensed under the terms of the GNU GPL License version 2 + * + * Selftests for a few posix timers interface. 
+ * + * Kernel loop code stolen from Steven Rostedt <srostedt@redhat.com> + */ + +#include <sys/time.h> +#include <stdio.h> +#include <signal.h> +#include <unistd.h> +#include <time.h> +#include <pthread.h> + +#define DELAY 2 +#define USECS_PER_SEC 1000000 + +static volatile int done; + +/* Busy loop in userspace to elapse ITIMER_VIRTUAL */ +static void user_loop(void) +{ + while (!done); +} + +/* + * Try to spend as much time as possible in kernelspace + * to elapse ITIMER_PROF. + */ +static void kernel_loop(void) +{ + void *addr = sbrk(0); + + while (!done) { + brk(addr + 4096); + brk(addr); + } +} + +/* + * Sleep until ITIMER_REAL expiration. + */ +static void idle_loop(void) +{ + pause(); +} + +static void sig_handler(int nr) +{ + done = 1; +} + +/* + * Check the expected timer expiration matches the GTOD elapsed delta since + * we armed the timer. Keep a 0.5 sec error margin due to various jitter. + */ +static int check_diff(struct timeval start, struct timeval end) +{ + long long diff; + + diff = end.tv_usec - start.tv_usec; + diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC; + + if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) { + printf("Diff too high: %lld..", diff); + return -1; + } + + return 0; +} + +static int check_itimer(int which) +{ + int err; + struct timeval start, end; + struct itimerval val = { + .it_value.tv_sec = DELAY, + }; + + printf("Check itimer "); + + if (which == ITIMER_VIRTUAL) + printf("virtual... "); + else if (which == ITIMER_PROF) + printf("prof... "); + else if (which == ITIMER_REAL) + printf("real... "); + + fflush(stdout); + + done = 0; + + if (which == ITIMER_VIRTUAL) + signal(SIGVTALRM, sig_handler); + else if (which == ITIMER_PROF) + signal(SIGPROF, sig_handler); + else if (which == ITIMER_REAL) + signal(SIGALRM, sig_handler); + + err = gettimeofday(&start, NULL); + if (err < 0) { + perror("Can't call gettimeofday()\n"); + return -1; + } + + err = setitimer(which, &val, NULL); + if (err < 0) { + perror("Can't set timer\n"); + return -1; + } + + if (which == ITIMER_VIRTUAL) + user_loop(); + else if (which == ITIMER_PROF) + kernel_loop(); + else if (which == ITIMER_REAL) + idle_loop(); + + gettimeofday(&end, NULL); + if (err < 0) { + perror("Can't call gettimeofday()\n"); + return -1; + } + + if (!check_diff(start, end)) + printf("[OK]\n"); + else + printf("[FAIL]\n"); + + return 0; +} + +static int check_timer_create(int which) +{ + int err; + timer_t id; + struct timeval start, end; + struct itimerspec val = { + .it_value.tv_sec = DELAY, + }; + + printf("Check timer_create() "); + if (which == CLOCK_THREAD_CPUTIME_ID) { + printf("per thread... "); + } else if (which == CLOCK_PROCESS_CPUTIME_ID) { + printf("per process... "); + } + fflush(stdout); + + done = 0; + err = timer_create(which, NULL, &id); + if (err < 0) { + perror("Can't create timer\n"); + return -1; + } + signal(SIGALRM, sig_handler); + + err = gettimeofday(&start, NULL); + if (err < 0) { + perror("Can't call gettimeofday()\n"); + return -1; + } + + err = timer_settime(id, 0, &val, NULL); + if (err < 0) { + perror("Can't set timer\n"); + return -1; + } + + user_loop(); + + gettimeofday(&end, NULL); + if (err < 0) { + perror("Can't call gettimeofday()\n"); + return -1; + } + + if (!check_diff(start, end)) + printf("[OK]\n"); + else + printf("[FAIL]\n"); + + return 0; +} + +int main(int argc, char **argv) +{ + int err; + + printf("Testing posix timers. 
False negative may happen on CPU execution \n"); + printf("based timers if other threads run on the CPU...\n"); + + if (check_itimer(ITIMER_VIRTUAL) < 0) + return -1; + + if (check_itimer(ITIMER_PROF) < 0) + return -1; + + if (check_itimer(ITIMER_REAL) < 0) + return -1; + + if (check_timer_create(CLOCK_THREAD_CPUTIME_ID) < 0) + return -1; + + /* + * It's unfortunately hard to reliably test a timer expiration + * on parallel multithread cputime. We could arm it to expire + * on DELAY * nr_threads, with nr_threads busy looping, then wait + * the normal DELAY since the time is elapsing nr_threads faster. + * But for that we need to ensure we have real physical free CPUs + * to ensure true parallelism. So test only one thread until we + * find a better solution. + */ + if (check_timer_create(CLOCK_PROCESS_CPUTIME_ID) < 0) + return -1; + + return 0; +} diff --git a/tools/testing/selftests/user/Makefile b/tools/testing/selftests/user/Makefile new file mode 100644 index 00000000000..396255bd720 --- /dev/null +++ b/tools/testing/selftests/user/Makefile @@ -0,0 +1,13 @@ +# Makefile for user memory selftests + +# No binaries, but make sure arg-less "make" doesn't trigger "run_tests" +all: + +run_tests: all + @if /sbin/modprobe test_user_copy ; then \ + rmmod test_user_copy; \ + echo "user_copy: ok"; \ + else \ + echo "user_copy: [FAIL]"; \ + exit 1; \ + fi diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore new file mode 100644 index 00000000000..ff1bb16cec4 --- /dev/null +++ b/tools/testing/selftests/vm/.gitignore @@ -0,0 +1,4 @@ +hugepage-mmap +hugepage-shm +map_hugetlb +thuge-gen diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index b336b24aa6c..3f94e1afd6c 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -1,14 +1,15 @@ # Makefile for vm selftests CC = $(CROSS_COMPILE)gcc -CFLAGS = -Wall -Wextra +CFLAGS = -Wall +BINARIES = hugepage-mmap hugepage-shm map_hugetlb thuge-gen hugetlbfstest -all: hugepage-mmap hugepage-shm map_hugetlb +all: $(BINARIES) %: %.c $(CC) $(CFLAGS) -o $@ $^ run_tests: all - /bin/sh ./run_vmtests + @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) clean: - $(RM) hugepage-mmap hugepage-shm map_hugetlb + $(RM) $(BINARIES) diff --git a/tools/testing/selftests/vm/hugetlbfstest.c b/tools/testing/selftests/vm/hugetlbfstest.c new file mode 100644 index 00000000000..ea40ff8c239 --- /dev/null +++ b/tools/testing/selftests/vm/hugetlbfstest.c @@ -0,0 +1,84 @@ +#define _GNU_SOURCE +#include <assert.h> +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> + +typedef unsigned long long u64; + +static size_t length = 1 << 24; + +static u64 read_rss(void) +{ + char buf[4096], *s = buf; + int i, fd; + u64 rss; + + fd = open("/proc/self/statm", O_RDONLY); + assert(fd > 2); + memset(buf, 0, sizeof(buf)); + read(fd, buf, sizeof(buf) - 1); + for (i = 0; i < 1; i++) + s = strchr(s, ' ') + 1; + rss = strtoull(s, NULL, 10); + return rss << 12; /* assumes 4k pagesize */ +} + +static void do_mmap(int fd, int extra_flags, int unmap) +{ + int *p; + int flags = MAP_PRIVATE | MAP_POPULATE | extra_flags; + u64 before, after; + + before = read_rss(); + p = mmap(NULL, length, PROT_READ | PROT_WRITE, flags, fd, 0); + assert(p != MAP_FAILED || + !"mmap returned an unexpected error"); + after = read_rss(); + assert(llabs(after - before - length) < 0x40000 || + !"rss 
didn't grow as expected"); + if (!unmap) + return; + munmap(p, length); + after = read_rss(); + assert(llabs(after - before) < 0x40000 || + !"rss didn't shrink as expected"); +} + +static int open_file(const char *path) +{ + int fd, err; + + unlink(path); + fd = open(path, O_CREAT | O_RDWR | O_TRUNC | O_EXCL + | O_LARGEFILE | O_CLOEXEC, 0600); + assert(fd > 2); + unlink(path); + err = ftruncate(fd, length); + assert(!err); + return fd; +} + +int main(void) +{ + int hugefd, fd; + + fd = open_file("/dev/shm/hugetlbhog"); + hugefd = open_file("/hugepages/hugetlbhog"); + + system("echo 100 > /proc/sys/vm/nr_hugepages"); + do_mmap(-1, MAP_ANONYMOUS, 1); + do_mmap(fd, 0, 1); + do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 1); + do_mmap(hugefd, 0, 1); + do_mmap(hugefd, MAP_HUGETLB, 1); + /* Leak the last one to test do_exit() */ + do_mmap(-1, MAP_ANONYMOUS | MAP_HUGETLB, 0); + printf("oll korrekt.\n"); + return 0; +} diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index 4c53cae6c27..c87b6812300 100644 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests @@ -4,6 +4,7 @@ #we need 256M, below is the size in kB needmem=262144 mnt=./huge +exitcode=0 #get pagesize and freepages from /proc/meminfo while read name size unit; do @@ -41,6 +42,7 @@ echo "--------------------" ./hugepage-mmap if [ $? -ne 0 ]; then echo "[FAIL]" + exitcode=1 else echo "[PASS]" fi @@ -55,6 +57,7 @@ echo "--------------------" ./hugepage-shm if [ $? -ne 0 ]; then echo "[FAIL]" + exitcode=1 else echo "[PASS]" fi @@ -67,6 +70,18 @@ echo "--------------------" ./map_hugetlb if [ $? -ne 0 ]; then echo "[FAIL]" + exitcode=1 +else + echo "[PASS]" +fi + +echo "--------------------" +echo "running hugetlbfstest" +echo "--------------------" +./hugetlbfstest +if [ $? -ne 0 ]; then + echo "[FAIL]" + exitcode=1 else echo "[PASS]" fi @@ -75,3 +90,4 @@ fi umount $mnt rm -rf $mnt echo $nr_hugepgs > /proc/sys/vm/nr_hugepages +exit $exitcode diff --git a/tools/testing/selftests/vm/thuge-gen.c b/tools/testing/selftests/vm/thuge-gen.c new file mode 100644 index 00000000000..c87957295f7 --- /dev/null +++ b/tools/testing/selftests/vm/thuge-gen.c @@ -0,0 +1,254 @@ +/* Test selecting other page sizes for mmap/shmget. + + Before running this huge pages for each huge page size must have been + reserved. + For large pages beyond MAX_ORDER (like 1GB on x86) boot options must be used. + Also shmmax must be increased. + And you need to run as root to work around some weird permissions in shm. + And nothing using huge pages should run in parallel. 
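+ A minimal setup sketch (an assumption for illustration, not part of the test;
+ adjust the figures to your system):
+     echo 4 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+     echo 4294967296 > /proc/sys/kernel/shmmax
+ i.e. reserve at least NUM_PAGES pages in each hugepages-*kB directory present
+ and raise shmmax to at least NUM_PAGES times the largest huge page size.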
+ When the program aborts you may need to clean up the shm segments with + ipcrm -m by hand, like this + sudo ipcs | awk '$1 == "0x00000000" {print $2}' | xargs -n1 sudo ipcrm -m + (warning this will remove all if someone else uses them) */ + +#define _GNU_SOURCE 1 +#include <sys/mman.h> +#include <stdlib.h> +#include <stdio.h> +#include <sys/ipc.h> +#include <sys/shm.h> +#include <sys/stat.h> +#include <glob.h> +#include <assert.h> +#include <unistd.h> +#include <stdarg.h> +#include <string.h> + +#define err(x) perror(x), exit(1) + +#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) +#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) +#define MAP_HUGE_SHIFT 26 +#define MAP_HUGE_MASK 0x3f +#define MAP_HUGETLB 0x40000 + +#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */ +#define SHM_HUGE_SHIFT 26 +#define SHM_HUGE_MASK 0x3f +#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT) +#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT) + +#define NUM_PAGESIZES 5 + +#define NUM_PAGES 4 + +#define Dprintf(fmt...) // printf(fmt) + +unsigned long page_sizes[NUM_PAGESIZES]; +int num_page_sizes; + +int ilog2(unsigned long v) +{ + int l = 0; + while ((1UL << l) < v) + l++; + return l; +} + +void find_pagesizes(void) +{ + glob_t g; + int i; + glob("/sys/kernel/mm/hugepages/hugepages-*kB", 0, NULL, &g); + assert(g.gl_pathc <= NUM_PAGESIZES); + for (i = 0; i < g.gl_pathc; i++) { + sscanf(g.gl_pathv[i], "/sys/kernel/mm/hugepages/hugepages-%lukB", + &page_sizes[i]); + page_sizes[i] <<= 10; + printf("Found %luMB\n", page_sizes[i] >> 20); + } + num_page_sizes = g.gl_pathc; + globfree(&g); +} + +unsigned long default_huge_page_size(void) +{ + unsigned long hps = 0; + char *line = NULL; + size_t linelen = 0; + FILE *f = fopen("/proc/meminfo", "r"); + if (!f) + return 0; + while (getline(&line, &linelen, f) > 0) { + if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) { + hps <<= 10; + break; + } + } + free(line); + return hps; +} + +void show(unsigned long ps) +{ + char buf[100]; + if (ps == getpagesize()) + return; + printf("%luMB: ", ps >> 20); + fflush(stdout); + snprintf(buf, sizeof buf, + "cat /sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages", + ps >> 10); + system(buf); +} + +unsigned long read_sysfs(int warn, char *fmt, ...) 
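+/* (descriptive note) builds the sysfs path from fmt/..., reads one unsigned
+ * long from it and returns the value; returns 0 when the file is missing
+ * (logging it if warn is set) or cannot be parsed */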
+{ + char *line = NULL; + size_t linelen = 0; + char buf[100]; + FILE *f; + va_list ap; + unsigned long val = 0; + + va_start(ap, fmt); + vsnprintf(buf, sizeof buf, fmt, ap); + va_end(ap); + + f = fopen(buf, "r"); + if (!f) { + if (warn) + printf("missing %s\n", buf); + return 0; + } + if (getline(&line, &linelen, f) > 0) { + sscanf(line, "%lu", &val); + } + fclose(f); + free(line); + return val; +} + +unsigned long read_free(unsigned long ps) +{ + return read_sysfs(ps != getpagesize(), + "/sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages", + ps >> 10); +} + +void test_mmap(unsigned long size, unsigned flags) +{ + char *map; + unsigned long before, after; + int err; + + before = read_free(size); + map = mmap(NULL, size*NUM_PAGES, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB|flags, 0, 0); + + if (map == (char *)-1) err("mmap"); + memset(map, 0xff, size*NUM_PAGES); + after = read_free(size); + Dprintf("before %lu after %lu diff %ld size %lu\n", + before, after, before - after, size); + assert(size == getpagesize() || (before - after) == NUM_PAGES); + show(size); + err = munmap(map, size); + assert(!err); +} + +void test_shmget(unsigned long size, unsigned flags) +{ + int id; + unsigned long before, after; + int err; + + before = read_free(size); + id = shmget(IPC_PRIVATE, size * NUM_PAGES, IPC_CREAT|0600|flags); + if (id < 0) err("shmget"); + + struct shm_info i; + if (shmctl(id, SHM_INFO, (void *)&i) < 0) err("shmctl"); + Dprintf("alloc %lu res %lu\n", i.shm_tot, i.shm_rss); + + + Dprintf("id %d\n", id); + char *map = shmat(id, NULL, 0600); + if (map == (char*)-1) err("shmat"); + + shmctl(id, IPC_RMID, NULL); + + memset(map, 0xff, size*NUM_PAGES); + after = read_free(size); + + Dprintf("before %lu after %lu diff %ld size %lu\n", + before, after, before - after, size); + assert(size == getpagesize() || (before - after) == NUM_PAGES); + show(size); + err = shmdt(map); + assert(!err); +} + +void sanity_checks(void) +{ + int i; + unsigned long largest = getpagesize(); + + for (i = 0; i < num_page_sizes; i++) { + if (page_sizes[i] > largest) + largest = page_sizes[i]; + + if (read_free(page_sizes[i]) < NUM_PAGES) { + printf("Not enough huge pages for page size %lu MB, need %u\n", + page_sizes[i] >> 20, + NUM_PAGES); + exit(0); + } + } + + if (read_sysfs(0, "/proc/sys/kernel/shmmax") < NUM_PAGES * largest) { + printf("Please do echo %lu > /proc/sys/kernel/shmmax", largest * NUM_PAGES); + exit(0); + } + +#if defined(__x86_64__) + if (largest != 1U<<30) { + printf("No GB pages available on x86-64\n" + "Please boot with hugepagesz=1G hugepages=%d\n", NUM_PAGES); + exit(0); + } +#endif +} + +int main(void) +{ + int i; + unsigned default_hps = default_huge_page_size(); + + find_pagesizes(); + + sanity_checks(); + + for (i = 0; i < num_page_sizes; i++) { + unsigned long ps = page_sizes[i]; + int arg = ilog2(ps) << MAP_HUGE_SHIFT; + printf("Testing %luMB mmap with shift %x\n", ps >> 20, arg); + test_mmap(ps, MAP_HUGETLB | arg); + } + printf("Testing default huge mmap\n"); + test_mmap(default_hps, SHM_HUGETLB); + + puts("Testing non-huge shmget"); + test_shmget(getpagesize(), 0); + + for (i = 0; i < num_page_sizes; i++) { + unsigned long ps = page_sizes[i]; + int arg = ilog2(ps) << SHM_HUGE_SHIFT; + printf("Testing %luMB shmget with shift %x\n", ps >> 20, arg); + test_shmget(ps, SHM_HUGETLB | arg); + } + puts("default huge shmget"); + test_shmget(default_hps, SHM_HUGETLB); + + return 0; +} diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile new file mode 100644 
index 00000000000..e775adcbd29 --- /dev/null +++ b/tools/thermal/tmon/Makefile @@ -0,0 +1,47 @@ +VERSION = 1.0 + +BINDIR=usr/bin +WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int +CFLAGS= -O1 ${WARNFLAGS} -fstack-protector +CC=gcc + +CFLAGS+=-D VERSION=\"$(VERSION)\" +LDFLAGS+= +TARGET=tmon + +INSTALL_PROGRAM=install -m 755 -p +DEL_FILE=rm -f + +INSTALL_CONFIGFILE=install -m 644 -p +CONFIG_FILE= +CONFIG_PATH= + + +OBJS = tmon.o tui.o sysfs.o pid.o +OBJS += + +tmon: $(OBJS) Makefile tmon.h + $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread + +valgrind: tmon + sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null + +install: + - mkdir -p $(INSTALL_ROOT)/$(BINDIR) + - $(INSTALL_PROGRAM) "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)" + - mkdir -p $(INSTALL_ROOT)/$(CONFIG_PATH) + - $(INSTALL_CONFIGFILE) "$(CONFIG_FILE)" "$(INSTALL_ROOT)/$(CONFIG_PATH)" + +uninstall: + $(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)" + $(CONFIG_FILE) "$(CONFIG_PATH)" + + +clean: + find . -name "*.o" | xargs $(DEL_FILE) + rm -f $(TARGET) + +dist: + git tag v$(VERSION) + git archive --format=tar --prefix="$(TARGET)-$(VERSION)/" v$(VERSION) | \ + gzip > $(TARGET)-$(VERSION).tar.gz diff --git a/tools/thermal/tmon/README b/tools/thermal/tmon/README new file mode 100644 index 00000000000..457949897a8 --- /dev/null +++ b/tools/thermal/tmon/README @@ -0,0 +1,50 @@ +TMON - A Monitoring and Testing Tool for Linux kernel thermal subsystem + +Why TMON? +========== +Increasingly, Linux is running on thermally constrained devices. The simple +thermal relationship between processor and fan has become past for modern +computers. + +As hardware vendors cope with the thermal constraints on their products, more +and more sensors are added, new cooling capabilities are introduced. The +complexity of the thermal relationship can grow exponentially among cooling +devices, zones, sensors, and trip points. They can also change dynamically. + +To expose such relationship to the userspace, Linux generic thermal layer +introduced sysfs entry at /sys/class/thermal with a matrix of symbolic +links, trip point bindings, and device instances. To traverse such +matrix by hand is not a trivial task. Testing is also difficult in that +thermal conditions are often exception cases that hard to reach in +normal operations. + +TMON is conceived as a tool to help visualize, tune, and test the +complex thermal subsystem. + +Files +===== + tmon.c : main function for set up and configurations. + tui.c : handles ncurses based user interface + sysfs.c : access to the generic thermal sysfs + pid.c : a proportional-integral-derivative (PID) controller + that can be used for thermal relationship training. + +Requirements +============ +Depends on ncurses + +Build +========= +$ make +$ sudo ./tmon -h +Usage: tmon [OPTION...] + -c, --control cooling device in control + -d, --daemon run as daemon, no TUI + -l, --log log data to /var/tmp/tmon.log + -h, --help show this help message + -t, --time-interval set time interval for sampling + -v, --version show version + -g, --debug debug message in syslog + +1. 
For monitoring only: +$ sudo ./tmon diff --git a/tools/thermal/tmon/pid.c b/tools/thermal/tmon/pid.c new file mode 100644 index 00000000000..fd7e9e9d6f4 --- /dev/null +++ b/tools/thermal/tmon/pid.c @@ -0,0 +1,131 @@ +/* + * pid.c PID controller for testing cooling devices + * + * + * + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 or later as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author Name Jacob Pan <jacob.jun.pan@linux.intel.com> + * + */ + +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <sys/types.h> +#include <dirent.h> +#include <libintl.h> +#include <ctype.h> +#include <assert.h> +#include <time.h> +#include <limits.h> +#include <math.h> +#include <sys/stat.h> +#include <syslog.h> + +#include "tmon.h" + +/************************************************************************** + * PID (Proportional-Integral-Derivative) controller is commonly used in + * linear control system, consider the the process. + * G(s) = U(s)/E(s) + * kp = proportional gain + * ki = integral gain + * kd = derivative gain + * Ts + * We use type C Alan Bradley equation which takes set point off the + * output dependency in P and D term. + * + * y[k] = y[k-1] - kp*(x[k] - x[k-1]) + Ki*Ts*e[k] - Kd*(x[k] + * - 2*x[k-1]+x[k-2])/Ts + * + * + ***********************************************************************/ +struct pid_params p_param; +/* cached data from previous loop */ +static double xk_1, xk_2; /* input temperature x[k-#] */ + +/* + * TODO: make PID parameters tuned automatically, + * 1. use CPU burn to produce open loop unit step response + * 2. calculate PID based on Ziegler-Nichols rule + * + * add a flag for tuning PID + */ +int init_thermal_controller(void) +{ + int ret = 0; + + /* init pid params */ + p_param.ts = ticktime; + /* TODO: get it from TUI tuning tab */ + p_param.kp = .36; + p_param.ki = 5.0; + p_param.kd = 0.19; + + p_param.t_target = target_temp_user; + + return ret; +} + +void controller_reset(void) +{ + /* TODO: relax control data when not over thermal limit */ + syslog(LOG_DEBUG, "TC inactive, relax p-state\n"); + p_param.y_k = 0.0; + xk_1 = 0.0; + xk_2 = 0.0; + set_ctrl_state(0); +} + +/* To be called at time interval Ts. Type C PID controller. 
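+ * Here x[k] is the sampled zone temperature in degrees C, e[k] = t_target - x[k],
+ * and y[k] is the accumulated output whose magnitude set_ctrl_state() applies
+ * as a percentage of each control cooling device's max_state.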
+ * y[k] = y[k-1] - kp*(x[k] - x[k-1]) + Ki*Ts*e[k] - Kd*(x[k] + * - 2*x[k-1]+x[k-2])/Ts + * TODO: add low pass filter for D term + */ +#define GUARD_BAND (2) +void controller_handler(const double xk, double *yk) +{ + double ek; + double p_term, i_term, d_term; + + ek = p_param.t_target - xk; /* error */ + if (ek >= 3.0) { + syslog(LOG_DEBUG, "PID: %3.1f Below set point %3.1f, stop\n", + xk, p_param.t_target); + controller_reset(); + *yk = 0.0; + return; + } + /* compute intermediate PID terms */ + p_term = -p_param.kp * (xk - xk_1); + i_term = p_param.kp * p_param.ki * p_param.ts * ek; + d_term = -p_param.kp * p_param.kd * (xk - 2 * xk_1 + xk_2) / p_param.ts; + /* compute output */ + *yk += p_term + i_term + d_term; + /* update sample data */ + xk_1 = xk; + xk_2 = xk_1; + + /* clamp output adjustment range */ + if (*yk < -LIMIT_HIGH) + *yk = -LIMIT_HIGH; + else if (*yk > -LIMIT_LOW) + *yk = -LIMIT_LOW; + + p_param.y_k = *yk; + + set_ctrl_state(lround(fabs(p_param.y_k))); + +} diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c new file mode 100644 index 00000000000..dfe454855cd --- /dev/null +++ b/tools/thermal/tmon/sysfs.c @@ -0,0 +1,596 @@ +/* + * sysfs.c sysfs ABI access functions for TMON program + * + * Copyright (C) 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 or later as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Jacob Pan <jacob.jun.pan@linux.intel.com> + * + */ +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <dirent.h> +#include <libintl.h> +#include <ctype.h> +#include <time.h> +#include <syslog.h> +#include <sys/time.h> +#include <errno.h> + +#include "tmon.h" + +struct tmon_platform_data ptdata; +const char *trip_type_name[] = { + "critical", + "hot", + "passive", + "active", +}; + +int sysfs_set_ulong(char *path, char *filename, unsigned long val) +{ + FILE *fd; + int ret = -1; + char filepath[256]; + + snprintf(filepath, 256, "%s/%s", path, filename); + + fd = fopen(filepath, "w"); + if (!fd) { + syslog(LOG_ERR, "Err: open %s: %s\n", __func__, filepath); + return ret; + } + ret = fprintf(fd, "%lu", val); + fclose(fd); + + return 0; +} + +/* history of thermal data, used for control algo */ +#define NR_THERMAL_RECORDS 3 +struct thermal_data_record trec[NR_THERMAL_RECORDS]; +int cur_thermal_record; /* index to the trec array */ + +static int sysfs_get_ulong(char *path, char *filename, unsigned long *p_ulong) +{ + FILE *fd; + int ret = -1; + char filepath[256]; + + snprintf(filepath, 256, "%s/%s", path, filename); + + fd = fopen(filepath, "r"); + if (!fd) { + syslog(LOG_ERR, "Err: open %s: %s\n", __func__, filepath); + return ret; + } + ret = fscanf(fd, "%lu", p_ulong); + fclose(fd); + + return 0; +} + +static int sysfs_get_string(char *path, char *filename, char *str) +{ + FILE *fd; + int ret = -1; + char filepath[256]; + + snprintf(filepath, 256, "%s/%s", path, filename); + + fd = fopen(filepath, "r"); + if (!fd) { + syslog(LOG_ERR, "Err: open %s: %s\n", __func__, filepath); + return ret; + } + ret = fscanf(fd, "%256s", str); + fclose(fd); + + return ret; +} + +/* get states of the 
cooling device instance */ +static int probe_cdev(struct cdev_info *cdi, char *path) +{ + sysfs_get_string(path, "type", cdi->type); + sysfs_get_ulong(path, "max_state", &cdi->max_state); + sysfs_get_ulong(path, "cur_state", &cdi->cur_state); + + syslog(LOG_INFO, "%s: %s: type %s, max %lu, curr %lu inst %d\n", + __func__, path, + cdi->type, cdi->max_state, cdi->cur_state, cdi->instance); + + return 0; +} + +static int str_to_trip_type(char *name) +{ + int i; + + for (i = 0; i < NR_THERMAL_TRIP_TYPE; i++) { + if (!strcmp(name, trip_type_name[i])) + return i; + } + + return -ENOENT; +} + +/* scan and fill in trip point info for a thermal zone and trip point id */ +static int get_trip_point_data(char *tz_path, int tzid, int tpid) +{ + char filename[256]; + char temp_str[256]; + int trip_type; + + if (tpid >= MAX_NR_TRIP) + return -EINVAL; + /* check trip point type */ + snprintf(filename, sizeof(filename), "trip_point_%d_type", tpid); + sysfs_get_string(tz_path, filename, temp_str); + trip_type = str_to_trip_type(temp_str); + if (trip_type < 0) { + syslog(LOG_ERR, "%s:%s no matching type\n", __func__, temp_str); + return -ENOENT; + } + ptdata.tzi[tzid].tp[tpid].type = trip_type; + syslog(LOG_INFO, "%s:tz:%d tp:%d:type:%s type id %d\n", __func__, tzid, + tpid, temp_str, trip_type); + + /* TODO: check attribute */ + + return 0; +} + +/* return instance id for file format such as trip_point_4_temp */ +static int get_instance_id(char *name, int pos, int skip) +{ + char *ch; + int i = 0; + + ch = strtok(name, "_"); + while (ch != NULL) { + ++i; + syslog(LOG_INFO, "%s:%s:%s:%d", __func__, name, ch, i); + ch = strtok(NULL, "_"); + if (pos == i) + return atol(ch + skip); + } + + return -1; +} + +/* Find trip point info of a thermal zone */ +static int find_tzone_tp(char *tz_name, char *d_name, struct tz_info *tzi, + int tz_id) +{ + int tp_id; + unsigned long temp_ulong; + + if (strstr(d_name, "trip_point") && + strstr(d_name, "temp")) { + /* check if trip point temp is non-zero + * ignore 0/invalid trip points + */ + sysfs_get_ulong(tz_name, d_name, &temp_ulong); + if (temp_ulong < MAX_TEMP_KC) { + tzi->nr_trip_pts++; + /* found a valid trip point */ + tp_id = get_instance_id(d_name, 2, 0); + syslog(LOG_DEBUG, "tzone %s trip %d temp %lu tpnode %s", + tz_name, tp_id, temp_ulong, d_name); + if (tp_id < 0 || tp_id >= MAX_NR_TRIP) { + syslog(LOG_ERR, "Failed to find TP inst %s\n", + d_name); + return -1; + } + get_trip_point_data(tz_name, tz_id, tp_id); + tzi->tp[tp_id].temp = temp_ulong; + } + } + + return 0; +} + +/* check cooling devices for binding info. */ +static int find_tzone_cdev(struct dirent *nl, char *tz_name, + struct tz_info *tzi, int tz_id, int cid) +{ + unsigned long trip_instance = 0; + char cdev_name_linked[256]; + char cdev_name[256]; + char cdev_trip_name[256]; + int cdev_id; + + if (nl->d_type == DT_LNK) { + syslog(LOG_DEBUG, "TZ%d: cdev: %s cid %d\n", tz_id, nl->d_name, + cid); + tzi->nr_cdev++; + if (tzi->nr_cdev > ptdata.nr_cooling_dev) { + syslog(LOG_ERR, "Err: Too many cdev? 
%d\n", + tzi->nr_cdev); + return -EINVAL; + } + /* find the link to real cooling device record binding */ + snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name); + memset(cdev_name_linked, 0, sizeof(cdev_name_linked)); + if (readlink(cdev_name, cdev_name_linked, + sizeof(cdev_name_linked) - 1) != -1) { + cdev_id = get_instance_id(cdev_name_linked, 1, + sizeof("device") - 1); + syslog(LOG_DEBUG, "cdev %s linked to %s : %d\n", + cdev_name, cdev_name_linked, cdev_id); + tzi->cdev_binding |= (1 << cdev_id); + + /* find the trip point in which the cdev is binded to + * in this tzone + */ + snprintf(cdev_trip_name, 256, "%s%s", nl->d_name, + "_trip_point"); + sysfs_get_ulong(tz_name, cdev_trip_name, + &trip_instance); + /* validate trip point range, e.g. trip could return -1 + * when passive is enabled + */ + if (trip_instance > MAX_NR_TRIP) + trip_instance = 0; + tzi->trip_binding[cdev_id] |= 1 << trip_instance; + syslog(LOG_DEBUG, "cdev %s -> trip:%lu: 0x%lx %d\n", + cdev_name, trip_instance, + tzi->trip_binding[cdev_id], + cdev_id); + + + } + return 0; + } + + return -ENODEV; +} + + + +/***************************************************************************** + * Before calling scan_tzones, thermal sysfs must be probed to determine + * the number of thermal zones and cooling devices. + * We loop through each thermal zone and fill in tz_info struct, i.e. + * ptdata.tzi[] +root@jacob-chiefriver:~# tree -d /sys/class/thermal/thermal_zone0 +/sys/class/thermal/thermal_zone0 +|-- cdev0 -> ../cooling_device4 +|-- cdev1 -> ../cooling_device3 +|-- cdev10 -> ../cooling_device7 +|-- cdev11 -> ../cooling_device6 +|-- cdev12 -> ../cooling_device5 +|-- cdev2 -> ../cooling_device2 +|-- cdev3 -> ../cooling_device1 +|-- cdev4 -> ../cooling_device0 +|-- cdev5 -> ../cooling_device12 +|-- cdev6 -> ../cooling_device11 +|-- cdev7 -> ../cooling_device10 +|-- cdev8 -> ../cooling_device9 +|-- cdev9 -> ../cooling_device8 +|-- device -> ../../../LNXSYSTM:00/device:62/LNXTHERM:00 +|-- power +`-- subsystem -> ../../../../class/thermal +*****************************************************************************/ +static int scan_tzones(void) +{ + DIR *dir; + struct dirent **namelist; + char tz_name[256]; + int i, j, n, k = 0; + + if (!ptdata.nr_tz_sensor) + return -1; + + for (i = 0; i <= ptdata.max_tz_instance; i++) { + memset(tz_name, 0, sizeof(tz_name)); + snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE, i); + + dir = opendir(tz_name); + if (!dir) { + syslog(LOG_INFO, "Thermal zone %s skipped\n", tz_name); + continue; + } + /* keep track of valid tzones */ + n = scandir(tz_name, &namelist, 0, alphasort); + if (n < 0) + syslog(LOG_ERR, "scandir failed in %s", tz_name); + else { + sysfs_get_string(tz_name, "type", ptdata.tzi[k].type); + ptdata.tzi[k].instance = i; + /* detect trip points and cdev attached to this tzone */ + j = 0; /* index for cdev */ + ptdata.tzi[k].nr_cdev = 0; + ptdata.tzi[k].nr_trip_pts = 0; + while (n--) { + char *temp_str; + + if (find_tzone_tp(tz_name, namelist[n]->d_name, + &ptdata.tzi[k], k)) + break; + temp_str = strstr(namelist[n]->d_name, "cdev"); + if (!temp_str) { + free(namelist[n]); + continue; + } + if (!find_tzone_cdev(namelist[n], tz_name, + &ptdata.tzi[k], i, j)) + j++; /* increment cdev index */ + free(namelist[n]); + } + free(namelist); + } + /*TODO: reverse trip points */ + closedir(dir); + syslog(LOG_INFO, "TZ %d has %d cdev\n", i, + ptdata.tzi[k].nr_cdev); + k++; + } + + return 0; +} + +static int scan_cdevs(void) +{ + DIR *dir; + struct dirent **namelist; + char 
cdev_name[256]; + int i, n, k = 0; + + if (!ptdata.nr_cooling_dev) { + fprintf(stderr, "No cooling devices found\n"); + return 0; + } + for (i = 0; i <= ptdata.max_cdev_instance; i++) { + memset(cdev_name, 0, sizeof(cdev_name)); + snprintf(cdev_name, 256, "%s/%s%d", THERMAL_SYSFS, CDEV, i); + + dir = opendir(cdev_name); + if (!dir) { + syslog(LOG_INFO, "Cooling dev %s skipped\n", cdev_name); + /* there is a gap in cooling device id, check again + * for the same index. + */ + continue; + } + + n = scandir(cdev_name, &namelist, 0, alphasort); + if (n < 0) + syslog(LOG_ERR, "scandir failed in %s", cdev_name); + else { + sysfs_get_string(cdev_name, "type", ptdata.cdi[k].type); + ptdata.cdi[k].instance = i; + if (strstr(ptdata.cdi[k].type, ctrl_cdev)) { + ptdata.cdi[k].flag |= CDEV_FLAG_IN_CONTROL; + syslog(LOG_DEBUG, "control cdev id %d\n", i); + } + while (n--) + free(namelist[n]); + free(namelist); + } + closedir(dir); + k++; + } + return 0; +} + + +int probe_thermal_sysfs(void) +{ + DIR *dir; + struct dirent **namelist; + int n; + + dir = opendir(THERMAL_SYSFS); + if (!dir) { + fprintf(stderr, "\nNo thermal sysfs, exit\n"); + return -1; + } + n = scandir(THERMAL_SYSFS, &namelist, 0, alphasort); + if (n < 0) + syslog(LOG_ERR, "scandir failed in thermal sysfs"); + else { + /* detect number of thermal zones and cooling devices */ + while (n--) { + int inst; + + if (strstr(namelist[n]->d_name, CDEV)) { + inst = get_instance_id(namelist[n]->d_name, 1, + sizeof("device") - 1); + /* keep track of the max cooling device since + * there may be gaps. + */ + if (inst > ptdata.max_cdev_instance) + ptdata.max_cdev_instance = inst; + + syslog(LOG_DEBUG, "found cdev: %s %d %d\n", + namelist[n]->d_name, + ptdata.nr_cooling_dev, + ptdata.max_cdev_instance); + ptdata.nr_cooling_dev++; + } else if (strstr(namelist[n]->d_name, TZONE)) { + inst = get_instance_id(namelist[n]->d_name, 1, + sizeof("zone") - 1); + if (inst > ptdata.max_tz_instance) + ptdata.max_tz_instance = inst; + + syslog(LOG_DEBUG, "found tzone: %s %d %d\n", + namelist[n]->d_name, + ptdata.nr_tz_sensor, + ptdata.max_tz_instance); + ptdata.nr_tz_sensor++; + } + free(namelist[n]); + } + free(namelist); + } + syslog(LOG_INFO, "found %d tzone(s), %d cdev(s), target zone %d\n", + ptdata.nr_tz_sensor, ptdata.nr_cooling_dev, + target_thermal_zone); + closedir(dir); + + if (!ptdata.nr_tz_sensor) { + fprintf(stderr, "\nNo thermal zones found, exit\n\n"); + return -1; + } + + ptdata.tzi = calloc(sizeof(struct tz_info), ptdata.max_tz_instance+1); + if (!ptdata.tzi) { + fprintf(stderr, "Err: allocate tz_info\n"); + return -1; + } + + /* we still show thermal zone information if there is no cdev */ + if (ptdata.nr_cooling_dev) { + ptdata.cdi = calloc(sizeof(struct cdev_info), + ptdata.max_cdev_instance + 1); + if (!ptdata.cdi) { + free(ptdata.tzi); + fprintf(stderr, "Err: allocate cdev_info\n"); + return -1; + } + } + + /* now probe tzones */ + if (scan_tzones()) + return -1; + if (scan_cdevs()) + return -1; + return 0; +} + +/* convert sysfs zone instance to zone array index */ +int zone_instance_to_index(int zone_inst) +{ + int i; + + for (i = 0; i < ptdata.nr_tz_sensor; i++) + if (ptdata.tzi[i].instance == zone_inst) + return i; + return -ENOENT; +} + +/* read temperature of all thermal zones */ +int update_thermal_data() +{ + int i; + char tz_name[256]; + static unsigned long samples; + + if (!ptdata.nr_tz_sensor) { + syslog(LOG_ERR, "No thermal zones found!\n"); + return -1; + } + + /* circular buffer for keeping historic data */ + if 
(cur_thermal_record >= NR_THERMAL_RECORDS) + cur_thermal_record = 0; + gettimeofday(&trec[cur_thermal_record].tv, NULL); + if (tmon_log) { + fprintf(tmon_log, "%lu ", ++samples); + fprintf(tmon_log, "%3.1f ", p_param.t_target); + } + for (i = 0; i < ptdata.nr_tz_sensor; i++) { + memset(tz_name, 0, sizeof(tz_name)); + snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE, + ptdata.tzi[i].instance); + sysfs_get_ulong(tz_name, "temp", + &trec[cur_thermal_record].temp[i]); + if (tmon_log) + fprintf(tmon_log, "%lu ", + trec[cur_thermal_record].temp[i]/1000); + } + for (i = 0; i < ptdata.nr_cooling_dev; i++) { + char cdev_name[256]; + unsigned long val; + + snprintf(cdev_name, 256, "%s/%s%d", THERMAL_SYSFS, CDEV, + ptdata.cdi[i].instance); + probe_cdev(&ptdata.cdi[i], cdev_name); + val = ptdata.cdi[i].cur_state; + if (val > 1000000) + val = 0; + if (tmon_log) + fprintf(tmon_log, "%lu ", val); + } + + if (tmon_log) { + fprintf(tmon_log, "\n"); + fflush(tmon_log); + } + + return 0; +} + +void set_ctrl_state(unsigned long state) +{ + char ctrl_cdev_path[256]; + int i; + unsigned long cdev_state; + + if (no_control) + return; + /* set all ctrl cdev to the same state */ + for (i = 0; i < ptdata.nr_cooling_dev; i++) { + if (ptdata.cdi[i].flag & CDEV_FLAG_IN_CONTROL) { + if (ptdata.cdi[i].max_state < 10) { + strcpy(ctrl_cdev, "None."); + return; + } + /* scale to percentage of max_state */ + cdev_state = state * ptdata.cdi[i].max_state/100; + syslog(LOG_DEBUG, + "ctrl cdev %d set state %lu scaled to %lu\n", + ptdata.cdi[i].instance, state, cdev_state); + snprintf(ctrl_cdev_path, 256, "%s/%s%d", THERMAL_SYSFS, + CDEV, ptdata.cdi[i].instance); + syslog(LOG_DEBUG, "ctrl cdev path %s", ctrl_cdev_path); + sysfs_set_ulong(ctrl_cdev_path, "cur_state", + cdev_state); + } + } +} + +void get_ctrl_state(unsigned long *state) +{ + char ctrl_cdev_path[256]; + int ctrl_cdev_id = -1; + int i; + + /* TODO: take average of all ctrl types. also consider change based on + * uevent. Take the first reading for now. + */ + for (i = 0; i < ptdata.nr_cooling_dev; i++) { + if (ptdata.cdi[i].flag & CDEV_FLAG_IN_CONTROL) { + ctrl_cdev_id = ptdata.cdi[i].instance; + syslog(LOG_INFO, "ctrl cdev %d get state\n", + ptdata.cdi[i].instance); + break; + } + } + if (ctrl_cdev_id == -1) { + *state = 0; + return; + } + snprintf(ctrl_cdev_path, 256, "%s/%s%d", THERMAL_SYSFS, + CDEV, ctrl_cdev_id); + sysfs_get_ulong(ctrl_cdev_path, "cur_state", state); +} + +void free_thermal_data(void) +{ + free(ptdata.tzi); + free(ptdata.cdi); +} diff --git a/tools/thermal/tmon/tmon.8 b/tools/thermal/tmon/tmon.8 new file mode 100644 index 00000000000..0be727cb989 --- /dev/null +++ b/tools/thermal/tmon/tmon.8 @@ -0,0 +1,142 @@ +.TH TMON 8 +.SH NAME +\fBtmon\fP - A monitoring and testing tool for Linux kernel thermal subsystem + +.SH SYNOPSIS +.ft B +.B tmon +.RB [ Options ] +.br +.SH DESCRIPTION +\fBtmon \fP can be used to visualize thermal relationship and +real-time thermal data; tune +and test cooling devices and sensors; collect thermal data for offline +analysis and plot. \fBtmon\fP must be run as root in order to control device +states via sysfs. +.PP +\fBFunctions\fP +.PP +.nf +1. Thermal relationships: +- show thermal zone information +- show cooling device information +- show trip point binding within each thermal zone +- show trip point and cooling device instance bindings +.PP +2. Real time data display +- show temperature of all thermal zones w.r.t. its trip points and types +- show states of all cooling devices +.PP +3. 
Thermal relationship learning and device tuning +- with a built-in Proportional Integral Derivative (\fBPID\fP) +controller, user can pair a cooling device to a thermal sensor for +testing the effectiveness and learn about the thermal distance between the two +- allow manual control of cooling device states and target temperature +.PP +4. Data logging in /var/tmp/tmon.log +- contains thermal configuration data, i.e. cooling device, thermal + zones, and trip points. Can be used for data collection in remote + debugging. +- log real-time thermal data into space separated format that can be + directly consumed by plotting tools such as Rscript. + +.SS Options +.PP +The \fB-c --control\fP option sets a cooling device type to control temperature +of a thermal zone +.PP +The \fB-d --daemon\fP option runs \fBtmon \fP as daemon without user interface +.PP +The \fB-g --debug\fP option allow debug messages to be stored in syslog +.PP +The \fB-h --help\fP option shows help message +.PP +The \fB-l --log\fP option write data to /var/tmp/tmon.log +.PP +The \fB-t --time-interval\fP option sets the polling interval in seconds +.PP +The \fB-v --version\fP option shows the version of \fBtmon \fP +.PP +The \fB-z --zone\fP option sets the target therma zone instance to be controlled +.PP + +.SH FIELD DESCRIPTIONS +.nf +.PP +\fBP \fP passive cooling trip point type +\fBA \fP active cooling trip point type (fan) +\fBC \fP critical trip point type +\fBA \fP hot trip point type +\fBkp \fP proportional gain of \fBPID\fP controller +\fBki \fP integral gain of \fBPID\fP controller +\fBkd \fP derivative gain of \fBPID\fP controller + +.SH REQUIREMENT +Build depends on ncurses +.PP +Runtime depends on window size large enough to show the number of +devices found on the system. + +.PP + +.SH INTERACTIVE COMMANDS +.pp +.nf +\fBCtrl-C, q/Q\fP stops \fBtmon\fP +\fBTAB\fP shows tuning pop up panel, choose a letter to modify + +.SH EXAMPLES +Without any parameters, tmon is in monitoring only mode and refresh +screen every 1 second. +.PP +1. For monitoring only: +.nf +$ sudo ./tmon + +2. Use Processor cooling device to control thermal zone 0 at default 65C. +$ sudo ./tmon -c Processor -z 0 + +3. Use intel_powerclamp(idle injection) cooling device to control thermal zone 1 +$ sudo ./tmon -c intel_powerclamp -z 1 + +4. Turn on debug and collect data log at /var/tmp/tmon.log +$ sudo ./tmon -g -l + +For example, the log below shows PID controller was adjusting current states +for all cooling devices with "Processor" type such that thermal zone 0 +can stay below 65 dC. 
+ +#---------- THERMAL DATA LOG STARTED ----------- +Samples TargetTemp acpitz0 acpitz1 Fan0 Fan1 Fan2 Fan3 Fan4 Fan5 +Fan6 Fan7 Fan8 Fan9 Processor10 Processor11 Processor12 Processor13 +LCD14 intel_powerclamp15 1 65.0 65 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 2 +65.0 66 65 0 0 0 0 0 0 0 0 0 0 4 4 4 4 6 0 3 65.0 60 54 0 0 0 0 0 0 0 0 +0 0 4 4 4 4 6 0 4 65.0 53 53 0 0 0 0 0 0 0 0 0 0 4 4 4 4 6 0 +5 65.0 52 52 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 +6 65.0 53 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 +7 65.0 68 70 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6 0 +8 65.0 68 68 0 0 0 0 0 0 0 0 0 0 5 5 5 5 6 0 +9 65.0 68 68 0 0 0 0 0 0 0 0 0 0 6 6 6 6 6 0 +10 65.0 67 67 0 0 0 0 0 0 0 0 0 0 7 7 7 7 6 0 +11 65.0 67 67 0 0 0 0 0 0 0 0 0 0 8 8 8 8 6 0 +12 65.0 67 67 0 0 0 0 0 0 0 0 0 0 8 8 8 8 6 0 +13 65.0 67 67 0 0 0 0 0 0 0 0 0 0 9 9 9 9 6 0 +14 65.0 66 66 0 0 0 0 0 0 0 0 0 0 10 10 10 10 6 0 +15 65.0 66 67 0 0 0 0 0 0 0 0 0 0 10 10 10 10 6 0 +16 65.0 66 66 0 0 0 0 0 0 0 0 0 0 11 11 11 11 6 0 +17 65.0 66 66 0 0 0 0 0 0 0 0 0 0 11 11 11 11 6 0 +18 65.0 64 61 0 0 0 0 0 0 0 0 0 0 11 11 11 11 6 0 +19 65.0 60 59 0 0 0 0 0 0 0 0 0 0 12 12 12 12 6 0 + +Data can be read directly into an array by an example R-script below: + +#!/usr/bin/Rscript +tdata <- read.table("/var/tmp/tmon.log", header=T, comment.char="#") +attach(tdata) +jpeg("tmon.jpg") +X11() +g_range <- range(0, intel_powerclamp15, TargetTemp, acpitz0) +plot( Samples, intel_powerclamp15, col="blue", ylim=g_range, axes=FALSE, ann=FALSE) +par(new=TRUE) +lines(TargetTemp, type="o", pch=22, lty=2, col="red") +dev.off() diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c new file mode 100644 index 00000000000..09b7c321833 --- /dev/null +++ b/tools/thermal/tmon/tmon.c @@ -0,0 +1,376 @@ +/* + * tmon.c Thermal Monitor (TMON) main function and entry point + * + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 or later as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Jacob Pan <jacob.jun.pan@linux.intel.com> + * + */ + +#include <getopt.h> +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <ncurses.h> +#include <ctype.h> +#include <time.h> +#include <signal.h> +#include <limits.h> +#include <sys/time.h> +#include <pthread.h> +#include <math.h> +#include <stdarg.h> +#include <syslog.h> + +#include "tmon.h" + +unsigned long ticktime = 1; /* seconds */ +unsigned long no_control = 1; /* monitoring only or use cooling device for + * temperature control. 
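+ * (1 = monitor only, the default; the -c option clears it so the PID
+ * loop may actuate the selected cooling device type.)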
+ */ +double time_elapsed = 0.0; +unsigned long target_temp_user = 65; /* can be select by tui later */ +int dialogue_on; +int tmon_exit; +static short daemon_mode; +static int logging; /* for recording thermal data to a file */ +static int debug_on; +FILE *tmon_log; +/*cooling device used for the PID controller */ +char ctrl_cdev[CDEV_NAME_SIZE] = "None"; +int target_thermal_zone; /* user selected target zone instance */ +static void start_daemon_mode(void); + +pthread_t event_tid; +pthread_mutex_t input_lock; +void usage() +{ + printf("Usage: tmon [OPTION...]\n"); + printf(" -c, --control cooling device in control\n"); + printf(" -d, --daemon run as daemon, no TUI\n"); + printf(" -g, --debug debug message in syslog\n"); + printf(" -h, --help show this help message\n"); + printf(" -l, --log log data to /var/tmp/tmon.log\n"); + printf(" -t, --time-interval sampling time interval, > 1 sec.\n"); + printf(" -v, --version show version\n"); + printf(" -z, --zone target thermal zone id\n"); + + exit(0); +} + +void version() +{ + printf("TMON version %s\n", VERSION); + exit(EXIT_SUCCESS); +} + +static void tmon_cleanup(void) +{ + + syslog(LOG_INFO, "TMON exit cleanup\n"); + fflush(stdout); + refresh(); + if (tmon_log) + fclose(tmon_log); + if (event_tid) { + pthread_mutex_lock(&input_lock); + pthread_cancel(event_tid); + pthread_mutex_unlock(&input_lock); + pthread_mutex_destroy(&input_lock); + } + closelog(); + /* relax control knobs, undo throttling */ + set_ctrl_state(0); + + keypad(stdscr, FALSE); + echo(); + nocbreak(); + close_windows(); + endwin(); + free_thermal_data(); + + exit(1); +} + + +static void tmon_sig_handler(int sig) +{ + syslog(LOG_INFO, "TMON caught signal %d\n", sig); + refresh(); + switch (sig) { + case SIGTERM: + printf("sigterm, exit and clean up\n"); + fflush(stdout); + break; + case SIGKILL: + printf("sigkill, exit and clean up\n"); + fflush(stdout); + break; + case SIGINT: + printf("ctrl-c, exit and clean up\n"); + fflush(stdout); + break; + default: + break; + } + tmon_exit = true; +} + + +static void start_syslog(void) +{ + if (debug_on) + setlogmask(LOG_UPTO(LOG_DEBUG)); + else + setlogmask(LOG_UPTO(LOG_ERR)); + openlog("tmon.log", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL0); + syslog(LOG_NOTICE, "TMON started by User %d", getuid()); +} + +static void prepare_logging(void) +{ + int i; + struct stat logstat; + + if (!logging) + return; + /* open local data log file */ + tmon_log = fopen(TMON_LOG_FILE, "w+"); + if (!tmon_log) { + syslog(LOG_ERR, "failed to open log file %s\n", TMON_LOG_FILE); + return; + } + + if (lstat(TMON_LOG_FILE, &logstat) < 0) { + syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE); + fclose(tmon_log); + tmon_log = NULL; + return; + } + + /* The log file must be a regular file owned by us */ + if (S_ISLNK(logstat.st_mode)) { + syslog(LOG_ERR, "Log file is a symlink. Will not log\n"); + fclose(tmon_log); + tmon_log = NULL; + return; + } + + if (logstat.st_uid != getuid()) { + syslog(LOG_ERR, "We don't own the log file. Not logging\n"); + fclose(tmon_log); + tmon_log = NULL; + return; + } + + + fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n"); + for (i = 0; i < ptdata.nr_tz_sensor; i++) { + char binding_str[33]; /* size of long + 1 */ + int j; + + memset(binding_str, 0, sizeof(binding_str)); + for (j = 0; j < 32; j++) + binding_str[j] = (ptdata.tzi[i].cdev_binding & 1<<j) ? 
+ '1' : '0'; + + fprintf(tmon_log, "#thermal zone %s%02d cdevs binding: %32s\n", + ptdata.tzi[i].type, + ptdata.tzi[i].instance, + binding_str); + for (j = 0; j < ptdata.tzi[i].nr_trip_pts; j++) { + fprintf(tmon_log, "#\tTP%02d type:%s, temp:%lu\n", j, + trip_type_name[ptdata.tzi[i].tp[j].type], + ptdata.tzi[i].tp[j].temp); + } + + } + + for (i = 0; i < ptdata.nr_cooling_dev; i++) + fprintf(tmon_log, "#cooling devices%02d: %s\n", + i, ptdata.cdi[i].type); + + fprintf(tmon_log, "#---------- THERMAL DATA LOG STARTED -----------\n"); + fprintf(tmon_log, "Samples TargetTemp "); + for (i = 0; i < ptdata.nr_tz_sensor; i++) { + fprintf(tmon_log, "%s%d ", ptdata.tzi[i].type, + ptdata.tzi[i].instance); + } + for (i = 0; i < ptdata.nr_cooling_dev; i++) + fprintf(tmon_log, "%s%d ", ptdata.cdi[i].type, + ptdata.cdi[i].instance); + + fprintf(tmon_log, "\n"); +} + +static struct option opts[] = { + { "control", 1, NULL, 'c' }, + { "daemon", 0, NULL, 'd' }, + { "time-interval", 1, NULL, 't' }, + { "log", 0, NULL, 'l' }, + { "help", 0, NULL, 'h' }, + { "version", 0, NULL, 'v' }, + { "debug", 0, NULL, 'g' }, + { 0, 0, NULL, 0 } +}; + + +int main(int argc, char **argv) +{ + int err = 0; + int id2 = 0, c; + double yk = 0.0; /* controller output */ + int target_tz_index; + + if (geteuid() != 0) { + printf("TMON needs to be run as root\n"); + exit(EXIT_FAILURE); + } + + while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) { + switch (c) { + case 'c': + no_control = 0; + strncpy(ctrl_cdev, optarg, CDEV_NAME_SIZE); + break; + case 'd': + start_daemon_mode(); + printf("Run TMON in daemon mode\n"); + break; + case 't': + ticktime = strtod(optarg, NULL); + if (ticktime < 1) + ticktime = 1; + break; + case 'l': + printf("Logging data to /var/tmp/tmon.log\n"); + logging = 1; + break; + case 'h': + usage(); + break; + case 'v': + version(); + break; + case 'g': + debug_on = 1; + break; + case 'z': + target_thermal_zone = strtod(optarg, NULL); + break; + default: + break; + } + } + if (pthread_mutex_init(&input_lock, NULL) != 0) { + fprintf(stderr, "\n mutex init failed, exit\n"); + return 1; + } + start_syslog(); + if (signal(SIGINT, tmon_sig_handler) == SIG_ERR) + syslog(LOG_DEBUG, "Cannot handle SIGINT\n"); + if (signal(SIGTERM, tmon_sig_handler) == SIG_ERR) + syslog(LOG_DEBUG, "Cannot handle SIGINT\n"); + + if (probe_thermal_sysfs()) { + pthread_mutex_destroy(&input_lock); + closelog(); + return -1; + } + initialize_curses(); + setup_windows(); + signal(SIGWINCH, resize_handler); + show_title_bar(); + show_sensors_w(); + show_cooling_device(); + update_thermal_data(); + show_data_w(); + prepare_logging(); + init_thermal_controller(); + + nodelay(stdscr, TRUE); + err = pthread_create(&event_tid, NULL, &handle_tui_events, NULL); + if (err != 0) { + printf("\ncan't create thread :[%s]", strerror(err)); + tmon_cleanup(); + exit(EXIT_FAILURE); + } + + /* validate range of user selected target zone, default to the first + * instance if out of range + */ + target_tz_index = zone_instance_to_index(target_thermal_zone); + if (target_tz_index < 0) { + target_thermal_zone = ptdata.tzi[0].instance; + syslog(LOG_ERR, "target zone is not found, default to %d\n", + target_thermal_zone); + } + while (1) { + sleep(ticktime); + show_title_bar(); + show_sensors_w(); + update_thermal_data(); + if (!dialogue_on) { + show_data_w(); + show_cooling_device(); + } + cur_thermal_record++; + time_elapsed += ticktime; + controller_handler(trec[0].temp[target_tz_index] / 1000, + &yk); + trec[0].pid_out_pct = yk; + if 
(!dialogue_on) + show_control_w(); + if (tmon_exit) + break; + } + tmon_cleanup(); + return 0; +} + +static void start_daemon_mode() +{ + daemon_mode = 1; + /* fork */ + pid_t sid, pid = fork(); + if (pid < 0) { + exit(EXIT_FAILURE); + } else if (pid > 0) + /* kill parent */ + exit(EXIT_SUCCESS); + + /* disable TUI, it may not be necessary, but saves some resource */ + disable_tui(); + + /* change the file mode mask */ + umask(S_IWGRP | S_IWOTH); + + /* new SID for the daemon process */ + sid = setsid(); + if (sid < 0) + exit(EXIT_FAILURE); + + /* change working directory */ + if ((chdir("/")) < 0) + exit(EXIT_FAILURE); + + + sleep(10); + + close(STDIN_FILENO); + close(STDOUT_FILENO); + close(STDERR_FILENO); + +} diff --git a/tools/thermal/tmon/tmon.h b/tools/thermal/tmon/tmon.h new file mode 100644 index 00000000000..9e3c49c547a --- /dev/null +++ b/tools/thermal/tmon/tmon.h @@ -0,0 +1,204 @@ +/* + * tmon.h contains data structures and constants used by TMON + * + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 or later as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author Name Jacob Pan <jacob.jun.pan@linux.intel.com> + * + */ + +#ifndef TMON_H +#define TMON_H + +#define MAX_DISP_TEMP 125 +#define MAX_CTRL_TEMP 105 +#define MIN_CTRL_TEMP 40 +#define MAX_NR_TZONE 16 +#define MAX_NR_CDEV 32 +#define MAX_NR_TRIP 16 +#define MAX_NR_CDEV_TRIP 12 /* number of cooling devices that can bind + * to a thermal zone trip. + */ +#define MAX_TEMP_KC 140000 +/* starting char position to draw sensor data, such as tz names + * trip point list, etc. + */ +#define DATA_LEFT_ALIGN 10 +#define NR_LINES_TZDATA 1 +#define TMON_LOG_FILE "/var/tmp/tmon.log" + +extern unsigned long ticktime; +extern double time_elapsed; +extern unsigned long target_temp_user; +extern int dialogue_on; +extern char ctrl_cdev[]; +extern pthread_mutex_t input_lock; +extern int tmon_exit; +extern int target_thermal_zone; +/* use fixed size record to simplify data processing and transfer + * TBD: more info to be added, e.g. programmable trip point data. +*/ +struct thermal_data_record { + struct timeval tv; + unsigned long temp[MAX_NR_TZONE]; + double pid_out_pct; +}; + +struct cdev_info { + char type[64]; + int instance; + unsigned long max_state; + unsigned long cur_state; + unsigned long flag; +}; + +enum trip_type { + THERMAL_TRIP_CRITICAL, + THERMAL_TRIP_HOT, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_ACTIVE, + NR_THERMAL_TRIP_TYPE, +}; + +struct trip_point { + enum trip_type type; + unsigned long temp; + unsigned long hysteresis; + int attribute; /* programmability etc. */ +}; + +/* thermal zone configuration information, binding with cooling devices could + * change at runtime. + */ +struct tz_info { + char type[256]; /* e.g. 
acpitz */ + int instance; + int passive; /* active zone has passive node to force passive mode */ + int nr_cdev; /* number of cooling device binded */ + int nr_trip_pts; + struct trip_point tp[MAX_NR_TRIP]; + unsigned long cdev_binding; /* bitmap for attached cdevs */ + /* cdev bind trip points, allow one cdev bind to multiple trips */ + unsigned long trip_binding[MAX_NR_CDEV]; +}; + +struct tmon_platform_data { + int nr_tz_sensor; + int nr_cooling_dev; + /* keep track of instance ids since there might be gaps */ + int max_tz_instance; + int max_cdev_instance; + struct tz_info *tzi; + struct cdev_info *cdi; +}; + +struct control_ops { + void (*set_ratio)(unsigned long ratio); + unsigned long (*get_ratio)(unsigned long ratio); + +}; + +enum cdev_types { + CDEV_TYPE_PROC, + CDEV_TYPE_FAN, + CDEV_TYPE_MEM, + CDEV_TYPE_NR, +}; + +/* REVISIT: the idea is to group sensors if possible, e.g. on intel mid + * we have "skin0", "skin1", "sys", "msicdie" + * on DPTF enabled systems, we might have PCH, TSKN, TAMB, etc. + */ +enum tzone_types { + TZONE_TYPE_ACPI, + TZONE_TYPE_PCH, + TZONE_TYPE_NR, +}; + +/* limit the output of PID controller adjustment */ +#define LIMIT_HIGH (95) +#define LIMIT_LOW (2) + +struct pid_params { + double kp; /* Controller gain from Dialog Box */ + double ki; /* Time-constant for I action from Dialog Box */ + double kd; /* Time-constant for D action from Dialog Box */ + double ts; + double k_lpf; + + double t_target; + double y_k; +}; + +extern int init_thermal_controller(void); +extern void controller_handler(const double xk, double *yk); + +extern struct tmon_platform_data ptdata; +extern struct pid_params p_param; + +extern FILE *tmon_log; +extern int cur_thermal_record; /* index to the trec array */ +extern struct thermal_data_record trec[]; +extern const char *trip_type_name[]; +extern unsigned long no_control; + +extern void initialize_curses(void); +extern void show_controller_stats(char *line); +extern void show_title_bar(void); +extern void setup_windows(void); +extern void disable_tui(void); +extern void show_sensors_w(void); +extern void show_data_w(void); +extern void write_status_bar(int x, char *line); +extern void show_control_w(); + +extern void show_cooling_device(void); +extern void show_dialogue(void); +extern int update_thermal_data(void); + +extern int probe_thermal_sysfs(void); +extern void free_thermal_data(void); +extern void resize_handler(int sig); +extern void set_ctrl_state(unsigned long state); +extern void get_ctrl_state(unsigned long *state); +extern void *handle_tui_events(void *arg); +extern int sysfs_set_ulong(char *path, char *filename, unsigned long val); +extern int zone_instance_to_index(int zone_inst); +extern void close_windows(void); + +#define PT_COLOR_DEFAULT 1 +#define PT_COLOR_HEADER_BAR 2 +#define PT_COLOR_ERROR 3 +#define PT_COLOR_RED 4 +#define PT_COLOR_YELLOW 5 +#define PT_COLOR_GREEN 6 +#define PT_COLOR_BRIGHT 7 +#define PT_COLOR_BLUE 8 + +/* each thermal zone uses 12 chars, 8 for name, 2 for instance, 2 space + * also used to list trip points in forms of AAAC, which represents + * A: Active + * C: Critical + */ +#define TZONE_RECORD_SIZE 12 +#define TZ_LEFT_ALIGN 32 +#define CDEV_NAME_SIZE 20 +#define CDEV_FLAG_IN_CONTROL (1 << 0) + +/* dialogue box starts */ +#define DIAG_X 48 +#define DIAG_Y 8 +#define THERMAL_SYSFS "/sys/class/thermal" +#define CDEV "cooling_device" +#define TZONE "thermal_zone" +#define TDATA_LEFT 16 +#endif /* TMON_H */ diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c new file mode 
100644 index 00000000000..89f8ef0e15c --- /dev/null +++ b/tools/thermal/tmon/tui.c @@ -0,0 +1,638 @@ +/* + * tui.c ncurses text user interface for TMON program + * + * Copyright (C) 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 or later as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Jacob Pan <jacob.jun.pan@linux.intel.com> + * + */ + +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <ncurses.h> +#include <time.h> +#include <syslog.h> +#include <panel.h> +#include <pthread.h> +#include <signal.h> + +#include "tmon.h" + +static PANEL *data_panel; +static PANEL *dialogue_panel; +static PANEL *top; + +static WINDOW *title_bar_window; +static WINDOW *tz_sensor_window; +static WINDOW *cooling_device_window; +static WINDOW *control_window; +static WINDOW *status_bar_window; +static WINDOW *thermal_data_window; +static WINDOW *dialogue_window; + +char status_bar_slots[10][40]; +static void draw_hbar(WINDOW *win, int y, int start, int len, + unsigned long pattern, bool end); + +static int maxx, maxy; +static int maxwidth = 200; + +#define TITLE_BAR_HIGHT 1 +#define SENSOR_WIN_HIGHT 4 /* one row for tz name, one for trip points */ + + +/* daemon mode flag (set by startup parameter -d) */ +static int tui_disabled; + +static void close_panel(PANEL *p) +{ + if (p) { + del_panel(p); + p = NULL; + } +} + +static void close_window(WINDOW *win) +{ + if (win) { + delwin(win); + win = NULL; + } +} + +void close_windows(void) +{ + if (tui_disabled) + return; + /* must delete panels before their attached windows */ + if (dialogue_window) + close_panel(dialogue_panel); + if (cooling_device_window) + close_panel(data_panel); + + close_window(title_bar_window); + close_window(tz_sensor_window); + close_window(status_bar_window); + close_window(cooling_device_window); + close_window(control_window); + close_window(thermal_data_window); + close_window(dialogue_window); + +} + +void write_status_bar(int x, char *line) +{ + mvwprintw(status_bar_window, 0, x, "%s", line); + wrefresh(status_bar_window); +} + +void setup_windows(void) +{ + int y_begin = 1; + + if (tui_disabled) + return; + + getmaxyx(stdscr, maxy, maxx); + resizeterm(maxy, maxx); + + title_bar_window = subwin(stdscr, TITLE_BAR_HIGHT, maxx, 0, 0); + y_begin += TITLE_BAR_HIGHT; + + tz_sensor_window = subwin(stdscr, SENSOR_WIN_HIGHT, maxx, y_begin, 0); + y_begin += SENSOR_WIN_HIGHT; + + cooling_device_window = subwin(stdscr, ptdata.nr_cooling_dev + 3, maxx, + y_begin, 0); + y_begin += ptdata.nr_cooling_dev + 3; /* 2 lines for border */ + /* two lines to show borders, one line per tz show trip point position + * and value. 
+ * dialogue window is a pop-up, when needed it lays on top of cdev win + */ + + dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50, + DIAG_Y, DIAG_X); + + thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor * + NR_LINES_TZDATA + 3, maxx, y_begin, 0); + y_begin += ptdata.nr_tz_sensor * NR_LINES_TZDATA + 3; + control_window = subwin(stdscr, 4, maxx, y_begin, 0); + + scrollok(cooling_device_window, TRUE); + maxwidth = maxx - 18; + status_bar_window = subwin(stdscr, 1, maxx, maxy-1, 0); + + strcpy(status_bar_slots[0], " Ctrl-c - Quit "); + strcpy(status_bar_slots[1], " TAB - Tuning "); + wmove(status_bar_window, 1, 30); + + /* prepare panels for dialogue, if panel already created then we must + * be doing resizing, so just replace windows with new ones, old ones + * should have been deleted by close_window + */ + data_panel = new_panel(cooling_device_window); + if (!data_panel) + syslog(LOG_DEBUG, "No data panel\n"); + else { + if (dialogue_window) { + dialogue_panel = new_panel(dialogue_window); + if (!dialogue_panel) + syslog(LOG_DEBUG, "No dialogue panel\n"); + else { + /* Set up the user pointer to the next panel*/ + set_panel_userptr(data_panel, dialogue_panel); + set_panel_userptr(dialogue_panel, data_panel); + top = data_panel; + } + } else + syslog(LOG_INFO, "no dialogue win, term too small\n"); + } + doupdate(); + werase(stdscr); + refresh(); +} + +void resize_handler(int sig) +{ + /* start over when term gets resized, but first we clean up */ + close_windows(); + endwin(); + refresh(); + clear(); + getmaxyx(stdscr, maxy, maxx); /* get the new screen size */ + setup_windows(); + /* rate limit */ + sleep(1); + syslog(LOG_DEBUG, "SIG %d, term resized to %d x %d\n", + sig, maxy, maxx); + signal(SIGWINCH, resize_handler); +} + +const char cdev_title[] = " COOLING DEVICES "; +void show_cooling_device(void) +{ + int i, j, x, y = 0; + + if (tui_disabled || !cooling_device_window) + return; + + werase(cooling_device_window); + wattron(cooling_device_window, A_BOLD); + mvwprintw(cooling_device_window, 1, 1, + "ID Cooling Dev Cur Max Thermal Zone Binding"); + wattroff(cooling_device_window, A_BOLD); + for (j = 0; j < ptdata.nr_cooling_dev; j++) { + /* draw cooling device list on the left in the order of + * cooling device instances. skip unused idr. + */ + mvwprintw(cooling_device_window, j + 2, 1, + "%02d %12.12s%6d %6d", + ptdata.cdi[j].instance, + ptdata.cdi[j].type, + ptdata.cdi[j].cur_state, + ptdata.cdi[j].max_state); + } + + /* show cdev binding, y is the global cooling device instance */ + for (i = 0; i < ptdata.nr_tz_sensor; i++) { + int tz_inst = ptdata.tzi[i].instance; + for (j = 0; j < ptdata.nr_cooling_dev; j++) { + int cdev_inst; + y = j; + x = tz_inst * TZONE_RECORD_SIZE + TZ_LEFT_ALIGN; + + draw_hbar(cooling_device_window, y+2, x, + TZONE_RECORD_SIZE-1, ACS_VLINE, false); + + /* draw a column of spaces to separate thermal zones */ + mvwprintw(cooling_device_window, y+2, x-1, " "); + if (ptdata.tzi[i].cdev_binding) { + cdev_inst = ptdata.cdi[j].instance; + unsigned long trip_binding = + ptdata.tzi[i].trip_binding[cdev_inst]; + int k = 0; /* per zone trip point id that + * binded to this cdev, one to + * many possible based on the + * binding bitmask. 
+ */ + syslog(LOG_DEBUG, + "bind tz%d cdev%d tp%lx %d cdev%lx\n", + i, j, trip_binding, y, + ptdata.tzi[i].cdev_binding); + /* draw each trip binding for the cdev */ + while (trip_binding >>= 1) { + k++; + if (!(trip_binding & 1)) + continue; + /* draw '*' to show binding */ + mvwprintw(cooling_device_window, + y + 2, + x + ptdata.tzi[i].nr_trip_pts - + k - 1, "*"); + } + } + } + } + /* draw border after data so that border will not be messed up + * even there is not enough space for all the data to be shown + */ + wborder(cooling_device_window, 0, 0, 0, 0, 0, 0, 0, 0); + wattron(cooling_device_window, A_BOLD); + mvwprintw(cooling_device_window, 0, maxx/2 - sizeof(cdev_title), + cdev_title); + wattroff(cooling_device_window, A_BOLD); + + wrefresh(cooling_device_window); +} + +const char DIAG_TITLE[] = "[ TUNABLES ]"; +#define DIAG_DEV_ROWS 5 +void show_dialogue(void) +{ + int j, x = 0, y = 0; + WINDOW *w = dialogue_window; + + if (tui_disabled || !w) + return; + + werase(w); + box(w, 0, 0); + mvwprintw(w, 0, maxx/4, DIAG_TITLE); + /* list all the available tunables */ + for (j = 0; j <= ptdata.nr_cooling_dev; j++) { + y = j % DIAG_DEV_ROWS; + if (y == 0 && j != 0) + x += 20; + if (j == ptdata.nr_cooling_dev) + /* save last choice for target temp */ + mvwprintw(w, y+1, x+1, "%C-%.12s", 'A'+j, "Set Temp"); + else + mvwprintw(w, y+1, x+1, "%C-%.10s-%2d", 'A'+j, + ptdata.cdi[j].type, ptdata.cdi[j].instance); + } + wattron(w, A_BOLD); + mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?"); + wattroff(w, A_BOLD); + /* y size of dialogue win is nr cdev + 5, so print legend + * at the bottom line + */ + mvwprintw(w, ptdata.nr_cooling_dev+3, 1, + "Legend: A=Active, P=Passive, C=Critical"); + + wrefresh(dialogue_window); +} + +void write_dialogue_win(char *buf, int y, int x) +{ + WINDOW *w = dialogue_window; + + mvwprintw(w, y, x, "%s", buf); +} + +const char control_title[] = " CONTROLS "; +void show_control_w(void) +{ + unsigned long state; + + get_ctrl_state(&state); + + if (tui_disabled || !control_window) + return; + + werase(control_window); + mvwprintw(control_window, 1, 1, + "PID gain: kp=%2.2f ki=%2.2f kd=%2.2f Output %2.2f", + p_param.kp, p_param.ki, p_param.kd, p_param.y_k); + + mvwprintw(control_window, 2, 1, + "Target Temp: %2.1fC, Zone: %d, Control Device: %.12s", + p_param.t_target, target_thermal_zone, ctrl_cdev); + + /* draw border last such that everything is within boundary */ + wborder(control_window, 0, 0, 0, 0, 0, 0, 0, 0); + wattron(control_window, A_BOLD); + mvwprintw(control_window, 0, maxx/2 - sizeof(control_title), + control_title); + wattroff(control_window, A_BOLD); + + wrefresh(control_window); +} + +void initialize_curses(void) +{ + if (tui_disabled) + return; + + initscr(); + start_color(); + keypad(stdscr, TRUE); /* enable keyboard mapping */ + nonl(); /* tell curses not to do NL->CR/NL on output */ + cbreak(); /* take input chars one at a time */ + noecho(); /* dont echo input */ + curs_set(0); /* turn off cursor */ + use_default_colors(); + + init_pair(PT_COLOR_DEFAULT, COLOR_WHITE, COLOR_BLACK); + init_pair(PT_COLOR_HEADER_BAR, COLOR_BLACK, COLOR_WHITE); + init_pair(PT_COLOR_ERROR, COLOR_BLACK, COLOR_RED); + init_pair(PT_COLOR_RED, COLOR_WHITE, COLOR_RED); + init_pair(PT_COLOR_YELLOW, COLOR_WHITE, COLOR_YELLOW); + init_pair(PT_COLOR_GREEN, COLOR_WHITE, COLOR_GREEN); + init_pair(PT_COLOR_BLUE, COLOR_WHITE, COLOR_BLUE); + init_pair(PT_COLOR_BRIGHT, COLOR_WHITE, COLOR_BLACK); + +} + +void show_title_bar(void) +{ + int i; + int x = 0; + + if (tui_disabled || 
!title_bar_window) + return; + + wattrset(title_bar_window, COLOR_PAIR(PT_COLOR_HEADER_BAR)); + wbkgd(title_bar_window, COLOR_PAIR(PT_COLOR_HEADER_BAR)); + werase(title_bar_window); + + mvwprintw(title_bar_window, 0, 0, + " TMON v%s", VERSION); + + wrefresh(title_bar_window); + + werase(status_bar_window); + + for (i = 0; i < 10; i++) { + if (strlen(status_bar_slots[i]) == 0) + continue; + wattron(status_bar_window, A_REVERSE); + mvwprintw(status_bar_window, 0, x, "%s", status_bar_slots[i]); + wattroff(status_bar_window, A_REVERSE); + x += strlen(status_bar_slots[i]) + 1; + } + wrefresh(status_bar_window); +} + +static void handle_input_val(int ch) +{ + char buf[32]; + int val; + char path[256]; + WINDOW *w = dialogue_window; + + echo(); + keypad(w, TRUE); + wgetnstr(w, buf, 31); + val = atoi(buf); + + if (ch == ptdata.nr_cooling_dev) { + snprintf(buf, 31, "Invalid Temp %d! %d-%d", val, + MIN_CTRL_TEMP, MAX_CTRL_TEMP); + if (val < MIN_CTRL_TEMP || val > MAX_CTRL_TEMP) + write_status_bar(40, buf); + else { + p_param.t_target = val; + snprintf(buf, 31, "Set New Target Temp %d", val); + write_status_bar(40, buf); + } + } else { + snprintf(path, 256, "%s/%s%d", THERMAL_SYSFS, + CDEV, ptdata.cdi[ch].instance); + sysfs_set_ulong(path, "cur_state", val); + } + noecho(); + dialogue_on = 0; + show_data_w(); + show_control_w(); + + top = (PANEL *)panel_userptr(top); + top_panel(top); +} + +static void handle_input_choice(int ch) +{ + char buf[48]; + int base = 0; + int cdev_id = 0; + + if ((ch >= 'A' && ch <= 'A' + ptdata.nr_cooling_dev) || + (ch >= 'a' && ch <= 'a' + ptdata.nr_cooling_dev)) { + base = (ch < 'a') ? 'A' : 'a'; + cdev_id = ch - base; + if (ptdata.nr_cooling_dev == cdev_id) + snprintf(buf, sizeof(buf), "New Target Temp:"); + else + snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ", + ptdata.cdi[cdev_id].type, + ptdata.cdi[cdev_id].instance); + write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2); + handle_input_val(cdev_id); + } else { + snprintf(buf, sizeof(buf), "Invalid selection %d", ch); + write_dialogue_win(buf, 8, 2); + } +} + +void *handle_tui_events(void *arg) +{ + int ch; + + keypad(cooling_device_window, TRUE); + while ((ch = wgetch(cooling_device_window)) != EOF) { + if (tmon_exit) + break; + /* when term size is too small, no dialogue panels are set. + * we need to filter out such cases. 
+ */ + if (!data_panel || !dialogue_panel || + !cooling_device_window || + !dialogue_window) { + + continue; + } + pthread_mutex_lock(&input_lock); + if (dialogue_on) { + handle_input_choice(ch); + /* top panel filter */ + if (ch == 'q' || ch == 'Q') + ch = 0; + } + switch (ch) { + case KEY_LEFT: + box(cooling_device_window, 10, 0); + break; + case 9: /* TAB */ + top = (PANEL *)panel_userptr(top); + top_panel(top); + if (top == dialogue_panel) { + dialogue_on = 1; + show_dialogue(); + } else { + dialogue_on = 0; + /* force refresh */ + show_data_w(); + show_control_w(); + } + break; + case 'q': + case 'Q': + tmon_exit = 1; + break; + } + update_panels(); + doupdate(); + pthread_mutex_unlock(&input_lock); + } + + if (arg) + *(int *)arg = 0; /* make gcc happy */ + + return NULL; +} + +/* draw a horizontal bar in given pattern */ +static void draw_hbar(WINDOW *win, int y, int start, int len, unsigned long ptn, + bool end) +{ + mvwaddch(win, y, start, ptn); + whline(win, ptn, len); + if (end) + mvwaddch(win, y, MAX_DISP_TEMP+TDATA_LEFT, ']'); +} + +static char trip_type_to_char(int type) +{ + switch (type) { + case THERMAL_TRIP_CRITICAL: return 'C'; + case THERMAL_TRIP_HOT: return 'H'; + case THERMAL_TRIP_PASSIVE: return 'P'; + case THERMAL_TRIP_ACTIVE: return 'A'; + default: + return '?'; + } +} + +/* fill a string with trip point type and value in one line + * e.g. P(56) C(106) + * maintain the distance one degree per char + */ +static void draw_tp_line(int tz, int y) +{ + int j; + int x; + + for (j = 0; j < ptdata.tzi[tz].nr_trip_pts; j++) { + x = ptdata.tzi[tz].tp[j].temp / 1000; + mvwprintw(thermal_data_window, y + 0, x + TDATA_LEFT, + "%c%d", trip_type_to_char(ptdata.tzi[tz].tp[j].type), + x); + syslog(LOG_INFO, "%s:tz %d tp %d temp = %lu\n", __func__, + tz, j, ptdata.tzi[tz].tp[j].temp); + } +} + +const char data_win_title[] = " THERMAL DATA "; +void show_data_w(void) +{ + int i; + + + if (tui_disabled || !thermal_data_window) + return; + + werase(thermal_data_window); + wattron(thermal_data_window, A_BOLD); + mvwprintw(thermal_data_window, 0, maxx/2 - sizeof(data_win_title), + data_win_title); + wattroff(thermal_data_window, A_BOLD); + /* draw a line as ruler */ + for (i = 10; i < MAX_DISP_TEMP; i += 10) + mvwprintw(thermal_data_window, 1, i+TDATA_LEFT, "%2d", i); + + for (i = 0; i < ptdata.nr_tz_sensor; i++) { + int temp = trec[cur_thermal_record].temp[i] / 1000; + int y = 0; + + y = i * NR_LINES_TZDATA + 2; + /* y at tz temp data line */ + mvwprintw(thermal_data_window, y, 1, "%6.6s%2d:[%3d][", + ptdata.tzi[i].type, + ptdata.tzi[i].instance, temp); + draw_hbar(thermal_data_window, y, TDATA_LEFT, temp, ACS_RARROW, + true); + draw_tp_line(i, y); + } + wborder(thermal_data_window, 0, 0, 0, 0, 0, 0, 0, 0); + wrefresh(thermal_data_window); +} + +const char tz_title[] = "THERMAL ZONES(SENSORS)"; + +void show_sensors_w(void) +{ + int i, j; + char buffer[512]; + + if (tui_disabled || !tz_sensor_window) + return; + + werase(tz_sensor_window); + + memset(buffer, 0, sizeof(buffer)); + wattron(tz_sensor_window, A_BOLD); + mvwprintw(tz_sensor_window, 1, 1, "Thermal Zones:"); + wattroff(tz_sensor_window, A_BOLD); + + mvwprintw(tz_sensor_window, 1, TZ_LEFT_ALIGN, "%s", buffer); + /* fill trip points for each tzone */ + wattron(tz_sensor_window, A_BOLD); + mvwprintw(tz_sensor_window, 2, 1, "Trip Points:"); + wattroff(tz_sensor_window, A_BOLD); + + /* draw trip point from low to high for each tz */ + for (i = 0; i < ptdata.nr_tz_sensor; i++) { + int inst = ptdata.tzi[i].instance; + + 
mvwprintw(tz_sensor_window, 1, + TZ_LEFT_ALIGN+TZONE_RECORD_SIZE * inst, "%.9s%02d", + ptdata.tzi[i].type, ptdata.tzi[i].instance); + for (j = ptdata.tzi[i].nr_trip_pts - 1; j >= 0; j--) { + /* loop through all trip points */ + char type; + int tp_pos; + /* reverse the order here since trips are sorted + * in ascending order in terms of temperature. + */ + tp_pos = ptdata.tzi[i].nr_trip_pts - j - 1; + + type = trip_type_to_char(ptdata.tzi[i].tp[j].type); + mvwaddch(tz_sensor_window, 2, + inst * TZONE_RECORD_SIZE + TZ_LEFT_ALIGN + + tp_pos, type); + syslog(LOG_DEBUG, "draw tz %d tp %d ch:%c\n", + inst, j, type); + } + } + wborder(tz_sensor_window, 0, 0, 0, 0, 0, 0, 0, 0); + wattron(tz_sensor_window, A_BOLD); + mvwprintw(tz_sensor_window, 0, maxx/2 - sizeof(tz_title), tz_title); + wattroff(tz_sensor_window, A_BOLD); + wrefresh(tz_sensor_window); +} + +void disable_tui(void) +{ + tui_disabled = 1; +} diff --git a/tools/usb/Makefile b/tools/usb/Makefile index 396d6c44e9d..acf2165c04e 100644 --- a/tools/usb/Makefile +++ b/tools/usb/Makefile @@ -3,11 +3,12 @@ CC = $(CROSS_COMPILE)gcc PTHREAD_LIBS = -lpthread WARNINGS = -Wall -Wextra -CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) -I../include +CFLAGS = $(WARNINGS) -g -I../include +LDFLAGS = $(PTHREAD_LIBS) all: testusb ffs-test %: %.c - $(CC) $(CFLAGS) -o $@ $^ + $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) clean: $(RM) testusb ffs-test diff --git a/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c b/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c new file mode 100644 index 00000000000..87216a0c4a8 --- /dev/null +++ b/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c @@ -0,0 +1,349 @@ +#define _BSD_SOURCE /* for endian.h */ + +#include <endian.h> +#include <errno.h> +#include <fcntl.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/poll.h> +#include <unistd.h> +#include <stdbool.h> +#include <sys/eventfd.h> + +#include "libaio.h" +#define IOCB_FLAG_RESFD (1 << 0) + +#include <linux/usb/functionfs.h> + +#define BUF_LEN 8192 +#define BUFS_MAX 128 +#define AIO_MAX (BUFS_MAX*2) + +/******************** Descriptors and Strings *******************************/ + +static const struct { + struct usb_functionfs_descs_head header; + struct { + struct usb_interface_descriptor intf; + struct usb_endpoint_descriptor_no_audio bulk_sink; + struct usb_endpoint_descriptor_no_audio bulk_source; + } __attribute__ ((__packed__)) fs_descs, hs_descs; +} __attribute__ ((__packed__)) descriptors = { + .header = { + .magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC), + .length = htole32(sizeof(descriptors)), + .fs_count = 3, + .hs_count = 3, + }, + .fs_descs = { + .intf = { + .bLength = sizeof(descriptors.fs_descs.intf), + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .iInterface = 1, + }, + .bulk_sink = { + .bLength = sizeof(descriptors.fs_descs.bulk_sink), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 1 | USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + }, + .bulk_source = { + .bLength = sizeof(descriptors.fs_descs.bulk_source), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 2 | USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + }, + }, + .hs_descs = { + .intf = { + .bLength = sizeof(descriptors.hs_descs.intf), + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .iInterface = 
1, + }, + .bulk_sink = { + .bLength = sizeof(descriptors.hs_descs.bulk_sink), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 1 | USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = htole16(512), + }, + .bulk_source = { + .bLength = sizeof(descriptors.hs_descs.bulk_source), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 2 | USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = htole16(512), + }, + }, +}; + +#define STR_INTERFACE "AIO Test" + +static const struct { + struct usb_functionfs_strings_head header; + struct { + __le16 code; + const char str1[sizeof(STR_INTERFACE)]; + } __attribute__ ((__packed__)) lang0; +} __attribute__ ((__packed__)) strings = { + .header = { + .magic = htole32(FUNCTIONFS_STRINGS_MAGIC), + .length = htole32(sizeof(strings)), + .str_count = htole32(1), + .lang_count = htole32(1), + }, + .lang0 = { + htole16(0x0409), /* en-us */ + STR_INTERFACE, + }, +}; + +/********************** Buffer structure *******************************/ + +struct io_buffer { + struct iocb **iocb; + unsigned char **buf; + unsigned cnt; + unsigned len; + unsigned requested; +}; + +/******************** Endpoints handling *******************************/ + +static void display_event(struct usb_functionfs_event *event) +{ + static const char *const names[] = { + [FUNCTIONFS_BIND] = "BIND", + [FUNCTIONFS_UNBIND] = "UNBIND", + [FUNCTIONFS_ENABLE] = "ENABLE", + [FUNCTIONFS_DISABLE] = "DISABLE", + [FUNCTIONFS_SETUP] = "SETUP", + [FUNCTIONFS_SUSPEND] = "SUSPEND", + [FUNCTIONFS_RESUME] = "RESUME", + }; + switch (event->type) { + case FUNCTIONFS_BIND: + case FUNCTIONFS_UNBIND: + case FUNCTIONFS_ENABLE: + case FUNCTIONFS_DISABLE: + case FUNCTIONFS_SETUP: + case FUNCTIONFS_SUSPEND: + case FUNCTIONFS_RESUME: + printf("Event %s\n", names[event->type]); + } +} + +static void handle_ep0(int ep0, bool *ready) +{ + int ret; + struct usb_functionfs_event event; + + ret = read(ep0, &event, sizeof(event)); + if (!ret) { + perror("unable to read event from ep0"); + return; + } + display_event(&event); + switch (event.type) { + case FUNCTIONFS_SETUP: + if (event.u.setup.bRequestType & USB_DIR_IN) + write(ep0, NULL, 0); + else + read(ep0, NULL, 0); + break; + + case FUNCTIONFS_ENABLE: + *ready = true; + break; + + case FUNCTIONFS_DISABLE: + *ready = false; + break; + + default: + break; + } +} + +void init_bufs(struct io_buffer *iobuf, unsigned n, unsigned len) +{ + unsigned i; + iobuf->buf = malloc(n*sizeof(*iobuf->buf)); + iobuf->iocb = malloc(n*sizeof(*iobuf->iocb)); + iobuf->cnt = n; + iobuf->len = len; + iobuf->requested = 0; + for (i = 0; i < n; ++i) { + iobuf->buf[i] = malloc(len*sizeof(**iobuf->buf)); + iobuf->iocb[i] = malloc(sizeof(**iobuf->iocb)); + } + iobuf->cnt = n; +} + +void delete_bufs(struct io_buffer *iobuf) +{ + unsigned i; + for (i = 0; i < iobuf->cnt; ++i) { + free(iobuf->buf[i]); + free(iobuf->iocb[i]); + } + free(iobuf->buf); + free(iobuf->iocb); +} + +int main(int argc, char *argv[]) +{ + int ret; + unsigned i, j; + char *ep_path; + + int ep0, ep1; + + io_context_t ctx; + + int evfd; + fd_set rfds; + + struct io_buffer iobuf[2]; + int actual = 0; + bool ready; + + if (argc != 2) { + printf("ffs directory not specified!\n"); + return 1; + } + + ep_path = malloc(strlen(argv[1]) + 4 /* "/ep#" */ + 1 /* '\0' */); + if (!ep_path) { + perror("malloc"); + return 1; + } + + /* open endpoint files */ + sprintf(ep_path, "%s/ep0", argv[1]); + ep0 = open(ep_path, O_RDWR); + if (ep0 < 0) { + perror("unable to open ep0"); + return 1; 
+ } + if (write(ep0, &descriptors, sizeof(descriptors)) < 0) { + perror("unable to write descriptors"); + return 1; + } + if (write(ep0, &strings, sizeof(strings)) < 0) { + perror("unable to write strings"); + return 1; + } + sprintf(ep_path, "%s/ep1", argv[1]); + ep1 = open(ep_path, O_RDWR); + if (ep1 < 0) { + perror("unable to open ep1"); + return 1; + } + + free(ep_path); + + memset(&ctx, 0, sizeof(ctx)); + /* setup aio context to handle up to AIO_MAX requests */ + if (io_setup(AIO_MAX, &ctx) < 0) { + perror("unable to setup aio"); + return 1; + } + + evfd = eventfd(0, 0); + if (evfd < 0) { + perror("unable to open eventfd"); + return 1; + } + + for (i = 0; i < sizeof(iobuf)/sizeof(*iobuf); ++i) + init_bufs(&iobuf[i], BUFS_MAX, BUF_LEN); + + while (1) { + FD_ZERO(&rfds); + FD_SET(ep0, &rfds); + FD_SET(evfd, &rfds); + + ret = select(((ep0 > evfd) ? ep0 : evfd)+1, + &rfds, NULL, NULL, NULL); + if (ret < 0) { + if (errno == EINTR) + continue; + perror("select"); + break; + } + + if (FD_ISSET(ep0, &rfds)) + handle_ep0(ep0, &ready); + + /* we are waiting for function ENABLE */ + if (!ready) + continue; + + /* + * while one buffer is being transmitted, prepare new data + * to submit in the other + */ + for (i = 0; i < sizeof(iobuf)/sizeof(*iobuf); ++i) { + if (iobuf[i].requested) + continue; + /* prepare requests */ + for (j = 0; j < iobuf[i].cnt; ++j) { + io_prep_pwrite(iobuf[i].iocb[j], ep1, + iobuf[i].buf[j], + iobuf[i].len, 0); + /* enable eventfd notification */ + iobuf[i].iocb[j]->u.c.flags |= IOCB_FLAG_RESFD; + iobuf[i].iocb[j]->u.c.resfd = evfd; + } + /* submit table of requests */ + ret = io_submit(ctx, iobuf[i].cnt, iobuf[i].iocb); + if (ret >= 0) { + iobuf[i].requested = ret; + printf("submit: %d requests buf: %d\n", ret, i); + } else + perror("unable to submit requests"); + } + + /* if event is ready to read */ + if (!FD_ISSET(evfd, &rfds)) + continue; + + uint64_t ev_cnt; + ret = read(evfd, &ev_cnt, sizeof(ev_cnt)); + if (ret < 0) { + perror("unable to read eventfd"); + break; + } + + struct io_event e[BUFS_MAX]; + /* we read aio events */ + ret = io_getevents(ctx, 1, BUFS_MAX, e, NULL); + if (ret > 0) /* if we got events */ + iobuf[actual].requested -= ret; + + /* if all req's from iocb completed */ + if (!iobuf[actual].requested) + actual = (actual + 1)%(sizeof(iobuf)/sizeof(*iobuf)); + } + + /* free resources */ + + for (i = 0; i < sizeof(iobuf)/sizeof(*iobuf); ++i) + delete_bufs(&iobuf[i]); + io_destroy(ctx); + + close(ep1); + close(ep0); + + return 0; +} diff --git a/tools/usb/ffs-aio-example/multibuff/host_app/Makefile b/tools/usb/ffs-aio-example/multibuff/host_app/Makefile new file mode 100644 index 00000000000..8c4a6f0aa82 --- /dev/null +++ b/tools/usb/ffs-aio-example/multibuff/host_app/Makefile @@ -0,0 +1,13 @@ +CC = gcc +LIBUSB_CFLAGS = $(shell pkg-config --cflags libusb-1.0) +LIBUSB_LIBS = $(shell pkg-config --libs libusb-1.0) +WARNINGS = -Wall -Wextra +CFLAGS = $(LIBUSB_CFLAGS) $(WARNINGS) +LDFLAGS = $(LIBUSB_LIBS) + +all: test +%: %.c + $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) + +clean: + $(RM) test diff --git a/tools/usb/ffs-aio-example/multibuff/host_app/test.c b/tools/usb/ffs-aio-example/multibuff/host_app/test.c new file mode 100644 index 00000000000..b0ad8747d03 --- /dev/null +++ b/tools/usb/ffs-aio-example/multibuff/host_app/test.c @@ -0,0 +1,146 @@ +#include <libusb.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> + +#define VENDOR 0x1d6b +#define PRODUCT 0x0105 + +/* endpoints indexes */ + +#define EP_BULK_IN (1 | LIBUSB_ENDPOINT_IN) +#define EP_BULK_OUT 
(2 | LIBUSB_ENDPOINT_OUT) + +#define BUF_LEN 8192 + +/* + * struct test_state - describes test program state + * @list: list of devices returned by libusb_get_device_list function + * @found: pointer to struct describing tested device + * @ctx: context, set to NULL + * @handle: handle of tested device + * @attached: indicates that device was attached to kernel, and has to be + * reattached at the end of test program + */ + +struct test_state { + libusb_device *found; + libusb_context *ctx; + libusb_device_handle *handle; + int attached; +}; + +/* + * test_init - initialize test program + */ + +int test_init(struct test_state *state) +{ + int i, ret; + ssize_t cnt; + libusb_device **list; + + state->found = NULL; + state->ctx = NULL; + state->handle = NULL; + state->attached = 0; + + ret = libusb_init(&state->ctx); + if (ret) { + printf("cannot init libusb: %s\n", libusb_error_name(ret)); + return 1; + } + + cnt = libusb_get_device_list(state->ctx, &list); + if (cnt <= 0) { + printf("no devices found\n"); + goto error1; + } + + for (i = 0; i < cnt; ++i) { + libusb_device *dev = list[i]; + struct libusb_device_descriptor desc; + ret = libusb_get_device_descriptor(dev, &desc); + if (ret) { + printf("unable to get device descriptor: %s\n", + libusb_error_name(ret)); + goto error2; + } + if (desc.idVendor == VENDOR && desc.idProduct == PRODUCT) { + state->found = dev; + break; + } + } + + if (!state->found) { + printf("no devices found\n"); + goto error2; + } + + ret = libusb_open(state->found, &state->handle); + if (ret) { + printf("cannot open device: %s\n", libusb_error_name(ret)); + goto error2; + } + + if (libusb_claim_interface(state->handle, 0)) { + ret = libusb_detach_kernel_driver(state->handle, 0); + if (ret) { + printf("unable to detach kernel driver: %s\n", + libusb_error_name(ret)); + goto error3; + } + state->attached = 1; + ret = libusb_claim_interface(state->handle, 0); + if (ret) { + printf("cannot claim interface: %s\n", + libusb_error_name(ret)); + goto error4; + } + } + + return 0; + +error4: + if (state->attached == 1) + libusb_attach_kernel_driver(state->handle, 0); + +error3: + libusb_close(state->handle); + +error2: + libusb_free_device_list(list, 1); + +error1: + libusb_exit(state->ctx); + return 1; +} + +/* + * test_exit - cleanup test program + */ + +void test_exit(struct test_state *state) +{ + libusb_release_interface(state->handle, 0); + if (state->attached == 1) + libusb_attach_kernel_driver(state->handle, 0); + libusb_close(state->handle); + libusb_exit(state->ctx); +} + +int main(void) +{ + struct test_state state; + + if (test_init(&state)) + return 1; + + while (1) { + static unsigned char buffer[BUF_LEN]; + int bytes; + libusb_bulk_transfer(state.handle, EP_BULK_IN, buffer, BUF_LEN, + &bytes, 500); + } + test_exit(&state); +} diff --git a/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c b/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c new file mode 100644 index 00000000000..f558664a331 --- /dev/null +++ b/tools/usb/ffs-aio-example/simple/device_app/aio_simple.c @@ -0,0 +1,335 @@ +#define _BSD_SOURCE /* for endian.h */ + +#include <endian.h> +#include <errno.h> +#include <fcntl.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/poll.h> +#include <unistd.h> +#include <stdbool.h> +#include <sys/eventfd.h> + +#include "libaio.h" +#define IOCB_FLAG_RESFD (1 << 0) + +#include <linux/usb/functionfs.h> + +#define BUF_LEN 8192 + 
+/******************** Descriptors and Strings *******************************/ + +static const struct { + struct usb_functionfs_descs_head header; + struct { + struct usb_interface_descriptor intf; + struct usb_endpoint_descriptor_no_audio bulk_sink; + struct usb_endpoint_descriptor_no_audio bulk_source; + } __attribute__ ((__packed__)) fs_descs, hs_descs; +} __attribute__ ((__packed__)) descriptors = { + .header = { + .magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC), + .length = htole32(sizeof(descriptors)), + .fs_count = 3, + .hs_count = 3, + }, + .fs_descs = { + .intf = { + .bLength = sizeof(descriptors.fs_descs.intf), + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .iInterface = 1, + }, + .bulk_sink = { + .bLength = sizeof(descriptors.fs_descs.bulk_sink), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 1 | USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + }, + .bulk_source = { + .bLength = sizeof(descriptors.fs_descs.bulk_source), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 2 | USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + }, + }, + .hs_descs = { + .intf = { + .bLength = sizeof(descriptors.hs_descs.intf), + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .iInterface = 1, + }, + .bulk_sink = { + .bLength = sizeof(descriptors.hs_descs.bulk_sink), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 1 | USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + }, + .bulk_source = { + .bLength = sizeof(descriptors.hs_descs.bulk_source), + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = 2 | USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + }, + }, +}; + +#define STR_INTERFACE "AIO Test" + +static const struct { + struct usb_functionfs_strings_head header; + struct { + __le16 code; + const char str1[sizeof(STR_INTERFACE)]; + } __attribute__ ((__packed__)) lang0; +} __attribute__ ((__packed__)) strings = { + .header = { + .magic = htole32(FUNCTIONFS_STRINGS_MAGIC), + .length = htole32(sizeof(strings)), + .str_count = htole32(1), + .lang_count = htole32(1), + }, + .lang0 = { + htole16(0x0409), /* en-us */ + STR_INTERFACE, + }, +}; + +/******************** Endpoints handling *******************************/ + +static void display_event(struct usb_functionfs_event *event) +{ + static const char *const names[] = { + [FUNCTIONFS_BIND] = "BIND", + [FUNCTIONFS_UNBIND] = "UNBIND", + [FUNCTIONFS_ENABLE] = "ENABLE", + [FUNCTIONFS_DISABLE] = "DISABLE", + [FUNCTIONFS_SETUP] = "SETUP", + [FUNCTIONFS_SUSPEND] = "SUSPEND", + [FUNCTIONFS_RESUME] = "RESUME", + }; + switch (event->type) { + case FUNCTIONFS_BIND: + case FUNCTIONFS_UNBIND: + case FUNCTIONFS_ENABLE: + case FUNCTIONFS_DISABLE: + case FUNCTIONFS_SETUP: + case FUNCTIONFS_SUSPEND: + case FUNCTIONFS_RESUME: + printf("Event %s\n", names[event->type]); + } +} + +static void handle_ep0(int ep0, bool *ready) +{ + struct usb_functionfs_event event; + int ret; + + struct pollfd pfds[1]; + pfds[0].fd = ep0; + pfds[0].events = POLLIN; + + ret = poll(pfds, 1, 0); + + if (ret && (pfds[0].revents & POLLIN)) { + ret = read(ep0, &event, sizeof(event)); + if (!ret) { + perror("unable to read event from ep0"); + return; + } + display_event(&event); + switch (event.type) { + case FUNCTIONFS_SETUP: + if (event.u.setup.bRequestType & USB_DIR_IN) + write(ep0, NULL, 0); + else + read(ep0, NULL, 0); + break; + + case FUNCTIONFS_ENABLE: + *ready = true; + break; + + case FUNCTIONFS_DISABLE: + 
*ready = false; + break; + + default: + break; + } + } +} + +int main(int argc, char *argv[]) +{ + int i, ret; + char *ep_path; + + int ep0; + int ep[2]; + + io_context_t ctx; + + int evfd; + fd_set rfds; + + char *buf_in, *buf_out; + struct iocb *iocb_in, *iocb_out; + int req_in = 0, req_out = 0; + bool ready; + + if (argc != 2) { + printf("ffs directory not specified!\n"); + return 1; + } + + ep_path = malloc(strlen(argv[1]) + 4 /* "/ep#" */ + 1 /* '\0' */); + if (!ep_path) { + perror("malloc"); + return 1; + } + + /* open endpoint files */ + sprintf(ep_path, "%s/ep0", argv[1]); + ep0 = open(ep_path, O_RDWR); + if (ep0 < 0) { + perror("unable to open ep0"); + return 1; + } + if (write(ep0, &descriptors, sizeof(descriptors)) < 0) { + perror("unable do write descriptors"); + return 1; + } + if (write(ep0, &strings, sizeof(strings)) < 0) { + perror("unable to write strings"); + return 1; + } + for (i = 0; i < 2; ++i) { + sprintf(ep_path, "%s/ep%d", argv[1], i+1); + ep[i] = open(ep_path, O_RDWR); + if (ep[i] < 0) { + printf("unable to open ep%d: %s\n", i+1, + strerror(errno)); + return 1; + } + } + + free(ep_path); + + memset(&ctx, 0, sizeof(ctx)); + /* setup aio context to handle up to 2 requests */ + if (io_setup(2, &ctx) < 0) { + perror("unable to setup aio"); + return 1; + } + + evfd = eventfd(0, 0); + if (evfd < 0) { + perror("unable to open eventfd"); + return 1; + } + + /* alloc buffers and requests */ + buf_in = malloc(BUF_LEN); + buf_out = malloc(BUF_LEN); + iocb_in = malloc(sizeof(*iocb_in)); + iocb_out = malloc(sizeof(*iocb_out)); + + while (1) { + FD_ZERO(&rfds); + FD_SET(ep0, &rfds); + FD_SET(evfd, &rfds); + + ret = select(((ep0 > evfd) ? ep0 : evfd)+1, + &rfds, NULL, NULL, NULL); + if (ret < 0) { + if (errno == EINTR) + continue; + perror("select"); + break; + } + + if (FD_ISSET(ep0, &rfds)) + handle_ep0(ep0, &ready); + + /* we are waiting for function ENABLE */ + if (!ready) + continue; + + /* if something was submitted we wait for event */ + if (FD_ISSET(evfd, &rfds)) { + uint64_t ev_cnt; + ret = read(evfd, &ev_cnt, sizeof(ev_cnt)); + if (ret < 0) { + perror("unable to read eventfd"); + break; + } + + struct io_event e[2]; + /* we wait for one event */ + ret = io_getevents(ctx, 1, 2, e, NULL); + /* if we got event */ + for (i = 0; i < ret; ++i) { + if (e[i].obj->aio_fildes == ep[0]) { + printf("ev=in; ret=%lu\n", e[i].res); + req_in = 0; + } else if (e[i].obj->aio_fildes == ep[1]) { + printf("ev=out; ret=%lu\n", e[i].res); + req_out = 0; + } + } + } + + if (!req_in) { /* if IN transfer not requested*/ + /* prepare write request */ + io_prep_pwrite(iocb_in, ep[0], buf_in, BUF_LEN, 0); + /* enable eventfd notification */ + iocb_in->u.c.flags |= IOCB_FLAG_RESFD; + iocb_in->u.c.resfd = evfd; + /* submit table of requests */ + ret = io_submit(ctx, 1, &iocb_in); + if (ret >= 0) { /* if ret > 0 request is queued */ + req_in = 1; + printf("submit: in\n"); + } else + perror("unable to submit request"); + } + if (!req_out) { /* if OUT transfer not requested */ + /* prepare read request */ + io_prep_pread(iocb_out, ep[1], buf_out, BUF_LEN, 0); + /* enable eventfs notification */ + iocb_out->u.c.flags |= IOCB_FLAG_RESFD; + iocb_out->u.c.resfd = evfd; + /* submit table of requests */ + ret = io_submit(ctx, 1, &iocb_out); + if (ret >= 0) { /* if ret > 0 request is queued */ + req_out = 1; + printf("submit: out\n"); + } else + perror("unable to submit request"); + } + } + + /* free resources */ + + io_destroy(ctx); + + free(buf_in); + free(buf_out); + free(iocb_in); + free(iocb_out); + + 
for (i = 0; i < 2; ++i) + close(ep[i]); + close(ep0); + + return 0; +} diff --git a/tools/usb/ffs-aio-example/simple/host_app/Makefile b/tools/usb/ffs-aio-example/simple/host_app/Makefile new file mode 100644 index 00000000000..8c4a6f0aa82 --- /dev/null +++ b/tools/usb/ffs-aio-example/simple/host_app/Makefile @@ -0,0 +1,13 @@ +CC = gcc +LIBUSB_CFLAGS = $(shell pkg-config --cflags libusb-1.0) +LIBUSB_LIBS = $(shell pkg-config --libs libusb-1.0) +WARNINGS = -Wall -Wextra +CFLAGS = $(LIBUSB_CFLAGS) $(WARNINGS) +LDFLAGS = $(LIBUSB_LIBS) + +all: test +%: %.c + $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) + +clean: + $(RM) test diff --git a/tools/usb/ffs-aio-example/simple/host_app/test.c b/tools/usb/ffs-aio-example/simple/host_app/test.c new file mode 100644 index 00000000000..64b6a57d8ca --- /dev/null +++ b/tools/usb/ffs-aio-example/simple/host_app/test.c @@ -0,0 +1,148 @@ +#include <libusb.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> + +#define VENDOR 0x1d6b +#define PRODUCT 0x0105 + +/* endpoints indexes */ + +#define EP_BULK_IN (1 | LIBUSB_ENDPOINT_IN) +#define EP_BULK_OUT (2 | LIBUSB_ENDPOINT_OUT) + +#define BUF_LEN 8192 + +/* + * struct test_state - describes test program state + * @list: list of devices returned by libusb_get_device_list function + * @found: pointer to struct describing tested device + * @ctx: context, set to NULL + * @handle: handle of tested device + * @attached: indicates that device was attached to kernel, and has to be + * reattached at the end of test program + */ + +struct test_state { + libusb_device *found; + libusb_context *ctx; + libusb_device_handle *handle; + int attached; +}; + +/* + * test_init - initialize test program + */ + +int test_init(struct test_state *state) +{ + int i, ret; + ssize_t cnt; + libusb_device **list; + + state->found = NULL; + state->ctx = NULL; + state->handle = NULL; + state->attached = 0; + + ret = libusb_init(&state->ctx); + if (ret) { + printf("cannot init libusb: %s\n", libusb_error_name(ret)); + return 1; + } + + cnt = libusb_get_device_list(state->ctx, &list); + if (cnt <= 0) { + printf("no devices found\n"); + goto error1; + } + + for (i = 0; i < cnt; ++i) { + libusb_device *dev = list[i]; + struct libusb_device_descriptor desc; + ret = libusb_get_device_descriptor(dev, &desc); + if (ret) { + printf("unable to get device descriptor: %s\n", + libusb_error_name(ret)); + goto error2; + } + if (desc.idVendor == VENDOR && desc.idProduct == PRODUCT) { + state->found = dev; + break; + } + } + + if (!state->found) { + printf("no devices found\n"); + goto error2; + } + + ret = libusb_open(state->found, &state->handle); + if (ret) { + printf("cannot open device: %s\n", libusb_error_name(ret)); + goto error2; + } + + if (libusb_claim_interface(state->handle, 0)) { + ret = libusb_detach_kernel_driver(state->handle, 0); + if (ret) { + printf("unable to detach kernel driver: %s\n", + libusb_error_name(ret)); + goto error3; + } + state->attached = 1; + ret = libusb_claim_interface(state->handle, 0); + if (ret) { + printf("cannot claim interface: %s\n", + libusb_error_name(ret)); + goto error4; + } + } + + return 0; + +error4: + if (state->attached == 1) + libusb_attach_kernel_driver(state->handle, 0); + +error3: + libusb_close(state->handle); + +error2: + libusb_free_device_list(list, 1); + +error1: + libusb_exit(state->ctx); + return 1; +} + +/* + * test_exit - cleanup test program + */ + +void test_exit(struct test_state *state) +{ + libusb_release_interface(state->handle, 0); + if (state->attached == 1) + 
libusb_attach_kernel_driver(state->handle, 0); + libusb_close(state->handle); + libusb_exit(state->ctx); +} + +int main(void) +{ + struct test_state state; + + if (test_init(&state)) + return 1; + + while (1) { + static unsigned char buffer[BUF_LEN]; + int bytes; + libusb_bulk_transfer(state.handle, EP_BULK_IN, buffer, BUF_LEN, + &bytes, 500); + libusb_bulk_transfer(state.handle, EP_BULK_OUT, buffer, BUF_LEN, + &bytes, 500); + } + test_exit(&state); +} diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c index 8674b9ec14f..a87e99f37c5 100644 --- a/tools/usb/ffs-test.c +++ b/tools/usb/ffs-test.c @@ -38,7 +38,7 @@ #include <unistd.h> #include <tools/le_byteshift.h> -#include "../../include/linux/usb/functionfs.h" +#include "../../include/uapi/linux/usb/functionfs.h" /******************** Little Endian Handling ********************************/ @@ -116,8 +116,8 @@ static const struct { .header = { .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC), .length = cpu_to_le32(sizeof descriptors), - .fs_count = 3, - .hs_count = 3, + .fs_count = cpu_to_le32(3), + .hs_count = cpu_to_le32(3), }, .fs_descs = { .intf = { diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c index 68d0734b208..879f9870a6b 100644 --- a/tools/usb/testusb.c +++ b/tools/usb/testusb.c @@ -279,8 +279,7 @@ nomem: entry->ifnum = ifnum; - /* FIXME ask usbfs what speed; update USBDEVFS_CONNECTINFO so - * it tells about high speed etc */ + /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */ fprintf(stderr, "%s speed\t%s\t%u\n", speed(entry->speed), entry->name, entry->ifnum); @@ -351,7 +350,7 @@ restart: return arg; } -static const char *usbfs_dir_find(void) +static const char *usb_dir_find(void) { static char udev_usb_path[] = "/dev/bus/usb"; @@ -380,7 +379,7 @@ int main (int argc, char **argv) int c; struct testdev *entry; char *device; - const char *usbfs_dir = NULL; + const char *usb_dir = NULL; int all = 0, forever = 0, not = 0; int test = -1 /* all */; struct usbtest_param param; @@ -407,8 +406,8 @@ int main (int argc, char **argv) case 'D': /* device, if only one */ device = optarg; continue; - case 'A': /* use all devices with specified usbfs dir */ - usbfs_dir = optarg; + case 'A': /* use all devices with specified USB dir */ + usb_dir = optarg; /* FALL THROUGH */ case 'a': /* use all devices */ device = NULL; @@ -449,7 +448,7 @@ usage: "usage: %s [options]\n" "Options:\n" "\t-D dev only test specific device\n" - "\t-A usbfs-dir\n" + "\t-A usb-dir\n" "\t-a test all recognized devices\n" "\t-l loop forever(for stress test)\n" "\t-t testnum only run specified case\n" @@ -470,18 +469,18 @@ usage: goto usage; } - /* Find usbfs mount point */ - if (!usbfs_dir) { - usbfs_dir = usbfs_dir_find(); - if (!usbfs_dir) { - fputs ("usbfs files are missing\n", stderr); + /* Find usb device subdirectory */ + if (!usb_dir) { + usb_dir = usb_dir_find(); + if (!usb_dir) { + fputs ("USB device files are missing\n", stderr); return -1; } } /* collect and list the test devices */ - if (ftw (usbfs_dir, find_testdev, 3) != 0) { - fputs ("ftw failed; is usbfs missing?\n", stderr); + if (ftw (usb_dir, find_testdev, 3) != 0) { + fputs ("ftw failed; are USB device files missing?\n", stderr); return -1; } @@ -507,10 +506,8 @@ usage: return handle_testdev (entry) != entry; } status = pthread_create (&entry->thread, 0, handle_testdev, entry); - if (status) { + if (status) perror ("pthread_create"); - continue; - } } if (device) { struct testdev dev; diff --git a/tools/virtio/.gitignore b/tools/virtio/.gitignore new file mode 
100644 index 00000000000..1cfbb0157a4 --- /dev/null +++ b/tools/virtio/.gitignore @@ -0,0 +1,3 @@ +*.d +virtio_test +vringh_test diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile index d1d442ed106..9325f469382 100644 --- a/tools/virtio/Makefile +++ b/tools/virtio/Makefile @@ -1,12 +1,14 @@ all: test mod -test: virtio_test +test: virtio_test vringh_test virtio_test: virtio_ring.o virtio_test.o -CFLAGS += -g -O2 -Wall -I. -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -MMD -vpath %.c ../../drivers/virtio +vringh_test: vringh_test.o vringh.o virtio_ring.o + +CFLAGS += -g -O2 -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE +vpath %.c ../../drivers/virtio ../../drivers/vhost mod: ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test .PHONY: all test mod clean clean: - ${RM} *.o vhost_test/*.o vhost_test/.*.cmd \ + ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \ vhost_test/Module.symvers vhost_test/modules.order *.d -include *.d diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h new file mode 100644 index 00000000000..aff61e13306 --- /dev/null +++ b/tools/virtio/asm/barrier.h @@ -0,0 +1,14 @@ +#if defined(__i386__) || defined(__x86_64__) +#define barrier() asm volatile("" ::: "memory") +#define mb() __sync_synchronize() + +#define smp_mb() mb() +# define smp_rmb() barrier() +# define smp_wmb() barrier() +/* Weak barriers should be used. If not - it's a bug */ +# define rmb() abort() +# define wmb() abort() +#else +#error Please fill in barrier macros +#endif + diff --git a/tools/virtio/linux/bug.h b/tools/virtio/linux/bug.h new file mode 100644 index 00000000000..fb94f0787c4 --- /dev/null +++ b/tools/virtio/linux/bug.h @@ -0,0 +1,10 @@ +#ifndef BUG_H +#define BUG_H + +#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond)) + +#define BUILD_BUG_ON(x) + +#define BUG() abort() + +#endif /* BUG_H */ diff --git a/tools/virtio/linux/err.h b/tools/virtio/linux/err.h new file mode 100644 index 00000000000..e32eff8b2a1 --- /dev/null +++ b/tools/virtio/linux/err.h @@ -0,0 +1,26 @@ +#ifndef ERR_H +#define ERR_H +#define MAX_ERRNO 4095 + +#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) + +static inline void * __must_check ERR_PTR(long error) +{ + return (void *) error; +} + +static inline long __must_check PTR_ERR(const void *ptr) +{ + return (long) ptr; +} + +static inline long __must_check IS_ERR(const void *ptr) +{ + return IS_ERR_VALUE((unsigned long)ptr); +} + +static inline long __must_check IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} +#endif /* ERR_H */ diff --git a/tools/virtio/linux/irqreturn.h b/tools/virtio/linux/irqreturn.h new file mode 100644 index 00000000000..a3c4e7be708 --- /dev/null +++ b/tools/virtio/linux/irqreturn.h @@ -0,0 +1 @@ +#include "../../../include/linux/irqreturn.h" diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h new file mode 100644 index 00000000000..1e8ce6979c1 --- /dev/null +++ b/tools/virtio/linux/kernel.h @@ -0,0 +1,105 @@ +#ifndef KERNEL_H +#define KERNEL_H +#include <stdbool.h> +#include <stdlib.h> +#include <stddef.h> +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <stdarg.h> + +#include <linux/types.h> +#include <linux/printk.h> +#include <linux/bug.h> +#include <errno.h> +#include <unistd.h> +#include <asm/barrier.h> + +#define CONFIG_SMP + +#define PAGE_SIZE getpagesize() +#define PAGE_MASK (~(PAGE_SIZE-1)) + 
+typedef unsigned long long dma_addr_t; +typedef size_t __kernel_size_t; + +struct page { + unsigned long long dummy; +}; + +/* Physical == Virtual */ +#define virt_to_phys(p) ((unsigned long)p) +#define phys_to_virt(a) ((void *)(unsigned long)(a)) +/* Page address: Virtual / 4K */ +#define page_to_phys(p) ((dma_addr_t)(unsigned long)(p)) +#define virt_to_page(p) ((struct page *)((unsigned long)p & PAGE_MASK)) + +#define offset_in_page(p) (((unsigned long)p) % PAGE_SIZE) + +#define __printf(a,b) __attribute__((format(printf,a,b))) + +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +extern void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end; +static inline void *kmalloc(size_t s, gfp_t gfp) +{ + if (__kmalloc_fake) + return __kmalloc_fake; + return malloc(s); +} + +static inline void kfree(void *p) +{ + if (p >= __kfree_ignore_start && p < __kfree_ignore_end) + return; + free(p); +} + +static inline void *krealloc(void *p, size_t s, gfp_t gfp) +{ + return realloc(p, s); +} + + +static inline unsigned long __get_free_page(gfp_t gfp) +{ + void *p; + + posix_memalign(&p, PAGE_SIZE, PAGE_SIZE); + return (unsigned long)p; +} + +static inline void free_page(unsigned long addr) +{ + free((void *)addr); +} + +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +#define uninitialized_var(x) x = x + +# ifndef likely +# define likely(x) (__builtin_expect(!!(x), 1)) +# endif +# ifndef unlikely +# define unlikely(x) (__builtin_expect(!!(x), 0)) +# endif + +#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) +#ifdef DEBUG +#define pr_debug(format, ...) fprintf (stderr, format, ## __VA_ARGS__) +#else +#define pr_debug(format, ...) do {} while (0) +#endif +#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) +#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) + +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? 
_min1 : _min2; }) + +#endif /* KERNEL_H */ diff --git a/tools/virtio/linux/kmemleak.h b/tools/virtio/linux/kmemleak.h new file mode 100644 index 00000000000..c07072270e2 --- /dev/null +++ b/tools/virtio/linux/kmemleak.h @@ -0,0 +1,3 @@ +static inline void kmemleak_ignore(const void *ptr) +{ +} diff --git a/tools/virtio/linux/module.h b/tools/virtio/linux/module.h index e69de29bb2d..28ce95a0599 100644 --- a/tools/virtio/linux/module.h +++ b/tools/virtio/linux/module.h @@ -0,0 +1,6 @@ +#include <linux/export.h> + +#define MODULE_LICENSE(__MODULE_LICENSE_value) \ + static __attribute__((unused)) const char *__MODULE_LICENSE_name = \ + __MODULE_LICENSE_value + diff --git a/tools/virtio/linux/printk.h b/tools/virtio/linux/printk.h new file mode 100644 index 00000000000..9f2423bd89c --- /dev/null +++ b/tools/virtio/linux/printk.h @@ -0,0 +1,4 @@ +#include "../../../include/linux/kern_levels.h" + +#define printk printf +#define vprintk vprintf diff --git a/tools/virtio/linux/ratelimit.h b/tools/virtio/linux/ratelimit.h new file mode 100644 index 00000000000..dcce1725f90 --- /dev/null +++ b/tools/virtio/linux/ratelimit.h @@ -0,0 +1,4 @@ +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) int name = 0 + +#define __ratelimit(x) (*(x)) + diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h new file mode 100644 index 00000000000..68c9e2adc99 --- /dev/null +++ b/tools/virtio/linux/scatterlist.h @@ -0,0 +1,189 @@ +#ifndef SCATTERLIST_H +#define SCATTERLIST_H +#include <linux/kernel.h> + +struct scatterlist { + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +}; + +/* Scatterlist helpers, stolen from linux/scatterlist.h */ +#define sg_is_chain(sg) ((sg)->page_link & 0x01) +#define sg_is_last(sg) ((sg)->page_link & 0x02) +#define sg_chain_ptr(sg) \ + ((struct scatterlist *) ((sg)->page_link & ~0x03)) + +/** + * sg_assign_page - Assign a given page to an SG entry + * @sg: SG entry + * @page: The page + * + * Description: + * Assign page to sg entry. Also see sg_set_page(), the most commonly used + * variant. + * + **/ +static inline void sg_assign_page(struct scatterlist *sg, struct page *page) +{ + unsigned long page_link = sg->page_link & 0x3; + + /* + * In order for the low bit stealing approach to work, pages + * must be aligned at a 32-bit boundary as a minimum. + */ + BUG_ON((unsigned long) page & 0x03); +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); + BUG_ON(sg_is_chain(sg)); +#endif + sg->page_link = page_link | (unsigned long) page; +} + +/** + * sg_set_page - Set sg entry to point at given page + * @sg: SG entry + * @page: The page + * @len: Length of data + * @offset: Offset into page + * + * Description: + * Use this function to set an sg entry pointing at a page, never assign + * the page directly. We encode sg table information in the lower bits + * of the page pointer. See sg_page() for looking up the page belonging + * to an sg entry. 
+ * + **/ +static inline void sg_set_page(struct scatterlist *sg, struct page *page, + unsigned int len, unsigned int offset) +{ + sg_assign_page(sg, page); + sg->offset = offset; + sg->length = len; +} + +static inline struct page *sg_page(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); + BUG_ON(sg_is_chain(sg)); +#endif + return (struct page *)((sg)->page_link & ~0x3); +} + +/* + * Loop over each sg element, following the pointer to a new list if necessary + */ +#define for_each_sg(sglist, sg, nr, __i) \ + for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) + +/** + * sg_chain - Chain two sglists together + * @prv: First scatterlist + * @prv_nents: Number of entries in prv + * @sgl: Second scatterlist + * + * Description: + * Links @prv@ and @sgl@ together, to form a longer scatterlist. + * + **/ +static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, + struct scatterlist *sgl) +{ + /* + * offset and length are unused for chain entry. Clear them. + */ + prv[prv_nents - 1].offset = 0; + prv[prv_nents - 1].length = 0; + + /* + * Set lowest bit to indicate a link pointer, and make sure to clear + * the termination bit if it happens to be set. + */ + prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; +} + +/** + * sg_mark_end - Mark the end of the scatterlist + * @sg: SG entry + * + * Description: + * Marks the passed in sg entry as the termination point for the sg + * table. A call to sg_next() on this entry will return NULL. + * + **/ +static inline void sg_mark_end(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + /* + * Set termination bit, clear potential chain bit + */ + sg->page_link |= 0x02; + sg->page_link &= ~0x01; +} + +/** + * sg_unmark_end - Undo setting the end of the scatterlist + * @sg: SG entry + * + * Description: + * Removes the termination marker from the given entry of the scatterlist. 
+ * + **/ +static inline void sg_unmark_end(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + sg->page_link &= ~0x02; +} + +static inline struct scatterlist *sg_next(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + if (sg_is_last(sg)) + return NULL; + + sg++; + if (unlikely(sg_is_chain(sg))) + sg = sg_chain_ptr(sg); + + return sg; +} + +static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) +{ + memset(sgl, 0, sizeof(*sgl) * nents); +#ifdef CONFIG_DEBUG_SG + { + unsigned int i; + for (i = 0; i < nents; i++) + sgl[i].sg_magic = SG_MAGIC; + } +#endif + sg_mark_end(&sgl[nents - 1]); +} + +static inline dma_addr_t sg_phys(struct scatterlist *sg) +{ + return page_to_phys(sg_page(sg)) + sg->offset; +} + +static inline void sg_set_buf(struct scatterlist *sg, const void *buf, + unsigned int buflen) +{ + sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); +} + +static inline void sg_init_one(struct scatterlist *sg, + const void *buf, unsigned int buflen) +{ + sg_init_table(sg, 1); + sg_set_buf(sg, buf, buflen); +} +#endif /* SCATTERLIST_H */ diff --git a/tools/virtio/linux/uaccess.h b/tools/virtio/linux/uaccess.h new file mode 100644 index 00000000000..0a578fe1865 --- /dev/null +++ b/tools/virtio/linux/uaccess.h @@ -0,0 +1,50 @@ +#ifndef UACCESS_H +#define UACCESS_H +extern void *__user_addr_min, *__user_addr_max; + +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) + +static inline void __chk_user_ptr(const volatile void *p, size_t size) +{ + assert(p >= __user_addr_min && p + size <= __user_addr_max); +} + +#define put_user(x, ptr) \ +({ \ + typeof(ptr) __pu_ptr = (ptr); \ + __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \ + ACCESS_ONCE(*(__pu_ptr)) = x; \ + 0; \ +}) + +#define get_user(x, ptr) \ +({ \ + typeof(ptr) __pu_ptr = (ptr); \ + __chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr)); \ + x = ACCESS_ONCE(*(__pu_ptr)); \ + 0; \ +}) + +static void volatile_memcpy(volatile char *to, const volatile char *from, + unsigned long n) +{ + while (n--) + *(to++) = *(from++); +} + +static inline int copy_from_user(void *to, const void __user volatile *from, + unsigned long n) +{ + __chk_user_ptr(from, n); + volatile_memcpy(to, from, n); + return 0; +} + +static inline int copy_to_user(void __user volatile *to, const void *from, + unsigned long n) +{ + __chk_user_ptr(to, n); + volatile_memcpy(to, from, n); + return 0; +} +#endif /* UACCESS_H */ diff --git a/tools/virtio/linux/uio.h b/tools/virtio/linux/uio.h new file mode 100644 index 00000000000..cd20f0ba308 --- /dev/null +++ b/tools/virtio/linux/uio.h @@ -0,0 +1,3 @@ +#include <linux/kernel.h> + +#include "../../../include/linux/uio.h" diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h index 81847dd08bd..5a2d1f0f6bc 100644 --- a/tools/virtio/linux/virtio.h +++ b/tools/virtio/linux/virtio.h @@ -1,127 +1,7 @@ #ifndef LINUX_VIRTIO_H #define LINUX_VIRTIO_H - -#include <stdbool.h> -#include <stdlib.h> -#include <stddef.h> -#include <stdio.h> -#include <string.h> -#include <assert.h> - -#include <linux/types.h> -#include <errno.h> - -typedef unsigned long long dma_addr_t; - -struct scatterlist { - unsigned long page_link; - unsigned int offset; - unsigned int length; - dma_addr_t dma_address; -}; - -struct page { - unsigned long long dummy; -}; - -#define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond)) - -/* Physical == Virtual */ -#define virt_to_phys(p) ((unsigned long)p) -#define phys_to_virt(a) ((void *)(unsigned 
long)(a)) -/* Page address: Virtual / 4K */ -#define virt_to_page(p) ((struct page*)((virt_to_phys(p) / 4096) * \ - sizeof(struct page))) -#define offset_in_page(p) (((unsigned long)p) % 4096) -#define sg_phys(sg) ((sg->page_link & ~0x3) / sizeof(struct page) * 4096 + \ - sg->offset) -static inline void sg_mark_end(struct scatterlist *sg) -{ - /* - * Set termination bit, clear potential chain bit - */ - sg->page_link |= 0x02; - sg->page_link &= ~0x01; -} -static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) -{ - memset(sgl, 0, sizeof(*sgl) * nents); - sg_mark_end(&sgl[nents - 1]); -} -static inline void sg_assign_page(struct scatterlist *sg, struct page *page) -{ - unsigned long page_link = sg->page_link & 0x3; - - /* - * In order for the low bit stealing approach to work, pages - * must be aligned at a 32-bit boundary as a minimum. - */ - BUG_ON((unsigned long) page & 0x03); - sg->page_link = page_link | (unsigned long) page; -} - -static inline void sg_set_page(struct scatterlist *sg, struct page *page, - unsigned int len, unsigned int offset) -{ - sg_assign_page(sg, page); - sg->offset = offset; - sg->length = len; -} - -static inline void sg_set_buf(struct scatterlist *sg, const void *buf, - unsigned int buflen) -{ - sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); -} - -static inline void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen) -{ - sg_init_table(sg, 1); - sg_set_buf(sg, buf, buflen); -} - -typedef __u16 u16; - -typedef enum { - GFP_KERNEL, - GFP_ATOMIC, -} gfp_t; -typedef enum { - IRQ_NONE, - IRQ_HANDLED -} irqreturn_t; - -static inline void *kmalloc(size_t s, gfp_t gfp) -{ - return malloc(s); -} - -static inline void kfree(void *p) -{ - free(p); -} - -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type,member) );}) - -#define uninitialized_var(x) x = x - -# ifndef likely -# define likely(x) (__builtin_expect(!!(x), 1)) -# endif -# ifndef unlikely -# define unlikely(x) (__builtin_expect(!!(x), 0)) -# endif - -#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#ifdef DEBUG -#define pr_debug(format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#else -#define pr_debug(format, ...) do {} while (0) -#endif -#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) +#include <linux/scatterlist.h> +#include <linux/kernel.h> /* TODO: empty stubs for now. Broken but enough for virtio_ring.c */ #define list_add_tail(a, b) do {} while (0) @@ -131,6 +11,7 @@ static inline void kfree(void *p) #define BITS_PER_BYTE 8 #define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) + /* TODO: Not atomic as it should be: * we don't use this for anything important. 
*/ static inline void clear_bit(int nr, volatile unsigned long *addr) @@ -145,10 +26,6 @@ static inline int test_bit(int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } - -/* The only feature we care to support */ -#define virtio_has_feature(dev, feature) \ - test_bit((feature), (dev)->features) /* end of stubs */ struct virtio_device { @@ -163,40 +40,30 @@ struct virtqueue { void (*callback)(struct virtqueue *vq); const char *name; struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; void *priv; }; -#define EXPORT_SYMBOL_GPL(__EXPORT_SYMBOL_GPL_name) \ - void __EXPORT_SYMBOL_GPL##__EXPORT_SYMBOL_GPL_name() { \ -} -#define MODULE_LICENSE(__MODULE_LICENSE_value) \ - const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value - -#define CONFIG_SMP - -#if defined(__i386__) || defined(__x86_64__) -#define barrier() asm volatile("" ::: "memory") -#define mb() __sync_synchronize() - -#define smp_mb() mb() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -/* Weak barriers should be used. If not - it's a bug */ -# define rmb() abort() -# define wmb() abort() -#else -#error Please fill in barrier macros -#endif - /* Interfaces exported by virtio_ring. */ -int virtqueue_add_buf(struct virtqueue *vq, - struct scatterlist sg[], - unsigned int out_num, - unsigned int in_num, +int virtqueue_add_sgs(struct virtqueue *vq, + struct scatterlist *sgs[], + unsigned int out_sgs, + unsigned int in_sgs, void *data, gfp_t gfp); -void virtqueue_kick(struct virtqueue *vq); +int virtqueue_add_outbuf(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + gfp_t gfp); + +int virtqueue_add_inbuf(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + gfp_t gfp); + +bool virtqueue_kick(struct virtqueue *vq); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); @@ -206,12 +73,13 @@ bool virtqueue_enable_cb(struct virtqueue *vq); bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); -struct virtqueue *vring_new_virtqueue(unsigned int num, +struct virtqueue *vring_new_virtqueue(unsigned int index, + unsigned int num, unsigned int vring_align, struct virtio_device *vdev, bool weak_barriers, void *pages, - void (*notify)(struct virtqueue *vq), + bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name); void vring_del_virtqueue(struct virtqueue *vq); diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h new file mode 100644 index 00000000000..5049967f99f --- /dev/null +++ b/tools/virtio/linux/virtio_config.h @@ -0,0 +1,6 @@ +#define VIRTIO_TRANSPORT_F_START 28 +#define VIRTIO_TRANSPORT_F_END 32 + +#define virtio_has_feature(dev, feature) \ + test_bit((feature), (dev)->features) + diff --git a/tools/virtio/linux/virtio_ring.h b/tools/virtio/linux/virtio_ring.h new file mode 100644 index 00000000000..8949c4e2772 --- /dev/null +++ b/tools/virtio/linux/virtio_ring.h @@ -0,0 +1 @@ +#include "../../../include/linux/virtio_ring.h" diff --git a/tools/virtio/linux/vringh.h b/tools/virtio/linux/vringh.h new file mode 100644 index 00000000000..9348957be56 --- /dev/null +++ b/tools/virtio/linux/vringh.h @@ -0,0 +1 @@ +#include "../../../include/linux/vringh.h" diff --git a/tools/virtio/uapi/linux/uio.h b/tools/virtio/uapi/linux/uio.h new file mode 100644 index 00000000000..7230e900220 --- /dev/null +++ b/tools/virtio/uapi/linux/uio.h @@ -0,0 +1 @@ 
+#include <sys/uio.h> diff --git a/tools/virtio/uapi/linux/virtio_config.h b/tools/virtio/uapi/linux/virtio_config.h new file mode 100644 index 00000000000..4c86675f015 --- /dev/null +++ b/tools/virtio/uapi/linux/virtio_config.h @@ -0,0 +1 @@ +#include "../../../../include/uapi/linux/virtio_config.h" diff --git a/tools/virtio/uapi/linux/virtio_ring.h b/tools/virtio/uapi/linux/virtio_ring.h new file mode 100644 index 00000000000..4d99c78234d --- /dev/null +++ b/tools/virtio/uapi/linux/virtio_ring.h @@ -0,0 +1,4 @@ +#ifndef VIRTIO_RING_H +#define VIRTIO_RING_H +#include "../../../../include/uapi/linux/virtio_ring.h" +#endif /* VIRTIO_RING_H */ diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c index e626fa553c5..00ea679b382 100644 --- a/tools/virtio/virtio_test.c +++ b/tools/virtio/virtio_test.c @@ -10,11 +10,15 @@ #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> +#include <stdbool.h> #include <linux/vhost.h> #include <linux/virtio.h> #include <linux/virtio_ring.h> #include "../../drivers/vhost/test.h" +/* Unused */ +void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end; + struct vq_info { int kick; int call; @@ -37,13 +41,14 @@ struct vdev_info { struct vhost_memory *mem; }; -void vq_notify(struct virtqueue *vq) +bool vq_notify(struct virtqueue *vq) { struct vq_info *info = vq->priv; unsigned long long v = 1; int r; r = write(info->kick, &v, sizeof v); assert(r == sizeof v); + return true; } void vq_callback(struct virtqueue *vq) @@ -92,7 +97,8 @@ static void vq_info_add(struct vdev_info *dev, int num) assert(r >= 0); memset(info->ring, 0, vring_size(num, 4096)); vring_init(&info->vring, num, info->ring, 4096); - info->vq = vring_new_virtqueue(info->vring.num, 4096, &dev->vdev, + info->vq = vring_new_virtqueue(info->idx, + info->vring.num, 4096, &dev->vdev, true, info->ring, vq_notify, vq_callback, "test"); assert(info->vq); @@ -161,12 +167,13 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq, do { if (started < bufs) { sg_init_one(&sl, dev->buf, dev->buf_size); - r = virtqueue_add_buf(vq->vq, &sl, 1, 0, - dev->buf + started, - GFP_ATOMIC); - if (likely(r >= 0)) { + r = virtqueue_add_outbuf(vq->vq, &sl, 1, + dev->buf + started, + GFP_ATOMIC); + if (likely(r == 0)) { ++started; - virtqueue_kick(vq->vq); + if (unlikely(!virtqueue_kick(vq->vq))) + r = -1; } } else r = -1; @@ -177,7 +184,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq, r = 0; } - } while (r >= 0); + } while (r == 0); if (completed == completed_before) ++spurious; assert(completed <= bufs); @@ -232,7 +239,7 @@ const struct option longopts[] = { } }; -static void help() +static void help(void) { fprintf(stderr, "Usage: virtio_test [--help]" " [--no-indirect]" diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c new file mode 100644 index 00000000000..14a4f4cab5b --- /dev/null +++ b/tools/virtio/vringh_test.c @@ -0,0 +1,746 @@ +/* Simple test of virtio code, entirely in userpsace. 
*/ +#define _GNU_SOURCE +#include <sched.h> +#include <err.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/virtio.h> +#include <linux/vringh.h> +#include <linux/virtio_ring.h> +#include <linux/uaccess.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/mman.h> +#include <sys/wait.h> +#include <fcntl.h> + +#define USER_MEM (1024*1024) +void *__user_addr_min, *__user_addr_max; +void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end; +static u64 user_addr_offset; + +#define RINGSIZE 256 +#define ALIGN 4096 + +static bool never_notify_host(struct virtqueue *vq) +{ + abort(); +} + +static void never_callback_guest(struct virtqueue *vq) +{ + abort(); +} + +static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r) +{ + if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset) + return false; + if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset) + return false; + + r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset; + r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset; + r->offset = user_addr_offset; + return true; +} + +/* We return single byte ranges. */ +static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r) +{ + if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset) + return false; + if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset) + return false; + + r->start = addr; + r->end_incl = r->start; + r->offset = user_addr_offset; + return true; +} + +struct guest_virtio_device { + struct virtio_device vdev; + int to_host_fd; + unsigned long notifies; +}; + +static bool parallel_notify_host(struct virtqueue *vq) +{ + int rc; + struct guest_virtio_device *gvdev; + + gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev); + rc = write(gvdev->to_host_fd, "", 1); + if (rc < 0) + return false; + gvdev->notifies++; + return true; +} + +static bool no_notify_host(struct virtqueue *vq) +{ + return true; +} + +#define NUM_XFERS (10000000) + +/* We aim for two "distant" cpus. */ +static void find_cpus(unsigned int *first, unsigned int *last) +{ + unsigned int i; + + *first = -1U; + *last = 0; + for (i = 0; i < 4096; i++) { + cpu_set_t set; + CPU_ZERO(&set); + CPU_SET(i, &set); + if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) { + if (i < *first) + *first = i; + if (i > *last) + *last = i; + } + } +} + +/* Opencoded version for fast mode */ +static inline int vringh_get_head(struct vringh *vrh, u16 *head) +{ + u16 avail_idx, i; + int err; + + err = get_user(avail_idx, &vrh->vring.avail->idx); + if (err) + return err; + + if (vrh->last_avail_idx == avail_idx) + return 0; + + /* Only get avail ring entries after they have been exposed by guest. */ + virtio_rmb(vrh->weak_barriers); + + i = vrh->last_avail_idx & (vrh->vring.num - 1); + + err = get_user(*head, &vrh->vring.avail->ring[i]); + if (err) + return err; + + vrh->last_avail_idx++; + return 1; +} + +static int parallel_test(unsigned long features, + bool (*getrange)(struct vringh *vrh, + u64 addr, struct vringh_range *r), + bool fast_vringh) +{ + void *host_map, *guest_map; + int fd, mapsize, to_guest[2], to_host[2]; + unsigned long xfers = 0, notifies = 0, receives = 0; + unsigned int first_cpu, last_cpu; + cpu_set_t cpu_set; + char buf[128]; + + /* Create real file to mmap. 
*/ + fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600); + if (fd < 0) + err(1, "Opening /tmp/vringh_test-file"); + + /* Extra room at the end for some data, and indirects */ + mapsize = vring_size(RINGSIZE, ALIGN) + + RINGSIZE * 2 * sizeof(int) + + RINGSIZE * 6 * sizeof(struct vring_desc); + mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1); + ftruncate(fd, mapsize); + + /* Parent and child use separate addresses, to check our mapping logic! */ + host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); + guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); + + pipe(to_guest); + pipe(to_host); + + CPU_ZERO(&cpu_set); + find_cpus(&first_cpu, &last_cpu); + printf("Using CPUS %u and %u\n", first_cpu, last_cpu); + fflush(stdout); + + if (fork() != 0) { + struct vringh vrh; + int status, err, rlen = 0; + char rbuf[5]; + + /* We are the host: never access guest addresses! */ + munmap(guest_map, mapsize); + + __user_addr_min = host_map; + __user_addr_max = __user_addr_min + mapsize; + user_addr_offset = host_map - guest_map; + assert(user_addr_offset); + + close(to_guest[0]); + close(to_host[1]); + + vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN); + vringh_init_user(&vrh, features, RINGSIZE, true, + vrh.vring.desc, vrh.vring.avail, vrh.vring.used); + CPU_SET(first_cpu, &cpu_set); + if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set)) + errx(1, "Could not set affinity to cpu %u", first_cpu); + + while (xfers < NUM_XFERS) { + struct iovec host_riov[2], host_wiov[2]; + struct vringh_iov riov, wiov; + u16 head, written; + + if (fast_vringh) { + for (;;) { + err = vringh_get_head(&vrh, &head); + if (err != 0) + break; + err = vringh_need_notify_user(&vrh); + if (err < 0) + errx(1, "vringh_need_notify_user: %i", + err); + if (err) { + write(to_guest[1], "", 1); + notifies++; + } + } + if (err != 1) + errx(1, "vringh_get_head"); + written = 0; + goto complete; + } else { + vringh_iov_init(&riov, + host_riov, + ARRAY_SIZE(host_riov)); + vringh_iov_init(&wiov, + host_wiov, + ARRAY_SIZE(host_wiov)); + + err = vringh_getdesc_user(&vrh, &riov, &wiov, + getrange, &head); + } + if (err == 0) { + err = vringh_need_notify_user(&vrh); + if (err < 0) + errx(1, "vringh_need_notify_user: %i", + err); + if (err) { + write(to_guest[1], "", 1); + notifies++; + } + + if (!vringh_notify_enable_user(&vrh)) + continue; + + /* Swallow all notifies at once. */ + if (read(to_host[0], buf, sizeof(buf)) < 1) + break; + + vringh_notify_disable_user(&vrh); + receives++; + continue; + } + if (err != 1) + errx(1, "vringh_getdesc_user: %i", err); + + /* We simply copy bytes. 
*/ + if (riov.used) { + rlen = vringh_iov_pull_user(&riov, rbuf, + sizeof(rbuf)); + if (rlen != 4) + errx(1, "vringh_iov_pull_user: %i", + rlen); + assert(riov.i == riov.used); + written = 0; + } else { + err = vringh_iov_push_user(&wiov, rbuf, rlen); + if (err != rlen) + errx(1, "vringh_iov_push_user: %i", + err); + assert(wiov.i == wiov.used); + written = err; + } + complete: + xfers++; + + err = vringh_complete_user(&vrh, head, written); + if (err != 0) + errx(1, "vringh_complete_user: %i", err); + } + + err = vringh_need_notify_user(&vrh); + if (err < 0) + errx(1, "vringh_need_notify_user: %i", err); + if (err) { + write(to_guest[1], "", 1); + notifies++; + } + wait(&status); + if (!WIFEXITED(status)) + errx(1, "Child died with signal %i?", WTERMSIG(status)); + if (WEXITSTATUS(status) != 0) + errx(1, "Child exited %i?", WEXITSTATUS(status)); + printf("Host: notified %lu, pinged %lu\n", notifies, receives); + return 0; + } else { + struct guest_virtio_device gvdev; + struct virtqueue *vq; + unsigned int *data; + struct vring_desc *indirects; + unsigned int finished = 0; + + /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */ + data = guest_map + vring_size(RINGSIZE, ALIGN); + indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int); + + /* We are the guest. */ + munmap(host_map, mapsize); + + close(to_guest[1]); + close(to_host[0]); + + gvdev.vdev.features[0] = features; + gvdev.to_host_fd = to_host[1]; + gvdev.notifies = 0; + + CPU_SET(first_cpu, &cpu_set); + if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set)) + err(1, "Could not set affinity to cpu %u", first_cpu); + + vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true, + guest_map, fast_vringh ? no_notify_host + : parallel_notify_host, + never_callback_guest, "guest vq"); + + /* Don't kfree indirects. */ + __kfree_ignore_start = indirects; + __kfree_ignore_end = indirects + RINGSIZE * 6; + + while (xfers < NUM_XFERS) { + struct scatterlist sg[4]; + unsigned int num_sg, len; + int *dbuf, err; + bool output = !(xfers % 2); + + /* Consume bufs. */ + while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) { + if (len == 4) + assert(*dbuf == finished - 1); + else if (!fast_vringh) + assert(*dbuf == finished); + finished++; + } + + /* Produce a buffer. */ + dbuf = data + (xfers % (RINGSIZE + 1)); + + if (output) + *dbuf = xfers; + else + *dbuf = -1; + + switch ((xfers / sizeof(*dbuf)) % 4) { + case 0: + /* Nasty three-element sg list. */ + sg_init_table(sg, num_sg = 3); + sg_set_buf(&sg[0], (void *)dbuf, 1); + sg_set_buf(&sg[1], (void *)dbuf + 1, 2); + sg_set_buf(&sg[2], (void *)dbuf + 3, 1); + break; + case 1: + sg_init_table(sg, num_sg = 2); + sg_set_buf(&sg[0], (void *)dbuf, 1); + sg_set_buf(&sg[1], (void *)dbuf + 1, 3); + break; + case 2: + sg_init_table(sg, num_sg = 1); + sg_set_buf(&sg[0], (void *)dbuf, 4); + break; + case 3: + sg_init_table(sg, num_sg = 4); + sg_set_buf(&sg[0], (void *)dbuf, 1); + sg_set_buf(&sg[1], (void *)dbuf + 1, 1); + sg_set_buf(&sg[2], (void *)dbuf + 2, 1); + sg_set_buf(&sg[3], (void *)dbuf + 3, 1); + break; + } + + /* May allocate an indirect, so force it to allocate + * user addr */ + __kmalloc_fake = indirects + (xfers % RINGSIZE) * 4; + if (output) + err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf, + GFP_KERNEL); + else + err = virtqueue_add_inbuf(vq, sg, num_sg, + dbuf, GFP_KERNEL); + + if (err == -ENOSPC) { + if (!virtqueue_enable_cb_delayed(vq)) + continue; + /* Swallow all notifies at once. 
*/ + if (read(to_guest[0], buf, sizeof(buf)) < 1) + break; + + receives++; + virtqueue_disable_cb(vq); + continue; + } + + if (err) + errx(1, "virtqueue_add_in/outbuf: %i", err); + + xfers++; + virtqueue_kick(vq); + } + + /* Any extra? */ + while (finished != xfers) { + int *dbuf; + unsigned int len; + + /* Consume bufs. */ + dbuf = virtqueue_get_buf(vq, &len); + if (dbuf) { + if (len == 4) + assert(*dbuf == finished - 1); + else + assert(len == 0); + finished++; + continue; + } + + if (!virtqueue_enable_cb_delayed(vq)) + continue; + if (read(to_guest[0], buf, sizeof(buf)) < 1) + break; + + receives++; + virtqueue_disable_cb(vq); + } + + printf("Guest: notified %lu, pinged %lu\n", + gvdev.notifies, receives); + vring_del_virtqueue(vq); + return 0; + } +} + +int main(int argc, char *argv[]) +{ + struct virtio_device vdev; + struct virtqueue *vq; + struct vringh vrh; + struct scatterlist guest_sg[RINGSIZE], *sgs[2]; + struct iovec host_riov[2], host_wiov[2]; + struct vringh_iov riov, wiov; + struct vring_used_elem used[RINGSIZE]; + char buf[28]; + u16 head; + int err; + unsigned i; + void *ret; + bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r); + bool fast_vringh = false, parallel = false; + + getrange = getrange_iov; + vdev.features[0] = 0; + + while (argv[1]) { + if (strcmp(argv[1], "--indirect") == 0) + vdev.features[0] |= (1 << VIRTIO_RING_F_INDIRECT_DESC); + else if (strcmp(argv[1], "--eventidx") == 0) + vdev.features[0] |= (1 << VIRTIO_RING_F_EVENT_IDX); + else if (strcmp(argv[1], "--slow-range") == 0) + getrange = getrange_slow; + else if (strcmp(argv[1], "--fast-vringh") == 0) + fast_vringh = true; + else if (strcmp(argv[1], "--parallel") == 0) + parallel = true; + else + errx(1, "Unknown arg %s", argv[1]); + argv++; + } + + if (parallel) + return parallel_test(vdev.features[0], getrange, fast_vringh); + + if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0) + abort(); + __user_addr_max = __user_addr_min + USER_MEM; + memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN)); + + /* Set up guest side. */ + vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, + __user_addr_min, + never_notify_host, never_callback_guest, + "guest vq"); + + /* Set up host side. */ + vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN); + vringh_init_user(&vrh, vdev.features[0], RINGSIZE, true, + vrh.vring.desc, vrh.vring.avail, vrh.vring.used); + + /* No descriptor to get yet... */ + err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); + if (err != 0) + errx(1, "vringh_getdesc_user: %i", err); + + /* Guest puts in a descriptor. */ + memcpy(__user_addr_max - 1, "a", 1); + sg_init_table(guest_sg, 1); + sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1); + sg_init_table(guest_sg+1, 1); + sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2); + sgs[0] = &guest_sg[0]; + sgs[1] = &guest_sg[1]; + + /* May allocate an indirect, so force it to allocate user addr */ + __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN); + err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL); + if (err) + errx(1, "virtqueue_add_sgs: %i", err); + __kmalloc_fake = NULL; + + /* Host retreives it. 
*/ + vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); + vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); + + err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); + if (err != 1) + errx(1, "vringh_getdesc_user: %i", err); + + assert(riov.used == 1); + assert(riov.iov[0].iov_base == __user_addr_max - 1); + assert(riov.iov[0].iov_len == 1); + if (getrange != getrange_slow) { + assert(wiov.used == 1); + assert(wiov.iov[0].iov_base == __user_addr_max - 3); + assert(wiov.iov[0].iov_len == 2); + } else { + assert(wiov.used == 2); + assert(wiov.iov[0].iov_base == __user_addr_max - 3); + assert(wiov.iov[0].iov_len == 1); + assert(wiov.iov[1].iov_base == __user_addr_max - 2); + assert(wiov.iov[1].iov_len == 1); + } + + err = vringh_iov_pull_user(&riov, buf, 5); + if (err != 1) + errx(1, "vringh_iov_pull_user: %i", err); + assert(buf[0] == 'a'); + assert(riov.i == 1); + assert(vringh_iov_pull_user(&riov, buf, 5) == 0); + + memcpy(buf, "bcdef", 5); + err = vringh_iov_push_user(&wiov, buf, 5); + if (err != 2) + errx(1, "vringh_iov_push_user: %i", err); + assert(memcmp(__user_addr_max - 3, "bc", 2) == 0); + assert(wiov.i == wiov.used); + assert(vringh_iov_push_user(&wiov, buf, 5) == 0); + + /* Host is done. */ + err = vringh_complete_user(&vrh, head, err); + if (err != 0) + errx(1, "vringh_complete_user: %i", err); + + /* Guest should see used token now. */ + __kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN); + __kfree_ignore_end = __kfree_ignore_start + 1; + ret = virtqueue_get_buf(vq, &i); + if (ret != &err) + errx(1, "virtqueue_get_buf: %p", ret); + assert(i == 2); + + /* Guest puts in a huge descriptor. */ + sg_init_table(guest_sg, RINGSIZE); + for (i = 0; i < RINGSIZE; i++) { + sg_set_buf(&guest_sg[i], + __user_addr_max - USER_MEM/4, USER_MEM/4); + } + + /* Fill contents with recognisable garbage. */ + for (i = 0; i < USER_MEM/4; i++) + ((char *)__user_addr_max - USER_MEM/4)[i] = i; + + /* This will allocate an indirect, so force it to allocate user addr */ + __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN); + err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL); + if (err) + errx(1, "virtqueue_add_outbuf (large): %i", err); + __kmalloc_fake = NULL; + + /* Host picks it up (allocates new iov). */ + vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); + vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); + + err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); + if (err != 1) + errx(1, "vringh_getdesc_user: %i", err); + + assert(riov.max_num & VRINGH_IOV_ALLOCATED); + assert(riov.iov != host_riov); + if (getrange != getrange_slow) + assert(riov.used == RINGSIZE); + else + assert(riov.used == RINGSIZE * USER_MEM/4); + + assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED)); + assert(wiov.used == 0); + + /* Pull data back out (in odd chunks), should be as expected. */ + for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) { + err = vringh_iov_pull_user(&riov, buf, 3); + if (err != 3 && i + err != RINGSIZE * USER_MEM/4) + errx(1, "vringh_iov_pull_user large: %i", err); + assert(buf[0] == (char)i); + assert(err < 2 || buf[1] == (char)(i + 1)); + assert(err < 3 || buf[2] == (char)(i + 2)); + } + assert(riov.i == riov.used); + vringh_iov_cleanup(&riov); + vringh_iov_cleanup(&wiov); + + /* Complete using multi interface, just because we can. 
*/ + used[0].id = head; + used[0].len = 0; + err = vringh_complete_multi_user(&vrh, used, 1); + if (err) + errx(1, "vringh_complete_multi_user(1): %i", err); + + /* Free up those descriptors. */ + ret = virtqueue_get_buf(vq, &i); + if (ret != &err) + errx(1, "virtqueue_get_buf: %p", ret); + + /* Add lots of descriptors. */ + sg_init_table(guest_sg, 1); + sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1); + for (i = 0; i < RINGSIZE; i++) { + err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL); + if (err) + errx(1, "virtqueue_add_outbuf (multiple): %i", err); + } + + /* Now get many, and consume them all at once. */ + vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); + vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); + + for (i = 0; i < RINGSIZE; i++) { + err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); + if (err != 1) + errx(1, "vringh_getdesc_user: %i", err); + used[i].id = head; + used[i].len = 0; + } + /* Make sure it wraps around ring, to test! */ + assert(vrh.vring.used->idx % RINGSIZE != 0); + err = vringh_complete_multi_user(&vrh, used, RINGSIZE); + if (err) + errx(1, "vringh_complete_multi_user: %i", err); + + /* Free those buffers. */ + for (i = 0; i < RINGSIZE; i++) { + unsigned len; + assert(virtqueue_get_buf(vq, &len) != NULL); + } + + /* Test weird (but legal!) indirect. */ + if (vdev.features[0] & (1 << VIRTIO_RING_F_INDIRECT_DESC)) { + char *data = __user_addr_max - USER_MEM/4; + struct vring_desc *d = __user_addr_max - USER_MEM/2; + struct vring vring; + + /* Force creation of direct, which we modify. */ + vdev.features[0] &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC); + vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, + __user_addr_min, + never_notify_host, + never_callback_guest, + "guest vq"); + + sg_init_table(guest_sg, 4); + sg_set_buf(&guest_sg[0], d, sizeof(*d)*2); + sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1); + sg_set_buf(&guest_sg[2], data + 6, 4); + sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3); + + err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL); + if (err) + errx(1, "virtqueue_add_outbuf (indirect): %i", err); + + vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN); + + /* They're used in order, but double-check... */ + assert(vring.desc[0].addr == (unsigned long)d); + assert(vring.desc[1].addr == (unsigned long)(d+2)); + assert(vring.desc[2].addr == (unsigned long)data + 6); + assert(vring.desc[3].addr == (unsigned long)(d+3)); + vring.desc[0].flags |= VRING_DESC_F_INDIRECT; + vring.desc[1].flags |= VRING_DESC_F_INDIRECT; + vring.desc[3].flags |= VRING_DESC_F_INDIRECT; + + /* First indirect */ + d[0].addr = (unsigned long)data; + d[0].len = 1; + d[0].flags = VRING_DESC_F_NEXT; + d[0].next = 1; + d[1].addr = (unsigned long)data + 1; + d[1].len = 2; + d[1].flags = 0; + + /* Second indirect */ + d[2].addr = (unsigned long)data + 3; + d[2].len = 3; + d[2].flags = 0; + + /* Third indirect */ + d[3].addr = (unsigned long)data + 10; + d[3].len = 5; + d[3].flags = VRING_DESC_F_NEXT; + d[3].next = 1; + d[4].addr = (unsigned long)data + 15; + d[4].len = 6; + d[4].flags = VRING_DESC_F_NEXT; + d[4].next = 2; + d[5].addr = (unsigned long)data + 21; + d[5].len = 7; + d[5].flags = 0; + + /* Host picks it up (allocates new iov). 
*/ + vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); + vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); + + err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); + if (err != 1) + errx(1, "vringh_getdesc_user: %i", err); + + if (head != 0) + errx(1, "vringh_getdesc_user: head %i not 0", head); + + assert(riov.max_num & VRINGH_IOV_ALLOCATED); + if (getrange != getrange_slow) + assert(riov.used == 7); + else + assert(riov.used == 28); + err = vringh_iov_pull_user(&riov, buf, 29); + assert(err == 28); + + /* Data should be linear. */ + for (i = 0; i < err; i++) + assert(buf[i] == i); + vringh_iov_cleanup(&riov); + } + + /* Don't leak memory... */ + vring_del_virtqueue(vq); + free(__user_addr_min); + + return 0; +} diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore new file mode 100644 index 00000000000..44f095fa260 --- /dev/null +++ b/tools/vm/.gitignore @@ -0,0 +1,2 @@ +slabinfo +page-types diff --git a/tools/vm/Makefile b/tools/vm/Makefile index 8e30e5c40f8..3d907dacf2a 100644 --- a/tools/vm/Makefile +++ b/tools/vm/Makefile @@ -1,11 +1,22 @@ # Makefile for vm tools +# +TARGETS=page-types slabinfo + +LIB_DIR = ../lib/api +LIBS = $(LIB_DIR)/libapikfs.a CC = $(CROSS_COMPILE)gcc -CFLAGS = -Wall -Wextra +CFLAGS = -Wall -Wextra -I../lib/ +LDFLAGS = $(LIBS) + +$(TARGETS): $(LIBS) + +$(LIBS): + make -C $(LIB_DIR) -all: page-types slabinfo %: %.c - $(CC) $(CFLAGS) -o $@ $^ + $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS) clean: $(RM) page-types slabinfo + make -C $(LIB_DIR) clean diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index b76edf2f833..c4d6d2e20e0 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -19,7 +19,8 @@ * Authors: Wu Fengguang <fengguang.wu@intel.com> */ -#define _LARGEFILE64_SOURCE +#define _FILE_OFFSET_BITS 64 +#define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <unistd.h> @@ -29,14 +30,19 @@ #include <getopt.h> #include <limits.h> #include <assert.h> +#include <ftw.h> +#include <time.h> +#include <setjmp.h> +#include <signal.h> #include <sys/types.h> #include <sys/errno.h> #include <sys/fcntl.h> #include <sys/mount.h> #include <sys/statfs.h> +#include <sys/mman.h> #include "../../include/uapi/linux/magic.h" #include "../../include/uapi/linux/kernel-page-flags.h" - +#include <api/fs/debugfs.h> #ifndef MAX_PATH # define MAX_PATH 256 @@ -59,12 +65,14 @@ #define PM_PSHIFT_BITS 6 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) -#define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) +#define __PM_PSHIFT(x) (((uint64_t) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) +#define __PM_SOFT_DIRTY (1LL) #define PM_PRESENT PM_STATUS(4LL) #define PM_SWAP PM_STATUS(2LL) +#define PM_SOFT_DIRTY __PM_PSHIFT(__PM_SOFT_DIRTY) /* @@ -83,6 +91,7 @@ #define KPF_OWNER_PRIVATE 37 #define KPF_ARCH 38 #define KPF_UNCACHED 39 +#define KPF_SOFTDIRTY 40 /* [48-] take some arbitrary free slots for expanding overloaded flags * not part of kernel API @@ -132,6 +141,7 @@ static const char * const page_flag_names[] = { [KPF_OWNER_PRIVATE] = "O:owner_private", [KPF_ARCH] = "h:arch", [KPF_UNCACHED] = "c:uncached", + [KPF_SOFTDIRTY] = "f:softdirty", [KPF_READAHEAD] = "I:readahead", [KPF_SLOB_FREE] = "P:slob_free", @@ -154,6 +164,7 @@ static int opt_raw; /* for kernel developers */ static int opt_list; /* list pages (in ranges) */ static int opt_no_summary; 
/* don't show summary */ static pid_t opt_pid; /* process to walk */ +const char * opt_file; #define MAX_ADDR_RANGES 1024 static int nr_addr_ranges; @@ -178,7 +189,7 @@ static int kpageflags_fd; static int opt_hwpoison; static int opt_unpoison; -static char hwpoison_debug_fs[MAX_PATH+1]; +static char *hwpoison_debug_fs; static int hwpoison_inject_fd; static int hwpoison_forget_fd; @@ -249,12 +260,7 @@ static unsigned long do_u64_read(int fd, char *name, if (index > ULONG_MAX / 8) fatal("index overflow: %lu\n", index); - if (lseek(fd, index * 8, SEEK_SET) < 0) { - perror(name); - exit(EXIT_FAILURE); - } - - bytes = read(fd, buf, count * 8); + bytes = pread(fd, buf, count * 8, (off_t)index * 8); if (bytes < 0) { perror(name); exit(EXIT_FAILURE); @@ -339,8 +345,8 @@ static char *page_flag_longname(uint64_t flags) * page list and summary */ -static void show_page_range(unsigned long voffset, - unsigned long offset, uint64_t flags) +static void show_page_range(unsigned long voffset, unsigned long offset, + unsigned long size, uint64_t flags) { static uint64_t flags0; static unsigned long voff; @@ -348,14 +354,16 @@ static void show_page_range(unsigned long voffset, static unsigned long count; if (flags == flags0 && offset == index + count && - (!opt_pid || voffset == voff + count)) { - count++; + size && voffset == voff + count) { + count += size; return; } if (count) { if (opt_pid) printf("%lx\t", voff); + if (opt_file) + printf("%lu\t", voff); printf("%lx\t%lx\t%s\n", index, count, page_flag_name(flags0)); } @@ -363,7 +371,12 @@ static void show_page_range(unsigned long voffset, flags0 = flags; index = offset; voff = voffset; - count = 1; + count = size; +} + +static void flush_page_range(void) +{ + show_page_range(0, 0, 0, 0); } static void show_page(unsigned long voffset, @@ -371,6 +384,8 @@ static void show_page(unsigned long voffset, { if (opt_pid) printf("%lx\t", voffset); + if (opt_file) + printf("%lu\t", voffset); printf("%lx\t%s\n", offset, page_flag_name(flags)); } @@ -417,7 +432,7 @@ static int bit_mask_ok(uint64_t flags) return 1; } -static uint64_t expand_overloaded_flags(uint64_t flags) +static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme) { /* SLOB/SLUB overload several page flags */ if (flags & BIT(SLAB)) { @@ -433,6 +448,9 @@ static uint64_t expand_overloaded_flags(uint64_t flags) if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM)) flags ^= BIT(RECLAIM) | BIT(READAHEAD); + if (pme & PM_SOFT_DIRTY) + flags |= BIT(SOFTDIRTY); + return flags; } @@ -448,91 +466,16 @@ static uint64_t well_known_flags(uint64_t flags) return flags; } -static uint64_t kpageflags_flags(uint64_t flags) +static uint64_t kpageflags_flags(uint64_t flags, uint64_t pme) { - flags = expand_overloaded_flags(flags); - - if (!opt_raw) + if (opt_raw) + flags = expand_overloaded_flags(flags, pme); + else flags = well_known_flags(flags); return flags; } -/* verify that a mountpoint is actually a debugfs instance */ -static int debugfs_valid_mountpoint(const char *debugfs) -{ - struct statfs st_fs; - - if (statfs(debugfs, &st_fs) < 0) - return -ENOENT; - else if (st_fs.f_type != (long) DEBUGFS_MAGIC) - return -ENOENT; - - return 0; -} - -/* find the path to the mounted debugfs */ -static const char *debugfs_find_mountpoint(void) -{ - const char *const *ptr; - char type[100]; - FILE *fp; - - ptr = debugfs_known_mountpoints; - while (*ptr) { - if (debugfs_valid_mountpoint(*ptr) == 0) { - strcpy(hwpoison_debug_fs, *ptr); - return hwpoison_debug_fs; - } - ptr++; - } - - /* give up and parse 
/proc/mounts */ - fp = fopen("/proc/mounts", "r"); - if (fp == NULL) - perror("Can't open /proc/mounts for read"); - - while (fscanf(fp, "%*s %" - STR(MAX_PATH) - "s %99s %*s %*d %*d\n", - hwpoison_debug_fs, type) == 2) { - if (strcmp(type, "debugfs") == 0) - break; - } - fclose(fp); - - if (strcmp(type, "debugfs") != 0) - return NULL; - - return hwpoison_debug_fs; -} - -/* mount the debugfs somewhere if it's not mounted */ - -static void debugfs_mount(void) -{ - const char *const *ptr; - - /* see if it's already mounted */ - if (debugfs_find_mountpoint()) - return; - - ptr = debugfs_known_mountpoints; - while (*ptr) { - if (mount(NULL, *ptr, "debugfs", 0, NULL) == 0) { - /* save the mountpoint */ - strcpy(hwpoison_debug_fs, *ptr); - break; - } - ptr++; - } - - if (*ptr == NULL) { - perror("mount debugfs"); - exit(EXIT_FAILURE); - } -} - /* * page actions */ @@ -541,7 +484,11 @@ static void prepare_hwpoison_fd(void) { char buf[MAX_PATH + 1]; - debugfs_mount(); + hwpoison_debug_fs = debugfs_mount(NULL); + if (!hwpoison_debug_fs) { + perror("mount debugfs"); + exit(EXIT_FAILURE); + } if (opt_hwpoison && !hwpoison_inject_fd) { snprintf(buf, MAX_PATH, "%s/hwpoison/corrupt-pfn", @@ -616,9 +563,9 @@ static size_t hash_slot(uint64_t flags) } static void add_page(unsigned long voffset, - unsigned long offset, uint64_t flags) + unsigned long offset, uint64_t flags, uint64_t pme) { - flags = kpageflags_flags(flags); + flags = kpageflags_flags(flags, pme); if (!bit_mask_ok(flags)) return; @@ -629,7 +576,7 @@ static void add_page(unsigned long voffset, unpoison_page(offset); if (opt_list == 1) - show_page_range(voffset, offset, flags); + show_page_range(voffset, offset, 1, flags); else if (opt_list == 2) show_page(voffset, offset, flags); @@ -640,7 +587,8 @@ static void add_page(unsigned long voffset, #define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ static void walk_pfn(unsigned long voffset, unsigned long index, - unsigned long count) + unsigned long count, + uint64_t pme) { uint64_t buf[KPAGEFLAGS_BATCH]; unsigned long batch; @@ -654,7 +602,7 @@ static void walk_pfn(unsigned long voffset, break; for (i = 0; i < pages; i++) - add_page(voffset + i, index + i, buf[i]); + add_page(voffset + i, index + i, buf[i], pme); index += pages; count -= pages; @@ -679,7 +627,7 @@ static void walk_vma(unsigned long index, unsigned long count) for (i = 0; i < pages; i++) { pfn = pagemap_pfn(buf[i]); if (pfn) - walk_pfn(index + i, pfn, 1); + walk_pfn(index + i, pfn, 1, buf[i]); } index += pages; @@ -730,7 +678,7 @@ static void walk_addr_ranges(void) for (i = 0; i < nr_addr_ranges; i++) if (!opt_pid) - walk_pfn(0, opt_offset[i], opt_size[i]); + walk_pfn(opt_offset[i], opt_offset[i], opt_size[i], 0); else walk_task(opt_offset[i], opt_size[i]); @@ -762,9 +710,7 @@ static void usage(void) " -a|--addr addr-spec Walk a range of pages\n" " -b|--bits bits-spec Walk pages with specified bits\n" " -p|--pid pid Walk process address space\n" -#if 0 /* planned features */ " -f|--file filename Walk file address space\n" -#endif " -l|--list Show page details in ranges\n" " -L|--list-each Show page details one by one\n" " -N|--no-summary Don't show summary info\n" @@ -862,8 +808,157 @@ static void parse_pid(const char *str) fclose(file); } +static void show_file(const char *name, const struct stat *st) +{ + unsigned long long size = st->st_size; + char atime[64], mtime[64]; + long now = time(NULL); + + printf("%s\tInode: %u\tSize: %llu (%llu pages)\n", + name, (unsigned)st->st_ino, + size, (size + page_size - 1) / page_size); + + 
strftime(atime, sizeof(atime), "%c", localtime(&st->st_atime)); + strftime(mtime, sizeof(mtime), "%c", localtime(&st->st_mtime)); + + printf("Modify: %s (%ld seconds ago)\nAccess: %s (%ld seconds ago)\n", + mtime, now - st->st_mtime, + atime, now - st->st_atime); +} + +static sigjmp_buf sigbus_jmp; + +static void * volatile sigbus_addr; + +static void sigbus_handler(int sig, siginfo_t *info, void *ucontex) +{ + (void)sig; + (void)ucontex; + sigbus_addr = info ? info->si_addr : NULL; + siglongjmp(sigbus_jmp, 1); +} + +static struct sigaction sigbus_action = { + .sa_sigaction = sigbus_handler, + .sa_flags = SA_SIGINFO, +}; + +static void walk_file(const char *name, const struct stat *st) +{ + uint8_t vec[PAGEMAP_BATCH]; + uint64_t buf[PAGEMAP_BATCH], flags; + unsigned long nr_pages, pfn, i; + off_t off, end = st->st_size; + int fd; + ssize_t len; + void *ptr; + int first = 1; + + fd = checked_open(name, O_RDONLY|O_NOATIME|O_NOFOLLOW); + + for (off = 0; off < end; off += len) { + nr_pages = (end - off + page_size - 1) / page_size; + if (nr_pages > PAGEMAP_BATCH) + nr_pages = PAGEMAP_BATCH; + len = nr_pages * page_size; + + ptr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off); + if (ptr == MAP_FAILED) + fatal("mmap failed: %s", name); + + /* determine cached pages */ + if (mincore(ptr, len, vec)) + fatal("mincore failed: %s", name); + + /* turn off readahead */ + if (madvise(ptr, len, MADV_RANDOM)) + fatal("madvice failed: %s", name); + + if (sigsetjmp(sigbus_jmp, 1)) { + end = off + sigbus_addr ? sigbus_addr - ptr : 0; + fprintf(stderr, "got sigbus at offset %lld: %s\n", + (long long)end, name); + goto got_sigbus; + } + + /* populate ptes */ + for (i = 0; i < nr_pages ; i++) { + if (vec[i] & 1) + (void)*(volatile int *)(ptr + i * page_size); + } +got_sigbus: + + /* turn off harvesting reference bits */ + if (madvise(ptr, len, MADV_SEQUENTIAL)) + fatal("madvice failed: %s", name); + + if (pagemap_read(buf, (unsigned long)ptr / page_size, + nr_pages) != nr_pages) + fatal("cannot read pagemap"); + + munmap(ptr, len); + + for (i = 0; i < nr_pages; i++) { + pfn = pagemap_pfn(buf[i]); + if (!pfn) + continue; + if (!kpageflags_read(&flags, pfn, 1)) + continue; + if (first && opt_list) { + first = 0; + flush_page_range(); + show_file(name, st); + } + add_page(off / page_size + i, pfn, flags, buf[i]); + } + } + + close(fd); +} + +int walk_tree(const char *name, const struct stat *st, int type, struct FTW *f) +{ + (void)f; + switch (type) { + case FTW_F: + if (S_ISREG(st->st_mode)) + walk_file(name, st); + break; + case FTW_DNR: + fprintf(stderr, "cannot read dir: %s\n", name); + break; + } + return 0; +} + +static void walk_page_cache(void) +{ + struct stat st; + + kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY); + pagemap_fd = checked_open("/proc/self/pagemap", O_RDONLY); + sigaction(SIGBUS, &sigbus_action, NULL); + + if (stat(opt_file, &st)) + fatal("stat failed: %s\n", opt_file); + + if (S_ISREG(st.st_mode)) { + walk_file(opt_file, &st); + } else if (S_ISDIR(st.st_mode)) { + /* do not follow symlinks and mountpoints */ + if (nftw(opt_file, walk_tree, 64, FTW_MOUNT | FTW_PHYS) < 0) + fatal("nftw failed: %s\n", opt_file); + } else + fatal("unhandled file type: %s\n", opt_file); + + close(kpageflags_fd); + close(pagemap_fd); + signal(SIGBUS, SIG_DFL); +} + static void parse_file(const char *name) { + opt_file = name; } static void parse_addr_range(const char *optarg) @@ -1054,15 +1149,20 @@ int main(int argc, char *argv[]) if (opt_list && opt_pid) printf("voffset\t"); + if (opt_list && 
opt_file) + printf("foffset\t"); if (opt_list == 1) printf("offset\tlen\tflags\n"); if (opt_list == 2) printf("offset\tflags\n"); - walk_addr_ranges(); + if (opt_file) + walk_page_cache(); + else + walk_addr_ranges(); if (opt_list == 1) - show_page_range(0, 0, 0); /* drain the buffer */ + flush_page_range(); if (opt_no_summary) return 0; |
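
The virtio_test.c hunks above replace the old virtqueue_add_buf() call with the split virtqueue_add_outbuf()/virtqueue_add_inbuf() interface, and virtqueue_kick() now returns a bool rather than void. Below is a minimal sketch of that producer pattern, assuming the tools/virtio stub headers added in this patch; queue_one() and its parameters are hypothetical names, not code from the patch.

/*
 * Sketch only: mirrors the updated run_test() loop in virtio_test.c.
 * Build a one-entry scatterlist, queue it as an out-buffer, then kick
 * the host; a failed kick is reported the same way the test does (-1).
 */
#include <linux/virtio.h>        /* tools/virtio stubs, not kernel headers */
#include <linux/scatterlist.h>

static int queue_one(struct virtqueue *vq, void *buf, unsigned int len)
{
        struct scatterlist sg;
        int r;

        sg_init_one(&sg, buf, len);

        /* returns 0 on success; a full ring shows up as -ENOSPC,
         * which is what vringh_test.c checks for */
        r = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
        if (r)
                return r;

        /* virtqueue_kick() can now fail instead of returning void */
        if (!virtqueue_kick(vq))
                return -1;

        return 0;
}

The page-types.c hunks switch do_u64_read() from lseek()+read() to pread() and add a --file mode that resolves page-cache pages through /proc/self/pagemap. A minimal sketch of that lookup follows, assuming the standard pagemap entry layout referenced by the PM_PRESENT/PM_PFRAME macros above; vaddr_to_pfn() is a hypothetical helper, not part of page-types.c.

/*
 * Sketch only: read one 64-bit pagemap entry for a virtual address
 * with pread(), the same access pattern do_u64_read() now uses.
 */
#include <stdint.h>
#include <unistd.h>

static uint64_t vaddr_to_pfn(int pagemap_fd, unsigned long vaddr)
{
        uint64_t entry;
        long page_size = sysconf(_SC_PAGESIZE);
        off_t off = (off_t)(vaddr / page_size) * sizeof(entry);

        /* one entry per virtual page; pread() avoids a separate lseek() */
        if (pread(pagemap_fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry))
                return 0;

        /* bit 63 = page present, low 55 bits = PFN (cf. PM_PFRAME above) */
        if (!(entry >> 63))
                return 0;
        return entry & ((1ULL << 55) - 1);
}

The caller opens /proc/self/pagemap read-only and passes the descriptor in, much as walk_page_cache() does before handing each regular file to walk_file().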