-rw-r--r-- | arch/arm/mach-prima2/irq.c        |  6
-rw-r--r-- | arch/arm/mach-tegra/flowctrl.c    |  4
-rw-r--r-- | arch/x86/include/asm/kvm_para.h   |  3
-rw-r--r-- | arch/x86/kernel/acpi/boot.c       |  2
-rw-r--r-- | arch/x86/kernel/microcode_intel.c | 14
-rw-r--r-- | drivers/crypto/Kconfig            |  1
-rw-r--r-- | drivers/md/md.c                   |  2
-rw-r--r-- | drivers/md/raid10.c               | 56
-rw-r--r-- | include/linux/ftrace_event.h      |  2
-rw-r--r-- | kernel/sched/core.c               |  2
-rw-r--r-- | kernel/trace/trace_events.c       |  5
-rw-r--r-- | kernel/trace/trace_export.c       |  1
-rw-r--r-- | tools/perf/Makefile               |  4
-rw-r--r-- | tools/perf/builtin-stat.c         | 40
-rw-r--r-- | tools/perf/util/header.c          |  2
15 files changed, 100 insertions, 44 deletions
diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c
index 37c2de9b6f2..a7b9415d30f 100644
--- a/arch/arm/mach-prima2/irq.c
+++ b/arch/arm/mach-prima2/irq.c
@@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 static __init void sirfsoc_irq_init(void)
 {
 	sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32);
-	sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32);
+	sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32,
+		SIRFSOC_INTENAL_IRQ_END + 1 - 32);
 
 	writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0);
 	writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1);
@@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void)
 	if (!sirfsoc_intc_base)
 		panic("unable to map intc cpu registers\n");
 
-	irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
+	irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0,
+		&irq_domain_simple_ops, NULL);
 
 	of_node_put(np);
 
diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c
index fef66a7486e..f07488e0bd3 100644
--- a/arch/arm/mach-tegra/flowctrl.c
+++ b/arch/arm/mach-tegra/flowctrl.c
@@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value)
 
 void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
 {
-	return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+	return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
 }
 
 void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
 {
-	return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+	return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
 }
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c3767cfa..183922e13de 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
 	unsigned int eax, ebx, ecx, edx;
 	char signature[13];
 
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
 	memcpy(signature + 0, &ebx, 4);
 	memcpy(signature + 4, &ecx, 4);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a415b1f4436..7c439fe4941 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 3ca42d0e43a..0327e2b3c40 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 	memset(csig, 0, sizeof(*csig));
 
-	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
-	    cpu_has(c, X86_FEATURE_IA64)) {
-		pr_err("CPU%d not a capable Intel processor\n", cpu_num);
-		return -1;
-	}
-
 	csig->sig = cpuid_eax(0x00000001);
 
 	if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
 
 struct microcode_ops * __init init_intel_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64)) {
+		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	return &microcode_intel_ops;
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ab9abb46d01..dd414d9350e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
 	select CRYPTO_ALGAPI
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER2
+	select CRYPTO_HASH
 	help
 	  This driver allows you to utilize the Cryptographic Engines and
 	  Security Accelerator (CESA) which can be found on the Marvell Orion
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 477eb2e180c..01233d855eb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
 	synchronize_rcu();
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
+
+	del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c8dbb84d535..3e7b1548111 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 
 	return size << conf->chunk_shift;
 }
 
+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+	/* Calculate the number of sectors-per-device that will
+	 * actually be used, and set conf->dev_sectors and
+	 * conf->stride
+	 */
+
+	size = size >> conf->chunk_shift;
+	sector_div(size, conf->far_copies);
+	size = size * conf->raid_disks;
+	sector_div(size, conf->near_copies);
+	/* 'size' is now the number of chunks in the array */
+	/* calculate "used chunks per device" */
+	size = size * conf->copies;
+
+	/* We need to round up when dividing by raid_disks to
+	 * get the stride size.
+	 */
+	size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+	conf->dev_sectors = size << conf->chunk_shift;
+
+	if (conf->far_offset)
+		conf->stride = 1 << conf->chunk_shift;
+	else {
+		sector_div(size, conf->near_copies);
+		conf->stride = size << conf->chunk_shift;
+	}
+}
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
 	struct r10conf *conf = NULL;
 	int nc, fc, fo;
-	sector_t stride, size;
 	int err = -EINVAL;
 
 	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	if (!conf->r10bio_pool)
 		goto out;
 
-	size = mddev->dev_sectors >> conf->chunk_shift;
-	sector_div(size, fc);
-	size = size * conf->raid_disks;
-	sector_div(size, nc);
-	/* 'size' is now the number of chunks in the array */
-	/* calculate "used chunks per device" in 'stride' */
-	stride = size * conf->copies;
-
-	/* We need to round up when dividing by raid_disks to
-	 * get the stride size.
-	 */
-	stride += conf->raid_disks - 1;
-	sector_div(stride, conf->raid_disks);
-
-	conf->dev_sectors = stride << conf->chunk_shift;
-
-	if (fo)
-		stride = 1;
-	else
-		sector_div(stride, fc);
-	conf->stride = stride << conf->chunk_shift;
-
+	calc_sectors(conf, mddev->dev_sectors);
 
 	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
@@ -3468,7 +3475,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
 		mddev->recovery_cp = oldsize;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
-	mddev->dev_sectors = sectors;
+	calc_sectors(conf, sectors);
+	mddev->dev_sectors = conf->dev_sectors;
 	mddev->resync_max_sectors = size;
 	return 0;
 }
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5f3f3be5af0..176a939d154 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -179,6 +179,7 @@ enum {
 	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
+	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 };
 
 enum {
@@ -187,6 +188,7 @@ enum {
 	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
+	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 };
 
 struct ftrace_event_call {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533a688ce2..e5212ae294f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6382,6 +6382,8 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 			if (!sg)
 				return -ENOMEM;
 
+			sg->next = sg;
+
 			*per_cpu_ptr(sdd->sg, j) = sg;
 
 			sgp = kzalloc_node(sizeof(struct sched_group_power),
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 079a93ae8a9..29111da1d10 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -294,6 +294,9 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
 		if (!call->name || !call->class || !call->class->reg)
 			continue;
 
+		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
+			continue;
+
 		if (match &&
 		    strcmp(match, call->name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
@@ -1164,7 +1167,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		return -1;
 	}
 
-	if (call->class->reg)
+	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 3dd15e8bc85..e039906b037 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -180,6 +180,7 @@ struct ftrace_event_call __used event_##call = {			\
 	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
+	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE,		\
 };									\
 struct ftrace_event_call __used						\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 9bf3fc75934..92271d32bc3 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -774,10 +774,10 @@ $(OUTPUT)perf.o perf.spec \
 # over the general rule for .o
 
 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
 
 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
 
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c941bb640f4..1e5e9b270f5 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -283,6 +283,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 {
 	struct perf_event_attr *attr = &evsel->attr;
 	struct xyarray *group_fd = NULL;
+	bool exclude_guest_missing = false;
+	int ret;
 
 	if (group && evsel != first)
 		group_fd = first->fd;
@@ -293,16 +295,39 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 
 	attr->inherit = !no_inherit;
 
-	if (system_wide)
-		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+retry:
+	if (exclude_guest_missing)
+		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+
+	if (system_wide) {
+		ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
 						group, group_fd);
+		if (ret)
+			goto check_ret;
+		return 0;
+	}
+
 	if (!target_pid && !target_tid && (!group || evsel == first)) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 
-	return perf_evsel__open_per_thread(evsel, evsel_list->threads,
-					   group, group_fd);
+	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
+					  group, group_fd);
+	if (!ret)
+		return 0;
+	/* fall through */
+check_ret:
+	if (ret && errno == EINVAL) {
+		if (!exclude_guest_missing &&
+		    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+			pr_debug("Old kernel, cannot exclude "
+				 "guest or host samples.\n");
+			exclude_guest_missing = true;
+			goto retry;
+		}
+	}
+	return ret;
 }
 
 /*
@@ -463,8 +488,13 @@ static int run_perf_stat(int argc __used, const char **argv)
 
 	list_for_each_entry(counter, &evsel_list->entries, node) {
 		if (create_perf_stat_counter(counter, first) < 0) {
+			/*
+			 * PPC returns ENXIO for HW counters until 2.6.37
+			 * (behavior changed with commit b0a873e).
+			 */
 			if (errno == EINVAL || errno == ENOSYS ||
-			    errno == ENOENT || errno == EOPNOTSUPP) {
+			    errno == ENOENT || errno == EOPNOTSUPP ||
+			    errno == ENXIO) {
 				if (verbose)
 					ui__warning("%s event is not supported by the kernel.\n",
 						    event_name(counter));
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4c7c2d73251..c0b70c697a3 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -296,7 +296,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 	if (mkdir_p(filename, 0755))
 		goto out_free;
 
-	snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+	snprintf(filename + len, size - len, "/%s", sbuild_id);
 
 	if (access(filename, F_OK)) {
 		if (is_kallsyms) {