Diffstat (limited to 'arch/arm/kernel/perf_event_cpu.c')
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c  176
1 file changed, 110 insertions, 66 deletions
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 5f6620684e2..af9e35e8836 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -25,6 +25,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
@@ -33,6 +35,7 @@
 
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
+static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -68,7 +71,27 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 
 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
+}
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+	disable_percpu_irq(irq);
 }
 
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
@@ -78,12 +101,18 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		free_percpu_irq(irq, &percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, cpu_pmu);
+		}
 	}
 }
 
@@ -97,36 +126,48 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 	if (irqs < 1) {
-		pr_err("no irqs for PMUs defined\n");
-		return -ENODEV;
+		printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
 	}
 
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				   irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
-				  cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
 			return err;
 		}
-
-		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					   irq, i);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  cpu_pmu);
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		}
 	}
 
 	return 0;
@@ -140,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
+		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}
 
 	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
@@ -147,8 +189,12 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->free_irq = cpu_pmu_free_irq;
 
 	/* Ensure the PMU has sane values out of reset. */
-	if (cpu_pmu && cpu_pmu->reset)
+	if (cpu_pmu->reset)
 		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 }
 
 /*
@@ -157,8 +203,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
-				    unsigned long action, void *hcpu)
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
 {
 	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
 		return NOTIFY_DONE;
@@ -171,7 +217,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+static struct notifier_block cpu_pmu_hotplug_notifier = {
 	.notifier_call = cpu_pmu_notify,
 };
 
@@ -179,7 +225,9 @@ static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
  * PMU platform driver and devicetree bindings.
  */
 static struct of_device_id cpu_pmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
 	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
 	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
 	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
@@ -187,6 +235,7 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
 	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init},
 	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init},
+	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
 	{},
 };
 
@@ -201,48 +250,37 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
 	int cpu = get_cpu();
-	unsigned long cpuid = read_cpuid_id();
-	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
-	unsigned long part_number = (cpuid & 0xFFF0);
+	unsigned long implementor = read_cpuid_implementor();
+	unsigned long part_number = read_cpuid_part_number();
 	int ret = -ENODEV;
 
 	pr_info("probing PMU on CPU %d\n", cpu);
 
 	/* ARM Ltd CPUs. */
-	if (0x41 == implementor) {
+	if (implementor == ARM_CPU_IMP_ARM) {
 		switch (part_number) {
-		case 0xB360:	/* ARM1136 */
-		case 0xB560:	/* ARM1156 */
-		case 0xB760:	/* ARM1176 */
+		case ARM_CPU_PART_ARM1136:
+		case ARM_CPU_PART_ARM1156:
+		case ARM_CPU_PART_ARM1176:
 			ret = armv6pmu_init(pmu);
 			break;
-		case 0xB020:	/* ARM11mpcore */
+		case ARM_CPU_PART_ARM11MPCORE:
 			ret = armv6mpcore_pmu_init(pmu);
 			break;
-		case 0xC080:	/* Cortex-A8 */
+		case ARM_CPU_PART_CORTEX_A8:
 			ret = armv7_a8_pmu_init(pmu);
 			break;
-		case 0xC090:	/* Cortex-A9 */
+		case ARM_CPU_PART_CORTEX_A9:
 			ret = armv7_a9_pmu_init(pmu);
 			break;
-		case 0xC050:	/* Cortex-A5 */
-			ret = armv7_a5_pmu_init(pmu);
-			break;
-		case 0xC0F0:	/* Cortex-A15 */
-			ret = armv7_a15_pmu_init(pmu);
-			break;
-		case 0xC070:	/* Cortex-A7 */
-			ret = armv7_a7_pmu_init(pmu);
-			break;
 		}
 	/* Intel CPUs [xscale]. */
-	} else if (0x69 == implementor) {
-		part_number = (cpuid >> 13) & 0x7;
-		switch (part_number) {
-		case 1:
+	} else if (implementor == ARM_CPU_IMP_INTEL) {
+		switch (xscale_cpu_arch_version()) {
+		case ARM_CPU_XSCALE_ARCH_V1:
 			ret = xscale1pmu_init(pmu);
 			break;
-		case 2:
+		case ARM_CPU_XSCALE_ARCH_V2:
 			ret = xscale2pmu_init(pmu);
 			break;
 		}
@@ -255,7 +293,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	int (*init_fn)(struct arm_pmu *);
+	const int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
 	struct arm_pmu *pmu;
 	int ret = -ENODEV;
@@ -271,6 +309,9 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	cpu_pmu = pmu;
+	cpu_pmu->plat_device = pdev;
+
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
 		ret = init_fn(pmu);
@@ -279,17 +320,20 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 	}
 
 	if (ret) {
-		pr_info("failed to register PMU devices!");
-		kfree(pmu);
-		return ret;
+		pr_info("failed to probe PMU!");
+		goto out_free;
 	}
 
-	cpu_pmu = pmu;
-	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
-	armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
 
-	return 0;
+	if (!ret)
+		return 0;
+
+out_free:
+	pr_info("failed to register PMU devices!");
+	kfree(pmu);
+	return ret;
 }
 
 static struct platform_driver cpu_pmu_driver = {
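Note on the per-CPU IRQ path: when the PMU interrupt is a PPI, irq_is_percpu() (the reason for the new <linux/irqdesc.h> include) routes setup through request_percpu_irq() with the __percpu variable percpu_pmu as dev_id, and each CPU then enables its own line via the on_each_cpu() cross-calls. The subtlety is on the handler side: for a per-CPU IRQ the genirq core passes the handler the current CPU's slot of the registered __percpu pointer, not the pointer itself. A minimal sketch of a handler that copes with both registration styles; the function name and body are illustrative and not part of this diff:

#include <linux/interrupt.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>

struct arm_pmu;

static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);

/* Hypothetical handler usable with both request_irq() and
 * request_percpu_irq(). */
static irqreturn_t pmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *pmu;

	if (irq_is_percpu(irq))
		pmu = *(struct arm_pmu **)dev;	/* this CPU's percpu_pmu slot */
	else
		pmu = dev;			/* dev_id given to request_irq() */

	/* ... read the PMU overflow status and forward samples to perf ... */
	return pmu ? IRQ_HANDLED : IRQ_NONE;
}

This is also why cpu_pmu_init() now fills per_cpu(percpu_pmu, cpu) on every CPU before interrupts can fire.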
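The probe_current_pmu() hunk is behavior-preserving: the open-coded MIDR masks are replaced by the cputype.h accessors and named ARM_CPU_* constants, with no change to the decode itself. For reference, a standalone sketch of that decode; the macro names here merely mirror the kernel's read_cpuid_implementor()/read_cpuid_part_number(), and the sample MIDR is an illustrative Cortex-A9 value:

#include <stdio.h>

/* MIDR[31:24] is the implementer; MIDR[15:4] is the part number,
 * conventionally kept shifted in place (mask 0xFFF0). */
#define MIDR_IMPLEMENTOR(midr)	(((midr) & 0xFF000000UL) >> 24)
#define MIDR_PART_NUMBER(midr)	((midr) & 0xFFF0UL)

int main(void)
{
	unsigned long midr = 0x410FC090UL;	/* sample Cortex-A9 MIDR */

	/* Prints implementor=0x41 and part=0xc090, matching the kernel's
	 * ARM_CPU_IMP_ARM and ARM_CPU_PART_CORTEX_A9 constants. */
	printf("implementor=0x%02lx part=0x%04lx\n",
	       MIDR_IMPLEMENTOR(midr), MIDR_PART_NUMBER(midr));
	return 0;
}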
