Diffstat (limited to 'arch/arm/kernel/perf_event_cpu.c')
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c | 126 ++++++++++++++++++++----------
1 file changed, 83 insertions(+), 43 deletions(-)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d6147b2001..af9e35e8836 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -25,6 +25,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
@@ -33,6 +35,7 @@
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
+static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -68,7 +71,27 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 
 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
+}
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+	disable_percpu_irq(irq);
 }
 
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
@@ -78,12 +101,18 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		free_percpu_irq(irq, &percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, cpu_pmu);
+		}
 	}
 }
 
@@ -97,37 +126,48 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 	if (irqs < 1) {
-		pr_err("no irqs for PMUs defined\n");
-		return -ENODEV;
+		printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
 	}
 
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				    irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handler,
-				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-				  cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
 			return err;
 		}
-
-		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					    irq, i);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  cpu_pmu);
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		}
 	}
 
 	return 0;
@@ -141,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
+		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}
 
 	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
@@ -150,6 +191,10 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu->reset)
 		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 }
 
 /*
@@ -180,7 +225,9 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
  * PMU platform driver and devicetree bindings.
  */
 static struct of_device_id cpu_pmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
 	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
 	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
 	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
@@ -188,6 +235,7 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
 	{.compatible = "arm,arm1176-pmu",	.data = armv6pmu_init},
 	{.compatible = "arm,arm1136-pmu",	.data = armv6pmu_init},
+	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
 	{},
 };
 
@@ -225,15 +273,6 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 		case ARM_CPU_PART_CORTEX_A9:
 			ret = armv7_a9_pmu_init(pmu);
 			break;
-		case ARM_CPU_PART_CORTEX_A5:
-			ret = armv7_a5_pmu_init(pmu);
-			break;
-		case ARM_CPU_PART_CORTEX_A15:
-			ret = armv7_a15_pmu_init(pmu);
-			break;
-		case ARM_CPU_PART_CORTEX_A7:
-			ret = armv7_a7_pmu_init(pmu);
-			break;
 		}
 	/* Intel CPUs [xscale]. */
 	} else if (implementor == ARM_CPU_IMP_INTEL) {
@@ -254,7 +293,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	int (*init_fn)(struct arm_pmu *);
+	const int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
 	struct arm_pmu *pmu;
 	int ret = -ENODEV;
@@ -270,6 +309,9 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	cpu_pmu = pmu;
+	cpu_pmu->plat_device = pdev;
+
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
 		ret = init_fn(pmu);
@@ -282,8 +324,6 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 		goto out_free;
 	}
 
-	cpu_pmu = pmu;
-	cpu_pmu->plat_device = pdev;
 	cpu_pmu_init(cpu_pmu);
 	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
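
Note: the core of this patch is the split between per-CPU interrupts (PPIs), which are
requested once and then unmasked on every CPU, and the old per-counter SPI loop, which
is kept as the fallback. The sketch below shows that pattern in isolation; it is not
part of the patch, and my_percpu_dev, my_handler, my_enable_percpu_irq and
my_request_irq are hypothetical names used only for illustration.

/* Minimal sketch of the percpu-IRQ request pattern, assuming a kernel
 * new enough to have irq_is_percpu() (v3.13+, same series as this patch).
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(void *, my_percpu_dev);

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* For a percpu IRQ, dev_id is this CPU's slot in my_percpu_dev. */
	return IRQ_HANDLED;
}

static void my_enable_percpu_irq(void *data)
{
	int irq = (long)data;

	/* Must run on the target CPU: each CPU unmasks its own copy of the PPI. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static int my_request_irq(int irq)
{
	int err;

	if (irq_is_percpu(irq)) {
		/* One request registers the handler for every CPU's line... */
		err = request_percpu_irq(irq, my_handler, "my-dev",
					 &my_percpu_dev);
		if (err)
			return err;
		/* ...but the line stays masked until enabled per CPU. */
		on_each_cpu(my_enable_percpu_irq, (void *)(long)irq, 1);
		return 0;
	}

	/* Ordinary SPI: a single request_irq() is enough. */
	return request_irq(irq, my_handler,
			   IRQF_NOBALANCING | IRQF_NO_THREAD, "my-dev", NULL);
}

This mirrors why the patch pairs request_percpu_irq() with on_each_cpu(cpu_pmu_enable_percpu_irq, ...):
unlike request_irq(), request_percpu_irq() does not auto-enable the interrupt, so each
CPU must call enable_percpu_irq() on itself before its PMU can raise overflow interrupts.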
