| author | Will Deacon <will.deacon@arm.com> | 2012-03-06 17:34:50 +0100 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-03-12 12:31:39 -0700 |
| commit | 288733a6065034d1870b02b164da0fb2d8452178 (patch) | |
| tree | c3e47649068e0ef4356e3c7886dd5e1b2bd91777 /arch/arm/kernel | |
| parent | 075964ad4370162964f41ba1136b8581ab1a302a (diff) | |
ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers
commit f6f5a30c834135c9f2fa10400c59ebbdd9188567 upstream.
The PMU IRQ handlers in perf assume that if a counter has overflowed
then perf must be responsible. In the paranoid world of crazy hardware,
this could be false, so check that we do have a valid event before
attempting to dereference NULL in the interrupt path.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/arm/kernel')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm/kernel/perf_event_v6.c | 20 |
| -rw-r--r-- | arch/arm/kernel/perf_event_v7.c | 4 |
| -rw-r--r-- | arch/arm/kernel/perf_event_xscale.c | 6 |

3 files changed, 12 insertions, 18 deletions
```diff
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index ccbf9856456..0ad3c6fec23 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -463,23 +463,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -509,7 +492,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index c63934294e0..104931924c8 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1029,6 +1029,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 5d36557bd72..ea92c0cac10 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -253,6 +253,9 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
@@ -590,6 +593,9 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
```
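For illustration only, a minimal user-space sketch of the guard this patch adds to each handler: the per-counter loop skips any slot that has no perf event attached before dereferencing it, rather than trusting the hardware overflow/enable bits alone. All names here (`fake_pmu_handle_irq`, `fake_cpu_hw_events`, `counter_has_overflowed`, `NUM_COUNTERS`) are invented stand-ins for the sketch, not the kernel symbols touched by the patch.

```c
#include <stdio.h>

#define NUM_COUNTERS 4

/* Stand-in for struct perf_event; only what the sketch needs. */
struct fake_event {
	long overflows;
};

/* Stand-in for the per-CPU cpuc->events[] array. */
struct fake_cpu_hw_events {
	struct fake_event *events[NUM_COUNTERS];
};

/* Pretend overflow status register: one bit per counter. */
static int counter_has_overflowed(unsigned long status, int idx)
{
	return status & (1UL << idx);
}

/* Sketch of the IRQ-handler loop after the patch: bail out early when
 * no event owns the counter, so a spurious hardware overflow cannot
 * lead to a NULL dereference. */
static void fake_pmu_handle_irq(struct fake_cpu_hw_events *cpuc,
				unsigned long overflow_status)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		struct fake_event *event = cpuc->events[idx];

		/* Ignore if we don't have an event (the added check). */
		if (!event)
			continue;

		if (!counter_has_overflowed(overflow_status, idx))
			continue;

		event->overflows++;
	}
}

int main(void)
{
	struct fake_event ev = { 0 };
	/* Only counter 1 has an event; counters 0, 2, 3 are unclaimed. */
	struct fake_cpu_hw_events cpuc = {
		.events = { NULL, &ev, NULL, NULL }
	};

	/* Pretend counters 0 and 1 both overflowed: without the !event
	 * check, handling counter 0 would dereference NULL. */
	fake_pmu_handle_irq(&cpuc, 0x3);
	printf("counter 1 overflows handled: %ld\n", ev.overflows);
	return 0;
}
```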