author	Anton Blanchard <anton@samba.org>	2012-02-15 18:48:22 +0000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2012-02-16 16:24:35 +1100
commit	9a45a9407c69d068500923480884661e2b9cc421 (patch)
tree	8ff43b5fd98ab68352dc4dd0823401a5736afab2 /arch
parent	64f8c13561fbd2f1a8b4a8975b2aee73b561093e (diff)
powerpc/perf: power_pmu_start restores incorrect values, breaking frequency events
perf on POWER stopped working after commit e050e3f0a71b (perf: Fix broken interrupt rate throttling). That patch exposed a bug in the POWER perf_events code.

Since the PMCs count upwards and take an exception when the top bit is set, we want to write 0x80000000 - left in power_pmu_start. We were instead programming in left, which effectively disables the counter until we eventually hit 0x80000000. This could take seconds or longer.

With the patch applied I get the expected number of samples:

SAMPLE events: 9948

Signed-off-by: Anton Blanchard <anton@samba.org>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: <stable@kernel.org>
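As an illustration of the arithmetic described above, here is a minimal userspace sketch (ordinary C, not kernel code; pmc_start_value is an illustrative name, the real logic lives in power_pmu_start()/write_pmc()):

/*
 * The PMC counts upward and raises a performance monitor exception when
 * its most-significant bit (0x80000000) becomes set. To get an interrupt
 * after "left" more events, the register must start at 0x80000000 - left,
 * not at "left" itself.
 */
#include <stdio.h>

static unsigned long pmc_start_value(long long left)
{
	unsigned long val = 0;

	/* If 2^31 or more events remain, start at 0 and let the counter
	 * run a full 2^31 events before the next interrupt. */
	if (left < 0x80000000LL)
		val = 0x80000000LL - left;

	return val;
}

int main(void)
{
	long long left = 10000;	/* events remaining until the next sample */

	printf("broken: writing %lld means %lld events pass before overflow\n",
	       left, 0x80000000LL - left);
	printf("fixed:  writing %#lx means %lld events pass before overflow\n",
	       pmc_start_value(left), left);
	return 0;
}

With a period of 10000 events, the old code left roughly 2.1 billion events before the counter overflowed, which is why samples appeared to stop for seconds at a time.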
Diffstat (limited to 'arch')
-rw-r--r-- arch/powerpc/kernel/perf_event.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 10a140f82cb..64483fde95c 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
 {
 	unsigned long flags;
 	s64 left;
+	unsigned long val;
 
 	if (!event->hw.idx || !event->hw.sample_period)
 		return;
@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
 
 	event->hw.state = 0;
 	left = local64_read(&event->hw.period_left);
-	write_pmc(event->hw.idx, left);
+
+	val = 0;
+	if (left < 0x80000000L)
+		val = 0x80000000L - left;
+
+	write_pmc(event->hw.idx, val);
 
 	perf_event_update_userpage(event);
 	perf_pmu_enable(event->pmu);