author    Arnd Bergmann <arnd@arndb.de>    2012-03-15 15:19:05 +0000
committer Arnd Bergmann <arnd@arndb.de>    2012-03-15 15:20:07 +0000
commit    f4e2467bad53023589cbff18dd1ab6e0aa3f004c (patch)
tree      8d7abbf418eabd25bbcdc9b6de2f8216d2eaa616 /arch/arm/kernel
parent    e3643b77de143c5548ec93abd8aa68f4123295ea (diff)
parent    a6de3df4f172e124280d88e617ee7d29f7af970b (diff)
Merge branch 'ep93xx-for-arm-soc' of git://github.com/RyanMallon/linux-2.6 into next/cleanup
* 'ep93xx-for-arm-soc' of git://github.com/RyanMallon/linux-2.6:
  ep93xx: Remove unnecessary includes of ep93xx-regs.h
  ep93xx: Move EP93XX_SYSCON defines to SoC private header
  ep93xx: Move crunch code to mach-ep93xx directory
  ep93xx: Make syscon access functions private to SoC
  ep93xx: Configure GPIO ports in core code
  ep93xx: Move peripheral defines to local SoC header
  ep93xx: Convert the watchdog driver into a platform device.
  ep93xx: Use ioremap for backlight driver
  ep93xx: Move GPIO defines to gpio-ep93xx.h
  ep93xx: Don't use system controller defines in audio drivers
  ep93xx: Move PHYS_BASE defines to local SoC header file

(update to v3.3-rc7)

Conflicts:
	arch/arm/mach-s3c2440/common.h
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile             |   3
-rw-r--r--  arch/arm/kernel/crunch-bits.S        | 305
-rw-r--r--  arch/arm/kernel/crunch.c             |  88
-rw-r--r--  arch/arm/kernel/ecard.c              |   1
-rw-r--r--  arch/arm/kernel/perf_event.c         |  45
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      |  22
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      |  11
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  |  20
8 files changed, 64 insertions(+), 431 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 43b740d0e37..f16d7652f34 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -62,9 +62,6 @@ obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
-obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
-AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
-
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
deleted file mode 100644
index 0ec9bb48fab..00000000000
--- a/arch/arm/kernel/crunch-bits.S
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * arch/arm/kernel/crunch-bits.S
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
- * Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <mach/ep93xx-regs.h>
-
-/*
- * We can't use hex constants here due to a bug in gas.
- */
-#define CRUNCH_MVDX0 0
-#define CRUNCH_MVDX1 8
-#define CRUNCH_MVDX2 16
-#define CRUNCH_MVDX3 24
-#define CRUNCH_MVDX4 32
-#define CRUNCH_MVDX5 40
-#define CRUNCH_MVDX6 48
-#define CRUNCH_MVDX7 56
-#define CRUNCH_MVDX8 64
-#define CRUNCH_MVDX9 72
-#define CRUNCH_MVDX10 80
-#define CRUNCH_MVDX11 88
-#define CRUNCH_MVDX12 96
-#define CRUNCH_MVDX13 104
-#define CRUNCH_MVDX14 112
-#define CRUNCH_MVDX15 120
-#define CRUNCH_MVAX0L 128
-#define CRUNCH_MVAX0M 132
-#define CRUNCH_MVAX0H 136
-#define CRUNCH_MVAX1L 140
-#define CRUNCH_MVAX1M 144
-#define CRUNCH_MVAX1H 148
-#define CRUNCH_MVAX2L 152
-#define CRUNCH_MVAX2M 156
-#define CRUNCH_MVAX2H 160
-#define CRUNCH_MVAX3L 164
-#define CRUNCH_MVAX3M 168
-#define CRUNCH_MVAX3H 172
-#define CRUNCH_DSPSC 176
-
-#define CRUNCH_SIZE 184
-
- .text
-
-/*
- * Lazy switching of crunch coprocessor context
- *
- * r10 = struct thread_info pointer
- * r9 = ret_from_exception
- * lr = undefined instr exit
- *
- * called from prefetch exception handler with interrupts disabled
- */
-ENTRY(crunch_task_enable)
- ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
-
- ldr r1, [r8, #0x80]
- tst r1, #0x00800000 @ access to crunch enabled?
- movne pc, lr @ if so no business here
- mov r3, #0xaa @ unlock syscon swlock
- str r3, [r8, #0xc0]
- orr r1, r1, #0x00800000 @ enable access to crunch
- str r1, [r8, #0x80]
-
- ldr r3, =crunch_owner
- add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r2, [sp, #60] @ current task pc value
- ldr r1, [r3] @ get current crunch owner
- str r0, [r3] @ this task now owns crunch
- sub r2, r2, #4 @ adjust pc back
- str r2, [sp, #60]
-
- ldr r2, [r8, #0x80]
- mov r2, r2 @ flush out enable (@@@)
-
- teq r1, #0 @ test for last ownership
- mov lr, r9 @ normal exit from exception
- beq crunch_load @ no owner, skip save
-
-crunch_save:
- cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers
- cfstr64 mvdx1, [r1, #CRUNCH_MVDX1]
- cfstr64 mvdx2, [r1, #CRUNCH_MVDX2]
- cfstr64 mvdx3, [r1, #CRUNCH_MVDX3]
- cfstr64 mvdx4, [r1, #CRUNCH_MVDX4]
- cfstr64 mvdx5, [r1, #CRUNCH_MVDX5]
- cfstr64 mvdx6, [r1, #CRUNCH_MVDX6]
- cfstr64 mvdx7, [r1, #CRUNCH_MVDX7]
- cfstr64 mvdx8, [r1, #CRUNCH_MVDX8]
- cfstr64 mvdx9, [r1, #CRUNCH_MVDX9]
- cfstr64 mvdx10, [r1, #CRUNCH_MVDX10]
- cfstr64 mvdx11, [r1, #CRUNCH_MVDX11]
- cfstr64 mvdx12, [r1, #CRUNCH_MVDX12]
- cfstr64 mvdx13, [r1, #CRUNCH_MVDX13]
- cfstr64 mvdx14, [r1, #CRUNCH_MVDX14]
- cfstr64 mvdx15, [r1, #CRUNCH_MVDX15]
-
-#ifdef __ARMEB__
-#error fix me for ARMEB
-#endif
-
- cfmv32al mvfx0, mvax0 @ save 72b accumulators
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L]
- cfmv32am mvfx0, mvax0
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M]
- cfmv32ah mvfx0, mvax0
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H]
- cfmv32al mvfx0, mvax1
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L]
- cfmv32am mvfx0, mvax1
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M]
- cfmv32ah mvfx0, mvax1
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H]
- cfmv32al mvfx0, mvax2
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L]
- cfmv32am mvfx0, mvax2
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M]
- cfmv32ah mvfx0, mvax2
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H]
- cfmv32al mvfx0, mvax3
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L]
- cfmv32am mvfx0, mvax3
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M]
- cfmv32ah mvfx0, mvax3
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H]
-
- cfmv32sc mvdx0, dspsc @ save status word
- cfstr64 mvdx0, [r1, #CRUNCH_DSPSC]
-
- teq r0, #0 @ anything to load?
- cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
- moveq pc, lr
-
-crunch_load:
- cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
- cfmvsc32 dspsc, mvdx0
-
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
- cfmval32 mvax0, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M]
- cfmvam32 mvax0, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H]
- cfmvah32 mvax0, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L]
- cfmval32 mvax1, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M]
- cfmvam32 mvax1, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H]
- cfmvah32 mvax1, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L]
- cfmval32 mvax2, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M]
- cfmvam32 mvax2, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H]
- cfmvah32 mvax2, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L]
- cfmval32 mvax3, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M]
- cfmvam32 mvax3, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H]
- cfmvah32 mvax3, mvfx0
-
- cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
- cfldr64 mvdx1, [r0, #CRUNCH_MVDX1]
- cfldr64 mvdx2, [r0, #CRUNCH_MVDX2]
- cfldr64 mvdx3, [r0, #CRUNCH_MVDX3]
- cfldr64 mvdx4, [r0, #CRUNCH_MVDX4]
- cfldr64 mvdx5, [r0, #CRUNCH_MVDX5]
- cfldr64 mvdx6, [r0, #CRUNCH_MVDX6]
- cfldr64 mvdx7, [r0, #CRUNCH_MVDX7]
- cfldr64 mvdx8, [r0, #CRUNCH_MVDX8]
- cfldr64 mvdx9, [r0, #CRUNCH_MVDX9]
- cfldr64 mvdx10, [r0, #CRUNCH_MVDX10]
- cfldr64 mvdx11, [r0, #CRUNCH_MVDX11]
- cfldr64 mvdx12, [r0, #CRUNCH_MVDX12]
- cfldr64 mvdx13, [r0, #CRUNCH_MVDX13]
- cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
- cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
-
- mov pc, lr
-
-/*
- * Back up crunch regs to save area and disable access to them
- * (mainly for gdb or sleep mode usage)
- *
- * r0 = struct thread_info pointer of target task or NULL for any
- */
-ENTRY(crunch_task_disable)
- stmfd sp!, {r4, r5, lr}
-
- mrs ip, cpsr
- orr r2, ip, #PSR_I_BIT @ disable interrupts
- msr cpsr_c, r2
-
- ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
-
- ldr r3, =crunch_owner
- add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r1, [r3] @ get current crunch owner
- teq r1, #0 @ any current owner?
- beq 1f @ no: quit
- teq r0, #0 @ any owner?
- teqne r1, r2 @ or specified one?
- bne 1f @ no: quit
-
- ldr r5, [r4, #0x80] @ enable access to crunch
- mov r2, #0xaa
- str r2, [r4, #0xc0]
- orr r5, r5, #0x00800000
- str r5, [r4, #0x80]
-
- mov r0, #0 @ nothing to load
- str r0, [r3] @ no more current owner
- ldr r2, [r4, #0x80] @ flush out enable (@@@)
- mov r2, r2
- bl crunch_save
-
- mov r2, #0xaa @ disable access to crunch
- str r2, [r4, #0xc0]
- bic r5, r5, #0x00800000
- str r5, [r4, #0x80]
- ldr r5, [r4, #0x80] @ flush out enable (@@@)
- mov r5, r5
-
-1: msr cpsr_c, ip @ restore interrupt mode
- ldmfd sp!, {r4, r5, pc}
-
-/*
- * Copy crunch state to given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to store crunch state
- *
- * this is called mainly in the creation of signal stack frames
- */
-ENTRY(crunch_task_copy)
- mrs ip, cpsr
- orr r2, ip, #PSR_I_BIT @ disable interrupts
- msr cpsr_c, r2
-
- ldr r3, =crunch_owner
- add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r3, [r3] @ get current crunch owner
- teq r2, r3 @ does this task own it...
- beq 1f
-
- @ current crunch values are in the task save area
- msr cpsr_c, ip @ restore interrupt mode
- mov r0, r1
- mov r1, r2
- mov r2, #CRUNCH_SIZE
- b memcpy
-
-1: @ this task owns crunch regs -- grab a copy from there
- mov r0, #0 @ nothing to load
- mov r3, lr @ preserve return address
- bl crunch_save
- msr cpsr_c, ip @ restore interrupt mode
- mov pc, r3
-
-/*
- * Restore crunch state from given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to get crunch state from
- *
- * this is used to restore crunch state when unwinding a signal stack frame
- */
-ENTRY(crunch_task_restore)
- mrs ip, cpsr
- orr r2, ip, #PSR_I_BIT @ disable interrupts
- msr cpsr_c, r2
-
- ldr r3, =crunch_owner
- add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r3, [r3] @ get current crunch owner
- teq r2, r3 @ does this task own it...
- beq 1f
-
- @ this task doesn't own crunch regs -- use its save area
- msr cpsr_c, ip @ restore interrupt mode
- mov r0, r2
- mov r2, #CRUNCH_SIZE
- b memcpy
-
-1: @ this task owns crunch regs -- load them directly
- mov r0, r1
- mov r1, #0 @ nothing to save
- mov r3, lr @ preserve return address
- bl crunch_load
- msr cpsr_c, ip @ restore interrupt mode
- mov pc, r3
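
The file deleted above implements lazy coprocessor switching: crunch access stays disabled across context switches, and the first crunch instruction a task issues traps into crunch_task_enable, which migrates the register file from the previous owner to the faulting task. A minimal C sketch of that protocol (illustrative only, not part of this patch; the types, globals, and trap hook below are stand-ins):

	#include <string.h>

	#define CRUNCH_SIZE 184			/* save area size, as defined above */

	struct crunch_state { unsigned char regs[CRUNCH_SIZE]; };

	static struct crunch_state hw;		/* stand-in for the physical register file */
	static struct crunch_state *owner;	/* save area of the current owner, or NULL */

	/* Models the undefined-instruction trap taken on a task's first crunch insn. */
	static void crunch_trap(struct crunch_state *task_state)
	{
		if (owner == task_state)
			return;				/* already the owner: nothing to move */
		if (owner)
			memcpy(owner, &hw, CRUNCH_SIZE);	/* crunch_save path */
		memcpy(&hw, task_state, CRUNCH_SIZE);		/* crunch_load path */
		owner = task_state;
	}

The real handler additionally re-enables crunch access in DEVCFG before touching any crunch register, since the trap can only fire while access is disabled.
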
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
deleted file mode 100644
index 25ef223ba7f..00000000000
--- a/arch/arm/kernel/crunch.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * arch/arm/kernel/crunch.c
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <mach/ep93xx-regs.h>
-#include <asm/thread_notify.h>
-
-struct crunch_state *crunch_owner;
-
-void crunch_task_release(struct thread_info *thread)
-{
- local_irq_disable();
- if (crunch_owner == &thread->crunchstate)
- crunch_owner = NULL;
- local_irq_enable();
-}
-
-static int crunch_enabled(u32 devcfg)
-{
- return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
-}
-
-static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
- struct thread_info *thread = (struct thread_info *)t;
- struct crunch_state *crunch_state;
- u32 devcfg;
-
- crunch_state = &thread->crunchstate;
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
- memset(crunch_state, 0, sizeof(*crunch_state));
-
- /*
- * FALLTHROUGH: Ensure we don't try to overwrite our newly
- * initialised state information on the first fault.
- */
-
- case THREAD_NOTIFY_EXIT:
- crunch_task_release(thread);
- break;
-
- case THREAD_NOTIFY_SWITCH:
- devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
- if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
- /*
- * We don't use ep93xx_syscon_swlocked_write() here
- * because we are on the context switch path and
- * preemption is already disabled.
- */
- devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
- __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
- }
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block crunch_notifier_block = {
- .notifier_call = crunch_do,
-};
-
-static int __init crunch_init(void)
-{
- thread_register_notifier(&crunch_notifier_block);
- elf_hwcap |= HWCAP_CRUNCH;
-
- return 0;
-}
-
-late_initcall(crunch_init);
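
Both the assembly and C paths above use the EP93xx software lock: a write to a locked syscon register is only honoured if the magic value 0xaa is written to the SWLOCK register immediately beforehand. A hedged sketch of that sequence (pointer-based placeholders, not the kernel's accessor API):

	#include <stdint.h>

	#define SWLOCK_MAGIC	0xaa

	/* Write 'val' to a software-locked syscon register: the unlock write
	 * must immediately precede the data write (the unlock applies to a
	 * single following write, after which the lock re-engages).
	 */
	static void swlocked_write(volatile uint32_t *swlock,
				   volatile uint32_t *reg, uint32_t val)
	{
		*swlock = SWLOCK_MAGIC;
		*reg = val;
	}

crunch_do() open-codes this sequence instead of calling ep93xx_syscon_swlocked_write() because, as its comment notes, it runs on the context-switch path with preemption already disabled.
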
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 4dd0edab6a6..1651d495074 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+ vma.vm_flags = VM_EXEC;
vma.vm_mm = mm;
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47..b2abfa18f13 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow)
+ int idx)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- new_raw_count &= armpmu->max_period;
- prev_raw_count &= armpmu->max_period;
-
- if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
- else
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
- hwc->sample_period = armpmu->max_period;
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
}
/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (cpu_pmu && cpu_pmu->reset)
+ cpu_pmu->reset(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+ .notifier_call = pmu_cpu_notify,
+};
+
+/*
* CPU PMU identification and registration.
*/
static int __init
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
+ register_cpu_notifier(&pmu_cpu_notifier);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec..b78af0cc6ef 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int counter_is_active(unsigned long pmcr, int idx)
-{
- unsigned long mask = 0;
- if (idx == ARMV6_CYCLE_COUNTER)
- mask = ARMV6_PMCR_CCOUNT_IEN;
- else if (idx == ARMV6_COUNTER0)
- mask = ARMV6_PMCR_COUNT0_IEN;
- else if (idx == ARMV6_COUNTER1)
- mask = ARMV6_PMCR_COUNT1_IEN;
-
- if (mask)
- return pmcr & mask;
-
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return 0;
-}
-
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!counter_is_active(pmcr, idx))
+ /* Ignore if we don't have an event. */
+ if (!event)
continue;
/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f..4d7095af2ab 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ isb();
+
return idx;
}
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d826982..71a21e6712f 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ if (!event)
+ continue;
+
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- unsigned long flags, ien, evtsel;
+ unsigned long flags, ien, evtsel, of_flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}