Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile       |    3
-rw-r--r--  arch/arm/kernel/asm-offsets.c  |    5
-rw-r--r--  arch/arm/kernel/bios32.c       |    8
-rw-r--r--  arch/arm/kernel/debug.S        |   12
-rw-r--r--  arch/arm/kernel/leds.c         |  115
-rw-r--r--  arch/arm/kernel/perf_event.c   | 2276
-rw-r--r--  arch/arm/kernel/pmu.c          |  103
-rw-r--r--  arch/arm/kernel/ptrace.c       |   53
-rw-r--r--  arch/arm/kernel/setup.c        |   80
-rw-r--r--  arch/arm/kernel/time.c         |  178
-rw-r--r--  arch/arm/kernel/traps.c        |   35
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S  |    4
12 files changed, 2605 insertions(+), 267 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index dd00f747e2a..26d302c28e1 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,6 +17,7 @@ obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \
process.o ptrace.o return_address.o setup.o signal.o \
sys_arm.o stacktrace.o time.o traps.o
+obj-$(CONFIG_LEDS) += leds.o
obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
@@ -46,6 +47,8 @@ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
+obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
ifneq ($(CONFIG_ARCH_EBSA110),y)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4a881258bb1..883511522fc 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
*/
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -112,5 +113,9 @@ int main(void)
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
+ BLANK();
+ DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
+ DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
+ DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
return 0;
}
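
For context: asm-offsets.c is never linked into the kernel; it is compiled to assembly and the build scrapes the DEFINE() markers into the generated asm-offsets.h, so assembly sources can use C-side constants. Assuming the usual enum dma_data_direction values from <linux/dma-mapping.h>, the three new entries would surface roughly as:

/* include/generated/asm-offsets.h (excerpt; values assumed from the enum) */
#define DMA_BIDIRECTIONAL 0
#define DMA_TO_DEVICE 1
#define DMA_FROM_DEVICE 2
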
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 809681900ec..bd397e0b663 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -616,15 +616,17 @@ char * __init pcibios_setup(char *str)
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff..
*/
-void pcibios_align_resource(void *data, struct resource *res,
- resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
{
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO && start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
- res->start = (start + align - 1) & ~(align - 1);
+ start = (start + align - 1) & ~(align - 1);
+
+ return start;
}
/**
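
The new return-based contract aside, the arithmetic here is the standard power-of-two round-up: for a power-of-two align, (start + align - 1) & ~(align - 1) is the smallest multiple of align that is >= start. A minimal standalone sketch of the idiom (illustrative values only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Round start up to the next multiple of align (align a power of two). */
static uint64_t align_up(uint64_t start, uint64_t align)
{
	return (start + align - 1) & ~(align - 1);
}

int main(void)
{
	assert(align_up(0x2901, 0x400) == 0x2c00);	/* rounded up */
	assert(align_up(0x3000, 0x400) == 0x3000);	/* already aligned */
	return 0;
}
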
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 5c91addcaeb..a38b4879441 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -24,7 +24,7 @@
#if defined(CONFIG_CPU_V6)
- .macro addruart, rx
+ .macro addruart, rx, tmp
.endm
.macro senduart, rd, rx
@@ -51,7 +51,7 @@
#elif defined(CONFIG_CPU_V7)
- .macro addruart, rx
+ .macro addruart, rx, tmp
.endm
.macro senduart, rd, rx
@@ -71,7 +71,7 @@ wait: mrc p14, 0, pc, c0, c1, 0
#elif defined(CONFIG_CPU_XSCALE)
- .macro addruart, rx
+ .macro addruart, rx, tmp
.endm
.macro senduart, rd, rx
@@ -98,7 +98,7 @@ wait: mrc p14, 0, pc, c0, c1, 0
#else
- .macro addruart, rx
+ .macro addruart, rx, tmp
.endm
.macro senduart, rd, rx
@@ -164,7 +164,7 @@ ENDPROC(printhex2)
.ltorg
ENTRY(printascii)
- addruart r3
+ addruart r3, r1
b 2f
1: waituart r2, r3
senduart r1, r3
@@ -180,7 +180,7 @@ ENTRY(printascii)
ENDPROC(printascii)
ENTRY(printch)
- addruart r3
+ addruart r3, r1
mov r1, r0
mov r0, #0
b 1b
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
new file mode 100644
index 00000000000..31a316c1777
--- /dev/null
+++ b/arch/arm/kernel/leds.c
@@ -0,0 +1,115 @@
+/*
+ * LED support code, ripped out of arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+
+#include <asm/leds.h>
+
+static void dummy_leds_event(led_event_t evt)
+{
+}
+
+void (*leds_event)(led_event_t) = dummy_leds_event;
+
+struct leds_evt_name {
+ const char name[8];
+ int on;
+ int off;
+};
+
+static const struct leds_evt_name evt_names[] = {
+ { "amber", led_amber_on, led_amber_off },
+ { "blue", led_blue_on, led_blue_off },
+ { "green", led_green_on, led_green_off },
+ { "red", led_red_on, led_red_off },
+};
+
+static ssize_t leds_store(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = -EINVAL, len = strcspn(buf, " ");
+
+ if (len > 0 && buf[len] == '\0')
+ len--;
+
+ if (strncmp(buf, "claim", len) == 0) {
+ leds_event(led_claim);
+ ret = size;
+ } else if (strncmp(buf, "release", len) == 0) {
+ leds_event(led_release);
+ ret = size;
+ } else {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
+ if (strlen(evt_names[i].name) != len ||
+ strncmp(buf, evt_names[i].name, len) != 0)
+ continue;
+ if (strncmp(buf+len, " on", 3) == 0) {
+ leds_event(evt_names[i].on);
+ ret = size;
+ } else if (strncmp(buf+len, " off", 4) == 0) {
+ leds_event(evt_names[i].off);
+ ret = size;
+ }
+ break;
+ }
+ }
+ return ret;
+}
+
+static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+
+static int leds_suspend(struct sys_device *dev, pm_message_t state)
+{
+ leds_event(led_stop);
+ return 0;
+}
+
+static int leds_resume(struct sys_device *dev)
+{
+ leds_event(led_start);
+ return 0;
+}
+
+static int leds_shutdown(struct sys_device *dev)
+{
+ leds_event(led_halted);
+ return 0;
+}
+
+static struct sysdev_class leds_sysclass = {
+ .name = "leds",
+ .shutdown = leds_shutdown,
+ .suspend = leds_suspend,
+ .resume = leds_resume,
+};
+
+static struct sys_device leds_device = {
+ .id = 0,
+ .cls = &leds_sysclass,
+};
+
+static int __init leds_init(void)
+{
+ int ret;
+ ret = sysdev_class_register(&leds_sysclass);
+ if (ret == 0)
+ ret = sysdev_register(&leds_device);
+ if (ret == 0)
+ ret = sysdev_create_file(&leds_device, &attr_event);
+ return ret;
+}
+
+device_initcall(leds_init);
+
+EXPORT_SYMBOL(leds_event);
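
Under the sysdev model used here, the class registers as /sys/devices/system/leds and the device with id 0 as leds0, so the write-only 'event' attribute should appear at /sys/devices/system/leds/leds0/event (path assumed from the default sysdev layout). A small userspace sketch of driving it:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void led_cmd(const char *cmd)
{
	/* Path assumed from the sysdev class/device names above. */
	int fd = open("/sys/devices/system/leds/leds0/event", O_WRONLY);

	if (fd >= 0) {
		write(fd, cmd, strlen(cmd));
		close(fd);
	}
}

int main(void)
{
	led_cmd("claim");	/* take over LED control */
	led_cmd("green on");	/* matches an entry in evt_names[] */
	return 0;
}
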
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
new file mode 100644
index 00000000000..c54ceb3d1f9
--- /dev/null
+++ b/arch/arm/kernel/perf_event.c
@@ -0,0 +1,2276 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ *
+ * ARMv7 support: Jean Pihet <jpihet@mvista.com>
+ * 2010 (c) MontaVista Software, LLC.
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code. Callchain code is based on the ARM OProfile backtrace
+ * code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/cputype.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+#include <asm/stacktrace.h>
+
+static const struct pmu_irqs *pmu_irqs;
+
+/*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+DEFINE_SPINLOCK(pmu_lock);
+
+/*
+ * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * another platform that supports more, we need to increase this to be the
+ * largest of all platforms.
+ *
+ * ARMv7 supports up to 32 events:
+ * cycle counter CCNT + 31 events counters CNT0..30.
+ * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 33
+
+/* The events for a given CPU. */
+struct cpu_hw_events {
+ /*
+ * The events that are active on the CPU for the given index. Index 0
+ * is reserved.
+ */
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+
+ /*
+ * A 1 bit for an index indicates that the counter is actively being
+ * used.
+ */
+ unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+struct arm_pmu {
+ char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct hw_perf_event *evt, int idx);
+ void (*disable)(struct hw_perf_event *evt, int idx);
+ int (*event_map)(int evt);
+ u64 (*raw_event)(u64);
+ int (*get_event_idx)(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc);
+ u32 (*read_counter)(int idx);
+ void (*write_counter)(int idx, u32 val);
+ void (*start)(void);
+ void (*stop)(void);
+ int num_events;
+ u64 max_period;
+};
+
+/* Set at runtime when we know what CPU type we are. */
+static const struct arm_pmu *armpmu;
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+
+#define C(_x) \
+ PERF_COUNT_HW_CACHE_##_x
+
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+static int
+armpmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result, ret;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return -EINVAL;
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return -EINVAL;
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
+
+ if (ret == CACHE_OP_UNSUPPORTED)
+ return -ENOENT;
+
+ return ret;
+}
+
+static int
+armpmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ s64 left = atomic64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ atomic64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ atomic64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)armpmu->max_period)
+ left = armpmu->max_period;
+
+ atomic64_set(&hwc->prev_count, (u64)-left);
+
+ armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
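
The trick above is that the counter is programmed with the two's complement of the remaining period: a 32-bit counter started at (u32)-left wraps to zero, and raises its overflow interrupt, after exactly left increments. A standalone sketch of the arithmetic (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t left = 1000;			/* events until the next sample */
	uint32_t counter = (uint32_t)-left;	/* 0xfffffc18 */

	counter += 1000;	/* the hardware counts 'left' events... */
	assert(counter == 0);	/* ...then wraps, flagging an overflow */
	return 0;
}
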
+
+static u64
+armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ int shift = 64 - 32;
+ s64 prev_raw_count, new_raw_count;
+ s64 delta;
+
+again:
+ prev_raw_count = atomic64_read(&hwc->prev_count);
+ new_raw_count = armpmu->read_counter(idx);
+
+ if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ atomic64_add(delta, &event->count);
+ atomic64_sub(delta, &hwc->period_left);
+
+ return new_raw_count;
+}
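
The shift pair is how a 32-bit hardware counter is reconciled with 64-bit bookkeeping: shifting both readings up by 32 makes the subtraction effectively modulo 2^32, and the shift back down recovers the correct positive delta even when the counter wrapped between reads. A sketch with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* The counter wrapped between reads: 0xfffffff0 -> 0x10. */
	uint64_t prev = 0xfffffff0, cur = 0x10;
	int shift = 64 - 32;

	int64_t delta = (int64_t)((cur << shift) - (prev << shift)) >> shift;
	assert(delta == 0x20);	/* 16 counts to wrap + 16 after = 32 events */
	return 0;
}
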
+
+static void
+armpmu_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0);
+
+ clear_bit(idx, cpuc->active_mask);
+ armpmu->disable(hwc, idx);
+
+ barrier();
+
+ armpmu_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ armpmu_event_update(event, hwc, hwc->idx);
+}
+
+static void
+armpmu_unthrottle(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * Set the period again. Some counters can't be stopped, so when we
+ * were throttled we simply disabled the IRQ source and the counter
+ * may have been left counting. If we don't do this step then we may
+ * get an interrupt too soon or *way* too late if the overflow has
+ * happened since disabling.
+ */
+ armpmu_event_set_period(event, hwc, hwc->idx);
+ armpmu->enable(hwc, hwc->idx);
+}
+
+static int
+armpmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ /* If we don't have a free counter for the event then finish early. */
+ idx = armpmu->get_event_idx(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then make
+ * sure it is disabled.
+ */
+ event->hw.idx = idx;
+ armpmu->disable(hwc, idx);
+ cpuc->events[idx] = event;
+ set_bit(idx, cpuc->active_mask);
+
+ /* Set the period for the event. */
+ armpmu_event_set_period(event, hwc, idx);
+
+ /* Enable the event. */
+ armpmu->enable(hwc, idx);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ return err;
+}
+
+static struct pmu pmu = {
+ .enable = armpmu_enable,
+ .disable = armpmu_disable,
+ .unthrottle = armpmu_unthrottle,
+ .read = armpmu_read,
+};
+
+static int
+validate_event(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event fake_event = event->hw;
+
+ if (event->pmu && event->pmu != &pmu)
+ return 0;
+
+ return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_pmu;
+
+ memset(&fake_pmu, 0, sizeof(fake_pmu));
+
+ if (!validate_event(&fake_pmu, leader))
+ return -ENOSPC;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(&fake_pmu, sibling))
+ return -ENOSPC;
+ }
+
+ if (!validate_event(&fake_pmu, event))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static int
+armpmu_reserve_hardware(void)
+{
+ int i;
+ int err;
+
+ pmu_irqs = reserve_pmu();
+ if (IS_ERR(pmu_irqs)) {
+ pr_warning("unable to reserve pmu\n");
+ return PTR_ERR(pmu_irqs);
+ }
+
+ init_pmu();
+
+ if (pmu_irqs->num_irqs < 1) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < pmu_irqs->num_irqs; ++i) {
+ err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
+ IRQF_DISABLED, "armpmu", NULL);
+ if (err) {
+ pr_warning("unable to request IRQ%d for ARM "
+ "perf counters\n", pmu_irqs->irqs[i]);
+ break;
+ }
+ }
+
+ if (err) {
+ for (i = i - 1; i >= 0; --i)
+ free_irq(pmu_irqs->irqs[i], NULL);
+ release_pmu(pmu_irqs);
+ pmu_irqs = NULL;
+ }
+
+ return err;
+}
+
+static void
+armpmu_release_hardware(void)
+{
+ int i;
+
+ for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
+ free_irq(pmu_irqs->irqs[i], NULL);
+ armpmu->stop();
+
+ release_pmu(pmu_irqs);
+ pmu_irqs = NULL;
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
+ armpmu_release_hardware();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int mapping, err;
+
+ /* Decode the generic type into an ARM event identifier. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ mapping = armpmu->event_map(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ mapping = armpmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ mapping = armpmu->raw_event(event->attr.config);
+ } else {
+ pr_debug("event type %x not supported\n", event->attr.type);
+ return -EOPNOTSUPP;
+ }
+
+ if (mapping < 0) {
+ pr_debug("event %x:%llx not supported\n", event->attr.type,
+ event->attr.config);
+ return mapping;
+ }
+
+ /*
+ * Check whether we need to exclude the counter from certain modes.
+ * The ARM performance counters are on all of the time so if someone
+ * has asked us for some excludes then we have to fail.
+ */
+ if (event->attr.exclude_kernel || event->attr.exclude_user ||
+ event->attr.exclude_hv || event->attr.exclude_idle) {
+ pr_debug("ARM performance counters do not support "
+ "mode exclusion\n");
+ return -EPERM;
+ }
+
+ /*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+ * yet. For SMP systems, each core has it's own PMU so we can't do any
+ * clever allocation or constraints checking at this point.
+ */
+ hwc->idx = -1;
+
+ /*
+ * Store the event encoding into the config_base field. config and
+ * event_base are unused as the only 2 things we need to know are
+ * the event mapping and the counter to use. The counter to use is
+ * also the index, and the config_base holds the event type.
+ */
+ hwc->config_base = (unsigned long)mapping;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = armpmu->max_period;
+ hwc->last_period = hwc->sample_period;
+ atomic64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event) {
+ err = validate_group(event);
+ if (err)
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+const struct pmu *
+hw_perf_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ if (!armpmu)
+ return ERR_PTR(-ENODEV);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ if (atomic_read(&active_events) > perf_max_events) {
+ atomic_dec(&active_events);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0) {
+ err = armpmu_reserve_hardware();
+ }
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return ERR_PTR(err);
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err ? ERR_PTR(err) : &pmu;
+}
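
The refcounting around the hardware claim is the classic atomic_inc_not_zero() pattern: the fast path bumps an already non-zero count without locking, and only the 0 -> 1 transition takes the mutex and actually reserves the hardware. A userspace sketch of the same shape (hypothetical names; C11 atomics and pthreads standing in for the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>

static atomic_int active_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

static int reserve_hardware(void) { return 0; }	/* stand-in for the real claim */

static int get_hw(void)
{
	int n = atomic_load(&active_events);
	int err = 0;

	/* Fast path: already claimed, just take another reference. */
	while (n > 0)
		if (atomic_compare_exchange_weak(&active_events, &n, n + 1))
			return 0;

	/* Slow path: serialize so only one caller reserves the hardware. */
	pthread_mutex_lock(&reserve_mutex);
	if (atomic_load(&active_events) == 0)
		err = reserve_hardware();
	if (!err)
		atomic_fetch_add(&active_events, 1);
	pthread_mutex_unlock(&reserve_mutex);
	return err;
}

int main(void)
{
	return get_hw();
}
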
+
+void
+hw_perf_enable(void)
+{
+ /* Enable all of the perf events on hardware. */
+ int idx;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!armpmu)
+ return;
+
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+
+ if (!event)
+ continue;
+
+ armpmu->enable(&event->hw, idx);
+ }
+
+ armpmu->start();
+}
+
+void
+hw_perf_disable(void)
+{
+ if (armpmu)
+ armpmu->stop();
+}
+
+/*
+ * ARMv6 Performance counter handling code.
+ *
+ * ARMv6 has 2 configurable performance counters and a single cycle counter.
+ * They all share a single reset bit but can be written to zero so we can use
+ * that for a reset.
+ *
+ * The counters can't be individually enabled or disabled so when we remove
+ * one event and replace it with another we could get spurious counts from the
+ * wrong event. However, we can take advantage of the fact that the
+ * performance counters can export events to the event bus, and the event bus
+ * itself can be monitored. This requires that we *don't* export the events to
+ * the event bus. The procedure for disabling a configurable counter is:
+ * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
+ * effectively stops the counter from counting.
+ * - disable the counter's interrupt generation (each counter has it's
+ * own interrupt enable bit).
+ * Once stopped, the counter value can be written as 0 to reset.
+ *
+ * To enable a counter:
+ * - enable the counter's interrupt generation.
+ * - set the new event type.
+ *
+ * Note: the dedicated cycle counter only counts cycles and can't be
+ * enabled/disabled independently of the others. When we want to disable the
+ * cycle counter, we have to just disable the interrupt reporting and start
+ * ignoring that counter. When re-enabling, we have to reset the value and
+ * enable the interrupt.
+ */
+
+enum armv6_perf_types {
+ ARMV6_PERFCTR_ICACHE_MISS = 0x0,
+ ARMV6_PERFCTR_IBUF_STALL = 0x1,
+ ARMV6_PERFCTR_DDEP_STALL = 0x2,
+ ARMV6_PERFCTR_ITLB_MISS = 0x3,
+ ARMV6_PERFCTR_DTLB_MISS = 0x4,
+ ARMV6_PERFCTR_BR_EXEC = 0x5,
+ ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
+ ARMV6_PERFCTR_INSTR_EXEC = 0x7,
+ ARMV6_PERFCTR_DCACHE_HIT = 0x9,
+ ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
+ ARMV6_PERFCTR_DCACHE_MISS = 0xB,
+ ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
+ ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
+ ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
+ ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
+ ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
+ ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
+ ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
+ ARMV6_PERFCTR_NOP = 0x20,
+};
+
+enum armv6_counters {
+ ARMV6_CYCLE_COUNTER = 1,
+ ARMV6_COUNTER0,
+ ARMV6_COUNTER1,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ /*
+ * The ARM performance counters can count micro DTLB misses,
+ * micro ITLB misses and main TLB misses. There isn't a generic
+ * TLB miss event, so we map the generic cache events to the micro
+ * misses here; users who want the main TLB misses can use a raw counter.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+enum armv6mpcore_perf_types {
+ ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
+ ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
+ ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
+ ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
+ ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
+ ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
+ ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
+ ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
+ ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
+ ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
+ ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
+ ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
+ ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
+ ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
+ ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
+ ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
+ ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
+ ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
+ ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
+ ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] =
+ ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
+ [C(RESULT_MISS)] =
+ ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] =
+ ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
+ [C(RESULT_MISS)] =
+ ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ /*
+ * The ARM performance counters can count micro DTLB misses,
+ * micro ITLB misses and main TLB misses. There isn't a generic
+ * TLB miss event, so we map the generic cache events to the micro
+ * misses here; users who want the main TLB misses can use a raw counter.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+static inline unsigned long
+armv6_pmcr_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
+ return val;
+}
+
+static inline void
+armv6_pmcr_write(unsigned long val)
+{
+ asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
+}
+
+#define ARMV6_PMCR_ENABLE (1 << 0)
+#define ARMV6_PMCR_CTR01_RESET (1 << 1)
+#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
+#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
+#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
+#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
+#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
+#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
+#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
+#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
+#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
+#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
+#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
+#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
+
+#define ARMV6_PMCR_OVERFLOWED_MASK \
+ (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
+ ARMV6_PMCR_CCOUNT_OVERFLOW)
+
+static inline int
+armv6_pmcr_has_overflowed(unsigned long pmcr)
+{
+ return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
+}
+
+static inline int
+armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
+ enum armv6_counters counter)
+{
+ int ret = 0;
+
+ if (ARMV6_CYCLE_COUNTER == counter)
+ ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
+ else if (ARMV6_COUNTER0 == counter)
+ ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
+ else if (ARMV6_COUNTER1 == counter)
+ ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
+ else
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+ return ret;
+}
+
+static inline u32
+armv6pmu_read_counter(int counter)
+{
+ unsigned long value = 0;
+
+ if (ARMV6_CYCLE_COUNTER == counter)
+ asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
+ else if (ARMV6_COUNTER0 == counter)
+ asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
+ else if (ARMV6_COUNTER1 == counter)
+ asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
+ else
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+ return value;
+}
+
+static inline void
+armv6pmu_write_counter(int counter,
+ u32 value)
+{
+ if (ARMV6_CYCLE_COUNTER == counter)
+ asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
+ else if (ARMV6_COUNTER0 == counter)
+ asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
+ else if (ARMV6_COUNTER1 == counter)
+ asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
+ else
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+}
+
+void
+armv6pmu_enable_event(struct hw_perf_event *hwc,
+ int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ if (ARMV6_CYCLE_COUNTER == idx) {
+ mask = 0;
+ evt = ARMV6_PMCR_CCOUNT_IEN;
+ } else if (ARMV6_COUNTER0 == idx) {
+ mask = ARMV6_PMCR_EVT_COUNT0_MASK;
+ evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
+ ARMV6_PMCR_COUNT0_IEN;
+ } else if (ARMV6_COUNTER1 == idx) {
+ mask = ARMV6_PMCR_EVT_COUNT1_MASK;
+ evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
+ ARMV6_PMCR_COUNT1_IEN;
+ } else {
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ /*
+ * Mask out the current event and set the counter to count the event
+ * that we're interested in.
+ */
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = armv6_pmcr_read();
+ val &= ~mask;
+ val |= evt;
+ armv6_pmcr_write(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static irqreturn_t
+armv6pmu_handle_irq(int irq_num,
+ void *dev)
+{
+ unsigned long pmcr = armv6_pmcr_read();
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ if (!armv6_pmcr_has_overflowed(pmcr))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ /*
+ * The interrupts are cleared by writing the overflow flags back to
+ * the control register. All of the other bits don't have any effect
+ * if they are rewritten, so write the whole value back.
+ */
+ armv6_pmcr_write(pmcr);
+
+ data.addr = 0;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ /*
+ * Handle the pending perf events.
+ *
+ * Note: this call *must* be run with interrupts enabled. For
+ * platforms that can have the PMU interrupts raised as a PMI, this
+ * will not work.
+ */
+ perf_event_do_pending();
+
+ return IRQ_HANDLED;
+}
+
+static void
+armv6pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = armv6_pmcr_read();
+ val |= ARMV6_PMCR_ENABLE;
+ armv6_pmcr_write(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+void
+armv6pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = armv6_pmcr_read();
+ val &= ~ARMV6_PMCR_ENABLE;
+ armv6_pmcr_write(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline int
+armv6pmu_event_map(int config)
+{
+ int mapping = armv6_perf_map[config];
+ if (HW_OP_UNSUPPORTED == mapping)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
+static inline int
+armv6mpcore_pmu_event_map(int config)
+{
+ int mapping = armv6mpcore_perf_map[config];
+ if (HW_OP_UNSUPPORTED == mapping)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
+static u64
+armv6pmu_raw_event(u64 config)
+{
+ return config & 0xff;
+}
+
+static int
+armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ /* Always place a cycle-counting event into the dedicated cycle counter. */
+ if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+ if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return ARMV6_CYCLE_COUNTER;
+ } else {
+ /*
+ * For anything other than a cycle counter, try and use
+ * counter0 and counter1.
+ */
+ if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
+ return ARMV6_COUNTER1;
+ }
+
+ if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
+ return ARMV6_COUNTER0;
+ }
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+ }
+}
+
+static void
+armv6pmu_disable_event(struct hw_perf_event *hwc,
+ int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ if (ARMV6_CYCLE_COUNTER == idx) {
+ mask = ARMV6_PMCR_CCOUNT_IEN;
+ evt = 0;
+ } else if (ARMV6_COUNTER0 == idx) {
+ mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
+ evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
+ } else if (ARMV6_COUNTER1 == idx) {
+ mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
+ evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
+ } else {
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ /*
+ * Mask out the current event and set the counter to count the number
+ * of ETM bus signal assertion cycles. The external reporting should
+ * be disabled and so this should never increment.
+ */
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = armv6_pmcr_read();
+ val &= ~mask;
+ val |= evt;
+ armv6_pmcr_write(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
+ int idx)
+{
+ unsigned long val, mask, flags, evt = 0;
+
+ if (ARMV6_CYCLE_COUNTER == idx) {
+ mask = ARMV6_PMCR_CCOUNT_IEN;
+ } else if (ARMV6_COUNTER0 == idx) {
+ mask = ARMV6_PMCR_COUNT0_IEN;
+ } else if (ARMV6_COUNTER1 == idx) {
+ mask = ARMV6_PMCR_COUNT1_IEN;
+ } else {
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ /*
+ * Unlike UP ARMv6, we don't have a way of stopping the counters. We
+ * simply disable the interrupt reporting.
+ */
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = armv6_pmcr_read();
+ val &= ~mask;
+ val |= evt;
+ armv6_pmcr_write(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static const struct arm_pmu armv6pmu = {
+ .name = "v6",
+ .handle_irq = armv6pmu_handle_irq,
+ .enable = armv6pmu_enable_event,
+ .disable = armv6pmu_disable_event,
+ .event_map = armv6pmu_event_map,
+ .raw_event = armv6pmu_raw_event,
+ .read_counter = armv6pmu_read_counter,
+ .write_counter = armv6pmu_write_counter,
+ .get_event_idx = armv6pmu_get_event_idx,
+ .start = armv6pmu_start,
+ .stop = armv6pmu_stop,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+};
+
+/*
+ * ARMv6mpcore is almost identical to single core ARMv6 with the exception
+ * that some of the events have different enumerations and that there is no
+ * *hack* to stop the programmable counters. To stop the counters we simply
+ * disable the interrupt reporting and update the event. When unthrottling we
+ * reset the period and enable the interrupt reporting.
+ */
+static const struct arm_pmu armv6mpcore_pmu = {
+ .name = "v6mpcore",
+ .handle_irq = armv6pmu_handle_irq,
+ .enable = armv6pmu_enable_event,
+ .disable = armv6mpcore_pmu_disable_event,
+ .event_map = armv6mpcore_pmu_event_map,
+ .raw_event = armv6pmu_raw_event,
+ .read_counter = armv6pmu_read_counter,
+ .write_counter = armv6pmu_write_counter,
+ .get_event_idx = armv6pmu_get_event_idx,
+ .start = armv6pmu_start,
+ .stop = armv6pmu_stop,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+};
+
+/*
+ * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
+ *
+ * Copied from ARMv6 code, with the low level code inspired
+ * by the ARMv7 Oprofile code.
+ *
+ * Cortex-A8 has up to 4 configurable performance counters and
+ * a single cycle counter.
+ * Cortex-A9 has up to 31 configurable performance counters and
+ * a single cycle counter.
+ *
+ * All counters can be enabled/disabled and IRQ masked separately. The cycle
+ * counter and all 4 performance counters together can be reset separately.
+ */
+
+#define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8"
+
+#define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9"
+
+/* Common ARMv7 event types */
+enum armv7_perf_types {
+ ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
+ ARMV7_PERFCTR_IFETCH_MISS = 0x01,
+ ARMV7_PERFCTR_ITLB_MISS = 0x02,
+ ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
+ ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
+ ARMV7_PERFCTR_DTLB_REFILL = 0x05,
+ ARMV7_PERFCTR_DREAD = 0x06,
+ ARMV7_PERFCTR_DWRITE = 0x07,
+
+ ARMV7_PERFCTR_EXC_TAKEN = 0x09,
+ ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
+ ARMV7_PERFCTR_CID_WRITE = 0x0B,
+ /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+ * It counts:
+ * - all branch instructions,
+ * - instructions that explicitly write the PC,
+ * - exception generating instructions.
+ */
+ ARMV7_PERFCTR_PC_WRITE = 0x0C,
+ ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
+ ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
+ ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
+ ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
+
+ ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
+
+ ARMV7_PERFCTR_CPU_CYCLES = 0xFF
+};
+
+/* ARMv7 Cortex-A8 specific event types */
+enum armv7_a8_perf_types {
+ ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
+
+ ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
+
+ ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
+ ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
+ ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
+ ARMV7_PERFCTR_L2_ACCESS = 0x43,
+ ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
+ ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
+ ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
+ ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
+ ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
+ ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
+ ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
+ ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
+ ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
+ ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
+ ARMV7_PERFCTR_L2_NEON = 0x4E,
+ ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
+ ARMV7_PERFCTR_L1_INST = 0x50,
+ ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
+ ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
+ ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
+ ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
+ ARMV7_PERFCTR_OP_EXECUTED = 0x55,
+ ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
+ ARMV7_PERFCTR_CYCLES_INST = 0x57,
+ ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
+ ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
+ ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
+
+ ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
+ ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
+ ARMV7_PERFCTR_PMU_EVENTS = 0x72,
+};
+
+/* ARMv7 Cortex-A9 specific event types */
+enum armv7_a9_perf_types {
+ ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
+ ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
+ ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
+
+ ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
+ ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
+
+ ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
+ ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
+ ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
+ ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
+ ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
+ ARMV7_PERFCTR_DATA_EVICTION = 0x65,
+ ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
+ ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
+ ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
+
+ ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
+
+ ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
+ ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
+ ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
+ ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
+ ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
+
+ ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
+ ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
+ ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
+ ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
+ ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
+ ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
+ ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
+
+ ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
+ ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
+
+ ARMV7_PERFCTR_ISB_INST = 0x90,
+ ARMV7_PERFCTR_DSB_INST = 0x91,
+ ARMV7_PERFCTR_DMB_INST = 0x92,
+ ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
+
+ ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
+ ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
+ ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
+ ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
+ ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
+ ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
+};
+
+/*
+ * Cortex-A8 HW events mapping
+ *
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ /*
+ * Only ITLB misses and DTLB refills are supported.
+ * If users want to count DTLB accesses, a raw
+ * counter must be used.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
+ * Cortex-A9 HW events mapping
+ */
+static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] =
+ ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ /*
+ * Only ITLB misses and DTLB refills are supported.
+ * If users want to count DTLB accesses, a raw
+ * counter must be used.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
+ * Perf Events counters
+ */
+enum armv7_counters {
+ ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
+ ARMV7_COUNTER0 = 2, /* First event counter */
+};
+
+/*
+ * The cycle counter is ARMV7_CYCLE_COUNTER.
+ * The first event counter is ARMV7_COUNTER0.
+ * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ */
+#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
+
+/*
+ * ARMv7 low level PMNC access
+ */
+
+/*
+ * Per-CPU PMNC: config reg
+ */
+#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
+#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
+#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
+#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
+#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
+#define ARMV7_PMNC_N_MASK 0x1f
+#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
+
+/*
+ * Available counters
+ */
+#define ARMV7_CNT0 0 /* First event counter */
+#define ARMV7_CCNT 31 /* Cycle counter */
+
+/* Perf Event to low level counters mapping */
+#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
+
+/*
+ * CNTENS: counters enable reg
+ */
+#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
+
+/*
+ * CNTENC: counters disable reg
+ */
+#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
+
+/*
+ * INTENS: counters overflow interrupt enable reg
+ */
+#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
+
+/*
+ * INTENC: counters overflow interrupt disable reg
+ */
+#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
+
+/*
+ * EVTSEL: Event selection reg
+ */
+#define ARMV7_EVTSEL_MASK 0x7f /* Mask for writable bits */
+
+/*
+ * SELECT: Counter selection reg
+ */
+#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
+
+/*
+ * FLAG: counters overflow flag status reg
+ */
+#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_FLAG_C (1 << ARMV7_CCNT)
+#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
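
Since ARMV7_EVENT_CNT_TO_CNTx is ARMV7_COUNTER0 - ARMV7_CNT0 == 2, these per-counter macros translate a perf counter index into its hardware bit: perf index 2 (the first event counter) lands on bit 0, while the cycle counter always uses bit 31. A quick standalone check of the mapping (macros copied from above):

#include <assert.h>

#define ARMV7_COUNTER0			2
#define ARMV7_CNT0			0
#define ARMV7_EVENT_CNT_TO_CNTx		(ARMV7_COUNTER0 - ARMV7_CNT0)
#define ARMV7_CNTENS_P(idx)		(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))

int main(void)
{
	assert(ARMV7_CNTENS_P(2) == (1 << 0));	/* perf idx 2 -> hardware counter 0 */
	assert(ARMV7_CNTENS_P(3) == (1 << 1));	/* perf idx 3 -> hardware counter 1 */
	return 0;
}
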
+
+static inline unsigned long armv7_pmnc_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
+ return val;
+}
+
+static inline void armv7_pmnc_write(unsigned long val)
+{
+ val &= ARMV7_PMNC_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
+}
+
+static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+{
+ return pmnc & ARMV7_OVERFLOWED_MASK;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
+ enum armv7_counters counter)
+{
+ int ret = 0;
+
+ if (counter == ARMV7_CYCLE_COUNTER)
+ ret = pmnc & ARMV7_FLAG_C;
+ else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
+ ret = pmnc & ARMV7_FLAG_P(counter);
+ else
+ pr_err("CPU%u checking wrong counter %d overflow status\n",
+ smp_processor_id(), counter);
+
+ return ret;
+}
+
+static inline int armv7_pmnc_select_counter(unsigned int idx)
+{
+ u32 val;
+
+ if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
+ pr_err("CPU%u selecting wrong PMNC counter"
+ " %d\n", smp_processor_id(), idx);
+ return -1;
+ }
+
+ val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+
+ return idx;
+}
+
+static inline u32 armv7pmu_read_counter(int idx)
+{
+ unsigned long value = 0;
+
+ if (idx == ARMV7_CYCLE_COUNTER)
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+ else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
+ if (armv7_pmnc_select_counter(idx) == idx)
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r" (value));
+ } else
+ pr_err("CPU%u reading wrong counter %d\n",
+ smp_processor_id(), idx);
+
+ return value;
+}
+
+static inline void armv7pmu_write_counter(int idx, u32 value)
+{
+ if (idx == ARMV7_CYCLE_COUNTER)
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+ else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
+ if (armv7_pmnc_select_counter(idx) == idx)
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r" (value));
+ } else
+ pr_err("CPU%u writing wrong counter %d\n",
+ smp_processor_id(), idx);
+}
+
+static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+{
+ if (armv7_pmnc_select_counter(idx) == idx) {
+ val &= ARMV7_EVTSEL_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+ }
+}
+
+static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+{
+ u32 val;
+
+ if ((idx != ARMV7_CYCLE_COUNTER) &&
+ ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+ pr_err("CPU%u enabling wrong PMNC counter"
+ " %d\n", smp_processor_id(), idx);
+ return -1;
+ }
+
+ if (idx == ARMV7_CYCLE_COUNTER)
+ val = ARMV7_CNTENS_C;
+ else
+ val = ARMV7_CNTENS_P(idx);
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+
+ return idx;
+}
+
+static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+{
+ u32 val;
+
+ if ((idx != ARMV7_CYCLE_COUNTER) &&
+ ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+ pr_err("CPU%u disabling wrong PMNC counter"
+ " %d\n", smp_processor_id(), idx);
+ return -1;
+ }
+
+ if (idx == ARMV7_CYCLE_COUNTER)
+ val = ARMV7_CNTENC_C;
+ else
+ val = ARMV7_CNTENC_P(idx);
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+
+ return idx;
+}
+
+static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+{
+ u32 val;
+
+ if ((idx != ARMV7_CYCLE_COUNTER) &&
+ ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+ pr_err("CPU%u enabling wrong PMNC counter"
+ " interrupt enable %d\n", smp_processor_id(), idx);
+ return -1;
+ }
+
+ if (idx == ARMV7_CYCLE_COUNTER)
+ val = ARMV7_INTENS_C;
+ else
+ val = ARMV7_INTENS_P(idx);
+
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
+
+ return idx;
+}
+
+static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+{
+ u32 val;
+
+ if ((idx != ARMV7_CYCLE_COUNTER) &&
+ ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+ pr_err("CPU%u disabling wrong PMNC counter"
+ " interrupt enable %d\n", smp_processor_id(), idx);
+ return -1;
+ }
+
+ if (idx == ARMV7_CYCLE_COUNTER)
+ val = ARMV7_INTENC_C;
+ else
+ val = ARMV7_INTENC_P(idx);
+
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+
+ return idx;
+}
+
+static inline u32 armv7_pmnc_getreset_flags(void)
+{
+ u32 val;
+
+ /* Read */
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+
+ /* Write to clear flags */
+ val &= ARMV7_FLAG_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+
+ return val;
+}
+
+#ifdef DEBUG
+static void armv7_pmnc_dump_regs(void)
+{
+ u32 val;
+ unsigned int cnt;
+
+ printk(KERN_INFO "PMNC registers dump:\n");
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ printk(KERN_INFO "PMNC =0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+ printk(KERN_INFO "CNTENS=0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
+ printk(KERN_INFO "INTENS=0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+ printk(KERN_INFO "FLAGS =0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
+ printk(KERN_INFO "SELECT=0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+ printk(KERN_INFO "CCNT =0x%08x\n", val);
+
+	for (cnt = ARMV7_COUNTER0; cnt <= ARMV7_COUNTER_LAST; cnt++) {
+		armv7_pmnc_select_counter(cnt);
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
+		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
+		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
+	}
+}
+#endif
+
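+/*
+ * The pmu_lock spinlock protects the read-modify-write sequences on the
+ * shared PMNC state performed by the enable/disable and start/stop paths.
+ */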
+static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+ spin_lock_irqsave(&pmu_lock, flags);
+
+ /*
+ * Disable counter
+ */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't need to set the event if it's a cycle count
+ */
+ if (idx != ARMV7_CYCLE_COUNTER)
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+ /*
+ * Enable interrupt for this counter
+ */
+ armv7_pmnc_enable_intens(idx);
+
+ /*
+ * Enable counter
+ */
+ armv7_pmnc_enable_counter(idx);
+
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+
+ /*
+ * Disable counter and interrupt
+ */
+ spin_lock_irqsave(&pmu_lock, flags);
+
+ /*
+ * Disable counter
+ */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Disable interrupt for this counter
+ */
+ armv7_pmnc_disable_intens(idx);
+
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * Get and reset the IRQ flags
+ */
+ pmnc = armv7_pmnc_getreset_flags();
+
+ /*
+ * Did an overflow occur?
+ */
+ if (!armv7_pmnc_has_overflowed(pmnc))
+ return IRQ_NONE;
+
+ /*
+ * Handle the counter(s) overflow(s)
+ */
+ regs = get_irq_regs();
+
+ data.addr = 0;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
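+	/*
+	 * Counter indices are 1-based (ARMV7_CYCLE_COUNTER is the first),
+	 * hence the inclusive loop bound; slot 0 of active_mask is unused.
+	 */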
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ /*
+ * Handle the pending perf events.
+ *
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
+	 * will not work.
+ */
+ perf_event_do_pending();
+
+ return IRQ_HANDLED;
+}
+
+static void armv7pmu_start(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ /* Enable all counters */
+ armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void armv7pmu_stop(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ /* Disable all counters */
+ armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
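+/*
+ * Map the generic perf hardware event IDs onto the per-core event numbers
+ * defined above; events a core cannot count map to -EOPNOTSUPP.
+ */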
+static inline int armv7_a8_pmu_event_map(int config)
+{
+ int mapping = armv7_a8_perf_map[config];
+	if (mapping == HW_OP_UNSUPPORTED)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
+static inline int armv7_a9_pmu_event_map(int config)
+{
+ int mapping = armv7_a9_perf_map[config];
+	if (mapping == HW_OP_UNSUPPORTED)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
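+/*
+ * Raw events map directly onto the event number field of PMXEVTYPER,
+ * which is eight bits wide, so only the low byte of the config is used.
+ */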
+static u64 armv7pmu_raw_event(u64 config)
+{
+ return config & 0xff;
+}
+
+static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ int idx;
+
+	/* Always place a cycle count event into the cycle counter. */
+ if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
+ if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return ARMV7_CYCLE_COUNTER;
+ } else {
+		/*
+		 * For anything other than a cycle counter, try to use
+		 * the event counters.
+		 */
+ for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+ }
+}
+
+static struct arm_pmu armv7pmu = {
+ .handle_irq = armv7pmu_handle_irq,
+ .enable = armv7pmu_enable_event,
+ .disable = armv7pmu_disable_event,
+ .raw_event = armv7pmu_raw_event,
+ .read_counter = armv7pmu_read_counter,
+ .write_counter = armv7pmu_write_counter,
+ .get_event_idx = armv7pmu_get_event_idx,
+ .start = armv7pmu_start,
+ .stop = armv7pmu_stop,
+ .max_period = (1LLU << 32) - 1,
+};
+
+static u32 __init armv7_reset_read_pmnc(void)
+{
+ u32 nb_cnt;
+
+	/* Reset the cycle counter (C bit) and all event counters (P bit) */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+
+	/* Read the number of CNTx counters supported from the PMNC N field */
+	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+
+	/* Add one for the CPU cycle counter and return */
+ return nb_cnt + 1;
+}
+
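+/*
+ * CPUs are identified from the MIDR: bits [31:24] hold the implementer
+ * code (0x41 is ARM Ltd) and bits [15:4] the primary part number.
+ */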
+static int __init
+init_hw_perf_events(void)
+{
+ unsigned long cpuid = read_cpuid_id();
+ unsigned long implementor = (cpuid & 0xFF000000) >> 24;
+ unsigned long part_number = (cpuid & 0xFFF0);
+
+ /* We only support ARM CPUs implemented by ARM at the moment. */
+	if (implementor == 0x41) {
+ switch (part_number) {
+ case 0xB360: /* ARM1136 */
+ case 0xB560: /* ARM1156 */
+ case 0xB760: /* ARM1176 */
+ armpmu = &armv6pmu;
+ memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
+ sizeof(armv6_perf_cache_map));
+ perf_max_events = armv6pmu.num_events;
+ break;
+ case 0xB020: /* ARM11mpcore */
+ armpmu = &armv6mpcore_pmu;
+ memcpy(armpmu_perf_cache_map,
+ armv6mpcore_perf_cache_map,
+ sizeof(armv6mpcore_perf_cache_map));
+ perf_max_events = armv6mpcore_pmu.num_events;
+ break;
+ case 0xC080: /* Cortex-A8 */
+ armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
+ memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
+ sizeof(armv7_a8_perf_cache_map));
+ armv7pmu.event_map = armv7_a8_pmu_event_map;
+ armpmu = &armv7pmu;
+
+			/*
+			 * Reset PMNC and read the number of CNTx
+			 * counters supported.
+			 */
+ armv7pmu.num_events = armv7_reset_read_pmnc();
+ perf_max_events = armv7pmu.num_events;
+ break;
+ case 0xC090: /* Cortex-A9 */
+ armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
+ memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
+ sizeof(armv7_a9_perf_cache_map));
+ armv7pmu.event_map = armv7_a9_pmu_event_map;
+ armpmu = &armv7pmu;
+
+			/*
+			 * Reset PMNC and read the number of CNTx
+			 * counters supported.
+			 */
+ armv7pmu.num_events = armv7_reset_read_pmnc();
+ perf_max_events = armv7pmu.num_events;
+ break;
+ default:
+ pr_info("no hardware support available\n");
+ perf_max_events = -1;
+ }
+ }
+
+ if (armpmu)
+ pr_info("enabled with %s PMU driver, %d counters available\n",
+ armpmu->name, armpmu->num_events);
+
+ return 0;
+}
+arch_initcall(init_hw_perf_events);
+
+/*
+ * Callchain handling code.
+ */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct frame_tail {
+ struct frame_tail *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail *
+user_backtrace(struct frame_tail *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct frame_tail buftail;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+ if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+ return NULL;
+
+ callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail >= buftail.fp)
+ return NULL;
+
+ return buftail.fp - 1;
+}
+
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ struct frame_tail *tail;
+
+ callchain_store(entry, PERF_CONTEXT_USER);
+
+ if (!user_mode(regs))
+ regs = task_pt_regs(current);
+
+ tail = (struct frame_tail *)regs->ARM_fp - 1;
+
+ while (tail && !((unsigned long)tail & 0x3))
+ tail = user_backtrace(tail, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int
+callchain_trace(struct stackframe *fr,
+ void *data)
+{
+ struct perf_callchain_entry *entry = data;
+ callchain_store(entry, fr->pc);
+ return 0;
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ struct stackframe fr;
+
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ fr.fp = regs->ARM_fp;
+ fr.sp = regs->ARM_sp;
+ fr.lr = regs->ARM_lr;
+ fr.pc = regs->ARM_pc;
+ walk_stackframe(&fr, callchain_trace, entry);
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user)
+ perf_callchain_kernel(regs, entry);
+
+ if (current->mm)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
new file mode 100644
index 00000000000..a124312e343
--- /dev/null
+++ b/arch/arm/kernel/pmu.c
@@ -0,0 +1,103 @@
+/*
+ * linux/arch/arm/kernel/pmu.c
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/pmu.h>
+
+/*
+ * Define the IRQs for the system. We could use something like a platform
+ * device but that seems fairly heavyweight for this. Also, the performance
+ * counters can't be removed or hotplugged.
+ *
+ * Ordering is important: init_pmu() uses the ordering to set the affinity of
+ * each interrupt to the corresponding core, e.g. the first interrupt goes to
+ * cpu 0, the second to cpu 1, etc.
+ */
+static const int irqs[] = {
+#if defined(CONFIG_ARCH_OMAP2)
+ 3,
+#elif defined(CONFIG_ARCH_BCMRING)
+ IRQ_PMUIRQ,
+#elif defined(CONFIG_MACH_REALVIEW_EB)
+ IRQ_EB11MP_PMU_CPU0,
+ IRQ_EB11MP_PMU_CPU1,
+ IRQ_EB11MP_PMU_CPU2,
+ IRQ_EB11MP_PMU_CPU3,
+#elif defined(CONFIG_ARCH_OMAP3)
+ INT_34XX_BENCH_MPU_EMUL,
+#elif defined(CONFIG_ARCH_IOP32X)
+ IRQ_IOP32X_CORE_PMU,
+#elif defined(CONFIG_ARCH_IOP33X)
+ IRQ_IOP33X_CORE_PMU,
+#elif defined(CONFIG_ARCH_PXA)
+ IRQ_PMU,
+#endif
+};
+
+static const struct pmu_irqs pmu_irqs = {
+ .irqs = irqs,
+ .num_irqs = ARRAY_SIZE(irqs),
+};
+
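+/*
+ * Bit 0 of pmu_lock arbitrates exclusive ownership of the PMU between
+ * reserve_pmu() and release_pmu().
+ */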
+static unsigned long pmu_lock;
+
+const struct pmu_irqs *
+reserve_pmu(void)
+{
+ return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
+ &pmu_irqs;
+}
+EXPORT_SYMBOL_GPL(reserve_pmu);
+
+int
+release_pmu(const struct pmu_irqs *irqs)
+{
+ if (WARN_ON(irqs != &pmu_irqs))
+ return -EINVAL;
+ clear_bit_unlock(0, &pmu_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(release_pmu);
+
+static int
+set_irq_affinity(int irq,
+ unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+ int err = irq_set_affinity(irq, cpumask_of(cpu));
+ if (err)
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ return err;
+#else
+ return 0;
+#endif
+}
+
+int
+init_pmu(void)
+{
+ int i, err = 0;
+
+ for (i = 0; i < pmu_irqs.num_irqs; ++i) {
+ err = set_irq_affinity(pmu_irqs.irqs[i], i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(init_pmu);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index a2ea3854cb3..08f899fb76a 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -499,10 +499,41 @@ static struct undef_hook thumb_break_hook = {
.fn = break_trap,
};
+static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
+{
+ unsigned int instr2;
+ void __user *pc;
+
+ /* Check the second half of the instruction. */
+ pc = (void __user *)(instruction_pointer(regs) + 2);
+
+	if (processor_mode(regs) == SVC_MODE)
+		instr2 = *(u16 *) pc;
+	else
+		get_user(instr2, (u16 __user *)pc);
+
+	if (instr2 == 0xa000) {
+		ptrace_break(current, regs);
+		return 0;
+	}
+
+	return 1;
+}
+
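+/*
+ * Thumb-2 breakpoints are 32-bit instructions: the hook below matches the
+ * first halfword (0xf7f0) and thumb2_break_trap() checks that the second
+ * halfword is 0xa000.
+ */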
+static struct undef_hook thumb2_break_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = 0xf7f0,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = PSR_T_BIT,
+ .fn = thumb2_break_trap,
+};
+
static int __init ptrace_break_init(void)
{
register_undef_hook(&arm_break_hook);
register_undef_hook(&thumb_break_hook);
+ register_undef_hook(&thumb2_break_hook);
return 0;
}
@@ -669,7 +700,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
union vfp_state *vfp = &thread->vfpstate;
struct user_vfp __user *ufp = data;
- vfp_sync_state(thread);
+ vfp_sync_hwstate(thread);
/* copy the floating point registers */
if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
@@ -692,7 +723,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
union vfp_state *vfp = &thread->vfpstate;
struct user_vfp __user *ufp = data;
- vfp_sync_state(thread);
+ vfp_sync_hwstate(thread);
/* copy the floating point registers */
if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
@@ -703,6 +734,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
if (get_user(vfp->hard.fpscr, &ufp->fpscr))
return -EFAULT;
+ vfp_flush_hwstate(thread);
+
return 0;
}
#endif
@@ -712,26 +745,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int ret;
switch (request) {
- /*
- * read word at location "addr" in the child process.
- */
- case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA:
- ret = generic_ptrace_peekdata(child, addr, data);
- break;
-
case PTRACE_PEEKUSR:
ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
break;
- /*
- * write the word at location addr.
- */
- case PTRACE_POKETEXT:
- case PTRACE_POKEDATA:
- ret = generic_ptrace_pokedata(child, addr, data);
- break;
-
case PTRACE_POKEUSR:
ret = ptrace_write_user(child, addr, data);
break;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6..c91c77b54de 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
+#include <linux/proc_fs.h>
#include <asm/unified.h>
#include <asm/cpu.h>
@@ -102,6 +103,7 @@ struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
#endif
struct stack {
@@ -117,7 +119,7 @@ EXPORT_SYMBOL(elf_platform);
static const char *cpu_name;
static const char *machine_name;
-static char __initdata command_line[COMMAND_LINE_SIZE];
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
@@ -417,10 +419,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
* Pick out the memory size. We look for mem=size@start,
* where start and size are "size[KkMm]"
*/
-static void __init early_mem(char **p)
+static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
unsigned long size, start;
+ char *endp;
/*
* If the user specifies memory size, we
@@ -433,52 +436,15 @@ static void __init early_mem(char **p)
}
start = PHYS_OFFSET;
- size = memparse(*p, p);
- if (**p == '@')
- start = memparse(*p + 1, p);
+ size = memparse(p, &endp);
+ if (*endp == '@')
+ start = memparse(endp + 1, NULL);
arm_add_memory(start, size);
-}
-__early_param("mem=", early_mem);
-/*
- * Initial parsing of the command line.
- */
-static void __init parse_cmdline(char **cmdline_p, char *from)
-{
- char c = ' ', *to = command_line;
- int len = 0;
-
- for (;;) {
- if (c == ' ') {
- extern struct early_params __early_begin, __early_end;
- struct early_params *p;
-
- for (p = &__early_begin; p < &__early_end; p++) {
- int arglen = strlen(p->arg);
-
- if (memcmp(from, p->arg, arglen) == 0) {
- if (to != command_line)
- to -= 1;
- from += arglen;
- p->fn(&from);
-
- while (*from != ' ' && *from != '\0')
- from++;
- break;
- }
- }
- }
- c = *from++;
- if (!c)
- break;
- if (COMMAND_LINE_SIZE <= ++len)
- break;
- *to++ = c;
- }
- *to = '\0';
- *cmdline_p = command_line;
+ return 0;
}
+early_param("mem", early_mem);
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
@@ -739,9 +705,15 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
- memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
- boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
- parse_cmdline(cmdline_p, from);
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+ /* populate cmd_line too for later use, preserving boot_command_line */
+ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = cmd_line;
+
+ parse_early_param();
+
paging_init(mdesc);
request_standard_resources(&meminfo, mdesc);
@@ -782,9 +754,21 @@ static int __init topology_init(void)
return 0;
}
-
subsys_initcall(topology_init);
+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+ struct proc_dir_entry *res;
+
+ res = proc_mkdir("cpu", NULL);
+ if (!res)
+ return -ENOMEM;
+ return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
static const char *hwcap_str[] = {
"swp",
"half",
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d38cdf2c827..28753805d2d 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -10,11 +10,6 @@
*
* This file contains the ARM-specific time handling details:
* reading the RTC at bootup, etc...
- *
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-/*
- * hook for setting the RTC's idea of the current time.
- */
-int (*set_rtc)(void);
-
#ifndef CONFIG_GENERIC_TIME
static unsigned long dummy_gettimeoffset(void)
{
@@ -89,140 +79,6 @@ static unsigned long dummy_gettimeoffset(void)
}
#endif
-static unsigned long next_rtc_update;
-
-/*
- * If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. set_rtc() has to be
- * called as close as possible to 500 ms before the new second
- * starts.
- */
-static inline void do_set_rtc(void)
-{
- if (!ntp_synced() || set_rtc == NULL)
- return;
-
- if (next_rtc_update &&
- time_before((unsigned long)xtime.tv_sec, next_rtc_update))
- return;
-
- if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) &&
- xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
- return;
-
- if (set_rtc())
- /*
- * rtc update failed. Try again in 60s
- */
- next_rtc_update = xtime.tv_sec + 60;
- else
- next_rtc_update = xtime.tv_sec + 660;
-}
-
-#ifdef CONFIG_LEDS
-
-static void dummy_leds_event(led_event_t evt)
-{
-}
-
-void (*leds_event)(led_event_t) = dummy_leds_event;
-
-struct leds_evt_name {
- const char name[8];
- int on;
- int off;
-};
-
-static const struct leds_evt_name evt_names[] = {
- { "amber", led_amber_on, led_amber_off },
- { "blue", led_blue_on, led_blue_off },
- { "green", led_green_on, led_green_off },
- { "red", led_red_on, led_red_off },
-};
-
-static ssize_t leds_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf, size_t size)
-{
- int ret = -EINVAL, len = strcspn(buf, " ");
-
- if (len > 0 && buf[len] == '\0')
- len--;
-
- if (strncmp(buf, "claim", len) == 0) {
- leds_event(led_claim);
- ret = size;
- } else if (strncmp(buf, "release", len) == 0) {
- leds_event(led_release);
- ret = size;
- } else {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
- if (strlen(evt_names[i].name) != len ||
- strncmp(buf, evt_names[i].name, len) != 0)
- continue;
- if (strncmp(buf+len, " on", 3) == 0) {
- leds_event(evt_names[i].on);
- ret = size;
- } else if (strncmp(buf+len, " off", 4) == 0) {
- leds_event(evt_names[i].off);
- ret = size;
- }
- break;
- }
- }
- return ret;
-}
-
-static SYSDEV_ATTR(event, 0200, NULL, leds_store);
-
-static int leds_suspend(struct sys_device *dev, pm_message_t state)
-{
- leds_event(led_stop);
- return 0;
-}
-
-static int leds_resume(struct sys_device *dev)
-{
- leds_event(led_start);
- return 0;
-}
-
-static int leds_shutdown(struct sys_device *dev)
-{
- leds_event(led_halted);
- return 0;
-}
-
-static struct sysdev_class leds_sysclass = {
- .name = "leds",
- .shutdown = leds_shutdown,
- .suspend = leds_suspend,
- .resume = leds_resume,
-};
-
-static struct sys_device leds_device = {
- .id = 0,
- .cls = &leds_sysclass,
-};
-
-static int __init leds_init(void)
-{
- int ret;
- ret = sysdev_class_register(&leds_sysclass);
- if (ret == 0)
- ret = sysdev_register(&leds_device);
- if (ret == 0)
- ret = sysdev_create_file(&leds_device, &attr_event);
- return ret;
-}
-
-device_initcall(leds_init);
-
-EXPORT_SYMBOL(leds_event);
-#endif
-
#ifdef CONFIG_LEDS_TIMER
static inline void do_leds(void)
{
@@ -295,39 +151,6 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
#endif /* !CONFIG_GENERIC_TIME */
-/**
- * save_time_delta - Save the offset between system time and RTC time
- * @delta: pointer to timespec to store delta
- * @rtc: pointer to timespec for current RTC time
- *
- * Return a delta between the system time and the RTC time, such
- * that system time can be restored later with restore_time_delta()
- */
-void save_time_delta(struct timespec *delta, struct timespec *rtc)
-{
- set_normalized_timespec(delta,
- xtime.tv_sec - rtc->tv_sec,
- xtime.tv_nsec - rtc->tv_nsec);
-}
-EXPORT_SYMBOL(save_time_delta);
-
-/**
- * restore_time_delta - Restore the current system time
- * @delta: delta returned by save_time_delta()
- * @rtc: pointer to timespec for current RTC time
- */
-void restore_time_delta(struct timespec *delta, struct timespec *rtc)
-{
- struct timespec ts;
-
- set_normalized_timespec(&ts,
- delta->tv_sec + rtc->tv_sec,
- delta->tv_nsec + rtc->tv_nsec);
-
- do_settimeofday(&ts);
-}
-EXPORT_SYMBOL(restore_time_delta);
-
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
* Kernel system timer support.
@@ -336,7 +159,6 @@ void timer_tick(void)
{
profile_tick(CPU_PROFILING);
do_leds();
- do_set_rtc();
write_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3f361a783f4..1621e5327b2 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -12,15 +12,17 @@
* 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
* kill the offending process.
*/
-#include <linux/module.h>
#include <linux/signal.h>
-#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
-#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#define S_SMP ""
#endif
-static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
+static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
struct task_struct *tsk = thread->task;
static int die_counter;
+ int ret;
printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
str, err, ++die_counter);
sysfs_printk_last_file();
+
+ /* trap and error numbers are mostly meaningless on ARM */
+ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
+ if (ret == NOTIFY_STOP)
+ return ret;
+
print_modules();
__show_regs(regs);
printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
@@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
+
+ return ret;
}
DEFINE_SPINLOCK(die_lock);
@@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.
*/
-NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+void die(const char *str, struct pt_regs *regs, int err)
{
struct thread_info *thread = current_thread_info();
+ int ret;
oops_enter();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
- __die(str, err, thread, regs);
+ ret = __die(str, err, thread, regs);
+
+ if (regs && kexec_should_crash(thread->task))
+ crash_kexec(regs);
+
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
@@ -267,11 +283,10 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
if (in_interrupt())
panic("Fatal exception in interrupt");
-
if (panic_on_oops)
panic("Fatal exception");
-
- do_exit(SIGSEGV);
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
}
void arm_notify_die(const char *str, struct pt_regs *regs,
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4957e13ef55..b16c07914b5 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -43,10 +43,6 @@ SECTIONS
INIT_SETUP(16)
- __early_begin = .;
- *(.early_param.init)
- __early_end = .;
-
INIT_CALLS
CON_INITCALL
SECURITY_INITCALL