Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/Makefile      |    3
-rw-r--r-- | arch/arm/kernel/asm-offsets.c |    5
-rw-r--r-- | arch/arm/kernel/bios32.c      |    8
-rw-r--r-- | arch/arm/kernel/debug.S       |   12
-rw-r--r-- | arch/arm/kernel/leds.c        |  115
-rw-r--r-- | arch/arm/kernel/perf_event.c  | 2276
-rw-r--r-- | arch/arm/kernel/pmu.c         |  103
-rw-r--r-- | arch/arm/kernel/ptrace.c      |   53
-rw-r--r-- | arch/arm/kernel/setup.c       |   79
-rw-r--r-- | arch/arm/kernel/time.c        |  178
-rw-r--r-- | arch/arm/kernel/traps.c       |   35
-rw-r--r-- | arch/arm/kernel/vmlinux.lds.S |    4
12 files changed, 2604 insertions, 267 deletions
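Note on the cache-event encoding: the bulk of this change is the new perf_event.c below, which maps generic perf event attributes onto the ARM PMU. For PERF_TYPE_HW_CACHE events, armpmu_map_cache_event() unpacks the 64-bit config word as three bytes (cache type, operation, result) and looks the tuple up in armpmu_perf_cache_map. The helper below is only an illustrative sketch of that layout for orientation; it is not part of the patch.

	/*
	 * Illustrative sketch, not part of this patch: how a PERF_TYPE_HW_CACHE
	 * config word is laid out.  armpmu_map_cache_event() below reads it back
	 * with (config >> 0) & 0xff, (config >> 8) & 0xff and (config >> 16) & 0xff.
	 */
	static u64 make_cache_config(unsigned int type, unsigned int op,
				     unsigned int result)
	{
		return ((u64)type   <<  0) |	/* PERF_COUNT_HW_CACHE_{L1D,L1I,LL,DTLB,ITLB,BPU} */
		       ((u64)op     <<  8) |	/* PERF_COUNT_HW_CACHE_OP_{READ,WRITE,PREFETCH} */
		       ((u64)result << 16);	/* PERF_COUNT_HW_CACHE_RESULT_{ACCESS,MISS} */
	}

	/*
	 * Example: L1 data-cache read misses.
	 *
	 * make_cache_config(PERF_COUNT_HW_CACHE_L1D,
	 *		     PERF_COUNT_HW_CACHE_OP_READ,
	 *		     PERF_COUNT_HW_CACHE_RESULT_MISS);
	 */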
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index dd00f747e2a..26d302c28e1 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -17,6 +17,7 @@ obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ process.o ptrace.o return_address.o setup.o signal.o \ sys_arm.o stacktrace.o time.o traps.o +obj-$(CONFIG_LEDS) += leds.o obj-$(CONFIG_OC_ETM) += etm.o obj-$(CONFIG_ISA_DMA_API) += dma.o @@ -46,6 +47,8 @@ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o +obj-$(CONFIG_CPU_HAS_PMU) += pmu.o +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt ifneq ($(CONFIG_ARCH_EBSA110),y) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 4a881258bb1..883511522fc 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -12,6 +12,7 @@ */ #include <linux/sched.h> #include <linux/mm.h> +#include <linux/dma-mapping.h> #include <asm/mach/arch.h> #include <asm/thread_info.h> #include <asm/memory.h> @@ -112,5 +113,9 @@ int main(void) #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif + BLANK(); + DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); + DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); + DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); return 0; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 809681900ec..bd397e0b663 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -616,15 +616,17 @@ char * __init pcibios_setup(char *str) * but we want to try to avoid allocating at 0x2900-0x2bff * which might be mirrored at 0x0100-0x03ff.. */ -void pcibios_align_resource(void *data, struct resource *res, - resource_size_t size, resource_size_t align) +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) { resource_size_t start = res->start; if (res->flags & IORESOURCE_IO && start & 0x300) start = (start + 0x3ff) & ~0x3ff; - res->start = (start + align - 1) & ~(align - 1); + start = (start + align - 1) & ~(align - 1); + + return start; } /** diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 5c91addcaeb..a38b4879441 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -24,7 +24,7 @@ #if defined(CONFIG_CPU_V6) - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -51,7 +51,7 @@ #elif defined(CONFIG_CPU_V7) - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -71,7 +71,7 @@ wait: mrc p14, 0, pc, c0, c1, 0 #elif defined(CONFIG_CPU_XSCALE) - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -98,7 +98,7 @@ wait: mrc p14, 0, pc, c0, c1, 0 #else - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -164,7 +164,7 @@ ENDPROC(printhex2) .ltorg ENTRY(printascii) - addruart r3 + addruart r3, r1 b 2f 1: waituart r2, r3 senduart r1, r3 @@ -180,7 +180,7 @@ ENTRY(printascii) ENDPROC(printascii) ENTRY(printch) - addruart r3 + addruart r3, r1 mov r1, r0 mov r0, #0 b 1b diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c new file mode 100644 index 00000000000..31a316c1777 --- /dev/null +++ b/arch/arm/kernel/leds.c @@ -0,0 +1,115 @@ +/* + * LED support code, ripped out of arch/arm/kernel/time.c + * + * Copyright (C) 1994-2001 Russell King + * + * This program is free software; you can redistribute it and/or 
modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sysdev.h> + +#include <asm/leds.h> + +static void dummy_leds_event(led_event_t evt) +{ +} + +void (*leds_event)(led_event_t) = dummy_leds_event; + +struct leds_evt_name { + const char name[8]; + int on; + int off; +}; + +static const struct leds_evt_name evt_names[] = { + { "amber", led_amber_on, led_amber_off }, + { "blue", led_blue_on, led_blue_off }, + { "green", led_green_on, led_green_off }, + { "red", led_red_on, led_red_off }, +}; + +static ssize_t leds_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t size) +{ + int ret = -EINVAL, len = strcspn(buf, " "); + + if (len > 0 && buf[len] == '\0') + len--; + + if (strncmp(buf, "claim", len) == 0) { + leds_event(led_claim); + ret = size; + } else if (strncmp(buf, "release", len) == 0) { + leds_event(led_release); + ret = size; + } else { + int i; + + for (i = 0; i < ARRAY_SIZE(evt_names); i++) { + if (strlen(evt_names[i].name) != len || + strncmp(buf, evt_names[i].name, len) != 0) + continue; + if (strncmp(buf+len, " on", 3) == 0) { + leds_event(evt_names[i].on); + ret = size; + } else if (strncmp(buf+len, " off", 4) == 0) { + leds_event(evt_names[i].off); + ret = size; + } + break; + } + } + return ret; +} + +static SYSDEV_ATTR(event, 0200, NULL, leds_store); + +static int leds_suspend(struct sys_device *dev, pm_message_t state) +{ + leds_event(led_stop); + return 0; +} + +static int leds_resume(struct sys_device *dev) +{ + leds_event(led_start); + return 0; +} + +static int leds_shutdown(struct sys_device *dev) +{ + leds_event(led_halted); + return 0; +} + +static struct sysdev_class leds_sysclass = { + .name = "leds", + .shutdown = leds_shutdown, + .suspend = leds_suspend, + .resume = leds_resume, +}; + +static struct sys_device leds_device = { + .id = 0, + .cls = &leds_sysclass, +}; + +static int __init leds_init(void) +{ + int ret; + ret = sysdev_class_register(&leds_sysclass); + if (ret == 0) + ret = sysdev_register(&leds_device); + if (ret == 0) + ret = sysdev_create_file(&leds_device, &attr_event); + return ret; +} + +device_initcall(leds_init); + +EXPORT_SYMBOL(leds_event); diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c new file mode 100644 index 00000000000..c54ceb3d1f9 --- /dev/null +++ b/arch/arm/kernel/perf_event.c @@ -0,0 +1,2276 @@ +#undef DEBUG + +/* + * ARM performance counter support. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * + * ARMv7 support: Jean Pihet <jpihet@mvista.com> + * 2010 (c) MontaVista Software, LLC. + * + * This code is based on the sparc64 perf event code, which is in turn based + * on the x86 code. Callchain code is based on the ARM OProfile backtrace + * code. + */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> + +#include <asm/cputype.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> +#include <asm/stacktrace.h> + +static const struct pmu_irqs *pmu_irqs; + +/* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ +DEFINE_SPINLOCK(pmu_lock); + +/* + * ARMv6 supports a maximum of 3 events, starting from index 1. 
If we add + * another platform that supports more, we need to increase this to be the + * largest of all platforms. + * + * ARMv7 supports up to 32 events: + * cycle counter CCNT + 31 events counters CNT0..30. + * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. + */ +#define ARMPMU_MAX_HWEVENTS 33 + +/* The events for a given CPU. */ +struct cpu_hw_events { + /* + * The events that are active on the CPU for the given index. Index 0 + * is reserved. + */ + struct perf_event *events[ARMPMU_MAX_HWEVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; + + /* + * A 1 bit for an index indicates that the counter is actively being + * used. + */ + unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct arm_pmu { + char *name; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct hw_perf_event *evt, int idx); + void (*disable)(struct hw_perf_event *evt, int idx); + int (*event_map)(int evt); + u64 (*raw_event)(u64); + int (*get_event_idx)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); + u32 (*read_counter)(int idx); + void (*write_counter)(int idx, u32 val); + void (*start)(void); + void (*stop)(void); + int num_events; + u64 max_period; +}; + +/* Set at runtime when we know what CPU type we are. */ +static const struct arm_pmu *armpmu; + +#define HW_OP_UNSUPPORTED 0xFFFF + +#define C(_x) \ + PERF_COUNT_HW_CACHE_##_x + +#define CACHE_OP_UNSUPPORTED 0xFFFF + +static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + +static int +armpmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +static int +armpmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)armpmu->max_period) + left = armpmu->max_period; + + atomic64_set(&hwc->prev_count, (u64)-left); + + armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static u64 +armpmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + int shift = 64 - 32; + s64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = armpmu->read_counter(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + 
atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void +armpmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + + clear_bit(idx, cpuc->active_mask); + armpmu->disable(hwc, idx); + + barrier(); + + armpmu_event_update(event, hwc, idx); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void +armpmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + armpmu_event_update(event, hwc, hwc->idx); +} + +static void +armpmu_unthrottle(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* + * Set the period again. Some counters can't be stopped, so when we + * were throttled we simply disabled the IRQ source and the counter + * may have been left counting. If we don't do this step then we may + * get an interrupt too soon or *way* too late if the overflow has + * happened since disabling. + */ + armpmu_event_set_period(event, hwc, hwc->idx); + armpmu->enable(hwc, hwc->idx); +} + +static int +armpmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + /* If we don't have a space for the counter then finish early. */ + idx = armpmu->get_event_idx(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. + */ + event->hw.idx = idx; + armpmu->disable(hwc, idx); + cpuc->events[idx] = event; + set_bit(idx, cpuc->active_mask); + + /* Set the period for the event. */ + armpmu_event_set_period(event, hwc, idx); + + /* Enable the event. */ + armpmu->enable(hwc, idx); + + /* Propagate our changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + +out: + return err; +} + +static struct pmu pmu = { + .enable = armpmu_enable, + .disable = armpmu_disable, + .unthrottle = armpmu_unthrottle, + .read = armpmu_read, +}; + +static int +validate_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event fake_event = event->hw; + + if (event->pmu && event->pmu != &pmu) + return 0; + + return armpmu->get_event_idx(cpuc, &fake_event) >= 0; +} + +static int +validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + memset(&fake_pmu, 0, sizeof(fake_pmu)); + + if (!validate_event(&fake_pmu, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_pmu, event)) + return -ENOSPC; + + return 0; +} + +static int +armpmu_reserve_hardware(void) +{ + int i; + int err; + + pmu_irqs = reserve_pmu(); + if (IS_ERR(pmu_irqs)) { + pr_warning("unable to reserve pmu\n"); + return PTR_ERR(pmu_irqs); + } + + init_pmu(); + + if (pmu_irqs->num_irqs < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + for (i = 0; i < pmu_irqs->num_irqs; ++i) { + err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq, + IRQF_DISABLED, "armpmu", NULL); + if (err) { + pr_warning("unable to request IRQ%d for ARM " + "perf counters\n", pmu_irqs->irqs[i]); + break; + } + } + + if (err) { + for (i = i - 1; i >= 0; --i) + free_irq(pmu_irqs->irqs[i], NULL); + release_pmu(pmu_irqs); + pmu_irqs = NULL; + } + + return err; +} + +static void +armpmu_release_hardware(void) +{ + int i; + + for (i = pmu_irqs->num_irqs - 1; i >= 0; --i) + free_irq(pmu_irqs->irqs[i], NULL); + armpmu->stop(); + + release_pmu(pmu_irqs); + pmu_irqs = NULL; +} + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmu_reserve_mutex); + +static void +hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { + armpmu_release_hardware(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int +__hw_perf_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int mapping, err; + + /* Decode the generic type into an ARM event identifier. */ + if (PERF_TYPE_HARDWARE == event->attr.type) { + mapping = armpmu->event_map(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + mapping = armpmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + mapping = armpmu->raw_event(event->attr.config); + } else { + pr_debug("event type %x not supported\n", event->attr.type); + return -EOPNOTSUPP; + } + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * Check whether we need to exclude the counter from certain modes. + * The ARM performance counters are on all of the time so if someone + * has asked us for some excludes then we have to fail. + */ + if (event->attr.exclude_kernel || event->attr.exclude_user || + event->attr.exclude_hv || event->attr.exclude_idle) { + pr_debug("ARM performance counters do not support " + "mode exclusion\n"); + return -EPERM; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. 
For SMP systems, each core has it's own PMU so we can't do any + * clever allocation or constraints checking at this point. + */ + hwc->idx = -1; + + /* + * Store the event encoding into the config_base field. config and + * event_base are unused as the only 2 things we need to know are + * the event mapping and the counter to use. The counter to use is + * also the indx and the config_base is the event type. + */ + hwc->config_base = (unsigned long)mapping; + hwc->config = 0; + hwc->event_base = 0; + + if (!hwc->sample_period) { + hwc->sample_period = armpmu->max_period; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) { + err = validate_group(event); + if (err) + return -EINVAL; + } + + return err; +} + +const struct pmu * +hw_perf_event_init(struct perf_event *event) +{ + int err = 0; + + if (!armpmu) + return ERR_PTR(-ENODEV); + + event->destroy = hw_perf_event_destroy; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > perf_max_events) { + atomic_dec(&active_events); + return ERR_PTR(-ENOSPC); + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) { + err = armpmu_reserve_hardware(); + } + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return ERR_PTR(err); + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err ? ERR_PTR(err) : &pmu; +} + +void +hw_perf_enable(void) +{ + /* Enable all of the perf events on hardware. */ + int idx; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!armpmu) + return; + + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + + if (!event) + continue; + + armpmu->enable(&event->hw, idx); + } + + armpmu->start(); +} + +void +hw_perf_disable(void) +{ + if (armpmu) + armpmu->stop(); +} + +/* + * ARMv6 Performance counter handling code. + * + * ARMv6 has 2 configurable performance counters and a single cycle counter. + * They all share a single reset bit but can be written to zero so we can use + * that for a reset. + * + * The counters can't be individually enabled or disabled so when we remove + * one event and replace it with another we could get spurious counts from the + * wrong event. However, we can take advantage of the fact that the + * performance counters can export events to the event bus, and the event bus + * itself can be monitored. This requires that we *don't* export the events to + * the event bus. The procedure for disabling a configurable counter is: + * - change the counter to count the ETMEXTOUT[0] signal (0x20). This + * effectively stops the counter from counting. + * - disable the counter's interrupt generation (each counter has it's + * own interrupt enable bit). + * Once stopped, the counter value can be written as 0 to reset. + * + * To enable a counter: + * - enable the counter's interrupt generation. + * - set the new event type. + * + * Note: the dedicated cycle counter only counts cycles and can't be + * enabled/disabled independently of the others. When we want to disable the + * cycle counter, we have to just disable the interrupt reporting and start + * ignoring that counter. When re-enabling, we have to reset the value and + * enable the interrupt. 
+ */ + +enum armv6_perf_types { + ARMV6_PERFCTR_ICACHE_MISS = 0x0, + ARMV6_PERFCTR_IBUF_STALL = 0x1, + ARMV6_PERFCTR_DDEP_STALL = 0x2, + ARMV6_PERFCTR_ITLB_MISS = 0x3, + ARMV6_PERFCTR_DTLB_MISS = 0x4, + ARMV6_PERFCTR_BR_EXEC = 0x5, + ARMV6_PERFCTR_BR_MISPREDICT = 0x6, + ARMV6_PERFCTR_INSTR_EXEC = 0x7, + ARMV6_PERFCTR_DCACHE_HIT = 0x9, + ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, + ARMV6_PERFCTR_DCACHE_MISS = 0xB, + ARMV6_PERFCTR_DCACHE_WBACK = 0xC, + ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, + ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, + ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, + ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, + ARMV6_PERFCTR_WBUF_DRAINED = 0x12, + ARMV6_PERFCTR_CPU_CYCLES = 0xFF, + ARMV6_PERFCTR_NOP = 0x20, +}; + +enum armv6_counters { + ARMV6_CYCLE_COUNTER = 1, + ARMV6_COUNTER0, + ARMV6_COUNTER1, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. + */ +static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * The ARM performance counters can count micro DTLB misses, + * micro ITLB misses and main TLB misses. There isn't an event + * for TLB misses, so use the micro misses here and if users + * want the main TLB misses they can use a raw counter. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +enum armv6mpcore_perf_types { + ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, + ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, + ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, + ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, + ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, + ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, + ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, + ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, + ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, + ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, + ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, + ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, + ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, + ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, + ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, + ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, + ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, + ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, + ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, + ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. 
+ */ +static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, + [C(RESULT_MISS)] = + ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, + [C(RESULT_MISS)] = + ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * The ARM performance counters can count micro DTLB misses, + * micro ITLB misses and main TLB misses. There isn't an event + * for TLB misses, so use the micro misses here and if users + * want the main TLB misses they can use a raw counter. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +static inline unsigned long +armv6_pmcr_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); + return val; +} + +static inline void +armv6_pmcr_write(unsigned long val) +{ + asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); +} + +#define ARMV6_PMCR_ENABLE (1 << 0) +#define ARMV6_PMCR_CTR01_RESET (1 << 1) +#define ARMV6_PMCR_CCOUNT_RESET (1 << 2) +#define ARMV6_PMCR_CCOUNT_DIV (1 << 3) +#define ARMV6_PMCR_COUNT0_IEN (1 << 4) +#define ARMV6_PMCR_COUNT1_IEN (1 << 5) +#define ARMV6_PMCR_CCOUNT_IEN (1 << 6) +#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) +#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) +#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) +#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 +#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) +#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 +#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) + +#define ARMV6_PMCR_OVERFLOWED_MASK \ + ( |
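The ARMv6 enable/disable handlers that consume these ARMV6_PMCR_* bits fall outside the portion of the diff shown above. As a rough sketch only, assuming the surrounding perf_event.c context and not taken from the patch, the read/modify/write pattern that pmu_lock serializes looks roughly like this:

	/*
	 * Sketch, not from the patch: select an event on counter 0 and enable its
	 * interrupt by a read/modify/write of PMCR, serialized by pmu_lock as the
	 * comment at the top of perf_event.c describes.  It uses only the
	 * armv6_pmcr_read()/armv6_pmcr_write() accessors and ARMV6_PMCR_* masks
	 * defined in the visible part of the diff.
	 */
	static void example_select_counter0_event(unsigned long evt)
	{
		unsigned long flags, val;

		spin_lock_irqsave(&pmu_lock, flags);
		val = armv6_pmcr_read();
		val &= ~ARMV6_PMCR_EVT_COUNT0_MASK;		/* clear the old event */
		val |= (evt << ARMV6_PMCR_EVT_COUNT0_SHIFT) &
			ARMV6_PMCR_EVT_COUNT0_MASK;		/* select the new event */
		val |= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_ENABLE; /* unmask irq, enable */
		armv6_pmcr_write(val);
		spin_unlock_irqrestore(&pmu_lock, flags);
	}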