path: root/arch/arm64/kernel/perf_event.c
Diffstat (limited to 'arch/arm64/kernel/perf_event.c')
-rw-r--r--  arch/arm64/kernel/perf_event.c | 1523
1 files changed, 1523 insertions, 0 deletions
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
new file mode 100644
index 00000000000..baf5afb7e6a
--- /dev/null
+++ b/arch/arm64/kernel/perf_event.c
@@ -0,0 +1,1523 @@
+/*
+ * PMU support
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based heavily on the ARMv7 perf event code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/cputype.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+#include <asm/stacktrace.h>
+
+/*
+ * ARMv8 supports a maximum of 32 events.
+ * The cycle counter is included in this total.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *cpu_pmu;
+
+int
+armpmu_get_max_events(void)
+{
+ int max_events = 0;
+
+ if (cpu_pmu != NULL)
+ max_events = cpu_pmu->num_events;
+
+ return max_events;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+
+int perf_num_counters(void)
+{
+ return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+
+#define C(_x) \
+ PERF_COUNT_HW_CACHE_##_x
+
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+static int
+armpmu_map_cache_event(const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result, ret;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return -EINVAL;
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return -EINVAL;
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+ if (ret == CACHE_OP_UNSUPPORTED)
+ return -ENOENT;
+
+ return ret;
+}
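
The config value decoded above follows the generic perf cache-event encoding: cache type in bits [7:0], operation in bits [15:8] and result in bits [23:16]. As an illustrative sketch (using generic perf ABI names, nothing added by this patch), an L1 data-cache read miss would be requested as:

	struct perf_event_attr attr = {
		.type   = PERF_TYPE_HW_CACHE,
		.config = PERF_COUNT_HW_CACHE_L1D |
			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
	/* armpmu_map_cache_event() resolves this to
	 * ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL via the table further down. */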
+
+static int
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+{
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+ return (int)(config & raw_event_mask);
+}
+
+static int map_cpu_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask)
+{
+ u64 config = event->attr.config;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ return armpmu_map_event(event_map, config);
+ case PERF_TYPE_HW_CACHE:
+ return armpmu_map_cache_event(cache_map, config);
+ case PERF_TYPE_RAW:
+ return armpmu_map_raw_event(raw_event_mask, config);
+ }
+
+ return -ENOENT;
+}
+
+int
+armpmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)armpmu->max_period)
+ left = armpmu->max_period;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
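
The negative initial value is the core trick here: the counter is programmed with -left so that it wraps to zero, raising the overflow interrupt, after exactly `left` increments. A worked sketch for the 32-bit counters this driver uses:

	/*
	 * Sketch: left = 1000 gives a start value of (u32)-1000 = 0xfffffc18;
	 * 1000 events later the counter wraps past 0xffffffff and overflows.
	 */
	u32 start = (u32)-1000;			/* 0xfffffc18 */
	u32 events_until_overflow = 0u - start;	/* 1000 */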
+
+u64
+armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ u64 delta, prev_raw_count, new_raw_count;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = armpmu->read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ return new_raw_count;
+}
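
Two details matter above: the cmpxchg loop retries if prev_count was updated concurrently (e.g. by the overflow interrupt), and masking the delta with max_period makes a single counter wrap between reads come out right:

	/*
	 * Sketch, 32-bit counter: prev = 0xfffffff0, new = 0x00000010.
	 * (new - prev) & 0xffffffff == 0x20, i.e. 32 events, despite new < prev.
	 */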
+
+static void
+armpmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ armpmu_event_update(event, hwc, hwc->idx);
+}
+
+static void
+armpmu_stop(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * The ARM PMU always has to update the counter, so ignore
+ * PERF_EF_UPDATE; see the comments in armpmu_start().
+ */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ armpmu->disable(hwc, hwc->idx);
+ barrier(); /* why? */
+ armpmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static void
+armpmu_start(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * The ARM PMU always has to reprogram the period, so ignore
+ * PERF_EF_RELOAD; see the comment below.
+ */
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+ /*
+ * Set the period again. Some counters can't be stopped, so when we
+ * were stopped we simply disabled the IRQ source and the counter
+ * may have been left counting. If we don't do this step then we may
+ * get an interrupt too soon or *way* too late if the overflow has
+ * happened since disabling.
+ */
+ armpmu_event_set_period(event, hwc, hwc->idx);
+ armpmu->enable(hwc, hwc->idx);
+}
+
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0);
+
+ armpmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static int
+armpmu_add(struct perf_event *event, int flags)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+ /* If we don't have space for the counter, finish early. */
+ idx = armpmu->get_event_idx(hw_events, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then make
+ * sure it is disabled.
+ */
+ event->hw.idx = idx;
+ armpmu->disable(hwc, idx);
+ hw_events->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ armpmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static int
+validate_event(struct pmu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
+
+ if (is_software_event(event))
+ return 1;
+
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ return 1;
+
+ return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct pmu_hw_events fake_pmu;
+ DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
+
+ /*
+ * Initialise the fake PMU. We only need to populate the
+ * used_mask for the purposes of validation.
+ */
+ memset(fake_used_mask, 0, sizeof(fake_used_mask));
+ fake_pmu.used_mask = fake_used_mask;
+
+ if (!validate_event(&fake_pmu, leader))
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(&fake_pmu, sibling))
+ return -EINVAL;
+ }
+
+ if (!validate_event(&fake_pmu, event))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+armpmu_disable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ disable_percpu_irq(irq);
+}
+
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+ int irq;
+ unsigned int i, irqs;
+ struct platform_device *pmu_device = armpmu->plat_device;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs)
+ return;
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0)
+ return;
+
+ if (irq_is_percpu(irq)) {
+ on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &cpu_hw_events);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq > 0)
+ free_irq(irq, armpmu);
+ }
+ }
+}
+
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static int
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
+{
+ int err, irq;
+ unsigned int i, irqs;
+ struct platform_device *pmu_device = armpmu->plat_device;
+
+ if (!pmu_device) {
+ pr_err("no PMU device registered\n");
+ return -ENODEV;
+ }
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0) {
+ pr_err("failed to get valid irq for PMU device\n");
+ return -ENODEV;
+ }
+
+ if (irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, armpmu->handle_irq,
+ "arm-pmu", &cpu_hw_events);
+
+ if (err) {
+ pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq <= 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt whose affinity we cannot set,
+ * assume that we're running on a uniprocessor machine and carry
+ * on. Otherwise, skip this interrupt and continue without it.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
+ err = request_irq(irq, armpmu->handle_irq,
+ IRQF_NOBALANCING,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
+ }
+
+ return 0;
+}
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ atomic_t *active_events = &armpmu->active_events;
+ struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+ if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+ armpmu_release_hardware(armpmu);
+ mutex_unlock(pmu_reserve_mutex);
+ }
+}
+
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+ return attr->exclude_idle || attr->exclude_user ||
+ attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int mapping, err;
+
+ mapping = armpmu->map_event(event);
+
+ if (mapping < 0) {
+ pr_debug("event %x:%llx not supported\n", event->attr.type,
+ event->attr.config);
+ return mapping;
+ }
+
+ /*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+ * yet. For SMP systems, each core has its own PMU so we can't do any
+ * clever allocation or constraints checking at this point.
+ */
+ hwc->idx = -1;
+ hwc->config_base = 0;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ /*
+ * Check whether we need to exclude the counter from certain modes.
+ */
+ if ((!armpmu->set_event_filter ||
+ armpmu->set_event_filter(hwc, &event->attr)) &&
+ event_requires_mode_exclusion(&event->attr)) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EPERM;
+ }
+
+ /*
+ * Store the event encoding into the config_base field.
+ */
+ hwc->config_base |= (unsigned long)mapping;
+
+ if (!hwc->sample_period) {
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event) {
+ err = validate_group(event);
+ if (err)
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int armpmu_event_init(struct perf_event *event)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ int err = 0;
+ atomic_t *active_events = &armpmu->active_events;
+
+ if (armpmu->map_event(event) == -ENOENT)
+ return -ENOENT;
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (!atomic_inc_not_zero(active_events)) {
+ mutex_lock(&armpmu->reserve_mutex);
+ if (atomic_read(active_events) == 0)
+ err = armpmu_reserve_hardware(armpmu);
+
+ if (!err)
+ atomic_inc(active_events);
+ mutex_unlock(&armpmu->reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err;
+}
+
+static void armpmu_enable(struct pmu *pmu)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+ if (enabled)
+ armpmu->start();
+}
+
+static void armpmu_disable(struct pmu *pmu)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
+ armpmu->stop();
+}
+
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+ atomic_set(&armpmu->active_events, 0);
+ mutex_init(&armpmu->reserve_mutex);
+
+ armpmu->pmu = (struct pmu) {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+ };
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+ armpmu_init(armpmu);
+ return perf_pmu_register(&armpmu->pmu, name, type);
+}
+
+/*
+ * ARMv8 PMUv3 Performance Events handling code.
+ * Common event types.
+ */
+enum armv8_pmuv3_perf_types {
+ /* Required events. */
+ ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
+ ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
+ ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
+ ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
+ ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
+ ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,
+
+ /* At least one of the following is required. */
+ ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
+ ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,
+
+ /* Common architectural events. */
+ ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
+ ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
+ ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
+ ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
+ ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
+ ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
+ ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
+ ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
+ ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
+ ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,
+
+ /* Common microarchitectural events. */
+ ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
+ ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
+ ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
+ ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
+ ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
+ ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
+ ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
+ ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
+ ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
+ ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
+ ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
+ ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
+};
+
+/* PMUv3 HW events mapping. */
+static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
+ * Perf Events' indices
+ */
+#define ARMV8_IDX_CYCLE_COUNTER 0
+#define ARMV8_IDX_COUNTER0 1
+#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define ARMV8_MAX_COUNTERS 32
+#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
+
+/*
+ * ARMv8 low level PMU access
+ */
+
+/*
+ * Perf Event to low level counters mapping
+ */
+#define ARMV8_IDX_TO_COUNTER(x) \
+ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
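
Note the arithmetic: perf index 0 (the cycle counter) comes out as (0 - 1) & 0x1f = 31, which lines up with the C-bit position used for the cycle counter in the PMU enable/overflow registers, while indices 1..N map onto event counters 0..N-1:

	/*
	 * ARMV8_IDX_TO_COUNTER(ARMV8_IDX_CYCLE_COUNTER) == 31, the C bit in
	 * PMCNTENSET_EL0/PMOVSCLR_EL0;
	 * ARMV8_IDX_TO_COUNTER(ARMV8_IDX_COUNTER0) == 0, the first event counter.
	 */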
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
+#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
+#define ARMV8_PMCR_N_MASK 0x1f
+#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_EXCLUDE_EL1 (1 << 31)
+#define ARMV8_EXCLUDE_EL0 (1 << 30)
+#define ARMV8_INCLUDE_EL2 (1 << 27)
+
+static inline u32 armv8pmu_pmcr_read(void)
+{
+ u32 val;
+ asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+ return val;
+}
+
+static inline void armv8pmu_pmcr_write(u32 val)
+{
+ val &= ARMV8_PMCR_MASK;
+ isb();
+ asm volatile("msr pmcr_el0, %0" :: "r" (val));
+}
+
+static inline int armv8pmu_has_overflowed(u32 pmovsr)
+{
+ return pmovsr & ARMV8_OVERFLOWED_MASK;
+}
+
+static inline int armv8pmu_counter_valid(int idx)
+{
+ return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
+}
+
+static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+{
+ int ret = 0;
+ u32 counter;
+
+ if (!armv8pmu_counter_valid(idx)) {
+ pr_err("CPU%u checking wrong counter %d overflow status\n",
+ smp_processor_id(), idx);
+ } else {
+ counter = ARMV8_IDX_TO_COUNTER(idx);
+ ret = pmnc & BIT(counter);
+ }
+
+ return ret;
+}
+
+static inline int armv8pmu_select_counter(int idx)
+{
+ u32 counter;
+
+ if (!armv8pmu_counter_valid(idx)) {
+ pr_err("CPU%u selecting wrong PMNC counter %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
+ }
+
+ counter = ARMV8_IDX_TO_COUNTER(idx);
+ asm volatile("msr pmselr_el0, %0" :: "r" (counter));
+ isb();
+
+ return idx;
+}
+
+static inline u32 armv8pmu_read_counter(int idx)
+{
+ u32 value = 0;
+
+ if (!armv8pmu_counter_valid(idx))
+ pr_err("CPU%u reading wrong counter %d\n",
+ smp_processor_id(), idx);
+ else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
+ else if (armv8pmu_select_counter(idx) == idx)
+ asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
+
+ return value;
+}
+
+static inline void armv8pmu_write_counter(int idx, u32 value)
+{
+ if (!armv8pmu_counter_valid(idx))
+ pr_err("CPU%u writing wrong counter %d\n",
+ smp_processor_id(), idx);
+ else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ asm volatile("msr pmccntr_el0, %0" :: "r" (value));
+ else if (armv8pmu_select_counter(idx) == idx)
+ asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
+}
+
+static inline void armv8pmu_write_evtype(int idx, u32 val)
+{
+ if (armv8pmu_select_counter(idx) == idx) {
+ val &= ARMV8_EVTYPE_MASK;
+ asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+ }
+}
+
+static inline int armv8pmu_enable_counter(int idx)
+{
+ u32 counter;
+
+ if (!armv8pmu_counter_valid(idx)) {
+ pr_err("CPU%u enabling wrong PMNC counter %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
+ }
+
+ counter = ARMV8_IDX_TO_COUNTER(idx);
+ asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
+ return idx;
+}
+
+static inline int armv8pmu_disable_counter(int idx)
+{
+ u32 counter;
+
+ if (!armv8pmu_counter_valid(idx)) {
+ pr_err("CPU%u disabling wrong PMNC counter %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
+ }
+
+ counter = ARMV8_IDX_TO_COUNTER(idx);
+ asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
+ return idx;
+}
+
+static inline int armv8pmu_enable_intens(int idx)
+{
+ u32 counter;
+
+ if (!armv8pmu_counter_valid(idx)) {
+ pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
+ }
+
+ counter = ARMV8_IDX_TO_COUNTER(idx);
+ asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
+ return idx;
+}
+
+static inline int armv8pmu_disable_intens(int idx)
+{
+ u32 counter;
+
+ if (!armv8pmu_counter_valid(idx)) {
+ pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
+ }
+
+ counter = ARMV8_IDX_TO_COUNTER(idx);
+ asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
+ isb();
+ return idx;
+}
+
+static inline u32 armv8pmu_getreset_flags(void)
+{
+ u32 value;
+
+ /* Read */
+ asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
+
+ /* Write to clear flags */
+ value &= ARMV8_OVSR_MASK;
+ asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
+
+ return value;
+}
+
+static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /*
+ * Disable counter
+ */
+ armv8pmu_disable_counter(idx);
+
+ /*
+ * Set event (if destined for PMNx counters).
+ */
+ armv8pmu_write_evtype(idx, hwc->config_base);
+
+ /*
+ * Enable interrupt for this counter
+ */
+ armv8pmu_enable_intens(idx);
+
+ /*
+ * Enable counter
+ */
+ armv8pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+ /*
+ * Disable counter and interrupt
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /*
+ * Disable counter
+ */
+ armv8pmu_disable_counter(idx);
+
+ /*
+ * Disable interrupt for this counter
+ */
+ armv8pmu_disable_intens(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+{
+ u32 pmovsr;
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * Get and reset the IRQ flags
+ */
+ pmovsr = armv8pmu_getreset_flags();
+
+ /*
+ * Did an overflow occur?
+ */
+ if (!armv8pmu_has_overflowed(pmovsr))
+ return IRQ_NONE;
+
+ /*
+ * Handle the counter(s) overflow(s)
+ */
+ regs = get_irq_regs();
+
+ cpuc = this_cpu_ptr(&cpu_hw_events);
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ cpu_pmu->disable(hwc, idx);
+ }
+
+ /*
+ * Handle the pending perf events.
+ *
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
+ * will not work.
+ */
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static void armv8pmu_start(void)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Enable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_stop(void)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Disable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ int idx;
+ unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
+
+ /* Always place a cycle-counting event into the cycle counter. */
+ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
+ if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return ARMV8_IDX_CYCLE_COUNTER;
+ }
+
+ /*
+ * For anything other than a cycle counter, try to use
+ * the event counters.
+ */
+ for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv3 PMUs.
+ */
+static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (attr->exclude_idle)
+ return -EPERM;
+ if (attr->exclude_user)
+ config_base |= ARMV8_EXCLUDE_EL0;
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_EXCLUDE_EL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV8_INCLUDE_EL2;
+
+ /*
+ * Install the filter into config_base as this is used to
+ * construct the event type.
+ */
+ event->config_base = config_base;
+
+ return 0;
+}
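
A concrete sketch of the filter in action: a user-space-only event, i.e. attr.exclude_kernel = 1 with the other exclude bits clear, produces

	/*
	 * config_base = ARMV8_EXCLUDE_EL1 | ARMV8_INCLUDE_EL2
	 *             = (1 << 31) | (1 << 27),
	 * which armv8pmu_write_evtype() later ORs into PMXEVTYPER_EL0
	 * alongside the event number.
	 */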
+
+static void armv8pmu_reset(void *info)
+{
+ u32 idx, nb_cnt = cpu_pmu->num_events;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
+ armv8pmu_disable_event(NULL, idx);
+
+ /* Initialize & Reset PMNC: C and P bits. */
+ armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
+
+ /* Disable access from userspace. */
+ asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
+}
+
+static int armv8_pmuv3_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv8_pmuv3_perf_map,
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
+}
+
+static struct arm_pmu armv8pmu = {
+ .handle_irq = armv8pmu_handle_irq,
+ .enable = armv8pmu_enable_event,
+ .disable = armv8pmu_disable_event,
+ .read_counter = armv8pmu_read_counter,
+ .write_counter = armv8pmu_write_counter,
+ .get_event_idx = armv8pmu_get_event_idx,
+ .start = armv8pmu_start,
+ .stop = armv8pmu_stop,
+ .reset = armv8pmu_reset,
+ .max_period = (1LLU << 32) - 1,
+};
+
+static u32 __init armv8pmu_read_num_pmnc_events(void)
+{
+ u32 nb_cnt;
+
+ /* Read the number of CNTx counters supported from PMNC */
+ nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+
+ /* Add the CPU cycles counter and return */
+ return nb_cnt + 1;
+}
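
For example (a sketch; the counter count is implementation defined): an implementation with six event counters reports PMCR_EL0.N, bits [15:11], as 6, so this returns 7, with index 0 reserved for the cycle counter:

	/* (pmcr >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK == 6  ->  6 + 1 == 7 */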
+
+static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
+{
+ armv8pmu.name = "arm/armv8-pmuv3";
+ armv8pmu.map_event = armv8_pmuv3_map_event;
+ armv8pmu.num_events = armv8pmu_read_num_pmnc_events();
+ armv8pmu.set_event_filter = armv8pmu_set_event_filter;
+ return &armv8pmu;
+}
+
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+cpu_pmu_reset(void)
+{
+ if (cpu_pmu && cpu_pmu->reset)
+ return on_each_cpu(cpu_pmu->reset, NULL, 1);
+ return 0;
+}
+arch_initcall(cpu_pmu_reset);
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+ {.compatible = "arm,armv8-pmuv3"},
+ {},
+};
+
+static int armpmu_device_probe(struct platform_device *pdev)
+{
+ if (!cpu_pmu)
+ return -ENODEV;
+
+ cpu_pmu->plat_device = pdev;
+ return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+ .driver = {
+ .name = "arm-pmu",
+ .of_match_table = armpmu_of_device_ids,
+ },
+ .probe = armpmu_device_probe,
+};
+
+static int __init register_pmu_driver(void)
+{
+ return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+ return this_cpu_ptr(&cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ events->events = per_cpu(hw_events, cpu);
+ events->used_mask = per_cpu(used_mask, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ }
+ armpmu->get_hw_events = armpmu_get_cpu_events;
+}
+
+static int __init init_hw_perf_events(void)
+{
+ u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
+
+ switch ((dfr >> 8) & 0xf) {
+ case 0x1: /* PMUv3 */
+ cpu_pmu = armv8_pmuv3_pmu_init();
+ break;
+ }
+
+ if (cpu_pmu) {
+ pr_info("enabled with %s PMU driver, %d counters available\n",
+ cpu_pmu->name, cpu_pmu->num_events);
+ cpu_pmu_init(cpu_pmu);
+ armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
+ } else {
+ pr_info("no hardware support available\n");
+ }
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
+
+/*
+ * Callchain handling code.
+ */
+struct frame_tail {
+ struct frame_tail __user *fp;
+ unsigned long lr;
+} __attribute__((packed));
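
This layout mirrors the AAPCS64 frame record: the frame pointer (x29) points at a {saved fp, saved lr} pair, so each unwind step records the saved link register and follows the saved frame pointer. A sketch of one frame being walked:

	/*
	 * Higher addresses
	 *   | saved lr |  <- tail + 8  (return address, stored in the callchain)
	 *   | saved fp |  <- tail      (pointer to the caller's frame_tail)
	 * Lower addresses
	 */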
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail >= buftail.fp)
+ return NULL;
+
+ return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct compat_frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest OS callchains yet */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->pc);
+
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+
+ while (entry->nr < PERF_MAX_STACK_DEPTH &&
+ tail && !((unsigned long)tail & 0xf))
+ tail = user_backtrace(tail, entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = compat_user_backtrace(tail, entry);
+#endif
+ }
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return, so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+ struct perf_callchain_entry *entry = data;
+ perf_callchain_store(entry, frame->pc);
+ return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+ struct stackframe frame;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest OS callchains yet */
+ return;
+ }
+
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+
+ walk_stackframe(&frame, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ return perf_guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ return misc;
+}
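
For reference, a minimal user-space consumer of this driver might look like the following (a sketch using the standard perf_event_open ABI; error handling elided):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Sketch: count CPU cycles for the calling task on any CPU. */
		struct perf_event_attr attr = {
			.type     = PERF_TYPE_HARDWARE,
			.size     = sizeof(attr),
			.config   = PERF_COUNT_HW_CPU_CYCLES,
			.disabled = 1,
		};
		int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... run the workload under measurement ... */
		unsigned long long count;
		read(fd, &count, sizeof(count));
		return count > 0 ? 0 : 1;
	}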
-#endif /* __KERNEL__ */
-
#endif /* __ALPHA_COMPILER_H */
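
The __kernel_* helpers that moved to uapi/asm/compiler.h each wrap a single Alpha byte-manipulation or bit-count instruction (insbl, extbl, cttz, ...), using compiler builtins where GCC provides them and inline asm otherwise. __kernel_extbl(val, shift), for example, extracts the byte of val selected by the low bits of shift; a sketch of a typical caller:

    /* Pull byte n (0..7) out of a 64-bit word via extbl (illustrative). */
    static inline unsigned char get_byte(unsigned long v, int n)
    {
            return (unsigned char)__kernel_extbl(v, n);
    }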
diff --git a/arch/alpha/include/asm/console.h b/arch/alpha/include/asm/console.h
index a3ce4e62249..f2b584fe099 100644
--- a/arch/alpha/include/asm/console.h
+++ b/arch/alpha/include/asm/console.h
@@ -1,52 +1,8 @@
#ifndef __AXP_CONSOLE_H
#define __AXP_CONSOLE_H
-/*
- * Console callback routine numbers
- */
-#define CCB_GETC 0x01
-#define CCB_PUTS 0x02
-#define CCB_RESET_TERM 0x03
-#define CCB_SET_TERM_INT 0x04
-#define CCB_SET_TERM_CTL 0x05
-#define CCB_PROCESS_KEYCODE 0x06
-#define CCB_OPEN_CONSOLE 0x07
-#define CCB_CLOSE_CONSOLE 0x08
+#include <uapi/asm/console.h>
-#define CCB_OPEN 0x10
-#define CCB_CLOSE 0x11
-#define CCB_IOCTL 0x12
-#define CCB_READ 0x13
-#define CCB_WRITE 0x14
-
-#define CCB_SET_ENV 0x20
-#define CCB_RESET_ENV 0x21
-#define CCB_GET_ENV 0x22
-#define CCB_SAVE_ENV 0x23
-
-#define CCB_PSWITCH 0x30
-#define CCB_BIOS_EMUL 0x32
-
-/*
- * Environment variable numbers
- */
-#define ENV_AUTO_ACTION 0x01
-#define ENV_BOOT_DEV 0x02
-#define ENV_BOOTDEF_DEV 0x03
-#define ENV_BOOTED_DEV 0x04
-#define ENV_BOOT_FILE 0x05
-#define ENV_BOOTED_FILE 0x06
-#define ENV_BOOT_OSFLAGS 0x07
-#define ENV_BOOTED_OSFLAGS 0x08
-#define ENV_BOOT_RESET 0x09
-#define ENV_DUMP_DEV 0x0A
-#define ENV_ENABLE_AUDIT 0x0B
-#define ENV_LICENSE 0x0C
-#define ENV_CHAR_SET 0x0D
-#define ENV_LANGUAGE 0x0E
-#define ENV_TTY_DEV 0x0F
-
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern long callback_puts(long unit, const char *s, long length);
extern long callback_getc(long unit);
@@ -70,6 +26,4 @@ struct hwrpb_struct;
extern int callback_init_done;
extern void * callback_init(void *);
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* __AXP_CONSOLE_H */
diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h
index f7cb4b46095..8ee6c516279 100644
--- a/arch/alpha/include/asm/core_lca.h
+++ b/arch/alpha/include/asm/core_lca.h
@@ -1,8 +1,8 @@
#ifndef __ALPHA_LCA__H__
#define __ALPHA_LCA__H__
-#include <asm/system.h>
#include <asm/compiler.h>
+#include <asm/mce.h>
/*
* Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index 9f67a056b46..ad44bef29fb 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <asm/compiler.h>
+#include <asm/mce.h>
/*
* MCPCIA is the internal name for a core logic chipset which provides
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 91b46801b29..ade9d92e68b 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/compiler.h>
-#include <asm/system.h>
/*
* T2 is the internal name for the core logic chipset which provides
diff --git a/arch/alpha/include/asm/cputime.h b/arch/alpha/include/asm/cputime.h
deleted file mode 100644
index 19577fd9323..00000000000
--- a/arch/alpha/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ALPHA_CPUTIME_H
-#define __ALPHA_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ALPHA_CPUTIME_H */
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 4567aca6fdd..dfa32f06132 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -12,16 +12,22 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
- return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
+ return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
- get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
+ get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
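
This hunk tracks the tree-wide dma_map_ops conversion: the ->alloc_coherent/->free_coherent methods became ->alloc/->free taking a struct dma_attrs *, with dma_alloc_coherent()/dma_free_coherent() reduced to wrappers that pass NULL attrs. Driver-side usage is unchanged; for example (a generic sketch, function name invented):

    /* Allocate a DMA-coherent ring buffer (generic driver sketch). */
    static int setup_ring(struct device *dev, void **ring, dma_addr_t *ring_dma)
    {
            *ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
            if (!*ring)
                    return -ENOMEM;
            /* ... program the device with *ring_dma ... */
            return 0;
    }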
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index da5449e2217..968d9991f5e 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -2,6 +2,7 @@
#define __ASM_ALPHA_ELF_H
#include <asm/auxvec.h>
+#include <asm/special_insns.h>
/* Special values for the st_other field in the symbol table. */
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 46cefbd50e7..bae97eb19d2 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
- IRQF_DISABLED, "floppy", NULL)
+ 0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#ifdef CONFIG_PCI
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index ecb17a72acc..71c20956b90 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,124 +1,8 @@
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H
-/*
- * Alpha floating-point control register defines:
- */
-#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */
-#define FPCR_DNZ (1UL<<48) /* denorms to zero */
-#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */
-#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */
-#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */
-#define FPCR_INV (1UL<<52) /* invalid operation */
-#define FPCR_DZE (1UL<<53) /* division by zero */
-#define FPCR_OVF (1UL<<54) /* overflow */
-#define FPCR_UNF (1UL<<55) /* underflow */
-#define FPCR_INE (1UL<<56) /* inexact */
-#define FPCR_IOV (1UL<<57) /* integer overflow */
-#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */
-#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */
-#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */
-#define FPCR_SUM (1UL<<63) /* summary bit */
-
-#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */
-#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */
-#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */
-#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */
-#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */
-#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT)
-
-#define FPCR_MASK 0xffff800000000000L
-
-/*
- * IEEE trap enables are implemented in software. These per-thread
- * bits are stored in the "ieee_state" field of "struct thread_info".
- * Thus, the bits are defined so as not to conflict with the
- * floating-point enable bit (which is architected). On top of that,
- * we want to make these bits compatible with OSF/1 so
- * ieee_set_fp_control() etc. can be implemented easily and
- * compatibly. The corresponding definitions are in
- * /usr/include/machine/fpu.h under OSF/1.
- */
-#define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */
-#define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */
-#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */
-#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */
-#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */
-#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */
-#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
- IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
- IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
-
-/* Denorm and Underflow flushing */
-#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
-#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
-
-#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
-
-/* status bits coming from fpcr: */
-#define IEEE_STATUS_INV (1UL<<17)
-#define IEEE_STATUS_DZE (1UL<<18)
-#define IEEE_STATUS_OVF (1UL<<19)
-#define IEEE_STATUS_UNF (1UL<<20)
-#define IEEE_STATUS_INE (1UL<<21)
-#define IEEE_STATUS_DNO (1UL<<22)
-
-#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
- IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
- IEEE_STATUS_INE | IEEE_STATUS_DNO)
-
-#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \
- IEEE_STATUS_MASK | IEEE_MAP_MASK)
-
-#define IEEE_CURRENT_RM_SHIFT 32
-#define IEEE_CURRENT_RM_MASK (3UL<<IEEE_CURRENT_RM_SHIFT)
-
-#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
-
-#define IEEE_INHERIT (1UL<<63) /* inherit on thread create? */
-
-/*
- * Convert the software IEEE trap enable and status bits into the
- * hardware fpcr format.
- *
- * Digital Unix engineers receive my thanks for not defining the
- * software bits identical to the hardware bits. The chip designers
- * receive my thanks for making all the not-implemented fpcr bits
- * RAZ forcing us to use system calls to read/write this value.
- */
-
-static inline unsigned long
-ieee_swcr_to_fpcr(unsigned long sw)
-{
- unsigned long fp;
- fp = (sw & IEEE_STATUS_MASK) << 35;
- fp |= (sw & IEEE_MAP_DMZ) << 36;
- fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0);
- fp |= (~sw & (IEEE_TRAP_ENABLE_INV
- | IEEE_TRAP_ENABLE_DZE
- | IEEE_TRAP_ENABLE_OVF)) << 48;
- fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
- fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
- fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
- return fp;
-}
-
-static inline unsigned long
-ieee_fpcr_to_swcr(unsigned long fp)
-{
- unsigned long sw;
- sw = (fp >> 35) & IEEE_STATUS_MASK;
- sw |= (fp >> 36) & IEEE_MAP_DMZ;
- sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV
- | IEEE_TRAP_ENABLE_DZE
- | IEEE_TRAP_ENABLE_OVF);
- sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
- sw |= (fp >> 47) & IEEE_MAP_UMZ;
- sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
- return sw;
-}
-
-#ifdef __KERNEL__
+#include <asm/special_insns.h>
+#include <uapi/asm/fpu.h>
/* The following two functions don't need trapb/excb instructions
around the mf_fpcr/mt_fpcr instructions because (a) the kernel
@@ -188,6 +72,4 @@ extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
-#endif /* __KERNEL__ */
-
#endif /* __ASM_ALPHA_FPU_H */
diff --git a/arch/alpha/include/asm/gpio.h b/arch/alpha/include/asm/gpio.h
index 7dc6a6343c0..b3799d88ffc 100644
--- a/arch/alpha/include/asm/gpio.h
+++ b/arch/alpha/include/asm/gpio.h
@@ -1,55 +1,4 @@
-/*
- * Generic GPIO API implementation for Alpha.
- *
- * A straight copy of that for PowerPC, which was:
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _ASM_ALPHA_GPIO_H
-#define _ASM_ALPHA_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* _ASM_ALPHA_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
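
Anything still including <asm/gpio.h> directly now gets a build warning and the generic header instead. Consumers are expected to go through <linux/gpio.h>, along these lines (a gpiolib sketch; the GPIO number is made up):

    #include <linux/gpio.h>

    #define MY_LED_GPIO     42      /* hypothetical line number */

    static int led_init(void)
    {
            int err = gpio_request(MY_LED_GPIO, "led");

            if (err)
                    return err;
            return gpio_direction_output(MY_LED_GPIO, 1);
    }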
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 56ff9650135..5ebab5895ed 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -6,7 +6,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
@@ -490,6 +489,11 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
}
#endif
+#define ioread16be(p) be16_to_cpu(ioread16(p))
+#define ioread32be(p) be32_to_cpu(ioread32(p))
+#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
+
#define inb_p inb
#define inw_p inw
#define inl_p inl
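
The new ioread*be()/iowrite*be() helpers are just the little-endian accessors plus a byte swap, which is all a little-endian machine like Alpha needs for devices with big-endian registers. Typical usage (register offset hypothetical):

    #define REG_CTRL        0x10    /* hypothetical register offset */

    static void device_enable(void __iomem *base)
    {
            u32 ctrl = ioread32be(base + REG_CTRL);

            iowrite32be(ctrl | 0x1, base + REG_CTRL);
    }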
diff --git a/arch/alpha/include/asm/irqflags.h b/arch/alpha/include/asm/irqflags.h
index 299bbc7e9d7..ffb1726484a 100644
--- a/arch/alpha/include/asm/irqflags.h
+++ b/arch/alpha/include/asm/irqflags.h
@@ -1,7 +1,7 @@
#ifndef __ALPHA_IRQFLAGS_H
#define __ALPHA_IRQFLAGS_H
-#include <asm/system.h>
+#include <asm/pal.h>
#define IPL_MIN 0
#define IPL_SW0 1
diff --git a/arch/alpha/include/asm/linkage.h b/arch/alpha/include/asm/linkage.h
index 291c2d01c44..7cfd06e8c93 100644
--- a/arch/alpha/include/asm/linkage.h
+++ b/arch/alpha/include/asm/linkage.h
@@ -1,6 +1,8 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
-/* Nothing to see here... */
+#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall")
+#define SYSCALL_ALIAS(alias, name) \
+ asm ( #alias " = " #name "\n\t.globl " #alias)
#endif
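
cond_syscall() emits a weak assembler alias: the syscall symbol defaults to sys_ni_syscall ("not implemented") and is overridden at link time if a real implementation is built in. kernel/sys_ni.c applies it to every optional syscall; one representative line and its expansion under the definition above (illustrative):

    cond_syscall(sys_quotactl);
    /* expands to:
     *   asm(".weak\tsys_quotactl\nsys_quotactl = sys_ni_syscall");
     */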
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index 72dbf235927..75cb3641ed2 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -33,6 +33,7 @@ struct alpha_machine_vector
int nr_irqs;
int rtc_port;
+ int rtc_boot_cpu_only;
unsigned int max_asn;
unsigned long max_isa_dma_address;
unsigned long irq_probe_mask;
@@ -95,9 +96,6 @@ struct alpha_machine_vector
struct _alpha_agp_info *(*agp_info)(void);
- unsigned int (*rtc_get_time)(struct rtc_time *);
- int (*rtc_set_time)(struct rtc_time *);
-
const char *vector_name;
/* NUMA information */
@@ -126,13 +124,19 @@ extern struct alpha_machine_vector alpha_mv;
#ifdef CONFIG_ALPHA_GENERIC
extern int alpha_using_srm;
+extern int alpha_using_qemu;
#else
-#ifdef CONFIG_ALPHA_SRM
-#define alpha_using_srm 1
-#else
-#define alpha_using_srm 0
-#endif
+# ifdef CONFIG_ALPHA_SRM
+# define alpha_using_srm 1
+# else
+# define alpha_using_srm 0
+# endif
+# ifdef CONFIG_ALPHA_QEMU
+# define alpha_using_qemu 1
+# else
+# define alpha_using_qemu 0
+# endif
#endif /* GENERIC */
-#endif
+#endif /* __KERNEL__ */
#endif /* __ALPHA_MACHVEC_H */
diff --git a/arch/alpha/include/asm/mce.h b/arch/alpha/include/asm/mce.h
new file mode 100644
index 00000000000..660285b9aca
--- /dev/null
+++ b/arch/alpha/include/asm/mce.h
@@ -0,0 +1,83 @@
+#ifndef __ALPHA_MCE_H
+#define __ALPHA_MCE_H
+
+/*
+ * This is the logout header that should be common to all platforms
+ * (assuming they are running OSF/1 PALcode, I guess).
+ */
+struct el_common {
+ unsigned int size; /* size in bytes of logout area */
+ unsigned int sbz1 : 30; /* should be zero */
+ unsigned int err2 : 1; /* second error */
+ unsigned int retry : 1; /* retry flag */
+ unsigned int proc_offset; /* processor-specific offset */
+ unsigned int sys_offset; /* system-specific offset */
+ unsigned int code; /* machine check code */
+ unsigned int frame_rev; /* frame revision */
+};
+
+/* Machine Check Frame for uncorrectable errors (Large format)
+ * --- This is used to log uncorrectable errors such as
+ * double bit ECC errors.
+ * --- These errors are detected by both the processor and the system.
+ */
+struct el_common_EV5_uncorrectable_mcheck {
+ unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */
+ unsigned long paltemp[24]; /* PAL TEMP REGS. */
+ unsigned long exc_addr; /* Address of excepting instruction*/
+ unsigned long exc_sum; /* Summary of arithmetic traps. */
+ unsigned long exc_mask; /* Exception mask (from exc_sum). */
+ unsigned long pal_base; /* Base address for PALcode. */
+ unsigned long isr; /* Interrupt Status Reg. */
+ unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */
+ unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity
+ <12> set TAG parity*/
+ unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1:
+ <2> Data error in bank 0
+ <3> Data error in bank 1
+ <4> Tag error in bank 0
+ <5> Tag error in bank 1 */
+ unsigned long va; /* Effective VA of fault or miss. */
+ unsigned long mm_stat; /* Holds the reason for D-stream
+ fault or D-cache parity errors */
+ unsigned long sc_addr; /* Address that was being accessed
+ when EV5 detected Secondary cache
+ failure. */
+ unsigned long sc_stat; /* Helps determine if the error was
+ TAG/Data parity(Secondary Cache)*/
+ unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */
+ unsigned long ei_addr; /* Physical address of any transfer
+ that is logged in EV5 EI_STAT */
+ unsigned long fill_syndrome; /* For correcting ECC errors. */
+ unsigned long ei_stat; /* Helps identify reason of any
+ processor uncorrectable error
+ at its external interface. */
+ unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
+};
+
+struct el_common_EV6_mcheck {
+ unsigned int FrameSize; /* Bytes, including this field */
+ unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */
+ unsigned int CpuOffset; /* Offset to CPU-specific info */
+ unsigned int SystemOffset; /* Offset to system-specific info */
+ unsigned int MCHK_Code;
+ unsigned int MCHK_Frame_Rev;
+ unsigned long I_STAT; /* EV6 Internal Processor Registers */
+ unsigned long DC_STAT; /* (See the 21264 Spec) */
+ unsigned long C_ADDR;
+ unsigned long DC1_SYNDROME;
+ unsigned long DC0_SYNDROME;
+ unsigned long C_STAT;
+ unsigned long C_STS;
+ unsigned long MM_STAT;
+ unsigned long EXC_ADDR;
+ unsigned long IER_CM;
+ unsigned long ISUM;
+ unsigned long RESERVED0;
+ unsigned long PAL_BASE;
+ unsigned long I_CTL;
+ unsigned long PCTX;
+};
+
+
+#endif /* __ALPHA_MCE_H */
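
struct el_common is the fixed header of a machine-check logout frame; proc_offset and sys_offset locate the variable-length CPU- and system-specific sections that follow it, such as the EV5/EV6 blocks above. A handler walks the frame roughly like this (a sketch modeled on the Alpha err_* handlers; names are illustrative):

    /* Decode a logout frame passed in by the PALcode (illustrative). */
    static void decode_mcheck(unsigned long la_ptr)
    {
            struct el_common *hdr = (struct el_common *)la_ptr;
            void *proc_area = (char *)hdr + hdr->proc_offset;
            void *sys_area = (char *)hdr + hdr->sys_offset;

            printk("mcheck: code %#x, proc data at %p, sys data at %p\n",
                   hdr->code, proc_area, sys_area);
            /* ... cast proc_area/sys_area to the CPU/system structs ... */
    }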
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 86c08a02d23..4c51c05333c 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -7,7 +7,6 @@
* Copyright (C) 1996, Linus Torvalds
*/
-#include <asm/system.h>
#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 445dc42e033..14ce27bccd2 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -66,13 +66,11 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
<< PAGE_SHIFT))
-/* XXX: FIXME -- wli */
+/* XXX: FIXME -- nyc */
#define kern_addr_valid(kaddr) (0)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
-
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32))
#define pte_pfn(pte) (pte_val(pte) >> 32)
diff --git a/arch/alpha/include/asm/module.h b/arch/alpha/include/asm/module.h
index 7b63743c534..9cd13b55155 100644
--- a/arch/alpha/include/asm/module.h
+++ b/arch/alpha/include/asm/module.h
@@ -1,19 +1,13 @@
#ifndef _ALPHA_MODULE_H
#define _ALPHA_MODULE_H
+#include <asm-generic/module.h>
+
struct mod_arch_specific
{
unsigned int gotsecindex;
};
-#define Elf_Sym Elf64_Sym
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Ehdr Elf64_Ehdr
-#define Elf_Phdr Elf64_Phdr
-#define Elf_Dyn Elf64_Dyn
-#define Elf_Rel Elf64_Rel
-#define Elf_Rela Elf64_Rela
-
#define ARCH_SHF_SMALL SHF_ALPHA_GPREL
#ifdef MODULE
diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h
index 9b4ba0d6f00..5422a47646f 100644
--- a/arch/alpha/include/asm/pal.h
+++ b/arch/alpha/include/asm/pal.h
@@ -1,51 +1,186 @@
#ifndef __ALPHA_PAL_H
#define __ALPHA_PAL_H
-/*
- * Common PAL-code
- */
-#define PAL_halt 0
-#define PAL_cflush 1
-#define PAL_draina 2
-#define PAL_bpt 128
-#define PAL_bugchk 129
-#define PAL_chmk 131
-#define PAL_callsys 131
-#define PAL_imb 134
-#define PAL_rduniq 158
-#define PAL_wruniq 159
-#define PAL_gentrap 170
-#define PAL_nphalt 190
+#include <uapi/asm/pal.h>
+
+#ifndef __ASSEMBLY__
+
+extern void halt(void) __attribute__((noreturn));
+#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
+
+#define imb() \
+__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
+
+#define draina() \
+__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
+
+#define __CALL_PAL_R0(NAME, TYPE) \
+extern inline TYPE NAME(void) \
+{ \
+ register TYPE __r0 __asm__("$0"); \
+ __asm__ __volatile__( \
+ "call_pal %1 # " #NAME \
+ :"=r" (__r0) \
+ :"i" (PAL_ ## NAME) \
+ :"$1", "$16", "$22", "$23", "$24", "$25"); \
+ return __r0; \
+}
+
+#define __CALL_PAL_W1(NAME, TYPE0) \
+extern inline void NAME(TYPE0 arg0) \
+{ \
+ register TYPE0 __r16 __asm__("$16") = arg0; \
+ __asm__ __volatile__( \
+ "call_pal %1 # "#NAME \
+ : "=r"(__r16) \
+ : "i"(PAL_ ## NAME), "0"(__r16) \
+ : "$1", "$22", "$23", "$24", "$25"); \
+}
+
+#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
+extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \
+{ \
+ register TYPE0 __r16 __asm__("$16") = arg0; \
+ register TYPE1 __r17 __asm__("$17") = arg1; \
+ __asm__ __volatile__( \
+ "call_pal %2 # "#NAME \
+ : "=r"(__r16), "=r"(__r17) \
+ : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
+ : "$1", "$22", "$23", "$24", "$25"); \
+}
+
+#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
+extern inline RTYPE NAME(TYPE0 arg0) \
+{ \
+ register RTYPE __r0 __asm__("$0"); \
+ register TYPE0 __r16 __asm__("$16") = arg0; \
+ __asm__ __volatile__( \
+ "call_pal %2 # "#NAME \
+ : "=r"(__r16), "=r"(__r0) \
+ : "i"(PAL_ ## NAME), "0"(__r16) \
+ : "$1", "$22", "$23", "$24", "$25"); \
+ return __r0; \
+}
+
+#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
+extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
+{ \
+ register RTYPE __r0 __asm__("$0"); \
+ register TYPE0 __r16 __asm__("$16") = arg0; \
+ register TYPE1 __r17 __asm__("$17") = arg1; \
+ __asm__ __volatile__( \
+ "call_pal %3 # "#NAME \
+ : "=r"(__r16), "=r"(__r17), "=r"(__r0) \
+ : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
+ : "$1", "$22", "$23", "$24", "$25"); \
+ return __r0; \
+}
+
+__CALL_PAL_W1(cflush, unsigned long);
+__CALL_PAL_R0(rdmces, unsigned long);
+__CALL_PAL_R0(rdps, unsigned long);
+__CALL_PAL_R0(rdusp, unsigned long);
+__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
+__CALL_PAL_R0(whami, unsigned long);
+__CALL_PAL_W2(wrent, void*, unsigned long);
+__CALL_PAL_W1(wripir, unsigned long);
+__CALL_PAL_W1(wrkgp, unsigned long);
+__CALL_PAL_W1(wrmces, unsigned long);
+__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
+__CALL_PAL_W1(wrusp, unsigned long);
+__CALL_PAL_W1(wrvptptr, unsigned long);
+__CALL_PAL_RW1(wtint, unsigned long, unsigned long);
/*
- * VMS specific PAL-code
+ * TB (translation buffer) routines.
*/
-#define PAL_swppal 10
-#define PAL_mfpr_vptb 41
+#define __tbi(nr,arg,arg1...) \
+({ \
+ register unsigned long __r16 __asm__("$16") = (nr); \
+ register unsigned long __r17 __asm__("$17"); arg; \
+ __asm__ __volatile__( \
+ "call_pal %3 #__tbi" \
+ :"=r" (__r16),"=r" (__r17) \
+ :"0" (__r16),"i" (PAL_tbi) ,##arg1 \
+ :"$0", "$1", "$22", "$23", "$24", "$25"); \
+})
+
+#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17))
+#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17))
+#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17))
+#define tbis(x) __tbi(3,__r17=(x),"1" (__r17))
+#define tbiap() __tbi(-1, /* no second argument */)
+#define tbia() __tbi(-2, /* no second argument */)
/*
- * OSF specific PAL-code
+ * QEMU Cserv (PAL_cserve) routines.
*/
-#define PAL_cserve 9
-#define PAL_wripir 13
-#define PAL_rdmces 16
-#define PAL_wrmces 17
-#define PAL_wrfen 43
-#define PAL_wrvptptr 45
-#define PAL_jtopal 46
-#define PAL_swpctx 48
-#define PAL_wrval 49
-#define PAL_rdval 50
-#define PAL_tbi 51
-#define PAL_wrent 52
-#define PAL_swpipl 53
-#define PAL_rdps 54
-#define PAL_wrkgp 55
-#define PAL_wrusp 56
-#define PAL_wrperfmon 57
-#define PAL_rdusp 58
-#define PAL_whami 60
-#define PAL_retsys 61
-#define PAL_rti 63
+static inline unsigned long
+qemu_get_walltime(void)
+{
+ register unsigned long v0 __asm__("$0");
+ register unsigned long a0 __asm__("$16") = 3;
+
+ asm("call_pal %2 # cserve get_time"
+ : "=r"(v0), "+r"(a0)
+ : "i"(PAL_cserve)
+ : "$17", "$18", "$19", "$20", "$21");
+
+ return v0;
+}
+
+static inline unsigned long
+qemu_get_alarm(void)
+{
+ register unsigned long v0 __asm__("$0");
+ register unsigned long a0 __asm__("$16") = 4;
+
+ asm("call_pal %2 # cserve get_alarm"
+ : "=r"(v0), "+r"(a0)
+ : "i"(PAL_cserve)
+ : "$17", "$18", "$19", "$20", "$21");
+
+ return v0;
+}
+
+static inline void
+qemu_set_alarm_rel(unsigned long expire)
+{
+ register unsigned long a0 __asm__("$16") = 5;
+ register unsigned long a1 __asm__("$17") = expire;
+
+ asm volatile("call_pal %2 # cserve set_alarm_rel"
+ : "+r"(a0), "+r"(a1)
+ : "i"(PAL_cserve)
+ : "$0", "$18", "$19", "$20", "$21");
+}
+
+static inline void
+qemu_set_alarm_abs(unsigned long expire)
+{
+ register unsigned long a0 __asm__("$16") = 6;
+ register unsigned long a1 __asm__("$17") = expire;
+
+ asm volatile("call_pal %2 # cserve set_alarm_abs"
+ : "+r"(a0), "+r"(a1)
+ : "i"(PAL_cserve)
+ : "$0", "$18", "$19", "$20", "$21");
+}
+
+static inline unsigned long
+qemu_get_vmtime(void)
+{
+ register unsigned long v0 __asm__("$0");
+ register unsigned long a0 __asm__("$16") = 7;
+
+ asm("call_pal %2 # cserve get_time"
+ : "=r"(v0), "+r"(a0)
+ : "i"(PAL_cserve)
+ : "$17", "$18", "$19", "$20", "$21");
+
+ return v0;
+}
+
+#endif /* !__ASSEMBLY__ */
#endif /* __ALPHA_PAL_H */
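
Each __CALL_PAL_* invocation generates an inline wrapper with its arguments and return value pinned to the registers the PALcode calling convention uses ($16/$17 in, $0 out) and the remaining scratch registers declared as clobbers. swpipl(), for instance, is the primitive the Alpha irqflags code builds its interrupt-disable operations from (a sketch of that pattern, assuming the IPL_* levels from <asm/irqflags.h>):

    /* Raise the IPL to block interrupts, returning the old level (sketch). */
    static inline unsigned long my_irq_save(void)
    {
            return swpipl(IPL_MAX);         /* old IPL comes back in $0 */
    }

    static inline void my_irq_restore(unsigned long flags)
    {
            swpipl(flags);
    }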
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h
index e691ecfedb2..a5b68b268bc 100644
--- a/arch/alpha/include/asm/param.h
+++ b/arch/alpha/include/asm/param.h
@@ -1,27 +1,11 @@
#ifndef _ASM_ALPHA_PARAM_H
#define _ASM_ALPHA_PARAM_H
-/* ??? Gross. I don't want to parameterize this, and supposedly the
- hardware ignores reprogramming. We also need userland buy-in to the
- change in HZ, since this is visible in the wait4 resources etc. */
+#include <uapi/asm/param.h>
-#ifdef __KERNEL__
-#define HZ CONFIG_HZ
-#define USER_HZ HZ
-#else
-#define HZ 1024
-#endif
-
-#define EXEC_PAGESIZE 8192
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#ifdef __KERNEL__
-# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
-#endif
+# undef HZ
+# define HZ CONFIG_HZ
+# define USER_HZ 1024
+# define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
#endif /* _ASM_ALPHA_PARAM_H */
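
HZ now follows CONFIG_HZ inside the kernel while USER_HZ stays at the 1024 ticks per second Alpha userland was compiled against, so tick counts crossing the user boundary must be rescaled rather than exported raw. Generic code does this with jiffies_to_clock_t(); a sketch of the intended use:

    #include <linux/jiffies.h>

    /* Report elapsed time to userland in USER_HZ units (sketch). */
    static clock_t elapsed_user_ticks(unsigned long start)
    {
            return jiffies_to_clock_t(jiffies - start);
    }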
diff --git a/arch/alpha/include/asm/parport.h b/arch/alpha/include/asm/parport.h
index c5ee7cbb2fc..6abd0af11f1 100644
--- a/arch/alpha/include/asm/parport.h
+++ b/arch/alpha/include/asm/parport.h
@@ -9,8 +9,8 @@
#ifndef _ASM_AXP_PARPORT_H
#define _ASM_AXP_PARPORT_H 1
-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
-static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+static int parport_pc_find_isa_ports (int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
return parport_pc_find_isa_ports (autoirq, autodma);
}
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index 28d0497fd3c..f7f680f7457 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>
#include <asm/machvec.h>
+#include <asm-generic/pci-bridge.h>
/*
* The following structure is used to manage multiple PCI busses.
@@ -58,11 +59,6 @@ struct pci_controller {
extern void pcibios_set_master(struct pci_dev *dev);
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
/* IOMMU controls. */
/* The PCI address space does not equal the physical memory address space.
@@ -99,12 +95,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
-extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *,
- struct resource *);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region);
-
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h