Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile              |   10
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c         |   16
-rw-r--r--  arch/powerpc/kernel/cputable.c            |   20
-rw-r--r--  arch/powerpc/kernel/dbell.c               |    2
-rw-r--r--  arch/powerpc/kernel/e500-pmu.c            |  134
-rw-r--r--  arch/powerpc/kernel/entry_64.S            |  250
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S      |  236
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S      |  314
-rw-r--r--  arch/powerpc/kernel/fadump.c              | 1315
-rw-r--r--  arch/powerpc/kernel/head_32.S             |    4
-rw-r--r--  arch/powerpc/kernel/head_40x.S            |    4
-rw-r--r--  arch/powerpc/kernel/head_64.S             |   62
-rw-r--r--  arch/powerpc/kernel/head_8xx.S            |    4
-rw-r--r--  arch/powerpc/kernel/head_booke.h          |    4
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S      |    2
-rw-r--r--  arch/powerpc/kernel/idle.c                |    6
-rw-r--r--  arch/powerpc/kernel/idle_book3e.S         |   25
-rw-r--r--  arch/powerpc/kernel/idle_power4.S         |   24
-rw-r--r--  arch/powerpc/kernel/idle_power7.S         |   23
-rw-r--r--  arch/powerpc/kernel/iommu.c               |    8
-rw-r--r--  arch/powerpc/kernel/irq.c                 |  212
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c          |    3
-rw-r--r--  arch/powerpc/kernel/lparcfg.c             |  108
-rw-r--r--  arch/powerpc/kernel/misc.S                |    1
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c         |  422
-rw-r--r--  arch/powerpc/kernel/of_platform.c         |    6
-rw-r--r--  arch/powerpc/kernel/paca.c                |   12
-rw-r--r--  arch/powerpc/kernel/pci-common.c          |   15
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c      |  492
-rw-r--r--  arch/powerpc/kernel/perf_event.c          | 1448
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c  |  688
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c          |  621
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c         |  690
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c          |  629
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c          |  552
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c          |  379
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c          |  502
-rw-r--r--  arch/powerpc/kernel/process.c             |   27
-rw-r--r--  arch/powerpc/kernel/prom.c                |   98
-rw-r--r--  arch/powerpc/kernel/prom_init.c           |   15
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c            |    3
-rw-r--r--  arch/powerpc/kernel/setup-common.c        |   14
-rw-r--r--  arch/powerpc/kernel/signal.c              |   13
-rw-r--r--  arch/powerpc/kernel/signal_32.c           |   11
-rw-r--r--  arch/powerpc/kernel/sysfs.c               |    7
-rw-r--r--  arch/powerpc/kernel/time.c                |  116
-rw-r--r--  arch/powerpc/kernel/traps.c               |    6
-rw-r--r--  arch/powerpc/kernel/vio.c                 |   18
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S         |    5
49 files changed, 2041 insertions(+), 7535 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ee728e433aa..f5808a35688 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_FA_DUMP) += fadump.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_E500) += idle_e500.o
endif
@@ -113,15 +114,6 @@ obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
-obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
-
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o
-obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
- power5+-pmu.o power6-pmu.o power7-pmu.o
-obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
-
-obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o
-obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
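
Note: the perf rules removed above are not simply dropped; the corresponding code moves out of arch/powerpc/kernel elsewhere in this series, into a dedicated arch/powerpc/perf directory. A minimal Kbuild sketch of what the relocated rules might look like; the destination file names (callchain.o, core-book3s.o, core-fsl-emb.o) are assumptions here, not taken from this hunk:

    # Hypothetical arch/powerpc/perf/Makefile after the move; names assumed.
    obj-$(CONFIG_PERF_EVENTS)               += callchain.o
    obj-$(CONFIG_PPC_PERF_CTRS)             += core-book3s.o
    obj64-$(CONFIG_PPC_PERF_CTRS)           += power4-pmu.o ppc970-pmu.o power5-pmu.o \
                                               power5+-pmu.o power6-pmu.o power7-pmu.o
    obj32-$(CONFIG_PPC_PERF_CTRS)           += mpc7450-pmu.o
    obj-$(CONFIG_FSL_EMB_PERF_EVENT)        += core-fsl-emb.o
    obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500)   += e500-pmu.o
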
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc..cc492e48ddf 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -46,9 +46,6 @@
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iseries/alpaca.h>
-#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
@@ -147,7 +144,7 @@ int main(void)
DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
- DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+ DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
@@ -384,17 +381,6 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef CONFIG_PPC_ISERIES
- /* the assembler miscalculates the VSID values */
- DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET));
- DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET));
- DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START));
- DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
-
- /* alpaca */
- DEFINE(ALPACA_SIZE, sizeof(struct alpaca));
-#endif
-
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
DEFINE(PTE_SIZE, sizeof(pte_t));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 81db9e2a8a2..138ae183c44 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1816,7 +1816,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.platform = "ppc440",
},
{ /* 464 in APM821xx */
- .pvr_mask = 0xffffff00,
+ .pvr_mask = 0xfffffff0,
.pvr_value = 0x12C41C80,
.cpu_name = "APM821XX",
.cpu_features = CPU_FTRS_44X,
@@ -2019,6 +2019,24 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
},
+ { /* e6500 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80400000,
+ .cpu_name = "e6500",
+ .cpu_features = CPU_FTRS_E6500,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
+ MMU_FTR_USE_TLBILX,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 4,
+ .oprofile_cpu_type = "ppc/e6500",
+ .oprofile_type = PPC_OPROFILE_FSL_EMB,
+ .cpu_setup = __setup_cpu_e5500,
+ .cpu_restore = __restore_cpu_e5500,
+ .machine_check = machine_check_e500mc,
+ .platform = "ppce6500",
+ },
#ifdef CONFIG_PPC32
{ /* default match */
.pvr_mask = 0x00000000,
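
For context on how these cpu_specs entries are consumed: at boot, the kernel compares the processor version register (PVR) against each entry in order, so the pvr_mask/pvr_value pair is what selects the new e6500 entry above. A simplified sketch of that match (not the literal identify_cpu() implementation):

    #include <asm/cputable.h>       /* struct cpu_spec */

    /* Sketch: return the first spec whose masked PVR matches. The real
     * identify_cpu() also copies the spec and runs its cpu_setup hook. */
    static struct cpu_spec *match_cpu_spec(struct cpu_spec *specs, int n,
                                           unsigned int pvr)
    {
            int i;

            for (i = 0; i < n; i++)
                    /* e.g. e6500: (pvr & 0xffff0000) == 0x80400000 */
                    if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                            return &specs[i];
            return NULL;    /* the 0x0/0x0 default entry prevents this */
    }
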
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2cc451aaaca..5b25c8060fd 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -37,6 +37,8 @@ void doorbell_exception(struct pt_regs *regs)
irq_enter();
+ may_hard_irq_enable();
+
smp_ipi_demux();
irq_exit();
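
The new may_hard_irq_enable() hook lets handlers like this one turn hard interrupts back on when the kernel was only lazily (soft) disabled, so a doorbell doesn't keep MSR[EE] off longer than needed. A sketch of the helper, close to (but not verbatim) the asm/hw_irq.h version this series introduces:

    /* Sketch: re-enable MSR[EE] unless an external interrupt is already
     * latched in irq_happened and waiting to be replayed. */
    static inline void may_hard_irq_enable(void)
    {
            get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
            if (!(get_paca()->irq_happened & PACA_IRQ_EE))
                    __hard_irq_enable();
    }
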
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
deleted file mode 100644
index cb2e2949c8d..00000000000
--- a/arch/powerpc/kernel/e500-pmu.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Performance counter support for e500 family processors.
- *
- * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
- * Copyright 2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/string.h>
-#include <linux/perf_event.h>
-#include <asm/reg.h>
-#include <asm/cputable.h>
-
-/*
- * Map of generic hardware event types to hardware events
- * Zero if unsupported
- */
-static int e500_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 1,
- [PERF_COUNT_HW_INSTRUCTIONS] = 2,
- [PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
- [PERF_COUNT_HW_BRANCH_MISSES] = 15,
-};
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-
-/*
- * Table of generalized cache-related events.
- * 0 means not supported, -1 means nonsensical, other values
- * are event codes.
- */
-static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
- /*
- * D-cache misses are not split into read/write/prefetch;
- * use raw event 41.
- */
- [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 27, 0 },
- [C(OP_WRITE)] = { 28, 0 },
- [C(OP_PREFETCH)] = { 29, 0 },
- },
- [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 2, 60 },
- [C(OP_WRITE)] = { -1, -1 },
- [C(OP_PREFETCH)] = { 0, 0 },
- },
- /*
- * Assuming LL means L2, it's not a good match for this model.
- * It allocates only on L1 castout or explicit prefetch, and
- * does not have separate read/write events (but it does have
- * separate instruction/data events).
- */
- [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 0, 0 },
- [C(OP_WRITE)] = { 0, 0 },
- [C(OP_PREFETCH)] = { 0, 0 },
- },
- /*
- * There are data/instruction MMU misses, but that's a miss on
- * the chip's internal level-one TLB which is probably not
- * what the user wants. Instead, unified level-two TLB misses
- * are reported here.
- */
- [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 26, 66 },
- [C(OP_WRITE)] = { -1, -1 },
- [C(OP_PREFETCH)] = { -1, -1 },
- },
- [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 12, 15 },
- [C(OP_WRITE)] = { -1, -1 },
- [C(OP_PREFETCH)] = { -1, -1 },
- },
- [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { -1, -1 },
- [C(OP_WRITE)] = { -1, -1 },
- [C(OP_PREFETCH)] = { -1, -1 },
- },
-};
-
-static int num_events = 128;
-
-/* Upper half of event id is PMLCb, for threshold events */
-static u64 e500_xlate_event(u64 event_id)
-{
- u32 event_low = (u32)event_id;
- u64 ret;
-
- if (event_low >= num_events)
- return 0;
-
- ret = FSL_EMB_EVENT_VALID;
-
- if (event_low >= 76 && event_low <= 81) {
- ret |= FSL_EMB_EVENT_RESTRICTED;
- ret |= event_id &
- (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH);
- } else if (event_id &
- (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) {
- /* Threshold requested on non-threshold event */
- return 0;
- }
-
- return ret;
-}
-
-static struct fsl_emb_pmu e500_pmu = {
- .name = "e500 family",
- .n_counter = 4,
- .n_restricted = 2,
- .xlate_event = e500_xlate_event,
- .n_generic = ARRAY_SIZE(e500_generic_events),
- .generic_events = e500_generic_events,
- .cache_events = &e500_cache_events,
-};
-
-static int init_e500_pmu(void)
-{
- if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
-
- if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc"))
- num_events = 256;
- else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500"))
- return -ENODEV;
-
- return register_fsl_emb_pmu(&e500_pmu);
-}
-
-early_initcall(init_e500_pmu);
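
e500-pmu.c is deleted here; as with the Makefile hunk above, the PMU drivers continue to live under arch/powerpc/perf rather than being dropped. For reference, the event-id layout the driver expects puts the event number in the low 32 bits and the PMLCb threshold bits in the upper half; a user-space raw event would be filled in roughly like this (the event number 77 and the thresh_bits parameter are purely illustrative):

    #include <string.h>
    #include <linux/perf_event.h>

    /* Illustrative only: raw event 77 is one of the restricted events
     * (76..81) that accepts FSL_EMB_EVENT_THRESH* bits in the upper word. */
    static void fill_e500_raw_event(struct perf_event_attr *attr,
                                    unsigned long long thresh_bits)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type = PERF_TYPE_RAW;
            attr->size = sizeof(*attr);
            attr->config = 77ULL | thresh_bits;  /* low word: event number */
    }
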
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 866462cbe2d..f8a7a1a1a9f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,6 +32,7 @@
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
+#include <asm/hw_irq.h>
/*
* System calls.
@@ -115,39 +116,33 @@ BEGIN_FW_FTR_SECTION
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_on
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r12,_MSR(r1)
-#endif /* CONFIG_TRACE_IRQFLAGS */
- li r10,1
- stb r10,PACASOFTIRQEN(r13)
- stb r10,PACAHARDIRQEN(r13)
- std r10,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- /* Hack for handling interrupts when soft-enabling on iSeries */
- cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
- andi. r10,r12,MSR_PR /* from kernel */
- crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
- bne 2f
- b hardware_interrupt_entry
-2:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
+ /*
+ * A syscall should always be called with interrupts enabled,
+ * so we just unconditionally hard-enable here. When some kind
+ * of irq tracing is used, we additionally check that this
+ * condition holds.
+ */
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
+ lbz r10,PACASOFTIRQEN(r13)
+ xori r10,r10,1
+1: tdnei r10,0
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
- /* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r11
+ ld r11,PACAKMSR(r13)
ori r11,r11,MSR_EE
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
+ /* We do need to set SOFTE in the stack frame or the return
+ * from interrupt will be painful
+ */
+ li r10,1
+ std r10,SOFTE(r1)
+
#ifdef SHOW_SYSCALLS
bl .do_show_syscall
REST_GPR(0,r1)
@@ -198,16 +193,14 @@ syscall_exit:
andi. r10,r8,MSR_RI
beq- unrecov_restore
#endif
-
- /* Disable interrupts so current_thread_info()->flags can't change,
+ /*
+ * Disable interrupts so current_thread_info()->flags can't change,
* and so that we don't get interrupted after loading SRR0/1.
*/
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
+ ld r10,PACAKMSR(r13)
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -319,7 +312,7 @@ syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r10
+ ld r10,PACAKMSR(r13)
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -565,10 +558,8 @@ _GLOBAL(ret_from_except_lite)
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10 /* Get current interrupt state */
- rldicl r9,r10,48,1 /* clear MSR_EE */
- rotldi r9,r9,16
- mtmsrd r9,1 /* Update machine state */
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PREEMPT
@@ -591,25 +582,74 @@ _GLOBAL(ret_from_except_lite)
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_USER_WORK_MASK
bne do_work
-#endif
+#endif /* !CONFIG_PREEMPT */
+ .globl fast_exc_return_irq
+fast_exc_return_irq:
restore:
-BEGIN_FW_FTR_SECTION
+ /*
+ * This is the main kernel exit path; we first check whether we
+ * have to change our interrupt state.
+ */
ld r5,SOFTE(r1)
-FW_FTR_SECTION_ELSE
- b .Liseries_check_pending_irqs
-ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
-2:
- TRACE_AND_RESTORE_IRQ(r5);
+ lbz r6,PACASOFTIRQEN(r13)
+ cmpwi cr1,r5,0
+ cmpw cr0,r5,r6
+ beq cr0,4f
+
+ /* We do, handle disable first, which is easy */
+ bne cr1,3f;
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13);
+ TRACE_DISABLE_INTS
+ b 4f
- /* extract EE bit and use it to restore paca->hard_enabled */
- ld r3,_MSR(r1)
- rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
- stb r4,PACAHARDIRQEN(r13)
+3: /*
+ * We are about to soft-enable interrupts (we are hard disabled
+ * at this point). We check if there's anything that needs to
+ * be replayed first.
+ */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ bne- restore_check_irq_replay
+ /*
+ * Get here when nothing happened while soft-disabled: just
+ * soft-enable and move on. We will hard-enable as a side
+ * effect of rfi.
+ */
+restore_no_replay:
+ TRACE_ENABLE_INTS
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13);
+
+ /*
+ * Final return path. BookE is handled in a different file.
+ */
+4:
#ifdef CONFIG_PPC_BOOK3E
b .exception_return_book3e
#else
+ /*
+ * Clear the reservation. If we know the CPU tracks the address of
+ * the reservation then we can potentially save some cycles and use
+ * a larx. On POWER6 and POWER7 this is significantly faster.
+ */
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ /*
+ * Some code paths, such as load_up_fpu or altivec, return directly
+ * here. They run entirely hard-disabled and do not alter the
+ * interrupt state. They also don't use lwarx/stwcx., and thus
+ * are known not to leave dangling reservations.
+ */
+ .globl fast_exception_return
+fast_exception_return:
+ ld r3,_MSR(r1)
ld r4,_CTR(r1)
ld r0,_LINK(r1)
mtctr r4
@@ -623,28 +663,18 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
beq- unrecov_restore
/*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- /*
* Clear RI before restoring r13. If we are returning to
* userspace and we take an exception after restoring r13,
* we end up corrupting the userspace r13 value.
*/
- mfmsr r4
- andc r4,r4,r0 /* r0 contains MSR_RI here */
+ ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
+ andc r4,r4,r0 /* r0 contains MSR_RI here */
mtmsrd r4,1
/*
* r13 is our per cpu area, only restore it if we are returning to
- * userspace
+ * userspace; the value stored in the stack frame may belong to
+ * another CPU.
*/
andi. r0,r3,MSR_PR
beq 1f
@@ -669,30 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
#endif /* CONFIG_PPC_BOOK3E */
-.Liseries_check_pending_irqs:
-#ifdef CONFIG_PPC_ISERIES
- ld r5,SOFTE(r1)
- cmpdi 0,r5,0
- beq 2b
- /* Check for pending interrupts (iSeries) */
- ld r3,PACALPPACAPTR(r13)
- ld r3,LPPACAANYINT(r3)
- cmpdi r3,0
- beq+ 2b /* skip do_IRQ if no interrupts */
-
- li r3,0
- stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_off
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- mtmsrd r10 /* hard-enable again */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite /* loop back and handle more */
-#endif
+ /*
+ * Something did happen; check whether a re-emit is needed
+ * (this also clears paca->irq_happened)
+ */
+restore_check_irq_replay:
+ /* XXX: We could implement a fast path here where we check
+ * for irq_happened being just 0x01, in which case we can
+ * clear it and return. That means that we would potentially
+ * miss a decrementer having wrapped all the way around.
+ *
+ * Still, this might be useful for things like hash_page
+ */
+ bl .__check_irq_replay
+ cmpwi cr0,r3,0
+ beq restore_no_replay
+
+ /*
+ * We need to re-emit an interrupt. We do so by re-using our
+ * existing exception frame. We first change the trap value,
+ * but we need to ensure we preserve its low nibble.
+ */
+ ld r4,_TRAP(r1)
+ clrldi r4,r4,60
+ or r4,r4,r3
+ std r4,_TRAP(r1)
+ /*
+ * Then find the right handler and call it. Interrupts are
+ * still soft-disabled and we keep them that way.
+ */
+ cmpwi cr0,r3,0x500
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .do_IRQ
+ b .ret_from_except
+1: cmpwi cr0,r3,0x900
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .timer_interrupt
+ b .ret_from_except
+#ifdef CONFIG_PPC_BOOK3E
+1: cmpwi cr0,r3,0x280
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .doorbell_exception
+ b .ret_from_except
+#endif /* CONFIG_PPC_BOOK3E */
+1: b .ret_from_except /* What else to do here ? */
+
do_work:
#ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -705,31 +760,22 @@ do_work:
crandc eq,cr1*4+eq,eq
bne restore
- /* Here we are preempting the current task.
- *
- * Ensure interrupts are soft-disabled. We also properly mark
- * the PACA to reflect the fact that they are hard-disabled
- * and trace the change
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first
*/
- li r0,0
- stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
- TRACE_DISABLE_INTS
-
- /* Call the scheduler with soft IRQs off */
+ SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq
/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
- li r0,0
- stb r0,PACAHARDIRQEN(r13)
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
@@ -751,14 +797,12 @@ user_work:
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
- li r5,1
- TRACE_AND_RESTORE_IRQ(r5);
+ bl .restore_interrupts
bl .schedule
b .ret_from_except_lite
1: bl .save_nvgprs
- li r5,1
- TRACE_AND_RESTORE_IRQ(r5);
+ bl .restore_interrupts
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_notify_resume
b .ret_from_except
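
The replay path above relies on a new C helper, __check_irq_replay(), to consume paca->irq_happened and hand back the vector to re-emit (0x500, 0x900, or 0x280 on BookE), or zero. A rough C model of that contract as the assembly depends on it (sketch; the real helper in irq.c also deals with the hard-disable bit and the decrementer state):

    /* Sketch of the replay contract: return a trap vector to re-emit,
     * or 0 if nothing is pending; irq_happened is consumed either way. */
    static unsigned int check_irq_replay_sketch(struct paca_struct *paca)
    {
            unsigned char happened = paca->irq_happened;

            paca->irq_happened = 0;

            if (happened & PACA_IRQ_EE)
                    return 0x500;   /* external interrupt */
            if (happened & PACA_IRQ_DEC)
                    return 0x900;   /* decrementer */
    #ifdef CONFIG_PPC_BOOK3E
            if (happened & PACA_IRQ_DBELL)
                    return 0x280;   /* doorbell */
    #endif
            return 0;
    }
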
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 429983c06f9..7215cc2495d 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -24,6 +24,7 @@
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
+#include <asm/hw_irq.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -77,59 +78,55 @@
#define SPRN_MC_SRR1 SPRN_MCSRR1
#define NORMAL_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, GEN, addition##_GEN)
+ EXCEPTION_PROLOG(n, GEN, addition##_GEN(n))
#define CRIT_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, CRIT, addition##_CRIT)
+ EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n))
#define DBG_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, DBG, addition##_DBG)
+ EXCEPTION_PROLOG(n, DBG, addition##_DBG(n))
#define MC_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, MC, addition##_MC)
+ EXCEPTION_PROLOG(n, MC, addition##_MC(n))
/* Variants of the "addition" argument for the prolog
*/
-#define PROLOG_ADDITION_NONE_GEN
-#define PROLOG_ADDITION_NONE_CRIT
-#define PROLOG_ADDITION_NONE_DBG
-#define PROLOG_ADDITION_NONE_MC
+#define PROLOG_ADDITION_NONE_GEN(n)
+#define PROLOG_ADDITION_NONE_CRIT(n)
+#define PROLOG_ADDITION_NONE_DBG(n)
+#define PROLOG_ADDITION_NONE_MC(n)
-#define PROLOG_ADDITION_MASKABLE_GEN \
+#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
cmpwi cr0,r11,0; /* yes -> go out of line */ \
- beq masked_interrupt_book3e;
+ beq masked_interrupt_book3e_##n
-#define PROLOG_ADDITION_2REGS_GEN \
+#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
std r15,PACA_EXGEN+EX_R15(r13)
-#define PROLOG_ADDITION_1REG_GEN \
+#define PROLOG_ADDITION_1REG_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13);
-#define PROLOG_ADDITION_2REGS_CRIT \
+#define PROLOG_ADDITION_2REGS_CRIT(n) \
std r14,PACA_EXCRIT+EX_R14(r13); \
std r15,PACA_EXCRIT+EX_R15(r13)
-#define PROLOG_ADDITION_2REGS_DBG \
+#define PROLOG_ADDITION_2REGS_DBG(n) \
std r14,PACA_EXDBG+EX_R14(r13); \
std r15,PACA_EXDBG+EX_R15(r13)
-#define PROLOG_ADDITION_2REGS_MC \
+#define PROLOG_ADDITION_2REGS_MC(n) \
std r14,PACA_EXMC+EX_R14(r13); \
std r15,PACA_EXMC+EX_R15(r13)
-#define PROLOG_ADDITION_DOORBELL_GEN \
- lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r11,0; /* yes -> go out of line */ \
- beq masked_doorbell_book3e
-
/* Core exception code for all exceptions except TLB misses.
* XXX: Needs to make SPRN_SPRG_GEN depend on exception type
*/
#define EXCEPTION_COMMON(n, excf, ints) \
+exc_##n##_common: \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
@@ -167,20 +164,21 @@
std r0,RESULT(r1); /* clear regs->result */ \
ints;
-/* Variants for the "ints" argument */
+/* Variants for the "ints" argument. This one does nothing when we want
+ * to keep interrupts in their original state
+ */
#define INTS_KEEP
-#define INTS_DISABLE_SOFT \
- stb r0,PACASOFTIRQEN(r13); /* mark interrupts soft-disabled */ \
- TRACE_DISABLE_INTS;
-#define INTS_DISABLE_HARD \
- stb r0,PACAHARDIRQEN(r13); /* and hard disabled */
-#define INTS_DISABLE_ALL \
- INTS_DISABLE_SOFT \
- INTS_DISABLE_HARD
-
-/* This is called by exceptions that used INTS_KEEP (that is did not clear
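
One note on the macro rework above: every PROLOG_ADDITION_* macro now takes the exception number n, so the maskable prolog can token-paste a distinct out-of-line target (masked_interrupt_book3e_##n) per vector instead of sharing a single masked_interrupt_book3e stub. A standalone C illustration of the same ##-pasting pattern, with hypothetical names:

    #include <stdio.h>

    /* handler_##n pastes into one distinct symbol per vector number,
     * exactly like masked_interrupt_book3e_##n in the prologs above. */
    #define MAKE_HANDLER(n) void handler_##n(void) { printf("vector %s\n", #n); }
    #define CALL_HANDLER(n) handler_##n()

    MAKE_HANDLER(0x500)
    MAKE_HANDLER(0x280)

    int main(void)
    {
            CALL_HANDLER(0x500);    /* expands to handler_0x500() */
            CALL_HANDLER(0x280);    /* expands to handler_0x280() */
            return 0;
    }
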