 arch/powerpc/include/asm/exception-64s.h         | 113
 arch/powerpc/include/asm/hw_irq.h                |  63
 arch/powerpc/include/asm/irqflags.h              |  37
 arch/powerpc/include/asm/paca.h                  |   2
 arch/powerpc/include/asm/ppc_asm.h               |   2
 arch/powerpc/include/asm/reg.h                   |  22
 arch/powerpc/include/asm/system.h                |  38
 arch/powerpc/include/asm/thread_info.h           |   9
 arch/powerpc/kernel/asm-offsets.c                |   2
 arch/powerpc/kernel/dbell.c                      |   2
 arch/powerpc/kernel/entry_64.S                   | 250
 arch/powerpc/kernel/exceptions-64e.S             | 236
 arch/powerpc/kernel/exceptions-64s.S             | 314
 arch/powerpc/kernel/head_32.S                    |   4
 arch/powerpc/kernel/head_40x.S                   |   4
 arch/powerpc/kernel/head_64.S                    |  62
 arch/powerpc/kernel/head_8xx.S                   |   4
 arch/powerpc/kernel/head_booke.h                 |   4
 arch/powerpc/kernel/head_fsl_booke.S             |   2
 arch/powerpc/kernel/idle.c                       |   6
 arch/powerpc/kernel/idle_book3e.S                |  25
 arch/powerpc/kernel/idle_power4.S                |  24
 arch/powerpc/kernel/idle_power7.S                |  23
 arch/powerpc/kernel/irq.c                        | 204
 arch/powerpc/kernel/misc.S                       |   1
 arch/powerpc/kernel/process.c                    |  27
 arch/powerpc/kernel/time.c                       |   8
 arch/powerpc/kernel/traps.c                      |   3
 arch/powerpc/kernel/vmlinux.lds.S                |   5
 arch/powerpc/mm/fault.c                          | 181
 arch/powerpc/mm/slb_low.S                        |  16
 arch/powerpc/platforms/pseries/processor_idle.c  |  18
 arch/powerpc/xmon/xmon.c                         |   7
 33 files changed, 972 insertions(+), 746 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8057f4f6980..548da3aa0a3 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -232,23 +232,30 @@ label##_hv: \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
-#define __SOFTEN_TEST(h) \
+/* This associates vector numbers with bits in paca->irq_happened */
+#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
+#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
+#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
+
+#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi r10,0; \
+ li r10,SOFTEN_VALUE_##vec; \
beq masked_##h##interrupt
-#define _SOFTEN_TEST(h) __SOFTEN_TEST(h)
+#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
#define SOFTEN_TEST_PR(vec) \
KVMTEST_PR(vec); \
- _SOFTEN_TEST(EXC_STD)
+ _SOFTEN_TEST(EXC_STD, vec)
#define SOFTEN_TEST_HV(vec) \
KVMTEST(vec); \
- _SOFTEN_TEST(EXC_HV)
+ _SOFTEN_TEST(EXC_HV, vec)
#define SOFTEN_TEST_HV_201(vec) \
KVMTEST(vec); \
- _SOFTEN_TEST(EXC_STD)
+ _SOFTEN_TEST(EXC_STD, vec)
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
HMT_MEDIUM; \
@@ -272,73 +279,55 @@ label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV)
-#ifdef CONFIG_PPC_ISERIES
-#define DISABLE_INTS \
- li r11,0; \
- stb r11,PACASOFTIRQEN(r13); \
-BEGIN_FW_FTR_SECTION; \
- stb r11,PACAHARDIRQEN(r13); \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
- TRACE_DISABLE_INTS; \
-BEGIN_FW_FTR_SECTION; \
- mfmsr r10; \
- ori r10,r10,MSR_EE; \
- mtmsrd r10,1; \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#else
-#define DISABLE_INTS \
- li r11,0; \
- stb r11,PACASOFTIRQEN(r13); \
- stb r11,PACAHARDIRQEN(r13); \
- TRACE_DISABLE_INTS
-#endif /* CONFIG_PPC_ISERIES */
+/*
+ * Our exception common code can be passed various "additions"
+ * to specify the behaviour of interrupts, whether to kick the
+ * runlatch, etc...
+ */
+
+/* Exception addition: Hard disable interrupts */
+#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
+/* Exception addition: Keep interrupt state */
#define ENABLE_INTS \
+ ld r11,PACAKMSR(r13); \
ld r12,_MSR(r1); \
- mfmsr r11; \
rlwimi r11,r12,0,MSR_EE; \
mtmsrd r11,1
-#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- DISABLE_INTS; \
- bl .save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except
+#define ADD_NVGPRS \
+ bl .save_nvgprs
+
+#define RUNLATCH_ON \
+BEGIN_FTR_SECTION \
+ clrrdi r3,r1,THREAD_SHIFT; \
+ ld r4,TI_LOCAL_FLAGS(r3); \
+ andi. r0,r4,_TLF_RUNLATCH; \
+ beql ppc64_runlatch_on_trampoline; \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ additions; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b ret
+
+#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
+ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \
+ ADD_NVGPRS;DISABLE_INTS)
/*
* Like STD_EXCEPTION_COMMON, but for exceptions that can occur
- * in the idle task and therefore need the special idle handling.
+ * in the idle task and therefore need the special idle handling
+ * (finish nap and runlatch)
*/
-#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- FINISH_NAP; \
- DISABLE_INTS; \
- bl .save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except
-
-#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- FINISH_NAP; \
- DISABLE_INTS; \
-BEGIN_FTR_SECTION \
- bl .ppc64_runlatch_on; \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except_lite
+#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
+ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
+ FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
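
The net effect of the new __SOFTEN_TEST/masked-interrupt path above can be sketched in C (illustrative only: the real code is assembly in the exception prolog, soften_test_sketch is a hypothetical name, and soften_value stands in for the SOFTEN_VALUE_0x500/0x502/0x900/0x982 constants):

#include <linux/types.h>
#include <asm/hw_irq.h>		/* PACA_IRQ_* replay flags */
#include <asm/paca.h>		/* local_paca */

static bool soften_test_sketch(u8 soften_value)
{
	/* Soft-enabled: take the exception handler normally */
	if (local_paca->soft_enabled)
		return false;
	/* Soft-disabled: latch which source fired so it can be
	 * replayed later, then return via masked_##h##interrupt
	 * without calling the handler */
	local_paca->irq_happened |= soften_value;
	return true;
}
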
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index bb712c9488b..51010bfc792 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -11,6 +11,27 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
+#ifdef CONFIG_PPC64
+
+/*
+ * PACA flags in paca->irq_happened.
+ *
+ * These bits are set when interrupts occur while soft-disabled
+ * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
+ * is set whenever we manually hard disable.
+ */
+#define PACA_IRQ_HARD_DIS 0x01
+#define PACA_IRQ_DBELL 0x02
+#define PACA_IRQ_EE 0x04
+#define PACA_IRQ_DEC 0x08 /* Or FIT */
+#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
+
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+
+extern void __replay_interrupt(unsigned int vector);
+
extern void timer_interrupt(struct pt_regs *);
#ifdef CONFIG_PPC64
@@ -42,7 +63,6 @@ static inline unsigned long arch_local_irq_disable(void)
}
extern void arch_local_irq_restore(unsigned long);
-extern void iseries_handle_interrupts(void);
static inline void arch_local_irq_enable(void)
{
@@ -68,16 +88,33 @@ static inline bool arch_irqs_disabled(void)
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
#else
-#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
-#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
+#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
#endif
-#define hard_irq_disable() \
- do { \
- __hard_irq_disable(); \
- get_paca()->soft_enabled = 0; \
- get_paca()->hard_enabled = 0; \
- } while(0)
+static inline void hard_irq_disable(void)
+{
+ __hard_irq_disable();
+ get_paca()->soft_enabled = 0;
+ get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
+}
+
+/*
+ * This is called by asynchronous interrupt handlers, after the
+ * source of the interrupt has been cleared, to conditionally
+ * re-enable hard interrupts when running soft-disabled.
+ */
+static inline void may_hard_irq_enable(void)
+{
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+ __hard_irq_enable();
+}
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !regs->softe;
+}
#else /* CONFIG_PPC64 */
@@ -139,6 +176,13 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() arch_local_irq_disable()
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !(regs->msr & MSR_EE);
+}
+
+static inline void may_hard_irq_enable(void) { }
+
#endif /* CONFIG_PPC64 */
#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
@@ -149,5 +193,6 @@ static inline bool arch_irqs_disabled(void)
*/
struct irq_chip;
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
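
As a hedged sketch (hypothetical handler name), the calling pattern may_hard_irq_enable() is designed for looks like the following; the doorbell_exception() hunk in arch/powerpc/kernel/dbell.c further down follows it:

#include <linux/hardirq.h>	/* irq_enter/irq_exit */
#include <asm/hw_irq.h>		/* may_hard_irq_enable */
#include <asm/ptrace.h>		/* struct pt_regs */

void example_async_interrupt(struct pt_regs *regs)
{
	irq_enter();
	/* The interrupt source has been cleared by now, so MSR[EE]
	 * can be turned back on even while still soft-disabled,
	 * unless an external interrupt is pending replay */
	may_hard_irq_enable();
	/* ... service the event ... */
	irq_exit();
}
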
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b0b06d85788..6f9b6e23dc5 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -39,24 +39,31 @@
#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
- cmpdi en,0; \
- bne 95f; \
- stb en,PACASOFTIRQEN(r13); \
- TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \
- b skip; \
-95: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \
- li en,1;
-#define TRACE_AND_RESTORE_IRQ(en) \
- TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
- stb en,PACASOFTIRQEN(r13); \
-96:
+/*
+ * This is used by assembly code to soft-disable interrupts
+ */
+#define SOFT_DISABLE_INTS(__rA, __rB) \
+ lbz __rA,PACASOFTIRQEN(r13); \
+ lbz __rB,PACAIRQHAPPENED(r13); \
+ cmpwi cr0,__rA,0; \
+ li __rA,0; \
+ ori __rB,__rB,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACAIRQHAPPENED(r13); \
+ beq 44f; \
+ stb __rA,PACASOFTIRQEN(r13); \
+ TRACE_DISABLE_INTS; \
+44:
+
#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
-#define TRACE_AND_RESTORE_IRQ(en) \
- stb en,PACASOFTIRQEN(r13)
+
+#define SOFT_DISABLE_INTS(__rA, __rB) \
+ lbz __rA,PACAIRQHAPPENED(r13); \
+ li __rB,0; \
+ ori __rA,__rA,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACASOFTIRQEN(r13); \
+ stb __rA,PACAIRQHAPPENED(r13)
#endif
#endif
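
Read as C, the tracing variant of SOFT_DISABLE_INTS does roughly the following (a sketch for clarity only, with a hypothetical function name; the macro must stay in assembly because it runs on exception entry paths):

#include <linux/irqflags.h>	/* trace_hardirqs_off */
#include <linux/types.h>
#include <asm/hw_irq.h>
#include <asm/paca.h>

static inline void soft_disable_ints_sketch(void)
{
	u8 was_enabled = local_paca->soft_enabled;

	/* We are hard-disabled here in either case: record it */
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	if (was_enabled) {
		/* Only trace an actual enabled -> disabled transition */
		local_paca->soft_enabled = 0;
		trace_hardirqs_off();
	}
}
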
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 269c05a36d9..daf813fea91 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -132,7 +132,7 @@ struct paca_struct {
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 soft_enabled; /* irq soft-enable flag */
- u8 hard_enabled; /* set if irqs are enabled in MSR */
+ u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 368f72f7980..50f73aa2ba2 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -60,6 +60,8 @@ BEGIN_FW_FTR_SECTION; \
cmpd cr1,r11,r10; \
beq+ cr1,33f; \
bl .accumulate_stolen_time; \
+ ld r12,_MSR(r1); \
+ andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
33: \
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7fdc2c0b7fa..b1a215eabef 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1079,30 +1079,12 @@
#define proc_trap() asm volatile("trap")
-#ifdef CONFIG_PPC64
-
-extern void ppc64_runlatch_on(void);
-extern void __ppc64_runlatch_off(void);
-
-#define ppc64_runlatch_off() \
- do { \
- if (cpu_has_feature(CPU_FTR_CTRL) && \
- test_thread_flag(TIF_RUNLATCH)) \
- __ppc64_runlatch_off(); \
- } while (0)
+#define __get_SP() ({unsigned long sp; \
+ asm volatile("mr %0,1": "=r" (sp)); sp;})
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
-#else
-#define ppc64_runlatch_on()
-#define ppc64_runlatch_off()
-
-#endif /* CONFIG_PPC64 */
-
-#define __get_SP() ({unsigned long sp; \
- asm volatile("mr %0,1": "=r" (sp)); sp;})
-
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index c377457d1b8..a02883d5af4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -550,5 +550,43 @@ extern void reloc_got2(unsigned long);
extern struct dentry *powerpc_debugfs_root;
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We manually hard enable/disable. This is called in
+ * the idle loop, and we don't want to interfere with
+ * soft-disable/enable & interrupt replay.
+ */
+#define ppc64_runlatch_off() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ test_thread_local_flags(_TLF_RUNLATCH)) { \
+ unsigned long msr = mfmsr(); \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_off(); \
+ if (msr & MSR_EE) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+
+#define ppc64_runlatch_on() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ !test_thread_local_flags(_TLF_RUNLATCH)) { \
+ unsigned long msr = mfmsr(); \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_on(); \
+ if (msr & MSR_EE) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */
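
These macros are meant for the idle loop (arch/powerpc/kernel/idle.c in the diffstat), which is why they bracket the runlatch update with a manual hard disable. A minimal sketch of that usage, assuming an illustrative loop shape:

#include <linux/sched.h>	/* need_resched */

static void idle_loop_sketch(void)
{
	while (!need_resched()) {
		ppc64_runlatch_off();	/* core may enter low power */
		/* platform nap/sleep entry goes here ... */
	}
	ppc64_runlatch_on();		/* busy again before schedule() */
}
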
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 96471494096..4a741c7efd0 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -110,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
-#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -141,11 +140,13 @@ static inline struct thread_info *current_thread_info(void)
#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
#define TLF_LAZY_MMU 3 /* tlb_batch is active */
+#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
#define _TLF_NAPPING (1 << TLF_NAPPING)
#define _TLF_SLEEPING (1 << TLF_SLEEPING)
#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
+#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
@@ -156,6 +157,12 @@ static inline void set_restore_sigmask(void)
set_bit(TIF_SIGPENDING, &ti->flags);
}
+static inline bool test_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ return (ti->local_flags & flags) != 0;
+}
+
#ifdef CONFIG_PPC64
#define is_32bit_task() (test_thread_flag(TIF_32BIT))
#else
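
local_flags is only ever written by its owning thread, so test_thread_local_flags() can read it without atomics. A minimal sketch of a caller (hypothetical name, mirroring the ppc64_runlatch_off() macro in system.h above):

#include <asm/thread_info.h>	/* test_thread_local_flags, _TLF_RUNLATCH */

static inline void runlatch_off_sketch(void)
{
	if (test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_off();
}
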
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc..cdd0d264415 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -147,7 +147,7 @@ int main(void)
DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
- DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+ DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2cc451aaaca..5b25c8060fd 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -37,6 +37,8 @@ void doorbell_exception(struct pt_regs *regs)
irq_enter();
+ may_hard_irq_enable();
+
smp_ipi_demux();
irq_exit();
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 866462cbe2d..f8a7a1a1a9f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,6 +32,7 @@
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
+#include <asm/hw_irq.h>
/*
* System calls.
@@ -115,39 +116,33 @@ BEGIN_FW_FTR_SECTION
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_on
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r12,_MSR(r1)
-#endif /* CONFIG_TRACE_IRQFLAGS */
- li r10,1
- stb r10,PACASOFTIRQEN(r13)
- stb r10,PACAHARDIRQEN(r13)
- std r10,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- /* Hack for handling interrupts when soft-enabling on iSeries */
- cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
- andi. r10,r12,MSR_PR /* from kernel */
- crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
- bne 2f
- b hardware_interrupt_entry
-2:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
+ /*
+ * A syscall should always be called with interrupts enabled,
+ * so we just unconditionally hard-enable here. When some kind
+ * of irq tracing is used, we additionally check that this
+ * condition is indeed the case.
+ */
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
+ lbz r10,PACASOFTIRQEN(r13)
+ xori r10,r10,1
+1: tdnei r10,0
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
- /* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r11
+ ld r11,PACAKMSR(r13)
ori r11,r11,MSR_EE
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
+ /* We do need to set SOFTE in the stack frame or the return
+ * from interrupt will be painful
+ */
+ li r10,1
+ std r10,SOFTE(r1)
+
#ifdef SHOW_SYSCALLS
bl .do_show_syscall
REST_GPR(0,r1)
@@ -198,16 +193,14 @@ syscall_exit:
andi. r10,r8,MSR_RI
beq- unrecov_restore
#endif
-
- /* Disable interrupts so current_thread_info()->flags can't change,
+ /*
+ * Disable interrupts so current_thread_info()->flags can't change,
* and so that we don't get interrupted after loading SRR0/1.
*/
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
+ ld r10,PACAKMSR(r13)
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -319,7 +312,7 @@ syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r10
+ ld r10,PACAKMSR(r13)
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -565,10 +558,8 @@ _GLOBAL(ret_from_except_lite)
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10 /* Get current interrupt state */
- rldicl r9,r10,48,1 /* clear MSR_EE */
- rotldi r9,r9,16
- mtmsrd r9,1 /* Update machine state */
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PREEMPT
@@ -591,25 +582,74 @@ _GLOBAL(ret_from_except_lite)
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_USER_WORK_MASK
bne do_work
-#endif
+#endif /* !CONFIG_PREEMPT */
+ .globl fast_exc_return_irq
+fast_exc_return_irq:
restore:
-BEGIN_FW_FTR_SECTION
+ /*
+ * This is the main kernel exit path; we first check if we
+ * have to change our interrupt state.
+ */
ld r5,SOFTE(r1)
-FW_FTR_SECTION_ELSE
- b .Liseries_check_pending_irqs
-ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
-2:
- TRACE_AND_RESTORE_IRQ(r5);
+ lbz r6,PACASOFTIRQEN(r13)
+ cmpwi cr1,r5,0
+ cmpw cr0,r5,r6
+ beq cr0,4f
+
+ /* We do, handle disable first, which is easy */
+ bne cr1,3f;
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13);
+ TRACE_DISABLE_INTS
+ b 4f
- /* extract EE bit and use it to restore paca->hard_enabled */
- ld r3,_MSR(r1)
- rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
- stb r4,PACAHARDIRQEN(r13)
+3: /*
+ * We are about to soft-enable interrupts (we are hard disabled
+ * at this point). We check if there's anything that needs to
+ * be replayed first.
+ */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ bne- restore_check_irq_replay
+ /*
+ * Get here when nothing happened while soft-disabled, just
+ * soft-enable and move-on. We will hard-enable as a side
+ * effect of rfi
+ */
+restore_no_replay:
+ TRACE_ENABLE_INTS
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13);
+
+ /*
+ * Final return path. BookE is handled in a different file
+ */
+4:
#ifdef CONFIG_PPC_BOOK3E
b .exception_return_book3e
#else
+ /*
+ * Clear the reservation. If we know the CPU tracks the address of
+ * the reservation then we can potentially save some cycles and use
+ * a larx. On POWER6 and POWER7 this is significantly faster.
+ */
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ /*
+ * Some code paths, such as load_up_fpu or altivec, return directly
+ * here. They run entirely hard disabled and do not alter the
+ * interrupt state. They also don't use lwarx/stwcx. and thus
+ * are known not to leave dangling reservations.
+ */
+ .globl fast_exception_return
+fast_exception_return:
+ ld r3,_MSR(r1)
ld r4,_CTR(r1)
ld r0,_LINK(r1)
mtctr r4
@@ -623,28 +663,18 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
beq- unrecov_restore
/*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- /*
* Clear RI before restoring r13. If we are returning to
* userspace and we take an exception after restoring r13,
* we end up corrupting the userspace r13 value.
*/
- mfmsr r4
- andc r4,r4,r0 /* r0 contains MSR_RI here */
+ ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
+ andc r4,r4,r0 /* r0 contains MSR_RI here */
mtmsrd r4,1
/*
* r13 is our per cpu area, only restore it if we are returning to
- * userspace
+ * userspace; the value stored in the stack frame may belong to
+ * another CPU.
*/
andi. r0,r3,MSR_PR
beq 1f
@@ -669,30 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
#endif /* CONFIG_PPC_BOOK3E */
-.Liseries_check_pending_irqs:
-#ifdef CONFIG_PPC_ISERIES
- ld r5,SOFTE(r1)
- cmpdi 0,r5,0
- beq 2b
- /* Check for pending interrupts (iSeries) */
- ld r3,PACALPPACAPTR(r13)
- ld r3,LPPACAANYINT(r3)
- cmpdi r3,0
- beq+ 2b /* skip do_IRQ if no interrupts */
-
- li r3,0
- stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_off
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- mtmsrd r10 /* hard-enable again */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite /* loop back and handle more */
-#endif
+ /*
+ * Something did happen, check if a re-emit is needed
+ * (this also clears paca->irq_happened)
+ */
+restore_check_irq_replay:
+ /* XXX: We could implement a fast path here where we check
+ * for irq_happened being just 0x01, in which case we can
+ * clear it and return. That means that we would potentially
+ * miss a decrementer having wrapped all the way around.
+ *
+ * Still, this might be useful for things like hash_page
+ */
+ bl .__check_irq_replay
+ cmpwi cr0,r3,0
+ beq restore_no_replay
+
+ /*
+ * We need to re-emit an interrupt. We do so by re-using our
+ * existing exception frame. We first change the trap value,
+ * but we need to ensure we preserve the low nibble of it
+ */
+ ld r4,_TRAP(r1)
+ clrldi r4,r4,60
+ or r4,r4,r3
+ std r4,_TRAP(r1)
+ /*
+ * Then find the right handler and call it. Interrupts are
+ * still soft-disabled and we keep them that way.
+ */
+ cmpwi cr0,r3,0x500
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .do_IRQ
+ b .ret_from_except
+1: cmpwi cr0,r3,0x900
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .timer_interrupt
+ b .ret_from_except
+#ifdef CONFIG_PPC_BOOK3E
+1: cmpwi cr0,r3,0x280
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .doorbell_exception
+ b .ret_from_except
+#endif /* CONFIG_PPC_BOOK3E */
+1: b .ret_from_except /* What else to do here? */
+
do_work:
#ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -705,31 +760,22 @@ do_work:
crandc eq,cr1*4+eq,eq
bne restore
- /* Here we are preempting the current task.
- *
- * Ensure interrupts are soft-disabled. We also properly mark
- * the PACA to reflect the fact that they are hard-disabled
- * and trace the change
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first
*/
- li r0,0
- stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
- TRACE_DISABLE_INTS
-
- /* Call the scheduler with soft IRQs off */
+ SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq
/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
- li r0,0
- stb r0,PACAHARDIRQEN(r13)
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
@@ -751,14 +797,12 @@ user_work:
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
- li r5,1
- TRACE_AND_RESTORE_IRQ(r5);
+ bl .restore_interrupts
bl .schedule
b .ret_from_except_lite
1: bl .save_nvgprs
- li r5,1
- TRACE_AND_RESTORE_IRQ(r5);
+ bl .restore_interrupts
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_notify_resume
b .ret_from_except
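
The replay sequence above depends on __check_irq_replay(), whose body lives in arch/powerpc/kernel/irq.c and is not shown in this excerpt. Based on the vectors handled by restore_check_irq_replay, a hedged C sketch of its contract (consume paca->irq_happened, return the vector to replay or 0) might look like:

#include <linux/types.h>
#include <asm/hw_irq.h>		/* PACA_IRQ_* flags */
#include <asm/paca.h>		/* local_paca */

unsigned int check_irq_replay_sketch(void)
{
	u8 happened = local_paca->irq_happened;

	/* We are about to soft-enable: consume the latched state */
	local_paca->irq_happened = 0;

	if (happened & PACA_IRQ_EE)
		return 0x500;	/* replayed via do_IRQ */
	if (happened & PACA_IRQ_DEC)
		return 0x900;	/* replayed via timer_interrupt */
#ifdef CONFIG_PPC_BOOK3E
	if (happened & PACA_IRQ_DBELL)
		return 0x280;	/* replayed via doorbell_exception */
#endif
	return 0;		/* nothing to replay */
}
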