Diffstat (limited to 'arch/powerpc/kernel/entry_64.S')
-rw-r--r--  arch/powerpc/kernel/entry_64.S  710
1 files changed, 445 insertions, 265 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 866462cbe2d..6528c5e2cc4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,13 +32,15 @@
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
+#include <asm/hw_irq.h>
+#include <asm/context_tracking.h>
/*
* System calls.
*/
.section ".toc","aw"
-.SYS_CALL_TABLE:
- .tc .sys_call_table[TC],.sys_call_table
+SYS_CALL_TABLE:
+ .tc sys_call_table[TC],sys_call_table
/* This value is used to mark exception frames on the stack. */
exception_marker:
@@ -61,16 +63,11 @@ system_call_common:
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
+ beq 2f /* if from kernel mode */
ACCOUNT_CPU_USER_ENTRY(r10, r11)
- /*
- * This "crclr so" clears CR0.SO, which is the error indication on
- * return from this system call. There must be no cmp instruction
- * between it and the "mfcr r9" below, otherwise if XER.SO is set,
- * CR0.SO will get set, causing all system calls to appear to fail.
- */
- crclr so
- std r2,GPR2(r1)
+2: std r2,GPR2(r1)
std r3,GPR3(r1)
+ mfcr r2
std r4,GPR4(r1)
std r5,GPR5(r1)
std r6,GPR6(r1)
@@ -81,85 +78,82 @@ system_call_common:
std r11,GPR10(r1)
std r11,GPR11(r1)
std r11,GPR12(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
std r9,GPR13(r1)
- mfcr r9
mflr r10
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r2,r11,28,(63-28)
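As a plain C model (illustrative names only, not kernel code): CR0.SO is bit 28 of the 32-bit CR image read by mfcr above, and inserting the zero held in r11 clears exactly that bit.

#include <stdint.h>

/* Sketch: clear CR0.SO (bit 28, LSB-0 numbering) in the 32-bit CR image,
 * which is what the rldimi with a zero source register achieves. */
static inline uint32_t clear_cr0_so(uint32_t cr_image)
{
	return cr_image & ~(1u << 28);
}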
li r11,0xc01
- std r9,_CCR(r1)
std r10,_LINK(r1)
std r11,_TRAP(r1)
- mfxer r9
- mfctr r10
- std r9,_XER(r1)
- std r10,_CTR(r1)
std r3,ORIG_GPR3(r1)
+ std r2,_CCR(r1)
ld r2,PACATOC(r13)
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
beq 33f
/* if from user, see if there are any DTL entries to process */
ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
ld r11,PACA_DTL_RIDX(r13) /* get log read index */
- ld r10,LPPACA_DTLIDX(r10) /* get log write index */
+ addi r10,r10,LPPACA_DTLIDX
+ LDX_BE r10,0,r10 /* get log write index */
cmpd cr1,r11,r10
beq+ cr1,33f
- bl .accumulate_stolen_time
+ bl accumulate_stolen_time
REST_GPR(0,r1)
REST_4GPRS(3,r1)
REST_2GPRS(7,r1)
addi r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_on
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r12,_MSR(r1)
-#endif /* CONFIG_TRACE_IRQFLAGS */
- li r10,1
- stb r10,PACASOFTIRQEN(r13)
- stb r10,PACAHARDIRQEN(r13)
- std r10,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- /* Hack for handling interrupts when soft-enabling on iSeries */
- cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
- andi. r10,r12,MSR_PR /* from kernel */
- crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
- bne 2f
- b hardware_interrupt_entry
-2:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
+ /*
+ * A syscall should always be called with interrupts enabled,
+ * so we just unconditionally hard-enable here. When some kind
+ * of irq tracing is used, we additionally check that this
+ * condition actually holds.
+ */
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
+ lbz r10,PACASOFTIRQEN(r13)
+ xori r10,r10,1
+1: tdnei r10,0
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
- /* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r11
+ ld r11,PACAKMSR(r13)
ori r11,r11,MSR_EE
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
+ /* We do need to set SOFTE in the stack frame or the return
+ * from interrupt will be painful
+ */
+ li r10,1
+ std r10,SOFTE(r1)
+
#ifdef SHOW_SYSCALLS
- bl .do_show_syscall
+ bl do_show_syscall
REST_GPR(0,r1)
REST_4GPRS(3,r1)
REST_2GPRS(7,r1)
addi r9,r1,STACK_FRAME_OVERHEAD
#endif
- clrrdi r11,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
- bne- syscall_dotrace
-syscall_dotrace_cont:
+ bne syscall_dotrace
+.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -168,7 +162,7 @@ system_call: /* label this so stack traces look sane */
* Need to vector to 32 Bit or default sys_call_table here,
* based on caller's run-mode / personality.
*/
- ld r11,.SYS_CALL_TABLE@toc(2)
+ ld r11,SYS_CALL_TABLE@toc(2)
andi. r10,r10,_TIF_32BIT
beq 15f
addi r11,r11,8 /* use 32-bit syscall entries */
@@ -180,17 +174,17 @@ system_call: /* label this so stack traces look sane */
clrldi r8,r8,32
15:
slwi r0,r0,4
- ldx r10,r11,r0 /* Fetch system call handler [ptr] */
- mtctr r10
+ ldx r12,r11,r0 /* Fetch system call handler [ptr] */
+ mtctr r12
bctrl /* Call handler */
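Roughly, this dispatch assumes a table whose slots are 16 bytes per syscall (hence the shift by 4), with the native entry first and the compat entry 8 bytes in, selected by the earlier addi when _TIF_32BIT is set. A C sketch with made-up names:

typedef long (*syscall_fn_t)(long, long, long, long, long, long);

struct syscall_slot {
	syscall_fn_t native;	/* offset 0: 64-bit entry point */
	syscall_fn_t compat;	/* offset 8: 32-bit entry point */
};

static syscall_fn_t pick_syscall_handler(const struct syscall_slot *table,
					 unsigned long nr, int is_32bit_task)
{
	return is_32bit_task ? table[nr].compat : table[nr].native;
}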
syscall_exit:
std r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
- bl .do_show_syscall_exit
+ bl do_show_syscall_exit
ld r3,RESULT(r1)
#endif
- clrrdi r12,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r12, r1)
ld r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
@@ -198,17 +192,24 @@ syscall_exit:
andi. r10,r8,MSR_RI
beq- unrecov_restore
#endif
-
- /* Disable interrupts so current_thread_info()->flags can't change,
+ /*
+ * Disable interrupts so current_thread_info()->flags can't change,
* and so that we don't get interrupted after loading SRR0/1.
*/
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
- mtmsrd r10,1
+ ld r10,PACAKMSR(r13)
+ /*
+ * For performance reasons we clear RI the same time that we
+ * clear EE. We only need to clear RI just before we restore r13
+ * below, but batching it with EE saves us one expensive mtmsrd call.
+ * We have to be careful to restore RI if we branch anywhere from
+ * here (eg syscall_exit_work).
+ */
+ li r9,MSR_RI
+ andc r11,r10,r9
+ mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
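In C terms, the MSR handling above amounts to the following sketch (it assumes PACAKMSR already has MSR_EE clear; the helper name is invented):

#include <stdint.h>

#define MSR_EE	0x8000ULL	/* external interrupt enable */
#define MSR_RI	0x0002ULL	/* recoverable interrupt */

/* One mtmsrd clears RI while EE is already absent from the kernel MSR
 * image, instead of paying for a second mtmsrd later. */
static inline uint64_t syscall_exit_msr(uint64_t kernel_msr)
{
	return kernel_msr & ~MSR_RI;
}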
ld r9,TI_FLAGS(r12)
@@ -218,27 +219,17 @@ syscall_exit:
cmpld r3,r11
ld r5,_CCR(r1)
bge- syscall_error
-syscall_error_cont:
+.Lsyscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
- /*
- * Clear RI before restoring r13. If we are returning to
- * userspace and we take an exception after restoring r13,
- * we end up corrupting the userspace r13 value.
- */
-#ifdef CONFIG_PPC_BOOK3S
- /* No MSR:RI on BookE */
- li r12,MSR_RI
- andc r11,r10,r12
- mtmsrd r11,1 /* clear MSR.RI */
-#endif /* CONFIG_PPC_BOOK3S */
beq- 1f
ACCOUNT_CPU_USER_EXIT(r11, r12)
+ HMT_MEDIUM_LOW_HAS_PPR
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
@@ -253,13 +244,13 @@ syscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
neg r3,r3
std r5,_CCR(r1)
- b syscall_error_cont
+ b .Lsyscall_error_cont
/* Traced system call support */
syscall_dotrace:
- bl .save_nvgprs
+ bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_syscall_trace_enter
+ bl do_syscall_trace_enter
/*
* Restore argument registers possibly just changed.
* We use the return value of do_syscall_trace_enter
@@ -273,15 +264,18 @@ syscall_dotrace:
ld r7,GPR7(r1)
ld r8,GPR8(r1)
addi r9,r1,STACK_FRAME_OVERHEAD
- clrrdi r10,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r10, r1)
ld r10,TI_FLAGS(r10)
- b syscall_dotrace_cont
+ b .Lsyscall_dotrace_cont
syscall_enosys:
li r3,-ENOSYS
b syscall_exit
syscall_exit_work:
+#ifdef CONFIG_PPC_BOOK3S
+ mtmsrd r10,1 /* Restore RI */
+#endif
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
@@ -312,22 +306,23 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
+ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
- beq .ret_from_except_lite
+ beq ret_from_except_lite
/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r10
+ ld r10,PACAKMSR(r13)
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
- bl .save_nvgprs
+ bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_syscall_trace_leave
- b .ret_from_except
+ bl do_syscall_trace_leave
+ b ret_from_except
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
@@ -350,36 +345,48 @@ _GLOBAL(save_nvgprs)
*/
_GLOBAL(ppc_fork)
- bl .save_nvgprs
- bl .sys_fork
+ bl save_nvgprs
+ bl sys_fork
b syscall_exit
_GLOBAL(ppc_vfork)
- bl .save_nvgprs
- bl .sys_vfork
+ bl save_nvgprs
+ bl sys_vfork
b syscall_exit
_GLOBAL(ppc_clone)
- bl .save_nvgprs
- bl .sys_clone
+ bl save_nvgprs
+ bl sys_clone
b syscall_exit
_GLOBAL(ppc32_swapcontext)
- bl .save_nvgprs
- bl .compat_sys_swapcontext
+ bl save_nvgprs
+ bl compat_sys_swapcontext
b syscall_exit
_GLOBAL(ppc64_swapcontext)
- bl .save_nvgprs
- bl .sys_swapcontext
+ bl save_nvgprs
+ bl sys_swapcontext
b syscall_exit
_GLOBAL(ret_from_fork)
- bl .schedule_tail
+ bl schedule_tail
REST_NVGPRS(r1)
li r3,0
b syscall_exit
+_GLOBAL(ret_from_kernel_thread)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ mtlr r14
+ mr r3,r15
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+ mr r12,r14
+#endif
+ blrl
+ li r3,0
+ b syscall_exit
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -421,12 +428,6 @@ BEGIN_FTR_SECTION
std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
- mfspr r25,SPRN_DSCR
- std r25,THREAD_DSCR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
and. r0,r0,r22
beq+ 1f
andc r22,r22,r0
@@ -437,6 +438,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FTR_SECTION
+ /* Event based branch registers */
+ mfspr r0, SPRN_BESCR
+ std r0, THREAD_BESCR(r3)
+ mfspr r0, SPRN_EBBHR
+ std r0, THREAD_EBBHR(r3)
+ mfspr r0, SPRN_EBBRR
+ std r0, THREAD_EBBRR(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+#endif
+
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
@@ -456,6 +469,13 @@ BEGIN_FTR_SECTION
ldarx r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
+#ifdef CONFIG_PPC_BOOK3S
+/* Cancel all explicit user streams as they will have no use after context
+ * switch and will stop the HW from creating streams itself.
+ */
+ DCBT_STOP_ALL_STREAM_IDS(r6)
+#endif
+
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
@@ -493,9 +513,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
*/
ld r9,PACA_SLBSHADOWPTR(r13)
li r12,0
- std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
- std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
- std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
+ std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+ li r12,SLBSHADOW_STACKVSID
+ STDX_BE r7,r12,r9 /* Save VSID */
+ li r12,SLBSHADOW_STACKESID
+ STDX_BE r0,r12,r9 /* Save ESID */
/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
* we have 1TB segments, the only CPUs known to have the errata
@@ -510,7 +532,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
2:
#endif /* !CONFIG_PPC_BOOK3S */
- clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
+ CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
because we don't need to leave the 288-byte ABI gap at the
top of the kernel stack. */
@@ -519,8 +541,20 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
- ld r6,_CCR(r1)
- mtcrf 0xFF,r6
+#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FTR_SECTION
+ /* Event based branch registers */
+ ld r0, THREAD_BESCR(r4)
+ mtspr SPRN_BESCR, r0
+ ld r0, THREAD_EBBHR(r4)
+ mtspr SPRN_EBBHR, r0
+ ld r0, THREAD_EBBRR(r4)
+ mtspr SPRN_EBBRR, r0
+
+ ld r0,THREAD_TAR(r4)
+ mtspr SPRN_TAR,r0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+#endif
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
@@ -530,14 +564,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+ lwz r6,THREAD_DSCR_INHERIT(r4)
ld r0,THREAD_DSCR(r4)
+ cmpwi r6,0
+ bne 1f
+ ld r0,PACA_DSCR(r13)
+1:
+BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r8, SPRN_FSCR
+ rldimi r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
+ mtspr SPRN_FSCR, r8
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
cmpd r0,r25
- beq 1f
+ beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
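The DSCR handling above reduces to: use the thread's own DSCR only when dscr_inherit is set, otherwise fall back to the system default held in the PACA, and skip the SPR write when the value is unchanged. A sketch in C with placeholder names:

#include <stdint.h>

/* Stand-in for mtspr SPRN_DSCR in the sketch. */
static void write_dscr(uint64_t val) { (void)val; }

static void switch_dscr(uint64_t prev_task_dscr, int next_dscr_inherit,
			uint64_t next_task_dscr, uint64_t default_dscr)
{
	uint64_t next = next_dscr_inherit ? next_task_dscr : default_dscr;

	if (next != prev_task_dscr)
		write_dscr(next);	/* only touch the SPR when it changes */
}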
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
@@ -553,7 +600,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
_GLOBAL(ret_from_except)
ld r11,_TRAP(r1)
andi. r0,r11,1
- bne .ret_from_except_lite
+ bne ret_from_except_lite
REST_NVGPRS(r1)
_GLOBAL(ret_from_except_lite)
@@ -565,51 +612,197 @@ _GLOBAL(ret_from_except_lite)
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10 /* Get current interrupt state */
- rldicl r9,r10,48,1 /* clear MSR_EE */
- rotldi r9,r9,16
- mtmsrd r9,1 /* Update machine state */
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
-#ifdef CONFIG_PREEMPT
- clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
- li r0,_TIF_NEED_RESCHED /* bits to check */
+ CURRENT_THREAD_INFO(r9, r1)
ld r3,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3E
+ ld r10,PACACURRENT(r13)
+#endif /* CONFIG_PPC_BOOK3E */
ld r4,TI_FLAGS(r9)
- /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
- rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
- and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
- bne do_work
-
-#else /* !CONFIG_PREEMPT */
- ld r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
- beq restore /* if not, just restore regs and return */
+ beq resume_kernel
+#ifdef CONFIG_PPC_BOOK3E
+ lwz r3,(THREAD+THREAD_DBCR0)(r10)
+#endif /* CONFIG_PPC_BOOK3E */
/* Check current_thread_info()->flags */
- clrrdi r9,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_USER_WORK_MASK
- bne do_work
+#ifdef CONFIG_PPC_BOOK3E
+ bne 1f
+ /*
+ * Check to see if the dbcr0 register is set up to debug.
+ * Use the internal debug mode bit to do this.
+ */
+ andis. r0,r3,DBCR0_IDM@h
+ beq restore
+ mfmsr r0
+ rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
+ mtmsr r0
+ mtspr SPRN_DBCR0,r3
+ li r10, -1
+ mtspr SPRN_DBSR,r10
+ b restore
+#else
+ beq restore
#endif
+1: andi. r0,r4,_TIF_NEED_RESCHED
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+ b ret_from_except_lite
+2:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
+ bne 3f /* only restore TM if nothing else to do */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl restore_tm_state
+ b restore
+3:
+#endif
+ bl save_nvgprs
+ bl restore_interrupts
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_notify_resume
+ b ret_from_except
+
+resume_kernel:
+ /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+ andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: ldx r0,r6,r4
+ stdx r0,r6,r3
+ addi r6,r6,8
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ std r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+0: ldarx r4,0,r5
+ andc r4,r4,r11
+ stdcx. r4,0,r5
+ bne- 0b
+1:
+
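Conceptually, the _TIF_EMULATE_STACK_STORE handling above does the following (an illustrative C sketch; memcpy stands in for the ldx/stdx copy loop, and the struct is a minimal stand-in, not the kernel's pt_regs):

#include <string.h>

struct frame_sketch { unsigned long gpr1; /* saved r1: the stwu target */ };

static void emulate_stack_store(struct frame_sketch *regs, unsigned long frame_size)
{
	unsigned long old_sp = (unsigned long)regs + frame_size;
	unsigned long new_sp = regs->gpr1;	/* where the emulated stwu wanted r1 */
	void *trampoline = (void *)(new_sp - frame_size);

	/* Relocate the exception frame below the new stack pointer ... */
	memcpy(trampoline, regs, frame_size);
	/* ... then complete the interrupted store-with-update: the old
	 * stack pointer is stored at the new stack pointer location. */
	*(unsigned long *)new_sp = old_sp;
}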
+#ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq+ restore
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+ crandc eq,cr1*4+eq,eq
+ bne restore
+
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first and reconcile irq state.
+ */
+ RECONCILE_IRQ_STATE(r3,r4)
+1: bl preempt_schedule_irq
+
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+ andi. r0,r4,_TIF_NEED_RESCHED
+ bne 1b
+
+ /*
+ * arch_local_irq_restore() from preempt_schedule_irq above may
+ * hard-enable interrupts, but we really should have them disabled
+ * when we return from the interrupt so that we don't get
+ * interrupted after loading SRR0/1.
+ */
+#ifdef CONFIG_PPC_BOOK3E
+ wrteei 0
+#else
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ mtmsrd r10,1 /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PREEMPT */
+
+ .globl fast_exc_return_irq
+fast_exc_return_irq:
restore:
-BEGIN_FW_FTR_SECTION
+ /*
+ * This is the main kernel exit path. First we check if we
+ * are about to re-enable interrupts
+ */
ld r5,SOFTE(r1)
-FW_FTR_SECTION_ELSE
- b .Liseries_check_pending_irqs
-ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
-2:
- TRACE_AND_RESTORE_IRQ(r5);
+ lbz r6,PACASOFTIRQEN(r13)
+ cmpwi cr0,r5,0
+ beq restore_irq_off
- /* extract EE bit and use it to restore paca->hard_enabled */
- ld r3,_MSR(r1)
- rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
- stb r4,PACAHARDIRQEN(r13)
+ /* We are enabling; were we already enabled? If so, just return */
+ cmpwi cr0,r6,1
+ beq cr0,do_restore
+ /*
+ * We are about to soft-enable interrupts (we are hard disabled
+ * at this point). We check if there's anything that needs to
+ * be replayed first.
+ */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ bne- restore_check_irq_replay
+
+ /*
+ * Get here when nothing happened while soft-disabled, just
+ * soft-enable and move on. We will hard-enable as a side
+ * effect of rfi
+ */
+restore_no_replay:
+ TRACE_ENABLE_INTS
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13);
+
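Putting the branches of the restore path together, the decision flow is roughly the following (a C sketch with invented names; the real state lives in the PACA and in SOFTE of the exception frame):

enum restore_action {
	RESTORE_IRQ_OFF,	/* frame says soft-disabled: keep it that way */
	DO_RESTORE,		/* already soft-enabled: nothing more to do */
	CHECK_IRQ_REPLAY,	/* something fired while soft-disabled: replay */
	SOFT_ENABLE_RESTORE,	/* mark soft-enabled; rfid will hard-enable */
};

static enum restore_action restore_decision(unsigned long frame_softe,
					    unsigned char soft_enabled,
					    unsigned char irq_happened)
{
	if (!frame_softe)
		return RESTORE_IRQ_OFF;
	if (soft_enabled)
		return DO_RESTORE;
	if (irq_happened)
		return CHECK_IRQ_REPLAY;
	return SOFT_ENABLE_RESTORE;
}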
+ /*
+ * Final return path. BookE is handled in a different file
+ */
+do_restore:
#ifdef CONFIG_PPC_BOOK3E
- b .exception_return_book3e
+ b exception_return_book3e
#else
+ /*
+ * Clear the reservation. If we know the CPU tracks the address of
+ * the reservation then we can potentially save some cycles and use
+ * a larx. On POWER6 and POWER7 this is significantly faster.
+ */
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ /*
+ * Some code paths, such as load_up_fpu or altivec, return directly
+ * here. They run entirely hard disabled and do not alter the
+ * interrupt state. They also don't use lwarx/stwcx. and thus
+ * are known not to leave dangling reservations.
+ */
+ .globl fast_exception_return
+fast_exception_return:
+ ld r3,_MSR(r1)
ld r4,_CTR(r1)
ld r0,_LINK(r1)
mtctr r4
@@ -622,32 +815,35 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
andi. r0,r3,MSR_RI
beq- unrecov_restore
- /*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
+ /* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+ ld r2,PACACURRENT(r13)
+ ld r2,TASKTHREADPPR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/*
* Clear RI before restoring r13. If we are returning to
* userspace and we take an exception after restoring r13,
* we end up corrupting the userspace r13 value.
*/
- mfmsr r4
- andc r4,r4,r0 /* r0 contains MSR_RI here */
+ ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
+ andc r4,r4,r0 /* r0 contains MSR_RI here */
mtmsrd r4,1
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* TM debug */
+ std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
+#endif
/*
* r13 is our per cpu area, only restore it if we are returning to
- * userspace
+ * userspace; the value stored in the stack frame may belong to
+ * another CPU.
*/
andi. r0,r3,MSR_PR
beq 1f
+BEGIN_FTR_SECTION
+ mtspr SPRN_PPR,r2 /* Restore PPR */
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r2, r4)
REST_GPR(13, r1)
1:
@@ -669,103 +865,86 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
#endif /* CONFIG_PPC_BOOK3E */
-.Liseries_check_pending_irqs:
-#ifdef CONFIG_PPC_ISERIES
- ld r5,SOFTE(r1)
- cmpdi 0,r5,0
- beq 2b
- /* Check for pending interrupts (iSeries) */
- ld r3,PACALPPACAPTR(r13)
- ld r3,LPPACAANYINT(r3)
- cmpdi r3,0
- beq+ 2b /* skip do_IRQ if no interrupts */
-
- li r3,0
- stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_off
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- mtmsrd r10 /* hard-enable again */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite /* loop back and handle more */
-#endif
-
-do_work:
-#ifdef CONFIG_PREEMPT
- andi. r0,r3,MSR_PR /* Returning to user mode? */
- bne user_work
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr1,r8,0
- ld r0,SOFTE(r1)
- cmpdi r0,0
- crandc eq,cr1*4+eq,eq
- bne restore
-
- /* Here we are preempting the current task.
+ /*
+ * We are returning to a context with interrupts soft disabled.
*
- * Ensure interrupts are soft-disabled. We also properly mark
- * the PACA to reflect the fact that they are hard-disabled
- * and trace the change
+ * However, we may also be about to hard enable, so we need to
+ * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+ * or that bit can get out of sync and bad things will happen.
*/
- li r0,0
- stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
+restore_irq_off:
+ ld r3,_MSR(r1)
+ lbz r7,PACAIRQHAPPENED(r13)
+ andi. r0,r3,MSR_EE
+ beq 1f
+ rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
+ stb r7,PACAIRQHAPPENED(r13)
+1: li r0,0
+ stb r0,PACASOFTIRQEN(r13);
TRACE_DISABLE_INTS
+ b do_restore
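The restore_irq_off path above can be read as the following C sketch (PACA_IRQ_HARD_DIS is the real flag name, but its value and the struct layout here are assumptions made for the sketch):

#include <stdint.h>

#define MSR_EE			0x8000ULL
#define PACA_IRQ_HARD_DIS	0x01	/* value assumed for the sketch */

struct paca_sketch {
	uint8_t irq_happened;
	uint8_t soft_enabled;
};

static void restore_irq_off_sketch(struct paca_sketch *paca, uint64_t return_msr)
{
	/* If the context we return to runs with EE set, the rfid will
	 * hard-enable, so PACA_IRQ_HARD_DIS must not be left behind. */
	if (return_msr & MSR_EE)
		paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	paca->soft_enabled = 0;		/* we stay soft-disabled */
}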
- /* Call the scheduler with soft IRQs off */
-1: bl .preempt_schedule_irq
-
- /* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
- li r0,0
- stb r0,PACAHARDIRQEN(r13)
-
- /* Re-test flags and eventually loop */
- clrrdi r9,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
- bne 1b
- b restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
+ /*
+ * Something did happen; check whether a re-emit is needed
+ * (this also clears paca->irq_happened)
+ */
+restore_check_irq_replay:
+ /* XXX: We could implement a fast path here where we check
+ * for irq_happened being just 0x01, in which case we can
+ * clear it and return. That means that we would potentially
+ * miss a decrementer having wrapped all the way around.
+ *
+ * Still, this might be useful for things like hash_page
+ */
+ bl __check_irq_replay
+ cmpwi cr0,r3,0
+ beq restore_no_replay
+
+ /*
+ * We need to re-emit an interrupt. We do so by re-using our
+ * existing exception frame. We first change the trap value,
+ * taking care to preserve its low nibble.
+ */
+ ld r4,_TRAP(r1)
+ clrldi r4,r4,60
+ or r4,r4,r3
+ std r4,_TRAP(r1)
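In C this trap rewrite is simply the following (a sketch; the struct is a stand-in for the saved register frame):

struct regs_sketch { unsigned long trap; };

/* Keep the low nibble (frame flags) of the saved trap word and splice in
 * the vector returned by __check_irq_replay. */
static void set_replay_trap(struct regs_sketch *regs, unsigned long vector)
{
	regs->trap = (regs->trap & 0xfUL) | vector;
}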
- /* Enable interrupts */
+ /*
+ * Then find the right handler and call it. Interrupts are
+ * still soft-disabled and we keep them that way.
+ */
+ cmpwi cr0,r3,0x500
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl do_IRQ
+ b ret_from_except
+1: cmpwi cr0,r3,0x900
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl timer_interrupt
+ b ret_from_except
+#ifdef CONFIG_PPC_DOORBELL
+1:
#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
+ cmpwi cr0,r3,0x280
#else
- ori r10,r10,MSR_EE
- mtmsrd r10,1
+ BEGIN_FTR_SECTION
+ cmpwi cr0,r3,0xe80
+ FTR_SECTION_ELSE
+ cmpwi cr0,r3,0xa00
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
-
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- li r5,1
- TRACE_AND_RESTORE_IRQ(r5);
- bl .schedule
- b .ret_from_except_lite
-
-1: bl .save_nvgprs
- li r5,1
- TRACE_AND_RESTORE_IRQ(r5);
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_notify_resume
- b .ret_from_except
-
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl doorbell_exception
+ b ret_from_except
+#endif /* CONFIG_PPC_DOORBELL */
+1: b ret_from_except /* What else to do here ? */
+
unrecov_restore:
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
+ bl unrecoverable_exception
b unrecov_restore
#ifdef CONFIG_PPC_RTAS
@@ -831,7 +1010,7 @@ _GLOBAL(enter_rtas)
std r6,PACASAVEDMSR(r13)
/* Setup our real return addr */
- LOAD_REG_ADDR(r4,.rtas_return_loc)
+ LOAD_REG_ADDR(r4,rtas_return_loc)
clrldi r4,r4,2 /* convert to realmode address */
mtlr r4
@@ -841,7 +1020,7 @@ _GLOBAL(enter_rtas)
li r9,1
rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
- ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
+ ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
andc r6,r0,r9
sync /* disable interrupts so SRR0/1 */
mtmsrd r0 /* don't get trashed */
@@ -855,14 +1034,16 @@ _GLOBAL(enter_rtas)
rfid
b . /* prevent speculative execution */
-_STATIC(rtas_return_loc)
+rtas_return_loc:
+ FIXUP_ENDIAN
+
/* relocation is off at this point */
GET_PACA(r4)
clrldi r4,r4,2 /* convert to realmode address */
bcl 20,31,$+4
0: mflr r3
- ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
+ ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
mfmsr r6
li r0,MSR_RI
@@ -879,9 +1060,9 @@ _STATIC(rtas_return_loc)
b . /* prevent speculative execution */
.align 3
-1: .llong .rtas_restore_regs
+1: .llong rtas_restore_regs
-_STATIC(rtas_restore_regs)
+rtas_restore_regs:
/* relocation is on at this point */
REST_GPR(2, r1) /* Restore the TOC */
REST_GPR(13, r1) /* Restore paca */
@@ -927,28 +1108,30 @@ _GLOBAL(enter_prom)
std r10,_CCR(r1)
std r11,_MSR(r1)
- /* Get the PROM entrypoint */
- mtlr r4
+ /* Put PROM address in SRR0 */
+ mtsrr0 r4
- /* Switch MSR to 32 bits mode
+ /* Set up our trampoline return addr in LR */
+ bcl 20,31,$+4
+0: mflr r4
+ addi r4,r4,(1f - 0b)
+ mtlr r4
+
+ /* Prepare a 32-bit mode big-endian MSR
*/
#ifdef CONFIG_PPC_BOOK3E
rlwinm r11,r11,0,1,31
- mtmsr r11
+ mtsrr1 r11
+ rfi
#else /* CONFIG_PPC_BOOK3E */
- mfmsr r11
- li r12,1
- rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
- andc r11,r11,r12
- li r12,1
- rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
- andc r11,r11,r12
- mtmsrd r11
+ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+ andc r11,r11,r12
+ mtsrr1 r11
+ rfid
#endif /* CONFIG_PPC_BOOK3E */
- isync
- /* Enter PROM here... */
- blrl
+1: /* Return from OF */
+ FIXUP_ENDIAN
/* Just make sure that r1's top 32 bits didn't get
 * corrupted by OF
@@ -979,7 +1162,7 @@ _GLOBAL(mcount)
_GLOBAL(_mcount)
blr
-_GLOBAL(ftrace_caller)
+_GLOBAL_TOC(ftrace_caller)
/* Taken from output of objdump from lib64/glibc */
mflr r3
ld r11, 0(r1)
@@ -1003,10 +1186,7 @@ _GLOBAL(ftrace_graph_stub)
_GLOBAL(ftrace_stub)
blr
#else
-_GLOBAL(mcount)
- blr
-
-_GLOBAL(_mcount)
+_GLOBAL_TOC(_mcount)
/* Taken from output of objdump from lib64/glibc */
mflr r3
ld r11, 0(r1)
@@ -1044,7 +1224,7 @@ _GLOBAL(ftrace_graph_caller)
ld r11, 112(r1)
addi r3, r11, 16
- bl .prepare_ftrace_return
+ bl prepare_ftrace_return
nop
ld r0, 128(r1)
@@ -1060,7 +1240,7 @@ _GLOBAL(return_to_handler)
mr r31, r1
stdu r1, -112(r1)
- bl .ftrace_return_to_handler
+ bl ftrace_return_to_handler
nop
/* return value has real return address */
@@ -1090,7 +1270,7 @@ _GLOBAL(mod_return_to_handler)
*/
ld r2, PACATOC(r13)
- bl .ftrace_return_to_handler
+ bl ftrace_return_to_handler
nop
/* return value has real return address */