Diffstat (limited to 'arch/powerpc/kernel/entry_32.S')
-rw-r--r--  arch/powerpc/kernel/entry_32.S  721
1 file changed, 578 insertions(+), 143 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index f20a67261ec..22b45a4955c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -19,7 +19,6 @@
*
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
@@ -31,6 +30,8 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/ftrace.h>
+#include <asm/ptrace.h>
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
@@ -45,29 +46,58 @@
#endif
#ifdef CONFIG_BOOKE
-#include "head_booke.h"
-#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
- mtspr exc_level##_SPRG,r8; \
- BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
- lwz r0,GPR10-INT_FRAME_SIZE(r8); \
- stw r0,GPR10(r11); \
- lwz r0,GPR11-INT_FRAME_SIZE(r8); \
- stw r0,GPR11(r11); \
- mfspr r8,exc_level##_SPRG
-
.globl mcheck_transfer_to_handler
mcheck_transfer_to_handler:
- TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
- b transfer_to_handler_full
+ mfspr r0,SPRN_DSRR0
+ stw r0,_DSRR0(r11)
+ mfspr r0,SPRN_DSRR1
+ stw r0,_DSRR1(r11)
+ /* fall through */
.globl debug_transfer_to_handler
debug_transfer_to_handler:
- TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
- b transfer_to_handler_full
+ mfspr r0,SPRN_CSRR0
+ stw r0,_CSRR0(r11)
+ mfspr r0,SPRN_CSRR1
+ stw r0,_CSRR1(r11)
+ /* fall through */
.globl crit_transfer_to_handler
crit_transfer_to_handler:
- TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
+#ifdef CONFIG_PPC_BOOK3E_MMU
+ mfspr r0,SPRN_MAS0
+ stw r0,MAS0(r11)
+ mfspr r0,SPRN_MAS1
+ stw r0,MAS1(r11)
+ mfspr r0,SPRN_MAS2
+ stw r0,MAS2(r11)
+ mfspr r0,SPRN_MAS3
+ stw r0,MAS3(r11)
+ mfspr r0,SPRN_MAS6
+ stw r0,MAS6(r11)
+#ifdef CONFIG_PHYS_64BIT
+ mfspr r0,SPRN_MAS7
+ stw r0,MAS7(r11)
+#endif /* CONFIG_PHYS_64BIT */
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+#ifdef CONFIG_44x
+ mfspr r0,SPRN_MMUCR
+ stw r0,MMUCR(r11)
+#endif
+ mfspr r0,SPRN_SRR0
+ stw r0,_SRR0(r11)
+ mfspr r0,SPRN_SRR1
+ stw r0,_SRR1(r11)
+
+ /* save the old stack limit, then point it at the current
+ * stack so that it still protects the thread_info
+ * struct
+ */
+ mfspr r8,SPRN_SPRG_THREAD
+ lwz r0,KSP_LIMIT(r8)
+ stw r0,SAVED_KSP_LIMIT(r11)
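+ /* keep the limit's offset within the stack, re-based onto the current stack */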
+ rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -78,6 +108,20 @@ crit_transfer_to_handler:
stw r0,GPR10(r11)
lwz r0,crit_r11@l(0)
stw r0,GPR11(r11)
+ mfspr r0,SPRN_SRR0
+ stw r0,crit_srr0@l(0)
+ mfspr r0,SPRN_SRR1
+ stw r0,crit_srr1@l(0)
+
+ /* save the old stack limit, then point it at the current
+ * stack so that it still protects the thread_info
+ * struct
+ */
+ mfspr r8,SPRN_SPRG_THREAD
+ lwz r0,KSP_LIMIT(r8)
+ stw r0,saved_ksp_limit@l(0)
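+ /* as above: keep the limit's offset, re-base it onto the current stack */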
+ rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -103,7 +147,7 @@ transfer_to_handler:
mfspr r2,SPRN_XER
stw r12,_CTR(r11)
stw r2,_XER(r11)
- mfspr r12,SPRN_SPRG3
+ mfspr r12,SPRN_SPRG_THREAD
addi r2,r12,-THREAD
tovirt(r2,r2) /* set r2 to current */
beq 2f /* if from user, fix up THREAD.regs */
@@ -111,9 +155,9 @@ transfer_to_handler:
stw r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
/* Check to see if the dbcr0 register is set up to debug. Use the
- single-step bit to do this. */
+ internal debug mode bit to do this. */
lwz r12,THREAD_DBCR0(r12)
- andis. r12,r12,DBCR0_IC@h
+ andis. r12,r12,DBCR0_IDM@h
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
li r12,-1 /* clear all pending debug events */
@@ -121,6 +165,12 @@ transfer_to_handler:
lis r11,global_dbcr0@ha
tophys(r11,r11)
addi r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
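+ /* global_dbcr0 has an 8-byte slot per CPU: saved DBCR0 plus a use count */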
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_CPU(r9)
+ slwi r9,r9,3
+ add r11,r11,r9
+#endif
lwz r12,0(r11)
mtspr SPRN_DBCR0,r12
lwz r12,4(r11)
@@ -128,34 +178,96 @@ transfer_to_handler:
stw r12,4(r11)
#endif
b 3f
+
2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
*/
-#ifdef CONFIG_6xx
- mfspr r11,SPRN_HID0
- mtcr r11
-BEGIN_FTR_SECTION
- bt- 8,power_save_6xx_restore /* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
- bt- 9,power_save_6xx_restore /* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#endif /* CONFIG_6xx */
+ lwz r9,KSP_LIMIT(r12)
+ cmplw r1,r9 /* if r1 <= ksp_limit */
+ ble- stack_ovf /* then the kernel stack overflowed */
+5:
+#if defined(CONFIG_6xx) || defined(CONFIG_E500)
+ CURRENT_THREAD_INFO(r9, r1)
+ tophys(r9,r9) /* check local flags */
+ lwz r12,TI_LOCAL_FLAGS(r9)
+ mtcrf 0x01,r12
+ bt- 31-TLF_NAPPING,4f
+ bt- 31-TLF_SLEEPING,7f
+#endif /* CONFIG_6xx || CONFIG_E500 */
.globl transfer_to_handler_cont
transfer_to_handler_cont:
- lwz r11,THREAD_INFO-THREAD(r12)
- cmplw r1,r11 /* if r1 <= current->thread_info */
- ble- stack_ovf /* then the kernel stack overflowed */
3:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
- FIX_SRR1(r10,r12)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lis r12,reenable_mmu@h
+ ori r12,r12,reenable_mmu@l
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r10
+ SYNC
+ RFI
+reenable_mmu: /* re-enable mmu so we can call into C */
+ mfmsr r10
+ lwz r12,_MSR(r1)
+ xor r10,r10,r12
+ andi. r10,r10,MSR_EE /* Did EE change? */
+ beq 1f
+
+ /*
+ * trace_hardirqs_off() will use CALLER_ADDR0 and CALLER_ADDR1.
+ * If from user mode there is only one stack frame on the stack, and
+ * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
+ * stack frame to make trace_hardirqs_off happy.
+ *
+ * This is handy because we also need to save a bunch of GPRs,
+ * r3 can be different from GPR3(r1) at this point, r9 and r11
+ * contain the old MSR and handler address respectively,
+ * r4 & r5 can contain page fault arguments that need to be passed
+ * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+ * they aren't useful past this point (aren't syscall arguments),
+ * the rest is restored from the exception frame.
+ */
+ stwu r1,-32(r1)
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,16(r1)
+ stw r4,20(r1)
+ stw r5,24(r1)
+ bl trace_hardirqs_off
+ lwz r5,24(r1)
+ lwz r4,20(r1)
+ lwz r3,16(r1)
+ lwz r11,12(r1)
+ lwz r9,8(r1)
+ addi r1,r1,32
+ lwz r0,GPR0(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+1: mtctr r11
+ mtlr r9
+ bctr /* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+#if defined(CONFIG_6xx) || defined(CONFIG_E500)
+4: rlwinm r12,r12,0,~_TLF_NAPPING
+ stw r12,TI_LOCAL_FLAGS(r9)
+ b power_save_ppc32_restore
+
+7: rlwinm r12,r12,0,~_TLF_SLEEPING
+ stw r12,TI_LOCAL_FLAGS(r9)
+ lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
+ rlwinm r9,r9,0,~MSR_EE
+ lwz r12,_LINK(r11) /* and return to address in LR */
+ b fast_exception_return
+#endif
/*
* On kernel stack overflow, load up an initial stack pointer
@@ -163,10 +275,10 @@ transfer_to_handler_cont:
*/
stack_ovf:
/* sometimes we use a statically-allocated stack, which is OK. */
- lis r11,_end@h
- ori r11,r11,_end@l
- cmplw r1,r11
- ble 3b /* r1 <= &_end is OK */
+ lis r12,_end@h
+ ori r12,r12,_end@l
+ cmplw r1,r12
+ ble 5b /* r1 <= &_end is OK */
SAVE_NVGPRS(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
lis r1,init_thread_union@ha
@@ -189,7 +301,6 @@ stack_ovf:
0:
_GLOBAL(DoSyscall)
- stw r0,THREAD+LAST_SYSCALL(r2)
stw r3,ORIG_GPR3(r1)
li r12,0
stw r12,RESULT(r1)
@@ -199,7 +310,32 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
- rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Return from syscalls can (and generally will) hard enable
+ * interrupts. You aren't supposed to call a syscall with
+ * interrupts disabled in the first place. However, to ensure
+ * that we get it right vs. lockdep if it happens, we force
+ * that hard enable here with appropriate tracing if we see
+ * that we have been called with interrupts off
+ */
+ mfmsr r11
+ andi. r12,r11,MSR_EE
+ bne+ 1f
+ /* We came in with interrupts disabled, we enable them now */
+ bl trace_hardirqs_on
+ mfmsr r11
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ ori r11,r11,MSR_EE
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ mtmsr r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+ CURRENT_THREAD_INFO(r10, r1)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -220,14 +356,15 @@ ret_from_syscall:
bl do_show_syscall_exit
#endif
mr r6,r3
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ CURRENT_THREAD_INFO(r12, r1)
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ /* Note: We don't bother telling lockdep about it */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
li r8,-_LAST_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
@@ -236,20 +373,44 @@ ret_from_syscall:
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
syscall_exit_cont:
+ lwz r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* If we are going to return from the syscall with interrupts
+ * off, we trace that here. It shouldn't happen, but we
+ * want to catch the bugger if it does, right?
+ */
+ andi. r10,r8,MSR_EE
+ bne+ 1f
+ stw r3,GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* If the process has its own DBCR0 value, load it up. The single
- step bit tells us that dbcr0 should be loaded. */
+ /* If the process has its own DBCR0 value, load it up. The internal
+ debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IC@h
+ andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
+#ifdef CONFIG_44x
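+ /* 440 cores (but not the 47x) may need the icache flushed before returning to userspace */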
+BEGIN_MMU_FTR_SECTION
+ lis r4,icache_44x_need_flush@ha
+ lwz r5,icache_44x_need_flush@l(r4)
+ cmplwi cr0,r5,0
+ bne- 2f
+1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
+#endif /* CONFIG_44x */
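+ /* some cores require the stwcx. below to be paired with a lwarx */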
+BEGIN_FTR_SECTION
+ lwarx r7,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
- lwz r8,_MSR(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
@@ -257,6 +418,12 @@ syscall_exit_cont:
mtspr SPRN_SRR1,r8
SYNC
RFI
+#ifdef CONFIG_44x
+2: li r7,0
+ iccci r0,r0
+ stw r7,icache_44x_need_flush@l(r4)
+ b 1b
+#endif /* CONFIG_44x */
66: li r3,-ENOSYS
b ret_from_syscall
@@ -268,6 +435,17 @@ ret_from_fork:
li r3,0
b ret_from_syscall
+ .globl ret_from_kernel_thread
+ret_from_kernel_thread:
+ REST_NVGPRS(r1)
+ bl schedule_tail
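+ /* copy_thread() put the thread function in r14 and its argument in r15 */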
+ mtlr r14
+ mr r3,r15
+ PPC440EP_ERR42
+ blrl
+ li r3,0
+ b ret_from_syscall
+
/* Traced system call support */
syscall_dotrace:
SAVE_NVGPRS(r1)
@@ -275,7 +453,12 @@ syscall_dotrace:
stw r0,_TRAP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_enter
- lwz r0,GPR0(r1) /* Restore original registers */
+ /*
+ * Restore the argument registers, which the tracer may have changed.
+ * We use the return value of do_syscall_trace_enter
+ * as the syscall number to look up in the table (r0).
+ */
+ mr r0,r3
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
@@ -287,8 +470,10 @@ syscall_dotrace:
syscall_exit_work:
andi. r0,r9,_TIF_RESTOREALL
- bne- 2f
- cmplw 0,r3,r8
+ beq+ 0f
+ REST_NVGPRS(r1)
+ b 2f
+0: cmplw 0,r3,r8
blt+ 1f
andi. r0,r9,_TIF_NOERROR
bne- 1f
@@ -302,9 +487,7 @@ syscall_exit_work:
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
beq 4f
- /* Clear per-syscall TIF flags if any are set, but _leave_
- _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
- yet. */
+ /* Clear per-syscall TIF flags if any are set. */
li r11,_TIF_PERSYSCALL_MASK
addi r12,r12,TI_FLAGS
@@ -318,8 +501,15 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything which requires enabling interrupts? */
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
- beq 7f
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ beq ret_from_except
+
+ /* Re-enable interrupts. There is no need to trace that with
+ * lockdep as we are supposed to have IRQs on at this point
+ */
+ ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10)
/* Save NVGPRS if they're not saved already */
lwz r4,_TRAP(r1)
@@ -328,71 +518,11 @@ syscall_exit_work:
SAVE_NVGPRS(r1)
li r4,0xc00
stw r4,_TRAP(r1)
-
- /* Re-enable interrupts */
-5: ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10)
-
- andi. r0,r9,_TIF_SAVE_NVGPRS
- bne save_user_nvgprs
-
-save_user_nvgprs_cont:
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
- beq 7f
-
+5:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_leave
- REST_NVGPRS(r1)
-
-6: lwz r3,GPR3(r1)
- LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
- SYNC
- MTMSRD(r10) /* disable interrupts again */
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- lwz r9,TI_FLAGS(r12)
-7:
- andi. r0,r9,_TIF_NEED_RESCHED
- bne 8f
- lwz r5,_MSR(r1)
- andi. r5,r5,MSR_PR
- beq ret_from_except
- andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
- beq ret_from_except
- b do_user_signal
-8:
- ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* re-enable interrupts */
- bl schedule
- b 6b
-
-save_user_nvgprs:
- lwz r8,TI_SIGFRAME(r12)
-
-.macro savewords start, end
- 1: stw \start,4*(\start)(r8)
- .section __ex_table,"a"
- .align 2
- .long 1b,save_user_nvgprs_fault
- .previous
- .if \end - \start
- savewords "(\start+1)",\end
- .endif
-.endm
- savewords 14,31
- b save_user_nvgprs_cont
-
-
-save_user_nvgprs_fault:
- li r3,11 /* SIGSEGV */
- lwz r4,TI_TASK(r12)
- bl force_sigsegv
+ b ret_from_except_full
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- lwz r9,TI_FLAGS(r12)
- b save_user_nvgprs_cont
-
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
@@ -490,6 +620,14 @@ ppc_clone:
stw r0,_TRAP(r1) /* register set saved */
b sys_clone
+ .globl ppc_swapcontext
+ppc_swapcontext:
+ SAVE_NVGPRS(r1)
+ lwz r0,_TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,_TRAP(r1) /* register set saved */
+ b sys_swapcontext
+
/*
* Top-level page fault handling.
* This is in assembler because if do_page_fault tells us that
@@ -550,9 +688,11 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
+BEGIN_FTR_SECTION
oris r0,r0,MSR_SPE@h /* Disable SPE */
mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
stw r12,THREAD+THREAD_SPEFSCR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
and. r0,r0,r11 /* FP or altivec or SPE enabled? */
beq+ 1f
@@ -574,7 +714,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
tophys(r0,r4)
CLR_TOP32(r0)
- mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
+ mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
lwz r1,KSP(r4) /* Load new stack pointer */
/* save the old current 'last' for return value */
@@ -588,8 +728,10 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
+BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_SPEFSCR(r2)
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
+END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
lwz r0,_CCR(r1)
@@ -642,7 +784,11 @@ fast_exception_return:
mr r12,r4 /* restart at exc_exit_restart */
b 2b
- .comm fee_restarts,4
+ .section .bss
+ .align 2
+fee_restarts:
+ .space 4
+ .previous
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
@@ -671,6 +817,7 @@ ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
+ /* Note: We don't bother telling lockdep about it */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
@@ -681,46 +828,140 @@ ret_from_except:
user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
+ andi. r0,r9,_TIF_USER_WORK_MASK
bne do_work
restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* Check whether this process has its own DBCR0 value. The single
- step bit tells us that dbcr0 should be loaded. */
+ /* Check whether this process has its own DBCR0 value. The internal
+ debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IC@h
+ andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
-#ifdef CONFIG_PREEMPT
b restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
+ /* check current_thread_info->flags for _TIF_EMULATE_STACK_STORE */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r8,TI_FLAGS(r9)
+ andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
+ beq+ 1f
+
+ addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+
+ lwz r3,GPR1(r1)
+ subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
+ mr r4,r1 /* src: current exception frame */
+ mr r1,r3 /* Reroute the trampoline frame to r1 */
+
+ /* Copy from the original to the trampoline. */
+ li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
+ li r6,0 /* start offset: 0 */
+ mtctr r5
+2: lwzx r0,r6,r4
+ stwx r0,r6,r3
+ addi r6,r6,4
+ bdnz 2b
+
+ /* Do real store operation to complete stwu */
+ lwz r5,GPR1(r1)
+ stw r8,0(r5)
+
+ /* Clear _TIF_EMULATE_STACK_STORE flag */
+ lis r11,_TIF_EMULATE_STACK_STORE@h
+ addi r5,r9,TI_FLAGS
+0: lwarx r8,0,r5
+ andc r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
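+ /* 405 erratum #77: a dcbt is needed just before stwcx. */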
+ dcbt 0,r5
+#endif
+ stwcx. r8,0,r5
+ bne- 0b
+1:
+
+#ifdef CONFIG_PREEMPT
/* check current_thread_info->preempt_count */
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED
+ andi. r8,r8,_TIF_NEED_RESCHED
beq+ restore
+ lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+ * preempt_schedule_irq with IRQs off, so we inform lockdep
+ * now that we -did- turn them off already
+ */
+ bl trace_hardirqs_off
+#endif
1: bl preempt_schedule_irq
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
-#else
-resume_kernel:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+ */
+ bl trace_hardirqs_on
+#endif
#endif /* CONFIG_PREEMPT */
/* interrupts are hard-disabled at this point */
restore:
+#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+ b 1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+ lis r4,icache_44x_need_flush@ha
+ lwz r5,icache_44x_need_flush@l(r4)
+ cmplwi cr0,r5,0
+ beq+ 1f
+ li r6,0
+ iccci r0,r0
+ stw r6,icache_44x_need_flush@l(r4)
+1:
+#endif /* CONFIG_44x */
+
+ lwz r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep doesn't know about the fact that IRQs are temporarily turned
+ * off in this assembly code while peeking at TI_FLAGS() and such. However
+ * we need to inform it if the exception turned interrupts off, and we
+ * are about to turn them back on.
+ *
+ * The problem here sadly is that we don't know whether the exception was
+ * one that turned interrupts off or not. So we always tell lockdep about
+ * turning them on here when we go back to wherever we came from with EE
+ * on, even if that may mean some redundant calls being tracked. Maybe later
+ * we could encode what the exception did somewhere or test the exception
+ * type in the pt_regs but that sounds like overkill
+ */
+ andi. r10,r9,MSR_EE
+ beq 1f
+ /*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+ stwu r1,-32(r1)
+ mflr r0
+ stw r0,4(r1)
+ stwu r1,-32(r1)
+ bl trace_hardirqs_on
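+ /* pop both dummy frames by following the back chain */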
+ lwz r1,0(r1)
+ lwz r1,0(r1)
+ lwz r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
@@ -732,10 +973,12 @@ restore:
mtctr r11
PPC405_ERR77(0,r1)
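+ /* pair the stwcx. below with a lwarx on chips that require it */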
+BEGIN_FTR_SECTION
+ lwarx r11,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
@@ -758,7 +1001,6 @@ restore:
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
- lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SPRN_SRR0,r12
@@ -861,18 +1103,91 @@ exc_exit_restart_end:
exc_lvl_rfi; \
b .; /* prevent prefetch past exc_lvl_rfi */
+#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
+ lwz r9,_##exc_lvl_srr0(r1); \
+ lwz r10,_##exc_lvl_srr1(r1); \
+ mtspr SPRN_##exc_lvl_srr0,r9; \
+ mtspr SPRN_##exc_lvl_srr1,r10;
+
+#if defined(CONFIG_PPC_BOOK3E_MMU)
+#ifdef CONFIG_PHYS_64BIT
+#define RESTORE_MAS7 \
+ lwz r11,MAS7(r1); \
+ mtspr SPRN_MAS7,r11;
+#else
+#define RESTORE_MAS7
+#endif /* CONFIG_PHYS_64BIT */
+#define RESTORE_MMU_REGS \
+ lwz r9,MAS0(r1); \
+ lwz r10,MAS1(r1); \
+ lwz r11,MAS2(r1); \
+ mtspr SPRN_MAS0,r9; \
+ lwz r9,MAS3(r1); \
+ mtspr SPRN_MAS1,r10; \
+ lwz r10,MAS6(r1); \
+ mtspr SPRN_MAS2,r11; \
+ mtspr SPRN_MAS3,r9; \
+ mtspr SPRN_MAS6,r10; \
+ RESTORE_MAS7;
+#elif defined(CONFIG_44x)
+#define RESTORE_MMU_REGS \
+ lwz r9,MMUCR(r1); \
+ mtspr SPRN_MMUCR,r9;
+#else
+#define RESTORE_MMU_REGS
+#endif
+
+#ifdef CONFIG_40x
.globl ret_from_crit_exc
ret_from_crit_exc:
- RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+ mfspr r9,SPRN_SPRG_THREAD
+ lis r10,saved_ksp_limit@ha;
+ lwz r10,saved_ksp_limit@l(r10);
+ tovirt(r9,r9);
+ stw r10,KSP_LIMIT(r9)
+ lis r9,crit_srr0@ha;
+ lwz r9,crit_srr0@l(r9);
+ lis r10,crit_srr1@ha;
+ lwz r10,crit_srr1@l(r10);
+ mtspr SPRN_SRR0,r9;
+ mtspr SPRN_SRR1,r10;
+ RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+#endif /* CONFIG_40x */
#ifdef CONFIG_BOOKE
+ .globl ret_from_crit_exc
+ret_from_crit_exc:
+ mfspr r9,SPRN_SPRG_THREAD
+ lwz r10,SAVED_KSP_LIMIT(r1)
+ stw r10,KSP_LIMIT(r9)
+ RESTORE_xSRR(SRR0,SRR1);
+ RESTORE_MMU_REGS;
+ RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+
.globl ret_from_debug_exc
ret_from_debug_exc:
- RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
+ mfspr r9,SPRN_SPRG_THREAD
+ lwz r10,SAVED_KSP_LIMIT(r1)
+ stw r10,KSP_LIMIT(r9)
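+ /* copy the preempt count from the exception stack's thread_info to the task's */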
+ lwz r9,THREAD_INFO-THREAD(r9)
+ CURRENT_THREAD_INFO(r10, r1)
+ lwz r10,TI_PREEMPT(r10)
+ stw r10,TI_PREEMPT(r9)
+ RESTORE_xSRR(SRR0,SRR1);
+ RESTORE_xSRR(CSRR0,CSRR1);
+ RESTORE_MMU_REGS;
+ RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
.globl ret_from_mcheck_exc
ret_from_mcheck_exc:
- RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
+ mfspr r9,SPRN_SPRG_THREAD
+ lwz r10,SAVED_KSP_LIMIT(r1)
+ stw r10,KSP_LIMIT(r9)
+ RESTORE_xSRR(SRR0,SRR1);
+ RESTORE_xSRR(CSRR0,CSRR1);
+ RESTORE_xSRR(DSRR0,DSRR1);
+ RESTORE_MMU_REGS;
+ RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */
/*
@@ -888,6 +1203,12 @@ load_dbcr0:
mfspr r10,SPRN_DBCR0
lis r11,global_dbcr0@ha
addi r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
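+ /* index into global_dbcr0 by CPU: 8 bytes per CPU */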
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_CPU(r9)
+ slwi r9,r9,3
+ add r11,r11,r9
+#endif
stw r10,0(r11)
mtspr SPRN_DBCR0,r0
lwz r10,4(r11)
@@ -897,7 +1218,11 @@ load_dbcr0:
mtspr SPRN_DBSR,r11 /* clear all pending debug events */
blr
- .comm global_dbcr0,8
+ .section .bss
+ .align 4
+global_dbcr0:
+ .space 8*NR_CPUS
+ .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -905,19 +1230,26 @@ do_work: /* r10 contains MSR_KERNEL here */
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
+ /* Note: We don't need to inform lockdep that we are enabling
+ * interrupts here. As far as it knows, they are already enabled
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
+ /* Note: And we don't tell it we are disabling them again
+ * either. Those disable/enable cycles used to peek at
+ * TI_FLAGS aren't advertised.
+ */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
- andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
+ andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
do_user_signal: /* r10 contains MSR_KERNEL here */
ori r10,r10,MSR_EE
@@ -930,9 +1262,9 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
-2: li r3,0
- addi r4,r1,STACK_FRAME_OVERHEAD
- bl do_signal
+2: addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r9
+ bl do_notify_resume
REST_NVGPRS(r1)
b recheck
@@ -972,7 +1304,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
/* shouldn't return */
b 4b
- .comm ee_restarts,4
+ .section .bss
+ .align 2
+ee_restarts:
+ .space 4
+ .previous
/*
* PROM code for specific machines follows. Put it
@@ -1002,7 +1338,7 @@ _GLOBAL(enter_rtas)
MTMSRD(r0) /* don't get trashed */
li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
mtlr r6
- mtspr SPRN_SPRG2,r7
+ mtspr SPRN_SPRG_RTAS,r7
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI
@@ -1012,7 +1348,7 @@ _GLOBAL(enter_rtas)
FIX_SRR1(r9,r0)
addi r1,r1,INT_FRAME_SIZE
li r0,0
- mtspr SPRN_SPRG2,r0
+ mtspr SPRN_SPRG_RTAS,r0
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI /* return to caller */
@@ -1023,3 +1359,102 @@ machine_check_in_rtas:
/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+ /*
+ * _mcount on PPC32 is required to preserve the
+ * link register. But we have r0 to play with. We use r0
+ * to move the return address (back into the traced function)
+ * into the ctr register, restore the link register from the
+ * stack and then jump back using the ctr register.
+ */
+ mflr r0
+ mtctr r0
+ lwz r0, 4(r1)
+ mtlr r0
+ bctr
+
+_GLOBAL(ftrace_caller)
+ MCOUNT_SAVE_FRAME
+ /* r3 ends up with link register */
+ subi r3, r3, MCOUNT_INSN_SIZE
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+ b ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+#endif
+ MCOUNT_RESTORE_FRAME
+ /* old link register ends up in ctr reg */
+ bctr
+#else
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+
+ MCOUNT_SAVE_FRAME
+
+ subi r3, r3, MCOUNT_INSN_SIZE
+ LOAD_REG_ADDR(r5, ftrace_trace_function)
+ lwz r5,0(r5)
+
+ mtctr r5
+ bctrl
+ nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ b ftrace_graph_caller
+#endif
+ MCOUNT_RESTORE_FRAME
+ bctr
+#endif
+
+_GLOBAL(ftrace_stub)
+ blr
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+_GLOBAL(ftrace_graph_caller)
+ /* load r4 with local address */
+ lwz r4, 44(r1)
+ subi r4, r4, MCOUNT_INSN_SIZE
+
+ /* get the parent address */
+ addi r3, r1, 52
+
+ bl prepare_ftrace_return
+ nop
+
+ MCOUNT_RESTORE_FRAME
+ /* old link register ends up in ctr reg */
+ bctr
+
+_GLOBAL(return_to_handler)
+ /* need to save return values */
+ stwu r1, -32(r1)
+ stw r3, 20(r1)
+ stw r4, 16(r1)
+ stw r31, 12(r1)
+ mr r31, r1
+
+ bl ftrace_return_to_handler
+ nop
+
+ /* return value has real return address */
+ mtlr r3
+
+ lwz r3, 20(r1)
+ lwz r4, 16(r1)
+ lwz r31,12(r1)
+ lwz r1, 0(r1)
+
+ /* Jump back to real return address */
+ blr
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#endif /* CONFIG_FUNCTION_TRACER */