Diffstat (limited to 'arch/powerpc/kernel/exceptions-64s.S')
-rw-r--r--   arch/powerpc/kernel/exceptions-64s.S | 531
1 file changed, 402 insertions, 129 deletions
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3a9ed6ac224..a7d36b19221 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -54,14 +54,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\  	xori	r12,r12,MSR_LE ;				\  	mtspr	SPRN_SRR1,r12 ;					\  	rfid ;		/* return to userspace */		\ -	b	. ;						\ -2:	mfspr	r12,SPRN_SRR1 ;					\ -	andi.	r12,r12,MSR_PR ;				\ -	bne	0b ;						\ -	mtspr	SPRN_SRR0,r3 ;					\ -	mtspr	SPRN_SRR1,r4 ;					\ -	mtspr	SPRN_SDR1,r5 ;					\ -	rfid ;							\  	b	. ;	/* prevent speculative execution */  #if defined(CONFIG_RELOCATABLE) @@ -121,12 +113,13 @@ BEGIN_FTR_SECTION  	cmpwi	cr1,r13,2  	/* Total loss of HV state is fatal, we could try to use the  	 * PIR to locate a PACA, then use an emergency stack etc... -	 * but for now, let's just stay stuck here +	 * OPAL v3 based powernv platforms have new idle states +	 * which fall in this catagory.  	 */ -	bgt	cr1,. +	bgt	cr1,8f  	GET_PACA(r13) -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE  	li	r0,KVM_HWTHREAD_IN_KERNEL  	stb	r0,HSTATE_HWTHREAD_STATE(r13)  	/* Order setting hwthread_state vs. testing hwthread_req */ @@ -139,8 +132,13 @@ BEGIN_FTR_SECTION  #endif  	beq	cr1,2f -	b	.power7_wakeup_noloss -2:	b	.power7_wakeup_loss +	b	power7_wakeup_noloss +2:	b	power7_wakeup_loss + +	/* Fast Sleep wakeup on PowerNV */ +8:	GET_PACA(r13) +	b 	power7_wakeup_tb_loss +  9:  END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)  #endif /* CONFIG_PPC_P7_NAP */ @@ -155,8 +153,35 @@ machine_check_pSeries_1:  	 */  	HMT_MEDIUM_PPR_DISCARD  	SET_SCRATCH0(r13)		/* save r13 */ +#ifdef CONFIG_PPC_P7_NAP +BEGIN_FTR_SECTION +	/* Running native on arch 2.06 or later, check if we are +	 * waking up from nap. We only handle no state loss and +	 * supervisor state loss. We do -not- handle hypervisor +	 * state loss at this time. +	 */ +	mfspr	r13,SPRN_SRR1 +	rlwinm.	r13,r13,47-31,30,31 +	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) +	beq	9f + +	mfspr	r13,SPRN_SRR1 +	rlwinm.	r13,r13,47-31,30,31 +	/* waking up from powersave (nap) state */ +	cmpwi	cr1,r13,2 +	/* Total loss of HV state is fatal. let's just stay stuck here */ +	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) +	bgt	cr1,. +9: +	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) +#endif /* CONFIG_PPC_P7_NAP */  	EXCEPTION_PROLOG_0(PACA_EXMC) +BEGIN_FTR_SECTION +	b	machine_check_pSeries_early +FTR_SECTION_ELSE  	b	machine_check_pSeries_0 +ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)  	. = 0x300  	.globl data_access_pSeries @@ -186,16 +211,16 @@ data_access_slb_pSeries:  #endif /* __DISABLED__ */  	mfspr	r12,SPRN_SRR1  #ifndef CONFIG_RELOCATABLE -	b	.slb_miss_realmode +	b	slb_miss_realmode  #else  	/* -	 * We can't just use a direct branch to .slb_miss_realmode +	 * We can't just use a direct branch to slb_miss_realmode  	 * because the distance from here to there depends on where  	 * the kernel ends up being put.  	 
*/  	mfctr	r11  	ld	r10,PACAKBASE(r13) -	LOAD_HANDLER(r10, .slb_miss_realmode) +	LOAD_HANDLER(r10, slb_miss_realmode)  	mtctr	r10  	bctr  #endif @@ -218,11 +243,11 @@ instruction_access_slb_pSeries:  #endif /* __DISABLED__ */  	mfspr	r12,SPRN_SRR1  #ifndef CONFIG_RELOCATABLE -	b	.slb_miss_realmode +	b	slb_miss_realmode  #else  	mfctr	r11  	ld	r10,PACAKBASE(r13) -	LOAD_HANDLER(r10, .slb_miss_realmode) +	LOAD_HANDLER(r10, slb_miss_realmode)  	mtctr	r10  	bctr  #endif @@ -405,6 +430,80 @@ denorm_exception_hv:  	.align	7  	/* moved from 0x200 */ +machine_check_pSeries_early: +BEGIN_FTR_SECTION +	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200) +	/* +	 * Register contents: +	 * R13		= PACA +	 * R9		= CR +	 * Original R9 to R13 is saved on PACA_EXMC +	 * +	 * Switch to mc_emergency stack and handle re-entrancy (we limit +	 * the nested MCE upto level 4 to avoid stack overflow). +	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1 +	 * +	 * We use paca->in_mce to check whether this is the first entry or +	 * nested machine check. We increment paca->in_mce to track nested +	 * machine checks. +	 * +	 * If this is the first entry then set stack pointer to +	 * paca->mc_emergency_sp, otherwise r1 is already pointing to +	 * stack frame on mc_emergency stack. +	 * +	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a +	 * checkstop if we get another machine check exception before we do +	 * rfid with MSR_ME=1. +	 */ +	mr	r11,r1			/* Save r1 */ +	lhz	r10,PACA_IN_MCE(r13) +	cmpwi	r10,0			/* Are we in nested machine check */ +	bne	0f			/* Yes, we are. */ +	/* First machine check entry */ +	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */ +0:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */ +	addi	r10,r10,1		/* increment paca->in_mce */ +	sth	r10,PACA_IN_MCE(r13) +	/* Limit nested MCE to level 4 to avoid stack overflow */ +	cmpwi	r10,4 +	bgt	2f			/* Check if we hit limit of 4 */ +	std	r11,GPR1(r1)		/* Save r1 on the stack. */ +	std	r11,0(r1)		/* make stack chain pointer */ +	mfspr	r11,SPRN_SRR0		/* Save SRR0 */ +	std	r11,_NIP(r1) +	mfspr	r11,SPRN_SRR1		/* Save SRR1 */ +	std	r11,_MSR(r1) +	mfspr	r11,SPRN_DAR		/* Save DAR */ +	std	r11,_DAR(r1) +	mfspr	r11,SPRN_DSISR		/* Save DSISR */ +	std	r11,_DSISR(r1) +	std	r9,_CCR(r1)		/* Save CR in stackframe */ +	/* Save r9 through r13 from EXMC save area to stack frame. */ +	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC) +	mfmsr	r11			/* get MSR value */ +	ori	r11,r11,MSR_ME		/* turn on ME bit */ +	ori	r11,r11,MSR_RI		/* turn on RI bit */ +	ld	r12,PACAKBASE(r13)	/* get high part of &label */ +	LOAD_HANDLER(r12, machine_check_handle_early) +1:	mtspr	SPRN_SRR0,r12 +	mtspr	SPRN_SRR1,r11 +	rfid +	b	.	/* prevent speculative execution */ +2: +	/* Stack overflow. Stay on emergency stack and panic. +	 * Keep the ME bit off while panic-ing, so that if we hit +	 * another machine check we checkstop. +	 */ +	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */ +	ld	r11,PACAKMSR(r13) +	ld	r12,PACAKBASE(r13) +	LOAD_HANDLER(r12, unrecover_mce) +	li	r10,MSR_ME +	andc	r11,r11,r10		/* Turn off MSR_ME */ +	b	1b +	b	.	
/* prevent speculative execution */ +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +  machine_check_pSeries:  	.globl machine_check_fwnmi  machine_check_fwnmi: @@ -425,7 +524,7 @@ data_access_check_stab:  	mfspr	r9,SPRN_DSISR  	srdi	r10,r10,60  	rlwimi	r10,r9,16,0x20 -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE  	lbz	r9,HSTATE_IN_GUEST(r13)  	rlwimi	r10,r9,8,0x300  #endif @@ -441,7 +540,7 @@ do_stab_bolted_pSeries:  	std	r12,PACA_EXSLB+EX_R12(r13)  	GET_SCRATCH0(r10)  	std	r10,PACA_EXSLB+EX_R13(r13) -	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) +	EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD)  	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)  	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) @@ -650,6 +749,32 @@ slb_miss_user_pseries:  	b	.				/* prevent spec. execution */  #endif /* __DISABLED__ */ +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER +kvmppc_skip_interrupt: +	/* +	 * Here all GPRs are unchanged from when the interrupt happened +	 * except for r13, which is saved in SPRG_SCRATCH0. +	 */ +	mfspr	r13, SPRN_SRR0 +	addi	r13, r13, 4 +	mtspr	SPRN_SRR0, r13 +	GET_SCRATCH0(r13) +	rfid +	b	. + +kvmppc_skip_Hinterrupt: +	/* +	 * Here all GPRs are unchanged from when the interrupt happened +	 * except for r13, which is saved in SPRG_SCRATCH0. +	 */ +	mfspr	r13, SPRN_HSRR0 +	addi	r13, r13, 4 +	mtspr	SPRN_HSRR0, r13 +	GET_SCRATCH0(r13) +	hrfid +	b	. +#endif +  /*   * Code from here down to __end_handlers is invoked from the   * exception prologs above.  Because the prologs assemble the @@ -660,62 +785,38 @@ slb_miss_user_pseries:  /*** Common interrupt handlers ***/ -	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) - -	/* -	 * Machine check is different because we use a different -	 * save area: PACA_EXMC instead of PACA_EXGEN. 
-	 */ -	.align	7 -	.globl machine_check_common -machine_check_common: - -	mfspr	r10,SPRN_DAR -	std	r10,PACA_EXGEN+EX_DAR(r13) -	mfspr	r10,SPRN_DSISR -	stw	r10,PACA_EXGEN+EX_DSISR(r13) -	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) -	FINISH_NAP -	DISABLE_INTS -	ld	r3,PACA_EXGEN+EX_DAR(r13) -	lwz	r4,PACA_EXGEN+EX_DSISR(r13) -	std	r3,_DAR(r1) -	std	r4,_DSISR(r1) -	bl	.save_nvgprs -	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.machine_check_exception -	b	.ret_from_except +	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)  	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) -	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) -	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) +	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt) +	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)  #ifdef CONFIG_PPC_DOORBELL -	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) +	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)  #else -	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) +	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)  #endif -	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) -	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) -	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) -	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) -	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) +	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) +	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception) +	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception) +	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt) +	STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception)  #ifdef CONFIG_PPC_DOORBELL -	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) +	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)  #else -	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) +	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)  #endif -	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) -	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) -	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) +	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception) +	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception) +	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)  #ifdef CONFIG_ALTIVEC -	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) +	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)  #else -	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) +	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)  #endif  #ifdef CONFIG_CBE_RAS -	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) -	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) -	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) +	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception) +	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception) +	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)  #endif /* CONFIG_CBE_RAS */  	/* @@ -744,16 +845,16 @@ data_access_slb_relon_pSeries:  	mfspr	r3,SPRN_DAR  	mfspr	r12,SPRN_SRR1  #ifndef CONFIG_RELOCATABLE 
-	b	.slb_miss_realmode +	b	slb_miss_realmode  #else  	/* -	 * We can't just use a direct branch to .slb_miss_realmode +	 * We can't just use a direct branch to slb_miss_realmode  	 * because the distance from here to there depends on where  	 * the kernel ends up being put.  	 */  	mfctr	r11  	ld	r10,PACAKBASE(r13) -	LOAD_HANDLER(r10, .slb_miss_realmode) +	LOAD_HANDLER(r10, slb_miss_realmode)  	mtctr	r10  	bctr  #endif @@ -769,11 +870,11 @@ instruction_access_slb_relon_pSeries:  	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */  	mfspr	r12,SPRN_SRR1  #ifndef CONFIG_RELOCATABLE -	b	.slb_miss_realmode +	b	slb_miss_realmode  #else  	mfctr	r11  	ld	r10,PACAKBASE(r13) -	LOAD_HANDLER(r10, .slb_miss_realmode) +	LOAD_HANDLER(r10, slb_miss_realmode)  	mtctr	r10  	bctr  #endif @@ -881,7 +982,7 @@ system_call_entry:  	b	system_call_common  ppc64_runlatch_on_trampoline: -	b	.__ppc64_runlatch_on +	b	__ppc64_runlatch_on  /*   * Here we have detected that the kernel stack pointer is bad. @@ -940,7 +1041,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)  	std	r12,RESULT(r1)  	std	r11,STACK_FRAME_OVERHEAD-16(r1)  1:	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.kernel_bad_stack +	bl	kernel_bad_stack  	b	1b  /* @@ -961,7 +1062,7 @@ data_access_common:  	ld	r3,PACA_EXGEN+EX_DAR(r13)  	lwz	r4,PACA_EXGEN+EX_DSISR(r13)  	li	r5,0x300 -	b	.do_hash_page		/* Try to handle as hpte fault */ +	b	do_hash_page		/* Try to handle as hpte fault */  	.align  7  	.globl  h_data_storage_common @@ -971,11 +1072,11 @@ h_data_storage_common:  	mfspr   r10,SPRN_HDSISR  	stw     r10,PACA_EXGEN+EX_DSISR(r13)  	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) -	bl      .save_nvgprs +	bl      save_nvgprs  	DISABLE_INTS  	addi    r3,r1,STACK_FRAME_OVERHEAD -	bl      .unknown_exception -	b       .ret_from_except +	bl      unknown_exception +	b       ret_from_except  	.align	7  	.globl instruction_access_common @@ -986,9 +1087,9 @@ instruction_access_common:  	ld	r3,_NIP(r1)  	andis.	r4,r12,0x5820  	li	r5,0x400 -	b	.do_hash_page		/* Try to handle as hpte fault */ +	b	do_hash_page		/* Try to handle as hpte fault */ -	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) +	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)  /*   * Here is the common SLB miss user that is used when going to virtual @@ -1003,7 +1104,7 @@ slb_miss_user_common:  	stw	r9,PACA_EXGEN+EX_CCR(r13)  	std	r10,PACA_EXGEN+EX_LR(r13)  	std	r11,PACA_EXGEN+EX_SRR0(r13) -	bl	.slb_allocate_user +	bl	slb_allocate_user  	ld	r10,PACA_EXGEN+EX_LR(r13)  	ld	r3,PACA_EXGEN+EX_R3(r13) @@ -1046,14 +1147,38 @@ slb_miss_fault:  unrecov_user_slb:  	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)  	DISABLE_INTS -	bl	.save_nvgprs +	bl	save_nvgprs  1:	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.unrecoverable_exception +	bl	unrecoverable_exception  	b	1b  #endif /* __DISABLED__ */ +	/* +	 * Machine check is different because we use a different +	 * save area: PACA_EXMC instead of PACA_EXGEN. 
+	 */ +	.align	7 +	.globl machine_check_common +machine_check_common: + +	mfspr	r10,SPRN_DAR +	std	r10,PACA_EXGEN+EX_DAR(r13) +	mfspr	r10,SPRN_DSISR +	stw	r10,PACA_EXGEN+EX_DSISR(r13) +	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) +	FINISH_NAP +	DISABLE_INTS +	ld	r3,PACA_EXGEN+EX_DAR(r13) +	lwz	r4,PACA_EXGEN+EX_DSISR(r13) +	std	r3,_DAR(r1) +	std	r4,_DSISR(r1) +	bl	save_nvgprs +	addi	r3,r1,STACK_FRAME_OVERHEAD +	bl	machine_check_exception +	b	ret_from_except +  	.align	7  	.globl alignment_common  alignment_common: @@ -1066,31 +1191,31 @@ alignment_common:  	lwz	r4,PACA_EXGEN+EX_DSISR(r13)  	std	r3,_DAR(r1)  	std	r4,_DSISR(r1) -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.alignment_exception -	b	.ret_from_except +	bl	alignment_exception +	b	ret_from_except  	.align	7  	.globl program_check_common  program_check_common:  	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.program_check_exception -	b	.ret_from_except +	bl	program_check_exception +	b	ret_from_except  	.align	7  	.globl fp_unavailable_common  fp_unavailable_common:  	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)  	bne	1f			/* if from user, just load it up */ -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.kernel_fp_unavailable_exception +	bl	kernel_fp_unavailable_exception  	BUG_OPCODE  1:  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1102,15 +1227,15 @@ BEGIN_FTR_SECTION  	bne-	2f  END_FTR_SECTION_IFSET(CPU_FTR_TM)  #endif -	bl	.load_up_fpu +	bl	load_up_fpu  	b	fast_exception_return  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM  2:	/* User process was in a transaction */ -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.fp_unavailable_tm -	b	.ret_from_except +	bl	fp_unavailable_tm +	b	ret_from_except  #endif  	.align	7  	.globl altivec_unavailable_common @@ -1128,24 +1253,24 @@ BEGIN_FTR_SECTION  	bne-	2f    END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)  #endif -	bl	.load_up_altivec +	bl	load_up_altivec  	b	fast_exception_return  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM  2:	/* User process was in a transaction */ -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.altivec_unavailable_tm -	b	.ret_from_except +	bl	altivec_unavailable_tm +	b	ret_from_except  #endif  1:  END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)  #endif -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.altivec_unavailable_exception -	b	.ret_from_except +	bl	altivec_unavailable_exception +	b	ret_from_except  	.align	7  	.globl vsx_unavailable_common @@ -1163,26 +1288,26 @@ BEGIN_FTR_SECTION  	bne-	2f    END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)  #endif -	b	.load_up_vsx +	b	load_up_vsx  #ifdef CONFIG_PPC_TRANSACTIONAL_MEM  2:	/* User process was in a transaction */ -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.vsx_unavailable_tm -	b	.ret_from_except +	bl	vsx_unavailable_tm +	b	ret_from_except  #endif  1:  END_FTR_SECTION_IFSET(CPU_FTR_VSX)  #endif -	bl	.save_nvgprs +	bl	save_nvgprs  	DISABLE_INTS  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.vsx_unavailable_exception -	b	.ret_from_except +	bl	vsx_unavailable_exception +	b	ret_from_except -	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) -	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) +	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, 
facility_unavailable_exception) +	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)  	.align	7  	.globl	__end_handlers @@ -1237,6 +1362,154 @@ _GLOBAL(opal_mc_secondary_handler)  #endif /* CONFIG_PPC_POWERNV */ +#define MACHINE_CHECK_HANDLER_WINDUP			\ +	/* Clear MSR_RI before setting SRR0 and SRR1. */\ +	li	r0,MSR_RI;				\ +	mfmsr	r9;		/* get MSR value */	\ +	andc	r9,r9,r0;				\ +	mtmsrd	r9,1;		/* Clear MSR_RI */	\ +	/* Move original SRR0 and SRR1 into the respective regs */	\ +	ld	r9,_MSR(r1);				\ +	mtspr	SPRN_SRR1,r9;				\ +	ld	r3,_NIP(r1);				\ +	mtspr	SPRN_SRR0,r3;				\ +	ld	r9,_CTR(r1);				\ +	mtctr	r9;					\ +	ld	r9,_XER(r1);				\ +	mtxer	r9;					\ +	ld	r9,_LINK(r1);				\ +	mtlr	r9;					\ +	REST_GPR(0, r1);				\ +	REST_8GPRS(2, r1);				\ +	REST_GPR(10, r1);				\ +	ld	r11,_CCR(r1);				\ +	mtcr	r11;					\ +	/* Decrement paca->in_mce. */			\ +	lhz	r12,PACA_IN_MCE(r13);			\ +	subi	r12,r12,1;				\ +	sth	r12,PACA_IN_MCE(r13);			\ +	REST_GPR(11, r1);				\ +	REST_2GPRS(12, r1);				\ +	/* restore original r1. */			\ +	ld	r1,GPR1(r1) + +	/* +	 * Handle machine check early in real mode. We come here with +	 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack. +	 */ +	.align	7 +	.globl machine_check_handle_early +machine_check_handle_early: +	std	r0,GPR0(r1)	/* Save r0 */ +	EXCEPTION_PROLOG_COMMON_3(0x200) +	bl	save_nvgprs +	addi	r3,r1,STACK_FRAME_OVERHEAD +	bl	machine_check_early +	std	r3,RESULT(r1)	/* Save result */ +	ld	r12,_MSR(r1) +#ifdef	CONFIG_PPC_P7_NAP +	/* +	 * Check if thread was in power saving mode. We come here when any +	 * of the following is true: +	 * a. thread wasn't in power saving mode +	 * b. thread was in power saving mode with no state loss or +	 *    supervisor state loss +	 * +	 * Go back to nap again if (b) is true. +	 */ +	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */ +	beq	4f			/* No, it wasn;t */ +	/* Thread was in power saving mode. Go back to nap again. */ +	cmpwi	r11,2 +	bne	3f +	/* Supervisor state loss */ +	li	r0,1 +	stb	r0,PACA_NAPSTATELOST(r13) +3:	bl	machine_check_queue_event +	MACHINE_CHECK_HANDLER_WINDUP +	GET_PACA(r13) +	ld	r1,PACAR1(r13) +	b	power7_enter_nap_mode +4: +#endif +	/* +	 * Check if we are coming from hypervisor userspace. If yes then we +	 * continue in host kernel in V mode to deliver the MC event. +	 */ +	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */ +	beq	5f +	andi.	r11,r12,MSR_PR		/* See if coming from user. */ +	bne	9f			/* continue in V mode if we are. */ + +5: +#ifdef CONFIG_KVM_BOOK3S_64_HV +	/* +	 * We are coming from kernel context. Check if we are coming from +	 * guest. if yes, then we can continue. We will fall through +	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest. +	 */ +	lbz	r11,HSTATE_IN_GUEST(r13) +	cmpwi	r11,0			/* Check if coming from guest */ +	bne	9f			/* continue if we are. */ +#endif +	/* +	 * At this point we are not sure about what context we come from. +	 * Queue up the MCE event and return from the interrupt. +	 * But before that, check if this is an un-recoverable exception. +	 * If yes, then stay on emergency stack and panic. +	 */ +	andi.	r11,r12,MSR_RI +	bne	2f +1:	mfspr	r11,SPRN_SRR0 +	ld	r10,PACAKBASE(r13) +	LOAD_HANDLER(r10,unrecover_mce) +	mtspr	SPRN_SRR0,r10 +	ld	r10,PACAKMSR(r13) +	/* +	 * We are going down. But there are chances that we might get hit by +	 * another MCE during panic path and we may run into unstable state +	 * with no way out. 
Hence, turn ME bit off while going down, so that +	 * when another MCE is hit during panic path, system will checkstop +	 * and hypervisor will get restarted cleanly by SP. +	 */ +	li	r3,MSR_ME +	andc	r10,r10,r3		/* Turn off MSR_ME */ +	mtspr	SPRN_SRR1,r10 +	rfid +	b	. +2: +	/* +	 * Check if we have successfully handled/recovered from error, if not +	 * then stay on emergency stack and panic. +	 */ +	ld	r3,RESULT(r1)	/* Load result */ +	cmpdi	r3,0		/* see if we handled MCE successfully */ + +	beq	1b		/* if !handled then panic */ +	/* +	 * Return from MC interrupt. +	 * Queue up the MCE event so that we can log it later, while +	 * returning from kernel or opal call. +	 */ +	bl	machine_check_queue_event +	MACHINE_CHECK_HANDLER_WINDUP +	rfid +9: +	/* Deliver the machine check to host kernel in V mode. */ +	MACHINE_CHECK_HANDLER_WINDUP +	b	machine_check_pSeries + +unrecover_mce: +	/* Invoke machine_check_exception to print MCE event and panic. */ +	addi	r3,r1,STACK_FRAME_OVERHEAD +	bl	machine_check_exception +	/* +	 * We will not reach here. Even if we did, there is no way out. Call +	 * unrecoverable_exception and die. +	 */ +1:	addi	r3,r1,STACK_FRAME_OVERHEAD +	bl	unrecoverable_exception +	b	1b  /*   * r13 points to the PACA, r9 contains the saved CR,   * r12 contain the saved SRR1, SRR0 is still ready for return @@ -1245,7 +1518,7 @@ _GLOBAL(opal_mc_secondary_handler)   * r3 is saved in paca->slb_r3   * We assume we aren't going to take any exceptions during this procedure.   */ -_GLOBAL(slb_miss_realmode) +slb_miss_realmode:  	mflr	r10  #ifdef CONFIG_RELOCATABLE  	mtctr	r11 @@ -1254,7 +1527,7 @@ _GLOBAL(slb_miss_realmode)  	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */  	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */ -	bl	.slb_allocate_realmode +	bl	slb_allocate_realmode  	/* All done -- return from exception. 
*/ @@ -1294,9 +1567,9 @@ _GLOBAL(slb_miss_realmode)  unrecov_slb:  	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)  	DISABLE_INTS -	bl	.save_nvgprs +	bl	save_nvgprs  1:	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.unrecoverable_exception +	bl	unrecoverable_exception  	b	1b @@ -1313,7 +1586,7 @@ power4_fixup_nap:   * Hash table stuff   */  	.align	7 -_STATIC(do_hash_page) +do_hash_page:  	std	r3,_DAR(r1)  	std	r4,_DSISR(r1) @@ -1350,7 +1623,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)  	 *  	 * at return r3 = 0 for success, 1 for page fault, negative for error  	 */ -	bl	.hash_page		/* build HPTE if possible */ +	bl	hash_page		/* build HPTE if possible */  	cmpdi	r3,0			/* see if hash_page succeeded */  	/* Success */ @@ -1364,35 +1637,35 @@ handle_page_fault:  11:	ld	r4,_DAR(r1)  	ld	r5,_DSISR(r1)  	addi	r3,r1,STACK_FRAME_OVERHEAD -	bl	.do_page_fault +	bl	do_page_fault  	cmpdi	r3,0  	beq+	12f -	bl	.save_nvgprs +	bl	save_nvgprs  	mr	r5,r3  	addi	r3,r1,STACK_FRAME_OVERHEAD  	lwz	r4,_DAR(r1) -	bl	.bad_page_fault -	b	.ret_from_except +	bl	bad_page_fault +	b	ret_from_except  /* We have a data breakpoint exception - handle it */  handle_dabr_fault: -	bl	.save_nvgprs +	bl	save_nvgprs  	ld      r4,_DAR(r1)  	ld      r5,_DSISR(r1)  	addi    r3,r1,STACK_FRAME_OVERHEAD -	bl      .do_break -12:	b       .ret_from_except_lite +	bl      do_break +12:	b       ret_from_except_lite  /* We have a page fault that hash_page could handle but HV refused   * the PTE insertion   */ -13:	bl	.save_nvgprs +13:	bl	save_nvgprs  	mr	r5,r3  	addi	r3,r1,STACK_FRAME_OVERHEAD  	ld	r4,_DAR(r1) -	bl	.low_hash_fault -	b	.ret_from_except +	bl	low_hash_fault +	b	ret_from_except  /*   * We come here as a result of a DSI at a point where we don't want @@ -1401,16 +1674,16 @@ handle_dabr_fault:   * were soft-disabled.  We want to invoke the exception handler for   * the access, or panic if there isn't a handler.   */ -77:	bl	.save_nvgprs +77:	bl	save_nvgprs  	mr	r4,r3  	addi	r3,r1,STACK_FRAME_OVERHEAD  	li	r5,SIGSEGV -	bl	.bad_page_fault -	b	.ret_from_except +	bl	bad_page_fault +	b	ret_from_except  	/* here we have a segment miss */  do_ste_alloc: -	bl	.ste_allocate		/* try to insert stab entry */ +	bl	ste_allocate		/* try to insert stab entry */  	cmpdi	r3,0  	bne-	handle_page_fault  	b	fast_exception_return @@ -1423,7 +1696,7 @@ do_ste_alloc:   * We assume (DAR >> 60) == 0xc.   */  	.align	7 -_GLOBAL(do_stab_bolted) +do_stab_bolted:  	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */  	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */  	mfspr	r11,SPRN_DAR			/* ea */  | 
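
Note on the new early machine-check path: machine_check_pSeries_early and the MACHINE_CHECK_HANDLER_WINDUP macro above track re-entrancy with paca->in_mce. The first entry switches to the mc_emergency stack, every entry increments the counter, nesting deeper than 4 levels is treated as stack overflow and routed to unrecover_mce, and the windup decrements the counter on the way out. The following is only a rough C sketch of that bookkeeping under those assumptions; the names (paca_like, mce_enter, mce_exit) are illustrative and are not kernel APIs, and the real logic is the assembly in the diff.

#include <stddef.h>

#define MAX_MCE_DEPTH	4	/* nesting limit enforced by the diff */

/* Hypothetical stand-in for the PACA fields used by the early MC path. */
struct paca_like {
	unsigned short in_mce;		/* mirrors paca->in_mce */
	void *mc_emergency_sp;		/* mirrors paca->mc_emergency_sp */
};

/*
 * Pick the stack for the early machine-check handler and bump the
 * nesting count.  Returns NULL when nesting exceeds MAX_MCE_DEPTH,
 * which the assembly treats as stack overflow and sends to
 * unrecover_mce.  (Actual frame allocation is omitted here.)
 */
static void *mce_enter(struct paca_like *p, void *current_sp)
{
	void *sp = p->in_mce ? current_sp	   /* nested: stay on the MC stack */
			     : p->mc_emergency_sp; /* first entry: switch stacks   */

	if (++p->in_mce > MAX_MCE_DEPTH)
		return NULL;
	return sp;
}

/* Mirrors the paca->in_mce decrement done by MACHINE_CHECK_HANDLER_WINDUP. */
static void mce_exit(struct paca_like *p)
{
	p->in_mce--;
}

Note also how the assembly sets MSR_ME and MSR_RI before rfid'ing into machine_check_handle_early, but deliberately keeps ME off on the unrecover_mce path so that a second machine check during the panic causes a checkstop rather than an endless loop.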

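Both the 0x100 prologue and the new 0x200 prologue classify a power-save wakeup by testing SRR1 bits 46:47 (the rlwinm. r13,r13,47-31,30,31 sequence): 1 means no state loss (power7_wakeup_noloss), 2 means supervisor state loss (power7_wakeup_loss), and 3, previously treated as fatal, is now routed to power7_wakeup_tb_loss for the Fast Sleep idle states exposed by OPAL v3 based powernv platforms. Below is a hedged C rendering of that field extraction; the function and enum names are made up for illustration.

#include <stdint.h>

/* Wake-state values as handled by the branches in the diff above. */
enum wake_state {
	WAKE_NOT_POWERSAVE = 0,	/* field is zero: not a power-save wakeup      */
	WAKE_NO_STATE_LOSS = 1,	/* -> power7_wakeup_noloss                     */
	WAKE_STATE_LOSS    = 2,	/* supervisor state lost -> power7_wakeup_loss */
	WAKE_HV_STATE_LOSS = 3,	/* -> power7_wakeup_tb_loss (Fast Sleep on
				 * powernv); previously a fatal condition      */
};

/*
 * Extract SRR1[46:47] (IBM bit numbering), i.e. bits 17:16 counted from
 * the least-significant end, matching rlwinm. r13,r13,47-31,30,31.
 */
static enum wake_state srr1_wake_state(uint64_t srr1)
{
	return (enum wake_state)((srr1 >> 16) & 0x3);
}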