diff options
Diffstat (limited to 'arch/mips/kernel/traps.c')
| -rw-r--r-- | arch/mips/kernel/traps.c | 374 | 
1 file changed, 268 insertions(+), 106 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index aec3408edd4..51706d6dd5b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -10,10 +10,12 @@   * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com   * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki   * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved. + * Copyright (C) 2014, Imagination Technologies Ltd.   */  #include <linux/bug.h>  #include <linux/compiler.h>  #include <linux/context_tracking.h> +#include <linux/cpu_pm.h>  #include <linux/kexec.h>  #include <linux/init.h>  #include <linux/kernel.h> @@ -39,6 +41,7 @@  #include <asm/break.h>  #include <asm/cop2.h>  #include <asm/cpu.h> +#include <asm/cpu-type.h>  #include <asm/dsp.h>  #include <asm/fpu.h>  #include <asm/fpu_emulator.h> @@ -46,6 +49,7 @@  #include <asm/mipsregs.h>  #include <asm/mipsmtregs.h>  #include <asm/module.h> +#include <asm/msa.h>  #include <asm/pgtable.h>  #include <asm/ptrace.h>  #include <asm/sections.h> @@ -76,7 +80,10 @@ extern asmlinkage void handle_ri_rdhwr(void);  extern asmlinkage void handle_cpu(void);  extern asmlinkage void handle_ov(void);  extern asmlinkage void handle_tr(void); +extern asmlinkage void handle_msa_fpe(void);  extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_ftlb(void); +extern asmlinkage void handle_msa(void);  extern asmlinkage void handle_mdmx(void);  extern asmlinkage void handle_watch(void);  extern asmlinkage void handle_mt(void); @@ -329,6 +336,7 @@ void show_regs(struct pt_regs *regs)  void show_registers(struct pt_regs *regs)  {  	const int field = 2 * sizeof(unsigned long); +	mm_segment_t old_fs = get_fs();  	__show_regs(regs);  	print_modules(); @@ -343,9 +351,13 @@ void show_registers(struct pt_regs *regs)  			printk("*HwTLS: %0*lx\n", field, tls);  	} +	if (!user_mode(regs)) +		/* Necessary for getting the correct stack content */ +		set_fs(KERNEL_DS);  	
show_stacktrace(current, regs);  	show_code((unsigned int __user *) regs->cp0_epc);  	printk("\n"); +	set_fs(old_fs);  }  static int regs_to_trapnr(struct pt_regs *regs) @@ -359,24 +371,16 @@ void __noreturn die(const char *str, struct pt_regs *regs)  {  	static int die_counter;  	int sig = SIGSEGV; -#ifdef CONFIG_MIPS_MT_SMTC -	unsigned long dvpret; -#endif /* CONFIG_MIPS_MT_SMTC */  	oops_enter(); -	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) +	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), +		       SIGSEGV) == NOTIFY_STOP)  		sig = 0;  	console_verbose();  	raw_spin_lock_irq(&die_lock); -#ifdef CONFIG_MIPS_MT_SMTC -	dvpret = dvpe(); -#endif /* CONFIG_MIPS_MT_SMTC */  	bust_spinlocks(1); -#ifdef CONFIG_MIPS_MT_SMTC -	mips_mt_regdump(dvpret); -#endif /* CONFIG_MIPS_MT_SMTC */  	printk("%s[#%d]:\n", str, ++die_counter);  	show_registers(regs); @@ -456,8 +460,8 @@ asmlinkage void do_be(struct pt_regs *regs)  	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",  	       data ? 
"Data" : "Instruction",  	       field, regs->cp0_epc, field, regs->regs[31]); -	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) -	    == NOTIFY_STOP) +	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), +		       SIGBUS) == NOTIFY_STOP)  		goto out;  	die_if_kernel("Oops", regs); @@ -622,7 +626,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)  		regs->regs[rt] = read_c0_count();  		return 0;  	case 3:		/* Count register resolution */ -		switch (current_cpu_data.cputype) { +		switch (current_cpu_type()) {  		case CPU_20KC:  		case CPU_25KF:  			regs->regs[rt] = 1; @@ -700,10 +704,12 @@ int process_fpemu_return(int sig, void __user *fault_addr)  		si.si_addr = fault_addr;  		si.si_signo = sig;  		if (sig == SIGSEGV) { +			down_read(¤t->mm->mmap_sem);  			if (find_vma(current->mm, (unsigned long)fault_addr))  				si.si_code = SEGV_ACCERR;  			else  				si.si_code = SEGV_MAPERR; +			up_read(¤t->mm->mmap_sem);  		} else {  			si.si_code = BUS_ADRERR;  		} @@ -726,8 +732,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)  	siginfo_t info = {0};  	prev_state = exception_enter(); -	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) -	    == NOTIFY_STOP) +	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), +		       SIGFPE) == NOTIFY_STOP)  		goto out;  	die_if_kernel("FP exception in kernel code", regs); @@ -797,7 +803,8 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,  		return;  #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ -	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) +	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), +		       SIGTRAP) == NOTIFY_STOP)  		return;  	/* @@ -852,6 +859,11 @@ asmlinkage void do_bp(struct pt_regs *regs)  	enum ctx_state prev_state;  	unsigned long epc;  	u16 instr[2]; +	mm_segment_t seg; + +	seg = get_fs(); +	if (!user_mode(regs)) +		
set_fs(KERNEL_DS);  	prev_state = exception_enter();  	if (get_isa16_mode(regs->cp0_epc)) { @@ -861,17 +873,19 @@ asmlinkage void do_bp(struct pt_regs *regs)  			if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||  			    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))  				goto out_sigsegv; -		    opcode = (instr[0] << 16) | instr[1]; +			opcode = (instr[0] << 16) | instr[1];  		} else { -		    /* MIPS16e mode */ -		    if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) +			/* MIPS16e mode */ +			if (__get_user(instr[0], +				       (u16 __user *)msk_isa16_mode(epc)))  				goto out_sigsegv; -		    bcode = (instr[0] >> 6) & 0x3f; -		    do_trap_or_bp(regs, bcode, "Break"); -		    goto out; +			bcode = (instr[0] >> 6) & 0x3f; +			do_trap_or_bp(regs, bcode, "Break"); +			goto out;  		}  	} else { -		if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) +		if (__get_user(opcode, +			       (unsigned int __user *) exception_epc(regs)))  			goto out_sigsegv;  	} @@ -891,12 +905,14 @@ asmlinkage void do_bp(struct pt_regs *regs)  	 */  	switch (bcode) {  	case BRK_KPROBE_BP: -		if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) +		if (notify_die(DIE_BREAK, "debug", regs, bcode, +			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)  			goto out;  		else  			break;  	case BRK_KPROBE_SSTEPBP: -		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) +		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, +			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)  			goto out;  		else  			break; @@ -907,6 +923,7 @@ asmlinkage void do_bp(struct pt_regs *regs)  	do_trap_or_bp(regs, bcode, "Break");  out: +	set_fs(seg);  	exception_exit(prev_state);  	return; @@ -920,8 +937,13 @@ asmlinkage void do_tr(struct pt_regs *regs)  	u32 opcode, tcode = 0;  	enum ctx_state prev_state;  	u16 instr[2]; +	mm_segment_t seg;  	unsigned long epc 
= msk_isa16_mode(exception_epc(regs)); +	seg = get_fs(); +	if (!user_mode(regs)) +		set_fs(get_ds()); +  	prev_state = exception_enter();  	if (get_isa16_mode(regs->cp0_epc)) {  		if (__get_user(instr[0], (u16 __user *)(epc + 0)) || @@ -942,6 +964,7 @@ asmlinkage void do_tr(struct pt_regs *regs)  	do_trap_or_bp(regs, tcode, "Trap");  out: +	set_fs(seg);  	exception_exit(prev_state);  	return; @@ -960,8 +983,8 @@ asmlinkage void do_ri(struct pt_regs *regs)  	int status = -1;  	prev_state = exception_enter(); -	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) -	    == NOTIFY_STOP) +	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), +		       SIGILL) == NOTIFY_STOP)  		goto out;  	die_if_kernel("Reserved instruction in kernel code", regs); @@ -1063,6 +1086,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,  	return NOTIFY_OK;  } +static int enable_restore_fp_context(int msa) +{ +	int err, was_fpu_owner; + +	if (!used_math()) { +		/* First time FP context user. */ +		err = init_fpu(); +		if (msa && !err) +			enable_msa(); +		if (!err) +			set_used_math(); +		return err; +	} + +	/* +	 * This task has formerly used the FP context. +	 * +	 * If this thread has no live MSA vector context then we can simply +	 * restore the scalar FP context. If it has live MSA vector context +	 * (that is, it has or may have used MSA since last performing a +	 * function call) then we'll need to restore the vector context. This +	 * applies even if we're currently only executing a scalar FP +	 * instruction. This is because if we were to later execute an MSA +	 * instruction then we'd either have to: +	 * +	 *  - Restore the vector context & clobber any registers modified by +	 *    scalar FP instructions between now & then. +	 * +	 * or +	 * +	 *  - Not restore the vector context & lose the most significant bits +	 *    of all vector registers. +	 * +	 * Neither of those options is acceptable. 
We cannot restore the least +	 * significant bits of the registers now & only restore the most +	 * significant bits later because the most significant bits of any +	 * vector registers whose aliased FP register is modified now will have +	 * been zeroed. We'd have no way to know that when restoring the vector +	 * context & thus may load an outdated value for the most significant +	 * bits of a vector register. +	 */ +	if (!msa && !thread_msa_context_live()) +		return own_fpu(1); + +	/* +	 * This task is using or has previously used MSA. Thus we require +	 * that Status.FR == 1. +	 */ +	was_fpu_owner = is_fpu_owner(); +	err = own_fpu(0); +	if (err) +		return err; + +	enable_msa(); +	write_msa_csr(current->thread.fpu.msacsr); +	set_thread_flag(TIF_USEDMSA); + +	/* +	 * If this is the first time that the task is using MSA and it has +	 * previously used scalar FP in this time slice then we already nave +	 * FP context which we shouldn't clobber. +	 */ +	if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner) +		return 0; + +	/* We need to restore the vector context. */ +	restore_msa(current); +	return 0; +} +  asmlinkage void do_cpu(struct pt_regs *regs)  {  	enum ctx_state prev_state; @@ -1070,7 +1163,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)  	unsigned long old_epc, old31;  	unsigned int opcode;  	unsigned int cpid; -	int status; +	int status, err;  	unsigned long __maybe_unused flags;  	prev_state = exception_enter(); @@ -1142,20 +1235,15 @@ asmlinkage void do_cpu(struct pt_regs *regs)  		/* Fall through.  */  	case 1: -		if (used_math())	/* Using the FPU again.	 */ -			own_fpu(1); -		else {			/* First time FPU user.	 
*/ -			init_fpu(); -			set_used_math(); -		} +		err = enable_restore_fp_context(0); -		if (!raw_cpu_has_fpu) { +		if (!raw_cpu_has_fpu || err) {  			int sig;  			void __user *fault_addr = NULL;  			sig = fpu_emulator_cop1Handler(regs,  						       ¤t->thread.fpu,  						       0, &fault_addr); -			if (!process_fpemu_return(sig, fault_addr)) +			if (!process_fpemu_return(sig, fault_addr) && !err)  				mt_ase_fp_affinity();  		} @@ -1172,6 +1260,37 @@ out:  	exception_exit(prev_state);  } +asmlinkage void do_msa_fpe(struct pt_regs *regs) +{ +	enum ctx_state prev_state; + +	prev_state = exception_enter(); +	die_if_kernel("do_msa_fpe invoked from kernel context!", regs); +	force_sig(SIGFPE, current); +	exception_exit(prev_state); +} + +asmlinkage void do_msa(struct pt_regs *regs) +{ +	enum ctx_state prev_state; +	int err; + +	prev_state = exception_enter(); + +	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { +		force_sig(SIGILL, current); +		goto out; +	} + +	die_if_kernel("do_msa invoked from kernel context!", regs); + +	err = enable_restore_fp_context(1); +	if (err) +		force_sig(SIGILL, current); +out: +	exception_exit(prev_state); +} +  asmlinkage void do_mdmx(struct pt_regs *regs)  {  	enum ctx_state prev_state; @@ -1326,6 +1445,10 @@ static inline void parity_protection_init(void)  	case CPU_34K:  	case CPU_74K:  	case CPU_1004K: +	case CPU_1074K: +	case CPU_INTERAPTIV: +	case CPU_PROAPTIV: +	case CPU_P5600:  		{  #define ERRCTL_PE	0x80000000  #define ERRCTL_L2P	0x00800000 @@ -1415,14 +1538,27 @@ asmlinkage void cache_parity_error(void)  	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",  	       reg_val & (1<<30) ? "secondary" : "primary",  	       reg_val & (1<<31) ? "data" : "insn"); -	printk("Error bits: %s%s%s%s%s%s%s\n", -	       reg_val & (1<<29) ? "ED " : "", -	       reg_val & (1<<28) ? "ET " : "", -	       reg_val & (1<<26) ? "EE " : "", -	       reg_val & (1<<25) ? "EB " : "", -	       reg_val & (1<<24) ? 
"EI " : "", -	       reg_val & (1<<23) ? "E1 " : "", -	       reg_val & (1<<22) ? "E0 " : ""); +	if (cpu_has_mips_r2 && +	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { +		pr_err("Error bits: %s%s%s%s%s%s%s%s\n", +			reg_val & (1<<29) ? "ED " : "", +			reg_val & (1<<28) ? "ET " : "", +			reg_val & (1<<27) ? "ES " : "", +			reg_val & (1<<26) ? "EE " : "", +			reg_val & (1<<25) ? "EB " : "", +			reg_val & (1<<24) ? "EI " : "", +			reg_val & (1<<23) ? "E1 " : "", +			reg_val & (1<<22) ? "E0 " : ""); +	} else { +		pr_err("Error bits: %s%s%s%s%s%s%s\n", +			reg_val & (1<<29) ? "ED " : "", +			reg_val & (1<<28) ? "ET " : "", +			reg_val & (1<<26) ? "EE " : "", +			reg_val & (1<<25) ? "EB " : "", +			reg_val & (1<<24) ? "EI " : "", +			reg_val & (1<<23) ? "E1 " : "", +			reg_val & (1<<22) ? "E0 " : ""); +	}  	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));  #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) @@ -1436,6 +1572,34 @@ asmlinkage void cache_parity_error(void)  	panic("Can't handle the cache error!");  } +asmlinkage void do_ftlb(void) +{ +	const int field = 2 * sizeof(unsigned long); +	unsigned int reg_val; + +	/* For the moment, report the problem and hang. */ +	if (cpu_has_mips_r2 && +	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { +		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", +		       read_c0_ecc()); +		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); +		reg_val = read_c0_cacheerr(); +		pr_err("c0_cacheerr == %08x\n", reg_val); + +		if ((reg_val & 0xc0000000) == 0xc0000000) { +			pr_err("Decoded c0_cacheerr: FTLB parity error\n"); +		} else { +			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", +			       reg_val & (1<<30) ? "secondary" : "primary", +			       reg_val & (1<<31) ? 
"data" : "insn"); +		} +	} else { +		pr_err("FTLB error exception\n"); +	} +	/* Just print the cacheerr bits for now */ +	cache_parity_error(); +} +  /*   * SDBBP EJTAG debug exception handler.   * We skip the instruction and return to the next instruction. @@ -1487,10 +1651,14 @@ int register_nmi_notifier(struct notifier_block *nb)  void __noreturn nmi_exception_handler(struct pt_regs *regs)  { +	char str[100]; +  	raw_notifier_call_chain(&nmi_chain, 0, regs);  	bust_spinlocks(1); -	printk("NMI taken!!!!\n"); -	die("NMI", regs); +	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n", +		 smp_processor_id(), regs->cp0_epc); +	regs->cp0_epc = read_c0_errorepc(); +	die(str, regs);  }  #define VECTORSPACING 0x100	/* for EI/VI mode */ @@ -1553,7 +1721,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)  	unsigned char *b;  	BUG_ON(!cpu_has_veic && !cpu_has_vint); -	BUG_ON((n < 0) && (n > 9));  	if (addr == NULL) {  		handler = (unsigned long) do_default_vi; @@ -1586,19 +1753,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)  		extern char rollback_except_vec_vi;  		char *vec_start = using_rollback_handler() ?  			&rollback_except_vec_vi : &except_vec_vi; -#ifdef CONFIG_MIPS_MT_SMTC -		/* -		 * We need to provide the SMTC vectored interrupt handler -		 * not only with the address of the handler, but with the -		 * Status.IM bit to be masked before going there. 
-		 */ -		extern char except_vec_vi_mori; -#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) -		const int mori_offset = &except_vec_vi_mori - vec_start + 2; -#else -		const int mori_offset = &except_vec_vi_mori - vec_start; -#endif -#endif /* CONFIG_MIPS_MT_SMTC */  #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)  		const int lui_offset = &except_vec_vi_lui - vec_start + 2;  		const int ori_offset = &except_vec_vi_ori - vec_start + 2; @@ -1622,12 +1776,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)  #else  				handler_len);  #endif -#ifdef CONFIG_MIPS_MT_SMTC -		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */ - -		h = (u16 *)(b + mori_offset); -		*h = (0x100 << n); -#endif /* CONFIG_MIPS_MT_SMTC */  		h = (u16 *)(b + lui_offset);  		*h = (handler >> 16) & 0xffff;  		h = (u16 *)(b + ori_offset); @@ -1692,32 +1840,16 @@ static int __init ulri_disable(char *s)  }  __setup("noulri", ulri_disable); -void per_cpu_trap_init(bool is_boot_cpu) +/* configure STATUS register */ +static void configure_status(void)  { -	unsigned int cpu = smp_processor_id(); -	unsigned int status_set = ST0_CU0; -	unsigned int hwrena = cpu_hwrena_impl_bits; -#ifdef CONFIG_MIPS_MT_SMTC -	int secondaryTC = 0; -	int bootTC = (cpu == 0); - -	/* -	 * Only do per_cpu_trap_init() for first TC of Each VPE. -	 * Note that this hack assumes that the SMTC init code -	 * assigns TCs consecutively and in ascending order. -	 */ - -	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && -	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) -		secondaryTC = 1; -#endif /* CONFIG_MIPS_MT_SMTC */ -  	/*  	 * Disable coprocessors and select 32-bit or 64-bit addressing  	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV  	 * flag that some firmware may have left set and the TS bit (for  	 * IP27).  Set XX for ISA IV code to work.  	 
*/ +	unsigned int status_set = ST0_CU0;  #ifdef CONFIG_64BIT  	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;  #endif @@ -1728,6 +1860,12 @@ void per_cpu_trap_init(bool is_boot_cpu)  	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,  			 status_set); +} + +/* configure HWRENA register */ +static void configure_hwrena(void) +{ +	unsigned int hwrena = cpu_hwrena_impl_bits;  	if (cpu_has_mips_r2)  		hwrena |= 0x0000000f; @@ -1737,11 +1875,10 @@ void per_cpu_trap_init(bool is_boot_cpu)  	if (hwrena)  		write_c0_hwrena(hwrena); +} -#ifdef CONFIG_MIPS_MT_SMTC -	if (!secondaryTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ - +static void configure_exception_vector(void) +{  	if (cpu_has_veic || cpu_has_vint) {  		unsigned long sr = set_c0_status(ST0_BEV);  		write_c0_ebase(ebase); @@ -1757,6 +1894,16 @@ void per_cpu_trap_init(bool is_boot_cpu)  		} else  			set_c0_cause(CAUSEF_IV);  	} +} + +void per_cpu_trap_init(bool is_boot_cpu) +{ +	unsigned int cpu = smp_processor_id(); + +	configure_status(); +	configure_hwrena(); + +	configure_exception_vector();  	/*  	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only: @@ -1776,10 +1923,6 @@ void per_cpu_trap_init(bool is_boot_cpu)  		cp0_perfcount_irq = -1;  	} -#ifdef CONFIG_MIPS_MT_SMTC -	} -#endif /* CONFIG_MIPS_MT_SMTC */ -  	if (!cpu_data[cpu].asid_cache)  		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; @@ -1788,23 +1931,10 @@ void per_cpu_trap_init(bool is_boot_cpu)  	BUG_ON(current->mm);  	enter_lazy_tlb(&init_mm, current); -#ifdef CONFIG_MIPS_MT_SMTC -	if (bootTC) { -#endif /* CONFIG_MIPS_MT_SMTC */  		/* Boot CPU's cache setup in setup_arch(). */  		if (!is_boot_cpu)  			cpu_cache_init();  		tlb_init(); -#ifdef CONFIG_MIPS_MT_SMTC -	} else if (!secondaryTC) { -		/* -		 * First TC in non-boot VPE must do subset of tlb_init() -		 * for MMU countrol registers. 
-		 */ -		write_c0_pagemask(PM_DEFAULT_MASK); -		write_c0_wired(0); -	} -#endif /* CONFIG_MIPS_MT_SMTC */  	TLBMISS_HANDLER_SETUP();  } @@ -1960,6 +2090,7 @@ void __init trap_init(void)  	set_except_vector(11, handle_cpu);  	set_except_vector(12, handle_ov);  	set_except_vector(13, handle_tr); +	set_except_vector(14, handle_msa_fpe);  	if (current_cpu_type() == CPU_R6000 ||  	    current_cpu_type() == CPU_R6000A) { @@ -1982,6 +2113,8 @@ void __init trap_init(void)  	if (cpu_has_fpu && !cpu_has_nofpuex)  		set_except_vector(15, handle_fpe); +	set_except_vector(16, handle_ftlb); +	set_except_vector(21, handle_msa);  	set_except_vector(22, handle_mdmx);  	if (cpu_has_mcheck) @@ -2009,3 +2142,32 @@ void __init trap_init(void)  	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */  } + +static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, +			    void *v) +{ +	switch (cmd) { +	case CPU_PM_ENTER_FAILED: +	case CPU_PM_EXIT: +		configure_status(); +		configure_hwrena(); +		configure_exception_vector(); + +		/* Restore register with CPU number for TLB handlers */ +		TLBMISS_HANDLER_RESTORE(); + +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block trap_pm_notifier_block = { +	.notifier_call = trap_pm_notifier, +}; + +static int __init trap_pm_init(void) +{ +	return cpu_pm_register_notifier(&trap_pm_notifier_block); +} +arch_initcall(trap_pm_init);  | 
