diff options
Diffstat (limited to 'arch/mips/include')
178 files changed, 5918 insertions, 3882 deletions
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 454ddf9bb76..05439187891 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -2,10 +2,13 @@  generic-y += cputime.h  generic-y += current.h  generic-y += emergency-restart.h +generic-y += hash.h  generic-y += local64.h +generic-y += mcs_spinlock.h  generic-y += mutex.h  generic-y += parport.h  generic-y += percpu.h +generic-y += preempt.h  generic-y += scatterlist.h  generic-y += sections.h  generic-y += segment.h diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h index 13d61c002e4..3f745459fdb 100644 --- a/arch/mips/include/asm/addrspace.h +++ b/arch/mips/include/asm/addrspace.h @@ -58,7 +58,7 @@  /*   * Memory segments (64bit kernel mode addresses) - * The compatibility segments use the full 64-bit sign extended value.	Note + * The compatibility segments use the full 64-bit sign extended value.  Note   * the R8000 doesn't have them so don't reference these in generic MIPS code.   */  #define XKUSEG			_CONST64_(0x0000000000000000) @@ -131,7 +131,7 @@  /*   * The ultimate limited of the 64-bit MIPS architecture:  2 bits for selecting - * the region, 3 bits for the CCA mode.	 This leaves 59 bits of which the + * the region, 3 bits for the CCA mode.  This leaves 59 bits of which the   * R8000 implements most with its 48-bit physical address space.   */  #define TO_PHYS_MASK	_CONST64_(0x07ffffffffffffff)	/* 2^^59 - 1 */ diff --git a/arch/mips/include/asm/amon.h b/arch/mips/include/asm/amon.h index c3dc1a68dd8..3cc03c64a9c 100644 --- a/arch/mips/include/asm/amon.h +++ b/arch/mips/include/asm/amon.h @@ -1,7 +1,12 @@  /* - * Amon support + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Imagination Technologies Ltd. 
+ * + * Arbitrary Monitor Support (AMON)   */ - -int amon_cpu_avail(int); -void amon_cpu_start(int, unsigned long, unsigned long, -		    unsigned long, unsigned long); +int amon_cpu_avail(int cpu); +int amon_cpu_start(int cpu, unsigned long pc, unsigned long sp, +		   unsigned long gp, unsigned long a0); diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h new file mode 100644 index 00000000000..e41c56e375b --- /dev/null +++ b/arch/mips/include/asm/asm-eva.h @@ -0,0 +1,135 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2014 Imagination Technologies Ltd. + * + */ + +#ifndef __ASM_ASM_EVA_H +#define __ASM_ASM_EVA_H + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_EVA + +#define __BUILD_EVA_INSN(insn, reg, addr)				\ +				"	.set	push\n"			\ +				"	.set	mips0\n"		\ +				"	.set	eva\n"			\ +				"	"insn" "reg", "addr "\n"	\ +				"	.set	pop\n" + +#define user_cache(op, base)		__BUILD_EVA_INSN("cachee", op, base) +#define user_ll(reg, addr)		__BUILD_EVA_INSN("lle", reg, addr) +#define user_sc(reg, addr)		__BUILD_EVA_INSN("sce", reg, addr) +#define user_lw(reg, addr)		__BUILD_EVA_INSN("lwe", reg, addr) +#define user_lwl(reg, addr)		__BUILD_EVA_INSN("lwle", reg, addr) +#define user_lwr(reg, addr)		__BUILD_EVA_INSN("lwre", reg, addr) +#define user_lh(reg, addr)		__BUILD_EVA_INSN("lhe", reg, addr) +#define user_lb(reg, addr)		__BUILD_EVA_INSN("lbe", reg, addr) +#define user_lbu(reg, addr)		__BUILD_EVA_INSN("lbue", reg, addr) +/* No 64-bit EVA instruction for loading double words */ +#define user_ld(reg, addr)		user_lw(reg, addr) +#define user_sw(reg, addr)		__BUILD_EVA_INSN("swe", reg, addr) +#define user_swl(reg, addr)		__BUILD_EVA_INSN("swle", reg, addr) +#define user_swr(reg, addr)		__BUILD_EVA_INSN("swre", reg, addr) +#define user_sh(reg, addr)		__BUILD_EVA_INSN("she", reg, addr) +#define user_sb(reg, 
addr)		__BUILD_EVA_INSN("sbe", reg, addr) +/* No 64-bit EVA instruction for storing double words */ +#define user_sd(reg, addr)		user_sw(reg, addr) + +#else + +#define user_cache(op, base)		"cache " op ", " base "\n" +#define user_ll(reg, addr)		"ll " reg ", " addr "\n" +#define user_sc(reg, addr)		"sc " reg ", " addr "\n" +#define user_lw(reg, addr)		"lw " reg ", " addr "\n" +#define user_lwl(reg, addr)		"lwl " reg ", " addr "\n" +#define user_lwr(reg, addr)		"lwr " reg ", " addr "\n" +#define user_lh(reg, addr)		"lh " reg ", " addr "\n" +#define user_lb(reg, addr)		"lb " reg ", " addr "\n" +#define user_lbu(reg, addr)		"lbu " reg ", " addr "\n" +#define user_sw(reg, addr)		"sw " reg ", " addr "\n" +#define user_swl(reg, addr)		"swl " reg ", " addr "\n" +#define user_swr(reg, addr)		"swr " reg ", " addr "\n" +#define user_sh(reg, addr)		"sh " reg ", " addr "\n" +#define user_sb(reg, addr)		"sb " reg ", " addr "\n" + +#ifdef CONFIG_32BIT +/* + * No 'sd' or 'ld' instructions in 32-bit but the code will + * do the correct thing + */ +#define user_sd(reg, addr)		user_sw(reg, addr) +#define user_ld(reg, addr)		user_lw(reg, addr) +#else +#define user_sd(reg, addr)		"sd " reg", " addr "\n" +#define user_ld(reg, addr)		"ld " reg", " addr "\n" +#endif /* CONFIG_32BIT */ + +#endif /* CONFIG_EVA */ + +#else /* __ASSEMBLY__ */ + +#ifdef CONFIG_EVA + +#define __BUILD_EVA_INSN(insn, reg, addr)			\ +				.set	push;			\ +				.set	mips0;			\ +				.set	eva;			\ +				insn reg, addr;			\ +				.set	pop; + +#define user_cache(op, base)		__BUILD_EVA_INSN(cachee, op, base) +#define user_ll(reg, addr)		__BUILD_EVA_INSN(lle, reg, addr) +#define user_sc(reg, addr)		__BUILD_EVA_INSN(sce, reg, addr) +#define user_lw(reg, addr)		__BUILD_EVA_INSN(lwe, reg, addr) +#define user_lwl(reg, addr)		__BUILD_EVA_INSN(lwle, reg, addr) +#define user_lwr(reg, addr)		__BUILD_EVA_INSN(lwre, reg, addr) +#define user_lh(reg, addr)		__BUILD_EVA_INSN(lhe, reg, addr) +#define user_lb(reg, addr)		
__BUILD_EVA_INSN(lbe, reg, addr) +#define user_lbu(reg, addr)		__BUILD_EVA_INSN(lbue, reg, addr) +/* No 64-bit EVA instruction for loading double words */ +#define user_ld(reg, addr)		user_lw(reg, addr) +#define user_sw(reg, addr)		__BUILD_EVA_INSN(swe, reg, addr) +#define user_swl(reg, addr)		__BUILD_EVA_INSN(swle, reg, addr) +#define user_swr(reg, addr)		__BUILD_EVA_INSN(swre, reg, addr) +#define user_sh(reg, addr)		__BUILD_EVA_INSN(she, reg, addr) +#define user_sb(reg, addr)		__BUILD_EVA_INSN(sbe, reg, addr) +/* No 64-bit EVA instruction for loading double words */ +#define user_sd(reg, addr)		user_sw(reg, addr) +#else + +#define user_cache(op, base)		cache op, base +#define user_ll(reg, addr)		ll reg, addr +#define user_sc(reg, addr)		sc reg, addr +#define user_lw(reg, addr)		lw reg, addr +#define user_lwl(reg, addr)		lwl reg, addr +#define user_lwr(reg, addr)		lwr reg, addr +#define user_lh(reg, addr)		lh reg, addr +#define user_lb(reg, addr)		lb reg, addr +#define user_lbu(reg, addr)		lbu reg, addr +#define user_sw(reg, addr)		sw reg, addr +#define user_swl(reg, addr)		swl reg, addr +#define user_swr(reg, addr)		swr reg, addr +#define user_sh(reg, addr)		sh reg, addr +#define user_sb(reg, addr)		sb reg, addr + +#ifdef CONFIG_32BIT +/* + * No 'sd' or 'ld' instructions in 32-bit but the code will + * do the correct thing + */ +#define user_sd(reg, addr)		user_sw(reg, addr) +#define user_ld(reg, addr)		user_lw(reg, addr) +#else +#define user_sd(reg, addr)		sd reg, addr +#define user_ld(reg, addr)		ld reg, addr +#endif /* CONFIG_32BIT */ + +#endif /* CONFIG_EVA */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ASM_EVA_H */ diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 879691d194a..7c26b28bf25 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -18,6 +18,7 @@  #define __ASM_ASM_H  #include <asm/sgidefs.h> +#include <asm/asm-eva.h>  #ifndef CAT  #ifdef __STDC__ @@ -145,19 +146,27 @@ symbol		=	value  #define 
PREF(hint,addr)					\  		.set	push;				\ -		.set	mips4;				\ +		.set	arch=r5000;			\  		pref	hint, addr;			\  		.set	pop +#define PREFE(hint, addr)				\ +		.set	push;				\ +		.set	mips0;				\ +		.set	eva;				\ +		prefe	hint, addr;			\ +		.set	pop +  #define PREFX(hint,addr)				\  		.set	push;				\ -		.set	mips4;				\ +		.set	arch=r5000;			\  		prefx	hint, addr;			\  		.set	pop  #else /* !CONFIG_CPU_HAS_PREFETCH */  #define PREF(hint, addr) +#define PREFE(hint, addr)  #define PREFX(hint, addr)  #endif /* !CONFIG_CPU_HAS_PREFETCH */ diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h index 2413afe21b3..e38c2811d4e 100644 --- a/arch/mips/include/asm/asmmacro-32.h +++ b/arch/mips/include/asm/asmmacro-32.h @@ -12,119 +12,77 @@  #include <asm/fpregdef.h>  #include <asm/mipsregs.h> -	.macro	fpu_save_double thread status tmp1=t0 -	cfc1	\tmp1,  fcr31 -	sdc1	$f0,  THREAD_FPR0(\thread) -	sdc1	$f2,  THREAD_FPR2(\thread) -	sdc1	$f4,  THREAD_FPR4(\thread) -	sdc1	$f6,  THREAD_FPR6(\thread) -	sdc1	$f8,  THREAD_FPR8(\thread) -	sdc1	$f10, THREAD_FPR10(\thread) -	sdc1	$f12, THREAD_FPR12(\thread) -	sdc1	$f14, THREAD_FPR14(\thread) -	sdc1	$f16, THREAD_FPR16(\thread) -	sdc1	$f18, THREAD_FPR18(\thread) -	sdc1	$f20, THREAD_FPR20(\thread) -	sdc1	$f22, THREAD_FPR22(\thread) -	sdc1	$f24, THREAD_FPR24(\thread) -	sdc1	$f26, THREAD_FPR26(\thread) -	sdc1	$f28, THREAD_FPR28(\thread) -	sdc1	$f30, THREAD_FPR30(\thread) -	sw	\tmp1, THREAD_FCR31(\thread) -	.endm -  	.macro	fpu_save_single thread tmp=t0  	cfc1	\tmp,  fcr31 -	swc1	$f0,  THREAD_FPR0(\thread) -	swc1	$f1,  THREAD_FPR1(\thread) -	swc1	$f2,  THREAD_FPR2(\thread) -	swc1	$f3,  THREAD_FPR3(\thread) -	swc1	$f4,  THREAD_FPR4(\thread) -	swc1	$f5,  THREAD_FPR5(\thread) -	swc1	$f6,  THREAD_FPR6(\thread) -	swc1	$f7,  THREAD_FPR7(\thread) -	swc1	$f8,  THREAD_FPR8(\thread) -	swc1	$f9,  THREAD_FPR9(\thread) -	swc1	$f10, THREAD_FPR10(\thread) -	swc1	$f11, THREAD_FPR11(\thread) -	swc1	$f12, THREAD_FPR12(\thread) -	swc1	
$f13, THREAD_FPR13(\thread) -	swc1	$f14, THREAD_FPR14(\thread) -	swc1	$f15, THREAD_FPR15(\thread) -	swc1	$f16, THREAD_FPR16(\thread) -	swc1	$f17, THREAD_FPR17(\thread) -	swc1	$f18, THREAD_FPR18(\thread) -	swc1	$f19, THREAD_FPR19(\thread) -	swc1	$f20, THREAD_FPR20(\thread) -	swc1	$f21, THREAD_FPR21(\thread) -	swc1	$f22, THREAD_FPR22(\thread) -	swc1	$f23, THREAD_FPR23(\thread) -	swc1	$f24, THREAD_FPR24(\thread) -	swc1	$f25, THREAD_FPR25(\thread) -	swc1	$f26, THREAD_FPR26(\thread) -	swc1	$f27, THREAD_FPR27(\thread) -	swc1	$f28, THREAD_FPR28(\thread) -	swc1	$f29, THREAD_FPR29(\thread) -	swc1	$f30, THREAD_FPR30(\thread) -	swc1	$f31, THREAD_FPR31(\thread) +	swc1	$f0,  THREAD_FPR0_LS64(\thread) +	swc1	$f1,  THREAD_FPR1_LS64(\thread) +	swc1	$f2,  THREAD_FPR2_LS64(\thread) +	swc1	$f3,  THREAD_FPR3_LS64(\thread) +	swc1	$f4,  THREAD_FPR4_LS64(\thread) +	swc1	$f5,  THREAD_FPR5_LS64(\thread) +	swc1	$f6,  THREAD_FPR6_LS64(\thread) +	swc1	$f7,  THREAD_FPR7_LS64(\thread) +	swc1	$f8,  THREAD_FPR8_LS64(\thread) +	swc1	$f9,  THREAD_FPR9_LS64(\thread) +	swc1	$f10, THREAD_FPR10_LS64(\thread) +	swc1	$f11, THREAD_FPR11_LS64(\thread) +	swc1	$f12, THREAD_FPR12_LS64(\thread) +	swc1	$f13, THREAD_FPR13_LS64(\thread) +	swc1	$f14, THREAD_FPR14_LS64(\thread) +	swc1	$f15, THREAD_FPR15_LS64(\thread) +	swc1	$f16, THREAD_FPR16_LS64(\thread) +	swc1	$f17, THREAD_FPR17_LS64(\thread) +	swc1	$f18, THREAD_FPR18_LS64(\thread) +	swc1	$f19, THREAD_FPR19_LS64(\thread) +	swc1	$f20, THREAD_FPR20_LS64(\thread) +	swc1	$f21, THREAD_FPR21_LS64(\thread) +	swc1	$f22, THREAD_FPR22_LS64(\thread) +	swc1	$f23, THREAD_FPR23_LS64(\thread) +	swc1	$f24, THREAD_FPR24_LS64(\thread) +	swc1	$f25, THREAD_FPR25_LS64(\thread) +	swc1	$f26, THREAD_FPR26_LS64(\thread) +	swc1	$f27, THREAD_FPR27_LS64(\thread) +	swc1	$f28, THREAD_FPR28_LS64(\thread) +	swc1	$f29, THREAD_FPR29_LS64(\thread) +	swc1	$f30, THREAD_FPR30_LS64(\thread) +	swc1	$f31, THREAD_FPR31_LS64(\thread)  	sw	\tmp, THREAD_FCR31(\thread)  	.endm -	.macro	fpu_restore_double 
thread status tmp=t0 -	lw	\tmp, THREAD_FCR31(\thread) -	ldc1	$f0,  THREAD_FPR0(\thread) -	ldc1	$f2,  THREAD_FPR2(\thread) -	ldc1	$f4,  THREAD_FPR4(\thread) -	ldc1	$f6,  THREAD_FPR6(\thread) -	ldc1	$f8,  THREAD_FPR8(\thread) -	ldc1	$f10, THREAD_FPR10(\thread) -	ldc1	$f12, THREAD_FPR12(\thread) -	ldc1	$f14, THREAD_FPR14(\thread) -	ldc1	$f16, THREAD_FPR16(\thread) -	ldc1	$f18, THREAD_FPR18(\thread) -	ldc1	$f20, THREAD_FPR20(\thread) -	ldc1	$f22, THREAD_FPR22(\thread) -	ldc1	$f24, THREAD_FPR24(\thread) -	ldc1	$f26, THREAD_FPR26(\thread) -	ldc1	$f28, THREAD_FPR28(\thread) -	ldc1	$f30, THREAD_FPR30(\thread) -	ctc1	\tmp, fcr31 -	.endm -  	.macro	fpu_restore_single thread tmp=t0  	lw	\tmp, THREAD_FCR31(\thread) -	lwc1	$f0,  THREAD_FPR0(\thread) -	lwc1	$f1,  THREAD_FPR1(\thread) -	lwc1	$f2,  THREAD_FPR2(\thread) -	lwc1	$f3,  THREAD_FPR3(\thread) -	lwc1	$f4,  THREAD_FPR4(\thread) -	lwc1	$f5,  THREAD_FPR5(\thread) -	lwc1	$f6,  THREAD_FPR6(\thread) -	lwc1	$f7,  THREAD_FPR7(\thread) -	lwc1	$f8,  THREAD_FPR8(\thread) -	lwc1	$f9,  THREAD_FPR9(\thread) -	lwc1	$f10, THREAD_FPR10(\thread) -	lwc1	$f11, THREAD_FPR11(\thread) -	lwc1	$f12, THREAD_FPR12(\thread) -	lwc1	$f13, THREAD_FPR13(\thread) -	lwc1	$f14, THREAD_FPR14(\thread) -	lwc1	$f15, THREAD_FPR15(\thread) -	lwc1	$f16, THREAD_FPR16(\thread) -	lwc1	$f17, THREAD_FPR17(\thread) -	lwc1	$f18, THREAD_FPR18(\thread) -	lwc1	$f19, THREAD_FPR19(\thread) -	lwc1	$f20, THREAD_FPR20(\thread) -	lwc1	$f21, THREAD_FPR21(\thread) -	lwc1	$f22, THREAD_FPR22(\thread) -	lwc1	$f23, THREAD_FPR23(\thread) -	lwc1	$f24, THREAD_FPR24(\thread) -	lwc1	$f25, THREAD_FPR25(\thread) -	lwc1	$f26, THREAD_FPR26(\thread) -	lwc1	$f27, THREAD_FPR27(\thread) -	lwc1	$f28, THREAD_FPR28(\thread) -	lwc1	$f29, THREAD_FPR29(\thread) -	lwc1	$f30, THREAD_FPR30(\thread) -	lwc1	$f31, THREAD_FPR31(\thread) +	lwc1	$f0,  THREAD_FPR0_LS64(\thread) +	lwc1	$f1,  THREAD_FPR1_LS64(\thread) +	lwc1	$f2,  THREAD_FPR2_LS64(\thread) +	lwc1	$f3,  THREAD_FPR3_LS64(\thread) +	lwc1	$f4,  
THREAD_FPR4_LS64(\thread) +	lwc1	$f5,  THREAD_FPR5_LS64(\thread) +	lwc1	$f6,  THREAD_FPR6_LS64(\thread) +	lwc1	$f7,  THREAD_FPR7_LS64(\thread) +	lwc1	$f8,  THREAD_FPR8_LS64(\thread) +	lwc1	$f9,  THREAD_FPR9_LS64(\thread) +	lwc1	$f10, THREAD_FPR10_LS64(\thread) +	lwc1	$f11, THREAD_FPR11_LS64(\thread) +	lwc1	$f12, THREAD_FPR12_LS64(\thread) +	lwc1	$f13, THREAD_FPR13_LS64(\thread) +	lwc1	$f14, THREAD_FPR14_LS64(\thread) +	lwc1	$f15, THREAD_FPR15_LS64(\thread) +	lwc1	$f16, THREAD_FPR16_LS64(\thread) +	lwc1	$f17, THREAD_FPR17_LS64(\thread) +	lwc1	$f18, THREAD_FPR18_LS64(\thread) +	lwc1	$f19, THREAD_FPR19_LS64(\thread) +	lwc1	$f20, THREAD_FPR20_LS64(\thread) +	lwc1	$f21, THREAD_FPR21_LS64(\thread) +	lwc1	$f22, THREAD_FPR22_LS64(\thread) +	lwc1	$f23, THREAD_FPR23_LS64(\thread) +	lwc1	$f24, THREAD_FPR24_LS64(\thread) +	lwc1	$f25, THREAD_FPR25_LS64(\thread) +	lwc1	$f26, THREAD_FPR26_LS64(\thread) +	lwc1	$f27, THREAD_FPR27_LS64(\thread) +	lwc1	$f28, THREAD_FPR28_LS64(\thread) +	lwc1	$f29, THREAD_FPR29_LS64(\thread) +	lwc1	$f30, THREAD_FPR30_LS64(\thread) +	lwc1	$f31, THREAD_FPR31_LS64(\thread)  	ctc1	\tmp, fcr31  	.endm diff --git a/arch/mips/include/asm/asmmacro-64.h b/arch/mips/include/asm/asmmacro-64.h index 08a527dfe4a..38ea609465b 100644 --- a/arch/mips/include/asm/asmmacro-64.h +++ b/arch/mips/include/asm/asmmacro-64.h @@ -13,102 +13,6 @@  #include <asm/fpregdef.h>  #include <asm/mipsregs.h> -	.macro	fpu_save_16even thread tmp=t0 -	cfc1	\tmp, fcr31 -	sdc1	$f0,  THREAD_FPR0(\thread) -	sdc1	$f2,  THREAD_FPR2(\thread) -	sdc1	$f4,  THREAD_FPR4(\thread) -	sdc1	$f6,  THREAD_FPR6(\thread) -	sdc1	$f8,  THREAD_FPR8(\thread) -	sdc1	$f10, THREAD_FPR10(\thread) -	sdc1	$f12, THREAD_FPR12(\thread) -	sdc1	$f14, THREAD_FPR14(\thread) -	sdc1	$f16, THREAD_FPR16(\thread) -	sdc1	$f18, THREAD_FPR18(\thread) -	sdc1	$f20, THREAD_FPR20(\thread) -	sdc1	$f22, THREAD_FPR22(\thread) -	sdc1	$f24, THREAD_FPR24(\thread) -	sdc1	$f26, THREAD_FPR26(\thread) -	sdc1	$f28, THREAD_FPR28(\thread) -	sdc1	
$f30, THREAD_FPR30(\thread) -	sw	\tmp, THREAD_FCR31(\thread) -	.endm - -	.macro	fpu_save_16odd thread -	sdc1	$f1,  THREAD_FPR1(\thread) -	sdc1	$f3,  THREAD_FPR3(\thread) -	sdc1	$f5,  THREAD_FPR5(\thread) -	sdc1	$f7,  THREAD_FPR7(\thread) -	sdc1	$f9,  THREAD_FPR9(\thread) -	sdc1	$f11, THREAD_FPR11(\thread) -	sdc1	$f13, THREAD_FPR13(\thread) -	sdc1	$f15, THREAD_FPR15(\thread) -	sdc1	$f17, THREAD_FPR17(\thread) -	sdc1	$f19, THREAD_FPR19(\thread) -	sdc1	$f21, THREAD_FPR21(\thread) -	sdc1	$f23, THREAD_FPR23(\thread) -	sdc1	$f25, THREAD_FPR25(\thread) -	sdc1	$f27, THREAD_FPR27(\thread) -	sdc1	$f29, THREAD_FPR29(\thread) -	sdc1	$f31, THREAD_FPR31(\thread) -	.endm - -	.macro	fpu_save_double thread status tmp -	sll	\tmp, \status, 5 -	bgez	\tmp, 2f -	fpu_save_16odd \thread -2: -	fpu_save_16even \thread \tmp -	.endm - -	.macro	fpu_restore_16even thread tmp=t0 -	lw	\tmp, THREAD_FCR31(\thread) -	ldc1	$f0,  THREAD_FPR0(\thread) -	ldc1	$f2,  THREAD_FPR2(\thread) -	ldc1	$f4,  THREAD_FPR4(\thread) -	ldc1	$f6,  THREAD_FPR6(\thread) -	ldc1	$f8,  THREAD_FPR8(\thread) -	ldc1	$f10, THREAD_FPR10(\thread) -	ldc1	$f12, THREAD_FPR12(\thread) -	ldc1	$f14, THREAD_FPR14(\thread) -	ldc1	$f16, THREAD_FPR16(\thread) -	ldc1	$f18, THREAD_FPR18(\thread) -	ldc1	$f20, THREAD_FPR20(\thread) -	ldc1	$f22, THREAD_FPR22(\thread) -	ldc1	$f24, THREAD_FPR24(\thread) -	ldc1	$f26, THREAD_FPR26(\thread) -	ldc1	$f28, THREAD_FPR28(\thread) -	ldc1	$f30, THREAD_FPR30(\thread) -	ctc1	\tmp, fcr31 -	.endm - -	.macro	fpu_restore_16odd thread -	ldc1	$f1,  THREAD_FPR1(\thread) -	ldc1	$f3,  THREAD_FPR3(\thread) -	ldc1	$f5,  THREAD_FPR5(\thread) -	ldc1	$f7,  THREAD_FPR7(\thread) -	ldc1	$f9,  THREAD_FPR9(\thread) -	ldc1	$f11, THREAD_FPR11(\thread) -	ldc1	$f13, THREAD_FPR13(\thread) -	ldc1	$f15, THREAD_FPR15(\thread) -	ldc1	$f17, THREAD_FPR17(\thread) -	ldc1	$f19, THREAD_FPR19(\thread) -	ldc1	$f21, THREAD_FPR21(\thread) -	ldc1	$f23, THREAD_FPR23(\thread) -	ldc1	$f25, THREAD_FPR25(\thread) -	ldc1	$f27, THREAD_FPR27(\thread) -	
ldc1	$f29, THREAD_FPR29(\thread) -	ldc1	$f31, THREAD_FPR31(\thread) -	.endm - -	.macro	fpu_restore_double thread status tmp -	sll	\tmp, \status, 5 -	bgez	\tmp, 1f				# 16 register mode? - -	fpu_restore_16odd \thread -1:	fpu_restore_16even \thread \tmp -	.endm -  	.macro	cpu_save_nonscratch thread  	LONG_S	s0, THREAD_REG16(\thread)  	LONG_S	s1, THREAD_REG17(\thread) diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 6c8342ae74d..935543f1453 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -9,6 +9,7 @@  #define _ASM_ASMMACRO_H  #include <asm/hazards.h> +#include <asm/asm-offsets.h>  #ifdef CONFIG_32BIT  #include <asm/asmmacro-32.h> @@ -16,26 +17,8 @@  #ifdef CONFIG_64BIT  #include <asm/asmmacro-64.h>  #endif -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#endif - -#ifdef CONFIG_MIPS_MT_SMTC -	.macro	local_irq_enable reg=t0 -	mfc0	\reg, CP0_TCSTATUS -	ori	\reg, \reg, TCSTATUS_IXMT -	xori	\reg, \reg, TCSTATUS_IXMT -	mtc0	\reg, CP0_TCSTATUS -	_ehb -	.endm -	.macro	local_irq_disable reg=t0 -	mfc0	\reg, CP0_TCSTATUS -	ori	\reg, \reg, TCSTATUS_IXMT -	mtc0	\reg, CP0_TCSTATUS -	_ehb -	.endm -#elif defined(CONFIG_CPU_MIPSR2) +#ifdef CONFIG_CPU_MIPSR2  	.macro	local_irq_enable reg=t0  	ei  	irq_enable_hazard @@ -54,13 +37,141 @@  	.endm  	.macro	local_irq_disable reg=t0 +#ifdef CONFIG_PREEMPT +	lw      \reg, TI_PRE_COUNT($28) +	addi    \reg, \reg, 1 +	sw      \reg, TI_PRE_COUNT($28) +#endif  	mfc0	\reg, CP0_STATUS  	ori	\reg, \reg, 1  	xori	\reg, \reg, 1  	mtc0	\reg, CP0_STATUS  	irq_disable_hazard +#ifdef CONFIG_PREEMPT +	lw      \reg, TI_PRE_COUNT($28) +	addi    \reg, \reg, -1 +	sw      \reg, TI_PRE_COUNT($28) +#endif +	.endm +#endif /* CONFIG_CPU_MIPSR2 */ + +	.macro	fpu_save_16even thread tmp=t0 +	cfc1	\tmp, fcr31 +	sdc1	$f0,  THREAD_FPR0_LS64(\thread) +	sdc1	$f2,  THREAD_FPR2_LS64(\thread) +	sdc1	$f4,  THREAD_FPR4_LS64(\thread) +	sdc1	$f6,  THREAD_FPR6_LS64(\thread) +	sdc1	$f8,  
THREAD_FPR8_LS64(\thread) +	sdc1	$f10, THREAD_FPR10_LS64(\thread) +	sdc1	$f12, THREAD_FPR12_LS64(\thread) +	sdc1	$f14, THREAD_FPR14_LS64(\thread) +	sdc1	$f16, THREAD_FPR16_LS64(\thread) +	sdc1	$f18, THREAD_FPR18_LS64(\thread) +	sdc1	$f20, THREAD_FPR20_LS64(\thread) +	sdc1	$f22, THREAD_FPR22_LS64(\thread) +	sdc1	$f24, THREAD_FPR24_LS64(\thread) +	sdc1	$f26, THREAD_FPR26_LS64(\thread) +	sdc1	$f28, THREAD_FPR28_LS64(\thread) +	sdc1	$f30, THREAD_FPR30_LS64(\thread) +	sw	\tmp, THREAD_FCR31(\thread) +	.endm + +	.macro	fpu_save_16odd thread +	.set	push +	.set	mips64r2 +	sdc1	$f1,  THREAD_FPR1_LS64(\thread) +	sdc1	$f3,  THREAD_FPR3_LS64(\thread) +	sdc1	$f5,  THREAD_FPR5_LS64(\thread) +	sdc1	$f7,  THREAD_FPR7_LS64(\thread) +	sdc1	$f9,  THREAD_FPR9_LS64(\thread) +	sdc1	$f11, THREAD_FPR11_LS64(\thread) +	sdc1	$f13, THREAD_FPR13_LS64(\thread) +	sdc1	$f15, THREAD_FPR15_LS64(\thread) +	sdc1	$f17, THREAD_FPR17_LS64(\thread) +	sdc1	$f19, THREAD_FPR19_LS64(\thread) +	sdc1	$f21, THREAD_FPR21_LS64(\thread) +	sdc1	$f23, THREAD_FPR23_LS64(\thread) +	sdc1	$f25, THREAD_FPR25_LS64(\thread) +	sdc1	$f27, THREAD_FPR27_LS64(\thread) +	sdc1	$f29, THREAD_FPR29_LS64(\thread) +	sdc1	$f31, THREAD_FPR31_LS64(\thread) +	.set	pop +	.endm + +	.macro	fpu_save_double thread status tmp +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +	sll	\tmp, \status, 5 +	bgez	\tmp, 10f +	fpu_save_16odd \thread +10: +#endif +	fpu_save_16even \thread \tmp +	.endm + +	.macro	fpu_restore_16even thread tmp=t0 +	lw	\tmp, THREAD_FCR31(\thread) +	ldc1	$f0,  THREAD_FPR0_LS64(\thread) +	ldc1	$f2,  THREAD_FPR2_LS64(\thread) +	ldc1	$f4,  THREAD_FPR4_LS64(\thread) +	ldc1	$f6,  THREAD_FPR6_LS64(\thread) +	ldc1	$f8,  THREAD_FPR8_LS64(\thread) +	ldc1	$f10, THREAD_FPR10_LS64(\thread) +	ldc1	$f12, THREAD_FPR12_LS64(\thread) +	ldc1	$f14, THREAD_FPR14_LS64(\thread) +	ldc1	$f16, THREAD_FPR16_LS64(\thread) +	ldc1	$f18, THREAD_FPR18_LS64(\thread) +	ldc1	$f20, THREAD_FPR20_LS64(\thread) +	ldc1	$f22, THREAD_FPR22_LS64(\thread) +	
ldc1	$f24, THREAD_FPR24_LS64(\thread) +	ldc1	$f26, THREAD_FPR26_LS64(\thread) +	ldc1	$f28, THREAD_FPR28_LS64(\thread) +	ldc1	$f30, THREAD_FPR30_LS64(\thread) +	ctc1	\tmp, fcr31 +	.endm + +	.macro	fpu_restore_16odd thread +	.set	push +	.set	mips64r2 +	ldc1	$f1,  THREAD_FPR1_LS64(\thread) +	ldc1	$f3,  THREAD_FPR3_LS64(\thread) +	ldc1	$f5,  THREAD_FPR5_LS64(\thread) +	ldc1	$f7,  THREAD_FPR7_LS64(\thread) +	ldc1	$f9,  THREAD_FPR9_LS64(\thread) +	ldc1	$f11, THREAD_FPR11_LS64(\thread) +	ldc1	$f13, THREAD_FPR13_LS64(\thread) +	ldc1	$f15, THREAD_FPR15_LS64(\thread) +	ldc1	$f17, THREAD_FPR17_LS64(\thread) +	ldc1	$f19, THREAD_FPR19_LS64(\thread) +	ldc1	$f21, THREAD_FPR21_LS64(\thread) +	ldc1	$f23, THREAD_FPR23_LS64(\thread) +	ldc1	$f25, THREAD_FPR25_LS64(\thread) +	ldc1	$f27, THREAD_FPR27_LS64(\thread) +	ldc1	$f29, THREAD_FPR29_LS64(\thread) +	ldc1	$f31, THREAD_FPR31_LS64(\thread) +	.set	pop +	.endm + +	.macro	fpu_restore_double thread status tmp +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +	sll	\tmp, \status, 5 +	bgez	\tmp, 10f				# 16 register mode? 
+ +	fpu_restore_16odd \thread +10: +#endif +	fpu_restore_16even \thread \tmp +	.endm + +#ifdef CONFIG_CPU_MIPSR2 +	.macro	_EXT	rd, rs, p, s +	ext	\rd, \rs, \p, \s +	.endm +#else /* !CONFIG_CPU_MIPSR2 */ +	.macro	_EXT	rd, rs, p, s +	srl	\rd, \rs, \p +	andi	\rd, \rd, (1 << \s) - 1  	.endm -#endif /* CONFIG_MIPS_MT_SMTC */ +#endif /* !CONFIG_CPU_MIPSR2 */  /*   * Temporary until all gas have MT ASE support @@ -89,4 +200,219 @@  	 .word	0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)  	.endm +#ifdef TOOLCHAIN_SUPPORTS_MSA +	.macro	ld_d	wd, off, base +	.set	push +	.set	mips32r2 +	.set	msa +	ld.d	$w\wd, \off(\base) +	.set	pop +	.endm + +	.macro	st_d	wd, off, base +	.set	push +	.set	mips32r2 +	.set	msa +	st.d	$w\wd, \off(\base) +	.set	pop +	.endm + +	.macro	copy_u_w	rd, ws, n +	.set	push +	.set	mips32r2 +	.set	msa +	copy_u.w \rd, $w\ws[\n] +	.set	pop +	.endm + +	.macro	copy_u_d	rd, ws, n +	.set	push +	.set	mips64r2 +	.set	msa +	copy_u.d \rd, $w\ws[\n] +	.set	pop +	.endm + +	.macro	insert_w	wd, n, rs +	.set	push +	.set	mips32r2 +	.set	msa +	insert.w $w\wd[\n], \rs +	.set	pop +	.endm + +	.macro	insert_d	wd, n, rs +	.set	push +	.set	mips64r2 +	.set	msa +	insert.d $w\wd[\n], \rs +	.set	pop +	.endm +#else + +#ifdef CONFIG_CPU_MICROMIPS +#define CFC_MSA_INSN		0x587e0056 +#define CTC_MSA_INSN		0x583e0816 +#define LDD_MSA_INSN		0x58000837 +#define STD_MSA_INSN		0x5800083f +#define COPY_UW_MSA_INSN	0x58f00056 +#define COPY_UD_MSA_INSN	0x58f80056 +#define INSERT_W_MSA_INSN	0x59300816 +#define INSERT_D_MSA_INSN	0x59380816 +#else +#define CFC_MSA_INSN		0x787e0059 +#define CTC_MSA_INSN		0x783e0819 +#define LDD_MSA_INSN		0x78000823 +#define STD_MSA_INSN		0x78000827 +#define COPY_UW_MSA_INSN	0x78f00059 +#define COPY_UD_MSA_INSN	0x78f80059 +#define INSERT_W_MSA_INSN	0x79300819 +#define INSERT_D_MSA_INSN	0x79380819 +#endif + +	/* +	 * Temporary until all toolchains in use include MSA support. 
+	 */ +	.macro	cfcmsa	rd, cs +	.set	push +	.set	noat +	.insn +	.word	CFC_MSA_INSN | (\cs << 11) +	move	\rd, $1 +	.set	pop +	.endm + +	.macro	ctcmsa	cd, rs +	.set	push +	.set	noat +	move	$1, \rs +	.word	CTC_MSA_INSN | (\cd << 6) +	.set	pop +	.endm + +	.macro	ld_d	wd, off, base +	.set	push +	.set	noat +	add	$1, \base, \off +	.word	LDD_MSA_INSN | (\wd << 6) +	.set	pop +	.endm + +	.macro	st_d	wd, off, base +	.set	push +	.set	noat +	add	$1, \base, \off +	.word	STD_MSA_INSN | (\wd << 6) +	.set	pop +	.endm + +	.macro	copy_u_w	rd, ws, n +	.set	push +	.set	noat +	.insn +	.word	COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) +	/* move triggers an assembler bug... */ +	or	\rd, $1, zero +	.set	pop +	.endm + +	.macro	copy_u_d	rd, ws, n +	.set	push +	.set	noat +	.insn +	.word	COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) +	/* move triggers an assembler bug... */ +	or	\rd, $1, zero +	.set	pop +	.endm + +	.macro	insert_w	wd, n, rs +	.set	push +	.set	noat +	/* move triggers an assembler bug... */ +	or	$1, \rs, zero +	.word	INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) +	.set	pop +	.endm + +	.macro	insert_d	wd, n, rs +	.set	push +	.set	noat +	/* move triggers an assembler bug... 
*/ +	or	$1, \rs, zero +	.word	INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) +	.set	pop +	.endm +#endif + +	.macro	msa_save_all	thread +	st_d	0, THREAD_FPR0, \thread +	st_d	1, THREAD_FPR1, \thread +	st_d	2, THREAD_FPR2, \thread +	st_d	3, THREAD_FPR3, \thread +	st_d	4, THREAD_FPR4, \thread +	st_d	5, THREAD_FPR5, \thread +	st_d	6, THREAD_FPR6, \thread +	st_d	7, THREAD_FPR7, \thread +	st_d	8, THREAD_FPR8, \thread +	st_d	9, THREAD_FPR9, \thread +	st_d	10, THREAD_FPR10, \thread +	st_d	11, THREAD_FPR11, \thread +	st_d	12, THREAD_FPR12, \thread +	st_d	13, THREAD_FPR13, \thread +	st_d	14, THREAD_FPR14, \thread +	st_d	15, THREAD_FPR15, \thread +	st_d	16, THREAD_FPR16, \thread +	st_d	17, THREAD_FPR17, \thread +	st_d	18, THREAD_FPR18, \thread +	st_d	19, THREAD_FPR19, \thread +	st_d	20, THREAD_FPR20, \thread +	st_d	21, THREAD_FPR21, \thread +	st_d	22, THREAD_FPR22, \thread +	st_d	23, THREAD_FPR23, \thread +	st_d	24, THREAD_FPR24, \thread +	st_d	25, THREAD_FPR25, \thread +	st_d	26, THREAD_FPR26, \thread +	st_d	27, THREAD_FPR27, \thread +	st_d	28, THREAD_FPR28, \thread +	st_d	29, THREAD_FPR29, \thread +	st_d	30, THREAD_FPR30, \thread +	st_d	31, THREAD_FPR31, \thread +	.endm + +	.macro	msa_restore_all	thread +	ld_d	0, THREAD_FPR0, \thread +	ld_d	1, THREAD_FPR1, \thread +	ld_d	2, THREAD_FPR2, \thread +	ld_d	3, THREAD_FPR3, \thread +	ld_d	4, THREAD_FPR4, \thread +	ld_d	5, THREAD_FPR5, \thread +	ld_d	6, THREAD_FPR6, \thread +	ld_d	7, THREAD_FPR7, \thread +	ld_d	8, THREAD_FPR8, \thread +	ld_d	9, THREAD_FPR9, \thread +	ld_d	10, THREAD_FPR10, \thread +	ld_d	11, THREAD_FPR11, \thread +	ld_d	12, THREAD_FPR12, \thread +	ld_d	13, THREAD_FPR13, \thread +	ld_d	14, THREAD_FPR14, \thread +	ld_d	15, THREAD_FPR15, \thread +	ld_d	16, THREAD_FPR16, \thread +	ld_d	17, THREAD_FPR17, \thread +	ld_d	18, THREAD_FPR18, \thread +	ld_d	19, THREAD_FPR19, \thread +	ld_d	20, THREAD_FPR20, \thread +	ld_d	21, THREAD_FPR21, \thread +	ld_d	22, THREAD_FPR22, \thread +	ld_d	23, THREAD_FPR23, \thread +	ld_d	24, 
THREAD_FPR24, \thread +	ld_d	25, THREAD_FPR25, \thread +	ld_d	26, THREAD_FPR26, \thread +	ld_d	27, THREAD_FPR27, \thread +	ld_d	28, THREAD_FPR28, \thread +	ld_d	29, THREAD_FPR29, \thread +	ld_d	30, THREAD_FPR30, \thread +	ld_d	31, THREAD_FPR31, \thread +	.endm +  #endif /* _ASM_ASMMACRO_H */ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 08b607969a1..37b2befe651 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -1,5 +1,5 @@  /* - * Atomic operations that C can't guarantee us.	 Useful for + * Atomic operations that C can't guarantee us.  Useful for   * resource counting etc..   *   * But use these as seldom as possible since they are much more slower @@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)  		int temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%0, %1		# atomic_add		\n"  		"	addu	%0, %2					\n"  		"	sc	%0, %1					\n" @@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	ll	%0, %1		# atomic_add	\n"  			"	addu	%0, %2				\n"  			"	sc	%0, %1				\n" @@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)  		int temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%0, %1		# atomic_sub		\n"  		"	subu	%0, %2					\n"  		"	sc	%0, %1					\n" @@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	ll	%0, %1		# atomic_sub	\n"  			"	subu	%0, %2				\n"  			"	sc	%0, %1				\n" @@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)  		int temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%1, %2		# atomic_add_return	\n"  		"	addu	%0, %1, %3				\n"  		"	sc	%0, %2					\n" @@ -153,7 +153,7 @@ static 
__inline__ int atomic_add_return(int i, atomic_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	ll	%1, %2	# atomic_add_return	\n"  			"	addu	%0, %1, %3			\n"  			"	sc	%0, %2				\n" @@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)  		int temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%1, %2		# atomic_sub_return	\n"  		"	subu	%0, %1, %3				\n"  		"	sc	%0, %2					\n" @@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	ll	%1, %2	# atomic_sub_return	\n"  			"	subu	%0, %1, %3			\n"  			"	sc	%0, %2				\n" @@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)  		int temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%1, %2		# atomic_sub_if_positive\n"  		"	subu	%0, %1, %3				\n"  		"	bltz	%0, 1f					\n" @@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)  		int temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%1, %2		# atomic_sub_if_positive\n"  		"	subu	%0, %1, %3				\n"  		"	bltz	%0, 1f					\n" @@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)  		long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	%0, %1		# atomic64_add		\n"  		"	daddu	%0, %2					\n"  		"	scd	%0, %1					\n" @@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	lld	%0, %1		# atomic64_add	\n"  			"	daddu	%0, %2				\n"  			"	scd	%0, %1				\n" @@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)  		long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	
%0, %1		# atomic64_sub		\n"  		"	dsubu	%0, %2					\n"  		"	scd	%0, %1					\n" @@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	lld	%0, %1		# atomic64_sub	\n"  			"	dsubu	%0, %2				\n"  			"	scd	%0, %1				\n" @@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)  		long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	%1, %2		# atomic64_add_return	\n"  		"	daddu	%0, %1, %3				\n"  		"	scd	%0, %2					\n" @@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	lld	%1, %2	# atomic64_add_return	\n"  			"	daddu	%0, %1, %3			\n"  			"	scd	%0, %2				\n" @@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)  		long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	%1, %2		# atomic64_sub_return	\n"  		"	dsubu	%0, %1, %3				\n"  		"	scd	%0, %2					\n" @@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	lld	%1, %2	# atomic64_sub_return	\n"  			"	dsubu	%0, %1, %3			\n"  			"	scd	%0, %2				\n" @@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)  		long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"  		"	dsubu	%0, %1, %3				\n"  		"	bltz	%0, 1f					\n" @@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)  		long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"  		"	dsubu	%0, %1, %3				\n"  		"	bltz	%0, 1f					\n" @@ 
-761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)  #endif /* CONFIG_64BIT */ -/* - * atomic*_return operations are serializing but not the non-*_return - * versions. - */ -#define smp_mb__before_atomic_dec()	smp_mb__before_llsc() -#define smp_mb__after_atomic_dec()	smp_llsc_mb() -#define smp_mb__before_atomic_inc()	smp_mb__before_llsc() -#define smp_mb__after_atomic_inc()	smp_llsc_mb() -  #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index 314ab553201..d0101dd0575 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -18,7 +18,7 @@   * over this barrier.  All reads preceding this primitive are guaranteed   * to access memory (but not necessarily other CPUs' caches) before any   * reads following this primitive that depend on the data return by - * any of the preceding reads.	This primitive is much lighter weight than + * any of the preceding reads.  This primitive is much lighter weight than   * rmb() on most CPUs, and is never heavier weight than is   * rmb().   * @@ -43,7 +43,7 @@   * </programlisting>   *   * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends().	 However, + * two reads are separated by a read_barrier_depends().  However,   * the following code, with the same initial values for "a" and "b":   *   * <programlisting> @@ -57,7 +57,7 @@   * </programlisting>   *   * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b".	 Therefore, on some CPUs, such + * the read of "a" and the read of "b".  Therefore, on some CPUs, such   * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()   * in cases like this where there are no data dependencies.   
*/ @@ -180,4 +180,22 @@  #define nudge_writes() mb()  #endif +#define smp_store_release(p, v)						\ +do {									\ +	compiletime_assert_atomic_type(*p);				\ +	smp_mb();							\ +	ACCESS_ONCE(*p) = (v);						\ +} while (0) + +#define smp_load_acquire(p)						\ +({									\ +	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\ +	compiletime_assert_atomic_type(*p);				\ +	smp_mb();							\ +	___p1;								\ +}) + +#define smp_mb__before_atomic()	smp_mb__before_llsc() +#define smp_mb__after_atomic()	smp_llsc_mb() +  #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 71305a8b3d7..7c8816f7b7c 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -38,13 +38,6 @@  #endif  /* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit()	smp_mb__before_llsc() -#define smp_mb__after_clear_bit()	smp_llsc_mb() - - -/*   * These are the "slower" versions of the functions and are in bitops.c.   * These functions call raw_local_irq_{save,restore}().   */ @@ -79,7 +72,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)  	if (kernel_uses_llsc && R10000_LLSC_WAR) {  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	" __LL "%0, %1			# set_bit	\n"  		"	or	%0, %2					\n"  		"	" __SC	"%0, %1					\n" @@ -101,7 +94,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)  	} else if (kernel_uses_llsc) {  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL "%0, %1		# set_bit	\n"  			"	or	%0, %2				\n"  			"	" __SC	"%0, %1				\n" @@ -120,7 +113,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)   *   * clear_bit() is atomic and may not be reordered.  
However, it does   * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()   * in order to ensure changes are visible on other processors.   */  static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) @@ -131,7 +124,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)  	if (kernel_uses_llsc && R10000_LLSC_WAR) {  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	" __LL "%0, %1			# clear_bit	\n"  		"	and	%0, %2					\n"  		"	" __SC "%0, %1					\n" @@ -153,7 +146,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)  	} else if (kernel_uses_llsc) {  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL "%0, %1		# clear_bit	\n"  			"	and	%0, %2				\n"  			"	" __SC "%0, %1				\n" @@ -175,7 +168,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)   */  static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)  { -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(nr, addr);  } @@ -197,7 +190,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3				\n" +		"	.set	arch=r4000			\n"  		"1:	" __LL "%0, %1		# change_bit	\n"  		"	xor	%0, %2				\n"  		"	" __SC	"%0, %1				\n" @@ -211,7 +204,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL "%0, %1		# change_bit	\n"  			"	xor	%0, %2				\n"  			"	" __SC	"%0, %1				\n" @@ -244,7 +237,7 @@ static inline int test_and_set_bit(unsigned long nr,  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	
arch=r4000				\n"  		"1:	" __LL "%0, %1		# test_and_set_bit	\n"  		"	or	%2, %0, %3				\n"  		"	" __SC	"%2, %1					\n" @@ -260,7 +253,7 @@ static inline int test_and_set_bit(unsigned long nr,  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL "%0, %1	# test_and_set_bit	\n"  			"	or	%2, %0, %3			\n"  			"	" __SC	"%2, %1				\n" @@ -298,7 +291,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	" __LL "%0, %1		# test_and_set_bit	\n"  		"	or	%2, %0, %3				\n"  		"	" __SC	"%2, %1					\n" @@ -314,7 +307,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL "%0, %1	# test_and_set_bit	\n"  			"	or	%2, %0, %3			\n"  			"	" __SC	"%2, %1				\n" @@ -353,7 +346,7 @@ static inline int test_and_clear_bit(unsigned long nr,  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"  		"	or	%2, %0, %3				\n"  		"	xor	%2, %3					\n" @@ -386,7 +379,7 @@ static inline int test_and_clear_bit(unsigned long nr,  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL	"%0, %1 # test_and_clear_bit	\n"  			"	or	%2, %0, %3			\n"  			"	xor	%2, %3				\n" @@ -427,7 +420,7 @@ static inline int test_and_change_bit(unsigned long nr,  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"  		"	xor	%2, %0, %3				\n"  		"	" __SC	"%2, %1					\n" @@ -443,7 +436,7 @@ static inline int test_and_change_bit(unsigned long nr,  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	" __LL	"%0, %1 # test_and_change_bit	\n"  			"	xor	%2, %0, %3			\n"  			"	" __SC	"\t%2, %1			\n" diff 
--git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index 27bd060d716..cbaccebf506 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -46,8 +46,35 @@  #include <linux/cpumask.h>  #include <asm/r4kcache.h> +#include <asm/smp-ops.h> + +extern struct plat_smp_ops bmips43xx_smp_ops; +extern struct plat_smp_ops bmips5000_smp_ops; + +static inline int register_bmips_smp_ops(void) +{ +#if IS_ENABLED(CONFIG_CPU_BMIPS) && IS_ENABLED(CONFIG_SMP) +	switch (current_cpu_type()) { +	case CPU_BMIPS32: +	case CPU_BMIPS3300: +		return register_up_smp_ops(); +	case CPU_BMIPS4350: +	case CPU_BMIPS4380: +		register_smp_ops(&bmips43xx_smp_ops); +		break; +	case CPU_BMIPS5000: +		register_smp_ops(&bmips5000_smp_ops); +		break; +	default: +		return -ENODEV; +	} + +	return 0; +#else +	return -ENODEV; +#endif +} -extern struct plat_smp_ops bmips_smp_ops;  extern char bmips_reset_nmi_vec;  extern char bmips_reset_nmi_vec_end;  extern char bmips_smp_movevec; diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index 4d2cdea5aa3..1f7ca8b0040 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -61,15 +61,21 @@  /*   * Valid machtype for Loongson family   */ -#define MACH_LOONGSON_UNKNOWN  0 -#define MACH_LEMOTE_FL2E       1 -#define MACH_LEMOTE_FL2F       2 -#define MACH_LEMOTE_ML2F7      3 -#define MACH_LEMOTE_YL2F89     4 -#define MACH_DEXXON_GDIUM2F10  5 -#define MACH_LEMOTE_NAS	       6 -#define MACH_LEMOTE_LL2F       7 -#define MACH_LOONGSON_END      8 +enum loongson_machine_type { +	MACH_LOONGSON_UNKNOWN, +	MACH_LEMOTE_FL2E, +	MACH_LEMOTE_FL2F, +	MACH_LEMOTE_ML2F7, +	MACH_LEMOTE_YL2F89, +	MACH_DEXXON_GDIUM2F10, +	MACH_LEMOTE_NAS, +	MACH_LEMOTE_LL2F, +	MACH_LEMOTE_A1004, +	MACH_LEMOTE_A1101, +	MACH_LEMOTE_A1201, +	MACH_LEMOTE_A1205, +	MACH_LOONGSON_END +};  /*   * Valid machtype for group INGENIC @@ -112,6 +118,8 @@ extern void prom_free_prom_memory(void);  extern void 
free_init_pages(const char *what,  			    unsigned long begin, unsigned long end); +extern void (*free_init_pages_eva)(void *begin, void *end); +  /*   * Initial kernel command line, usually setup by prom_init()   */ diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index e28a3e0eb3c..de781cf54bc 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -8,6 +8,8 @@  #ifndef _ASM_BRANCH_H  #define _ASM_BRANCH_H +#include <asm/cpu-features.h> +#include <asm/mipsregs.h>  #include <asm/ptrace.h>  #include <asm/inst.h> @@ -18,12 +20,40 @@ extern int __compute_return_epc_for_insn(struct pt_regs *regs,  extern int __microMIPS_compute_return_epc(struct pt_regs *regs);  extern int __MIPS16e_compute_return_epc(struct pt_regs *regs); +/* + * microMIPS bitfields + */ +#define MM_POOL32A_MINOR_MASK	0x3f +#define MM_POOL32A_MINOR_SHIFT	0x6 +#define MM_MIPS32_COND_FC	0x30 + +extern int __mm_isBranchInstr(struct pt_regs *regs, +	struct mm_decoded_insn dec_insn, unsigned long *contpc); + +static inline int mm_isBranchInstr(struct pt_regs *regs, +	struct mm_decoded_insn dec_insn, unsigned long *contpc) +{ +	if (!cpu_has_mmips) +		return 0; + +	return __mm_isBranchInstr(regs, dec_insn, contpc); +}  static inline int delay_slot(struct pt_regs *regs)  {  	return regs->cp0_cause & CAUSEF_BD;  } +static inline void clear_delay_slot(struct pt_regs *regs) +{ +	regs->cp0_cause &= ~CAUSEF_BD; +} + +static inline void set_delay_slot(struct pt_regs *regs) +{ +	regs->cp0_cause |= CAUSEF_BD; +} +  static inline unsigned long exception_epc(struct pt_regs *regs)  {  	if (likely(!delay_slot(regs))) diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index 69468ded282..e08381a37f8 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -113,6 +113,12 @@ unsigned long run_uncached(void *func);  extern void *kmap_coherent(struct page *page, unsigned long addr);  extern void 
kunmap_coherent(void); +extern void *kmap_noncoherent(struct page *page, unsigned long addr); + +static inline void kunmap_noncoherent(void) +{ +	kunmap_coherent(); +}  #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE  static inline void flush_kernel_dcache_page(struct page *page) diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h index 68f37e3eccc..06b9bc7ea14 100644 --- a/arch/mips/include/asm/cacheops.h +++ b/arch/mips/include/asm/cacheops.h @@ -14,56 +14,52 @@  /*   * Cache Operations available on all MIPS processors with R4000-style caches   */ -#define Index_Invalidate_I	0x00 -#define Index_Writeback_Inv_D	0x01 -#define Index_Load_Tag_I	0x04 -#define Index_Load_Tag_D	0x05 -#define Index_Store_Tag_I	0x08 -#define Index_Store_Tag_D	0x09 -#if defined(CONFIG_CPU_LOONGSON2) -#define Hit_Invalidate_I	0x00 -#else -#define Hit_Invalidate_I	0x10 -#endif -#define Hit_Invalidate_D	0x11 -#define Hit_Writeback_Inv_D	0x15 +#define Index_Invalidate_I		0x00 +#define Index_Writeback_Inv_D		0x01 +#define Index_Load_Tag_I		0x04 +#define Index_Load_Tag_D		0x05 +#define Index_Store_Tag_I		0x08 +#define Index_Store_Tag_D		0x09 +#define Hit_Invalidate_I		0x10 +#define Hit_Invalidate_D		0x11 +#define Hit_Writeback_Inv_D		0x15  /*   * R4000-specific cacheops   */ -#define Create_Dirty_Excl_D	0x0d -#define Fill			0x14 -#define Hit_Writeback_I		0x18 -#define Hit_Writeback_D		0x19 +#define Create_Dirty_Excl_D		0x0d +#define Fill				0x14 +#define Hit_Writeback_I			0x18 +#define Hit_Writeback_D			0x19  /*   * R4000SC and R4400SC-specific cacheops   */ -#define Index_Invalidate_SI	0x02 -#define Index_Writeback_Inv_SD	0x03 -#define Index_Load_Tag_SI	0x06 -#define Index_Load_Tag_SD	0x07 -#define Index_Store_Tag_SI	0x0A -#define Index_Store_Tag_SD	0x0B -#define Create_Dirty_Excl_SD	0x0f -#define Hit_Invalidate_SI	0x12 -#define Hit_Invalidate_SD	0x13 -#define Hit_Writeback_Inv_SD	0x17 -#define Hit_Writeback_SD	0x1b -#define Hit_Set_Virtual_SI	0x1e -#define 
Hit_Set_Virtual_SD	0x1f +#define Index_Invalidate_SI		0x02 +#define Index_Writeback_Inv_SD		0x03 +#define Index_Load_Tag_SI		0x06 +#define Index_Load_Tag_SD		0x07 +#define Index_Store_Tag_SI		0x0A +#define Index_Store_Tag_SD		0x0B +#define Create_Dirty_Excl_SD		0x0f +#define Hit_Invalidate_SI		0x12 +#define Hit_Invalidate_SD		0x13 +#define Hit_Writeback_Inv_SD		0x17 +#define Hit_Writeback_SD		0x1b +#define Hit_Set_Virtual_SI		0x1e +#define Hit_Set_Virtual_SD		0x1f  /*   * R5000-specific cacheops   */ -#define R5K_Page_Invalidate_S	0x17 +#define R5K_Page_Invalidate_S		0x17  /*   * RM7000-specific cacheops   */ -#define Page_Invalidate_T	0x16 -#define Index_Store_Tag_T	0x0a -#define Index_Load_Tag_T	0x06 +#define Page_Invalidate_T		0x16 +#define Index_Store_Tag_T		0x0a +#define Index_Load_Tag_T		0x06  /*   * R10000-specific cacheops @@ -71,17 +67,22 @@   * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.   * Most of the _S cacheops are identical to the R4000SC _SD cacheops.   
*/ -#define Index_Writeback_Inv_S	0x03 -#define Index_Load_Tag_S	0x07 -#define Index_Store_Tag_S	0x0B -#define Hit_Invalidate_S	0x13 -#define Cache_Barrier		0x14 -#define Hit_Writeback_Inv_S	0x17 -#define Index_Load_Data_I	0x18 -#define Index_Load_Data_D	0x19 -#define Index_Load_Data_S	0x1b -#define Index_Store_Data_I	0x1c -#define Index_Store_Data_D	0x1d -#define Index_Store_Data_S	0x1f +#define Index_Writeback_Inv_S		0x03 +#define Index_Load_Tag_S		0x07 +#define Index_Store_Tag_S		0x0B +#define Hit_Invalidate_S		0x13 +#define Cache_Barrier			0x14 +#define Hit_Writeback_Inv_S		0x17 +#define Index_Load_Data_I		0x18 +#define Index_Load_Data_D		0x19 +#define Index_Load_Data_S		0x1b +#define Index_Store_Data_I		0x1c +#define Index_Store_Data_D		0x1d +#define Index_Store_Data_S		0x1f + +/* + * Loongson2-specific cacheops + */ +#define Hit_Invalidate_I_Loongson2	0x00  #endif	/* __ASM_CACHEOPS_H */ diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index ac3d2b8a20d..3418c51e115 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -7,6 +7,7 @@   * Copyright (C) 1999 Silicon Graphics, Inc.   * Copyright (C) 2001 Thiemo Seufer.   * Copyright (C) 2002 Maciej W. Rozycki + * Copyright (C) 2014 Imagination Technologies Ltd.   */  #ifndef _ASM_CHECKSUM_H  #define _ASM_CHECKSUM_H @@ -29,9 +30,13 @@   */  __wsum csum_partial(const void *buff, int len, __wsum sum); -__wsum __csum_partial_copy_user(const void *src, void *dst, -				int len, __wsum sum, int *err_ptr); +__wsum __csum_partial_copy_kernel(const void *src, void *dst, +				  int len, __wsum sum, int *err_ptr); +__wsum __csum_partial_copy_from_user(const void *src, void *dst, +				     int len, __wsum sum, int *err_ptr); +__wsum __csum_partial_copy_to_user(const void *src, void *dst, +				   int len, __wsum sum, int *err_ptr);  /*   * this is a new version of the above that records errors it finds in *errp,   * but continues and zeros the rest of the buffer. 
@@ -41,8 +46,26 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,  				   __wsum sum, int *err_ptr)  {  	might_fault(); -	return __csum_partial_copy_user((__force void *)src, dst, -					len, sum, err_ptr); +	if (segment_eq(get_fs(), get_ds())) +		return __csum_partial_copy_kernel((__force void *)src, dst, +						  len, sum, err_ptr); +	else +		return __csum_partial_copy_from_user((__force void *)src, dst, +						     len, sum, err_ptr); +} + +#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +static inline +__wsum csum_and_copy_from_user(const void __user *src, void *dst, +			       int len, __wsum sum, int *err_ptr) +{ +	if (access_ok(VERIFY_READ, src, len)) +		return csum_partial_copy_from_user(src, dst, len, sum, +						   err_ptr); +	if (len) +		*err_ptr = -EFAULT; + +	return sum;  }  /* @@ -54,9 +77,16 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,  			     __wsum sum, int *err_ptr)  {  	might_fault(); -	if (access_ok(VERIFY_WRITE, dst, len)) -		return __csum_partial_copy_user(src, (__force void *)dst, -						len, sum, err_ptr); +	if (access_ok(VERIFY_WRITE, dst, len)) { +		if (segment_eq(get_fs(), get_ds())) +			return __csum_partial_copy_kernel(src, +							  (__force void *)dst, +							  len, sum, err_ptr); +		else +			return __csum_partial_copy_to_user(src, +							   (__force void *)dst, +							   len, sum, err_ptr); +	}  	if (len)  		*err_ptr = -EFAULT; diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h index 262475414e5..1b3ad7b09dc 100644 --- a/arch/mips/include/asm/clkdev.h +++ b/arch/mips/include/asm/clkdev.h @@ -14,8 +14,10 @@  #include <linux/slab.h> +#ifndef CONFIG_COMMON_CLK  #define __clk_get(clk)	({ 1; })  #define __clk_put(clk)	do { } while (0) +#endif  static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)  { diff --git a/arch/mips/include/asm/cmp.h b/arch/mips/include/asm/cmp.h index 89a73fb93ae..033d97303c8 100644 --- a/arch/mips/include/asm/cmp.h 
+++ b/arch/mips/include/asm/cmp.h @@ -10,7 +10,6 @@ extern void cmp_smp_setup(void);  extern void cmp_smp_finish(void);  extern void cmp_boot_secondary(int cpu, struct task_struct *t);  extern void cmp_init_secondary(void); -extern void cmp_cpus_done(void);  extern void cmp_prepare_cpus(unsigned int max_cpus);  /* This is platform specific */ diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 466069bd846..eefcaa363a8 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)  		unsigned long dummy;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%0, %3			# xchg_u32	\n"  		"	.set	mips0					\n"  		"	move	%2, %z4					\n" -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"	sc	%2, %1					\n"  		"	beqzl	%2, 1b					\n"  		"	.set	mips0					\n" @@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	ll	%0, %3		# xchg_u32	\n"  			"	.set	mips0				\n"  			"	move	%2, %z4				\n" -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	sc	%2, %1				\n"  			"	.set	mips0				\n"  			: "=&r" (retval), "=m" (*m), "=&r" (dummy) @@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)  		unsigned long dummy;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	lld	%0, %3			# xchg_u64	\n"  		"	move	%2, %z4					\n"  		"	scd	%2, %1					\n" @@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)  		do {  			__asm__ __volatile__( -			"	.set	mips3				\n" +			"	.set	arch=r4000			\n"  			"	lld	%0, %3		# xchg_u64	\n"  			"	move	%2, %z4				\n"  			"	scd	%2, %1				\n" @@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz  		__asm__ __volatile__(					
\  		"	.set	push				\n"	\  		"	.set	noat				\n"	\ -		"	.set	mips3				\n"	\ +		"	.set	arch=r4000			\n"	\  		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\  		"	bne	%0, %z3, 2f			\n"	\  		"	.set	mips0				\n"	\  		"	move	$1, %z4				\n"	\ -		"	.set	mips3				\n"	\ +		"	.set	arch=r4000			\n"	\  		"	" st "	$1, %1				\n"	\  		"	beqzl	$1, 1b				\n"	\  		"2:						\n"	\ @@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz  		__asm__ __volatile__(					\  		"	.set	push				\n"	\  		"	.set	noat				\n"	\ -		"	.set	mips3				\n"	\ +		"	.set	arch=r4000			\n"	\  		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\  		"	bne	%0, %z3, 2f			\n"	\  		"	.set	mips0				\n"	\  		"	move	$1, %z4				\n"	\ -		"	.set	mips3				\n"	\ +		"	.set	arch=r4000			\n"	\  		"	" st "	$1, %1				\n"	\  		"	beqz	$1, 1b				\n"	\  		"	.set	pop				\n"	\ diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index fa44f3ec530..c7d8c997d93 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -13,12 +13,6 @@  #include <asm/cpu-info.h>  #include <cpu-feature-overrides.h> -#ifndef current_cpu_type -#define current_cpu_type()	current_cpu_data.cputype -#endif - -#define boot_cpu_type()		cpu_data[0].cputype -  /*   * SMP assumption: Options of CPU 0 are a superset of all processors.   * This is true for all known MIPS systems. 
@@ -26,6 +20,15 @@  #ifndef cpu_has_tlb  #define cpu_has_tlb		(cpu_data[0].options & MIPS_CPU_TLB)  #endif +#ifndef cpu_has_tlbinv +#define cpu_has_tlbinv		(cpu_data[0].options & MIPS_CPU_TLBINV) +#endif +#ifndef cpu_has_segments +#define cpu_has_segments	(cpu_data[0].options & MIPS_CPU_SEGMENTS) +#endif +#ifndef cpu_has_eva +#define cpu_has_eva		(cpu_data[0].options & MIPS_CPU_EVA) +#endif  /*   * For the moment we don't consider R6000 and R8000 so we can assume that @@ -107,9 +110,15 @@  #ifndef cpu_has_smartmips  #define cpu_has_smartmips	(cpu_data[0].ases & MIPS_ASE_SMARTMIPS)  #endif +  #ifndef cpu_has_rixi -#define cpu_has_rixi		(cpu_data[0].options & MIPS_CPU_RIXI) +# ifdef CONFIG_64BIT +# define cpu_has_rixi		(cpu_data[0].options & MIPS_CPU_RIXI) +# else /* CONFIG_32BIT */ +# define cpu_has_rixi		((cpu_data[0].options & MIPS_CPU_RIXI) && !cpu_has_64bits) +# endif  #endif +  #ifndef cpu_has_mmips  # ifdef CONFIG_SYS_SUPPORTS_MICROMIPS  #  define cpu_has_mmips		(cpu_data[0].options & MIPS_CPU_MICROMIPS) @@ -117,6 +126,7 @@  #  define cpu_has_mmips		0  # endif  #endif +  #ifndef cpu_has_vtag_icache  #define cpu_has_vtag_icache	(cpu_data[0].icache.flags & MIPS_CACHE_VTAG)  #endif @@ -180,6 +190,17 @@  /*   * Shortcuts ...   
*/ +#define cpu_has_mips_2_3_4_5	(cpu_has_mips_2 | cpu_has_mips_3_4_5) +#define cpu_has_mips_3_4_5	(cpu_has_mips_3 | cpu_has_mips_4_5) +#define cpu_has_mips_4_5	(cpu_has_mips_4 | cpu_has_mips_5) + +#define cpu_has_mips_2_3_4_5_r	(cpu_has_mips_2 | cpu_has_mips_3_4_5_r) +#define cpu_has_mips_3_4_5_r	(cpu_has_mips_3 | cpu_has_mips_4_5_r) +#define cpu_has_mips_4_5_r	(cpu_has_mips_4 | cpu_has_mips_5_r) +#define cpu_has_mips_5_r	(cpu_has_mips_5 | cpu_has_mips_r) + +#define cpu_has_mips_4_5_r2	(cpu_has_mips_4_5 | cpu_has_mips_r2) +  #define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2)  #define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2)  #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) @@ -193,7 +214,7 @@  /*   * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other - * pre-MIPS32/MIPS53 processors have CLO, CLZ.	The IDT RC64574 is 64-bit and + * pre-MIPS32/MIPS64 processors have CLO, CLZ.	The IDT RC64574 is 64-bit and   * has CLO and CLZ but not DCLO nor DCLZ.  For 64-bit kernels   * cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.   
*/ @@ -298,4 +319,10 @@  #define cpu_has_vz		(cpu_data[0].ases & MIPS_ASE_VZ)  #endif +#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa) +# define cpu_has_msa		(cpu_data[0].ases & MIPS_ASE_MSA) +#elif !defined(cpu_has_msa) +# define cpu_has_msa		0 +#endif +  #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index 41401d8eb7d..47d5967ce7e 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -39,19 +39,23 @@ struct cache_desc {  #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */  struct cpuinfo_mips { -	unsigned int		udelay_val; -	unsigned int		asid_cache; +	unsigned long		asid_cache;  	/*  	 * Capability and feature descriptor structure for MIPS CPU  	 */  	unsigned long		options;  	unsigned long		ases; +	unsigned int		udelay_val;  	unsigned int		processor_id;  	unsigned int		fpu_id; +	unsigned int		msa_id;  	unsigned int		cputype;  	int			isa_level;  	int			tlbsize; +	int			tlbsizevtlb; +	int			tlbsizeftlbsets; +	int			tlbsizeftlbways;  	struct cache_desc	icache; /* Primary I-cache */  	struct cache_desc	dcache; /* Primary D or combined I/D cache */  	struct cache_desc	scache; /* Secondary cache */ @@ -61,18 +65,13 @@ struct cpuinfo_mips {  #ifdef CONFIG_64BIT  	int			vmbits; /* Virtual memory size in bits */  #endif -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) +#ifdef CONFIG_MIPS_MT_SMP  	/* -	 * In the MIPS MT "SMTC" model, each TC is considered -	 * to be a "CPU" for the purposes of scheduling, but -	 * exception resources, ASID spaces, etc, are common -	 * to all TCs within the same VPE. +	 * There is not necessarily a 1:1 mapping of VPE num to CPU number +	 * in particular on multi-core systems.  	 
*/  	int			vpe_id;	 /* Virtual Processor number */  #endif -#ifdef CONFIG_MIPS_MT_SMTC -	int			tc_id;	 /* Thread Context number */ -#endif  	void			*data;	/* Additional data */  	unsigned int		watch_reg_count;   /* Number that exist */  	unsigned int		watch_reg_use_cnt; /* Usable by ptrace */ @@ -84,6 +83,7 @@ struct cpuinfo_mips {  extern struct cpuinfo_mips cpu_data[];  #define current_cpu_data cpu_data[smp_processor_id()]  #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] +#define boot_cpu_data cpu_data[0]  extern void cpu_probe(void);  extern void cpu_report(void); @@ -91,4 +91,31 @@ extern void cpu_report(void);  extern const char *__cpu_name[];  #define cpu_name_string()	__cpu_name[smp_processor_id()] +struct seq_file; +struct notifier_block; + +extern int register_proc_cpuinfo_notifier(struct notifier_block *nb); +extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v); + +#define proc_cpuinfo_notifier(fn, pri)					\ +({									\ +	static struct notifier_block fn##_nb = {			\ +		.notifier_call = fn,					\ +		.priority = pri						\ +	};								\ +									\ +	register_proc_cpuinfo_notifier(&fn##_nb);			\ +}) + +struct proc_cpuinfo_notifier_args { +	struct seq_file *m; +	unsigned long n; +}; + +#ifdef CONFIG_MIPS_MT_SMP +# define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id) +#else +# define cpu_vpe_id(cpuinfo)	0 +#endif +  #endif /* __ASM_CPU_INFO_H */ diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h new file mode 100644 index 00000000000..b4e2bd87df5 --- /dev/null +++ b/arch/mips/include/asm/cpu-type.h @@ -0,0 +1,216 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004  Maciej W. 
Rozycki + */ +#ifndef __ASM_CPU_TYPE_H +#define __ASM_CPU_TYPE_H + +#include <linux/smp.h> +#include <linux/compiler.h> + +static inline int __pure __get_cpu_type(const int cpu_type) +{ +	switch (cpu_type) { +#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ +    defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) +	case CPU_LOONGSON2: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3 +	case CPU_LOONGSON3: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B +	case CPU_LOONGSON1: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1 +	case CPU_4KC: +	case CPU_ALCHEMY: +	case CPU_PR4450: +	case CPU_JZRISC: +#endif + +#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \ +    defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) +	case CPU_4KEC: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2 +	case CPU_4KSC: +	case CPU_24K: +	case CPU_34K: +	case CPU_1004K: +	case CPU_74K: +	case CPU_M14KC: +	case CPU_M14KEC: +	case CPU_INTERAPTIV: +	case CPU_PROAPTIV: +	case CPU_P5600: +	case CPU_M5150: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 +	case CPU_5KC: +	case CPU_5KE: +	case CPU_20KC: +	case CPU_25KF: +	case CPU_SB1: +	case CPU_SB1A: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R2 +	/* +	 * All MIPS64 R2 processors have their own special symbols.  
That is, +	 * there currently is no pure R2 core +	 */ +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R3000 +	case CPU_R2000: +	case CPU_R3000: +	case CPU_R3000A: +	case CPU_R3041: +	case CPU_R3051: +	case CPU_R3052: +	case CPU_R3081: +	case CPU_R3081E: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_TX39XX +	case CPU_TX3912: +	case CPU_TX3922: +	case CPU_TX3927: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_VR41XX +	case CPU_VR41XX: +	case CPU_VR4111: +	case CPU_VR4121: +	case CPU_VR4122: +	case CPU_VR4131: +	case CPU_VR4133: +	case CPU_VR4181: +	case CPU_VR4181A: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R4300 +	case CPU_R4300: +	case CPU_R4310: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R4X00 +	case CPU_R4000PC: +	case CPU_R4000SC: +	case CPU_R4000MC: +	case CPU_R4200: +	case CPU_R4400PC: +	case CPU_R4400SC: +	case CPU_R4400MC: +	case CPU_R4600: +	case CPU_R4700: +	case CPU_R4640: +	case CPU_R4650: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_TX49XX +	case CPU_TX49XX: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R5000 +	case CPU_R5000: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R5432 +	case CPU_R5432: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R5500 +	case CPU_R5500: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R6000 +	case CPU_R6000: +	case CPU_R6000A: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_NEVADA +	case CPU_NEVADA: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R8000 +	case CPU_R8000: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_R10000 +	case CPU_R10000: +	case CPU_R12000: +	case CPU_R14000: +#endif +#ifdef CONFIG_SYS_HAS_CPU_RM7000 +	case CPU_RM7000: +	case CPU_SR71000: +#endif +#ifdef CONFIG_SYS_HAS_CPU_SB1 +	case CPU_SB1: +	case CPU_SB1A: +#endif +#ifdef CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON +	case CPU_CAVIUM_OCTEON: +	case CPU_CAVIUM_OCTEON_PLUS: +	case CPU_CAVIUM_OCTEON2: +	case CPU_CAVIUM_OCTEON3: +#endif + +#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \ +	defined (CONFIG_SYS_HAS_CPU_MIPS32_R1) +	case CPU_BMIPS32: +	case CPU_BMIPS3300: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_BMIPS4350 +	case CPU_BMIPS4350: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_BMIPS4380 +	
case CPU_BMIPS4380: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_BMIPS5000 +	case CPU_BMIPS5000: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_XLP +	case CPU_XLP: +#endif + +#ifdef CONFIG_SYS_HAS_CPU_XLR +	case CPU_XLR: +#endif +		break; +	default: +		unreachable(); +	} + +	return cpu_type; +} + +static inline int __pure current_cpu_type(void) +{ +	const int cpu_type = current_cpu_data.cputype; + +	return __get_cpu_type(cpu_type); +} + +static inline int __pure boot_cpu_type(void) +{ +	const int cpu_type = cpu_data[0].cputype; + +	return __get_cpu_type(cpu_type); +} + +#endif /* __ASM_CPU_TYPE_H */ diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 71b9f1998be..129d08701e9 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -3,15 +3,14 @@   *	  various MIPS cpu types.   *   * Copyright (C) 1996 David S. Miller (davem@davemloft.net) - * Copyright (C) 2004  Maciej W. Rozycki + * Copyright (C) 2004, 2013  Maciej W. Rozycki   */  #ifndef _ASM_CPU_H  #define _ASM_CPU_H -/* Assigned Company values for bits 23:16 of the PRId Register -   (CP0 register 15, select 0).	 As of the MIPS32 and MIPS64 specs from -   MTI, the PRId register is defined in this (backwards compatible) -   way: +/* +   As of the MIPS32 and MIPS64 specs from MTI, the PRId register (CP0 +   register 15, select 0) is defined in this (backwards compatible) way:    +----------------+----------------+----------------+----------------+    | Company Options| Company ID	    | Processor ID   | Revision	      | @@ -23,6 +22,14 @@     spec.  */ +#define PRID_OPT_MASK		0xff000000 + +/* + * Assigned Company values for bits 23:16 of the PRId register. + */ + +#define PRID_COMP_MASK		0xff0000 +  #define PRID_COMP_LEGACY	0x000000  #define PRID_COMP_MIPS		0x010000  #define PRID_COMP_BROADCOM	0x020000 @@ -38,10 +45,17 @@  #define PRID_COMP_INGENIC	0xd00000  /* - * Assigned values for the product ID register.	 
In order to detect a - * certain CPU type exactly eventually additional registers may need to - * be examined.	 These are valid when 23:16 == PRID_COMP_LEGACY + * Assigned Processor ID (implementation) values for bits 15:8 of the PRId + * register.  In order to detect a certain CPU type exactly eventually + * additional registers may need to be examined. + */ + +#define PRID_IMP_MASK		0xff00 + +/* + * These are valid when 23:16 == PRID_COMP_LEGACY   */ +  #define PRID_IMP_R2000		0x0100  #define PRID_IMP_AU1_REV1	0x0100  #define PRID_IMP_AU1_REV2	0x0200 @@ -68,10 +82,10 @@  #define PRID_IMP_RM7000		0x2700  #define PRID_IMP_NEVADA		0x2800		/* RM5260 ??? */  #define PRID_IMP_RM9000		0x3400 -#define PRID_IMP_LOONGSON1	0x4200 +#define PRID_IMP_LOONGSON_32	0x4200  /* Loongson-1 */  #define PRID_IMP_R5432		0x5400  #define PRID_IMP_R5500		0x5500 -#define PRID_IMP_LOONGSON2	0x6300 +#define PRID_IMP_LOONGSON_64	0x6300  /* Loongson-2/3 */  #define PRID_IMP_UNKNOWN	0xff00 @@ -97,6 +111,12 @@  #define PRID_IMP_1074K		0x9a00  #define PRID_IMP_M14KC		0x9c00  #define PRID_IMP_M14KEC		0x9e00 +#define PRID_IMP_INTERAPTIV_UP	0xa000 +#define PRID_IMP_INTERAPTIV_MP	0xa100 +#define PRID_IMP_PROAPTIV_UP	0xa200 +#define PRID_IMP_PROAPTIV_MP	0xa300 +#define PRID_IMP_M5150		0xa700 +#define PRID_IMP_P5600		0xa800  /*   * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE @@ -180,13 +200,19 @@  #define PRID_IMP_NETLOGIC_XLP8XX	0x1000  #define PRID_IMP_NETLOGIC_XLP3XX	0x1100  #define PRID_IMP_NETLOGIC_XLP2XX	0x1200 +#define PRID_IMP_NETLOGIC_XLP9XX	0x1500 +#define PRID_IMP_NETLOGIC_XLP5XX	0x1300  /* - * Definitions for 7:0 on legacy processors + * Particular Revision values for bits 7:0 of the PRId register.   
*/  #define PRID_REV_MASK		0x00ff +/* + * Definitions for 7:0 on legacy processors + */ +  #define PRID_REV_TX4927		0x0022  #define PRID_REV_TX4937		0x0030  #define PRID_REV_R4400		0x0040 @@ -206,6 +232,7 @@  #define PRID_REV_LOONGSON1B	0x0020  #define PRID_REV_LOONGSON2E	0x0002  #define PRID_REV_LOONGSON2F	0x0003 +#define PRID_REV_LOONGSON3A	0x0005  /*   * Older processors used to encode processor version and revision in two @@ -227,8 +254,12 @@   *  31				   16 15	     8 7	      0   */ +#define FPIR_IMP_MASK		0xff00 +  #define FPIR_IMP_NONE		0x0000 +#if !defined(__ASSEMBLY__) +  enum cpu_type_enum {  	CPU_UNKNOWN, @@ -251,7 +282,7 @@ enum cpu_type_enum {  	CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,  	CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,  	CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000, -	CPU_SR71000, CPU_RM9000, CPU_TX49XX, +	CPU_SR71000, CPU_TX49XX,  	/*  	 * R8000 class processors @@ -269,18 +300,19 @@ enum cpu_type_enum {  	CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,  	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,  	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC, -	CPU_M14KEC, +	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,  	/*  	 * MIPS64 class processors  	 */  	CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, -	CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, -	CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, +	CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, +	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,  	CPU_LAST  }; +#endif /* !__ASSEMBLY */  /*   * ISA Level encodings @@ -328,6 +360,9 @@ enum cpu_type_enum {  #define MIPS_CPU_PCI		0x00400000 /* CPU has Perf Ctr Int indicator */  #define MIPS_CPU_RIXI		0x00800000 /* CPU has TLB Read/eXec Inhibit */  #define MIPS_CPU_MICROMIPS	0x01000000 /* CPU has microMIPS capability */ +#define 
MIPS_CPU_TLBINV		0x02000000 /* CPU supports TLBINV/F */ +#define MIPS_CPU_SEGMENTS	0x04000000 /* CPU supports Segmentation Control registers */ +#define MIPS_CPU_EVA		0x80000000 /* CPU supports Enhanced Virtual Addressing */  /*   * CPU ASE encodings @@ -340,5 +375,6 @@ enum cpu_type_enum {  #define MIPS_ASE_MIPSMT		0x00000020 /* CPU supports MIPS MT */  #define MIPS_ASE_DSP2P		0x00000040 /* Signal Processing ASE Rev 2 */  #define MIPS_ASE_VZ		0x00000080 /* Virtualization ASE */ +#define MIPS_ASE_MSA		0x00000100 /* MIPS SIMD Architecture */  #endif /* _ASM_CPU_H */ diff --git a/arch/mips/include/asm/dec/ioasic.h b/arch/mips/include/asm/dec/ioasic.h index a6e505a0e44..be4d62a5a10 100644 --- a/arch/mips/include/asm/dec/ioasic.h +++ b/arch/mips/include/asm/dec/ioasic.h @@ -31,8 +31,6 @@ static inline u32 ioasic_read(unsigned int reg)  	return ioasic_base[reg / 4];  } -extern void clear_ioasic_dma_irq(unsigned int irq); -  extern void init_ioasic_irqs(int base);  extern int dec_ioasic_clocksource_init(void); diff --git a/arch/mips/include/asm/dec/ioasic_addrs.h b/arch/mips/include/asm/dec/ioasic_addrs.h index a8665a7611c..8bd95971fe2 100644 --- a/arch/mips/include/asm/dec/ioasic_addrs.h +++ b/arch/mips/include/asm/dec/ioasic_addrs.h @@ -40,7 +40,7 @@  #define IOASIC_FLOPPY	(11*IOASIC_SLOT_SIZE)	/* FDC (maxine) */  #define IOASIC_SCSI	(12*IOASIC_SLOT_SIZE)	/* ASC SCSI */  #define IOASIC_FDC_DMA	(13*IOASIC_SLOT_SIZE)	/* FDC DMA (maxine) */ -#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE)	/* ??? */ +#define IOASIC_SCSI_DMA	(14*IOASIC_SLOT_SIZE)	/* ??? */  #define IOASIC_RES_15	(15*IOASIC_SLOT_SIZE)	/* unused? */ diff --git a/arch/mips/include/asm/dec/kn01.h b/arch/mips/include/asm/dec/kn01.h index 0eb3241de70..88d9ffd7425 100644 --- a/arch/mips/include/asm/dec/kn01.h +++ b/arch/mips/include/asm/dec/kn01.h @@ -57,12 +57,12 @@  /*   * System Control & Status Register bits.   
*/ -#define KN01_CSR_MNFMOD		(1<<15) /* MNFMOD manufacturing jumper */ -#define KN01_CSR_STATUS		(1<<14) /* self-test result status output */ -#define KN01_CSR_PARDIS		(1<<13) /* parity error disable */ -#define KN01_CSR_CRSRTST	(1<<12) /* PCC test output */ -#define KN01_CSR_MONO		(1<<11) /* mono/color fb SIMM installed */ -#define KN01_CSR_MEMERR		(1<<10) /* write timeout error status & ack*/ +#define KN01_CSR_MNFMOD		(1<<15)	/* MNFMOD manufacturing jumper */ +#define KN01_CSR_STATUS		(1<<14)	/* self-test result status output */ +#define KN01_CSR_PARDIS		(1<<13)	/* parity error disable */ +#define KN01_CSR_CRSRTST	(1<<12)	/* PCC test output */ +#define KN01_CSR_MONO		(1<<11)	/* mono/color fb SIMM installed */ +#define KN01_CSR_MEMERR		(1<<10)	/* write timeout error status & ack*/  #define KN01_CSR_VINT		(1<<9)	/* PCC area detect #2 status & ack */  #define KN01_CSR_TXDIS		(1<<8)	/* DZ11 transmit disable */  #define KN01_CSR_VBGTRG		(1<<2)	/* blue DAC voltage over green (r/o) */ diff --git a/arch/mips/include/asm/dec/kn02ca.h b/arch/mips/include/asm/dec/kn02ca.h index 69dc2a9a2d0..92c0fe25609 100644 --- a/arch/mips/include/asm/dec/kn02ca.h +++ b/arch/mips/include/asm/dec/kn02ca.h @@ -68,7 +68,7 @@  #define KN03CA_IO_SSR_ISDN_RST	(1<<12)		/* ~ISDN (Am79C30A) reset */  #define KN03CA_IO_SSR_FLOPPY_RST (1<<7)		/* ~FDC (82077) reset */ -#define KN03CA_IO_SSR_VIDEO_RST (1<<6)		/* ~framebuffer reset */ +#define KN03CA_IO_SSR_VIDEO_RST	(1<<6)		/* ~framebuffer reset */  #define KN03CA_IO_SSR_AB_RST	(1<<5)		/* ACCESS.bus reset */  #define KN03CA_IO_SSR_RES_4	(1<<4)		/* unused */  #define KN03CA_IO_SSR_RES_3	(1<<4)		/* unused */ diff --git a/arch/mips/include/asm/dec/kn05.h b/arch/mips/include/asm/dec/kn05.h index 56d22dc8803..8e14f677e5e 100644 --- a/arch/mips/include/asm/dec/kn05.h +++ b/arch/mips/include/asm/dec/kn05.h @@ -49,12 +49,20 @@  #define KN4K_RES_15	(15*IOASIC_SLOT_SIZE)	/* unused? */  /* + * MB ASIC interrupt bits. + */ +#define KN4K_MB_INR_MB		4	/* ??? 
*/ +#define KN4K_MB_INR_MT		3	/* memory, I/O bus read/write errors */ +#define KN4K_MB_INR_RES_2	2	/* unused */ +#define KN4K_MB_INR_RTC		1	/* RTC */ +#define KN4K_MB_INR_TC		0	/* I/O ASIC cascade */ + +/*   * Bits for the MB interrupt register.   * The register appears read-only.   */ -#define KN4K_MB_INT_TC		(1<<0)		/* TURBOchannel? */ -#define KN4K_MB_INT_RTC		(1<<1)		/* RTC? */ -#define KN4K_MB_INT_MT		(1<<3)		/* I/O ASIC cascade */ +#define KN4K_MB_INT_IRQ		(0x1f<<0)	/* CPU Int[4:0] status. */ +#define KN4K_MB_INT_IRQ_N(n)	(1<<(n))	/* Individual status bits. */  /*   * Bits for the MB control & status register. @@ -70,6 +78,7 @@  #define KN4K_MB_CSR_NC		(1<<14)		/* ??? */  #define KN4K_MB_CSR_EE		(1<<15)		/* (bus) Exception Enable? */  #define KN4K_MB_CSR_MSK		(0x1f<<16)	/* CPU Int[4:0] mask */ +#define KN4K_MB_CSR_MSK_N(n)	(1<<((n)+16))	/* Individual mask bits. */  #define KN4K_MB_CSR_FW		(1<<21)		/* ??? */  #define KN4K_MB_CSR_W		(1<<31)		/* ??? */ diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h index 446577712be..b59a2103b61 100644 --- a/arch/mips/include/asm/dec/prom.h +++ b/arch/mips/include/asm/dec/prom.h @@ -49,7 +49,7 @@  #ifdef CONFIG_64BIT -#define prom_is_rex(magic)	1	/* KN04 and KN05 are REX PROMs.	 */ +#define prom_is_rex(magic)	1	/* KN04 and KN05 are REX PROMs.  
*/  #else /* !CONFIG_64BIT */ @@ -113,31 +113,31 @@ extern int (*__pmax_close)(int);  #define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \  				 __asm__(#fun " = call_o32") -int __DEC_PROM_O32(_rex_bootinit, (int (*)(void))); -int __DEC_PROM_O32(_rex_bootread, (int (*)(void))); -int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *)); +int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *)); +int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *)); +int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));  unsigned long *__DEC_PROM_O32(_rex_slot_address, -			     (unsigned long *(*)(int), int)); -void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void))); -int __DEC_PROM_O32(_rex_getsysid, (int (*)(void))); -void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void))); - -int __DEC_PROM_O32(_prom_getchar, (int (*)(void))); -char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *)); -int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...)); - - -#define rex_bootinit()		_rex_bootinit(__rex_bootinit) -#define rex_bootread()		_rex_bootread(__rex_bootread) -#define rex_getbitmap(x)	_rex_getbitmap(__rex_getbitmap, x) -#define rex_slot_address(x)	_rex_slot_address(__rex_slot_address, x) -#define rex_gettcinfo()		_rex_gettcinfo(__rex_gettcinfo) -#define rex_getsysid()		_rex_getsysid(__rex_getsysid) -#define rex_clear_cache()	_rex_clear_cache(__rex_clear_cache) - -#define prom_getchar()		_prom_getchar(__prom_getchar) -#define prom_getenv(x)		_prom_getenv(__prom_getenv, x) -#define prom_printf(x...)	
_prom_printf(__prom_printf, x) +			     (unsigned long *(*)(int), void *, int)); +void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *)); +int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *)); +void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *)); + +int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *)); +char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *)); +int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...)); + + +#define rex_bootinit()		_rex_bootinit(__rex_bootinit, NULL) +#define rex_bootread()		_rex_bootread(__rex_bootread, NULL) +#define rex_getbitmap(x)	_rex_getbitmap(__rex_getbitmap, NULL, x) +#define rex_slot_address(x)	_rex_slot_address(__rex_slot_address, NULL, x) +#define rex_gettcinfo()		_rex_gettcinfo(__rex_gettcinfo, NULL) +#define rex_getsysid()		_rex_getsysid(__rex_getsysid, NULL) +#define rex_clear_cache()	_rex_clear_cache(__rex_clear_cache, NULL) + +#define prom_getchar()		_prom_getchar(__prom_getchar, NULL) +#define prom_getenv(x)		_prom_getenv(__prom_getenv, NULL, x) +#define prom_printf(x...)	
_prom_printf(__prom_printf, NULL, x)  #else /* !CONFIG_64BIT */ diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h index 242cbb3ca58..bc5e85d579e 100644 --- a/arch/mips/include/asm/dma-coherence.h +++ b/arch/mips/include/asm/dma-coherence.h @@ -9,7 +9,16 @@  #ifndef __ASM_DMA_COHERENCE_H  #define __ASM_DMA_COHERENCE_H +#ifdef CONFIG_DMA_MAYBE_COHERENT  extern int coherentio;  extern int hw_coherentio; +#else +#ifdef CONFIG_DMA_COHERENT +#define coherentio	1 +#else +#define coherentio	0 +#endif +#define hw_coherentio	0 +#endif /* CONFIG_DMA_MAYBE_COHERENT */  #endif diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 84238c574d5..06412aa9e3f 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -49,9 +49,14 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)  static inline int  dma_set_mask(struct device *dev, u64 mask)  { +	struct dma_map_ops *ops = get_dma_ops(dev); +  	if(!dev->dma_mask || !dma_supported(dev, mask))  		return -EIO; +	if (ops->set_dma_mask) +		return ops->set_dma_mask(dev, mask); +  	*dev->dma_mask = mask;  	return 0; diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index cf3ae2480b1..d4144056e92 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -36,6 +36,7 @@  #define EF_MIPS_ABI2		0x00000020  #define EF_MIPS_OPTIONS_FIRST	0x00000080  #define EF_MIPS_32BITMODE	0x00000100 +#define EF_MIPS_FP64		0x00000200  #define EF_MIPS_ABI		0x0000f000  #define EF_MIPS_ARCH		0xf0000000 @@ -176,6 +177,18 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];  #ifdef CONFIG_32BIT  /* + * In order to be sure that we don't attempt to execute an O32 binary which + * requires 64 bit FP (FR=1) on a system which does not support it we refuse + * to execute any binary which has bits specified by the following macro set + * in its ELF header flags. 
+ */ +#ifdef CONFIG_MIPS_O32_FP64_SUPPORT +# define __MIPS_O32_FP64_MUST_BE_ZERO	0 +#else +# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64 +#endif + +/*   * This is used to ensure we don't load something for the wrong architecture.   */  #define elf_check_arch(hdr)						\ @@ -192,6 +205,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];  	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\  	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\  		__res = 0;						\ +	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\ +		__res = 0;						\  									\  	__res;								\  }) @@ -249,6 +264,11 @@ extern struct mips_abi mips_abi_n32;  #define SET_PERSONALITY(ex)						\  do {									\ +	if ((ex).e_flags & EF_MIPS_FP64)				\ +		clear_thread_flag(TIF_32BIT_FPREGS);			\ +	else								\ +		set_thread_flag(TIF_32BIT_FPREGS);			\ +									\  	if (personality(current->personality) != PER_LINUX)		\  		set_personality(PER_LINUX);				\  									\ @@ -271,14 +291,18 @@ do {									\  #endif  #ifdef CONFIG_MIPS32_O32 -#define __SET_PERSONALITY32_O32()					\ +#define __SET_PERSONALITY32_O32(ex)					\  	do {								\  		set_thread_flag(TIF_32BIT_REGS);			\  		set_thread_flag(TIF_32BIT_ADDR);			\ +									\ +		if (!((ex).e_flags & EF_MIPS_FP64))			\ +			set_thread_flag(TIF_32BIT_FPREGS);		\ +									\  		current->thread.abi = &mips_abi_32;			\  	} while (0)  #else -#define __SET_PERSONALITY32_O32()					\ +#define __SET_PERSONALITY32_O32(ex)					\  	do { } while (0)  #endif @@ -289,7 +313,7 @@ do {									\  	     ((ex).e_flags & EF_MIPS_ABI) == 0)				\  		__SET_PERSONALITY32_N32();				\  	else								\ -		__SET_PERSONALITY32_O32();				\ +		__SET_PERSONALITY32_O32(ex);                            \  } while (0)  #else  #define __SET_PERSONALITY32(ex) do { } while (0) @@ -300,6 +324,7 @@ do {									\  	unsigned int p;							\  									\  	clear_thread_flag(TIF_32BIT_REGS);				\ +	clear_thread_flag(TIF_32BIT_FPREGS);				\  	clear_thread_flag(TIF_32BIT_ADDR);				\  									\  
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)			\ @@ -331,6 +356,7 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);  #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs)			\  	dump_task_fpu(tsk, elf_fpregs) +#define CORE_DUMP_USE_REGSET  #define ELF_EXEC_PAGESIZE	PAGE_SIZE  /* This yields a mask that user programs can use to figure out what diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h index dfaaf493e9d..6842ffafd1e 100644 --- a/arch/mips/include/asm/fixmap.h +++ b/arch/mips/include/asm/fixmap.h @@ -48,11 +48,7 @@  enum fixed_addresses {  #define FIX_N_COLOURS 8  	FIX_CMAP_BEGIN, -#ifdef CONFIG_MIPS_MT_SMTC -	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2), -#else  	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2), -#endif  #ifdef CONFIG_HIGHMEM  	/* reserved pte's for temporary kernel mappings */  	FIX_KMAP_BEGIN = FIX_CMAP_END + 1, @@ -71,38 +67,7 @@ enum fixed_addresses {  #define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)  #define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE) -#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -extern void __this_fixmap_does_not_exist(void); - -/* - * 'index to address' translation. If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static inline unsigned long fix_to_virt(const unsigned int idx) -{ -	/* -	 * this branch gets completely eliminated after inlining, -	 * except when someone tries to use fixaddr indices in an -	 * illegal way. (such as mixing up address types or using -	 * out-of-range indices). -	 * -	 * If it doesn't get removed, the linker will complain -	 * loudly with a reasonably clear error message.. 
-	 */ -	if (idx >= __end_of_fixed_addresses) -		__this_fixmap_does_not_exist(); - -	return __fix_to_virt(idx); -} - -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ -	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); -	return __virt_to_fix(vaddr); -} +#include <asm-generic/fixmap.h>  #define kmap_get_fixmap_pte(vaddr)					\  	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index d088e5db490..a939574f829 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -17,6 +17,7 @@  #include <asm/mipsregs.h>  #include <asm/cpu.h>  #include <asm/cpu-features.h> +#include <asm/fpu_emulator.h>  #include <asm/hazards.h>  #include <asm/processor.h>  #include <asm/current.h> @@ -28,16 +29,54 @@  struct sigcontext;  struct sigcontext32; -extern void fpu_emulator_init_fpu(void);  extern void _init_fpu(void);  extern void _save_fp(struct task_struct *);  extern void _restore_fp(struct task_struct *); -#define __enable_fpu()							\ -do {									\ -	set_c0_status(ST0_CU1);						\ -	enable_fpu_hazard();						\ -} while (0) +/* + * This enum specifies a mode in which we want the FPU to operate, for cores + * which implement the Status.FR bit. Note that FPU_32BIT & FPU_64BIT + * purposefully have the values 0 & 1 respectively, so that an integer value + * of Status.FR can be trivially casted to the corresponding enum fpu_mode. 
+ */ +enum fpu_mode { +	FPU_32BIT = 0,		/* FR = 0 */ +	FPU_64BIT,		/* FR = 1 */ +	FPU_AS_IS, +}; + +static inline int __enable_fpu(enum fpu_mode mode) +{ +	int fr; + +	switch (mode) { +	case FPU_AS_IS: +		/* just enable the FPU in its current mode */ +		set_c0_status(ST0_CU1); +		enable_fpu_hazard(); +		return 0; + +	case FPU_64BIT: +#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) +		/* we only have a 32-bit FPU */ +		return SIGFPE; +#endif +		/* fall through */ +	case FPU_32BIT: +		/* set CU1 & change FR appropriately */ +		fr = (int)mode; +		change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0)); +		enable_fpu_hazard(); + +		/* check FR has the desired value */ +		return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE; + +	default: +		BUG(); +	} + +	return SIGFPE; +}  #define __disable_fpu()							\  do {									\ @@ -45,19 +84,6 @@ do {									\  	disable_fpu_hazard();						\  } while (0) -#define enable_fpu()							\ -do {									\ -	if (cpu_has_fpu)						\ -		__enable_fpu();						\ -} while (0) - -#define disable_fpu()							\ -do {									\ -	if (cpu_has_fpu)						\ -		__disable_fpu();					\ -} while (0) - -  #define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)  static inline int __is_fpu_owner(void) @@ -70,27 +96,46 @@ static inline int is_fpu_owner(void)  	return cpu_has_fpu && __is_fpu_owner();  } -static inline void __own_fpu(void) +static inline int __own_fpu(void)  { -	__enable_fpu(); +	enum fpu_mode mode; +	int ret; + +	mode = !test_thread_flag(TIF_32BIT_FPREGS); +	ret = __enable_fpu(mode); +	if (ret) +		return ret; +  	KSTK_STATUS(current) |= ST0_CU1; +	if (mode == FPU_64BIT) +		KSTK_STATUS(current) |= ST0_FR; +	else /* mode == FPU_32BIT */ +		KSTK_STATUS(current) &= ~ST0_FR; +  	set_thread_flag(TIF_USEDFPU); +	return 0;  } -static inline void own_fpu_inatomic(int restore) +static inline int own_fpu_inatomic(int restore)  { +	int ret = 0; +  	if (cpu_has_fpu && !__is_fpu_owner()) { -		__own_fpu(); -		if (restore) +		
ret = __own_fpu(); +		if (restore && !ret)  			_restore_fp(current);  	} +	return ret;  } -static inline void own_fpu(int restore) +static inline int own_fpu(int restore)  { +	int ret; +  	preempt_disable(); -	own_fpu_inatomic(restore); +	ret = own_fpu_inatomic(restore);  	preempt_enable(); +	return ret;  }  static inline void lose_fpu(int save) @@ -106,16 +151,22 @@ static inline void lose_fpu(int save)  	preempt_enable();  } -static inline void init_fpu(void) +static inline int init_fpu(void)  { +	int ret = 0; +  	preempt_disable(); +  	if (cpu_has_fpu) { -		__own_fpu(); -		_init_fpu(); -	} else { +		ret = __own_fpu(); +		if (!ret) +			_init_fpu(); +	} else  		fpu_emulator_init_fpu(); -	} +  	preempt_enable(); + +	return ret;  }  static inline void save_fp(struct task_struct *tsk) @@ -130,7 +181,7 @@ static inline void restore_fp(struct task_struct *tsk)  		_restore_fp(tsk);  } -static inline fpureg_t *get_fpu_regs(struct task_struct *tsk) +static inline union fpureg *get_fpu_regs(struct task_struct *tsk)  {  	if (tsk == current) {  		preempt_disable(); diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index 2abb587d5ab..0195745b4b1 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -23,9 +23,12 @@  #ifndef _ASM_FPU_EMULATOR_H  #define _ASM_FPU_EMULATOR_H +#include <linux/sched.h>  #include <asm/break.h> +#include <asm/thread_info.h>  #include <asm/inst.h>  #include <asm/local.h> +#include <asm/processor.h>  #ifdef CONFIG_DEBUG_FS @@ -36,6 +39,11 @@ struct mips_fpu_emulator_stats {  	local_t cp1ops;  	local_t cp1xops;  	local_t errors; +	local_t ieee754_inexact; +	local_t ieee754_underflow; +	local_t ieee754_overflow; +	local_t ieee754_zerodiv; +	local_t ieee754_invalidop;  };  DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); @@ -71,4 +79,17 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,   */  #define BREAK_MATH (0x0000000d | (BRK_MEMU 
<< 16)) +#define SIGNALLING_NAN 0x7ff800007ff80000LL + +static inline void fpu_emulator_init_fpu(void) +{ +	struct task_struct *t = current; +	int i; + +	t->thread.fpu.fcr31 = 0; + +	for (i = 0; i < 32; i++) +		set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN); +} +  #endif /* _ASM_FPU_EMULATOR_H */ diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h index ce35c9af0c2..992aaba603b 100644 --- a/arch/mips/include/asm/ftrace.h +++ b/arch/mips/include/asm/ftrace.h @@ -22,12 +22,12 @@ extern void _mcount(void);  #define safe_load(load, src, dst, error)		\  do {							\  	asm volatile (					\ -		"1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\ -		"   li %[" STR(error) "], 0\n"		\ +		"1: " load " %[tmp_dst], 0(%[tmp_src])\n"	\ +		"   li %[tmp_err], 0\n"			\  		"2:\n"					\  							\  		".section .fixup, \"ax\"\n"		\ -		"3: li %[" STR(error) "], 1\n"		\ +		"3: li %[tmp_err], 1\n"			\  		"   j 2b\n"				\  		".previous\n"				\  							\ @@ -35,8 +35,8 @@ do {							\  		STR(PTR) "\t1b, 3b\n\t"			\  		".previous\n"				\  							\ -		: [dst] "=&r" (dst), [error] "=r" (error)\ -		: [src] "r" (src)			\ +		: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\ +		: [tmp_src] "r" (src)			\  		: "memory"				\  	);						\  } while (0) @@ -44,12 +44,12 @@ do {							\  #define safe_store(store, src, dst, error)	\  do {						\  	asm volatile (				\ -		"1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\ -		"   li %[" STR(error) "], 0\n"	\ +		"1: " store " %[tmp_src], 0(%[tmp_dst])\n"\ +		"   li %[tmp_err], 0\n"		\  		"2:\n"				\  						\  		".section .fixup, \"ax\"\n"	\ -		"3: li %[" STR(error) "], 1\n"	\ +		"3: li %[tmp_err], 1\n"		\  		"   j 2b\n"			\  		".previous\n"			\  						\ @@ -57,8 +57,8 @@ do {						\  		STR(PTR) "\t1b, 3b\n\t"		\  		".previous\n"			\  						\ -		: [error] "=r" (error)		\ -		: [dst] "r" (dst), [src] "r" (src)\ +		: [tmp_err] "=r" (error)	\ +		: [tmp_dst] "r" (dst), [tmp_src] "r" (src)\  		: "memory"			\  	);					\  } while (0) diff 
--git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index 6ea15815d3e..194cda0396a 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -12,6 +12,7 @@  #include <linux/futex.h>  #include <linux/uaccess.h> +#include <asm/asm-eva.h>  #include <asm/barrier.h>  #include <asm/errno.h>  #include <asm/war.h> @@ -22,11 +23,11 @@  		__asm__ __volatile__(					\  		"	.set	push				\n"	\  		"	.set	noat				\n"	\ -		"	.set	mips3				\n"	\ +		"	.set	arch=r4000			\n"	\  		"1:	ll	%1, %4	# __futex_atomic_op	\n"	\  		"	.set	mips0				\n"	\  		"	" insn	"				\n"	\ -		"	.set	mips3				\n"	\ +		"	.set	arch=r4000			\n"	\  		"2:	sc	$1, %2				\n"	\  		"	beqzl	$1, 1b				\n"	\  		__WEAK_LLSC_MB						\ @@ -48,12 +49,12 @@  		__asm__ __volatile__(					\  		"	.set	push				\n"	\  		"	.set	noat				\n"	\ -		"	.set	mips3				\n"	\ -		"1:	ll	%1, %4	# __futex_atomic_op	\n"	\ +		"	.set	arch=r4000			\n"	\ +		"1:	"user_ll("%1", "%4")" # __futex_atomic_op\n"	\  		"	.set	mips0				\n"	\  		"	" insn	"				\n"	\ -		"	.set	mips3				\n"	\ -		"2:	sc	$1, %2				\n"	\ +		"	.set	arch=r4000			\n"	\ +		"2:	"user_sc("$1", "%2")"			\n"	\  		"	beqz	$1, 1b				\n"	\  		__WEAK_LLSC_MB						\  		"3:						\n"	\ @@ -146,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,  		"# futex_atomic_cmpxchg_inatomic			\n"  		"	.set	push					\n"  		"	.set	noat					\n" -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:	ll	%1, %3					\n"  		"	bne	%1, %z4, 3f				\n"  		"	.set	mips0					\n"  		"	move	$1, %z5					\n" -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"2:	sc	$1, %2					\n"  		"	beqzl	$1, 1b					\n"  		__WEAK_LLSC_MB @@ -173,13 +174,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,  		"# futex_atomic_cmpxchg_inatomic			\n"  		"	.set	push					\n"  		"	.set	noat					\n" -		"	.set	mips3					\n" -		"1:	ll	%1, %3					\n" +		"	.set	arch=r4000				\n" +		"1:	"user_ll("%1", "%3")"				\n"  		"	bne	%1, %z4, 3f				\n"  		"	.set	mips0					\n"  		"	
move	$1, %z5					\n" -		"	.set	mips3					\n" -		"2:	sc	$1, %2					\n" +		"	.set	arch=r4000				\n" +		"2:	"user_sc("$1", "%2")"				\n"  		"	beqz	$1, 1b					\n"  		__WEAK_LLSC_MB  		"3:							\n" diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h index d6c50a7e9ed..f3e6978aad7 100644 --- a/arch/mips/include/asm/fw/fw.h +++ b/arch/mips/include/asm/fw/fw.h @@ -38,7 +38,7 @@ extern int *_fw_envp;  extern void fw_init_cmdline(void);  extern char *fw_getcmdline(void); -extern fw_memblock_t *fw_getmdesc(void); +extern fw_memblock_t *fw_getmdesc(int);  extern void fw_meminit(void);  extern char *fw_getenv(char *name);  extern unsigned long fw_getenvl(char *name); diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h deleted file mode 100644 index a7359f77a48..00000000000 --- a/arch/mips/include/asm/gcmpregs.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License.  See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2000, 07 MIPS Technologies, Inc. 
- * - * Multiprocessor Subsystem Register Definitions - * - */ -#ifndef _ASM_GCMPREGS_H -#define _ASM_GCMPREGS_H - - -/* Offsets to major blocks within GCMP from GCMP base */ -#define GCMP_GCB_OFS		0x0000 /* Global Control Block */ -#define GCMP_CLCB_OFS		0x2000 /* Core Local Control Block */ -#define GCMP_COCB_OFS		0x4000 /* Core Other Control Block */ -#define GCMP_GDB_OFS		0x8000 /* Global Debug Block */ - -/* Offsets to individual GCMP registers from GCMP base */ -#define GCMPOFS(block, tag, reg)	\ -	(GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS) -#define GCMPOFSn(block, tag, reg, n) \ -	(GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS(n)) - -#define GCMPGCBOFS(reg)		GCMPOFS(GCB, GCB, reg) -#define GCMPGCBOFSn(reg, n)	GCMPOFSn(GCB, GCB, reg, n) -#define GCMPCLCBOFS(reg)	GCMPOFS(CLCB, CCB, reg) -#define GCMPCOCBOFS(reg)	GCMPOFS(COCB, CCB, reg) -#define GCMPGDBOFS(reg)		GCMPOFS(GDB, GDB, reg) - -/* GCMP register access */ -#define GCMPGCB(reg)			REGP(_gcmp_base, GCMPGCBOFS(reg)) -#define GCMPGCBn(reg, n)	       REGP(_gcmp_base, GCMPGCBOFSn(reg, n)) -#define GCMPCLCB(reg)			REGP(_gcmp_base, GCMPCLCBOFS(reg)) -#define GCMPCOCB(reg)			REGP(_gcmp_base, GCMPCOCBOFS(reg)) -#define GCMPGDB(reg)			REGP(_gcmp_base, GCMPGDBOFS(reg)) - -/* Mask generation */ -#define GCMPMSK(block, reg, bits)	(MSK(bits)<<GCMP_##block##_##reg##_SHF) -#define GCMPGCBMSK(reg, bits)		GCMPMSK(GCB, reg, bits) -#define GCMPCCBMSK(reg, bits)		GCMPMSK(CCB, reg, bits) -#define GCMPGDBMSK(reg, bits)		GCMPMSK(GDB, reg, bits) - -/* GCB registers */ -#define GCMP_GCB_GC_OFS			0x0000	/* Global Config Register */ -#define	 GCMP_GCB_GC_NUMIOCU_SHF	8 -#define	 GCMP_GCB_GC_NUMIOCU_MSK	GCMPGCBMSK(GC_NUMIOCU, 4) -#define	 GCMP_GCB_GC_NUMCORES_SHF	0 -#define	 GCMP_GCB_GC_NUMCORES_MSK	GCMPGCBMSK(GC_NUMCORES, 8) -#define GCMP_GCB_GCMPB_OFS		0x0008		/* Global GCMP Base */ -#define	 GCMP_GCB_GCMPB_GCMPBASE_SHF	15 -#define	 GCMP_GCB_GCMPB_GCMPBASE_MSK	GCMPGCBMSK(GCMPB_GCMPBASE, 17) -#define	 
GCMP_GCB_GCMPB_CMDEFTGT_SHF	0 -#define	 GCMP_GCB_GCMPB_CMDEFTGT_MSK	GCMPGCBMSK(GCMPB_CMDEFTGT, 2) -#define	 GCMP_GCB_GCMPB_CMDEFTGT_DISABLED	0 -#define	 GCMP_GCB_GCMPB_CMDEFTGT_MEM		1 -#define	 GCMP_GCB_GCMPB_CMDEFTGT_IOCU1		2 -#define	 GCMP_GCB_GCMPB_CMDEFTGT_IOCU2		3 -#define GCMP_GCB_CCMC_OFS		0x0010	/* Global CM Control */ -#define GCMP_GCB_GCSRAP_OFS		0x0020	/* Global CSR Access Privilege */ -#define	 GCMP_GCB_GCSRAP_CMACCESS_SHF	0 -#define	 GCMP_GCB_GCSRAP_CMACCESS_MSK	GCMPGCBMSK(GCSRAP_CMACCESS, 8) -#define GCMP_GCB_GCMPREV_OFS		0x0030	/* GCMP Revision Register */ -#define GCMP_GCB_GCMEM_OFS		0x0040	/* Global CM Error Mask */ -#define GCMP_GCB_GCMEC_OFS		0x0048	/* Global CM Error Cause */ -#define	 GCMP_GCB_GMEC_ERROR_TYPE_SHF	27 -#define	 GCMP_GCB_GMEC_ERROR_TYPE_MSK	GCMPGCBMSK(GMEC_ERROR_TYPE, 5) -#define	 GCMP_GCB_GMEC_ERROR_INFO_SHF	0 -#define	 GCMP_GCB_GMEC_ERROR_INFO_MSK	GCMPGCBMSK(GMEC_ERROR_INFO, 27) -#define GCMP_GCB_GCMEA_OFS		0x0050	/* Global CM Error Address */ -#define GCMP_GCB_GCMEO_OFS		0x0058	/* Global CM Error Multiple */ -#define	 GCMP_GCB_GMEO_ERROR_2ND_SHF	0 -#define	 GCMP_GCB_GMEO_ERROR_2ND_MSK	GCMPGCBMSK(GMEO_ERROR_2ND, 5) -#define GCMP_GCB_GICBA_OFS		0x0080	/* Global Interrupt Controller Base Address */ -#define	 GCMP_GCB_GICBA_BASE_SHF	17 -#define	 GCMP_GCB_GICBA_BASE_MSK	GCMPGCBMSK(GICBA_BASE, 15) -#define	 GCMP_GCB_GICBA_EN_SHF		0 -#define	 GCMP_GCB_GICBA_EN_MSK		GCMPGCBMSK(GICBA_EN, 1) - -/* GCB Regions */ -#define GCMP_GCB_CMxBASE_OFS(n)		(0x0090+16*(n))		/* Global Region[0-3] Base Address */ -#define	 GCMP_GCB_CMxBASE_BASE_SHF	16 -#define	 GCMP_GCB_CMxBASE_BASE_MSK	GCMPGCBMSK(CMxBASE_BASE, 16) -#define GCMP_GCB_CMxMASK_OFS(n)		(0x0098+16*(n))		/* Global Region[0-3] Address Mask */ -#define	 GCMP_GCB_CMxMASK_MASK_SHF	16 -#define	 GCMP_GCB_CMxMASK_MASK_MSK	GCMPGCBMSK(CMxMASK_MASK, 16) -#define	 GCMP_GCB_CMxMASK_CMREGTGT_SHF	0 -#define	 GCMP_GCB_CMxMASK_CMREGTGT_MSK	GCMPGCBMSK(CMxMASK_CMREGTGT, 2) -#define	 
GCMP_GCB_CMxMASK_CMREGTGT_MEM	 0 -#define	 GCMP_GCB_CMxMASK_CMREGTGT_MEM1	 1 -#define	 GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2 -#define	 GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3 - - -/* Core local/Core other control block registers */ -#define GCMP_CCB_RESETR_OFS		0x0000			/* Reset Release */ -#define	 GCMP_CCB_RESETR_INRESET_SHF	0 -#define	 GCMP_CCB_RESETR_INRESET_MSK	GCMPCCBMSK(RESETR_INRESET, 16) -#define GCMP_CCB_COHCTL_OFS		0x0008			/* Coherence Control */ -#define	 GCMP_CCB_COHCTL_DOMAIN_SHF	0 -#define	 GCMP_CCB_COHCTL_DOMAIN_MSK	GCMPCCBMSK(COHCTL_DOMAIN, 8) -#define GCMP_CCB_CFG_OFS		0x0010			/* Config */ -#define	 GCMP_CCB_CFG_IOCUTYPE_SHF	10 -#define	 GCMP_CCB_CFG_IOCUTYPE_MSK	GCMPCCBMSK(CFG_IOCUTYPE, 2) -#define	  GCMP_CCB_CFG_IOCUTYPE_CPU	0 -#define	  GCMP_CCB_CFG_IOCUTYPE_NCIOCU	1 -#define	  GCMP_CCB_CFG_IOCUTYPE_CIOCU	2 -#define	 GCMP_CCB_CFG_NUMVPE_SHF	0 -#define	 GCMP_CCB_CFG_NUMVPE_MSK	GCMPCCBMSK(CFG_NUMVPE, 10) -#define GCMP_CCB_OTHER_OFS		0x0018		/* Other Address */ -#define	 GCMP_CCB_OTHER_CORENUM_SHF	16 -#define	 GCMP_CCB_OTHER_CORENUM_MSK	GCMPCCBMSK(OTHER_CORENUM, 16) -#define GCMP_CCB_RESETBASE_OFS		0x0020		/* Reset Exception Base */ -#define	 GCMP_CCB_RESETBASE_BEV_SHF	12 -#define	 GCMP_CCB_RESETBASE_BEV_MSK	GCMPCCBMSK(RESETBASE_BEV, 20) -#define GCMP_CCB_ID_OFS			0x0028		/* Identification */ -#define GCMP_CCB_DINTGROUP_OFS		0x0030		/* DINT Group Participate */ -#define GCMP_CCB_DBGGROUP_OFS		0x0100		/* DebugBreak Group */ - -extern int __init gcmp_probe(unsigned long, unsigned long); -extern int __init gcmp_niocu(void); -extern void __init gcmp_setregion(int, unsigned long, unsigned long, int); -#endif /* _ASM_GCMPREGS_H */ diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index b2e3e93dd7d..10f6a99f92c 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h @@ -11,6 +11,9 @@  #ifndef _ASM_GICREGS_H  #define _ASM_GICREGS_H +#include <linux/bitmap.h> +#include <linux/threads.h> +  #undef	GICISBYTELITTLEENDIAN  /* 
Constants */ @@ -377,6 +380,7 @@ extern unsigned int gic_compare_int (void);  extern cycle_t gic_read_count(void);  extern cycle_t gic_read_compare(void);  extern void gic_write_compare(cycle_t cnt); +extern void gic_write_cpu_compare(cycle_t cnt, int cpu);  extern void gic_send_ipi(unsigned int intr);  extern unsigned int plat_ipi_call_int_xlate(unsigned int);  extern unsigned int plat_ipi_resched_int_xlate(unsigned int); diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h index 0878701712f..4be1a57cdbb 100644 --- a/arch/mips/include/asm/gio_device.h +++ b/arch/mips/include/asm/gio_device.h @@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)  extern int gio_register_driver(struct gio_driver *);  extern void gio_unregister_driver(struct gio_driver *); -#define gio_get_drvdata(_dev)	     drv_get_drvdata(&(_dev)->dev) -#define gio_set_drvdata(_dev, data)  drv_set_drvdata(&(_dev)->dev, (data)) +#define gio_get_drvdata(_dev)	     dev_get_drvdata(&(_dev)->dev) +#define gio_set_drvdata(_dev, data)  dev_set_drvdata(&(_dev)->dev, (data))  extern void gio_set_master(struct gio_device *); diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h index b0dd0c84df7..572e63ec2a3 100644 --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h @@ -19,7 +19,6 @@  #ifdef __KERNEL__ -#include <linux/init.h>  #include <linux/interrupt.h>  #include <linux/uaccess.h>  #include <asm/kmap_types.h> diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h index d192158886b..d9f932de80e 100644 --- a/arch/mips/include/asm/idle.h +++ b/arch/mips/include/asm/idle.h @@ -1,6 +1,7 @@  #ifndef __ASM_IDLE_H  #define __ASM_IDLE_H +#include <linux/cpuidle.h>  #include <linux/linkage.h>  extern void (*cpu_wait)(void); @@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)  	       addr < (unsigned long)__pastwait;  } +extern int 
mips_cpuidle_wait_enter(struct cpuidle_device *dev, +				   struct cpuidle_driver *drv, int index); + +#define MIPS_CPUIDLE_WAIT_STATE {\ +	.enter			= mips_cpuidle_wait_enter,\ +	.exit_latency		= 1,\ +	.target_residency	= 1,\ +	.power_usage		= UINT_MAX,\ +	.flags			= CPUIDLE_FLAG_TIME_VALID,\ +	.name			= "wait",\ +	.desc			= "MIPS wait",\ +} +  #endif /* __ASM_IDLE_H  */ diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 3321dd5a887..933b50e125a 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val,				\  		if (irq)						\  			local_irq_save(__flags);			\  		__asm__ __volatile__(					\ -			".set	mips3"		"\t\t# __writeq""\n\t"	\ +			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\  			"dsll32 %L0, %L0, 0"			"\n\t"	\  			"dsrl32 %L0, %L0, 0"			"\n\t"	\  			"dsll32 %M0, %M0, 0"			"\n\t"	\ @@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\  		if (irq)						\  			local_irq_save(__flags);			\  		__asm__ __volatile__(					\ -			".set	mips3"		"\t\t# __readq" "\n\t"	\ +			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\  			"ld	%L0, %1"			"\n\t"	\  			"dsra32 %M0, %L0, 0"			"\n\t"	\  			"sll	%L0, %L0, 0"			"\n\t"	\ @@ -584,7 +584,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int   *   * This API used to be exported; it now is for arch code internal use only.   
*/ -#ifdef CONFIG_DMA_NONCOHERENT +#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)  extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);  extern void (*_dma_cache_wback)(unsigned long start, unsigned long size); @@ -603,7 +603,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);  #define dma_cache_inv(start,size)	\  	do { (void) (start); (void) (size); } while (0) -#endif /* CONFIG_DMA_NONCOHERENT */ +#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */  /*   * Read a 32-bit register that requires a 64-bit read cycle on the bus. diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 7bc2cdb3505..ae1f7b24dd1 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)  #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */  #endif -#ifdef CONFIG_MIPS_MT_SMTC - -struct irqaction; - -extern unsigned long irq_hwmask[]; -extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, -			  unsigned long hwmask); - -static inline void smtc_im_ack_irq(unsigned int irq) -{ -	if (irq_hwmask[irq] & ST0_IM) -		set_c0_status(irq_hwmask[irq] & ST0_IM); -} - -#else - -static inline void smtc_im_ack_irq(unsigned int irq) -{ -} - -#endif /* CONFIG_MIPS_MT_SMTC */ - -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF -#include <linux/cpumask.h> - -extern int plat_set_irq_affinity(struct irq_data *d, -				 const struct cpumask *affinity, bool force); -extern void smtc_forward_irq(struct irq_data *d); - -/* - * IRQ affinity hook invoked at the beginning of interrupt dispatch - * if option is enabled. - * - * Up through Linux 2.6.22 (at least) cpumask operations are very - * inefficient on MIPS.	 Initial prototypes of SMTC IRQ affinity - * used a "fast path" per-IRQ-descriptor cache of affinity information - * to reduce latency.  
As there is a project afoot to optimize the - * cpumask implementations, this version is optimistically assuming - * that cpumask.h macro overhead is reasonable during interrupt dispatch. - */ -static inline int handle_on_other_cpu(unsigned int irq) -{ -	struct irq_data *d = irq_get_irq_data(irq); - -	if (cpumask_test_cpu(smp_processor_id(), d->affinity)) -		return 0; -	smtc_forward_irq(d); -	return 1; -} - -#else /* Not doing SMTC affinity */ - -static inline int handle_on_other_cpu(unsigned int irq) { return 0; } - -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ - -#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP - -static inline void smtc_im_backstop(unsigned int irq) -{ -	if (irq_hwmask[irq] & 0x0000ff00) -		write_c0_tccontext(read_c0_tccontext() & -				   ~(irq_hwmask[irq] & 0x0000ff00)); -} - -/* - * Clear interrupt mask handling "backstop" if irq_hwmask - * entry so indicates. This implies that the ack() or end() - * functions will take over re-enabling the low-level mask. - * Otherwise it will be done on return from exception. 
- */ -static inline int smtc_handle_on_other_cpu(unsigned int irq) -{ -	int ret = handle_on_other_cpu(irq); - -	if (!ret) -		smtc_im_backstop(irq); -	return ret; -} - -#else - -static inline void smtc_im_backstop(unsigned int irq) { } -static inline int smtc_handle_on_other_cpu(unsigned int irq) -{ -	return handle_on_other_cpu(irq); -} - -#endif -  extern void do_IRQ(unsigned int irq); -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF - -extern void do_IRQ_no_affinity(unsigned int irq); - -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ -  extern void arch_init_irq(void);  extern void spurious_interrupt(void); diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 45c00951888..0fa5fdcd1f0 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -17,7 +17,7 @@  #include <linux/stringify.h>  #include <asm/hazards.h> -#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) +#ifdef CONFIG_CPU_MIPSR2  static inline void arch_local_irq_disable(void)  { @@ -118,30 +118,15 @@ void arch_local_irq_disable(void);  unsigned long arch_local_irq_save(void);  void arch_local_irq_restore(unsigned long flags);  void __arch_local_irq_restore(unsigned long flags); -#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ - - -extern void smtc_ipi_replay(void); +#endif /* CONFIG_CPU_MIPSR2 */  static inline void arch_local_irq_enable(void)  { -#ifdef CONFIG_MIPS_MT_SMTC -	/* -	 * SMTC kernel needs to do a software replay of queued -	 * IPIs, at the cost of call overhead on each local_irq_enable() -	 */ -	smtc_ipi_replay(); -#endif  	__asm__ __volatile__(  	"	.set	push						\n"  	"	.set	reorder						\n"  	"	.set	noat						\n" -#ifdef CONFIG_MIPS_MT_SMTC -	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n" -	"	ori	$1, 0x400					\n" -	"	xori	$1, 0x400					\n" -	"	mtc0	$1, $2, 1					\n" -#elif defined(CONFIG_CPU_MIPSR2) +#if   defined(CONFIG_CPU_MIPSR2)  	"	ei							\n"  #else  	"	mfc0	$1,$12						\n" @@ -163,11 +148,7 @@ 
static inline unsigned long arch_local_save_flags(void)  	asm __volatile__(  	"	.set	push						\n"  	"	.set	reorder						\n" -#ifdef CONFIG_MIPS_MT_SMTC -	"	mfc0	%[flags], $2, 1					\n" -#else  	"	mfc0	%[flags], $12					\n" -#endif  	"	.set	pop						\n"  	: [flags] "=r" (flags)); @@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)  static inline int arch_irqs_disabled_flags(unsigned long flags)  { -#ifdef CONFIG_MIPS_MT_SMTC -	/* -	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU -	 */ -	return flags & 0x400; -#else  	return !(flags & 1); -#endif  }  #endif /* #ifndef __ASSEMBLY__ */ diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 4d6d77ed9b9..e194f957ca8 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -22,7 +22,7 @@  static __always_inline bool arch_static_branch(struct static_key *key)  { -	asm goto("1:\tnop\n\t" +	asm_volatile_goto("1:\tnop\n\t"  		"nop\n\t"  		".pushsection __jump_table,  \"aw\"\n\t"  		WORD_INSN " 1b, %l[l_yes], %0\n\t" diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 4d6fa0bf130..b0aa9556575 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -19,6 +19,38 @@  #include <linux/threads.h>  #include <linux/spinlock.h> +/* MIPS KVM register ids */ +#define MIPS_CP0_32(_R, _S)					\ +	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) + +#define MIPS_CP0_64(_R, _S)					\ +	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) + +#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0) +#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0) +#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0) +#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0) +#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2) +#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0) +#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1) +#define 
KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0) +#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0) +#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0) +#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0) +#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0) +#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0) +#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0) +#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0) +#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0) +#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1) +#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0) +#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1) +#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2) +#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3) +#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7) +#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0) +#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0) +  #define KVM_MAX_VCPUS		1  #define KVM_USER_MEM_SLOTS	8 @@ -27,26 +59,19 @@  #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -/* Don't support huge pages */ -#define KVM_HPAGE_GFN_SHIFT(x)	0 - -/* We don't currently support large pages. 
*/ -#define KVM_NR_PAGE_SIZES	1 -#define KVM_PAGES_PER_HPAGE(x)	1 -  /* Special address that contains the comm page, used for reducing # of traps */ -#define KVM_GUEST_COMMPAGE_ADDR     0x0 +#define KVM_GUEST_COMMPAGE_ADDR		0x0  #define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \  					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) -#define KVM_GUEST_KUSEG             0x00000000UL -#define KVM_GUEST_KSEG0             0x40000000UL -#define KVM_GUEST_KSEG23            0x60000000UL -#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000) -#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff) +#define KVM_GUEST_KUSEG			0x00000000UL +#define KVM_GUEST_KSEG0			0x40000000UL +#define KVM_GUEST_KSEG23		0x60000000UL +#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0x60000000) +#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)  #define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)  #define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) @@ -59,17 +84,17 @@  #define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)  #define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) -#define KVM_INVALID_PAGE            0xdeadbeef -#define KVM_INVALID_INST            0xdeadbeef -#define KVM_INVALID_ADDR            0xdeadbeef +#define KVM_INVALID_PAGE		0xdeadbeef +#define KVM_INVALID_INST		0xdeadbeef +#define KVM_INVALID_ADDR		0xdeadbeef -#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL +#define KVM_MALTA_GUEST_RTC_ADDR	0xb8000070UL -#define GUEST_TICKS_PER_JIFFY (40000000/HZ) -#define MS_TO_NS(x) (x * 1E6L) +#define GUEST_TICKS_PER_JIFFY		(40000000/HZ) +#define MS_TO_NS(x)			(x * 1E6L) -#define CAUSEB_DC       27 -#define CAUSEF_DC       (_ULCAST_(1)   << 27) +#define CAUSEB_DC			27 +#define CAUSEF_DC			(_ULCAST_(1) << 27)  struct kvm;  struct kvm_run; @@ -133,8 +158,8 @@ struct kvm_arch {  	int commpage_tlb; 
 }; -#define N_MIPS_COPROC_REGS      32 -#define N_MIPS_COPROC_SEL   	8 +#define N_MIPS_COPROC_REGS	32 +#define N_MIPS_COPROC_SEL	8  struct mips_coproc {  	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; @@ -146,124 +171,124 @@ struct mips_coproc {  /*   * Coprocessor 0 register names   */ -#define	MIPS_CP0_TLB_INDEX	    0 -#define	MIPS_CP0_TLB_RANDOM	    1 -#define	MIPS_CP0_TLB_LOW	    2 -#define	MIPS_CP0_TLB_LO0	    2 -#define	MIPS_CP0_TLB_LO1	    3 -#define	MIPS_CP0_TLB_CONTEXT	4 -#define	MIPS_CP0_TLB_PG_MASK	5 -#define	MIPS_CP0_TLB_WIRED	    6 -#define	MIPS_CP0_HWRENA 	    7 -#define	MIPS_CP0_BAD_VADDR	    8 -#define	MIPS_CP0_COUNT	        9 -#define	MIPS_CP0_TLB_HI	        10 -#define	MIPS_CP0_COMPARE	    11 -#define	MIPS_CP0_STATUS	        12 -#define	MIPS_CP0_CAUSE	        13 -#define	MIPS_CP0_EXC_PC	        14 -#define	MIPS_CP0_PRID		    15 -#define	MIPS_CP0_CONFIG	        16 -#define	MIPS_CP0_LLADDR	        17 -#define	MIPS_CP0_WATCH_LO	    18 -#define	MIPS_CP0_WATCH_HI	    19 -#define	MIPS_CP0_TLB_XCONTEXT   20 -#define	MIPS_CP0_ECC		    26 -#define	MIPS_CP0_CACHE_ERR	    27 -#define	MIPS_CP0_TAG_LO	        28 -#define	MIPS_CP0_TAG_HI	        29 -#define	MIPS_CP0_ERROR_PC	    30 -#define	MIPS_CP0_DEBUG	        23 -#define	MIPS_CP0_DEPC		    24 -#define	MIPS_CP0_PERFCNT	    25 -#define	MIPS_CP0_ERRCTL         26 -#define	MIPS_CP0_DATA_LO	    28 -#define	MIPS_CP0_DATA_HI	    29 -#define	MIPS_CP0_DESAVE	        31 - -#define MIPS_CP0_CONFIG_SEL	    0 -#define MIPS_CP0_CONFIG1_SEL    1 -#define MIPS_CP0_CONFIG2_SEL    2 -#define MIPS_CP0_CONFIG3_SEL    3 +#define MIPS_CP0_TLB_INDEX	0 +#define MIPS_CP0_TLB_RANDOM	1 +#define MIPS_CP0_TLB_LOW	2 +#define MIPS_CP0_TLB_LO0	2 +#define MIPS_CP0_TLB_LO1	3 +#define MIPS_CP0_TLB_CONTEXT	4 +#define MIPS_CP0_TLB_PG_MASK	5 +#define MIPS_CP0_TLB_WIRED	6 +#define MIPS_CP0_HWRENA		7 +#define MIPS_CP0_BAD_VADDR	8 +#define MIPS_CP0_COUNT		9 +#define MIPS_CP0_TLB_HI		10 +#define MIPS_CP0_COMPARE	11 +#define 
MIPS_CP0_STATUS		12 +#define MIPS_CP0_CAUSE		13 +#define MIPS_CP0_EXC_PC		14 +#define MIPS_CP0_PRID		15 +#define MIPS_CP0_CONFIG		16 +#define MIPS_CP0_LLADDR		17 +#define MIPS_CP0_WATCH_LO	18 +#define MIPS_CP0_WATCH_HI	19 +#define MIPS_CP0_TLB_XCONTEXT	20 +#define MIPS_CP0_ECC		26 +#define MIPS_CP0_CACHE_ERR	27 +#define MIPS_CP0_TAG_LO		28 +#define MIPS_CP0_TAG_HI		29 +#define MIPS_CP0_ERROR_PC	30 +#define MIPS_CP0_DEBUG		23 +#define MIPS_CP0_DEPC		24 +#define MIPS_CP0_PERFCNT	25 +#define MIPS_CP0_ERRCTL		26 +#define MIPS_CP0_DATA_LO	28 +#define MIPS_CP0_DATA_HI	29 +#define MIPS_CP0_DESAVE		31 + +#define MIPS_CP0_CONFIG_SEL	0 +#define MIPS_CP0_CONFIG1_SEL	1 +#define MIPS_CP0_CONFIG2_SEL	2 +#define MIPS_CP0_CONFIG3_SEL	3  /* Config0 register bits */ -#define CP0C0_M    31 -#define CP0C0_K23  28 -#define CP0C0_KU   25 -#define CP0C0_MDU  20 -#define CP0C0_MM   17 -#define CP0C0_BM   16 -#define CP0C0_BE   15 -#define CP0C0_AT   13 -#define CP0C0_AR   10 -#define CP0C0_MT   7 -#define CP0C0_VI   3 -#define CP0C0_K0   0 +#define CP0C0_M			31 +#define CP0C0_K23		28 +#define CP0C0_KU		25 +#define CP0C0_MDU		20 +#define CP0C0_MM		17 +#define CP0C0_BM		16 +#define CP0C0_BE		15 +#define CP0C0_AT		13 +#define CP0C0_AR		10 +#define CP0C0_MT		7 +#define CP0C0_VI		3 +#define CP0C0_K0		0  /* Config1 register bits */ -#define CP0C1_M    31 -#define CP0C1_MMU  25 -#define CP0C1_IS   22 -#define CP0C1_IL   19 -#define CP0C1_IA   16 -#define CP0C1_DS   13 -#define CP0C1_DL   10 -#define CP0C1_DA   7 -#define CP0C1_C2   6 -#define CP0C1_MD   5 -#define CP0C1_PC   4 -#define CP0C1_WR   3 -#define CP0C1_CA   2 -#define CP0C1_EP   1 -#define CP0C1_FP   0 +#define CP0C1_M			31 +#define CP0C1_MMU		25 +#define CP0C1_IS		22 +#define CP0C1_IL		19 +#define CP0C1_IA		16 +#define CP0C1_DS		13 +#define CP0C1_DL		10 +#define CP0C1_DA		7 +#define CP0C1_C2		6 +#define CP0C1_MD		5 +#define CP0C1_PC		4 +#define CP0C1_WR		3 +#define CP0C1_CA		2 +#define CP0C1_EP		1 +#define CP0C1_FP		0  /* Config2 
Register bits */ -#define CP0C2_M    31 -#define CP0C2_TU   28 -#define CP0C2_TS   24 -#define CP0C2_TL   20 -#define CP0C2_TA   16 -#define CP0C2_SU   12 -#define CP0C2_SS   8 -#define CP0C2_SL   4 -#define CP0C2_SA   0 +#define CP0C2_M			31 +#define CP0C2_TU		28 +#define CP0C2_TS		24 +#define CP0C2_TL		20 +#define CP0C2_TA		16 +#define CP0C2_SU		12 +#define CP0C2_SS		8 +#define CP0C2_SL		4 +#define CP0C2_SA		0  /* Config3 Register bits */ -#define CP0C3_M    31 -#define CP0C3_ISA_ON_EXC 16 -#define CP0C3_ULRI  13 -#define CP0C3_DSPP 10 -#define CP0C3_LPA  7 -#define CP0C3_VEIC 6 -#define CP0C3_VInt 5 -#define CP0C3_SP   4 -#define CP0C3_MT   2 -#define CP0C3_SM   1 -#define CP0C3_TL   0 +#define CP0C3_M			31 +#define CP0C3_ISA_ON_EXC	16 +#define CP0C3_ULRI		13 +#define CP0C3_DSPP		10 +#define CP0C3_LPA		7 +#define CP0C3_VEIC		6 +#define CP0C3_VInt		5 +#define CP0C3_SP		4 +#define CP0C3_MT		2 +#define CP0C3_SM		1 +#define CP0C3_TL		0  /* Have config1, Cacheable, noncoherent, write-back, write allocate*/ -#define MIPS_CONFIG0                                              \ +#define MIPS_CONFIG0						\    ((1 << CP0C0_M) | (0x3 << CP0C0_K0))  /* Have config2, no coprocessor2 attached, no MDMX support attached,     no performance counters, watch registers present,     no code compression, EJTAG present, no FPU, no watch registers */ -#define MIPS_CONFIG1                                              \ -((1 << CP0C1_M) |                                                 \ - (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \ - (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \ +#define MIPS_CONFIG1						\ +((1 << CP0C1_M) |						\ + (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |		\ + (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |		\   (0 << CP0C1_FP))  /* Have config3, no tertiary/secondary caches implemented */ -#define MIPS_CONFIG2                                              \ +#define MIPS_CONFIG2						\  ((1 << CP0C2_M))  
/* No config4, no DSP ASE, no large physaddr (PABITS),     no external interrupt controller, no vectored interrupts,     no 1kb pages, no SmartMIPS ASE, no trace logic */ -#define MIPS_CONFIG3                                              \ -((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \ - (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \ +#define MIPS_CONFIG3						\ +((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |	\ + (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |	\   (0 << CP0C3_SM) | (0 << CP0C3_TL))  /* MMU types, the first four entries have the same layout as the @@ -281,36 +306,36 @@ enum mips_mmu_types {  /*   * Trap codes   */ -#define T_INT           0	/* Interrupt pending */ -#define T_TLB_MOD       1	/* TLB modified fault */ -#define T_TLB_LD_MISS       2	/* TLB miss on load or ifetch */ -#define T_TLB_ST_MISS       3	/* TLB miss on a store */ -#define T_ADDR_ERR_LD       4	/* Address error on a load or ifetch */ -#define T_ADDR_ERR_ST       5	/* Address error on a store */ -#define T_BUS_ERR_IFETCH    6	/* Bus error on an ifetch */ -#define T_BUS_ERR_LD_ST     7	/* Bus error on a load or store */ -#define T_SYSCALL       8	/* System call */ -#define T_BREAK         9	/* Breakpoint */ -#define T_RES_INST      10	/* Reserved instruction exception */ -#define T_COP_UNUSABLE      11	/* Coprocessor unusable */ -#define T_OVFLOW        12	/* Arithmetic overflow */ +#define T_INT			0	/* Interrupt pending */ +#define T_TLB_MOD		1	/* TLB modified fault */ +#define T_TLB_LD_MISS		2	/* TLB miss on load or ifetch */ +#define T_TLB_ST_MISS		3	/* TLB miss on a store */ +#define T_ADDR_ERR_LD		4	/* Address error on a load or ifetch */ +#define T_ADDR_ERR_ST		5	/* Address error on a store */ +#define T_BUS_ERR_IFETCH	6	/* Bus error on an ifetch */ +#define T_BUS_ERR_LD_ST		7	/* Bus error on a load or store */ +#define T_SYSCALL		8	/* System call */ +#define T_BREAK			9	/* Breakpoint */ +#define T_RES_INST		10	/* 
Reserved instruction exception */ +#define T_COP_UNUSABLE		11	/* Coprocessor unusable */ +#define T_OVFLOW		12	/* Arithmetic overflow */  /*   * Trap definitions added for r4000 port.   */ -#define T_TRAP          13	/* Trap instruction */ -#define T_VCEI          14	/* Virtual coherency exception */ -#define T_FPE           15	/* Floating point exception */ -#define T_WATCH         23	/* Watch address reference */ -#define T_VCED          31	/* Virtual coherency data */ +#define T_TRAP			13	/* Trap instruction */ +#define T_VCEI			14	/* Virtual coherency exception */ +#define T_FPE			15	/* Floating point exception */ +#define T_WATCH			23	/* Watch address reference */ +#define T_VCED			31	/* Virtual coherency data */  /* Resume Flags */ -#define RESUME_FLAG_DR          (1<<0)	/* Reload guest nonvolatile state? */ -#define RESUME_FLAG_HOST        (1<<1)	/* Resume host? */ +#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */ +#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? 
*/ -#define RESUME_GUEST            0 -#define RESUME_GUEST_DR         RESUME_FLAG_DR -#define RESUME_HOST             RESUME_FLAG_HOST +#define RESUME_GUEST		0 +#define RESUME_GUEST_DR		RESUME_FLAG_DR +#define RESUME_HOST		RESUME_FLAG_HOST  enum emulation_result {  	EMULATE_DONE,		/* no further processing */ @@ -320,24 +345,27 @@ enum emulation_result {  	EMULATE_PRIV_FAIL,  }; -#define MIPS3_PG_G  0x00000001	/* Global; ignore ASID if in lo0 & lo1 */ -#define MIPS3_PG_V  0x00000002	/* Valid */ -#define MIPS3_PG_NV 0x00000000 -#define MIPS3_PG_D  0x00000004	/* Dirty */ +#define MIPS3_PG_G	0x00000001 /* Global; ignore ASID if in lo0 & lo1 */ +#define MIPS3_PG_V	0x00000002 /* Valid */ +#define MIPS3_PG_NV	0x00000000 +#define MIPS3_PG_D	0x00000004 /* Dirty */  #define mips3_paddr_to_tlbpfn(x) \ -    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) +	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)  #define mips3_tlbpfn_to_paddr(x) \ -    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) +	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) -#define MIPS3_PG_SHIFT      6 -#define MIPS3_PG_FRAME      0x3fffffc0 +#define MIPS3_PG_SHIFT		6 +#define MIPS3_PG_FRAME		0x3fffffc0 -#define VPN2_MASK           0xffffe000 -#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) -#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK) -#define TLB_ASID(x)         ((x).tlb_hi & ASID_MASK) -#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) +#define VPN2_MASK		0xffffe000 +#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&	\ +				 ((x).tlb_lo1 & MIPS3_PG_G)) +#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK) +#define TLB_ASID(x)		((x).tlb_hi & ASID_MASK) +#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))	\ +				 ? 
((x).tlb_lo1 & MIPS3_PG_V)	\ +				 : ((x).tlb_lo0 & MIPS3_PG_V))  struct kvm_mips_tlb {  	long tlb_mask; @@ -346,7 +374,7 @@ struct kvm_mips_tlb {  	long tlb_lo1;  }; -#define KVM_MIPS_GUEST_TLB_SIZE     64 +#define KVM_MIPS_GUEST_TLB_SIZE	64  struct kvm_vcpu_arch {  	void *host_ebase, *guest_ebase;  	unsigned long host_stack; @@ -376,8 +404,19 @@ struct kvm_vcpu_arch {  	u32 io_gpr;		/* GPR used as IO source/target */ -	/* Used to calibrate the virutal count register for the guest */ -	int32_t host_cp0_count; +	struct hrtimer comparecount_timer; +	/* Count timer control KVM register */ +	uint32_t count_ctl; +	/* Count bias from the raw time */ +	uint32_t count_bias; +	/* Frequency of timer in Hz */ +	uint32_t count_hz; +	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */ +	s64 count_dyn_bias; +	/* Resume time */ +	ktime_t count_resume; +	/* Period of timer tick in ns */ +	u64 count_period;  	/* Bitmask of exceptions that are pending */  	unsigned long pending_exceptions; @@ -398,11 +437,6 @@ struct kvm_vcpu_arch {  	uint32_t guest_kernel_asid[NR_CPUS];  	struct mm_struct guest_kernel_mm, guest_user_mm; -	struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE]; - - -	struct hrtimer comparecount_timer; -  	int last_sched_cpu;  	/* WAIT executed */ @@ -410,92 +444,158 @@ struct kvm_vcpu_arch {  }; -#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0]) -#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) -#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0]) -#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0]) -#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) -#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) -#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) -#define 
kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0]) -#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val)) -#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0]) -#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val)) -#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0]) -#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val)) -#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0]) -#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val)) -#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0]) -#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val)) -#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0]) -#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val)) -#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0]) -#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val)) -#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1]) -#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val)) -#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0]) -#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val)) -#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0]) -#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val)) -#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0]) -#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val)) -#define 
kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1]) -#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val)) -#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0]) -#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1]) -#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2]) -#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3]) -#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7]) -#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) -#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) -#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) -#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) -#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) -#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0]) -#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) - -#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val)) -#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val)) -#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val)) -#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val)) -#define kvm_change_c0_guest_cause(cop0, change, val)  \ -{                                                     \ -    kvm_clear_c0_guest_cause(cop0, change);           \ -    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \ +#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0]) +#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = 
val) +#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0]) +#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0]) +#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) +#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) +#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) +#define kvm_write_c0_guest_userlocal(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val)) +#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0]) +#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val)) +#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0]) +#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val)) +#define kvm_read_c0_guest_hwrena(cop0)		(cop0->reg[MIPS_CP0_HWRENA][0]) +#define kvm_write_c0_guest_hwrena(cop0, val)	(cop0->reg[MIPS_CP0_HWRENA][0] = (val)) +#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0]) +#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val)) +#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0]) +#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val)) +#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0]) +#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val)) +#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0]) +#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val)) +#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0]) +#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val)) +#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1]) +#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val)) +#define kvm_read_c0_guest_cause(cop0)		
(cop0->reg[MIPS_CP0_CAUSE][0]) +#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val)) +#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0]) +#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val)) +#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0]) +#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val)) +#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1]) +#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val)) +#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0]) +#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1]) +#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2]) +#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3]) +#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7]) +#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val)) +#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val)) +#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val)) +#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val)) +#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val)) +#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0]) +#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) + +/* + * Some of the guest registers may be modified asynchronously (e.g. from a + * hrtimer callback in hard irq context) and therefore need stronger atomicity + * guarantees than other registers. 
+ */ + +static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg, +						unsigned long val) +{ +	unsigned long temp; +	do { +		__asm__ __volatile__( +		"	.set	mips3				\n" +		"	" __LL "%0, %1				\n" +		"	or	%0, %2				\n" +		"	" __SC	"%0, %1				\n" +		"	.set	mips0				\n" +		: "=&r" (temp), "+m" (*reg) +		: "r" (val)); +	} while (unlikely(!temp)); +} + +static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg, +						  unsigned long val) +{ +	unsigned long temp; +	do { +		__asm__ __volatile__( +		"	.set	mips3				\n" +		"	" __LL "%0, %1				\n" +		"	and	%0, %2				\n" +		"	" __SC	"%0, %1				\n" +		"	.set	mips0				\n" +		: "=&r" (temp), "+m" (*reg) +		: "r" (~val)); +	} while (unlikely(!temp)); +} + +static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, +						   unsigned long change, +						   unsigned long val) +{ +	unsigned long temp; +	do { +		__asm__ __volatile__( +		"	.set	mips3				\n" +		"	" __LL "%0, %1				\n" +		"	and	%0, %2				\n" +		"	or	%0, %3				\n" +		"	" __SC	"%0, %1				\n" +		"	.set	mips0				\n" +		: "=&r" (temp), "+m" (*reg) +		: "r" (~change), "r" (val & change)); +	} while (unlikely(!temp));  } -#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val)) -#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val)) -#define kvm_change_c0_guest_ebase(cop0, change, val)  \ -{                                                     \ -    kvm_clear_c0_guest_ebase(cop0, change);           \ -    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ + +#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val)) +#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val)) + +/* Cause can be modified asynchronously from hardirq hrtimer callback */ +#define kvm_set_c0_guest_cause(cop0, val)				\ +	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val) +#define kvm_clear_c0_guest_cause(cop0, val)				\ +	
_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val) +#define kvm_change_c0_guest_cause(cop0, change, val)			\ +	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\ +					change, val) + +#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val)) +#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val)) +#define kvm_change_c0_guest_ebase(cop0, change, val)			\ +{									\ +	kvm_clear_c0_guest_ebase(cop0, change);				\ +	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));		\  }  struct kvm_mips_callbacks { -	int (*handle_cop_unusable) (struct kvm_vcpu *vcpu); -	int (*handle_tlb_mod) (struct kvm_vcpu *vcpu); -	int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu); -	int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu); -	int (*handle_addr_err_st) (struct kvm_vcpu *vcpu); -	int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu); -	int (*handle_syscall) (struct kvm_vcpu *vcpu); -	int (*handle_res_inst) (struct kvm_vcpu *vcpu); -	int (*handle_break) (struct kvm_vcpu *vcpu); -	int (*vm_init) (struct kvm *kvm); -	int (*vcpu_init) (struct kvm_vcpu *vcpu); -	int (*vcpu_setup) (struct kvm_vcpu *vcpu); -	 gpa_t(*gva_to_gpa) (gva_t gva); -	void (*queue_timer_int) (struct kvm_vcpu *vcpu); -	void (*dequeue_timer_int) (struct kvm_vcpu *vcpu); -	void (*queue_io_int) (struct kvm_vcpu *vcpu, -			      struct kvm_mips_interrupt *irq); -	void (*dequeue_io_int) (struct kvm_vcpu *vcpu, -				struct kvm_mips_interrupt *irq); -	int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority, -			    uint32_t cause); -	int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, -			  uint32_t cause); +	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); +	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu); +	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu); +	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu); +	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu); +	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu); +	int 
(*handle_syscall)(struct kvm_vcpu *vcpu); +	int (*handle_res_inst)(struct kvm_vcpu *vcpu); +	int (*handle_break)(struct kvm_vcpu *vcpu); +	int (*vm_init)(struct kvm *kvm); +	int (*vcpu_init)(struct kvm_vcpu *vcpu); +	int (*vcpu_setup)(struct kvm_vcpu *vcpu); +	gpa_t (*gva_to_gpa)(gva_t gva); +	void (*queue_timer_int)(struct kvm_vcpu *vcpu); +	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu); +	void (*queue_io_int)(struct kvm_vcpu *vcpu, +			     struct kvm_mips_interrupt *irq); +	void (*dequeue_io_int)(struct kvm_vcpu *vcpu, +			       struct kvm_mips_interrupt *irq); +	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority, +			   uint32_t cause); +	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority, +			 uint32_t cause); +	int (*get_one_reg)(struct kvm_vcpu *vcpu, +			   const struct kvm_one_reg *reg, s64 *v); +	int (*set_one_reg)(struct kvm_vcpu *vcpu, +			   const struct kvm_one_reg *reg, s64 v);  };  extern struct kvm_mips_callbacks *kvm_mips_callbacks;  int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); @@ -536,7 +636,6 @@ extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,  extern void kvm_mips_dump_host_tlbs(void);  extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); -extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);  extern void kvm_mips_flush_host_tlb(int skip_kseg0);  extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);  extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index); @@ -548,10 +647,7 @@ extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu  						   unsigned long gva);  extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,  				    struct kvm_vcpu *vcpu); -extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu); -extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);  extern void kvm_local_flush_tlb_all(void); -extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu 
*vcpu);  extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);  extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);  extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu); @@ -618,7 +714,16 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,  extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,  							 struct kvm_run *run); -enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu); +uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu); +void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count); +void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare); +void kvm_mips_init_count(struct kvm_vcpu *vcpu); +int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl); +int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume); +int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz); +void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu); +void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu); +enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);  enum emulation_result kvm_mips_check_privilege(unsigned long cause,  					       uint32_t *opc, @@ -655,7 +760,6 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,  			       struct kvm_vcpu *vcpu);  /* Misc */ -extern void mips32_SyncICache(unsigned long addr, unsigned long size);  extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);  extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h new file mode 100644 index 00000000000..5a9aa918abe --- /dev/null +++ b/arch/mips/include/asm/kvm_para.h @@ -0,0 +1,109 @@ +#ifndef _ASM_MIPS_KVM_PARA_H +#define _ASM_MIPS_KVM_PARA_H + +#include <uapi/asm/kvm_para.h> + +#define KVM_HYPERCALL ".word 0x42000028" + +/* + * Hypercalls for KVM. + * + * Hypercall number is passed in v0. + * Return value will be placed in v0. 
+ * Up to 3 arguments are passed in a0, a1, and a2. + */ +static inline unsigned long kvm_hypercall0(unsigned long num) +{ +	register unsigned long n asm("v0"); +	register unsigned long r asm("v0"); + +	n = num; +	__asm__ __volatile__( +		KVM_HYPERCALL +		: "=r" (r) : "r" (n) : "memory" +		); + +	return r; +} + +static inline unsigned long kvm_hypercall1(unsigned long num, +					unsigned long arg0) +{ +	register unsigned long n asm("v0"); +	register unsigned long r asm("v0"); +	register unsigned long a0 asm("a0"); + +	n = num; +	a0 = arg0; +	__asm__ __volatile__( +		KVM_HYPERCALL +		: "=r" (r) : "r" (n), "r" (a0) : "memory" +		); + +	return r; +} + +static inline unsigned long kvm_hypercall2(unsigned long num, +					unsigned long arg0, unsigned long arg1) +{ +	register unsigned long n asm("v0"); +	register unsigned long r asm("v0"); +	register unsigned long a0 asm("a0"); +	register unsigned long a1 asm("a1"); + +	n = num; +	a0 = arg0; +	a1 = arg1; +	__asm__ __volatile__( +		KVM_HYPERCALL +		: "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory" +		); + +	return r; +} + +static inline unsigned long kvm_hypercall3(unsigned long num, +	unsigned long arg0, unsigned long arg1, unsigned long arg2) +{ +	register unsigned long n asm("v0"); +	register unsigned long r asm("v0"); +	register unsigned long a0 asm("a0"); +	register unsigned long a1 asm("a1"); +	register unsigned long a2 asm("a2"); + +	n = num; +	a0 = arg0; +	a1 = arg1; +	a2 = arg2; +	__asm__ __volatile__( +		KVM_HYPERCALL +		: "=r" (r) : "r" (n), "r" (a0), "r" (a1), "r" (a2) : "memory" +		); + +	return r; +} + +static inline bool kvm_check_and_clear_guest_paused(void) +{ +	return false; +} + +static inline unsigned int kvm_arch_para_features(void) +{ +	return 0; +} + +#ifdef CONFIG_MIPS_PARAVIRT +static inline bool kvm_para_available(void) +{ +	return true; +} +#else +static inline bool kvm_para_available(void) +{ +	return false; +} +#endif + + +#endif /* _ASM_MIPS_KVM_PARA_H */ diff --git 
a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index d44622cd74b..46dfc3c1fd4 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:"	__LL	"%1, %2		# local_add_return	\n"  		"	addu	%0, %1, %3				\n"  			__SC	"%0, %2					\n" @@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:"	__LL	"%1, %2		# local_add_return	\n"  		"	addu	%0, %1, %3				\n"  			__SC	"%0, %2					\n" @@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:"	__LL	"%1, %2		# local_sub_return	\n"  		"	subu	%0, %1, %3				\n"  			__SC	"%0, %2					\n" @@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)  		unsigned long temp;  		__asm__ __volatile__( -		"	.set	mips3					\n" +		"	.set	arch=r4000				\n"  		"1:"	__LL	"%1, %2		# local_sub_return	\n"  		"	subu	%0, %1, %3				\n"  			__SC	"%0, %2					\n" diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h index b86a1253a5b..cd41e93bc1d 100644 --- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h @@ -16,7 +16,6 @@  #define __ASM_MACH_AR71XX_REGS_H  #include <linux/types.h> -#include <linux/init.h>  #include <linux/io.h>  #include <linux/bitops.h> diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h b/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h deleted file mode 100644 index 6cb30f2b719..00000000000 --- a/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - *  Platform data definition 
for Atheros AR933X UART - * - *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> - * - *  This program is free software; you can redistribute it and/or modify it - *  under the terms of the GNU General Public License version 2 as published - *  by the Free Software Foundation. - */ - -#ifndef _AR933X_UART_PLATFORM_H -#define _AR933X_UART_PLATFORM_H - -struct ar933x_uart_platform_data { -	unsigned	uartclk; -}; - -#endif /* _AR933X_UART_PLATFORM_H */ diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h index 3e11a468cdf..b4c3ecb17d4 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000.h +++ b/arch/mips/include/asm/mach-au1x00/au1000.h @@ -43,6 +43,8 @@  #include <linux/io.h>  #include <linux/irq.h> +#include <asm/cpu.h> +  /* cpu pipeline flush */  void static inline au_sync(void)  { @@ -140,7 +142,7 @@ static inline int au1xxx_cpu_needs_config_od(void)  static inline int alchemy_get_cputype(void)  { -	switch (read_c0_prid() & 0xffff0000) { +	switch (read_c0_prid() & (PRID_OPT_MASK | PRID_COMP_MASK)) {  	case 0x00030000:  		return ALCHEMY_CPU_AU1000;  		break; @@ -1159,18 +1161,6 @@ enum soc_au1200_ints {  #define MAC_RX_BUFF3_STATUS	0x30  #define MAC_RX_BUFF3_ADDR	0x34 -#define UART_RX		0	/* Receive buffer */ -#define UART_TX		4	/* Transmit buffer */ -#define UART_IER	8	/* Interrupt Enable Register */ -#define UART_IIR	0xC	/* Interrupt ID Register */ -#define UART_FCR	0x10	/* FIFO Control Register */ -#define UART_LCR	0x14	/* Line Control Register */ -#define UART_MCR	0x18	/* Modem Control Register */ -#define UART_LSR	0x1C	/* Line Status Register */ -#define UART_MSR	0x20	/* Modem Status Register */ -#define UART_CLK	0x28	/* Baud Rate Clock Divider */ -#define UART_MOD_CNTRL	0x100	/* Module Control */ -  /* SSIO */  #define SSI0_STATUS		0xB1600000  #  define SSI_STATUS_BF		(1 << 4) diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h index 
cc7563ba1cb..7527c1d33d0 100644 --- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx.h @@ -56,4 +56,6 @@ void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,  				 const char *prefix);  #endif +void bcm47xx_set_system_type(u16 chip_id); +  #endif /* __ASM_BCM47XX_H */ diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h new file mode 100644 index 00000000000..bba7399a49a --- /dev/null +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h @@ -0,0 +1,117 @@ +#ifndef __BCM47XX_BOARD_H +#define __BCM47XX_BOARD_H + +enum bcm47xx_board { +	BCM47XX_BOARD_ASUS_RTAC66U, +	BCM47XX_BOARD_ASUS_RTN10, +	BCM47XX_BOARD_ASUS_RTN10D, +	BCM47XX_BOARD_ASUS_RTN10U, +	BCM47XX_BOARD_ASUS_RTN12, +	BCM47XX_BOARD_ASUS_RTN12B1, +	BCM47XX_BOARD_ASUS_RTN12C1, +	BCM47XX_BOARD_ASUS_RTN12D1, +	BCM47XX_BOARD_ASUS_RTN12HP, +	BCM47XX_BOARD_ASUS_RTN15U, +	BCM47XX_BOARD_ASUS_RTN16, +	BCM47XX_BOARD_ASUS_RTN53, +	BCM47XX_BOARD_ASUS_RTN66U, +	BCM47XX_BOARD_ASUS_WL300G, +	BCM47XX_BOARD_ASUS_WL320GE, +	BCM47XX_BOARD_ASUS_WL330GE, +	BCM47XX_BOARD_ASUS_WL500GD, +	BCM47XX_BOARD_ASUS_WL500GPV1, +	BCM47XX_BOARD_ASUS_WL500GPV2, +	BCM47XX_BOARD_ASUS_WL500W, +	BCM47XX_BOARD_ASUS_WL520GC, +	BCM47XX_BOARD_ASUS_WL520GU, +	BCM47XX_BOARD_ASUS_WL700GE, +	BCM47XX_BOARD_ASUS_WLHDD, + +	BCM47XX_BOARD_BELKIN_F7D3301, +	BCM47XX_BOARD_BELKIN_F7D3302, +	BCM47XX_BOARD_BELKIN_F7D4301, +	BCM47XX_BOARD_BELKIN_F7D4302, +	BCM47XX_BOARD_BELKIN_F7D4401, + +	BCM47XX_BOARD_BUFFALO_WBR2_G54, +	BCM47XX_BOARD_BUFFALO_WHR2_A54G54, +	BCM47XX_BOARD_BUFFALO_WHR_G125, +	BCM47XX_BOARD_BUFFALO_WHR_G54S, +	BCM47XX_BOARD_BUFFALO_WHR_HP_G54, +	BCM47XX_BOARD_BUFFALO_WLA2_G54L, +	BCM47XX_BOARD_BUFFALO_WZR_G300N, +	BCM47XX_BOARD_BUFFALO_WZR_RS_G54, +	BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP, + +	BCM47XX_BOARD_CISCO_M10V1, +	BCM47XX_BOARD_CISCO_M20V1, + +	BCM47XX_BOARD_DELL_TM2300, + +	BCM47XX_BOARD_DLINK_DIR130, +	
BCM47XX_BOARD_DLINK_DIR330, + +	BCM47XX_BOARD_HUAWEI_E970, + +	BCM47XX_BOARD_LINKSYS_E900V1, +	BCM47XX_BOARD_LINKSYS_E1000V1, +	BCM47XX_BOARD_LINKSYS_E1000V2, +	BCM47XX_BOARD_LINKSYS_E1000V21, +	BCM47XX_BOARD_LINKSYS_E1200V2, +	BCM47XX_BOARD_LINKSYS_E2000V1, +	BCM47XX_BOARD_LINKSYS_E3000V1, +	BCM47XX_BOARD_LINKSYS_E3200V1, +	BCM47XX_BOARD_LINKSYS_E4200V1, +	BCM47XX_BOARD_LINKSYS_WRT150NV1, +	BCM47XX_BOARD_LINKSYS_WRT150NV11, +	BCM47XX_BOARD_LINKSYS_WRT160NV1, +	BCM47XX_BOARD_LINKSYS_WRT160NV3, +	BCM47XX_BOARD_LINKSYS_WRT300NV11, +	BCM47XX_BOARD_LINKSYS_WRT310NV1, +	BCM47XX_BOARD_LINKSYS_WRT310NV2, +	BCM47XX_BOARD_LINKSYS_WRT54G3GV2, +	BCM47XX_BOARD_LINKSYS_WRT54G, +	BCM47XX_BOARD_LINKSYS_WRT610NV1, +	BCM47XX_BOARD_LINKSYS_WRT610NV2, +	BCM47XX_BOARD_LINKSYS_WRTSL54GS, + +	BCM47XX_BOARD_MOTOROLA_WE800G, +	BCM47XX_BOARD_MOTOROLA_WR850GP, +	BCM47XX_BOARD_MOTOROLA_WR850GV2V3, + +	BCM47XX_BOARD_NETGEAR_WGR614V8, +	BCM47XX_BOARD_NETGEAR_WGR614V9, +	BCM47XX_BOARD_NETGEAR_WNDR3300, +	BCM47XX_BOARD_NETGEAR_WNDR3400V1, +	BCM47XX_BOARD_NETGEAR_WNDR3400V2, +	BCM47XX_BOARD_NETGEAR_WNDR3400VCNA, +	BCM47XX_BOARD_NETGEAR_WNDR3700V3, +	BCM47XX_BOARD_NETGEAR_WNDR4000, +	BCM47XX_BOARD_NETGEAR_WNDR4500V1, +	BCM47XX_BOARD_NETGEAR_WNDR4500V2, +	BCM47XX_BOARD_NETGEAR_WNR2000, +	BCM47XX_BOARD_NETGEAR_WNR3500L, +	BCM47XX_BOARD_NETGEAR_WNR3500U, +	BCM47XX_BOARD_NETGEAR_WNR3500V2, +	BCM47XX_BOARD_NETGEAR_WNR3500V2VC, +	BCM47XX_BOARD_NETGEAR_WNR834BV2, + +	BCM47XX_BOARD_PHICOMM_M1, + +	BCM47XX_BOARD_SIEMENS_SE505V2, + +	BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE, + +	BCM47XX_BOARD_ZTE_H218N, + +	BCM47XX_BOARD_UNKNOWN, +	BCM47XX_BOARD_NO, +}; + +#define BCM47XX_BOARD_MAX_NAME 30 + +void bcm47xx_board_detect(void); +enum bcm47xx_board bcm47xx_board_get(void); +const char *bcm47xx_board_get_name(void); + +#endif /* __BCM47XX_BOARD_H */ diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h index b8e7be8f34d..36a3fc1aa3a 100644 --- 
a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h @@ -48,4 +48,6 @@ static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])  		printk(KERN_WARNING "Can not parse mac address: %s\n", buf);  } +int bcm47xx_nvram_gpio_pin(const char *name); +  #endif /* __BCM47XX_NVRAM_H */ diff --git a/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h new file mode 100644 index 00000000000..b7992cd4aaf --- /dev/null +++ b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h @@ -0,0 +1,82 @@ +#ifndef __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb			1 +#define cpu_has_4kex			1 +#define cpu_has_3k_cache		0 +#define cpu_has_4k_cache		1 +#define cpu_has_tx39_cache		0 +#define cpu_has_fpu			0 +#define cpu_has_32fpr			0 +#define cpu_has_counter			1 +#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB) +#define cpu_has_watch			1 +#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA) +#define cpu_has_watch			0 +#endif +#define cpu_has_divec			1 +#define cpu_has_vce			0 +#define cpu_has_cache_cdex_p		0 +#define cpu_has_cache_cdex_s		0 +#define cpu_has_prefetch		1 +#define cpu_has_mcheck			1 +#define cpu_has_ejtag			1 +#define cpu_has_llsc			1 + +/* cpu_has_mips16 */ +#define cpu_has_mdmx			0 +#define cpu_has_mips3d			0 +#define cpu_has_rixi			0 +#define cpu_has_mmips			0 +#define cpu_has_smartmips		0 +#define cpu_has_vtag_icache		0 +/* cpu_has_dc_aliases */ +#define cpu_has_ic_fills_f_dc		0 +#define cpu_has_pindexed_dcache		0 +#define cpu_icache_snoops_remote_store	0 + +#define cpu_has_mips_2			1 +#define cpu_has_mips_3			0 +#define cpu_has_mips32r1		1 +#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB) +#define cpu_has_mips32r2		1 +#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA) +#define cpu_has_mips32r2		0 +#endif 
+#define cpu_has_mips64r1		0 +#define cpu_has_mips64r2		0 + +#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB) +#define cpu_has_dsp			1 +#define cpu_has_dsp2			1 +#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA) +#define cpu_has_dsp			0 +#define cpu_has_dsp2			0 +#endif +#define cpu_has_mipsmt			0 +/* cpu_has_userlocal */ + +#define cpu_has_nofpuex			0 +#define cpu_has_64bits			0 +#define cpu_has_64bit_zero_reg		0 +#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB) +#define cpu_has_vint			1 +#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA) +#define cpu_has_vint			0 +#endif +#define cpu_has_veic			0 +#define cpu_has_inclusive_pcaches	0 + +#if defined(CONFIG_BCM47XX_BCMA) && !defined(CONFIG_BCM47XX_SSB) +#define cpu_dcache_line_size()		32 +#define cpu_icache_line_size()		32 +#define cpu_has_perf_cntr_intr_bit	1 +#elif defined(CONFIG_BCM47XX_SSB) && !defined(CONFIG_BCM47XX_BCMA) +#define cpu_dcache_line_size()		16 +#define cpu_icache_line_size()		16 +#define cpu_has_perf_cntr_intr_bit	0 +#endif +#define cpu_scache_line_size()		0 +#define cpu_has_vz			0 + +#endif /* __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index 19f9134bfe2..3112f08f0c7 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -145,6 +145,7 @@ enum bcm63xx_regs_set {  	RSET_UART1,  	RSET_GPIO,  	RSET_SPI, +	RSET_HSSPI,  	RSET_UDC0,  	RSET_OHCI0,  	RSET_OHCI_PRIV, @@ -193,6 +194,7 @@ enum bcm63xx_regs_set {  #define RSET_ENETDMAS_SIZE(chans)	(16 * (chans))  #define RSET_ENETSW_SIZE		65536  #define RSET_UART_SIZE			24 +#define RSET_HSSPI_SIZE			1536  #define RSET_UDC_SIZE			256  #define RSET_OHCI_SIZE			256  #define RSET_EHCI_SIZE			256 @@ -265,6 +267,7 @@ enum bcm63xx_regs_set {  #define BCM_6328_UART1_BASE		(0xb0000120)  #define BCM_6328_GPIO_BASE		(0xb0000080)  
#define BCM_6328_SPI_BASE		(0xdeadbeef) +#define BCM_6328_HSSPI_BASE		(0xb0001000)  #define BCM_6328_UDC0_BASE		(0xdeadbeef)  #define BCM_6328_USBDMA_BASE		(0xb000c000)  #define BCM_6328_OHCI0_BASE		(0xb0002600) @@ -313,6 +316,7 @@ enum bcm63xx_regs_set {  #define BCM_6338_UART1_BASE		(0xdeadbeef)  #define BCM_6338_GPIO_BASE		(0xfffe0400)  #define BCM_6338_SPI_BASE		(0xfffe0c00) +#define BCM_6338_HSSPI_BASE		(0xdeadbeef)  #define BCM_6338_UDC0_BASE		(0xdeadbeef)  #define BCM_6338_USBDMA_BASE		(0xfffe2400)  #define BCM_6338_OHCI0_BASE		(0xdeadbeef) @@ -360,6 +364,7 @@ enum bcm63xx_regs_set {  #define BCM_6345_UART1_BASE		(0xdeadbeef)  #define BCM_6345_GPIO_BASE		(0xfffe0400)  #define BCM_6345_SPI_BASE		(0xdeadbeef) +#define BCM_6345_HSSPI_BASE		(0xdeadbeef)  #define BCM_6345_UDC0_BASE		(0xdeadbeef)  #define BCM_6345_USBDMA_BASE		(0xfffe2800)  #define BCM_6345_ENET0_BASE		(0xfffe1800) @@ -406,6 +411,7 @@ enum bcm63xx_regs_set {  #define BCM_6348_UART1_BASE		(0xdeadbeef)  #define BCM_6348_GPIO_BASE		(0xfffe0400)  #define BCM_6348_SPI_BASE		(0xfffe0c00) +#define BCM_6348_HSSPI_BASE		(0xdeadbeef)  #define BCM_6348_UDC0_BASE		(0xfffe1000)  #define BCM_6348_USBDMA_BASE		(0xdeadbeef)  #define BCM_6348_OHCI0_BASE		(0xfffe1b00) @@ -451,6 +457,7 @@ enum bcm63xx_regs_set {  #define BCM_6358_UART1_BASE		(0xfffe0120)  #define BCM_6358_GPIO_BASE		(0xfffe0080)  #define BCM_6358_SPI_BASE		(0xfffe0800) +#define BCM_6358_HSSPI_BASE		(0xdeadbeef)  #define BCM_6358_UDC0_BASE		(0xfffe0800)  #define BCM_6358_USBDMA_BASE		(0xdeadbeef)  #define BCM_6358_OHCI0_BASE		(0xfffe1400) @@ -553,6 +560,7 @@ enum bcm63xx_regs_set {  #define BCM_6368_UART1_BASE		(0xb0000120)  #define BCM_6368_GPIO_BASE		(0xb0000080)  #define BCM_6368_SPI_BASE		(0xb0000800) +#define BCM_6368_HSSPI_BASE		(0xdeadbeef)  #define BCM_6368_UDC0_BASE		(0xdeadbeef)  #define BCM_6368_USBDMA_BASE		(0xb0004800)  #define BCM_6368_OHCI0_BASE		(0xb0001600) @@ -604,6 +612,7 @@ extern const unsigned long *bcm63xx_regs_base;  	
__GEN_RSET_BASE(__cpu, UART1)					\  	__GEN_RSET_BASE(__cpu, GPIO)					\  	__GEN_RSET_BASE(__cpu, SPI)					\ +	__GEN_RSET_BASE(__cpu, HSSPI)					\  	__GEN_RSET_BASE(__cpu, UDC0)					\  	__GEN_RSET_BASE(__cpu, OHCI0)					\  	__GEN_RSET_BASE(__cpu, OHCI_PRIV)				\ @@ -647,6 +656,7 @@ extern const unsigned long *bcm63xx_regs_base;  	[RSET_UART1]		= BCM_## __cpu ##_UART1_BASE,		\  	[RSET_GPIO]		= BCM_## __cpu ##_GPIO_BASE,		\  	[RSET_SPI]		= BCM_## __cpu ##_SPI_BASE,		\ +	[RSET_HSSPI]		= BCM_## __cpu ##_HSSPI_BASE,		\  	[RSET_UDC0]		= BCM_## __cpu ##_UDC0_BASE,		\  	[RSET_OHCI0]		= BCM_## __cpu ##_OHCI0_BASE,		\  	[RSET_OHCI_PRIV]	= BCM_## __cpu ##_OHCI_PRIV_BASE,	\ @@ -727,6 +737,7 @@ enum bcm63xx_irq {  	IRQ_ENET0,  	IRQ_ENET1,  	IRQ_ENET_PHY, +	IRQ_HSSPI,  	IRQ_OHCI0,  	IRQ_EHCI0,  	IRQ_USBD, @@ -815,6 +826,7 @@ enum bcm63xx_irq {  #define BCM_6328_ENET0_IRQ		0  #define BCM_6328_ENET1_IRQ		0  #define BCM_6328_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 12) +#define BCM_6328_HSSPI_IRQ		(IRQ_INTERNAL_BASE + 29)  #define BCM_6328_OHCI0_IRQ		(BCM_6328_HIGH_IRQ_BASE + 9)  #define BCM_6328_EHCI0_IRQ		(BCM_6328_HIGH_IRQ_BASE + 10)  #define BCM_6328_USBD_IRQ		(IRQ_INTERNAL_BASE + 4) @@ -860,6 +872,7 @@ enum bcm63xx_irq {  #define BCM_6338_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)  #define BCM_6338_ENET1_IRQ		0  #define BCM_6338_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 9) +#define BCM_6338_HSSPI_IRQ		0  #define BCM_6338_OHCI0_IRQ		0  #define BCM_6338_EHCI0_IRQ		0  #define BCM_6338_USBD_IRQ		0 @@ -898,6 +911,7 @@ enum bcm63xx_irq {  #define BCM_6345_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)  #define BCM_6345_ENET1_IRQ		0  #define BCM_6345_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 12) +#define BCM_6345_HSSPI_IRQ		0  #define BCM_6345_OHCI0_IRQ		0  #define BCM_6345_EHCI0_IRQ		0  #define BCM_6345_USBD_IRQ		0 @@ -936,6 +950,7 @@ enum bcm63xx_irq {  #define BCM_6348_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)  #define BCM_6348_ENET1_IRQ		(IRQ_INTERNAL_BASE + 7)  #define BCM_6348_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 9) +#define 
BCM_6348_HSSPI_IRQ		0  #define BCM_6348_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 12)  #define BCM_6348_EHCI0_IRQ		0  #define BCM_6348_USBD_IRQ		0 @@ -974,6 +989,7 @@ enum bcm63xx_irq {  #define BCM_6358_ENET0_IRQ		(IRQ_INTERNAL_BASE + 8)  #define BCM_6358_ENET1_IRQ		(IRQ_INTERNAL_BASE + 6)  #define BCM_6358_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 9) +#define BCM_6358_HSSPI_IRQ		0  #define BCM_6358_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 5)  #define BCM_6358_EHCI0_IRQ		(IRQ_INTERNAL_BASE + 10)  #define BCM_6358_USBD_IRQ		0 @@ -1086,6 +1102,7 @@ enum bcm63xx_irq {  #define BCM_6368_ENET0_IRQ		0  #define BCM_6368_ENET1_IRQ		0  #define BCM_6368_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 15) +#define BCM_6368_HSSPI_IRQ		0  #define BCM_6368_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 5)  #define BCM_6368_EHCI0_IRQ		(IRQ_INTERNAL_BASE + 7)  #define BCM_6368_USBD_IRQ		(IRQ_INTERNAL_BASE + 8) @@ -1133,6 +1150,7 @@ extern const int *bcm63xx_irqs;  	[IRQ_ENET0]		= BCM_## __cpu ##_ENET0_IRQ,		\  	[IRQ_ENET1]		= BCM_## __cpu ##_ENET1_IRQ,		\  	[IRQ_ENET_PHY]		= BCM_## __cpu ##_ENET_PHY_IRQ,		\ +	[IRQ_HSSPI]		= BCM_## __cpu ##_HSSPI_IRQ,		\  	[IRQ_OHCI0]		= BCM_## __cpu ##_OHCI0_IRQ,		\  	[IRQ_EHCI0]		= BCM_## __cpu ##_EHCI0_IRQ,		\  	[IRQ_USBD]		= BCM_## __cpu ##_USBD_IRQ,		\ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h new file mode 100644 index 00000000000..1b1acafb3d7 --- /dev/null +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_hsspi.h @@ -0,0 +1,8 @@ +#ifndef BCM63XX_DEV_HSSPI_H +#define BCM63XX_DEV_HSSPI_H + +#include <linux/types.h> + +int bcm63xx_hsspi_register(void); + +#endif /* BCM63XX_DEV_HSSPI_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 9875db31d88..ab427f8814e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -463,126 +463,6 @@  #define WDT_SOFTRESET_REG		0xc  
/************************************************************************* - * _REG relative to RSET_UARTx - *************************************************************************/ - -/* UART Control Register */ -#define UART_CTL_REG			0x0 -#define UART_CTL_RXTMOUTCNT_SHIFT	0 -#define UART_CTL_RXTMOUTCNT_MASK	(0x1f << UART_CTL_RXTMOUTCNT_SHIFT) -#define UART_CTL_RSTTXDN_SHIFT		5 -#define UART_CTL_RSTTXDN_MASK		(1 << UART_CTL_RSTTXDN_SHIFT) -#define UART_CTL_RSTRXFIFO_SHIFT		6 -#define UART_CTL_RSTRXFIFO_MASK		(1 << UART_CTL_RSTRXFIFO_SHIFT) -#define UART_CTL_RSTTXFIFO_SHIFT		7 -#define UART_CTL_RSTTXFIFO_MASK		(1 << UART_CTL_RSTTXFIFO_SHIFT) -#define UART_CTL_STOPBITS_SHIFT		8 -#define UART_CTL_STOPBITS_MASK		(0xf << UART_CTL_STOPBITS_SHIFT) -#define UART_CTL_STOPBITS_1		(0x7 << UART_CTL_STOPBITS_SHIFT) -#define UART_CTL_STOPBITS_2		(0xf << UART_CTL_STOPBITS_SHIFT) -#define UART_CTL_BITSPERSYM_SHIFT	12 -#define UART_CTL_BITSPERSYM_MASK	(0x3 << UART_CTL_BITSPERSYM_SHIFT) -#define UART_CTL_XMITBRK_SHIFT		14 -#define UART_CTL_XMITBRK_MASK		(1 << UART_CTL_XMITBRK_SHIFT) -#define UART_CTL_RSVD_SHIFT		15 -#define UART_CTL_RSVD_MASK		(1 << UART_CTL_RSVD_SHIFT) -#define UART_CTL_RXPAREVEN_SHIFT		16 -#define UART_CTL_RXPAREVEN_MASK		(1 << UART_CTL_RXPAREVEN_SHIFT) -#define UART_CTL_RXPAREN_SHIFT		17 -#define UART_CTL_RXPAREN_MASK		(1 << UART_CTL_RXPAREN_SHIFT) -#define UART_CTL_TXPAREVEN_SHIFT		18 -#define UART_CTL_TXPAREVEN_MASK		(1 << UART_CTL_TXPAREVEN_SHIFT) -#define UART_CTL_TXPAREN_SHIFT		18 -#define UART_CTL_TXPAREN_MASK		(1 << UART_CTL_TXPAREN_SHIFT) -#define UART_CTL_LOOPBACK_SHIFT		20 -#define UART_CTL_LOOPBACK_MASK		(1 << UART_CTL_LOOPBACK_SHIFT) -#define UART_CTL_RXEN_SHIFT		21 -#define UART_CTL_RXEN_MASK		(1 << UART_CTL_RXEN_SHIFT) -#define UART_CTL_TXEN_SHIFT		22 -#define UART_CTL_TXEN_MASK		(1 << UART_CTL_TXEN_SHIFT) -#define UART_CTL_BRGEN_SHIFT		23 -#define UART_CTL_BRGEN_MASK		(1 << UART_CTL_BRGEN_SHIFT) - -/* UART Baudword register */ -#define 
UART_BAUD_REG			0x4 - -/* UART Misc Control register */ -#define UART_MCTL_REG			0x8 -#define UART_MCTL_DTR_SHIFT		0 -#define UART_MCTL_DTR_MASK		(1 << UART_MCTL_DTR_SHIFT) -#define UART_MCTL_RTS_SHIFT		1 -#define UART_MCTL_RTS_MASK		(1 << UART_MCTL_RTS_SHIFT) -#define UART_MCTL_RXFIFOTHRESH_SHIFT	8 -#define UART_MCTL_RXFIFOTHRESH_MASK	(0xf << UART_MCTL_RXFIFOTHRESH_SHIFT) -#define UART_MCTL_TXFIFOTHRESH_SHIFT	12 -#define UART_MCTL_TXFIFOTHRESH_MASK	(0xf << UART_MCTL_TXFIFOTHRESH_SHIFT) -#define UART_MCTL_RXFIFOFILL_SHIFT	16 -#define UART_MCTL_RXFIFOFILL_MASK	(0x1f << UART_MCTL_RXFIFOFILL_SHIFT) -#define UART_MCTL_TXFIFOFILL_SHIFT	24 -#define UART_MCTL_TXFIFOFILL_MASK	(0x1f << UART_MCTL_TXFIFOFILL_SHIFT) - -/* UART External Input Configuration register */ -#define UART_EXTINP_REG			0xc -#define UART_EXTINP_RI_SHIFT		0 -#define UART_EXTINP_RI_MASK		(1 << UART_EXTINP_RI_SHIFT) -#define UART_EXTINP_CTS_SHIFT		1 -#define UART_EXTINP_CTS_MASK		(1 << UART_EXTINP_CTS_SHIFT) -#define UART_EXTINP_DCD_SHIFT		2 -#define UART_EXTINP_DCD_MASK		(1 << UART_EXTINP_DCD_SHIFT) -#define UART_EXTINP_DSR_SHIFT		3 -#define UART_EXTINP_DSR_MASK		(1 << UART_EXTINP_DSR_SHIFT) -#define UART_EXTINP_IRSTAT(x)		(1 << (x + 4)) -#define UART_EXTINP_IRMASK(x)		(1 << (x + 8)) -#define UART_EXTINP_IR_RI		0 -#define UART_EXTINP_IR_CTS		1 -#define UART_EXTINP_IR_DCD		2 -#define UART_EXTINP_IR_DSR		3 -#define UART_EXTINP_RI_NOSENSE_SHIFT	16 -#define UART_EXTINP_RI_NOSENSE_MASK	(1 << UART_EXTINP_RI_NOSENSE_SHIFT) -#define UART_EXTINP_CTS_NOSENSE_SHIFT	17 -#define UART_EXTINP_CTS_NOSENSE_MASK	(1 << UART_EXTINP_CTS_NOSENSE_SHIFT) -#define UART_EXTINP_DCD_NOSENSE_SHIFT	18 -#define UART_EXTINP_DCD_NOSENSE_MASK	(1 << UART_EXTINP_DCD_NOSENSE_SHIFT) -#define UART_EXTINP_DSR_NOSENSE_SHIFT	19 -#define UART_EXTINP_DSR_NOSENSE_MASK	(1 << UART_EXTINP_DSR_NOSENSE_SHIFT) - -/* UART Interrupt register */ -#define UART_IR_REG			0x10 -#define UART_IR_MASK(x)			(1 << (x + 16)) -#define UART_IR_STAT(x)			(1 << (x)) 
-#define UART_IR_EXTIP			0 -#define UART_IR_TXUNDER			1 -#define UART_IR_TXOVER			2 -#define UART_IR_TXTRESH			3 -#define UART_IR_TXRDLATCH		4 -#define UART_IR_TXEMPTY			5 -#define UART_IR_RXUNDER			6 -#define UART_IR_RXOVER			7 -#define UART_IR_RXTIMEOUT		8 -#define UART_IR_RXFULL			9 -#define UART_IR_RXTHRESH		10 -#define UART_IR_RXNOTEMPTY		11 -#define UART_IR_RXFRAMEERR		12 -#define UART_IR_RXPARERR		13 -#define UART_IR_RXBRK			14 -#define UART_IR_TXDONE			15 - -/* UART Fifo register */ -#define UART_FIFO_REG			0x14 -#define UART_FIFO_VALID_SHIFT		0 -#define UART_FIFO_VALID_MASK		0xff -#define UART_FIFO_FRAMEERR_SHIFT	8 -#define UART_FIFO_FRAMEERR_MASK		(1 << UART_FIFO_FRAMEERR_SHIFT) -#define UART_FIFO_PARERR_SHIFT		9 -#define UART_FIFO_PARERR_MASK		(1 << UART_FIFO_PARERR_SHIFT) -#define UART_FIFO_BRKDET_SHIFT		10 -#define UART_FIFO_BRKDET_MASK		(1 << UART_FIFO_BRKDET_SHIFT) -#define UART_FIFO_ANYERR_MASK		(UART_FIFO_FRAMEERR_MASK |	\ -					UART_FIFO_PARERR_MASK |		\ -					UART_FIFO_BRKDET_MASK) - - -/*************************************************************************   * _REG relative to RSET_GPIO   *************************************************************************/ diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index 94ed063eec9..cf802287289 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -22,7 +22,6 @@  #define cpu_has_3k_cache	0  #define cpu_has_4k_cache	0  #define cpu_has_tx39_cache	0 -#define cpu_has_fpu		0  #define cpu_has_counter		1  #define cpu_has_watch		1  #define cpu_has_divec		1 diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index 47fb247f966..f9f44865050 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ 
b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -52,23 +52,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)  	return 0;  } -static inline void plat_extra_sync_for_device(struct device *dev) -{ -	BUG(); -} -  static inline int plat_device_is_coherent(struct device *dev)  {  	return 1;  } -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	BUG(); -	return 0; -} -  dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);  phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h index 60fc4c347c4..cceae32a073 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/irq.h +++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h @@ -35,6 +35,8 @@ enum octeon_irq {  	OCTEON_IRQ_PCI_MSI2,  	OCTEON_IRQ_PCI_MSI3, +	OCTEON_IRQ_TWSI, +	OCTEON_IRQ_TWSI2,  	OCTEON_IRQ_RML,  	OCTEON_IRQ_TIMER0,  	OCTEON_IRQ_TIMER1, diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h deleted file mode 100644 index d3cce7326dd..00000000000 --- a/arch/mips/include/asm/mach-db1x00/db1200.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * AMD Alchemy DBAu1200 Reference Board - * Board register defines. - * - * ######################################################################## - * - *  This program is free software; you can distribute it and/or modify it - *  under the terms of the GNU General Public License (Version 2) as - *  published by the Free Software Foundation. - * - *  This program is distributed in the hope it will be useful, but WITHOUT - *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License - *  for more details. 
- * - *  You should have received a copy of the GNU General Public License along - *  with this program; if not, write to the Free Software Foundation, Inc., - *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * - */ -#ifndef __ASM_DB1200_H -#define __ASM_DB1200_H - -#include <linux/types.h> -#include <asm/mach-au1x00/au1000.h> -#include <asm/mach-au1x00/au1xxx_psc.h> - -/* Bit positions for the different interrupt sources */ -#define BCSR_INT_IDE		0x0001 -#define BCSR_INT_ETH		0x0002 -#define BCSR_INT_PC0		0x0004 -#define BCSR_INT_PC0STSCHG	0x0008 -#define BCSR_INT_PC1		0x0010 -#define BCSR_INT_PC1STSCHG	0x0020 -#define BCSR_INT_DC		0x0040 -#define BCSR_INT_FLASHBUSY	0x0080 -#define BCSR_INT_PC0INSERT	0x0100 -#define BCSR_INT_PC0EJECT	0x0200 -#define BCSR_INT_PC1INSERT	0x0400 -#define BCSR_INT_PC1EJECT	0x0800 -#define BCSR_INT_SD0INSERT	0x1000 -#define BCSR_INT_SD0EJECT	0x2000 -#define BCSR_INT_SD1INSERT	0x4000 -#define BCSR_INT_SD1EJECT	0x8000 - -#define IDE_REG_SHIFT		5 - -#define DB1200_IDE_PHYS_ADDR	0x18800000 -#define DB1200_IDE_PHYS_LEN	(16 << IDE_REG_SHIFT) -#define DB1200_ETH_PHYS_ADDR	0x19000300 -#define DB1200_NAND_PHYS_ADDR	0x20000000 - -#define PB1200_IDE_PHYS_ADDR	0x0C800000 -#define PB1200_ETH_PHYS_ADDR	0x0D000300 -#define PB1200_NAND_PHYS_ADDR	0x1C000000 - -/* - * External Interrupts for DBAu1200 as of 8/6/2004. - * Bit positions in the CPLD registers can be calculated by taking - * the interrupt define and subtracting the DB1200_INT_BEGIN value. 
- * - *   Example: IDE bis pos is  = 64 - 64 - *	      ETH bit pos is  = 65 - 64 - */ -enum external_db1200_ints { -	DB1200_INT_BEGIN	= AU1000_MAX_INTR + 1, - -	DB1200_IDE_INT		= DB1200_INT_BEGIN, -	DB1200_ETH_INT, -	DB1200_PC0_INT, -	DB1200_PC0_STSCHG_INT, -	DB1200_PC1_INT, -	DB1200_PC1_STSCHG_INT, -	DB1200_DC_INT, -	DB1200_FLASHBUSY_INT, -	DB1200_PC0_INSERT_INT, -	DB1200_PC0_EJECT_INT, -	DB1200_PC1_INSERT_INT, -	DB1200_PC1_EJECT_INT, -	DB1200_SD0_INSERT_INT, -	DB1200_SD0_EJECT_INT, -	PB1200_SD1_INSERT_INT, -	PB1200_SD1_EJECT_INT, - -	DB1200_INT_END		= DB1200_INT_BEGIN + 15, -}; - -#endif /* __ASM_DB1200_H */ diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h deleted file mode 100644 index 3d1ede46f05..00000000000 --- a/arch/mips/include/asm/mach-db1x00/db1300.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * NetLogic DB1300 board constants - */ - -#ifndef _DB1300_H_ -#define _DB1300_H_ - -/* FPGA (external mux) interrupt sources */ -#define DB1300_FIRST_INT	(ALCHEMY_GPIC_INT_LAST + 1) -#define DB1300_IDE_INT		(DB1300_FIRST_INT + 0) -#define DB1300_ETH_INT		(DB1300_FIRST_INT + 1) -#define DB1300_CF_INT		(DB1300_FIRST_INT + 2) -#define DB1300_VIDEO_INT	(DB1300_FIRST_INT + 4) -#define DB1300_HDMI_INT		(DB1300_FIRST_INT + 5) -#define DB1300_DC_INT		(DB1300_FIRST_INT + 6) -#define DB1300_FLASH_INT	(DB1300_FIRST_INT + 7) -#define DB1300_CF_INSERT_INT	(DB1300_FIRST_INT + 8) -#define DB1300_CF_EJECT_INT	(DB1300_FIRST_INT + 9) -#define DB1300_AC97_INT		(DB1300_FIRST_INT + 10) -#define DB1300_AC97_PEN_INT	(DB1300_FIRST_INT + 11) -#define DB1300_SD1_INSERT_INT	(DB1300_FIRST_INT + 12) -#define DB1300_SD1_EJECT_INT	(DB1300_FIRST_INT + 13) -#define DB1300_OTG_VBUS_OC_INT	(DB1300_FIRST_INT + 14) -#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15) -#define DB1300_LAST_INT		(DB1300_FIRST_INT + 15) - -/* SMSC9210 CS */ -#define DB1300_ETH_PHYS_ADDR	0x19000000 -#define DB1300_ETH_PHYS_END	0x197fffff - -/* ATA CS */ -#define 
DB1300_IDE_PHYS_ADDR	0x18800000 -#define DB1300_IDE_REG_SHIFT	5 -#define DB1300_IDE_PHYS_LEN	(16 << DB1300_IDE_REG_SHIFT) - -/* NAND CS */ -#define DB1300_NAND_PHYS_ADDR	0x20000000 -#define DB1300_NAND_PHYS_END	0x20000fff - -#endif	/* _DB1300_H_ */ diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h new file mode 100644 index 00000000000..acce27fd2bb --- /dev/null +++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h @@ -0,0 +1,87 @@ +/* + *	CPU feature overrides for DECstation systems.  Two variations + *	are generally applicable. + * + *	Copyright (C) 2013  Maciej W. Rozycki + * + *	This program is free software; you can redistribute it and/or + *	modify it under the terms of the GNU General Public License + *	as published by the Free Software Foundation; either version + *	2 of the License, or (at your option) any later version. + */ +#ifndef __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H + +/* Generic ones first.  */ +#define cpu_has_tlb			1 +#define cpu_has_tx39_cache		0 +#define cpu_has_fpu			1 +#define cpu_has_divec			0 +#define cpu_has_prefetch		0 +#define cpu_has_mcheck			0 +#define cpu_has_ejtag			0 +#define cpu_has_mips16			0 +#define cpu_has_mdmx			0 +#define cpu_has_mips3d			0 +#define cpu_has_smartmips		0 +#define cpu_has_rixi			0 +#define cpu_has_vtag_icache		0 +#define cpu_has_ic_fills_f_dc		0 +#define cpu_has_pindexed_dcache		0 +#define cpu_has_local_ebase		0 +#define cpu_icache_snoops_remote_store	1 +#define cpu_has_mips_4			0 +#define cpu_has_mips_5			0 +#define cpu_has_mips32r1		0 +#define cpu_has_mips32r2		0 +#define cpu_has_mips64r1		0 +#define cpu_has_mips64r2		0 +#define cpu_has_dsp			0 +#define cpu_has_mipsmt			0 +#define cpu_has_userlocal		0 + +/* R3k-specific ones.  
*/ +#ifdef CONFIG_CPU_R3000 +#define cpu_has_4kex			0 +#define cpu_has_3k_cache		1 +#define cpu_has_4k_cache		0 +#define cpu_has_32fpr			0 +#define cpu_has_counter			0 +#define cpu_has_watch			0 +#define cpu_has_vce			0 +#define cpu_has_cache_cdex_p		0 +#define cpu_has_cache_cdex_s		0 +#define cpu_has_llsc			0 +#define cpu_has_dc_aliases		0 +#define cpu_has_mips_2			0 +#define cpu_has_mips_3			0 +#define cpu_has_nofpuex			1 +#define cpu_has_inclusive_pcaches	0 +#define cpu_dcache_line_size()		4 +#define cpu_icache_line_size()		4 +#define cpu_scache_line_size()		0 +#endif /* CONFIG_CPU_R3000 */ + +/* R4k-specific ones.  */ +#ifdef CONFIG_CPU_R4X00 +#define cpu_has_4kex			1 +#define cpu_has_3k_cache		0 +#define cpu_has_4k_cache		1 +#define cpu_has_32fpr			1 +#define cpu_has_counter			1 +#define cpu_has_watch			1 +#define cpu_has_vce			1 +#define cpu_has_cache_cdex_p		1 +#define cpu_has_cache_cdex_s		1 +#define cpu_has_llsc			1 +#define cpu_has_dc_aliases		(PAGE_SIZE < 0x4000) +#define cpu_has_mips_2			1 +#define cpu_has_mips_3			1 +#define cpu_has_nofpuex			0 +#define cpu_has_inclusive_pcaches	1 +#define cpu_dcache_line_size()		16 +#define cpu_icache_line_size()		16 +#define cpu_scache_line_size()		32 +#endif /* CONFIG_CPU_R4X00 */ + +#endif /* __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index 74cb99257d5..7629c35986f 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -47,23 +47,9 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)  	return 1;  } -static inline void plat_extra_sync_for_device(struct device *dev) -{ -} - -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	return 0; -} -  static inline int plat_device_is_coherent(struct device *dev)  { -#ifdef CONFIG_DMA_COHERENT -	return 1; -#else  	return coherentio; 
-#endif  }  #ifdef CONFIG_SWIOTLB diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h index 5b5cd689a2f..e2561d99a3f 100644 --- a/arch/mips/include/asm/mach-generic/floppy.h +++ b/arch/mips/include/asm/mach-generic/floppy.h @@ -9,7 +9,6 @@  #define __ASM_MACH_GENERIC_FLOPPY_H  #include <linux/delay.h> -#include <linux/init.h>  #include <linux/ioport.h>  #include <linux/sched.h>  #include <linux/linkage.h> diff --git a/arch/mips/include/asm/mach-generic/ide.h b/arch/mips/include/asm/mach-generic/ide.h index affa66f5c2d..4ae5fbcb15a 100644 --- a/arch/mips/include/asm/mach-generic/ide.h +++ b/arch/mips/include/asm/mach-generic/ide.h @@ -23,7 +23,7 @@  static inline void __ide_flush_prologue(void)  {  #ifdef CONFIG_SMP -	if (cpu_has_dc_aliases) +	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)  		preempt_disable();  #endif  } @@ -31,14 +31,14 @@ static inline void __ide_flush_prologue(void)  static inline void __ide_flush_epilogue(void)  {  #ifdef CONFIG_SMP -	if (cpu_has_dc_aliases) +	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)  		preempt_enable();  #endif  }  static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)  { -	if (cpu_has_dc_aliases) { +	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {  		unsigned long end = addr + size;  		while (addr < end) { diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h index f4caacd2555..1dfe47453ea 100644 --- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h @@ -8,6 +8,8 @@  #ifndef __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H  #define __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H +#include <asm/cpu.h> +  /*   * IP22 with a variety of processors so we can't use defaults for everything.   
*/ @@ -37,6 +39,10 @@  #define cpu_has_nofpuex		0  #define cpu_has_64bits		1 +#define cpu_has_mips_2		1 +#define cpu_has_mips_3		1 +#define cpu_has_mips_5		0 +  #define cpu_has_mips32r1	0  #define cpu_has_mips32r2	0  #define cpu_has_mips64r1	0 diff --git a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h index 1d2b6ff60d3..d6111aa2e88 100644 --- a/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h @@ -8,6 +8,8 @@  #ifndef __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H  #define __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H +#include <asm/cpu.h> +  /*   * IP27 only comes with R10000 family processors all using the same config   */ diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h index 06c441968e6..4ffddfdb506 100644 --- a/arch/mips/include/asm/mach-ip27/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h @@ -58,16 +58,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)  	return 1;  } -static inline void plat_extra_sync_for_device(struct device *dev) -{ -} - -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	return 0; -} -  static inline int plat_device_is_coherent(struct device *dev)  {  	return 1;		/* IP27 non-cohernet mode is unsupported */ diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h index 65e9c856390..4cec06d133d 100644 --- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h @@ -9,6 +9,8 @@  #ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H  #define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H +#include <asm/cpu.h> +  /*   * IP28 only comes with R10000 family processors all using the same config   */ diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h 
b/arch/mips/include/asm/mach-ip32/dma-coherence.h index 073f0c4760b..104cfbc3ed6 100644 --- a/arch/mips/include/asm/mach-ip32/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h @@ -80,17 +80,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)  	return 1;  } -static inline void plat_extra_sync_for_device(struct device *dev) -{ -	return; -} - -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	return 0; -} -  static inline int plat_device_is_coherent(struct device *dev)  {  	return 0;		/* IP32 is non-cohernet */ diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h index 9fc1e9ad703..949003ef97b 100644 --- a/arch/mips/include/asm/mach-jazz/dma-coherence.h +++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h @@ -48,16 +48,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)  	return 1;  } -static inline void plat_extra_sync_for_device(struct device *dev) -{ -} - -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	return 0; -} -  static inline int plat_device_is_coherent(struct device *dev)  {  	return 0; diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h index 62aa1e287fb..4b86c88a03b 100644 --- a/arch/mips/include/asm/mach-jazz/floppy.h +++ b/arch/mips/include/asm/mach-jazz/floppy.h @@ -9,7 +9,6 @@  #define __ASM_MACH_JAZZ_FLOPPY_H  #include <linux/delay.h> -#include <linux/init.h>  #include <linux/linkage.h>  #include <linux/types.h>  #include <linux/mm.h> diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h index 509cd582804..14ecc5313d2 100644 --- a/arch/mips/include/asm/mach-jz4740/dma.h +++ b/arch/mips/include/asm/mach-jz4740/dma.h @@ -22,8 +22,6 @@ enum jz4740_dma_request_type {  	JZ4740_DMA_TYPE_UART_RECEIVE	= 21,  	JZ4740_DMA_TYPE_SPI_TRANSMIT	= 22,  	
JZ4740_DMA_TYPE_SPI_RECEIVE	= 23, -	JZ4740_DMA_TYPE_AIC_TRANSMIT	= 24, -	JZ4740_DMA_TYPE_AIC_RECEIVE	= 25,  	JZ4740_DMA_TYPE_MMC_TRANSMIT	= 26,  	JZ4740_DMA_TYPE_MMC_RECEIVE	= 27,  	JZ4740_DMA_TYPE_TCU		= 28, diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h index 05988c2d656..069b43a9da6 100644 --- a/arch/mips/include/asm/mach-jz4740/platform.h +++ b/arch/mips/include/asm/mach-jz4740/platform.h @@ -21,6 +21,7 @@  extern struct platform_device jz4740_usb_ohci_device;  extern struct platform_device jz4740_udc_device; +extern struct platform_device jz4740_udc_xceiv_device;  extern struct platform_device jz4740_mmc_device;  extern struct platform_device jz4740_rtc_device;  extern struct platform_device jz4740_i2c_device; diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h new file mode 100644 index 00000000000..829a7ec185f --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/boot_param.h @@ -0,0 +1,163 @@ +#ifndef __ASM_MACH_LOONGSON_BOOT_PARAM_H_ +#define __ASM_MACH_LOONGSON_BOOT_PARAM_H_ + +#define SYSTEM_RAM_LOW		1 +#define SYSTEM_RAM_HIGH		2 +#define MEM_RESERVED		3 +#define PCI_IO			4 +#define PCI_MEM			5 +#define LOONGSON_CFG_REG	6 +#define VIDEO_ROM		7 +#define ADAPTER_ROM		8 +#define ACPI_TABLE		9 +#define MAX_MEMORY_TYPE		10 + +#define LOONGSON3_BOOT_MEM_MAP_MAX 128 +struct efi_memory_map_loongson { +	u16 vers;	/* version of efi_memory_map */ +	u32 nr_map;	/* number of memory_maps */ +	u32 mem_freq;	/* memory frequence */ +	struct mem_map { +		u32 node_id;	/* node_id which memory attached to */ +		u32 mem_type;	/* system memory, pci memory, pci io, etc. 
*/ +		u64 mem_start;	/* memory map start address */ +		u32 mem_size;	/* each memory_map size, not the total size */ +	} map[LOONGSON3_BOOT_MEM_MAP_MAX]; +} __packed; + +enum loongson_cpu_type { +	Loongson_2E = 0, +	Loongson_2F = 1, +	Loongson_3A = 2, +	Loongson_3B = 3, +	Loongson_1A = 4, +	Loongson_1B = 5 +}; + +/* + * Capability and feature descriptor structure for MIPS CPU + */ +struct efi_cpuinfo_loongson { +	u16 vers;     /* version of efi_cpuinfo_loongson */ +	u32 processor_id; /* PRID, e.g. 6305, 6306 */ +	u32 cputype;  /* Loongson_3A/3B, etc. */ +	u32 total_node;   /* num of total numa nodes */ +	u32 cpu_startup_core_id; /* Core id */ +	u32 cpu_clock_freq; /* cpu_clock */ +	u32 nr_cpus; +} __packed; + +struct system_loongson { +	u16 vers;     /* version of system_loongson */ +	u32 ccnuma_smp; /* 0: no numa; 1: has numa */ +	u32 sing_double_channel; /* 1:single; 2:double */ +} __packed; + +struct irq_source_routing_table { +	u16 vers; +	u16 size; +	u16 rtr_bus; +	u16 rtr_devfn; +	u32 vendor; +	u32 device; +	u32 PIC_type;   /* conform use HT or PCI to route to CPU-PIC */ +	u64 ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */ +	u64 ht_enable;  /* irqs used in this PIC */ +	u32 node_id;    /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */ +	u64 pci_mem_start_addr; +	u64 pci_mem_end_addr; +	u64 pci_io_start_addr; +	u64 pci_io_end_addr; +	u64 pci_config_addr; +	u32 dma_mask_bits; +} __packed; + +struct interface_info { +	u16 vers; /* version of the specificition */ +	u16 size; +	u8  flag; +	char description[64]; +} __packed; + +#define MAX_RESOURCE_NUMBER 128 +struct resource_loongson { +	u64 start; /* resource start address */ +	u64 end;   /* resource end address */ +	char name[64]; +	u32 flags; +}; + +struct archdev_data {};  /* arch specific additions */ + +struct board_devices { +	char name[64];    /* hold the device name */ +	u32 num_resources; /* number of device_resource */ +	/* for each device's resource */ +	struct resource_loongson resource[MAX_RESOURCE_NUMBER]; +	/* 
arch specific additions */ +	struct archdev_data archdata; +}; + +struct loongson_special_attribute { +	u16 vers;     /* version of this special */ +	char special_name[64]; /* special_atribute_name */ +	u32 loongson_special_type; /* type of special device */ +	/* for each device's resource */ +	struct resource_loongson resource[MAX_RESOURCE_NUMBER]; +}; + +struct loongson_params { +	u64 memory_offset;	/* efi_memory_map_loongson struct offset */ +	u64 cpu_offset;		/* efi_cpuinfo_loongson struct offset */ +	u64 system_offset;	/* system_loongson struct offset */ +	u64 irq_offset;		/* irq_source_routing_table struct offset */ +	u64 interface_offset;	/* interface_info struct offset */ +	u64 special_offset;	/* loongson_special_attribute struct offset */ +	u64 boarddev_table_offset;  /* board_devices offset */ +}; + +struct smbios_tables { +	u16 vers;     /* version of smbios */ +	u64 vga_bios; /* vga_bios address */ +	struct loongson_params lp; +}; + +struct efi_reset_system_t { +	u64 ResetCold; +	u64 ResetWarm; +	u64 ResetType; +	u64 Shutdown; +	u64 DoSuspend; /* NULL if not support */ +}; + +struct efi_loongson { +	u64 mps;	/* MPS table */ +	u64 acpi;	/* ACPI table (IA64 ext 0.71) */ +	u64 acpi20;	/* ACPI table (ACPI 2.0) */ +	struct smbios_tables smbios;	/* SM BIOS table */ +	u64 sal_systab;	/* SAL system table */ +	u64 boot_info;	/* boot info table */ +}; + +struct boot_params { +	struct efi_loongson efi; +	struct efi_reset_system_t reset_system; +}; + +struct loongson_system_configuration { +	u32 nr_cpus; +	enum loongson_cpu_type cputype; +	u64 ht_control_base; +	u64 pci_mem_start_addr; +	u64 pci_mem_end_addr; +	u64 pci_io_base; +	u64 restart_addr; +	u64 poweroff_addr; +	u64 suspend_addr; +	u64 vgabios_addr; +	u32 dma_mask_bits; +}; + +extern struct efi_memory_map_loongson *loongson_memmap; +extern struct loongson_system_configuration loongson_sysconf; +#endif diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h 
b/arch/mips/include/asm/mach-loongson/dma-coherence.h index e1433055fe9..6a902751cc7 100644 --- a/arch/mips/include/asm/mach-loongson/dma-coherence.h +++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h @@ -11,24 +11,40 @@  #ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H  #define __ASM_MACH_LOONGSON_DMA_COHERENCE_H +#ifdef CONFIG_SWIOTLB +#include <linux/swiotlb.h> +#endif +  struct device; +extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);  static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,  					  size_t size)  { +#ifdef CONFIG_CPU_LOONGSON3 +	return virt_to_phys(addr); +#else  	return virt_to_phys(addr) | 0x80000000; +#endif  }  static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,  					       struct page *page)  { +#ifdef CONFIG_CPU_LOONGSON3 +	return page_to_phys(page); +#else  	return page_to_phys(page) | 0x80000000; +#endif  }  static inline unsigned long plat_dma_addr_to_phys(struct device *dev,  	dma_addr_t dma_addr)  { -#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) +#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT) +	return dma_addr; +#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)  	return (dma_addr > 0x8fffffff) ? 
dma_addr : (dma_addr & 0x0fffffff);  #else  	return dma_addr & 0x7fffffff; @@ -53,19 +69,13 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)  	return 1;  } -static inline void plat_extra_sync_for_device(struct device *dev) -{ -} - -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	return 0; -} -  static inline int plat_device_is_coherent(struct device *dev)  { +#ifdef CONFIG_DMA_NONCOHERENT  	return 0; +#else +	return 1; +#endif /* CONFIG_DMA_NONCOHERENT */  }  #endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h new file mode 100644 index 00000000000..34560bda662 --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/irq.h @@ -0,0 +1,44 @@ +#ifndef __ASM_MACH_LOONGSON_IRQ_H_ +#define __ASM_MACH_LOONGSON_IRQ_H_ + +#include <boot_param.h> + +#ifdef CONFIG_CPU_LOONGSON3 + +/* cpu core interrupt numbers */ +#define MIPS_CPU_IRQ_BASE 56 + +#define LOONGSON_UART_IRQ   (MIPS_CPU_IRQ_BASE + 2) /* UART */ +#define LOONGSON_HT1_IRQ    (MIPS_CPU_IRQ_BASE + 3) /* HT1 */ +#define LOONGSON_TIMER_IRQ  (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */ + +#define LOONGSON_HT1_CFG_BASE		loongson_sysconf.ht_control_base +#define LOONGSON_HT1_INT_VECTOR_BASE	(LOONGSON_HT1_CFG_BASE + 0x80) +#define LOONGSON_HT1_INT_EN_BASE	(LOONGSON_HT1_CFG_BASE + 0xa0) +#define LOONGSON_HT1_INT_VECTOR(n)	\ +		LOONGSON3_REG32(LOONGSON_HT1_INT_VECTOR_BASE, 4 * (n)) +#define LOONGSON_HT1_INTN_EN(n)		\ +		LOONGSON3_REG32(LOONGSON_HT1_INT_EN_BASE, 4 * (n)) + +#define LOONGSON_INT_ROUTER_OFFSET	0x1400 +#define LOONGSON_INT_ROUTER_INTEN	\ +	  LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x24) +#define LOONGSON_INT_ROUTER_INTENSET	\ +	  LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x28) +#define LOONGSON_INT_ROUTER_INTENCLR	\ +	  LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x2c) +#define 
LOONGSON_INT_ROUTER_ENTRY(n)	\ +	  LOONGSON3_REG8(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + n) +#define LOONGSON_INT_ROUTER_LPC		LOONGSON_INT_ROUTER_ENTRY(0x0a) +#define LOONGSON_INT_ROUTER_HT1(n)	LOONGSON_INT_ROUTER_ENTRY(n + 0x18) + +#define LOONGSON_INT_CORE0_INT0		0x11 /* route to int 0 of core 0 */ +#define LOONGSON_INT_CORE0_INT1		0x21 /* route to int 1 of core 0 */ + +#endif + +extern void fixup_irqs(void); +extern void loongson3_ipi_interrupt(struct pt_regs *regs); + +#include_next <irq.h> +#endif /* __ASM_MACH_LOONGSON_IRQ_H_ */ diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h index b286534fef0..f3fd1eb8e3d 100644 --- a/arch/mips/include/asm/mach-loongson/loongson.h +++ b/arch/mips/include/asm/mach-loongson/loongson.h @@ -15,6 +15,7 @@  #include <linux/init.h>  #include <linux/irq.h>  #include <linux/kconfig.h> +#include <boot_param.h>  /* loongson internal northbridge initialization */  extern void bonito_irq_init(void); @@ -24,8 +25,9 @@ extern void mach_prepare_reboot(void);  extern void mach_prepare_shutdown(void);  /* environment arguments from bootloader */ -extern unsigned long cpu_clock_freq; -extern unsigned long memsize, highmemsize; +extern u32 cpu_clock_freq; +extern u32 memsize, highmemsize; +extern struct plat_smp_ops loongson3_smp_ops;  /* loongson-specific command line, env and memory initialization */  extern void __init prom_init_memory(void); @@ -61,6 +63,12 @@ extern int mach_i8259_irq(void);  #define LOONGSON_REG(x) \  	(*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x))) +#define LOONGSON3_REG8(base, x) \ +	(*(volatile u8 *)((char *)TO_UNCAC(base) + (x))) + +#define LOONGSON3_REG32(base, x) \ +	(*(volatile u32 *)((char *)TO_UNCAC(base) + (x))) +  #define LOONGSON_IRQ_BASE	32  #define LOONGSON2_PERFCNT_IRQ	(MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */ @@ -86,6 +94,10 @@ static inline void do_perfcnt_IRQ(void)  #define LOONGSON_REG_BASE	0x1fe00000  #define 
LOONGSON_REG_SIZE	0x00100000	/* 256Bytes + 256Bytes + ??? */  #define LOONGSON_REG_TOP	(LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1) +/* Loongson-3 specific registers */ +#define LOONGSON3_REG_BASE	0x3ff00000 +#define LOONGSON3_REG_SIZE	0x00100000	/* 256Bytes + 256Bytes + ??? */ +#define LOONGSON3_REG_TOP	(LOONGSON3_REG_BASE+LOONGSON3_REG_SIZE-1)  #define LOONGSON_LIO1_BASE	0x1ff00000  #define LOONGSON_LIO1_SIZE	0x00100000	/* 1M */ @@ -101,7 +113,13 @@ static inline void do_perfcnt_IRQ(void)  #define LOONGSON_PCICFG_BASE	0x1fe80000  #define LOONGSON_PCICFG_SIZE	0x00000800	/* 2K */  #define LOONGSON_PCICFG_TOP	(LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1) + +#if defined(CONFIG_HT_PCI) +#define LOONGSON_PCIIO_BASE	loongson_sysconf.pci_io_base +#else  #define LOONGSON_PCIIO_BASE	0x1fd00000 +#endif +  #define LOONGSON_PCIIO_SIZE	0x00100000	/* 1M */  #define LOONGSON_PCIIO_TOP	(LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1) @@ -231,6 +249,9 @@ static inline void do_perfcnt_IRQ(void)  #define LOONGSON_PXARB_CFG		LOONGSON_REG(LOONGSON_REGBASE + 0x68)  #define LOONGSON_PXARB_STATUS		LOONGSON_REG(LOONGSON_REGBASE + 0x6c) +/* Chip Config */ +#define LOONGSON_CHIPCFG0		LOONGSON_REG(LOONGSON_REGBASE + 0x80) +  /* pcimap */  #define LOONGSON_PCIMAP_PCIMAP_LO0	0x0000003f @@ -246,9 +267,6 @@ static inline void do_perfcnt_IRQ(void)  #ifdef CONFIG_CPU_SUPPORTS_CPUFREQ  #include <linux/cpufreq.h>  extern struct cpufreq_frequency_table loongson2_clockmod_table[]; - -/* Chip Config */ -#define LOONGSON_CHIPCFG0		LOONGSON_REG(LOONGSON_REGBASE + 0x80)  #endif  /* diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h index 3810d5ca84a..1b1f592fa2b 100644 --- a/arch/mips/include/asm/mach-loongson/machine.h +++ b/arch/mips/include/asm/mach-loongson/machine.h @@ -24,4 +24,10 @@  #endif +#ifdef CONFIG_LEMOTE_MACH3A + +#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101 + +#endif /* CONFIG_LEMOTE_MACH3A */ +  #endif /* __ASM_MACH_LOONGSON_MACHINE_H */ diff 
--git a/arch/mips/include/asm/mach-loongson/pci.h b/arch/mips/include/asm/mach-loongson/pci.h index bc99dab4ef6..1212774f66e 100644 --- a/arch/mips/include/asm/mach-loongson/pci.h +++ b/arch/mips/include/asm/mach-loongson/pci.h @@ -40,8 +40,13 @@ extern struct pci_ops loongson_pci_ops;  #else	/* loongson2f/32bit & loongson2e */  /* this pci memory space is mapped by pcimap in pci.c */ +#ifdef CONFIG_CPU_LOONGSON3 +#define LOONGSON_PCI_MEM_START	0x40000000UL +#define LOONGSON_PCI_MEM_END	0x7effffffUL +#else  #define LOONGSON_PCI_MEM_START	LOONGSON_PCILO1_BASE  #define LOONGSON_PCI_MEM_END	(LOONGSON_PCILO1_BASE + 0x04000000 * 2) +#endif  /* this is an offset from mips_io_port_base */  #define LOONGSON_PCI_IO_START	0x00004000UL diff --git a/arch/mips/include/asm/mach-loongson/spaces.h b/arch/mips/include/asm/mach-loongson/spaces.h new file mode 100644 index 00000000000..e2506ee9004 --- /dev/null +++ b/arch/mips/include/asm/mach-loongson/spaces.h @@ -0,0 +1,9 @@ +#ifndef __ASM_MACH_LOONGSON_SPACES_H_ +#define __ASM_MACH_LOONGSON_SPACES_H_ + +#if defined(CONFIG_64BIT) +#define CAC_BASE        _AC(0x9800000000000000, UL) +#endif /* CONFIG_64BIT */ + +#include <asm/mach-generic/spaces.h> +#endif diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h index 0b793e7bf67..77eeda77e73 100644 --- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h @@ -5,27 +5,104 @@   *   * Chris Dearman (chris@mips.com)   * Copyright (C) 2007 Mips Technologies, Inc. + * Copyright (C) 2014 Imagination Technologies Ltd.   
*/  #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H  #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H +	/* +	 * Prepare segments for EVA boot: +	 * +	 * This is in case the processor boots in legacy configuration +	 * (SI_EVAReset is de-asserted and CONFIG5.K == 0) +	 * +	 * On entry, t1 is loaded with CP0_CONFIG +	 * +	 * ========================= Mappings ============================= +	 * Virtual memory           Physical memory           Mapping +	 * 0x00000000 - 0x7fffffff  0x80000000 - 0xfffffffff   MUSUK (kuseg) +	 *                          Flat 2GB physical memory +	 * +	 * 0x80000000 - 0x9fffffff  0x00000000 - 0x1ffffffff   MUSUK (kseg0) +	 * 0xa0000000 - 0xbf000000  0x00000000 - 0x1ffffffff   MUSUK (kseg1) +	 * 0xc0000000 - 0xdfffffff             -                 MK  (kseg2) +	 * 0xe0000000 - 0xffffffff             -                 MK  (kseg3) +	 * +	 * +	 * Lowmem is expanded to 2GB +	 */ +	.macro	eva_entry +	/* +	 * Get Config.K0 value and use it to program +	 * the segmentation registers +	 */ +	andi	t1, 0x7 /* CCA */ +	move	t2, t1 +	ins	t2, t1, 16, 3 +	/* SegCtl0 */ +	li      t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |		\ +		(0 << MIPS_SEGCFG_PA_SHIFT) |				\ +		(1 << MIPS_SEGCFG_EU_SHIFT)) |				\ +		(((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |		\ +		(0 << MIPS_SEGCFG_PA_SHIFT) |				\ +		(1 << MIPS_SEGCFG_EU_SHIFT)) << 16) +	or	t0, t2 +	mtc0	t0, $5, 2 + +	/* SegCtl1 */ +	li      t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |	\ +		(0 << MIPS_SEGCFG_PA_SHIFT) |				\ +		(2 << MIPS_SEGCFG_C_SHIFT) |				\ +		(1 << MIPS_SEGCFG_EU_SHIFT)) |				\ +		(((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |		\ +		(0 << MIPS_SEGCFG_PA_SHIFT) |				\ +		(1 << MIPS_SEGCFG_EU_SHIFT)) << 16) +	ins	t0, t1, 16, 3 +	mtc0	t0, $5, 3 + +	/* SegCtl2 */ +	li	t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |	\ +		(6 << MIPS_SEGCFG_PA_SHIFT) |				\ +		(1 << MIPS_SEGCFG_EU_SHIFT)) |				\ +		(((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |		\ +		(4 << MIPS_SEGCFG_PA_SHIFT) |				\ +		(1 
<< MIPS_SEGCFG_EU_SHIFT)) << 16) +	or	t0, t2 +	mtc0	t0, $5, 4 + +	jal	mips_ihb +	mfc0    t0, $16, 5 +	li      t2, 0x40000000      /* K bit */ +	or      t0, t0, t2 +	mtc0    t0, $16, 5 +	sync +	jal	mips_ihb +	.endm +  	.macro	kernel_entry_setup -#ifdef CONFIG_MIPS_MT_SMTC -	mfc0	t0, CP0_CONFIG -	bgez	t0, 9f + +#ifdef CONFIG_EVA +	sync +	ehb + +	mfc0    t1, CP0_CONFIG +	bgez    t1, 9f  	mfc0	t0, CP0_CONFIG, 1  	bgez	t0, 9f  	mfc0	t0, CP0_CONFIG, 2  	bgez	t0, 9f  	mfc0	t0, CP0_CONFIG, 3 -	and	t0, 1<<2 -	bnez	t0, 0f +	sll     t0, t0, 6   /* SC bit */ +	bgez    t0, 9f + +	eva_entry +	b       0f  9:  	/* Assume we came from YAMON... */  	PTR_LA	v0, 0x9fc00534	/* YAMON print */  	lw	v0, (v0)  	move	a0, zero -	PTR_LA	a1, nonmt_processor +	PTR_LA  a1, nonsc_processor  	jal	v0  	PTR_LA	v0, 0x9fc00520	/* YAMON exit */ @@ -34,19 +111,25 @@  	jal	v0  1:	b	1b - +	nop  	__INITDATA -nonmt_processor: -	.asciz	"SMTC kernel requires the MT ASE to run\n" +nonsc_processor: +	.asciz  "EVA kernel requires a MIPS core with Segment Control implemented\n"  	__FINIT +#endif /* CONFIG_EVA */  0: -#endif  	.endm  /*   * Do SMP slave processor setup necessary before we can safely execute C code.   */  	.macro	smp_slave_setup +#ifdef CONFIG_EVA +	sync +	ehb +	mfc0    t1, CP0_CONFIG +	eva_entry +#endif  	.endm  #endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */ diff --git a/arch/mips/include/asm/mach-malta/malta-pm.h b/arch/mips/include/asm/mach-malta/malta-pm.h new file mode 100644 index 00000000000..c2c2e201013 --- /dev/null +++ b/arch/mips/include/asm/mach-malta/malta-pm.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#ifndef __ASM_MIPS_MACH_MALTA_PM_H__ +#define __ASM_MIPS_MACH_MALTA_PM_H__ + +#include <asm/mips-boards/piix4.h> + +#ifdef CONFIG_MIPS_MALTA_PM + +/** + * mips_pm_suspend - enter a suspend state + * @state: the state to enter, one of PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_* + * + * Enters a suspend state via the Malta's PIIX4. If the state to be entered + * is one which loses context (eg. SOFF) then this function will never + * return. + */ +extern int mips_pm_suspend(unsigned state); + +#else /* !CONFIG_MIPS_MALTA_PM */ + +static inline int mips_pm_suspend(unsigned state) +{ +	return -EINVAL; +} + +#endif /* !CONFIG_MIPS_MALTA_PM */ + +#endif /* __ASM_MIPS_MACH_MALTA_PM_H__ */ diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h new file mode 100644 index 00000000000..d7e54971ec6 --- /dev/null +++ b/arch/mips/include/asm/mach-malta/spaces.h @@ -0,0 +1,46 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2014 Imagination Technologies Ltd. + */ + +#ifndef _ASM_MALTA_SPACES_H +#define _ASM_MALTA_SPACES_H + +#ifdef CONFIG_EVA + +/* + * Traditional Malta Board Memory Map for EVA + * + * 0x00000000 - 0x0fffffff: 1st RAM region, 256MB + * 0x10000000 - 0x1bffffff: GIC and CPC Control Registers + * 0x1c000000 - 0x1fffffff: I/O And Flash + * 0x20000000 - 0x7fffffff: 2nd RAM region, 1.5GB + * 0x80000000 - 0xffffffff: Physical memory aliases to 0x0 (2GB) + * + * The kernel is still located in 0x80000000(kseg0). However, + * the physical mask has been shifted to 0x80000000 which exploits the alias + * on the Malta board. As a result of which, we override the __pa_symbol + * to peform direct mapping from virtual to physical addresses. In other + * words, the 0x80000000 virtual address maps to 0x80000000 physical address + * which in turn aliases to 0x0. 
We do this in order to be able to use a flat + * 2GB of memory (0x80000000 - 0xffffffff) so we can avoid the I/O hole in + * 0x10000000 - 0x1fffffff. + * The last 64KB of physical memory are reserved for correct HIGHMEM + * macros arithmetics. + * + */ + +#define PAGE_OFFSET	_AC(0x0, UL) +#define PHYS_OFFSET	_AC(0x80000000, UL) +#define HIGHMEM_START	_AC(0xffff0000, UL) + +#define __pa_symbol(x)	(RELOC_HIDE((unsigned long)(x), 0)) + +#endif /* CONFIG_EVA */ + +#include <asm/mach-generic/spaces.h> + +#endif /* _ASM_MALTA_SPACES_H */ diff --git a/arch/mips/include/asm/mach-netlogic/irq.h b/arch/mips/include/asm/mach-netlogic/irq.h index 868ed8a2ed5..c0dbd530cca 100644 --- a/arch/mips/include/asm/mach-netlogic/irq.h +++ b/arch/mips/include/asm/mach-netlogic/irq.h @@ -9,7 +9,8 @@  #define __ASM_NETLOGIC_IRQ_H  #include <asm/mach-netlogic/multi-node.h> -#define NR_IRQS			(64 * NLM_NR_NODES) +#define NLM_IRQS_PER_NODE	1024 +#define NR_IRQS			(NLM_IRQS_PER_NODE * NLM_NR_NODES)  #define MIPS_CPU_IRQ_BASE	0 diff --git a/arch/mips/include/asm/mach-netlogic/multi-node.h b/arch/mips/include/asm/mach-netlogic/multi-node.h index d62fc773f4d..9ed8dacdc37 100644 --- a/arch/mips/include/asm/mach-netlogic/multi-node.h +++ b/arch/mips/include/asm/mach-netlogic/multi-node.h @@ -47,8 +47,37 @@  #endif  #endif -#define NLM_CORES_PER_NODE	8  #define NLM_THREADS_PER_CORE	4 -#define NLM_CPUS_PER_NODE	(NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE) +#ifdef CONFIG_CPU_XLR +#define nlm_cores_per_node()	8 +#else +extern unsigned int xlp_cores_per_node; +#define nlm_cores_per_node()	xlp_cores_per_node +#endif + +#define nlm_threads_per_node()	(nlm_cores_per_node() * NLM_THREADS_PER_CORE) +#define nlm_cpuid_to_node(c)	((c) / nlm_threads_per_node()) + +struct nlm_soc_info { +	unsigned long	coremask;	/* cores enabled on the soc */ +	unsigned long	ebase;		/* not used now */ +	uint64_t	irqmask;	/* EIMR for the node */ +	uint64_t	sysbase;	/* only for XLP - sys block base */ +	uint64_t	picbase;	/* PIC 
block base */ +	spinlock_t	piclock;	/* lock for PIC access */ +	cpumask_t	cpumask;	/* logical cpu mask for node */ +	unsigned int	socbus; +}; + +extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; +#define nlm_get_node(i)		(&nlm_nodes[i]) +#define nlm_node_present(n)	((n) >= 0 && (n) < NLM_NR_NODES && \ +					nlm_get_node(n)->coremask != 0) +#ifdef CONFIG_CPU_XLR +#define nlm_current_node()	(&nlm_nodes[0]) +#else +#define nlm_current_node()	(&nlm_nodes[nlm_nodeid()]) +#endif +void nlm_node_init(int node);  #endif diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h new file mode 100644 index 00000000000..ceeb1f5e712 --- /dev/null +++ b/arch/mips/include/asm/mach-netlogic/topology.h @@ -0,0 +1,22 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Broadcom Corporation + */ +#ifndef _ASM_MACH_NETLOGIC_TOPOLOGY_H +#define _ASM_MACH_NETLOGIC_TOPOLOGY_H + +#include <asm/mach-netlogic/multi-node.h> + +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu)	cpu_to_node(cpu) +#define topology_core_id(cpu)	(cpu_logical_map(cpu) / NLM_THREADS_PER_CORE) +#define topology_thread_cpumask(cpu)		(&cpu_sibling_map[cpu]) +#define topology_core_cpumask(cpu)	cpumask_of_node(cpu_to_node(cpu)) +#endif + +#include <asm-generic/topology.h> + +#endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */ diff --git a/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h new file mode 100644 index 00000000000..725e1ed83f6 --- /dev/null +++ b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h @@ -0,0 +1,36 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2013 Cavium, Inc. + */ +#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_4kex		1 +#define cpu_has_3k_cache	0 +#define cpu_has_tx39_cache	0 +#define cpu_has_counter		1 +#define cpu_has_llsc		1 +/* + * We Disable LL/SC on non SMP systems as it is faster to disable + * interrupts for atomic access than a LL/SC. + */ +#ifdef CONFIG_SMP +# define kernel_uses_llsc	1 +#else +# define kernel_uses_llsc	0 +#endif + +#ifdef CONFIG_CPU_CAVIUM_OCTEON +#define cpu_dcache_line_size()	128 +#define cpu_icache_line_size()	128 +#define cpu_has_octeon_cache	1 +#define cpu_has_4k_cache	0 +#else +#define cpu_has_octeon_cache	0 +#define cpu_has_4k_cache	1 +#endif + +#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-paravirt/irq.h b/arch/mips/include/asm/mach-paravirt/irq.h new file mode 100644 index 00000000000..9b4d35eca97 --- /dev/null +++ b/arch/mips/include/asm/mach-paravirt/irq.h @@ -0,0 +1,19 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Cavium, Inc. + */ +#ifndef __ASM_MACH_PARAVIRT_IRQ_H__ +#define  __ASM_MACH_PARAVIRT_IRQ_H__ + +#define NR_IRQS 64 +#define MIPS_CPU_IRQ_BASE 1 + +#define MIPS_IRQ_PCIA (MIPS_CPU_IRQ_BASE + 8) + +#define MIPS_IRQ_MBOX0 (MIPS_CPU_IRQ_BASE + 32) +#define MIPS_IRQ_MBOX1 (MIPS_CPU_IRQ_BASE + 33) + +#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */ diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h new file mode 100644 index 00000000000..2f82bfa3a77 --- /dev/null +++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h @@ -0,0 +1,50 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Cavium, Inc + */ +#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H +#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H + +#define CP0_EBASE $15, 1 + +	.macro  kernel_entry_setup +	mfc0	t0, CP0_EBASE +	andi	t0, t0, 0x3ff		# CPUNum +	beqz	t0, 1f +	# CPUs other than zero goto smp_bootstrap +	j	smp_bootstrap + +1: +	.endm + +/* + * Do SMP slave processor setup necessary before we can safely execute + * C code. + */ +	.macro  smp_slave_setup +	mfc0	t0, CP0_EBASE +	andi	t0, t0, 0x3ff		# CPUNum +	slti	t1, t0, NR_CPUS +	bnez	t1, 1f +2: +	di +	wait +	b	2b			# Unknown CPU, loop forever. +1: +	PTR_LA	t1, paravirt_smp_sp +	PTR_SLL	t0, PTR_SCALESHIFT +	PTR_ADDU t1, t1, t0 +3: +	PTR_L	sp, 0(t1) +	beqz	sp, 3b			# Spin until told to proceed. + +	PTR_LA	t1, paravirt_smp_gp +	PTR_ADDU t1, t1, t0 +	sync +	PTR_L	gp, 0(t1) +	.endm + +#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-powertv/war.h b/arch/mips/include/asm/mach-paravirt/war.h index c5651c8e58d..36d3afb9845 100644 --- a/arch/mips/include/asm/mach-powertv/war.h +++ b/arch/mips/include/asm/mach-paravirt/war.h @@ -3,13 +3,11 @@   * License.  See the file "COPYING" in the main directory of this archive   * for more details.   * - * This version for the PowerTV platform copied from the Malta version. - *   * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org> - * Portions copyright (C) 2009 Cisco Systems, Inc. 
+ * Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>   */ -#ifndef __ASM_MACH_POWERTV_WAR_H -#define __ASM_MACH_POWERTV_WAR_H +#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H +#define __ASM_MIPS_MACH_PARAVIRT_WAR_H  #define R4600_V1_INDEX_ICACHEOP_WAR	0  #define R4600_V1_HIT_CACHEOP_WAR	0 @@ -17,11 +15,11 @@  #define R5432_CP0_INTERRUPT_WAR		0  #define BCM1250_M3_WAR			0  #define SIBYTE_1956_WAR			0 -#define MIPS4K_ICACHE_REFILL_WAR	1 -#define MIPS_CACHE_SYNC_WAR		1 +#define MIPS4K_ICACHE_REFILL_WAR	0 +#define MIPS_CACHE_SYNC_WAR		0  #define TX49XX_ICACHE_INDEX_INV_WAR	0 -#define ICACHE_REFILLS_WORKAROUND_WAR	1 +#define ICACHE_REFILLS_WORKAROUND_WAR	0  #define R10000_LLSC_WAR			0  #define MIPS34K_MISSED_ITLB_WAR		0 -#endif /* __ASM_MACH_POWERTV_WAR_H */ +#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */ diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h index 2dbc7a8cec1..fc946c83599 100644 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h +++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h @@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,  	__asm__ __volatile__(  	"	.set	push				\n" -	"	.set	mips3				\n" +	"	.set	arch=r4000			\n"  	"1:	ll	%0, %1	# set_value_reg32	\n"  	"	and	%0, %2				\n"  	"	or	%0, %3				\n" @@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,  	__asm__ __volatile__(  	"	.set	push				\n" -	"	.set	mips3				\n" +	"	.set	arch=r4000			\n"  	"1:	ll	%0, %1		# set_reg32	\n"  	"	or	%0, %2				\n"  	"	sc	%0, %1				\n" @@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,  	__asm__ __volatile__(  	"	.set	push				\n" -	"	.set	mips3				\n" +	"	.set	arch=r4000			\n"  	"1:	ll	%0, %1		# clear_reg32	\n"  	"	and	%0, %2				\n"  	"	sc	%0, %1				\n" @@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,  	__asm__ __volatile__(  	"	.set	push				\n" -	"	.set	mips3				\n" +	"	.set	arch=r4000		
	\n"  	"1:	ll	%0, %1		# toggle_reg32	\n"  	"	xor	%0, %2				\n"  	"	sc	%0, %1				\n" @@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)  #define custom_read_reg32(address, tmp)				\  	__asm__ __volatile__(					\  	"	.set	push				\n"	\ -	"	.set	mips3				\n"	\ +	"	.set	arch=r4000			\n"	\  	"1:	ll	%0, %1	#custom_read_reg32	\n"	\  	"	.set	pop				\n"	\  	: "=r" (tmp), "=m" (*address)				\ @@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)  #define custom_write_reg32(address, tmp)			\  	__asm__ __volatile__(					\  	"	.set	push				\n"	\ -	"	.set	mips3				\n"	\ +	"	.set	arch=r4000			\n"	\  	"	sc	%0, %1	#custom_write_reg32	\n"	\  	"	"__beqz"%0, 1b				\n"	\  	"	nop					\n"	\ diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h index aa45e6a0712..fe1566f2913 100644 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h +++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h @@ -25,11 +25,7 @@  #ifndef MSP_USB_H_  #define MSP_USB_H_ -#ifdef CONFIG_MSP_HAS_DUAL_USB -#define NUM_USB_DEVS   2 -#else  #define NUM_USB_DEVS   1 -#endif  /* Register spaces for USB host 0 */  #define MSP_USB0_MAB_START	(MSP_USB0_BASE + 0x0) diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h deleted file mode 100644 index b341108d12f..00000000000 --- a/arch/mips/include/asm/mach-powertv/asic.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (C) 2009  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ - -#ifndef _ASM_MACH_POWERTV_ASIC_H -#define _ASM_MACH_POWERTV_ASIC_H - -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <asm/mach-powertv/asic_regs.h> - -#define DVR_CAPABLE	(1<<0) -#define PCIE_CAPABLE	(1<<1) -#define FFS_CAPABLE	(1<<2) -#define DISPLAY_CAPABLE (1<<3) - -/* Platform Family types - * For compitability, the new value must be added in the end */ -enum family_type { -	FAMILY_8500, -	FAMILY_8500RNG, -	FAMILY_4500, -	FAMILY_1500, -	FAMILY_8600, -	FAMILY_4600, -	FAMILY_4600VZA, -	FAMILY_8600VZB, -	FAMILY_1500VZE, -	FAMILY_1500VZF, -	FAMILY_8700, -	FAMILIES -}; - -/* Register maps for each ASIC */ -extern const struct register_map calliope_register_map; -extern const struct register_map cronus_register_map; -extern const struct register_map gaia_register_map; -extern const struct register_map zeus_register_map; - -extern struct resource dvr_cronus_resources[]; -extern struct resource dvr_gaia_resources[]; -extern struct resource dvr_zeus_resources[]; -extern struct resource non_dvr_calliope_resources[]; -extern struct resource non_dvr_cronus_resources[]; -extern struct resource non_dvr_cronuslite_resources[]; -extern struct resource non_dvr_gaia_resources[]; -extern struct resource non_dvr_vz_calliope_resources[]; -extern struct resource non_dvr_vze_calliope_resources[]; -extern struct resource non_dvr_vzf_calliope_resources[]; -extern struct resource non_dvr_zeus_resources[]; - -extern void powertv_platform_init(void); -extern void platform_alloc_bootmem(void); -extern enum asic_type platform_get_asic(void); -extern enum family_type platform_get_family(void); -extern int platform_supports_dvr(void); -extern int platform_supports_ffs(void); -extern int 
platform_supports_pcie(void); -extern int platform_supports_display(void); -extern void configure_platform(void); - -/* Platform Resources */ -#define ASIC_RESOURCE_GET_EXISTS 1 -extern struct resource *asic_resource_get(const char *name); -extern void platform_release_memory(void *baddr, int size); - -/* USB configuration */ -struct usb_hcd;			/* Forward reference */ -extern void platform_configure_usb_ehci(void); -extern void platform_unconfigure_usb_ehci(void); -extern void platform_configure_usb_ohci(void); -extern void platform_unconfigure_usb_ohci(void); - -/* Resource for ASIC registers */ -extern struct resource asic_resource; -extern int platform_usb_devices_init(struct platform_device **echi_dev, -	struct platform_device **ohci_dev); - -/* Reboot Cause */ -extern void set_reboot_cause(char code, unsigned int data, unsigned int data2); -extern void set_locked_reboot_cause(char code, unsigned int data, -	unsigned int data2); - -enum sys_reboot_type { -	sys_unknown_reboot = 0x00,	/* Unknown reboot cause */ -	sys_davic_change = 0x01,	/* Reboot due to change in DAVIC -					 * mode */ -	sys_user_reboot = 0x02,		/* Reboot initiated by user */ -	sys_system_reboot = 0x03,	/* Reboot initiated by OS */ -	sys_trap_reboot = 0x04,		/* Reboot due to a CPU trap */ -	sys_silent_reboot = 0x05,	/* Silent reboot */ -	sys_boot_ldr_reboot = 0x06,	/* Bootloader reboot */ -	sys_power_up_reboot = 0x07,	/* Power on bootup.  Older -					 * drivers may report as -					 * userReboot. */ -	sys_code_change = 0x08,		/* Reboot to take code change. -					 * Older drivers may report as -					 * userReboot. */ -	sys_hardware_reset = 0x09,	/* HW watchdog or front-panel -					 * reset button reset.	Older -					 * drivers may report as -					 * userReboot. 
*/ -	sys_watchdogInterrupt = 0x0A	/* Pre-watchdog interrupt */ -}; - -#endif /* _ASM_MACH_POWERTV_ASIC_H */ diff --git a/arch/mips/include/asm/mach-powertv/asic_reg_map.h b/arch/mips/include/asm/mach-powertv/asic_reg_map.h deleted file mode 100644 index 20348e817b0..00000000000 --- a/arch/mips/include/asm/mach-powertv/asic_reg_map.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - *				asic_reg_map.h - * - * A macro-enclosed list of the elements for the register_map structure for - * use in defining and manipulating the structure. - * - * Copyright (C) 2009  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ - -REGISTER_MAP_ELEMENT(eic_slow0_strt_add) -REGISTER_MAP_ELEMENT(eic_cfg_bits) -REGISTER_MAP_ELEMENT(eic_ready_status) -REGISTER_MAP_ELEMENT(chipver3) -REGISTER_MAP_ELEMENT(chipver2) -REGISTER_MAP_ELEMENT(chipver1) -REGISTER_MAP_ELEMENT(chipver0) -REGISTER_MAP_ELEMENT(uart1_intstat) -REGISTER_MAP_ELEMENT(uart1_inten) -REGISTER_MAP_ELEMENT(uart1_config1) -REGISTER_MAP_ELEMENT(uart1_config2) -REGISTER_MAP_ELEMENT(uart1_divisorhi) -REGISTER_MAP_ELEMENT(uart1_divisorlo) -REGISTER_MAP_ELEMENT(uart1_data) -REGISTER_MAP_ELEMENT(uart1_status) -REGISTER_MAP_ELEMENT(int_stat_3) -REGISTER_MAP_ELEMENT(int_stat_2) -REGISTER_MAP_ELEMENT(int_stat_1) -REGISTER_MAP_ELEMENT(int_stat_0) -REGISTER_MAP_ELEMENT(int_config) -REGISTER_MAP_ELEMENT(int_int_scan) -REGISTER_MAP_ELEMENT(ien_int_3) -REGISTER_MAP_ELEMENT(ien_int_2) -REGISTER_MAP_ELEMENT(ien_int_1) -REGISTER_MAP_ELEMENT(ien_int_0) -REGISTER_MAP_ELEMENT(int_level_3_3) -REGISTER_MAP_ELEMENT(int_level_3_2) -REGISTER_MAP_ELEMENT(int_level_3_1) -REGISTER_MAP_ELEMENT(int_level_3_0) -REGISTER_MAP_ELEMENT(int_level_2_3) -REGISTER_MAP_ELEMENT(int_level_2_2) -REGISTER_MAP_ELEMENT(int_level_2_1) -REGISTER_MAP_ELEMENT(int_level_2_0) -REGISTER_MAP_ELEMENT(int_level_1_3) -REGISTER_MAP_ELEMENT(int_level_1_2) -REGISTER_MAP_ELEMENT(int_level_1_1) -REGISTER_MAP_ELEMENT(int_level_1_0) -REGISTER_MAP_ELEMENT(int_level_0_3) -REGISTER_MAP_ELEMENT(int_level_0_2) -REGISTER_MAP_ELEMENT(int_level_0_1) -REGISTER_MAP_ELEMENT(int_level_0_0) -REGISTER_MAP_ELEMENT(int_docsis_en) -REGISTER_MAP_ELEMENT(mips_pll_setup) -REGISTER_MAP_ELEMENT(fs432x4b4_usb_ctl) -REGISTER_MAP_ELEMENT(test_bus) -REGISTER_MAP_ELEMENT(crt_spare) -REGISTER_MAP_ELEMENT(usb2_ohci_int_mask) -REGISTER_MAP_ELEMENT(usb2_strap) -REGISTER_MAP_ELEMENT(ehci_hcapbase) 
-REGISTER_MAP_ELEMENT(ohci_hc_revision) -REGISTER_MAP_ELEMENT(bcm1_bs_lmi_steer) -REGISTER_MAP_ELEMENT(usb2_control) -REGISTER_MAP_ELEMENT(usb2_stbus_obc) -REGISTER_MAP_ELEMENT(usb2_stbus_mess_size) -REGISTER_MAP_ELEMENT(usb2_stbus_chunk_size) -REGISTER_MAP_ELEMENT(pcie_regs) -REGISTER_MAP_ELEMENT(tim_ch) -REGISTER_MAP_ELEMENT(tim_cl) -REGISTER_MAP_ELEMENT(gpio_dout) -REGISTER_MAP_ELEMENT(gpio_din) -REGISTER_MAP_ELEMENT(gpio_dir) -REGISTER_MAP_ELEMENT(watchdog) -REGISTER_MAP_ELEMENT(front_panel) -REGISTER_MAP_ELEMENT(misc_clk_ctl1) -REGISTER_MAP_ELEMENT(misc_clk_ctl2) -REGISTER_MAP_ELEMENT(crt_ext_ctl) -REGISTER_MAP_ELEMENT(register_maps) diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h deleted file mode 100644 index 06712abb3e5..00000000000 --- a/arch/mips/include/asm/mach-powertv/asic_regs.h +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (C) 2009  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ - -#ifndef __ASM_MACH_POWERTV_ASIC_H_ -#define __ASM_MACH_POWERTV_ASIC_H_ -#include <linux/io.h> - -/* ASIC types */ -enum asic_type { -	ASIC_UNKNOWN, -	ASIC_ZEUS, -	ASIC_CALLIOPE, -	ASIC_CRONUS, -	ASIC_CRONUSLITE, -	ASIC_GAIA, -	ASICS			/* Number of supported ASICs */ -}; - -/* hardcoded values read from Chip Version registers */ -#define CRONUS_10	0x0B4C1C20 -#define CRONUS_11	0x0B4C1C21 -#define CRONUSLITE_10	0x0B4C1C40 - -#define NAND_FLASH_BASE		0x03000000 -#define CALLIOPE_IO_BASE	0x08000000 -#define GAIA_IO_BASE		0x09000000 -#define CRONUS_IO_BASE		0x09000000 -#define ZEUS_IO_BASE		0x09000000 - -#define ASIC_IO_SIZE		0x01000000 - -/* Definitions for backward compatibility */ -#define UART1_INTSTAT	uart1_intstat -#define UART1_INTEN	uart1_inten -#define UART1_CONFIG1	uart1_config1 -#define UART1_CONFIG2	uart1_config2 -#define UART1_DIVISORHI uart1_divisorhi -#define UART1_DIVISORLO uart1_divisorlo -#define UART1_DATA	uart1_data -#define UART1_STATUS	uart1_status - -/* ASIC register enumeration */ -union register_map_entry { -	unsigned long phys; -	u32 *virt; -}; - -#define REGISTER_MAP_ELEMENT(x) union register_map_entry x; -struct register_map { -#include <asm/mach-powertv/asic_reg_map.h> -}; -#undef REGISTER_MAP_ELEMENT - -/** - * register_map_offset_phys - add an offset to the physical address - * @map:	Pointer to the &struct register_map - * @offset:	Value to add - * - * Only adds the base to non-zero physical addresses - */ -static inline void register_map_offset_phys(struct register_map *map, -	unsigned long offset) -{ -#define REGISTER_MAP_ELEMENT(x)		do {				\ -		if (map->x.phys != 0)					\ -			map->x.phys += offset;				\ -	} while (false); - -#include <asm/mach-powertv/asic_reg_map.h> -#undef REGISTER_MAP_ELEMENT -} - -/** - * 
register_map_virtualize - Convert ®ister_map to virtual addresses - * @map:	Pointer to ®ister_map to virtualize - */ -static inline void register_map_virtualize(struct register_map *map) -{ -#define REGISTER_MAP_ELEMENT(x)		do {				\ -		map->x.virt = (!map->x.phys) ? NULL :			\ -			UNCAC_ADDR(phys_to_virt(map->x.phys));		\ -	} while (false); - -#include <asm/mach-powertv/asic_reg_map.h> -#undef REGISTER_MAP_ELEMENT -} - -extern struct register_map _asic_register_map; -extern unsigned long asic_phy_base; - -/* - * Macros to interface to registers through their ioremapped address - * asic_reg_phys_addr	Returns the physical address of the given register - * asic_reg_addr	Returns the iomapped virtual address of the given - *			register. - */ -#define asic_reg_addr(x)	(_asic_register_map.x.virt) -#define asic_reg_phys_addr(x)	(virt_to_phys((void *) CAC_ADDR(	\ -					(unsigned long) asic_reg_addr(x)))) - -/* - * The asic_reg macro is gone. It should be replaced by either asic_read or - * asic_write, as appropriate. - */ - -#define asic_read(x)		readl(asic_reg_addr(x)) -#define asic_write(v, x)	writel(v, asic_reg_addr(x)) - -extern void asic_irq_init(void); -#endif diff --git a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h deleted file mode 100644 index 58c76ec32a1..00000000000 --- a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2010  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ - -#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ -#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ -#define cpu_has_tlb			1 -#define cpu_has_4kex			1 -#define cpu_has_3k_cache		0 -#define cpu_has_4k_cache		1 -#define cpu_has_tx39_cache		0 -#define cpu_has_fpu			0 -#define cpu_has_counter			1 -#define cpu_has_watch			1 -#define cpu_has_divec			1 -#define cpu_has_vce			0 -#define cpu_has_cache_cdex_p		0 -#define cpu_has_cache_cdex_s		0 -#define cpu_has_mcheck			1 -#define cpu_has_ejtag			1 -#define cpu_has_llsc			1 -#define cpu_has_mips16			0 -#define cpu_has_mdmx			0 -#define cpu_has_mips3d			0 -#define cpu_has_smartmips		0 -#define cpu_has_vtag_icache		0 -#define cpu_has_dc_aliases		0 -#define cpu_has_ic_fills_f_dc		0 -#define cpu_has_mips32r1		0 -#define cpu_has_mips32r2		1 -#define cpu_has_mips64r1		0 -#define cpu_has_mips64r2		0 -#define cpu_has_dsp			0 -#define cpu_has_dsp2			0 -#define cpu_has_mipsmt			0 -#define cpu_has_userlocal		0 -#define cpu_has_nofpuex			0 -#define cpu_has_64bits			0 -#define cpu_has_64bit_zero_reg		0 -#define cpu_has_vint			1 -#define cpu_has_veic			1 -#define cpu_has_inclusive_pcaches	0 - -#define cpu_dcache_line_size()		32 -#define cpu_icache_line_size()		32 -#endif diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h deleted file mode 100644 index f8316720a21..00000000000 --- a/arch/mips/include/asm/mach-powertv/dma-coherence.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License.  See the file "COPYING" in the main directory of this archive - * for more details. 
- * - * Version from mach-generic modified to support PowerTV port - * Portions Copyright (C) 2009	Cisco Systems, Inc. - * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org> - * - */ - -#ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H -#define __ASM_MACH_POWERTV_DMA_COHERENCE_H - -#include <linux/sched.h> -#include <linux/device.h> -#include <asm/mach-powertv/asic.h> - -static inline bool is_kseg2(void *addr) -{ -	return (unsigned long)addr >= KSEG2; -} - -static inline unsigned long virt_to_phys_from_pte(void *addr) -{ -	pgd_t *pgd; -	pud_t *pud; -	pmd_t *pmd; -	pte_t *ptep, pte; - -	unsigned long virt_addr = (unsigned long)addr; -	unsigned long phys_addr = 0UL; - -	/* get the page global directory. */ -	pgd = pgd_offset_k(virt_addr); - -	if (!pgd_none(*pgd)) { -		/* get the page upper directory */ -		pud = pud_offset(pgd, virt_addr); -		if (!pud_none(*pud)) { -			/* get the page middle directory */ -			pmd = pmd_offset(pud, virt_addr); -			if (!pmd_none(*pmd)) { -				/* get a pointer to the page table entry */ -				ptep = pte_offset(pmd, virt_addr); -				pte = *ptep; -				/* check for a valid page */ -				if (pte_present(pte)) { -					/* get the physical address the page is -					 * referring to */ -					phys_addr = (unsigned long) -						page_to_phys(pte_page(pte)); -					/* add the offset within the page */ -					phys_addr |= (virt_addr & ~PAGE_MASK); -				} -			} -		} -	} - -	return phys_addr; -} - -static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, -	size_t size) -{ -	if (is_kseg2(addr)) -		return phys_to_dma(virt_to_phys_from_pte(addr)); -	else -		return phys_to_dma(virt_to_phys(addr)); -} - -static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, -	struct page *page) -{ -	return phys_to_dma(page_to_phys(page)); -} - -static inline unsigned long plat_dma_addr_to_phys(struct device *dev, -	dma_addr_t dma_addr) -{ -	return dma_to_phys(dma_addr); -} - -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, 
-	size_t size, enum dma_data_direction direction) -{ -} - -static inline int plat_dma_supported(struct device *dev, u64 mask) -{ -	/* -	 * we fall back to GFP_DMA when the mask isn't all 1s, -	 * so we can't guarantee allocations that must be -	 * within a tighter range than GFP_DMA.. -	 */ -	if (mask < DMA_BIT_MASK(24)) -		return 0; - -	return 1; -} - -static inline void plat_extra_sync_for_device(struct device *dev) -{ -} - -static inline int plat_dma_mapping_error(struct device *dev, -					 dma_addr_t dma_addr) -{ -	return 0; -} - -static inline int plat_device_is_coherent(struct device *dev) -{ -	return 0; -} - -#endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h deleted file mode 100644 index 6c463be6215..00000000000 --- a/arch/mips/include/asm/mach-powertv/interrupts.h +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright (C) 2009  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ - -#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_ -#define _ASM_MACH_POWERTV_INTERRUPTS_H_ - -/* - * Defines for all of the interrupt lines - */ - -/* Definitions for backward compatibility */ -#define kIrq_Uart1		irq_uart1 - -#define ibase 0 - -/*------------- Register: int_stat_3 */ -/* 126 unused (bit 31) */ -#define irq_asc2video		(ibase+126)	/* ASC 2 Video Interrupt */ -#define irq_asc1video		(ibase+125)	/* ASC 1 Video Interrupt */ -#define irq_comms_block_wd	(ibase+124)	/* ASC 1 Video Interrupt */ -#define irq_fdma_mailbox	(ibase+123)	/* FDMA Mailbox Output */ -#define irq_fdma_gp		(ibase+122)	/* FDMA GP Output */ -#define irq_mips_pic		(ibase+121)	/* MIPS Performance Counter -						 * Interrupt */ -#define irq_mips_timer		(ibase+120)	/* MIPS Timer Interrupt */ -#define irq_memory_protect	(ibase+119)	/* Memory Protection Interrupt -						 * -- Ored by glue logic inside -						 *  SPARC ILC (see -						 *  INT_MEM_PROT_STAT, below, -						 *  for individual interrupts) -						 */ -/* 118 unused (bit 22) */ -#define irq_sbag		(ibase+117)	/* SBAG Interrupt -- Ored by -						 * glue logic inside SPARC ILC -						 * (see INT_SBAG_STAT, below, -						 * for individual interrupts) */ -#define irq_qam_b_fec		(ibase+116)	/* QAM	B FEC Interrupt */ -#define irq_qam_a_fec		(ibase+115)	/* QAM A FEC Interrupt */ -/* 114 unused	(bit 18) */ -#define irq_mailbox		(ibase+113)	/* Mailbox Debug Interrupt  -- -						 * Ored by glue logic inside -						 * SPARC ILC (see -						 * INT_MAILBOX_STAT, below, for -						 * individual interrupts) */ -#define irq_fuse_stat1		(ibase+112)	/* Fuse Status 1 */ -#define irq_fuse_stat2		(ibase+111)	/* Fuse Status 2 */ -#define irq_fuse_stat3		(ibase+110)	/* Blitter Interrupt / Fuse -						 * Status 3 */ -#define irq_blitter		
(ibase+110)	/* Blitter Interrupt / Fuse -						 * Status 3 */ -#define irq_avc1_pp0		(ibase+109)	/* AVC Decoder #1 PP0 -						 * Interrupt */ -#define irq_avc1_pp1		(ibase+108)	/* AVC Decoder #1 PP1 -						 * Interrupt */ -#define irq_avc1_mbe		(ibase+107)	/* AVC Decoder #1 MBE -						 * Interrupt */ -#define irq_avc2_pp0		(ibase+106)	/* AVC Decoder #2 PP0 -						 * Interrupt */ -#define irq_avc2_pp1		(ibase+105)	/* AVC Decoder #2 PP1 -						 * Interrupt */ -#define irq_avc2_mbe		(ibase+104)	/* AVC Decoder #2 MBE -						 * Interrupt */ -#define irq_zbug_spi		(ibase+103)	/* Zbug SPI Slave Interrupt */ -#define irq_qam_mod2		(ibase+102)	/* QAM Modulator 2 DMA -						 * Interrupt */ -#define irq_ir_rx		(ibase+101)	/* IR RX 2 Interrupt */ -#define irq_aud_dsp2		(ibase+100)	/* Audio DSP #2 Interrupt */ -#define irq_aud_dsp1		(ibase+99)	/* Audio DSP #1 Interrupt */ -#define irq_docsis		(ibase+98)	/* DOCSIS Debug Interrupt */ -#define irq_sd_dvp1		(ibase+97)	/* SD DVP #1 Interrupt */ -#define irq_sd_dvp2		(ibase+96)	/* SD DVP #2 Interrupt */ -/*------------- Register: int_stat_2 */ -#define irq_hd_dvp		(ibase+95)	/* HD DVP Interrupt */ -#define kIrq_Prewatchdog	(ibase+94)	/* watchdog Pre-Interrupt */ -#define irq_timer2		(ibase+93)	/* Programmable Timer -						 * Interrupt 2 */ -#define irq_1394		(ibase+92)	/* 1394 Firewire Interrupt */ -#define irq_usbohci		(ibase+91)	/* USB 2.0 OHCI Interrupt */ -#define irq_usbehci		(ibase+90)	/* USB 2.0 EHCI Interrupt */ -#define irq_pciexp		(ibase+89)	/* PCI Express 0 Interrupt */ -#define irq_pciexp0		(ibase+89)	/* PCI Express 0 Interrupt */ -#define irq_afe1		(ibase+88)	/* AFE 1 Interrupt */ -#define irq_sata		(ibase+87)	/* SATA 1 Interrupt */ -#define irq_sata1		(ibase+87)	/* SATA 1 Interrupt */ -#define irq_dtcp		(ibase+86)	/* DTCP Interrupt */ -#define irq_pciexp1		(ibase+85)	/* PCI Express 1 Interrupt */ -/* 84 unused	(bit 20) */ -/* 83 unused	(bit 19) */ -/* 82 unused	(bit 18) */ -#define irq_sata2		(ibase+81)	/* SATA2 
Interrupt */ -#define irq_uart2		(ibase+80)	/* UART2 Interrupt */ -#define irq_legacy_usb		(ibase+79)	/* Legacy USB Host ISR (1.1 -						 * Host module) */ -#define irq_pod			(ibase+78)	/* POD Interrupt */ -#define irq_slave_usb		(ibase+77)	/* Slave USB */ -#define irq_denc1		(ibase+76)	/* DENC #1 VTG Interrupt */ -#define irq_vbi_vtg		(ibase+75)	/* VBI VTG Interrupt */ -#define irq_afe2		(ibase+74)	/* AFE 2 Interrupt */ -#define irq_denc2		(ibase+73)	/* DENC #2 VTG Interrupt */ -#define irq_asc2		(ibase+72)	/* ASC #2 Interrupt */ -#define irq_asc1		(ibase+71)	/* ASC #1 Interrupt */ -#define irq_mod_dma		(ibase+70)	/* Modulator DMA Interrupt */ -#define irq_byte_eng1		(ibase+69)	/* Byte Engine Interrupt [1] */ -#define irq_byte_eng0		(ibase+68)	/* Byte Engine Interrupt [0] */ -/* 67 unused	(bit 03) */ -/* 66 unused	(bit 02) */ -/* 65 unused	(bit 01) */ -/* 64 unused	(bit 00) */ -/*------------- Register: int_stat_1 */ -/* 63 unused	(bit 31) */ -/* 62 unused	(bit 30) */ -/* 61 unused	(bit 29) */ -/* 60 unused	(bit 28) */ -/* 59 unused	(bit 27) */ -/* 58 unused	(bit 26) */ -/* 57 unused	(bit 25) */ -/* 56 unused	(bit 24) */ -#define irq_buf_dma_mem2mem	(ibase+55)	/* BufDMA Memory to Memory -						 * Interrupt */ -#define irq_buf_dma_usbtransmit (ibase+54)	/* BufDMA USB Transmit -						 * Interrupt */ -#define irq_buf_dma_qpskpodtransmit (ibase+53)	/* BufDMA QPSK/POD Tramsit -						 * Interrupt */ -#define irq_buf_dma_transmit_error (ibase+52)	/* BufDMA Transmit Error -						 * Interrupt */ -#define irq_buf_dma_usbrecv	(ibase+51)	/* BufDMA USB Receive -						 * Interrupt */ -#define irq_buf_dma_qpskpodrecv (ibase+50)	/* BufDMA QPSK/POD Receive -						 * Interrupt */ -#define irq_buf_dma_recv_error	(ibase+49)	/* BufDMA Receive Error -						 * Interrupt */ -#define irq_qamdma_transmit_play (ibase+48)	/* QAMDMA Transmit/Play -						 * Interrupt */ -#define irq_qamdma_transmit_error (ibase+47)	/* QAMDMA Transmit Error -						 * Interrupt */ -#define irq_qamdma_recv2high	
(ibase+46)	/* QAMDMA Receive 2 High -						 * (Chans 63-32) */ -#define irq_qamdma_recv2low	(ibase+45)	/* QAMDMA Receive 2 Low -						 * (Chans 31-0) */ -#define irq_qamdma_recv1high	(ibase+44)	/* QAMDMA Receive 1 High -						 * (Chans 63-32) */ -#define irq_qamdma_recv1low	(ibase+43)	/* QAMDMA Receive 1 Low -						 * (Chans 31-0) */ -#define irq_qamdma_recv_error	(ibase+42)	/* QAMDMA Receive Error -						 * Interrupt */ -#define irq_mpegsplice		(ibase+41)	/* MPEG Splice Interrupt */ -#define irq_deinterlace_rdy	(ibase+40)	/* Deinterlacer Frame Ready -						 * Interrupt */ -#define irq_ext_in0		(ibase+39)	/* External Interrupt irq_in0 */ -#define irq_gpio3		(ibase+38)	/* GP I/O IRQ 3 - From GP I/O -						 * Module */ -#define irq_gpio2		(ibase+37)	/* GP I/O IRQ 2 - From GP I/O -						 * Module (ABE_intN) */ -#define irq_pcrcmplt1		(ibase+36)	/* PCR Capture Complete	 or -						 * Discontinuity 1 */ -#define irq_pcrcmplt2		(ibase+35)	/* PCR Capture Complete or -						 * Discontinuity 2 */ -#define irq_parse_peierr	(ibase+34)	/* PID Parser Error Detect -						 * (PEI) */ -#define irq_parse_cont_err	(ibase+33)	/* PID Parser continuity error -						 * detect */ -#define irq_ds1framer		(ibase+32)	/* DS1 Framer Interrupt */ -/*------------- Register: int_stat_0 */ -#define irq_gpio1		(ibase+31)	/* GP I/O IRQ 1 - From GP I/O -						 * Module */ -#define irq_gpio0		(ibase+30)	/* GP I/O IRQ 0 - From GP I/O -						 * Module */ -#define irq_qpsk_out_aloha	(ibase+29)	/* QPSK Output Slotted Aloha -						 * (chan 3) Transmission -						 * Completed OK */ -#define irq_qpsk_out_tdma	(ibase+28)	/* QPSK Output TDMA (chan 2) -						 * Transmission Completed OK */ -#define irq_qpsk_out_reserve	(ibase+27)	/* QPSK Output Reservation -						 * (chan 1) Transmission -						 * Completed OK */ -#define irq_qpsk_out_aloha_err	(ibase+26)	/* QPSK Output Slotted Aloha -						 * (chan 3)Transmission -						 * completed with Errors. 
*/ -#define irq_qpsk_out_tdma_err	(ibase+25)	/* QPSK Output TDMA (chan 2) -						 * Transmission completed with -						 * Errors. */ -#define irq_qpsk_out_rsrv_err	(ibase+24)	/* QPSK Output Reservation -						 * (chan 1) Transmission -						 * completed with Errors */ -#define irq_aloha_fail		(ibase+23)	/* Unsuccessful Resend of Aloha -						 * for N times. Aloha retry -						 * timeout for channel 3. */ -#define irq_timer1		(ibase+22)	/* Programmable Timer -						 * Interrupt */ -#define irq_keyboard		(ibase+21)	/* Keyboard Module Interrupt */ -#define irq_i2c			(ibase+20)	/* I2C Module Interrupt */ -#define irq_spi			(ibase+19)	/* SPI Module Interrupt */ -#define irq_irblaster		(ibase+18)	/* IR Blaster Interrupt */ -#define irq_splice_detect	(ibase+17)	/* PID Key Change Interrupt or -						 * Splice Detect Interrupt */ -#define irq_se_micro		(ibase+16)	/* Secure Micro I/F Module -						 * Interrupt */ -#define irq_uart1		(ibase+15)	/* UART Interrupt */ -#define irq_irrecv		(ibase+14)	/* IR Receiver Interrupt */ -#define irq_host_int1		(ibase+13)	/* Host-to-Host Interrupt 1 */ -#define irq_host_int0		(ibase+12)	/* Host-to-Host Interrupt 0 */ -#define irq_qpsk_hecerr		(ibase+11)	/* QPSK HEC Error Interrupt */ -#define irq_qpsk_crcerr		(ibase+10)	/* QPSK AAL-5 CRC Error -						 * Interrupt */ -/* 9 unused	(bit 09) */ -/* 8 unused	(bit 08) */ -#define irq_psicrcerr		(ibase+7)	/* QAM PSI CRC Error -						 * Interrupt */ -#define irq_psilength_err	(ibase+6)	/* QAM PSI Length Error -						 * Interrupt */ -#define irq_esfforward		(ibase+5)	/* ESF Interrupt Mark From -						 * Forward Path Reference - -						 * every 3ms when forward Mbits -						 * and forward slot control -						 * bytes are updated. */ -#define irq_esfreverse		(ibase+4)	/* ESF Interrupt Mark from -						 * Reverse Path Reference - -						 * delayed from forward mark by -						 * the ranging delay plus a -						 * fixed amount. 
When reverse -						 * Mbits and reverse slot -						 * control bytes are updated. -						 * Occurs every 3ms for 3.0M and -						 * 1.554 M upstream rates and -						 * every 6 ms for 256K upstream -						 * rate. */ -#define irq_aloha_timeout	(ibase+3)	/* Slotted-Aloha timeout on -						 * Channel 1. */ -#define irq_reservation		(ibase+2)	/* Partial (or Incremental) -						 * Reservation Message Completed -						 * or Slotted aloha verify for -						 * channel 1. */ -#define irq_aloha3		(ibase+1)	/* Slotted-Aloha Message Verify -						 * Interrupt or Reservation -						 * increment completed for -						 * channel 3. */ -#define irq_mpeg_d		(ibase+0)	/* MPEG Decoder Interrupt */ -#endif	/* _ASM_MACH_POWERTV_INTERRUPTS_H_ */ diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h deleted file mode 100644 index c86ef094ec3..00000000000 --- a/arch/mips/include/asm/mach-powertv/ioremap.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - *	This program is free software; you can redistribute it and/or - *	modify it under the terms of the GNU General Public License - *	as published by the Free Software Foundation; either version - *	2 of the License, or (at your option) any later version. - * - * Portions Copyright (C)  Cisco Systems, Inc. - */ -#ifndef __ASM_MACH_POWERTV_IOREMAP_H -#define __ASM_MACH_POWERTV_IOREMAP_H - -#include <linux/types.h> -#include <linux/log2.h> -#include <linux/compiler.h> - -#include <asm/pgtable-bits.h> -#include <asm/addrspace.h> - -/* We're going to mess with bits, so get sizes */ -#define IOR_BPC			8			/* Bits per char */ -#define IOR_PHYS_BITS		(IOR_BPC * sizeof(phys_addr_t)) -#define IOR_DMA_BITS		(IOR_BPC * sizeof(dma_addr_t)) - -/* - * Define the granularity of physical/DMA mapping in terms of the number - * of bits that defines the offset within a grain. These will be the - * least significant bits of the address. 
The rest of a physical or DMA - * address will be used to index into an appropriate table to find the - * offset to add to the address to yield the corresponding DMA or physical - * address, respectively. - */ -#define IOR_LSBITS		22			/* Bits in a grain */ - -/* - * Compute the number of most significant address bits after removing those - * used for the offset within a grain and then compute the number of table - * entries for the conversion. - */ -#define IOR_PHYS_MSBITS		(IOR_PHYS_BITS - IOR_LSBITS) -#define IOR_NUM_PHYS_TO_DMA	((phys_addr_t) 1 << IOR_PHYS_MSBITS) - -#define IOR_DMA_MSBITS		(IOR_DMA_BITS - IOR_LSBITS) -#define IOR_NUM_DMA_TO_PHYS	((dma_addr_t) 1 << IOR_DMA_MSBITS) - -/* - * Define data structures used as elements in the arrays for the conversion - * between physical and DMA addresses. We do some slightly fancy math to - * compute the width of the offset element of the conversion tables so - * that we can have the smallest conversion tables. Next, round up the - * sizes to the next higher power of two, i.e. the offset element will have - * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any - * bits.  Finally, we compute a shift value that puts the most significant - * bits of the offset into the most significant bits of the offset element. - * This makes it more efficient on processors without barrel shifters and - * easier to see the values if the conversion table is dumped in binary. - */ -#define _IOR_OFFSET_WIDTH(n)	(1 << order_base_2(n)) -#define IOR_OFFSET_WIDTH(n) \ -	(_IOR_OFFSET_WIDTH(n) < 8 ? 
8 : _IOR_OFFSET_WIDTH(n)) - -#define IOR_PHYS_OFFSET_BITS	IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS) -#define IOR_PHYS_SHIFT		(IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS) - -#define IOR_DMA_OFFSET_BITS	IOR_OFFSET_WIDTH(IOR_DMA_MSBITS) -#define IOR_DMA_SHIFT		(IOR_DMA_BITS - IOR_DMA_OFFSET_BITS) - -struct ior_phys_to_dma { -	dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed -		__aligned((IOR_DMA_OFFSET_BITS / IOR_BPC)); -}; - -struct ior_dma_to_phys { -	dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed -		__aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC)); -}; - -extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA]; -extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS]; - -static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys) -{ -	return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset; -} - -static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma) -{ -	return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset; -} - -/* These are not portable and should not be used in drivers. Drivers should - * be using ioremap() and friends to map physical addresses to virtual - * addresses and dma_map*() and friends to map virtual addresses into DMA - * addresses and back. - */ -static inline dma_addr_t phys_to_dma(phys_addr_t phys) -{ -	return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT); -} - -static inline phys_addr_t dma_to_phys(dma_addr_t dma) -{ -	return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT); -} - -extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias, -	dma_addr_t size); - -/* - * Allow physical addresses to be fixed up to help peripherals located - * outside the low 32-bit range -- generic pass-through version. - */ -static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) -{ -	return phys_addr; -} - -/* - * Handle the special case of addresses the area aliased into the first - * 512 MiB of the processor's physical address space. 
These turn into either - * kseg0 or kseg1 addresses, depending on flags. - */ -static inline void __iomem *plat_ioremap(phys_t start, unsigned long size, -	unsigned long flags) -{ -	phys_addr_t start_offset; -	void __iomem *result = NULL; - -	/* Start by checking to see whether this is an aliased address */ -	start_offset = _dma_to_phys_offset_raw(start); - -	/* -	 * If: -	 * o	the memory is aliased into the first 512 MiB, and -	 * o	the start and end are in the same RAM bank, and -	 * o	we don't have a zero size or wrap around, and -	 * o	we are supposed to create an uncached mapping, -	 *	handle this is a kseg0 or kseg1 address -	 */ -	if (start_offset != 0) { -		phys_addr_t last; -		dma_addr_t dma_to_phys_offset; - -		last = start + size - 1; -		dma_to_phys_offset = -			_dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT; - -		if (dma_to_phys_offset == start_offset && -			size != 0 && start <= last) { -			phys_t adjusted_start; -			adjusted_start = start + start_offset; -			if (flags == _CACHE_UNCACHED) -				result = (void __iomem *) (unsigned long) -					CKSEG1ADDR(adjusted_start); -			else -				result = (void __iomem *) (unsigned long) -					CKSEG0ADDR(adjusted_start); -		} -	} - -	return result; -} - -static inline int plat_iounmap(const volatile void __iomem *addr) -{ -	return 0; -} -#endif /* __ASM_MACH_POWERTV_IOREMAP_H */ diff --git a/arch/mips/include/asm/mach-powertv/irq.h b/arch/mips/include/asm/mach-powertv/irq.h deleted file mode 100644 index 4bd5d0c61a9..00000000000 --- a/arch/mips/include/asm/mach-powertv/irq.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (C) 2009  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ - -#ifndef _ASM_MACH_POWERTV_IRQ_H -#define _ASM_MACH_POWERTV_IRQ_H -#include <asm/mach-powertv/interrupts.h> - -#define MIPS_CPU_IRQ_BASE	ibase -#define NR_IRQS			127 -#endif diff --git a/arch/mips/include/asm/mach-powertv/powertv-clock.h b/arch/mips/include/asm/mach-powertv/powertv-clock.h deleted file mode 100644 index 6f3e9a0fcf8..00000000000 --- a/arch/mips/include/asm/mach-powertv/powertv-clock.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2009  Cisco Systems, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA - */ -/* - * Local definitions for the powertv PCI code - */ - -#ifndef _POWERTV_PCI_POWERTV_PCI_H_ -#define _POWERTV_PCI_POWERTV_PCI_H_ -extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); -extern int asic_pcie_init(void); -extern int asic_pcie_init(void); - -extern int log_level; -#endif diff --git a/arch/mips/include/asm/mach-ralink/war.h b/arch/mips/include/asm/mach-ralink/war.h index a7b712cf2d2..c074b5dc1f8 100644 --- a/arch/mips/include/asm/mach-ralink/war.h +++ b/arch/mips/include/asm/mach-ralink/war.h @@ -17,7 +17,6 @@  #define MIPS4K_ICACHE_REFILL_WAR	0  #define MIPS_CACHE_SYNC_WAR		0  #define TX49XX_ICACHE_INDEX_INV_WAR	0 -#define RM9000_CDEX_SMP_WAR		0  #define ICACHE_REFILLS_WORKAROUND_WAR	0  #define R10000_LLSC_WAR			0  #define MIPS34K_MISSED_ITLB_WAR		0 diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h index 3dfbd8e7947..6cccd4d558d 100644 --- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h @@ -10,37 +10,6 @@  #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H  	.macro	kernel_entry_setup -#ifdef CONFIG_MIPS_MT_SMTC -	mfc0	t0, CP0_CONFIG -	bgez	t0, 9f -	mfc0	t0, CP0_CONFIG, 1 -	bgez	t0, 9f -	mfc0	t0, CP0_CONFIG, 2 -	bgez	t0, 9f -	mfc0	t0, CP0_CONFIG, 3 -	and	t0, 1<<2 -	bnez	t0, 0f -9 : -	/* Assume we came from YAMON... 
*/ -	PTR_LA	v0, 0x9fc00534	/* YAMON print */ -	lw	v0, (v0) -	move	a0, zero -	PTR_LA	a1, nonmt_processor -	jal	v0 - -	PTR_LA	v0, 0x9fc00520	/* YAMON exit */ -	lw	v0, (v0) -	li	a0, 1 -	jal	v0 - -1 :	b	1b - -	__INITDATA -nonmt_processor : -	.asciz	"SMTC kernel requires the MT ASE to run\n" -	__FINIT -0 : -#endif  	.endm  /* diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h index 48616816bcb..c904c24550f 100644 --- a/arch/mips/include/asm/mips-boards/generic.h +++ b/arch/mips/include/asm/mips-boards/generic.h @@ -67,10 +67,6 @@  extern int mips_revision_sconid; -#ifdef CONFIG_OF -extern struct boot_param_header __dtb_start; -#endif -  #ifdef CONFIG_PCI  extern void mips_pcibios_init(void);  #else diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h index 722bc889eab..fd9774269a5 100644 --- a/arch/mips/include/asm/mips-boards/malta.h +++ b/arch/mips/include/asm/mips-boards/malta.h @@ -64,6 +64,11 @@ static inline unsigned long get_msc_port_base(unsigned long reg)  #define GIC_ADDRSPACE_SZ		(128 * 1024)  /* + * CPC Specific definitions + */ +#define CPC_BASE_ADDR			0x1bde0000 + +/*   * MSC01 BIU Specific definitions   * FIXME : These should be elsewhere ?   */ diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h index a02596cf1ab..9e340be52a5 100644 --- a/arch/mips/include/asm/mips-boards/piix4.h +++ b/arch/mips/include/asm/mips-boards/piix4.h @@ -1,6 +1,7 @@  /*   * Carsten Langgaard, carstenl@mips.com   * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd.   
*   *  This program is free software; you can distribute it and/or modify it   *  under the terms of the GNU General Public License (Version 2) as @@ -20,61 +21,50 @@  #ifndef __ASM_MIPS_BOARDS_PIIX4_H  #define __ASM_MIPS_BOARDS_PIIX4_H -/************************************************************************ - *  IO register offsets - ************************************************************************/ -#define PIIX4_ICTLR1_ICW1	0x20 -#define PIIX4_ICTLR1_ICW2	0x21 -#define PIIX4_ICTLR1_ICW3	0x21 -#define PIIX4_ICTLR1_ICW4	0x21 -#define PIIX4_ICTLR2_ICW1	0xa0 -#define PIIX4_ICTLR2_ICW2	0xa1 -#define PIIX4_ICTLR2_ICW3	0xa1 -#define PIIX4_ICTLR2_ICW4	0xa1 -#define PIIX4_ICTLR1_OCW1	0x21 -#define PIIX4_ICTLR1_OCW2	0x20 -#define PIIX4_ICTLR1_OCW3	0x20 -#define PIIX4_ICTLR1_OCW4	0x20 -#define PIIX4_ICTLR2_OCW1	0xa1 -#define PIIX4_ICTLR2_OCW2	0xa0 -#define PIIX4_ICTLR2_OCW3	0xa0 -#define PIIX4_ICTLR2_OCW4	0xa0 +/* PIRQX Route Control */ +#define PIIX4_FUNC0_PIRQRC			0x60 +#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE	(1 << 7) +#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK		0xf +#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX		16 +/* SERIRQ Control */ +#define PIIX4_FUNC0_SERIRQC			0x64 +#define   PIIX4_FUNC0_SERIRQC_EN			(1 << 7) +#define   PIIX4_FUNC0_SERIRQC_CONT			(1 << 6) +/* Top Of Memory */ +#define PIIX4_FUNC0_TOM				0x69 +#define   PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK		0xf0 +/* Deterministic Latency Control */ +#define PIIX4_FUNC0_DLC				0x82 +#define   PIIX4_FUNC0_DLC_USBPR_EN			(1 << 2) +#define   PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN		(1 << 1) +#define   PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN	(1 << 0) +/* General Configuration */ +#define PIIX4_FUNC0_GENCFG			0xb0 +#define   PIIX4_FUNC0_GENCFG_SERIRQ			(1 << 16) +/* IDE Timing */ +#define PIIX4_FUNC1_IDETIM_PRIMARY_LO		0x40 +#define PIIX4_FUNC1_IDETIM_PRIMARY_HI		0x41 +#define   PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN	(1 << 7) +#define PIIX4_FUNC1_IDETIM_SECONDARY_LO		0x42 +#define 
PIIX4_FUNC1_IDETIM_SECONDARY_HI		0x43 +#define   PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN	(1 << 7) -/************************************************************************ - *  Register encodings. - ************************************************************************/ -#define PIIX4_OCW2_NSEOI	(0x1 << 5) -#define PIIX4_OCW2_SEOI		(0x3 << 5) -#define PIIX4_OCW2_RNSEOI	(0x5 << 5) -#define PIIX4_OCW2_RAEOIS	(0x4 << 5) -#define PIIX4_OCW2_RAEOIC	(0x0 << 5) -#define PIIX4_OCW2_RSEOI	(0x7 << 5) -#define PIIX4_OCW2_SP		(0x6 << 5) -#define PIIX4_OCW2_NOP		(0x2 << 5) +/* Power Management Configuration Space */ +#define PIIX4_FUNC3_PMBA			0x40 +#define PIIX4_FUNC3_PMREGMISC			0x80 +#define   PIIX4_FUNC3_PMREGMISC_EN			(1 << 0) -#define PIIX4_OCW2_SEL		(0x0 << 3) +/* Power Management IO Space */ +#define PIIX4_FUNC3IO_PMSTS			0x00 +#define   PIIX4_FUNC3IO_PMSTS_PWRBTN_STS		(1 << 8) +#define PIIX4_FUNC3IO_PMCNTRL			0x04 +#define   PIIX4_FUNC3IO_PMCNTRL_SUS_EN			(1 << 13) +#define   PIIX4_FUNC3IO_PMCNTRL_SUS_TYP			(0x7 << 10) +#define   PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF		(0x0 << 10) +#define   PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR		(0x1 << 10) -#define PIIX4_OCW2_ILS_0	0 -#define PIIX4_OCW2_ILS_1	1 -#define PIIX4_OCW2_ILS_2	2 -#define PIIX4_OCW2_ILS_3	3 -#define PIIX4_OCW2_ILS_4	4 -#define PIIX4_OCW2_ILS_5	5 -#define PIIX4_OCW2_ILS_6	6 -#define PIIX4_OCW2_ILS_7	7 -#define PIIX4_OCW2_ILS_8	0 -#define PIIX4_OCW2_ILS_9	1 -#define PIIX4_OCW2_ILS_10	2 -#define PIIX4_OCW2_ILS_11	3 -#define PIIX4_OCW2_ILS_12	4 -#define PIIX4_OCW2_ILS_13	5 -#define PIIX4_OCW2_ILS_14	6 -#define PIIX4_OCW2_ILS_15	7 - -#define PIIX4_OCW3_SEL		(0x1 << 3) - -#define PIIX4_OCW3_IRR		0x2 -#define PIIX4_OCW3_ISR		0x3 +/* Data for magic special PCI cycle */ +#define PIIX4_SUSPEND_MAGIC			0x00120002  #endif /* __ASM_MIPS_BOARDS_PIIX4_H */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h new file mode 100644 index 00000000000..6a9d2dd005c --- /dev/null +++ 
b/arch/mips/include/asm/mips-cm.h @@ -0,0 +1,322 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CM_H__ +#define __MIPS_ASM_MIPS_CM_H__ + +#include <linux/io.h> +#include <linux/types.h> + +/* The base address of the CM GCR block */ +extern void __iomem *mips_cm_base; + +/* The base address of the CM L2-only sync region */ +extern void __iomem *mips_cm_l2sync_base; + +/** + * __mips_cm_phys_base - retrieve the physical base address of the CM + * + * This function returns the physical base address of the Coherence Manager + * global control block, or 0 if no Coherence Manager is present. It provides + * a default implementation which reads the CMGCRBase register where available, + * and may be overriden by platforms which determine this address in a + * different way by defining a function with the same prototype except for the + * name mips_cm_phys_base (without underscores). + */ +extern phys_t __mips_cm_phys_base(void); + +/** + * mips_cm_probe - probe for a Coherence Manager + * + * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM + * is successfully detected, else -errno. + */ +#ifdef CONFIG_MIPS_CM +extern int mips_cm_probe(void); +#else +static inline int mips_cm_probe(void) +{ +	return -ENODEV; +} +#endif + +/** + * mips_cm_present - determine whether a Coherence Manager is present + * + * Returns true if a CM is present in the system, else false. 
+ */ +static inline bool mips_cm_present(void) +{ +#ifdef CONFIG_MIPS_CM +	return mips_cm_base != NULL; +#else +	return false; +#endif +} + +/** + * mips_cm_has_l2sync - determine whether an L2-only sync region is present + * + * Returns true if the system implements an L2-only sync region, else false. + */ +static inline bool mips_cm_has_l2sync(void) +{ +#ifdef CONFIG_MIPS_CM +	return mips_cm_l2sync_base != NULL; +#else +	return false; +#endif +} + +/* Offsets to register blocks from the CM base address */ +#define MIPS_CM_GCB_OFS		0x0000 /* Global Control Block */ +#define MIPS_CM_CLCB_OFS	0x2000 /* Core Local Control Block */ +#define MIPS_CM_COCB_OFS	0x4000 /* Core Other Control Block */ +#define MIPS_CM_GDB_OFS		0x6000 /* Global Debug Block */ + +/* Total size of the CM memory mapped registers */ +#define MIPS_CM_GCR_SIZE	0x8000 + +/* Size of the L2-only sync region */ +#define MIPS_CM_L2SYNC_SIZE	0x1000 + +/* Macros to ease the creation of register access functions */ +#define BUILD_CM_R_(name, off)					\ +static inline u32 *addr_gcr_##name(void)			\ +{								\ +	return (u32 *)(mips_cm_base + (off));			\ +}								\ +								\ +static inline u32 read_gcr_##name(void)				\ +{								\ +	return __raw_readl(addr_gcr_##name());			\ +} + +#define BUILD_CM__W(name, off)					\ +static inline void write_gcr_##name(u32 value)			\ +{								\ +	__raw_writel(value, addr_gcr_##name());			\ +} + +#define BUILD_CM_RW(name, off)					\ +	BUILD_CM_R_(name, off)					\ +	BUILD_CM__W(name, off) + +#define BUILD_CM_Cx_R_(name, off)				\ +	BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off))	\ +	BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off)) + +#define BUILD_CM_Cx__W(name, off)				\ +	BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off))	\ +	BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off)) + +#define BUILD_CM_Cx_RW(name, off)				\ +	BUILD_CM_Cx_R_(name, off)				\ +	BUILD_CM_Cx__W(name, off) + +/* GCB register accessor functions */ +BUILD_CM_R_(config,		MIPS_CM_GCB_OFS + 0x00) 
+BUILD_CM_RW(base,		MIPS_CM_GCB_OFS + 0x08) +BUILD_CM_RW(access,		MIPS_CM_GCB_OFS + 0x20) +BUILD_CM_R_(rev,		MIPS_CM_GCB_OFS + 0x30) +BUILD_CM_RW(error_mask,		MIPS_CM_GCB_OFS + 0x40) +BUILD_CM_RW(error_cause,	MIPS_CM_GCB_OFS + 0x48) +BUILD_CM_RW(error_addr,		MIPS_CM_GCB_OFS + 0x50) +BUILD_CM_RW(error_mult,		MIPS_CM_GCB_OFS + 0x58) +BUILD_CM_RW(l2_only_sync_base,	MIPS_CM_GCB_OFS + 0x70) +BUILD_CM_RW(gic_base,		MIPS_CM_GCB_OFS + 0x80) +BUILD_CM_RW(cpc_base,		MIPS_CM_GCB_OFS + 0x88) +BUILD_CM_RW(reg0_base,		MIPS_CM_GCB_OFS + 0x90) +BUILD_CM_RW(reg0_mask,		MIPS_CM_GCB_OFS + 0x98) +BUILD_CM_RW(reg1_base,		MIPS_CM_GCB_OFS + 0xa0) +BUILD_CM_RW(reg1_mask,		MIPS_CM_GCB_OFS + 0xa8) +BUILD_CM_RW(reg2_base,		MIPS_CM_GCB_OFS + 0xb0) +BUILD_CM_RW(reg2_mask,		MIPS_CM_GCB_OFS + 0xb8) +BUILD_CM_RW(reg3_base,		MIPS_CM_GCB_OFS + 0xc0) +BUILD_CM_RW(reg3_mask,		MIPS_CM_GCB_OFS + 0xc8) +BUILD_CM_R_(gic_status,		MIPS_CM_GCB_OFS + 0xd0) +BUILD_CM_R_(cpc_status,		MIPS_CM_GCB_OFS + 0xf0) + +/* Core Local & Core Other register accessor functions */ +BUILD_CM_Cx_RW(reset_release,	0x00) +BUILD_CM_Cx_RW(coherence,	0x08) +BUILD_CM_Cx_R_(config,		0x10) +BUILD_CM_Cx_RW(other,		0x18) +BUILD_CM_Cx_RW(reset_base,	0x20) +BUILD_CM_Cx_R_(id,		0x28) +BUILD_CM_Cx_RW(reset_ext_base,	0x30) +BUILD_CM_Cx_R_(tcid_0_priority,	0x40) +BUILD_CM_Cx_R_(tcid_1_priority,	0x48) +BUILD_CM_Cx_R_(tcid_2_priority,	0x50) +BUILD_CM_Cx_R_(tcid_3_priority,	0x58) +BUILD_CM_Cx_R_(tcid_4_priority,	0x60) +BUILD_CM_Cx_R_(tcid_5_priority,	0x68) +BUILD_CM_Cx_R_(tcid_6_priority,	0x70) +BUILD_CM_Cx_R_(tcid_7_priority,	0x78) +BUILD_CM_Cx_R_(tcid_8_priority,	0x80) + +/* GCR_CONFIG register fields */ +#define CM_GCR_CONFIG_NUMIOCU_SHF		8 +#define CM_GCR_CONFIG_NUMIOCU_MSK		(_ULCAST_(0xf) << 8) +#define CM_GCR_CONFIG_PCORES_SHF		0 +#define CM_GCR_CONFIG_PCORES_MSK		(_ULCAST_(0xff) << 0) + +/* GCR_BASE register fields */ +#define CM_GCR_BASE_GCRBASE_SHF			15 +#define CM_GCR_BASE_GCRBASE_MSK			(_ULCAST_(0x1ffff) << 15) +#define 
CM_GCR_BASE_CMDEFTGT_SHF		0 +#define CM_GCR_BASE_CMDEFTGT_MSK		(_ULCAST_(0x3) << 0) +#define  CM_GCR_BASE_CMDEFTGT_DISABLED		0 +#define  CM_GCR_BASE_CMDEFTGT_MEM		1 +#define  CM_GCR_BASE_CMDEFTGT_IOCU0		2 +#define  CM_GCR_BASE_CMDEFTGT_IOCU1		3 + +/* GCR_ACCESS register fields */ +#define CM_GCR_ACCESS_ACCESSEN_SHF		0 +#define CM_GCR_ACCESS_ACCESSEN_MSK		(_ULCAST_(0xff) << 0) + +/* GCR_REV register fields */ +#define CM_GCR_REV_MAJOR_SHF			8 +#define CM_GCR_REV_MAJOR_MSK			(_ULCAST_(0xff) << 8) +#define CM_GCR_REV_MINOR_SHF			0 +#define CM_GCR_REV_MINOR_MSK			(_ULCAST_(0xff) << 0) + +/* GCR_ERROR_CAUSE register fields */ +#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF		27 +#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK		(_ULCAST_(0x1f) << 27) +#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF		0 +#define CM_GCR_ERROR_CAUSE_ERRINGO_MSK		(_ULCAST_(0x7ffffff) << 0) + +/* GCR_ERROR_MULT register fields */ +#define CM_GCR_ERROR_MULT_ERR2ND_SHF		0 +#define CM_GCR_ERROR_MULT_ERR2ND_MSK		(_ULCAST_(0x1f) << 0) + +/* GCR_L2_ONLY_SYNC_BASE register fields */ +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF	12 +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK	(_ULCAST_(0xfffff) << 12) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF	0 +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK	(_ULCAST_(0x1) << 0) + +/* GCR_GIC_BASE register fields */ +#define CM_GCR_GIC_BASE_GICBASE_SHF		17 +#define CM_GCR_GIC_BASE_GICBASE_MSK		(_ULCAST_(0x7fff) << 17) +#define CM_GCR_GIC_BASE_GICEN_SHF		0 +#define CM_GCR_GIC_BASE_GICEN_MSK		(_ULCAST_(0x1) << 0) + +/* GCR_CPC_BASE register fields */ +#define CM_GCR_CPC_BASE_CPCBASE_SHF		17 +#define CM_GCR_CPC_BASE_CPCBASE_MSK		(_ULCAST_(0x7fff) << 17) +#define CM_GCR_CPC_BASE_CPCEN_SHF		0 +#define CM_GCR_CPC_BASE_CPCEN_MSK		(_ULCAST_(0x1) << 0) + +/* GCR_REGn_BASE register fields */ +#define CM_GCR_REGn_BASE_BASEADDR_SHF		16 +#define CM_GCR_REGn_BASE_BASEADDR_MSK		(_ULCAST_(0xffff) << 16) + +/* GCR_REGn_MASK register fields */ +#define CM_GCR_REGn_MASK_ADDRMASK_SHF		16 +#define 
CM_GCR_REGn_MASK_ADDRMASK_MSK		(_ULCAST_(0xffff) << 16) +#define CM_GCR_REGn_MASK_CCAOVR_SHF		5 +#define CM_GCR_REGn_MASK_CCAOVR_MSK		(_ULCAST_(0x3) << 5) +#define CM_GCR_REGn_MASK_CCAOVREN_SHF		4 +#define CM_GCR_REGn_MASK_CCAOVREN_MSK		(_ULCAST_(0x1) << 4) +#define CM_GCR_REGn_MASK_DROPL2_SHF		2 +#define CM_GCR_REGn_MASK_DROPL2_MSK		(_ULCAST_(0x1) << 2) +#define CM_GCR_REGn_MASK_CMTGT_SHF		0 +#define CM_GCR_REGn_MASK_CMTGT_MSK		(_ULCAST_(0x3) << 0) +#define  CM_GCR_REGn_MASK_CMTGT_DISABLED	(_ULCAST_(0x0) << 0) +#define  CM_GCR_REGn_MASK_CMTGT_MEM		(_ULCAST_(0x1) << 0) +#define  CM_GCR_REGn_MASK_CMTGT_IOCU0		(_ULCAST_(0x2) << 0) +#define  CM_GCR_REGn_MASK_CMTGT_IOCU1		(_ULCAST_(0x3) << 0) + +/* GCR_GIC_STATUS register fields */ +#define CM_GCR_GIC_STATUS_EX_SHF		0 +#define CM_GCR_GIC_STATUS_EX_MSK		(_ULCAST_(0x1) << 0) + +/* GCR_CPC_STATUS register fields */ +#define CM_GCR_CPC_STATUS_EX_SHF		0 +#define CM_GCR_CPC_STATUS_EX_MSK		(_ULCAST_(0x1) << 0) + +/* GCR_Cx_COHERENCE register fields */ +#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0 +#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0) + +/* GCR_Cx_CONFIG register fields */ +#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF		10 +#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK		(_ULCAST_(0x3) << 10) +#define CM_GCR_Cx_CONFIG_PVPE_SHF		0 +#define CM_GCR_Cx_CONFIG_PVPE_MSK		(_ULCAST_(0x1ff) << 0) + +/* GCR_Cx_OTHER register fields */ +#define CM_GCR_Cx_OTHER_CORENUM_SHF		16 +#define CM_GCR_Cx_OTHER_CORENUM_MSK		(_ULCAST_(0xffff) << 16) + +/* GCR_Cx_RESET_BASE register fields */ +#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF	12 +#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK	(_ULCAST_(0xfffff) << 12) + +/* GCR_Cx_RESET_EXT_BASE register fields */ +#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF	31 +#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK	(_ULCAST_(0x1) << 31) +#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF	30 +#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK	(_ULCAST_(0x1) << 30) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF	20 
+#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK	(_ULCAST_(0xff) << 20) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF	1 +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK	(_ULCAST_(0x7f) << 1) +#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF	0 +#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK	(_ULCAST_(0x1) << 0) + +/** + * mips_cm_numcores - return the number of cores present in the system + * + * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or + * zero if no Coherence Manager is present. + */ +static inline unsigned mips_cm_numcores(void) +{ +	if (!mips_cm_present()) +		return 0; + +	return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK) +		>> CM_GCR_CONFIG_PCORES_SHF) + 1; +} + +/** + * mips_cm_numiocu - return the number of IOCUs present in the system + * + * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero + * if no Coherence Manager is present. + */ +static inline unsigned mips_cm_numiocu(void) +{ +	if (!mips_cm_present()) +		return 0; + +	return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK) +		>> CM_GCR_CONFIG_NUMIOCU_SHF; +} + +/** + * mips_cm_l2sync - perform an L2-only sync operation + * + * If an L2-only sync region is present in the system then this function + * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV. 
+ */ +static inline int mips_cm_l2sync(void) +{ +	if (!mips_cm_has_l2sync()) +		return -ENODEV; + +	writel(0, mips_cm_l2sync_base); +	return 0; +} + +#endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h new file mode 100644 index 00000000000..e139a534e0f --- /dev/null +++ b/arch/mips/include/asm/mips-cpc.h @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPC_H__ +#define __MIPS_ASM_MIPS_CPC_H__ + +#include <linux/io.h> +#include <linux/types.h> + +/* The base address of the CPC registers */ +extern void __iomem *mips_cpc_base; + +/** + * mips_cpc_default_phys_base - retrieve the default physical base address of + *                              the CPC + * + * Returns the default physical base address of the Cluster Power Controller + * memory mapped registers. This is platform dependent & must therefore be + * implemented per-platform. + */ +extern phys_t mips_cpc_default_phys_base(void); + +/** + * mips_cpc_phys_base - retrieve the physical base address of the CPC + * + * This function returns the physical base address of the Cluster Power + * Controller memory mapped registers, or 0 if no Cluster Power Controller + * is present. It may be overridden by individual platforms which determine + * this address in a different way. + */ +extern phys_t __weak mips_cpc_phys_base(void); + +/** + * mips_cpc_probe - probe for a Cluster Power Controller + * + * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if + * a CPC is successfully detected, else -errno. 
+ */ +#ifdef CONFIG_MIPS_CPC +extern int mips_cpc_probe(void); +#else +static inline int mips_cpc_probe(void) +{ +	return -ENODEV; +} +#endif + +/** + * mips_cpc_present - determine whether a Cluster Power Controller is present + * + * Returns true if a CPC is present in the system, else false. + */ +static inline bool mips_cpc_present(void) +{ +#ifdef CONFIG_MIPS_CPC +	return mips_cpc_base != NULL; +#else +	return false; +#endif +} + +/* Offsets from the CPC base address to various control blocks */ +#define MIPS_CPC_GCB_OFS	0x0000 +#define MIPS_CPC_CLCB_OFS	0x2000 +#define MIPS_CPC_COCB_OFS	0x4000 + +/* Macros to ease the creation of register access functions */ +#define BUILD_CPC_R_(name, off)					\ +static inline u32 *addr_cpc_##name(void)			\ +{								\ +	return (u32 *)(mips_cpc_base + (off));			\ +}								\ +								\ +static inline u32 read_cpc_##name(void)				\ +{								\ +	return __raw_readl(mips_cpc_base + (off));		\ +} + +#define BUILD_CPC__W(name, off) \ +static inline void write_cpc_##name(u32 value)			\ +{								\ +	__raw_writel(value, mips_cpc_base + (off));		\ +} + +#define BUILD_CPC_RW(name, off)					\ +	BUILD_CPC_R_(name, off)					\ +	BUILD_CPC__W(name, off) + +#define BUILD_CPC_Cx_R_(name, off)				\ +	BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off))	\ +	BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off)) + +#define BUILD_CPC_Cx__W(name, off)				\ +	BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off))	\ +	BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off)) + +#define BUILD_CPC_Cx_RW(name, off)				\ +	BUILD_CPC_Cx_R_(name, off)				\ +	BUILD_CPC_Cx__W(name, off) + +/* GCB register accessor functions */ +BUILD_CPC_RW(access,		MIPS_CPC_GCB_OFS + 0x00) +BUILD_CPC_RW(seqdel,		MIPS_CPC_GCB_OFS + 0x08) +BUILD_CPC_RW(rail,		MIPS_CPC_GCB_OFS + 0x10) +BUILD_CPC_RW(resetlen,		MIPS_CPC_GCB_OFS + 0x18) +BUILD_CPC_R_(revision,		MIPS_CPC_GCB_OFS + 0x20) + +/* Core Local & Core Other accessor functions */ +BUILD_CPC_Cx_RW(cmd,		0x00) +BUILD_CPC_Cx_RW(stat_conf,	
0x08) +BUILD_CPC_Cx_RW(other,		0x10) + +/* CPC_Cx_CMD register fields */ +#define CPC_Cx_CMD_SHF				0 +#define CPC_Cx_CMD_MSK				(_ULCAST_(0xf) << 0) +#define  CPC_Cx_CMD_CLOCKOFF			(_ULCAST_(0x1) << 0) +#define  CPC_Cx_CMD_PWRDOWN			(_ULCAST_(0x2) << 0) +#define  CPC_Cx_CMD_PWRUP			(_ULCAST_(0x3) << 0) +#define  CPC_Cx_CMD_RESET			(_ULCAST_(0x4) << 0) + +/* CPC_Cx_STAT_CONF register fields */ +#define CPC_Cx_STAT_CONF_PWRUPE_SHF		23 +#define CPC_Cx_STAT_CONF_PWRUPE_MSK		(_ULCAST_(0x1) << 23) +#define CPC_Cx_STAT_CONF_SEQSTATE_SHF		19 +#define CPC_Cx_STAT_CONF_SEQSTATE_MSK		(_ULCAST_(0xf) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_D0		(_ULCAST_(0x0) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U0		(_ULCAST_(0x1) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U1		(_ULCAST_(0x2) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U2		(_ULCAST_(0x3) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U3		(_ULCAST_(0x4) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U4		(_ULCAST_(0x5) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U5		(_ULCAST_(0x6) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_U6		(_ULCAST_(0x7) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_D1		(_ULCAST_(0x8) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_D3		(_ULCAST_(0x9) << 19) +#define  CPC_Cx_STAT_CONF_SEQSTATE_D2		(_ULCAST_(0xa) << 19) +#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF	17 +#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK	(_ULCAST_(0x1) << 17) +#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF		16 +#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK		(_ULCAST_(0x1) << 16) +#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF	15 +#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK	(_ULCAST_(0x1) << 15) + +/* CPC_Cx_OTHER register fields */ +#define CPC_Cx_OTHER_CORENUM_SHF		16 +#define CPC_Cx_OTHER_CORENUM_MSK		(_ULCAST_(0xff) << 16) + +#ifdef CONFIG_MIPS_CPC + +/** + * mips_cpc_lock_other - lock access to another core + * core: the other core to be accessed + * + * Call before operating upon a core via the 'other' register region in + * order to prevent the region being moved 
during access. Must be followed + * by a call to mips_cpc_unlock_other. + */ +extern void mips_cpc_lock_other(unsigned int core); + +/** + * mips_cpc_unlock_other - unlock access to another core + * + * Call after operating upon another core via the 'other' register region. + * Must be called after mips_cpc_lock_other. + */ +extern void mips_cpc_unlock_other(void); + +#else /* !CONFIG_MIPS_CPC */ + +static inline void mips_cpc_lock_other(unsigned int core) { } +static inline void mips_cpc_unlock_other(void) { } + +#endif /* !CONFIG_MIPS_CPC */ + +#endif /* __MIPS_ASM_MIPS_CPC_H__ */ diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h index ac7935203f8..f6ba004a771 100644 --- a/arch/mips/include/asm/mips_mt.h +++ b/arch/mips/include/asm/mips_mt.h @@ -1,7 +1,6 @@  /* - * Definitions and decalrations for MIPS MT support - * that are common between SMTC, VSMP, and/or AP/SP - * kernel models. + * Definitions and declarations for MIPS MT support that are common between + * the VSMP, and AP/SP kernel models.   
*/  #ifndef __ASM_MIPS_MT_H  #define __ASM_MIPS_MT_H @@ -18,7 +17,12 @@ extern cpumask_t mt_fpu_cpumask;  extern unsigned long mt_fpemul_threshold;  extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); + +#ifdef CONFIG_MIPS_MT  extern void mips_mt_set_cpuoptions(void); +#else +static inline void mips_mt_set_cpuoptions(void) { } +#endif  struct class;  extern struct class *mt_class; diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h index 38b7704ee37..5f8052ce43b 100644 --- a/arch/mips/include/asm/mipsmtregs.h +++ b/arch/mips/include/asm/mipsmtregs.h @@ -36,6 +36,8 @@  #define read_c0_tcbind()		__read_32bit_c0_register($2, 2) +#define write_c0_tchalt(val)		__write_32bit_c0_register($2, 4, val) +  #define read_c0_tccontext()		__read_32bit_c0_register($2, 5)  #define write_c0_tccontext(val)		__write_32bit_c0_register($2, 5, val) @@ -176,6 +178,17 @@  #ifndef __ASSEMBLY__ +static inline unsigned core_nvpes(void) +{ +	unsigned conf0; + +	if (!cpu_has_mipsmt) +		return 1; + +	conf0 = read_c0_mvpconf0(); +	return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; +} +  static inline unsigned int dvpe(void)  {  	int res = 0; diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index fed1c3e9b48..98e9754a4b6 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -14,6 +14,7 @@  #define _ASM_MIPSREGS_H  #include <linux/linkage.h> +#include <linux/types.h>  #include <asm/hazards.h>  #include <asm/war.h> @@ -567,13 +568,27 @@  #define MIPS_CONF1_PC		(_ULCAST_(1) <<	 4)  #define MIPS_CONF1_MD		(_ULCAST_(1) <<	 5)  #define MIPS_CONF1_C2		(_ULCAST_(1) <<	 6) +#define MIPS_CONF1_DA_SHF	7 +#define MIPS_CONF1_DA_SZ	3  #define MIPS_CONF1_DA		(_ULCAST_(7) <<	 7) +#define MIPS_CONF1_DL_SHF	10 +#define MIPS_CONF1_DL_SZ	3  #define MIPS_CONF1_DL		(_ULCAST_(7) << 10) +#define MIPS_CONF1_DS_SHF	13 +#define MIPS_CONF1_DS_SZ	3  #define MIPS_CONF1_DS		(_ULCAST_(7) << 13) +#define 
MIPS_CONF1_IA_SHF	16 +#define MIPS_CONF1_IA_SZ	3  #define MIPS_CONF1_IA		(_ULCAST_(7) << 16) +#define MIPS_CONF1_IL_SHF	19 +#define MIPS_CONF1_IL_SZ	3  #define MIPS_CONF1_IL		(_ULCAST_(7) << 19) +#define MIPS_CONF1_IS_SHF	22 +#define MIPS_CONF1_IS_SZ	3  #define MIPS_CONF1_IS		(_ULCAST_(7) << 22) -#define MIPS_CONF1_TLBS		(_ULCAST_(63)<< 25) +#define MIPS_CONF1_TLBS_SHIFT   (25) +#define MIPS_CONF1_TLBS_SIZE    (6) +#define MIPS_CONF1_TLBS         (_ULCAST_(63) << MIPS_CONF1_TLBS_SHIFT)  #define MIPS_CONF2_SA		(_ULCAST_(15)<<	 0)  #define MIPS_CONF2_SL		(_ULCAST_(15)<<	 4) @@ -587,28 +602,78 @@  #define MIPS_CONF3_TL		(_ULCAST_(1) <<	 0)  #define MIPS_CONF3_SM		(_ULCAST_(1) <<	 1)  #define MIPS_CONF3_MT		(_ULCAST_(1) <<	 2) +#define MIPS_CONF3_CDMM		(_ULCAST_(1) <<	 3)  #define MIPS_CONF3_SP		(_ULCAST_(1) <<	 4)  #define MIPS_CONF3_VINT		(_ULCAST_(1) <<	 5)  #define MIPS_CONF3_VEIC		(_ULCAST_(1) <<	 6)  #define MIPS_CONF3_LPA		(_ULCAST_(1) <<	 7) +#define MIPS_CONF3_ITL		(_ULCAST_(1) <<	 8) +#define MIPS_CONF3_CTXTC	(_ULCAST_(1) <<	 9)  #define MIPS_CONF3_DSP		(_ULCAST_(1) << 10)  #define MIPS_CONF3_DSP2P	(_ULCAST_(1) << 11)  #define MIPS_CONF3_RXI		(_ULCAST_(1) << 12)  #define MIPS_CONF3_ULRI		(_ULCAST_(1) << 13)  #define MIPS_CONF3_ISA		(_ULCAST_(3) << 14)  #define MIPS_CONF3_ISA_OE	(_ULCAST_(1) << 16) +#define MIPS_CONF3_MCU		(_ULCAST_(1) << 17) +#define MIPS_CONF3_MMAR		(_ULCAST_(7) << 18) +#define MIPS_CONF3_IPLW		(_ULCAST_(3) << 21)  #define MIPS_CONF3_VZ		(_ULCAST_(1) << 23) - +#define MIPS_CONF3_PW		(_ULCAST_(1) << 24) +#define MIPS_CONF3_SC		(_ULCAST_(1) << 25) +#define MIPS_CONF3_BI		(_ULCAST_(1) << 26) +#define MIPS_CONF3_BP		(_ULCAST_(1) << 27) +#define MIPS_CONF3_MSA		(_ULCAST_(1) << 28) +#define MIPS_CONF3_CMGCR	(_ULCAST_(1) << 29) +#define MIPS_CONF3_BPG		(_ULCAST_(1) << 30) + +#define MIPS_CONF4_MMUSIZEEXT_SHIFT	(0)  #define MIPS_CONF4_MMUSIZEEXT	(_ULCAST_(255) << 0) +#define MIPS_CONF4_FTLBSETS_SHIFT	(0) +#define MIPS_CONF4_FTLBSETS_SHIFT	(0) 
+#define MIPS_CONF4_FTLBSETS	(_ULCAST_(15) << MIPS_CONF4_FTLBSETS_SHIFT) +#define MIPS_CONF4_FTLBWAYS_SHIFT	(4) +#define MIPS_CONF4_FTLBWAYS	(_ULCAST_(15) << MIPS_CONF4_FTLBWAYS_SHIFT) +#define MIPS_CONF4_FTLBPAGESIZE_SHIFT	(8) +/* bits 10:8 in FTLB-only configurations */ +#define MIPS_CONF4_FTLBPAGESIZE (_ULCAST_(7) << MIPS_CONF4_FTLBPAGESIZE_SHIFT) +/* bits 12:8 in VTLB-FTLB only configurations */ +#define MIPS_CONF4_VFTLBPAGESIZE (_ULCAST_(31) << MIPS_CONF4_FTLBPAGESIZE_SHIFT)  #define MIPS_CONF4_MMUEXTDEF	(_ULCAST_(3) << 14)  #define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14) +#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT	(_ULCAST_(2) << 14) +#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT	(_ULCAST_(3) << 14) +#define MIPS_CONF4_KSCREXIST	(_ULCAST_(255) << 16) +#define MIPS_CONF4_VTLBSIZEEXT_SHIFT	(24) +#define MIPS_CONF4_VTLBSIZEEXT	(_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT) +#define MIPS_CONF4_AE		(_ULCAST_(1) << 28) +#define MIPS_CONF4_IE		(_ULCAST_(3) << 29) +#define MIPS_CONF4_TLBINV	(_ULCAST_(2) << 29) + +#define MIPS_CONF5_NF		(_ULCAST_(1) << 0) +#define MIPS_CONF5_UFR		(_ULCAST_(1) << 2) +#define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27) +#define MIPS_CONF5_EVA		(_ULCAST_(1) << 28) +#define MIPS_CONF5_CV		(_ULCAST_(1) << 29) +#define MIPS_CONF5_K		(_ULCAST_(1) << 30)  #define MIPS_CONF6_SYND		(_ULCAST_(1) << 13) +/* proAptiv FTLB on/off bit */ +#define MIPS_CONF6_FTLBEN	(_ULCAST_(1) << 15)  #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)  #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2) +#define MIPS_CONF7_IAR		(_ULCAST_(1) << 10) +#define MIPS_CONF7_AR		(_ULCAST_(1) << 16) + +/*  EntryHI bit definition */ +#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10) + +/* CMGCRBase bit definitions */ +#define MIPS_CMGCRB_BASE	11 +#define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))  /*   * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register. 
@@ -621,14 +686,41 @@  #define MIPS_FPIR_L		(_ULCAST_(1) << 21)  #define MIPS_FPIR_F64		(_ULCAST_(1) << 22) +/* + * Bits in the MIPS32 Memory Segmentation registers. + */ +#define MIPS_SEGCFG_PA_SHIFT	9 +#define MIPS_SEGCFG_PA		(_ULCAST_(127) << MIPS_SEGCFG_PA_SHIFT) +#define MIPS_SEGCFG_AM_SHIFT	4 +#define MIPS_SEGCFG_AM		(_ULCAST_(7) << MIPS_SEGCFG_AM_SHIFT) +#define MIPS_SEGCFG_EU_SHIFT	3 +#define MIPS_SEGCFG_EU		(_ULCAST_(1) << MIPS_SEGCFG_EU_SHIFT) +#define MIPS_SEGCFG_C_SHIFT	0 +#define MIPS_SEGCFG_C		(_ULCAST_(7) << MIPS_SEGCFG_C_SHIFT) + +#define MIPS_SEGCFG_UUSK	_ULCAST_(7) +#define MIPS_SEGCFG_USK		_ULCAST_(5) +#define MIPS_SEGCFG_MUSUK	_ULCAST_(4) +#define MIPS_SEGCFG_MUSK	_ULCAST_(3) +#define MIPS_SEGCFG_MSK		_ULCAST_(2) +#define MIPS_SEGCFG_MK		_ULCAST_(1) +#define MIPS_SEGCFG_UK		_ULCAST_(0) +  #ifndef __ASSEMBLY__  /* - * Macros for handling the ISA mode bit for microMIPS. + * Macros for handling the ISA mode bit for MIPS16 and microMIPS.   */ +#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \ +    defined(CONFIG_SYS_SUPPORTS_MICROMIPS)  #define get_isa16_mode(x)		((x) & 0x1)  #define msk_isa16_mode(x)		((x) & ~0x1)  #define set_isa16_mode(x)		do { (x) |= 0x1; } while(0) +#else +#define get_isa16_mode(x)		0 +#define msk_isa16_mode(x)		(x) +#define set_isa16_mode(x)		do { } while(0) +#endif  /*   * microMIPS instructions can be 16-bit or 32-bit in length. This @@ -642,6 +734,19 @@ static inline int mm_insn_16bit(u16 insn)  }  /* + * TLB Invalidate Flush + */ +static inline void tlbinvf(void) +{ +	__asm__ __volatile__( +		".set push\n\t" +		".set noreorder\n\t" +		".word 0x42000004\n\t" /* tlbinvf */ +		".set pop"); +} + + +/*   * Functions to access the R10000 performance counters.	 These are basically   * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit   * performance counter number encoded into bits 1 ... 5 of the instruction. 
@@ -909,19 +1014,8 @@ do {									\  #define write_c0_compare3(val)	__write_32bit_c0_register($11, 7, val)  #define read_c0_status()	__read_32bit_c0_register($12, 0) -#ifdef CONFIG_MIPS_MT_SMTC -#define write_c0_status(val)						\ -do {									\ -	__write_32bit_c0_register($12, 0, val);				\ -	__ehb();							\ -} while (0) -#else -/* - * Legacy non-SMTC code, which may be hazardous - * but which might not support EHB - */ +  #define write_c0_status(val)	__write_32bit_c0_register($12, 0, val) -#endif /* CONFIG_MIPS_MT_SMTC */  #define read_c0_cause()		__read_32bit_c0_register($13, 0)  #define write_c0_cause(val)	__write_32bit_c0_register($13, 0, val) @@ -931,6 +1025,8 @@ do {									\  #define read_c0_prid()		__read_32bit_c0_register($15, 0) +#define read_c0_cmgcrbase()	__read_ulong_c0_register($15, 3) +  #define read_c0_config()	__read_32bit_c0_register($16, 0)  #define read_c0_config1()	__read_32bit_c0_register($16, 1)  #define read_c0_config2()	__read_32bit_c0_register($16, 2) @@ -1095,6 +1191,15 @@ do {									\  #define read_c0_ebase()		__read_32bit_c0_register($15, 1)  #define write_c0_ebase(val)	__write_32bit_c0_register($15, 1, val) +/* MIPSR3 */ +#define read_c0_segctl0()	__read_32bit_c0_register($5, 2) +#define write_c0_segctl0(val)	__write_32bit_c0_register($5, 2, val) + +#define read_c0_segctl1()	__read_32bit_c0_register($5, 3) +#define write_c0_segctl1(val)	__write_32bit_c0_register($5, 3, val) + +#define read_c0_segctl2()	__read_32bit_c0_register($5, 4) +#define write_c0_segctl2(val)	__write_32bit_c0_register($5, 4, val)  /* Cavium OCTEON (cnMIPS) */  #define read_c0_cvmcount()	__read_ulong_c0_register($9, 6) @@ -1634,11 +1739,6 @@ static inline void tlb_write_random(void)  /*   * Manipulate bits in a c0 register.   */ -#ifndef CONFIG_MIPS_MT_SMTC -/* - * SMTC Linux requires shutting-down microthread scheduling - * during CP0 register read-modify-write sequences. 
- */  #define __BUILD_SET_C0(name)					\  static inline unsigned int					\  set_c0_##name(unsigned int set)					\ @@ -1677,124 +1777,10 @@ change_c0_##name(unsigned int change, unsigned int val)		\  	return res;						\  } -#else /* SMTC versions that manage MT scheduling */ - -#include <linux/irqflags.h> - -/* - * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with - * header file recursion. - */ -static inline unsigned int __dmt(void) -{ -	int res; - -	__asm__ __volatile__( -	"	.set	push						\n" -	"	.set	mips32r2					\n" -	"	.set	noat						\n" -	"	.word	0x41610BC1			# dmt $1	\n" -	"	ehb							\n" -	"	move	%0, $1						\n" -	"	.set	pop						\n" -	: "=r" (res)); - -	instruction_hazard(); - -	return res; -} - -#define __VPECONTROL_TE_SHIFT	15 -#define __VPECONTROL_TE		(1UL << __VPECONTROL_TE_SHIFT) - -#define __EMT_ENABLE		__VPECONTROL_TE - -static inline void __emt(unsigned int previous) -{ -	if ((previous & __EMT_ENABLE)) -		__asm__ __volatile__( -		"	.set	mips32r2				\n" -		"	.word	0x41600be1		# emt		\n" -		"	ehb						\n" -		"	.set	mips0					\n"); -} - -static inline void __ehb(void) -{ -	__asm__ __volatile__( -	"	.set	mips32r2					\n" -	"	ehb							\n"		"	.set	mips0						\n"); -} - -/* - * Note that local_irq_save/restore affect TC-specific IXMT state, - * not Status.IE as in non-SMTC kernel. 
- */ - -#define __BUILD_SET_C0(name)					\ -static inline unsigned int					\ -set_c0_##name(unsigned int set)					\ -{								\ -	unsigned int res;					\ -	unsigned int new;					\ -	unsigned int omt;					\ -	unsigned long flags;					\ -								\ -	local_irq_save(flags);					\ -	omt = __dmt();						\ -	res = read_c0_##name();					\ -	new = res | set;					\ -	write_c0_##name(new);					\ -	__emt(omt);						\ -	local_irq_restore(flags);				\ -								\ -	return res;						\ -}								\ -								\ -static inline unsigned int					\ -clear_c0_##name(unsigned int clear)				\ -{								\ -	unsigned int res;					\ -	unsigned int new;					\ -	unsigned int omt;					\ -	unsigned long flags;					\ -								\ -	local_irq_save(flags);					\ -	omt = __dmt();						\ -	res = read_c0_##name();					\ -	new = res & ~clear;					\ -	write_c0_##name(new);					\ -	__emt(omt);						\ -	local_irq_restore(flags);				\ -								\ -	return res;						\ -}								\ -								\ -static inline unsigned int					\ -change_c0_##name(unsigned int change, unsigned int newbits)	\ -{								\ -	unsigned int res;					\ -	unsigned int new;					\ -	unsigned int omt;					\ -	unsigned long flags;					\ -								\ -	local_irq_save(flags);					\ -								\ -	omt = __dmt();						\ -	res = read_c0_##name();					\ -	new = res & ~change;					\ -	new |= (newbits & change);				\ -	write_c0_##name(new);					\ -	__emt(omt);						\ -	local_irq_restore(flags);				\ -								\ -	return res;						\ -} -#endif -  __BUILD_SET_C0(status)  __BUILD_SET_C0(cause)  __BUILD_SET_C0(config) +__BUILD_SET_C0(config5)  __BUILD_SET_C0(intcontrol)  __BUILD_SET_C0(intctl)  __BUILD_SET_C0(srsmap) @@ -1806,6 +1792,15 @@ __BUILD_SET_C0(brcm_cmt_ctrl)  __BUILD_SET_C0(brcm_config)  __BUILD_SET_C0(brcm_mode) +/* + * Return low 10 bits of ebase. + * Note that under KVM (MIPSVZ) this returns vcpu id. 
+ */ +static inline unsigned int get_ebase_cpunum(void) +{ +	return read_c0_ebase() & 0x3ff; +} +  #endif /* !__ASSEMBLY__ */  #endif /* _ASM_MIPSREGS_H */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 3b29079b542..2e373da5f8e 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -18,27 +18,27 @@  #include <asm/cacheflush.h>  #include <asm/hazards.h>  #include <asm/tlbflush.h> -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#include <asm/smtc.h> -#endif /* SMTC */  #include <asm-generic/mm_hooks.h> -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT -  #define TLBMISS_HANDLER_SETUP_PGD(pgd)					\  do {									\  	extern void tlbmiss_handler_setup_pgd(unsigned long);		\  	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\  } while (0) +#ifdef CONFIG_MIPS_PGD_C0_CONTEXT + +#define TLBMISS_HANDLER_RESTORE()					\ +	write_c0_xcontext((unsigned long) smp_processor_id() <<		\ +			  SMP_CPUID_REGSHIFT) +  #define TLBMISS_HANDLER_SETUP()						\  	do {								\  		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\ -		write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ +		TLBMISS_HANDLER_RESTORE();				\  	} while (0) -#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/ +#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/  /*   * For the fast tlb miss handlers, we keep a per cpu array of pointers @@ -47,21 +47,14 @@ do {									\   */  extern unsigned long pgd_current[]; -#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ -	pgd_current[smp_processor_id()] = (unsigned long)(pgd) +#define TLBMISS_HANDLER_RESTORE()					\ +	write_c0_context((unsigned long) smp_processor_id() <<		\ +			 SMP_CPUID_REGSHIFT) -#ifdef CONFIG_32BIT  #define TLBMISS_HANDLER_SETUP()						\ -	write_c0_context((unsigned long) smp_processor_id() << 25);	\ +	TLBMISS_HANDLER_RESTORE();					\  	back_to_back_c0_hazard();					\  	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) -#endif -#ifdef CONFIG_64BIT -#define 
TLBMISS_HANDLER_SETUP()						\ -	write_c0_context((unsigned long) smp_processor_id() << 26);	\ -	back_to_back_c0_hazard();					\ -	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) -#endif  #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/  #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) @@ -73,13 +66,6 @@ extern unsigned long pgd_current[];  #define ASID_INC	0x10  #define ASID_MASK	0xff0 -#elif defined(CONFIG_MIPS_MT_SMTC) - -#define ASID_INC	0x1 -extern unsigned long smtc_asid_mask; -#define ASID_MASK	(smtc_asid_mask) -#define HW_ASID_MASK	0xff -/* End SMTC/34K debug hack */  #else /* FIXME: not correct for R6000 */  #define ASID_INC	0x1 @@ -102,7 +88,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)  #define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))  #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) -#ifndef CONFIG_MIPS_MT_SMTC  /* Normal, classic MIPS get_new_mmu_context */  static inline void  get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) @@ -125,12 +110,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)  	cpu_context(cpu, mm) = asid_cache(cpu) = asid;  } -#else /* CONFIG_MIPS_MT_SMTC */ - -#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu)) - -#endif /* CONFIG_MIPS_MT_SMTC */ -  /*   * Initialize the context related info for a new mm_struct   * instance. @@ -151,46 +130,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,  {  	unsigned int cpu = smp_processor_id();  	unsigned long flags; -#ifdef CONFIG_MIPS_MT_SMTC -	unsigned long oldasid; -	unsigned long mtflags; -	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 
0 : cpu_data[cpu].vpe_id; -	local_irq_save(flags); -	mtflags = dvpe(); -#else /* Not SMTC */  	local_irq_save(flags); -#endif /* CONFIG_MIPS_MT_SMTC */  	/* Check if our ASID is of an older version and thus invalid */  	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)  		get_new_mmu_context(next, cpu); -#ifdef CONFIG_MIPS_MT_SMTC -	/* -	 * If the EntryHi ASID being replaced happens to be -	 * the value flagged at ASID recycling time as having -	 * an extended life, clear the bit showing it being -	 * in use by this "CPU", and if that's the last bit, -	 * free up the ASID value for use and flush any old -	 * instances of it from the TLB. -	 */ -	oldasid = (read_c0_entryhi() & ASID_MASK); -	if(smtc_live_asid[mytlb][oldasid]) { -		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); -		if(smtc_live_asid[mytlb][oldasid] == 0) -			smtc_flush_tlb_asid(oldasid); -	} -	/* -	 * Tread softly on EntryHi, and so long as we support -	 * having ASID_MASK smaller than the hardware maximum, -	 * make sure no "soft" bits become "hard"... -	 */ -	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | -			 cpu_asid(cpu, next)); -	ehb(); /* Make sure it propagates to TCStatus */ -	evpe(mtflags); -#else  	write_c0_entryhi(cpu_asid(cpu, next)); -#endif /* CONFIG_MIPS_MT_SMTC */  	TLBMISS_HANDLER_SETUP_PGD(next->pgd);  	/* @@ -223,34 +168,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)  	unsigned long flags;  	unsigned int cpu = smp_processor_id(); -#ifdef CONFIG_MIPS_MT_SMTC -	unsigned long oldasid; -	unsigned long mtflags; -	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; -#endif /* CONFIG_MIPS_MT_SMTC */ -  	local_irq_save(flags);  	/* Unconditionally get a new ASID.  
*/  	get_new_mmu_context(next, cpu); -#ifdef CONFIG_MIPS_MT_SMTC -	/* See comments for similar code above */ -	mtflags = dvpe(); -	oldasid = read_c0_entryhi() & ASID_MASK; -	if(smtc_live_asid[mytlb][oldasid]) { -		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); -		if(smtc_live_asid[mytlb][oldasid] == 0) -			 smtc_flush_tlb_asid(oldasid); -	} -	/* See comments for similar code above */ -	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | -			 cpu_asid(cpu, next)); -	ehb(); /* Make sure it propagates to TCStatus */ -	evpe(mtflags); -#else  	write_c0_entryhi(cpu_asid(cpu, next)); -#endif /* CONFIG_MIPS_MT_SMTC */  	TLBMISS_HANDLER_SETUP_PGD(next->pgd);  	/* mark mmu ownership change */ @@ -268,48 +191,15 @@ static inline void  drop_mmu_context(struct mm_struct *mm, unsigned cpu)  {  	unsigned long flags; -#ifdef CONFIG_MIPS_MT_SMTC -	unsigned long oldasid; -	/* Can't use spinlock because called from TLB flush within DVPE */ -	unsigned int prevvpe; -	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 
0 : cpu_data[cpu].vpe_id; -#endif /* CONFIG_MIPS_MT_SMTC */  	local_irq_save(flags);  	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {  		get_new_mmu_context(mm, cpu); -#ifdef CONFIG_MIPS_MT_SMTC -		/* See comments for similar code above */ -		prevvpe = dvpe(); -		oldasid = (read_c0_entryhi() & ASID_MASK); -		if (smtc_live_asid[mytlb][oldasid]) { -			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); -			if(smtc_live_asid[mytlb][oldasid] == 0) -				smtc_flush_tlb_asid(oldasid); -		} -		/* See comments for similar code above */ -		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) -				| cpu_asid(cpu, mm)); -		ehb(); /* Make sure it propagates to TCStatus */ -		evpe(prevvpe); -#else /* not CONFIG_MIPS_MT_SMTC */  		write_c0_entryhi(cpu_asid(cpu, mm)); -#endif /* CONFIG_MIPS_MT_SMTC */  	} else {  		/* will get a new context next time */ -#ifndef CONFIG_MIPS_MT_SMTC  		cpu_context(cpu, mm) = 0; -#else /* SMTC */ -		int i; - -		/* SMTC shares the TLB (and ASIDs) across VPEs */ -		for_each_online_cpu(i) { -		    if((smtc_status & SMTC_TLB_SHARED) -		    || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) -			cpu_context(i, mm) = 0; -		} -#endif /* CONFIG_MIPS_MT_SMTC */  	}  	local_irq_restore(flags);  } diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index 44b705d0826..800fe578dc9 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -126,6 +126,8 @@ search_module_dbetables(unsigned long addr)  #define MODULE_PROC_FAMILY "LOONGSON1 "  #elif defined CONFIG_CPU_LOONGSON2  #define MODULE_PROC_FAMILY "LOONGSON2 " +#elif defined CONFIG_CPU_LOONGSON3 +#define MODULE_PROC_FAMILY "LOONGSON3 "  #elif defined CONFIG_CPU_CAVIUM_OCTEON  #define MODULE_PROC_FAMILY "OCTEON "  #elif defined CONFIG_CPU_XLR @@ -142,13 +144,7 @@ search_module_dbetables(unsigned long addr)  #define MODULE_KERNEL_TYPE "64BIT "  #endif -#ifdef CONFIG_MIPS_MT_SMTC -#define MODULE_KERNEL_SMTC "MT_SMTC " -#else -#define MODULE_KERNEL_SMTC "" -#endif -  
#define MODULE_ARCH_VERMAGIC \ -	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC +	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE  #endif /* _ASM_MODULE_H */ diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h new file mode 100644 index 00000000000..538f6d482db --- /dev/null +++ b/arch/mips/include/asm/msa.h @@ -0,0 +1,212 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ +#ifndef _ASM_MSA_H +#define _ASM_MSA_H + +#include <asm/mipsregs.h> + +extern void _save_msa(struct task_struct *); +extern void _restore_msa(struct task_struct *); + +static inline void enable_msa(void) +{ +	if (cpu_has_msa) { +		set_c0_config5(MIPS_CONF5_MSAEN); +		enable_fpu_hazard(); +	} +} + +static inline void disable_msa(void) +{ +	if (cpu_has_msa) { +		clear_c0_config5(MIPS_CONF5_MSAEN); +		disable_fpu_hazard(); +	} +} + +static inline int is_msa_enabled(void) +{ +	if (!cpu_has_msa) +		return 0; + +	return read_c0_config5() & MIPS_CONF5_MSAEN; +} + +static inline int thread_msa_context_live(void) +{ +	/* +	 * Check cpu_has_msa only if it's a constant. This will allow the +	 * compiler to optimise out code for CPUs without MSA without adding +	 * an extra redundant check for CPUs with MSA. 
+	 */ +	if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa) +		return 0; + +	return test_thread_flag(TIF_MSA_CTX_LIVE); +} + +static inline void save_msa(struct task_struct *t) +{ +	if (cpu_has_msa) +		_save_msa(t); +} + +static inline void restore_msa(struct task_struct *t) +{ +	if (cpu_has_msa) +		_restore_msa(t); +} + +#ifdef TOOLCHAIN_SUPPORTS_MSA + +#define __BUILD_MSA_CTL_REG(name, cs)				\ +static inline unsigned int read_msa_##name(void)		\ +{								\ +	unsigned int reg;					\ +	__asm__ __volatile__(					\ +	"	.set	push\n"					\ +	"	.set	msa\n"					\ +	"	cfcmsa	%0, $" #cs "\n"				\ +	"	.set	pop\n"					\ +	: "=r"(reg));						\ +	return reg;						\ +}								\ +								\ +static inline void write_msa_##name(unsigned int val)		\ +{								\ +	__asm__ __volatile__(					\ +	"	.set	push\n"					\ +	"	.set	msa\n"					\ +	"	ctcmsa	$" #cs ", %0\n"				\ +	"	.set	pop\n"					\ +	: : "r"(val));						\ +} + +#else /* !TOOLCHAIN_SUPPORTS_MSA */ + +/* + * Define functions using .word for the c[ft]cmsa instructions in order to + * allow compilation with toolchains that do not support MSA. Once all + * toolchains in use support MSA these can be removed. 
+ */ +#ifdef CONFIG_CPU_MICROMIPS +#define CFC_MSA_INSN	0x587e0056 +#define CTC_MSA_INSN	0x583e0816 +#else +#define CFC_MSA_INSN	0x787e0059 +#define CTC_MSA_INSN	0x783e0819 +#endif + +#define __BUILD_MSA_CTL_REG(name, cs)				\ +static inline unsigned int read_msa_##name(void)		\ +{								\ +	unsigned int reg;					\ +	__asm__ __volatile__(					\ +	"	.set	push\n"					\ +	"	.set	noat\n"					\ +	"	.insn\n"					\ +	"	.word	#CFC_MSA_INSN | (" #cs " << 11)\n"	\ +	"	move	%0, $1\n"				\ +	"	.set	pop\n"					\ +	: "=r"(reg));						\ +	return reg;						\ +}								\ +								\ +static inline void write_msa_##name(unsigned int val)		\ +{								\ +	__asm__ __volatile__(					\ +	"	.set	push\n"					\ +	"	.set	noat\n"					\ +	"	move	$1, %0\n"				\ +	"	.insn\n"					\ +	"	.word	#CTC_MSA_INSN | (" #cs " << 6)\n"	\ +	"	.set	pop\n"					\ +	: : "r"(val));						\ +} + +#endif /* !TOOLCHAIN_SUPPORTS_MSA */ + +#define MSA_IR		0 +#define MSA_CSR		1 +#define MSA_ACCESS	2 +#define MSA_SAVE	3 +#define MSA_MODIFY	4 +#define MSA_REQUEST	5 +#define MSA_MAP		6 +#define MSA_UNMAP	7 + +__BUILD_MSA_CTL_REG(ir, 0) +__BUILD_MSA_CTL_REG(csr, 1) +__BUILD_MSA_CTL_REG(access, 2) +__BUILD_MSA_CTL_REG(save, 3) +__BUILD_MSA_CTL_REG(modify, 4) +__BUILD_MSA_CTL_REG(request, 5) +__BUILD_MSA_CTL_REG(map, 6) +__BUILD_MSA_CTL_REG(unmap, 7) + +/* MSA Implementation Register (MSAIR) */ +#define MSA_IR_REVB		0 +#define MSA_IR_REVF		(_ULCAST_(0xff) << MSA_IR_REVB) +#define MSA_IR_PROCB		8 +#define MSA_IR_PROCF		(_ULCAST_(0xff) << MSA_IR_PROCB) +#define MSA_IR_WRPB		16 +#define MSA_IR_WRPF		(_ULCAST_(0x1) << MSA_IR_WRPB) + +/* MSA Control & Status Register (MSACSR) */ +#define MSA_CSR_RMB		0 +#define MSA_CSR_RMF		(_ULCAST_(0x3) << MSA_CSR_RMB) +#define MSA_CSR_RM_NEAREST	0 +#define MSA_CSR_RM_TO_ZERO	1 +#define MSA_CSR_RM_TO_POS	2 +#define MSA_CSR_RM_TO_NEG	3 +#define MSA_CSR_FLAGSB		2 +#define MSA_CSR_FLAGSF		(_ULCAST_(0x1f) << MSA_CSR_FLAGSB) +#define MSA_CSR_FLAGS_IB	2 +#define MSA_CSR_FLAGS_IF	(_ULCAST_(0x1) << 
MSA_CSR_FLAGS_IB) +#define MSA_CSR_FLAGS_UB	3 +#define MSA_CSR_FLAGS_UF	(_ULCAST_(0x1) << MSA_CSR_FLAGS_UB) +#define MSA_CSR_FLAGS_OB	4 +#define MSA_CSR_FLAGS_OF	(_ULCAST_(0x1) << MSA_CSR_FLAGS_OB) +#define MSA_CSR_FLAGS_ZB	5 +#define MSA_CSR_FLAGS_ZF	(_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB) +#define MSA_CSR_FLAGS_VB	6 +#define MSA_CSR_FLAGS_VF	(_ULCAST_(0x1) << MSA_CSR_FLAGS_VB) +#define MSA_CSR_ENABLESB	7 +#define MSA_CSR_ENABLESF	(_ULCAST_(0x1f) << MSA_CSR_ENABLESB) +#define MSA_CSR_ENABLES_IB	7 +#define MSA_CSR_ENABLES_IF	(_ULCAST_(0x1) << MSA_CSR_ENABLES_IB) +#define MSA_CSR_ENABLES_UB	8 +#define MSA_CSR_ENABLES_UF	(_ULCAST_(0x1) << MSA_CSR_ENABLES_UB) +#define MSA_CSR_ENABLES_OB	9 +#define MSA_CSR_ENABLES_OF	(_ULCAST_(0x1) << MSA_CSR_ENABLES_OB) +#define MSA_CSR_ENABLES_ZB	10 +#define MSA_CSR_ENABLES_ZF	(_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB) +#define MSA_CSR_ENABLES_VB	11 +#define MSA_CSR_ENABLES_VF	(_ULCAST_(0x1) << MSA_CSR_ENABLES_VB) +#define MSA_CSR_CAUSEB		12 +#define MSA_CSR_CAUSEF		(_ULCAST_(0x3f) << MSA_CSR_CAUSEB) +#define MSA_CSR_CAUSE_IB	12 +#define MSA_CSR_CAUSE_IF	(_ULCAST_(0x1) << MSA_CSR_CAUSE_IB) +#define MSA_CSR_CAUSE_UB	13 +#define MSA_CSR_CAUSE_UF	(_ULCAST_(0x1) << MSA_CSR_CAUSE_UB) +#define MSA_CSR_CAUSE_OB	14 +#define MSA_CSR_CAUSE_OF	(_ULCAST_(0x1) << MSA_CSR_CAUSE_OB) +#define MSA_CSR_CAUSE_ZB	15 +#define MSA_CSR_CAUSE_ZF	(_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB) +#define MSA_CSR_CAUSE_VB	16 +#define MSA_CSR_CAUSE_VF	(_ULCAST_(0x1) << MSA_CSR_CAUSE_VB) +#define MSA_CSR_CAUSE_EB	17 +#define MSA_CSR_CAUSE_EF	(_ULCAST_(0x1) << MSA_CSR_CAUSE_EB) +#define MSA_CSR_NXB		18 +#define MSA_CSR_NXF		(_ULCAST_(0x1) << MSA_CSR_NXB) +#define MSA_CSR_FSB		24 +#define MSA_CSR_FSF		(_ULCAST_(0x1) << MSA_CSR_FSB) + +#endif /* _ASM_MSA_H */ diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index bb68c3398c8..c281f03eb31 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ 
-84,7 +84,6 @@ nlm_set_nmi_handler(void *handler)   */  void nlm_init_boot_cpu(void);  unsigned int nlm_get_cpu_frequency(void); -void nlm_node_init(int node);  extern struct plat_smp_ops nlm_smp_ops;  extern char nlm_reset_entry[], nlm_reset_entry_end[]; @@ -94,26 +93,16 @@ extern struct dma_map_ops nlm_swiotlb_dma_ops;  extern unsigned int nlm_threads_per_core;  extern cpumask_t nlm_cpumask; -struct nlm_soc_info { -	unsigned long coremask; /* cores enabled on the soc */ -	unsigned long ebase; -	uint64_t irqmask; -	uint64_t sysbase;	/* only for XLP */ -	uint64_t picbase; -	spinlock_t piclock; -}; - -#define nlm_get_node(i)		(&nlm_nodes[i]) -#ifdef CONFIG_CPU_XLR -#define nlm_current_node()	(&nlm_nodes[0]) -#else -#define nlm_current_node()	(&nlm_nodes[nlm_nodeid()]) -#endif -  struct irq_data;  uint64_t nlm_pci_irqmask(int node); +void nlm_setup_pic_irq(int node, int picirq, int irq, int irt);  void nlm_set_pic_extra_ack(int node, int irq,  void (*xack)(struct irq_data *)); +#ifdef CONFIG_PCI_MSI +void nlm_dispatch_msi(int node, int lirq); +void nlm_dispatch_msix(int node, int msixirq); +#endif +  /*   * The NR_IRQs is divided between nodes, each of them has a separate irq space   */ @@ -122,7 +111,6 @@ static inline int nlm_irq_to_xirq(int node, int irq)  	return node * NR_IRQS / NLM_NR_NODES + irq;  } -extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];  extern int nlm_cpu_ready[];  #endif  #endif /* _NETLOGIC_COMMON_H_ */ diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h index f299d31d7c1..06f1f75bfa9 100644 --- a/arch/mips/include/asm/netlogic/mips-extns.h +++ b/arch/mips/include/asm/netlogic/mips-extns.h @@ -146,7 +146,13 @@ static inline int hard_smp_processor_id(void)  static inline int nlm_nodeid(void)  { -	return (__read_32bit_c0_register($15, 1) >> 5) & 0x3; +	uint32_t prid = read_c0_prid() & PRID_IMP_MASK; + +	if ((prid == PRID_IMP_NETLOGIC_XLP9XX) || +			(prid == PRID_IMP_NETLOGIC_XLP5XX)) +		return 
(__read_32bit_c0_register($15, 1) >> 7) & 0x7; +	else +		return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;  }  static inline unsigned int nlm_core_id(void) diff --git a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h index 4e8eacb9588..3067f983495 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h @@ -69,44 +69,9 @@  #define BRIDGE_FLASH_LIMIT3		0x13  #define BRIDGE_DRAM_BAR(i)		(0x14 + (i)) -#define BRIDGE_DRAM_BAR0		0x14 -#define BRIDGE_DRAM_BAR1		0x15 -#define BRIDGE_DRAM_BAR2		0x16 -#define BRIDGE_DRAM_BAR3		0x17 -#define BRIDGE_DRAM_BAR4		0x18 -#define BRIDGE_DRAM_BAR5		0x19 -#define BRIDGE_DRAM_BAR6		0x1a -#define BRIDGE_DRAM_BAR7		0x1b -  #define BRIDGE_DRAM_LIMIT(i)		(0x1c + (i)) -#define BRIDGE_DRAM_LIMIT0		0x1c -#define BRIDGE_DRAM_LIMIT1		0x1d -#define BRIDGE_DRAM_LIMIT2		0x1e -#define BRIDGE_DRAM_LIMIT3		0x1f -#define BRIDGE_DRAM_LIMIT4		0x20 -#define BRIDGE_DRAM_LIMIT5		0x21 -#define BRIDGE_DRAM_LIMIT6		0x22 -#define BRIDGE_DRAM_LIMIT7		0x23 -  #define BRIDGE_DRAM_NODE_TRANSLN(i)	(0x24 + (i)) -#define BRIDGE_DRAM_NODE_TRANSLN0	0x24 -#define BRIDGE_DRAM_NODE_TRANSLN1	0x25 -#define BRIDGE_DRAM_NODE_TRANSLN2	0x26 -#define BRIDGE_DRAM_NODE_TRANSLN3	0x27 -#define BRIDGE_DRAM_NODE_TRANSLN4	0x28 -#define BRIDGE_DRAM_NODE_TRANSLN5	0x29 -#define BRIDGE_DRAM_NODE_TRANSLN6	0x2a -#define BRIDGE_DRAM_NODE_TRANSLN7	0x2b -  #define BRIDGE_DRAM_CHNL_TRANSLN(i)	(0x2c + (i)) -#define BRIDGE_DRAM_CHNL_TRANSLN0	0x2c -#define BRIDGE_DRAM_CHNL_TRANSLN1	0x2d -#define BRIDGE_DRAM_CHNL_TRANSLN2	0x2e -#define BRIDGE_DRAM_CHNL_TRANSLN3	0x2f -#define BRIDGE_DRAM_CHNL_TRANSLN4	0x30 -#define BRIDGE_DRAM_CHNL_TRANSLN5	0x31 -#define BRIDGE_DRAM_CHNL_TRANSLN6	0x32 -#define BRIDGE_DRAM_CHNL_TRANSLN7	0x33  #define BRIDGE_PCIEMEM_BASE0		0x34  #define BRIDGE_PCIEMEM_BASE1		0x35 @@ -178,12 +143,42 @@  #define BRIDGE_GIO_WEIGHT		0x2cb  #define BRIDGE_FLASH_WEIGHT		0x2cc +/* 
FIXME verify */ +#define BRIDGE_9XX_FLASH_BAR(i)		(0x11 + (i)) +#define BRIDGE_9XX_FLASH_BAR_LIMIT(i)	(0x15 + (i)) + +#define BRIDGE_9XX_DRAM_BAR(i)		(0x19 + (i)) +#define BRIDGE_9XX_DRAM_LIMIT(i)	(0x29 + (i)) +#define BRIDGE_9XX_DRAM_NODE_TRANSLN(i)	(0x39 + (i)) +#define BRIDGE_9XX_DRAM_CHNL_TRANSLN(i)	(0x49 + (i)) + +#define BRIDGE_9XX_ADDRESS_ERROR0	0x9d +#define BRIDGE_9XX_ADDRESS_ERROR1	0x9e +#define BRIDGE_9XX_ADDRESS_ERROR2	0x9f + +#define BRIDGE_9XX_PCIEMEM_BASE0	0x59 +#define BRIDGE_9XX_PCIEMEM_BASE1	0x5a +#define BRIDGE_9XX_PCIEMEM_BASE2	0x5b +#define BRIDGE_9XX_PCIEMEM_BASE3	0x5c +#define BRIDGE_9XX_PCIEMEM_LIMIT0	0x5d +#define BRIDGE_9XX_PCIEMEM_LIMIT1	0x5e +#define BRIDGE_9XX_PCIEMEM_LIMIT2	0x5f +#define BRIDGE_9XX_PCIEMEM_LIMIT3	0x60 +#define BRIDGE_9XX_PCIEIO_BASE0		0x61 +#define BRIDGE_9XX_PCIEIO_BASE1		0x62 +#define BRIDGE_9XX_PCIEIO_BASE2		0x63 +#define BRIDGE_9XX_PCIEIO_BASE3		0x64 +#define BRIDGE_9XX_PCIEIO_LIMIT0	0x65 +#define BRIDGE_9XX_PCIEIO_LIMIT1	0x66 +#define BRIDGE_9XX_PCIEIO_LIMIT2	0x67 +#define BRIDGE_9XX_PCIEIO_LIMIT3	0x68 +  #ifndef __ASSEMBLY__  #define nlm_read_bridge_reg(b, r)	nlm_read_reg(b, r)  #define nlm_write_bridge_reg(b, r, v)	nlm_write_reg(b, r, v) -#define nlm_get_bridge_pcibase(node)	\ -			nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node)) +#define nlm_get_bridge_pcibase(node)	nlm_pcicfg_base(cpu_is_xlp9xx() ? 
\ +		XLP9XX_IO_BRIDGE_OFFSET(node) : XLP_IO_BRIDGE_OFFSET(node))  #define nlm_get_bridge_regbase(node)	\  			(nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ) diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h index 55eee77adac..805bfd21f33 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h @@ -48,8 +48,10 @@  #define XLP_IO_SIZE			(64 << 20)	/* ECFG space size */  #define XLP_IO_PCI_HDRSZ		0x100  #define XLP_IO_DEV(node, dev)		((dev) + (node) * 8) -#define XLP_HDR_OFFSET(node, bus, dev, fn)	(((bus) << 20) | \ -				((XLP_IO_DEV(node, dev)) << 15) | ((fn) << 12)) +#define XLP_IO_PCI_OFFSET(b, d, f)	(((b) << 20) | ((d) << 15) | ((f) << 12)) + +#define XLP_HDR_OFFSET(node, bus, dev, fn) \ +		XLP_IO_PCI_OFFSET(bus, XLP_IO_DEV(node, dev), fn)  #define XLP_IO_BRIDGE_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 0, 0)  /* coherent inter chip */ @@ -72,6 +74,8 @@  #define XLP_IO_USB_OHCI2_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 2, 4)  #define XLP_IO_USB_OHCI3_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 2, 5) +#define XLP_IO_SATA_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 3, 2) +  /* XLP2xx has an updated USB block */  #define XLP2XX_IO_USB_OFFSET(node, i)	XLP_HDR_OFFSET(node, 0, 4, i)  #define XLP2XX_IO_USB_XHCI0_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 4, 1) @@ -101,13 +105,43 @@  #define XLP_IO_SYS_OFFSET(node)		XLP_HDR_OFFSET(node, 0, 6, 5)  #define XLP_IO_JTAG_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 6, 6) +/* Flash */  #define XLP_IO_NOR_OFFSET(node)		XLP_HDR_OFFSET(node, 0, 7, 0)  #define XLP_IO_NAND_OFFSET(node)	XLP_HDR_OFFSET(node, 0, 7, 1)  #define XLP_IO_SPI_OFFSET(node)		XLP_HDR_OFFSET(node, 0, 7, 2) -/* SD flash */ -#define XLP_IO_SD_OFFSET(node)		XLP_HDR_OFFSET(node, 0, 7, 3) -#define XLP_IO_MMC_OFFSET(node, slot)	\ -		((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ) +#define XLP_IO_MMC_OFFSET(node)		XLP_HDR_OFFSET(node, 0, 7, 3) + +/* Things have changed drastically 
in XLP 9XX */ +#define XLP9XX_HDR_OFFSET(n, d, f)	\ +			XLP_IO_PCI_OFFSET(xlp9xx_get_socbus(n), d, f) + +#define XLP9XX_IO_BRIDGE_OFFSET(node)	XLP_IO_PCI_OFFSET(0, 0, node) +#define XLP9XX_IO_PIC_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 2, 0) +#define XLP9XX_IO_UART_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 2, 2) +#define XLP9XX_IO_SYS_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 0) +#define XLP9XX_IO_FUSE_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 1) +#define XLP9XX_IO_CLOCK_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 2) +#define XLP9XX_IO_POWER_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 3) +#define XLP9XX_IO_JTAG_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 6, 4) + +#define XLP9XX_IO_PCIE_OFFSET(node, i)	XLP9XX_HDR_OFFSET(node, 1, i) +#define XLP9XX_IO_PCIE0_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 1, 0) +#define XLP9XX_IO_PCIE2_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 1, 2) +#define XLP9XX_IO_PCIE3_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 1, 3) + +/* XLP9xx USB block */ +#define XLP9XX_IO_USB_OFFSET(node, i)		XLP9XX_HDR_OFFSET(node, 4, i) +#define XLP9XX_IO_USB_XHCI0_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 4, 1) +#define XLP9XX_IO_USB_XHCI1_OFFSET(node)	XLP9XX_HDR_OFFSET(node, 4, 2) + +/* XLP9XX on-chip SATA controller */ +#define XLP9XX_IO_SATA_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 3, 2) + +/* Flash */ +#define XLP9XX_IO_NOR_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 0) +#define XLP9XX_IO_NAND_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 1) +#define XLP9XX_IO_SPI_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 2) +#define XLP9XX_IO_MMC_OFFSET(node)		XLP9XX_HDR_OFFSET(node, 7, 3)  /* PCI config header register id's */  #define XLP_PCI_CFGREG0			0x00 @@ -154,13 +188,27 @@  #define PCI_DEVICE_ID_NLM_NOR		0x1015  #define PCI_DEVICE_ID_NLM_NAND		0x1016  #define PCI_DEVICE_ID_NLM_MMC		0x1018 -#define PCI_DEVICE_ID_NLM_XHCI		0x101d +#define PCI_DEVICE_ID_NLM_SATA		0x101A +#define PCI_DEVICE_ID_NLM_XHCI		0x101D + +#define PCI_DEVICE_ID_XLP9XX_MMC	0x9018 +#define PCI_DEVICE_ID_XLP9XX_SATA	0x901A +#define 
PCI_DEVICE_ID_XLP9XX_XHCI	0x901D  #ifndef __ASSEMBLY__  #define nlm_read_pci_reg(b, r)		nlm_read_reg(b, r)  #define nlm_write_pci_reg(b, r, v)	nlm_write_reg(b, r, v) +static inline int xlp9xx_get_socbus(int node) +{ +	uint64_t socbridge; + +	if (node == 0) +		return 1; +	socbridge = nlm_pcicfg_base(XLP9XX_IO_BRIDGE_OFFSET(node)); +	return (nlm_read_pci_reg(socbridge, 0x6) >> 8) & 0xff; +}  #endif /* !__ASSEMBLY */  #endif /* __NLM_HAL_IOMAP_H__ */ diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h index b559cb9f56e..91540f41e1e 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h @@ -52,25 +52,62 @@  #define PCIE_BYTE_SWAP_MEM_LIM		0x248  #define PCIE_BYTE_SWAP_IO_BASE		0x249  #define PCIE_BYTE_SWAP_IO_LIM		0x24A + +#define PCIE_BRIDGE_MSIX_ADDR_BASE	0x24F +#define PCIE_BRIDGE_MSIX_ADDR_LIMIT	0x250  #define PCIE_MSI_STATUS			0x25A  #define PCIE_MSI_EN			0x25B +#define PCIE_MSIX_STATUS		0x25D +#define PCIE_INT_STATUS0		0x25F +#define PCIE_INT_STATUS1		0x260  #define PCIE_INT_EN0			0x261 +#define PCIE_INT_EN1			0x262 + +/* XLP9XX has basic changes */ +#define PCIE_9XX_BYTE_SWAP_MEM_BASE	0x25c +#define PCIE_9XX_BYTE_SWAP_MEM_LIM	0x25d +#define PCIE_9XX_BYTE_SWAP_IO_BASE	0x25e +#define PCIE_9XX_BYTE_SWAP_IO_LIM	0x25f -/* PCIE_MSI_EN */ -#define PCIE_MSI_VECTOR_INT_EN		0xFFFFFFFF +#define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE	0x264 +#define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT	0x265 +#define PCIE_9XX_MSI_STATUS		0x283 +#define PCIE_9XX_MSI_EN			0x284 +/* 128 MSIX vectors available in 9xx */ +#define PCIE_9XX_MSIX_STATUS0		0x286 +#define PCIE_9XX_MSIX_STATUSX(n)	(n + 0x286) +#define PCIE_9XX_MSIX_VEC		0x296 +#define PCIE_9XX_MSIX_VECX(n)		(n + 0x296) +#define PCIE_9XX_INT_STATUS0		0x397 +#define PCIE_9XX_INT_STATUS1		0x398 +#define PCIE_9XX_INT_EN0		0x399 +#define PCIE_9XX_INT_EN1		0x39a -/* PCIE_INT_EN0 */ -#define PCIE_MSI_INT_EN			(1 << 9) +/* other */ 
+#define PCIE_NLINKS			4 +/* MSI addresses */ +#define MSI_ADDR_BASE			0xfffee00000ULL +#define MSI_ADDR_SZ			0x10000 +#define MSI_LINK_ADDR(n, l)		(MSI_ADDR_BASE + \ +				(PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ) +#define MSIX_ADDR_BASE			0xfffef00000ULL +#define MSIX_LINK_ADDR(n, l)		(MSIX_ADDR_BASE + \ +				(PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ)  #ifndef __ASSEMBLY__  #define nlm_read_pcie_reg(b, r)		nlm_read_reg(b, r)  #define nlm_write_pcie_reg(b, r, v)	nlm_write_reg(b, r, v) -#define nlm_get_pcie_base(node, inst)	\ -			nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, inst)) -#define nlm_get_pcie_regbase(node, inst)	\ -			(nlm_get_pcie_base(node, inst) + XLP_IO_PCI_HDRSZ) +#define nlm_get_pcie_base(node, inst)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \ +	XLP9XX_IO_PCIE_OFFSET(node, inst) : XLP_IO_PCIE_OFFSET(node, inst)) + +#ifdef CONFIG_PCI_MSI +void xlp_init_node_msi_irqs(int node, int link); +#else +static inline void xlp_init_node_msi_irqs(int node, int link) {} +#endif + +struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev); -int xlp_pcie_link_irt(int link);  #endif  #endif /* __NLM_HAL_PCIBUS_H__ */ diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h index 105389b79f0..41cefe94f0c 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h @@ -150,12 +150,19 @@  #define PIC_IRT0		0x74  #define PIC_IRT(i)		(PIC_IRT0 + ((i) * 2)) -#define TIMER_CYCLES_MAXVAL	0xffffffffffffffffULL +#define PIC_9XX_PENDING_0	0x6 +#define PIC_9XX_PENDING_1	0x8 +#define PIC_9XX_PENDING_2	0xa +#define PIC_9XX_PENDING_3	0xc + +#define PIC_9XX_IRT0		0x1c0 +#define PIC_9XX_IRT(i)		(PIC_9XX_IRT0 + ((i) * 2))  /*   *    IRT Map   */  #define PIC_NUM_IRTS		160 +#define PIC_9XX_NUM_IRTS	256  #define PIC_IRT_WD_0_INDEX	0  #define PIC_IRT_WD_1_INDEX	1 @@ -192,15 +199,14 @@  #define PIC_IRT_PCIE_LINK_3_INDEX	81  #define PIC_IRT_PCIE_LINK_INDEX(num)	((num) + PIC_IRT_PCIE_LINK_0_INDEX) +#define 
PIC_9XX_IRT_PCIE_LINK_0_INDEX	191 +#define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \ +				((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX) +  #define PIC_CLOCK_TIMER			7 -#define PIC_IRQ_BASE			8  #if !defined(LOCORE) && !defined(__ASSEMBLY__) -#define PIC_IRT_FIRST_IRQ		(PIC_IRQ_BASE) -#define PIC_IRT_LAST_IRQ		63 -#define PIC_IRQ_IS_IRT(irq)		((irq) >= PIC_IRT_FIRST_IRQ) -  /*   *   Misc   */ @@ -210,30 +216,26 @@  #define nlm_read_pic_reg(b, r)	nlm_read_reg64(b, r)  #define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v) -#define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node)) +#define nlm_get_pic_pcibase(node)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \ +		XLP9XX_IO_PIC_OFFSET(node) : XLP_IO_PIC_OFFSET(node))  #define nlm_get_pic_regbase(node) (nlm_get_pic_pcibase(node) + XLP_IO_PCI_HDRSZ)  /* We use PIC on node 0 as a timer */  #define pic_timer_freq()		nlm_get_pic_frequency(0)  /* IRT and h/w interrupt routines */ -static inline int -nlm_pic_read_irt(uint64_t base, int irt_index) -{ -	return nlm_read_pic_reg(base, PIC_IRT(irt_index)); -} -  static inline void -nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu) +nlm_9xx_pic_write_irt(uint64_t base, int irt_num, int en, int nmi, +	int sch, int vec, int dt, int db, int cpu)  {  	uint64_t val; -	val = nlm_read_pic_reg(base, PIC_IRT(irt)); -	/* clear cpuset and mask */ -	val &= ~((0x7ull << 16) | 0xffff); -	/* set DB, cpuset and cpumask */ -	val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf)); -	nlm_write_pic_reg(base, PIC_IRT(irt), val); +	val = (((uint64_t)en & 0x1) << 22) | ((nmi & 0x1) << 23) | +			((0 /*mc*/) << 20) | ((vec & 0x3f) << 24) | +			((dt & 0x1) << 21) | (0 /*ptr*/ << 16) | +			(cpu & 0x3ff); + +	nlm_write_pic_reg(base, PIC_9XX_IRT(irt_num), val);  }  static inline void @@ -254,9 +256,13 @@ static inline void  nlm_pic_write_irt_direct(uint64_t base, int irt_num, int en, int nmi,  	int sch, int vec, int cpu)  { -	nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1, -		(cpu >> 4),		/* thread 
group */ -		1 << (cpu & 0xf));	/* thread mask */ +	if (cpu_is_xlp9xx()) +		nlm_9xx_pic_write_irt(base, irt_num, en, nmi, sch, vec, +							1, 0, cpu); +	else +		nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1, +			(cpu >> 4),		/* thread group */ +			1 << (cpu & 0xf));	/* thread mask */  }  static inline uint64_t @@ -298,8 +304,13 @@ nlm_pic_enable_irt(uint64_t base, int irt)  {  	uint64_t reg; -	reg = nlm_read_pic_reg(base, PIC_IRT(irt)); -	nlm_write_pic_reg(base, PIC_IRT(irt), reg | (1u << 31)); +	if (cpu_is_xlp9xx()) { +		reg = nlm_read_pic_reg(base, PIC_9XX_IRT(irt)); +		nlm_write_pic_reg(base, PIC_9XX_IRT(irt), reg | (1 << 22)); +	} else { +		reg = nlm_read_pic_reg(base, PIC_IRT(irt)); +		nlm_write_pic_reg(base, PIC_IRT(irt), reg | (1u << 31)); +	}  }  static inline void @@ -307,8 +318,15 @@ nlm_pic_disable_irt(uint64_t base, int irt)  {  	uint64_t reg; -	reg = nlm_read_pic_reg(base, PIC_IRT(irt)); -	nlm_write_pic_reg(base, PIC_IRT(irt), reg & ~((uint64_t)1 << 31)); +	if (cpu_is_xlp9xx()) { +		reg = nlm_read_pic_reg(base, PIC_9XX_IRT(irt)); +		reg &= ~((uint64_t)1 << 22); +		nlm_write_pic_reg(base, PIC_9XX_IRT(irt), reg); +	} else { +		reg = nlm_read_pic_reg(base, PIC_IRT(irt)); +		reg &= ~((uint64_t)1 << 31); +		nlm_write_pic_reg(base, PIC_IRT(irt), reg); +	}  }  static inline void @@ -316,8 +334,13 @@ nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi)  {  	uint64_t ipi; -	ipi = ((uint64_t)nmi << 31) | (irq << 20); -	ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */ +	if (cpu_is_xlp9xx()) +		ipi = (nmi << 23) | (irq << 24) | +			(0/*mcm*/ << 20) | (0/*ptr*/ << 16) | hwt; +	else +		ipi = ((uint64_t)nmi << 31) | (irq << 20) | +			((hwt >> 4) << 16) | (1 << (hwt & 0xf)); +  	nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);  } diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h index fcf2833c16c..bc7bddf25be 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h +++ 
b/arch/mips/include/asm/netlogic/xlp-hal/sys.h @@ -118,6 +118,10 @@  #define SYS_SCRTCH3				0x4c  /* PLL registers XLP2XX */ +#define SYS_CPU_PLL_CTRL0(core)			(0x1c0 + (core * 4)) +#define SYS_CPU_PLL_CTRL1(core)			(0x1c1 + (core * 4)) +#define SYS_CPU_PLL_CTRL2(core)			(0x1c2 + (core * 4)) +#define SYS_CPU_PLL_CTRL3(core)			(0x1c3 + (core * 4))  #define SYS_PLL_CTRL0				0x240  #define SYS_PLL_CTRL1				0x241  #define SYS_PLL_CTRL2				0x242 @@ -147,13 +151,60 @@  #define SYS_SYS_PLL_MEM_REQ			0x2a3  #define SYS_PLL_MEM_STAT			0x2a4 +/* PLL registers XLP9XX */ +#define SYS_9XX_CPU_PLL_CTRL0(core)		(0xc0 + (core * 4)) +#define SYS_9XX_CPU_PLL_CTRL1(core)		(0xc1 + (core * 4)) +#define SYS_9XX_CPU_PLL_CTRL2(core)		(0xc2 + (core * 4)) +#define SYS_9XX_CPU_PLL_CTRL3(core)		(0xc3 + (core * 4)) +#define SYS_9XX_DMC_PLL_CTRL0			0x140 +#define SYS_9XX_DMC_PLL_CTRL1			0x141 +#define SYS_9XX_DMC_PLL_CTRL2			0x142 +#define SYS_9XX_DMC_PLL_CTRL3			0x143 +#define SYS_9XX_PLL_CTRL0			0x144 +#define SYS_9XX_PLL_CTRL1			0x145 +#define SYS_9XX_PLL_CTRL2			0x146 +#define SYS_9XX_PLL_CTRL3			0x147 + +#define SYS_9XX_PLL_CTRL0_DEVX(x)		(0x148 + (x) * 4) +#define SYS_9XX_PLL_CTRL1_DEVX(x)		(0x149 + (x) * 4) +#define SYS_9XX_PLL_CTRL2_DEVX(x)		(0x14a + (x) * 4) +#define SYS_9XX_PLL_CTRL3_DEVX(x)		(0x14b + (x) * 4) + +#define SYS_9XX_CPU_PLL_CHG_CTRL		0x188 +#define SYS_9XX_PLL_CHG_CTRL			0x189 +#define SYS_9XX_CLK_DEV_DIS			0x18a +#define SYS_9XX_CLK_DEV_SEL			0x18b +#define SYS_9XX_CLK_DEV_DIV			0x18d +#define SYS_9XX_CLK_DEV_CHG			0x18f + +/* Registers changed on 9XX */ +#define SYS_9XX_POWER_ON_RESET_CFG		0x00 +#define SYS_9XX_CHIP_RESET			0x01 +#define SYS_9XX_CPU_RESET			0x02 +#define SYS_9XX_CPU_NONCOHERENT_MODE		0x03 + +/* XLP 9XX fuse block registers */ +#define FUSE_9XX_DEVCFG6			0xc6 +  #ifndef __ASSEMBLY__  #define nlm_read_sys_reg(b, r)		nlm_read_reg(b, r)  #define nlm_write_sys_reg(b, r, v)	nlm_write_reg(b, r, v) -#define nlm_get_sys_pcibase(node) 
nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node)) +#define nlm_get_sys_pcibase(node)	nlm_pcicfg_base(cpu_is_xlp9xx() ? \ +		XLP9XX_IO_SYS_OFFSET(node) : XLP_IO_SYS_OFFSET(node))  #define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ) +/* XLP9XX fuse block */ +#define nlm_get_fuse_pcibase(node)	\ +			nlm_pcicfg_base(XLP9XX_IO_FUSE_OFFSET(node)) +#define nlm_get_fuse_regbase(node)	\ +			(nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ) + +#define nlm_get_clock_pcibase(node)	\ +			nlm_pcicfg_base(XLP9XX_IO_CLOCK_OFFSET(node)) +#define nlm_get_clock_regbase(node)	\ +			(nlm_get_clock_pcibase(node) + XLP_IO_PCI_HDRSZ) +  unsigned int nlm_get_pic_frequency(int node);  #endif  #endif diff --git a/arch/mips/include/asm/netlogic/xlp-hal/uart.h b/arch/mips/include/asm/netlogic/xlp-hal/uart.h index 86d16e1e607..a6c54424dd9 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/uart.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/uart.h @@ -94,7 +94,8 @@  #define nlm_read_uart_reg(b, r)		nlm_read_reg(b, r)  #define nlm_write_uart_reg(b, r, v)	nlm_write_reg(b, r, v)  #define nlm_get_uart_pcibase(node, inst)	\ -		nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst)) +	nlm_pcicfg_base(cpu_is_xlp9xx() ?  
XLP9XX_IO_UART_OFFSET(node) : \ +						XLP_IO_UART_OFFSET(node, inst))  #define nlm_get_uart_regbase(node, inst)	\  			(nlm_get_uart_pcibase(node, inst) + XLP_IO_PCI_HDRSZ) diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h index 17daffb280a..a862b93223c 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h @@ -37,10 +37,9 @@  #define PIC_UART_0_IRQ			17  #define PIC_UART_1_IRQ			18 -#define PIC_PCIE_LINK_0_IRQ		19 -#define PIC_PCIE_LINK_1_IRQ		20 -#define PIC_PCIE_LINK_2_IRQ		21 -#define PIC_PCIE_LINK_3_IRQ		22 + +#define PIC_PCIE_LINK_LEGACY_IRQ_BASE	19 +#define PIC_PCIE_LINK_LEGACY_IRQ(i)	(19 + (i))  #define PIC_EHCI_0_IRQ			23  #define PIC_EHCI_1_IRQ			24 @@ -51,12 +50,36 @@  #define PIC_2XX_XHCI_0_IRQ		23  #define PIC_2XX_XHCI_1_IRQ		24  #define PIC_2XX_XHCI_2_IRQ		25 +#define PIC_9XX_XHCI_0_IRQ		23 +#define PIC_9XX_XHCI_1_IRQ		24  #define PIC_MMC_IRQ			29  #define PIC_I2C_0_IRQ			30  #define PIC_I2C_1_IRQ			31  #define PIC_I2C_2_IRQ			32  #define PIC_I2C_3_IRQ			33 +#define PIC_SPI_IRQ			34 +#define PIC_NAND_IRQ			37 +#define PIC_SATA_IRQ			38 +#define PIC_GPIO_IRQ			39 + +#define PIC_PCIE_LINK_MSI_IRQ_BASE	44	/* 44 - 47 MSI IRQ */ +#define PIC_PCIE_LINK_MSI_IRQ(i)	(44 + (i)) + +/* MSI-X with second link-level dispatch */ +#define PIC_PCIE_MSIX_IRQ_BASE		48	/* 48 - 51 MSI-X IRQ */ +#define PIC_PCIE_MSIX_IRQ(i)		(48 + (i)) + +/* XLP9xx and XLP8xx has 128 and 32 MSIX vectors respectively */ +#define NLM_MSIX_VEC_BASE		96	/* 96 - 223 - MSIX mapped */ +#define NLM_MSI_VEC_BASE		224	/* 224 -351 - MSI mapped */ + +#define NLM_PIC_INDIRECT_VEC_BASE	512 +#define NLM_GPIO_VEC_BASE		768 + +#define PIC_IRQ_BASE			8 +#define PIC_IRT_FIRST_IRQ		PIC_IRQ_BASE +#define PIC_IRT_LAST_IRQ		63  #ifndef __ASSEMBLY__ @@ -68,15 +91,28 @@ void xlp_mmu_init(void);  void nlm_hal_init(void);  int xlp_get_dram_map(int n, uint64_t *dram_map); +struct pci_dev; +int 
xlp_socdev_to_node(const struct pci_dev *dev); +  /* Device tree related */ +void xlp_early_init_devtree(void);  void *xlp_dt_init(void *fdtp);  static inline int cpu_is_xlpii(void)  { -	int chip = read_c0_prid() & 0xff00; +	int chip = read_c0_prid() & PRID_IMP_MASK; -	return chip == PRID_IMP_NETLOGIC_XLP2XX; +	return chip == PRID_IMP_NETLOGIC_XLP2XX || +		chip == PRID_IMP_NETLOGIC_XLP9XX || +		chip == PRID_IMP_NETLOGIC_XLP5XX;  } +static inline int cpu_is_xlp9xx(void) +{ +	int chip = read_c0_prid() & PRID_IMP_MASK; + +	return chip == PRID_IMP_NETLOGIC_XLP9XX || +		chip == PRID_IMP_NETLOGIC_XLP5XX; +}  #endif /* !__ASSEMBLY__ */  #endif /* _ASM_NLM_XLP_H */ diff --git a/arch/mips/include/asm/netlogic/xlr/xlr.h b/arch/mips/include/asm/netlogic/xlr/xlr.h index c1667e0c272..ceb991ca843 100644 --- a/arch/mips/include/asm/netlogic/xlr/xlr.h +++ b/arch/mips/include/asm/netlogic/xlr/xlr.h @@ -35,11 +35,6 @@  #ifndef _ASM_NLM_XLR_H  #define _ASM_NLM_XLR_H -/* Platform UART functions */ -struct uart_port; -unsigned int nlm_xlr_uart_in(struct uart_port *, int); -void nlm_xlr_uart_out(struct uart_port *, int, int); -  /* SMP helpers */  void xlr_wakeup_secondary_cpus(void); diff --git a/arch/mips/include/asm/nile4.h b/arch/mips/include/asm/nile4.h index 2e2436d0e94..99e97f8bfbc 100644 --- a/arch/mips/include/asm/nile4.h +++ b/arch/mips/include/asm/nile4.h @@ -1,7 +1,7 @@  /*   *  asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions   * - *  Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com> + *  Copyright (C) 2000 Geert Uytterhoeven <geert@linux-m68k.org>   *		       Sony Software Development Center Europe (SDCE), Brussels   *   *  This file is based on the following documentation: diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h index 41785dd0ddd..893320375ae 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-board.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h @@ -36,6 +36,13 @@  #include 
<asm/octeon/cvmx-helper.h> +enum cvmx_helper_board_usb_clock_types { +	USB_CLOCK_TYPE_REF_12, +	USB_CLOCK_TYPE_REF_24, +	USB_CLOCK_TYPE_REF_48, +	USB_CLOCK_TYPE_CRYSTAL_12, +}; +  typedef enum {  	set_phy_link_flags_autoneg = 0x1,  	set_phy_link_flags_flow_control_dont_touch = 0x0 << 1, @@ -154,4 +161,6 @@ extern int __cvmx_helper_board_interface_probe(int interface,   */  extern int __cvmx_helper_board_hardware_enable(int interface); +enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void); +  #endif /* __CVMX_HELPER_BOARD_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h index a76fe5a57a9..df69bfd2b00 100644 --- a/arch/mips/include/asm/octeon/cvmx-pip.h +++ b/arch/mips/include/asm/octeon/cvmx-pip.h @@ -192,13 +192,13 @@ typedef struct {  	/* Number of packets processed by PIP */  	uint32_t packets;  	/* -	 * Number of indentified L2 multicast packets.	Does not +	 * Number of identified L2 multicast packets.	Does not  	 * include broadcast packets.  Only includes packets whose  	 * parse mode is SKIP_TO_L2  	 */  	uint32_t multicast_packets;  	/* -	 * Number of indentified L2 broadcast packets.	Does not +	 * Number of identified L2 broadcast packets.	Does not  	 * include multicast packets.  
Only includes packets whose  	 * parse mode is SKIP_TO_L2  	 */ diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index f5d77b91537..d781f9e6688 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -211,7 +211,6 @@ union octeon_cvmemctl {  extern void octeon_write_lcd(const char *s);  extern void octeon_check_cpu_bist(void); -extern int octeon_get_boot_debug_flag(void);  extern int octeon_get_boot_uart(void);  struct uart_port; diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index f6be4741f7e..5699ec3a71a 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -11,6 +11,8 @@  #include <spaces.h>  #include <linux/const.h> +#include <linux/kernel.h> +#include <asm/mipsregs.h>  /*   * PAGE_SHIFT determines the page size @@ -33,6 +35,29 @@  #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)  #define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1)) +/* + * This is used for calculating the real page sizes + * for FTLB or VTLB + FTLB confugrations. 
+ */ +static inline unsigned int page_size_ftlb(unsigned int mmuextdef) +{ +	switch (mmuextdef) { +	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT: +		if (PAGE_SIZE == (1 << 30)) +			return 5; +		if (PAGE_SIZE == (1llu << 32)) +			return 6; +		if (PAGE_SIZE > (256 << 10)) +			return 7; /* reserved */ +			/* fall through */ +	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT: +		return (PAGE_SHIFT - 10) / 2; +	default: +		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n", +		      mmuextdef >> 14); +	} +} +  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT  #define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)  #define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT) @@ -165,7 +190,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;   * https://patchwork.linux-mips.org/patch/1541/   */ +#ifndef __pa_symbol  #define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0)) +#endif  #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT) diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index f194c08bd05..974b0e30896 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -73,16 +73,23 @@ extern unsigned long PCIBIOS_MIN_MEM;  extern void pcibios_set_master(struct pci_dev *dev); -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ -	/* We don't do dynamic PCI IRQ allocation */ -} -  #define HAVE_PCI_MMAP  extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,  	enum pci_mmap_state mmap_state, int write_combine); +#define HAVE_ARCH_PCI_RESOURCE_TO_USER + +static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, +		const struct resource *rsrc, resource_size_t *start, +		resource_size_t *end) +{ +	phys_t size = resource_size(rsrc); + +	*start = fixup_bigphys_addr(rsrc->start, size); +	*end = rsrc->start + size; +} +  /*   * Dynamic DMA mapping stuff.   * MIPS has everything mapped statically. 
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 881d18b4e29..b336037e876 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -80,9 +80,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,  	struct page *pte;  	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); -	if (pte) { -		clear_highpage(pte); -		pgtable_page_ctor(pte); +	if (!pte) +		return NULL; +	clear_highpage(pte); +	if (!pgtable_page_ctor(pte)) { +		__free_page(pte); +		return NULL;  	}  	return pte;  } diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 32aea4852fb..e592f3687d6 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -235,6 +235,15 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)  #define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)  #define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) +#elif defined(CONFIG_CPU_LOONGSON3) + +/* Using COHERENT flag for NONCOHERENT doesn't hurt. 
*/ + +#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)  /* LOONGSON       */ +#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */ +#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */ +#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)  /* LOONGSON       */ +  #else  #define _CACHE_CACHABLE_NO_WA	    (0<<_CACHE_SHIFT)  /* R4600 only	  */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 008324d1c26..539ddd148bb 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -32,6 +32,8 @@ struct vm_area_struct;  				 _page_cachable_default)  #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \  				 _PAGE_GLOBAL | _page_cachable_default) +#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ +				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)  #define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \  				 _page_cachable_default)  #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h new file mode 100644 index 00000000000..625eda53d57 --- /dev/null +++ b/arch/mips/include/asm/pm-cps.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_PM_CPS_H__ +#define __MIPS_ASM_PM_CPS_H__ + +/* + * The CM & CPC can only handle coherence & power control on a per-core basis, + * thus in an MT system the VPEs within each core are coupled and can only + * enter or exit states requiring CM or CPC assistance in unison. 
+ */ +#ifdef CONFIG_MIPS_MT +# define coupled_coherence cpu_has_mipsmt +#else +# define coupled_coherence 0 +#endif + +/* Enumeration of possible PM states */ +enum cps_pm_state { +	CPS_PM_NC_WAIT,		/* MIPS wait instruction, non-coherent */ +	CPS_PM_CLOCK_GATED,	/* Core clock gated */ +	CPS_PM_POWER_GATED,	/* Core power gated */ +	CPS_PM_STATE_COUNT, +}; + +/** + * cps_pm_support_state - determine whether the system supports a PM state + * @state: the state to test for support + * + * Returns true if the system supports the given state, otherwise false. + */ +extern bool cps_pm_support_state(enum cps_pm_state state); + +/** + * cps_pm_enter_state - enter a PM state + * @state: the state to enter + * + * Enter the given PM state. If coupled_coherence is non-zero then it is + * expected that this function be called at approximately the same time on + * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno. + */ +extern int cps_pm_enter_state(enum cps_pm_state state); + +#endif /* __MIPS_ASM_PM_CPS_H__ */ diff --git a/arch/mips/include/asm/pm.h b/arch/mips/include/asm/pm.h new file mode 100644 index 00000000000..7c03469e043 --- /dev/null +++ b/arch/mips/include/asm/pm.h @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2014 Imagination Technologies Ltd + * + * This program is free software; you can redistribute	it and/or modify it + * under  the terms of	the GNU General	 Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + * PM helper macros for CPU power off (e.g. Suspend-to-RAM). 
+ */ + +#ifndef __ASM_PM_H +#define __ASM_PM_H + +#ifdef __ASSEMBLY__ + +#include <asm/asm-offsets.h> +#include <asm/asm.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> + +/* Save CPU state to stack for suspend to RAM */ +.macro SUSPEND_SAVE_REGS +	subu	sp, PT_SIZE +	/* Call preserved GPRs */ +	LONG_S	$16, PT_R16(sp) +	LONG_S	$17, PT_R17(sp) +	LONG_S	$18, PT_R18(sp) +	LONG_S	$19, PT_R19(sp) +	LONG_S	$20, PT_R20(sp) +	LONG_S	$21, PT_R21(sp) +	LONG_S	$22, PT_R22(sp) +	LONG_S	$23, PT_R23(sp) +	LONG_S	$28, PT_R28(sp) +	LONG_S	$30, PT_R30(sp) +	LONG_S	$31, PT_R31(sp) +	/* A couple of CP0 registers with space in pt_regs */ +	mfc0	k0, CP0_STATUS +	LONG_S	k0, PT_STATUS(sp) +.endm + +/* Restore CPU state from stack after resume from RAM */ +.macro RESUME_RESTORE_REGS_RETURN +	.set	push +	.set	noreorder +	/* A couple of CP0 registers with space in pt_regs */ +	LONG_L	k0, PT_STATUS(sp) +	mtc0	k0, CP0_STATUS +	/* Call preserved GPRs */ +	LONG_L	$16, PT_R16(sp) +	LONG_L	$17, PT_R17(sp) +	LONG_L	$18, PT_R18(sp) +	LONG_L	$19, PT_R19(sp) +	LONG_L	$20, PT_R20(sp) +	LONG_L	$21, PT_R21(sp) +	LONG_L	$22, PT_R22(sp) +	LONG_L	$23, PT_R23(sp) +	LONG_L	$28, PT_R28(sp) +	LONG_L	$30, PT_R30(sp) +	LONG_L	$31, PT_R31(sp) +	/* Pop and return */ +	jr	ra +	 addiu	sp, PT_SIZE +	.set	pop +.endm + +/* Get address of static suspend state into t1 */ +.macro LA_STATIC_SUSPEND +	la	t1, mips_static_suspend_state +.endm + +/* Save important CPU state for early restoration to global data */ +.macro SUSPEND_SAVE_STATIC +#ifdef CONFIG_EVA +	/* +	 * Segment configuration is saved in global data where it can be easily +	 * reloaded without depending on the segment configuration. 
+	 */ +	mfc0	k0, CP0_PAGEMASK, 2	/* SegCtl0 */ +	LONG_S	k0, SSS_SEGCTL0(t1) +	mfc0	k0, CP0_PAGEMASK, 3	/* SegCtl1 */ +	LONG_S	k0, SSS_SEGCTL1(t1) +	mfc0	k0, CP0_PAGEMASK, 4	/* SegCtl2 */ +	LONG_S	k0, SSS_SEGCTL2(t1) +#endif +	/* save stack pointer (pointing to GPRs) */ +	LONG_S	sp, SSS_SP(t1) +.endm + +/* Restore important CPU state early from global data */ +.macro RESUME_RESTORE_STATIC +#ifdef CONFIG_EVA +	/* +	 * Segment configuration must be restored prior to any access to +	 * allocated memory, as it may reside outside of the legacy kernel +	 * segments. +	 */ +	LONG_L	k0, SSS_SEGCTL0(t1) +	mtc0	k0, CP0_PAGEMASK, 2	/* SegCtl0 */ +	LONG_L	k0, SSS_SEGCTL1(t1) +	mtc0	k0, CP0_PAGEMASK, 3	/* SegCtl1 */ +	LONG_L	k0, SSS_SEGCTL2(t1) +	mtc0	k0, CP0_PAGEMASK, 4	/* SegCtl2 */ +	tlbw_use_hazard +#endif +	/* restore stack pointer (pointing to GPRs) */ +	LONG_L	sp, SSS_SP(t1) +.endm + +/* flush caches to make sure context has reached memory */ +.macro SUSPEND_CACHE_FLUSH +	.extern	__wback_cache_all +	.set	push +	.set	noreorder +	la	t1, __wback_cache_all +	LONG_L	t0, 0(t1) +	jalr	t0 +	 nop +	.set	pop + .endm + +/* Save suspend state and flush data caches to RAM */ +.macro SUSPEND_SAVE +	SUSPEND_SAVE_REGS +	LA_STATIC_SUSPEND +	SUSPEND_SAVE_STATIC +	SUSPEND_CACHE_FLUSH +.endm + +/* Restore saved state after resume from RAM and return */ +.macro RESUME_RESTORE_RETURN +	LA_STATIC_SUSPEND +	RESUME_RESTORE_STATIC +	RESUME_RESTORE_REGS_RETURN +.endm + +#else /* __ASSEMBLY__ */ + +/** + * struct mips_static_suspend_state - Core saved CPU state across S2R. + * @segctl:	CP0 Segment control registers. + * @sp:		Stack frame where GP register context is saved. + * + * This structure contains minimal CPU state that must be saved in static kernel + * data in order to be able to restore the rest of the state. 
This includes + * segmentation configuration in the case of EVA being enabled, as they must be + * restored prior to any kmalloc'd memory being referenced (even the stack + * pointer). + */ +struct mips_static_suspend_state { +#ifdef CONFIG_EVA +	unsigned long segctl[3]; +#endif +	unsigned long sp; +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_PM_HELPERS_H */ diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 3605b844ad8..ad70cba8daf 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -97,18 +97,48 @@ extern unsigned int vced_count, vcei_count;  #define NUM_FPU_REGS	32 -typedef __u64 fpureg_t; +#ifdef CONFIG_CPU_HAS_MSA +# define FPU_REG_WIDTH	128 +#else +# define FPU_REG_WIDTH	64 +#endif + +union fpureg { +	__u32	val32[FPU_REG_WIDTH / 32]; +	__u64	val64[FPU_REG_WIDTH / 64]; +}; + +#ifdef CONFIG_CPU_LITTLE_ENDIAN +# define FPR_IDX(width, idx)	(idx) +#else +# define FPR_IDX(width, idx)	((FPU_REG_WIDTH / (width)) - 1 - (idx)) +#endif + +#define BUILD_FPR_ACCESS(width) \ +static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\ +{									\ +	return fpr->val##width[FPR_IDX(width, idx)];			\ +}									\ +									\ +static inline void set_fpr##width(union fpureg *fpr, unsigned idx,	\ +				  u##width val)				\ +{									\ +	fpr->val##width[FPR_IDX(width, idx)] = val;			\ +} + +BUILD_FPR_ACCESS(32) +BUILD_FPR_ACCESS(64)  /* - * It would be nice to add some more fields for emulator statistics, but there - * are a number of fixed offsets in offset.h and elsewhere that would have to - * be recalculated by hand.  So the additional information will be private to - * the FPU emulator for now.  See asm-mips/fpu_emulator.h. + * It would be nice to add some more fields for emulator statistics, + * the additional information is private to the FPU emulator for now. + * See arch/mips/include/asm/fpu_emulator.h.   
*/  struct mips_fpu_struct { -	fpureg_t	fpr[NUM_FPU_REGS]; +	union fpureg	fpr[NUM_FPU_REGS];  	unsigned int	fcr31; +	unsigned int	msacsr;  };  #define NUM_DSP_REGS   6 @@ -284,8 +314,9 @@ struct thread_struct {  	 * Saved FPU/FPU emulator stuff				\  	 */							\  	.fpu			= {				\ -		.fpr		= {0,},				\ +		.fpr		= {{{0,},},},			\  		.fcr31		= 0,				\ +		.msacsr		= 0,				\  	},							\  	/*							\  	 * FPU affinity state (null if not FPAFF)		\ diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index 1e7e0961064..a9494c0141f 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h @@ -17,31 +17,17 @@  #include <linux/types.h>  #include <asm/bootinfo.h> -extern int early_init_dt_scan_memory_arch(unsigned long node, -	const char *uname, int depth, void *data); -  extern void device_tree_init(void); -static inline unsigned long pci_address_to_pio(phys_addr_t address) -{ -	/* -	 * The ioport address can be directly used by inX() / outX() -	 */ -	BUG_ON(address > IO_SPACE_LIMIT); - -	return (unsigned long) address; -} -#define pci_address_to_pio pci_address_to_pio -  struct boot_param_header; -extern void __dt_setup_arch(struct boot_param_header *bph); +extern void __dt_setup_arch(void *bph);  #define dt_setup_arch(sym)						\  ({									\ -	extern struct boot_param_header __dtb_##sym##_begin;		\ +	extern char __dtb_##sym##_begin[];				\  									\ -	__dt_setup_arch(&__dtb_##sym##_begin);				\ +	__dt_setup_arch(__dtb_##sym##_begin);				\  })  #else /* CONFIG_OF */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 5e6cd094739..7e6e682aece 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -39,9 +39,6 @@ struct pt_regs {  	unsigned long cp0_badvaddr;  	unsigned long cp0_cause;  	unsigned long cp0_epc; -#ifdef CONFIG_MIPS_MT_SMTC -	unsigned long cp0_tcstatus; -#endif /* CONFIG_MIPS_MT_SMTC */  #ifdef CONFIG_CPU_CAVIUM_OCTEON  	unsigned long long mpl[3];	  /* 
MTM{0,1,2} */  	unsigned long long mtp[3];	  /* MTP{0,1,2} */ @@ -81,9 +78,8 @@ static inline long regs_return_value(struct pt_regs *regs)  #define instruction_pointer(regs) ((regs)->cp0_epc)  #define profile_pc(regs) instruction_pointer(regs) -#define user_stack_pointer(r) ((r)->regs[29]) -extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); +extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);  extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);  extern void die(const char *, struct pt_regs *) __noreturn; @@ -100,4 +96,17 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)  	(struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;	\  }) +/* Helpers for working with the user stack pointer */ + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ +	return regs->regs[29]; +} + +static inline void user_stack_pointer_set(struct pt_regs *regs, +	unsigned long val) +{ +	regs->regs[29] = val; +} +  #endif /* _ASM_PTRACE_H */ diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index a0b2650516a..0b8bd28a0df 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -15,7 +15,9 @@  #include <asm/asm.h>  #include <asm/cacheops.h>  #include <asm/cpu-features.h> +#include <asm/cpu-type.h>  #include <asm/mipsmtregs.h> +#include <asm/uaccess.h> /* for segment_eq() */  /*   * This macro return a properly sign-extended address suitable as base address @@ -34,18 +36,17 @@  	__asm__ __volatile__(						\  	"	.set	push					\n"	\  	"	.set	noreorder				\n"	\ -	"	.set	mips3\n\t				\n"	\ +	"	.set	arch=r4000				\n"	\  	"	cache	%0, %1					\n"	\  	"	.set	pop					\n"	\  	:								\  	: "i" (op), "R" (*(unsigned char *)(addr)))  #ifdef CONFIG_MIPS_MT +  /* - * Temporary hacks for SMTC debug. Optionally force single-threaded - * execution during I-cache flushes. + * Optionally force single-threaded execution during I-cache flushes.   
*/ -  #define PROTECT_CACHE_FLUSHES 1  #ifdef PROTECT_CACHE_FLUSHES @@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)  static inline void flush_icache_line(unsigned long addr)  {  	__iflush_prologue -	cache_op(Hit_Invalidate_I, addr); +	switch (boot_cpu_type()) { +	case CPU_LOONGSON2: +		cache_op(Hit_Invalidate_I_Loongson2, addr); +		break; + +	default: +		cache_op(Hit_Invalidate_I, addr); +		break; +	}  	__iflush_epilogue  } @@ -194,7 +203,7 @@ static inline void flush_scache_line(unsigned long addr)  	__asm__ __volatile__(					\  	"	.set	push			\n"		\  	"	.set	noreorder		\n"		\ -	"	.set	mips3			\n"		\ +	"	.set	arch=r4000		\n"		\  	"1:	cache	%0, (%1)		\n"		\  	"2:	.set	pop			\n"		\  	"	.section __ex_table,\"a\"	\n"		\ @@ -203,12 +212,38 @@ static inline void flush_scache_line(unsigned long addr)  	:							\  	: "i" (op), "r" (addr)) +#define protected_cachee_op(op,addr)				\ +	__asm__ __volatile__(					\ +	"	.set	push			\n"		\ +	"	.set	noreorder		\n"		\ +	"	.set	mips0			\n"		\ +	"	.set	eva			\n"		\ +	"1:	cachee	%0, (%1)		\n"		\ +	"2:	.set	pop			\n"		\ +	"	.section __ex_table,\"a\"	\n"		\ +	"	"STR(PTR)" 1b, 2b		\n"		\ +	"	.previous"					\ +	:							\ +	: "i" (op), "r" (addr)) +  /*   * The next two are for badland addresses like signal trampolines.   */  static inline void protected_flush_icache_line(unsigned long addr)  { -	protected_cache_op(Hit_Invalidate_I, addr); +	switch (boot_cpu_type()) { +	case CPU_LOONGSON2: +		protected_cache_op(Hit_Invalidate_I_Loongson2, addr); +		break; + +	default: +#ifdef CONFIG_EVA +		protected_cachee_op(Hit_Invalidate_I, addr); +#else +		protected_cache_op(Hit_Invalidate_I, addr); +#endif +		break; +	}  }  /* @@ -339,9 +374,94 @@ static inline void invalidate_tcache_page(unsigned long addr)  		: "r" (base),						\  		  "i" (op)); +/* + * Perform the cache operation specified by op using a user mode virtual + * address while in kernel mode. 
+ */ +#define cache16_unroll32_user(base,op)					\ +	__asm__ __volatile__(						\ +	"	.set push					\n"	\ +	"	.set noreorder					\n"	\ +	"	.set mips0					\n"	\ +	"	.set eva					\n"	\ +	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\ +	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\ +	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\ +	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\ +	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\ +	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\ +	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\ +	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\ +	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\ +	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\ +	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\ +	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\ +	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\ +	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\ +	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\ +	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\ +	"	.set pop					\n"	\ +		:							\ +		: "r" (base),						\ +		  "i" (op)); + +#define cache32_unroll32_user(base, op)					\ +	__asm__ __volatile__(						\ +	"	.set push					\n"	\ +	"	.set noreorder					\n"	\ +	"	.set mips0					\n"	\ +	"	.set eva					\n"	\ +	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\ +	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\ +	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\ +	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\ +	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\ +	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\ +	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\ +	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\ +	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\ +	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\ +	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\ +	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\ +	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\ +	"	cachee %1, 
0x340(%0); cachee %1, 0x360(%0)	\n"	\ +	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\ +	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\ +	"	.set pop					\n"	\ +		:							\ +		: "r" (base),						\ +		  "i" (op)); + +#define cache64_unroll32_user(base, op)					\ +	__asm__ __volatile__(						\ +	"	.set push					\n"	\ +	"	.set noreorder					\n"	\ +	"	.set mips0					\n"	\ +	"	.set eva					\n"	\ +	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\ +	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\ +	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\ +	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\ +	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\ +	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\ +	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\ +	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\ +	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\ +	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\ +	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\ +	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\ +	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\ +	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\ +	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\ +	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\ +	"	.set pop					\n"	\ +		:							\ +		: "r" (base),						\ +		  "i" (op)); +  /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ -#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \ -static inline void blast_##pfx##cache##lsize(void)			\ +#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\ +static inline void extra##blast_##pfx##cache##lsize(void)		\  {									\  	unsigned long start = INDEX_BASE;				\  	unsigned long end = start + current_cpu_data.desc.waysize;	\ @@ -359,7 +479,7 @@ static inline void blast_##pfx##cache##lsize(void)			\  	__##pfx##flush_epilogue						\  }									\  									\ -static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ +static inline void 
extra##blast_##pfx##cache##lsize##_page(unsigned long page) \  {									\  	unsigned long start = page;					\  	unsigned long end = page + PAGE_SIZE;				\ @@ -374,7 +494,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \  	__##pfx##flush_epilogue						\  }									\  									\ -static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ +static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \  {									\  	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\  	unsigned long start = INDEX_BASE + (page & indexmask);		\ @@ -393,27 +513,56 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)  	__##pfx##flush_epilogue						\  } -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) - -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) -__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) -__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) -__BUILD_BLAST_CACHE(inv_s, scache, 
Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, ) +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, ) +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) + +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) +__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) +__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) + +#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \ +static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \ +{									\ +	unsigned long start = page;					\ +	unsigned long end = page + PAGE_SIZE;				\ +									\ +	__##pfx##flush_prologue						\ +									\ +	do {								\ +		cache##lsize##_unroll32_user(start, hitop);           
  \ +		start += lsize * 32;					\ +	} while (start < end);						\ +									\ +	__##pfx##flush_epilogue						\ +} + +__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, +			 16) +__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) +__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, +			 32) +__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) +__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, +			 64) +__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)  /* build blast_xxx_range, protected_blast_xxx_range */ -#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \ -static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ +#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\ +static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \  						    unsigned long end)	\  {									\  	unsigned long lsize = cpu_##desc##_line_size();			\ @@ -432,13 +581,54 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \  	__##pfx##flush_epilogue						\  } -__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) -__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_) -__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_) -__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, ) -__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, ) +#ifndef CONFIG_EVA + +__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) + +#else + +#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\ +static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ +							unsigned long end) \ +{									\ +	unsigned long lsize = cpu_##desc##_line_size();			\ +	unsigned long addr = start & ~(lsize - 1);			\ +	
unsigned long aend = (end - 1) & ~(lsize - 1);			\ +									\ +	__##pfx##flush_prologue						\ +									\ +	if (segment_eq(get_fs(), USER_DS)) {				\ +		while (1) {						\ +			protected_cachee_op(hitop, addr);		\ +			if (addr == aend)				\ +				break;					\ +			addr += lsize;					\ +		}							\ +	} else {							\ +		while (1) {						\ +			protected_cache_op(hitop, addr);		\ +			if (addr == aend)				\ +				break;					\ +			addr += lsize;					\ +		}                                                       \ +									\ +	}								\ +	__##pfx##flush_epilogue						\ +} + +__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D) +__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I) + +#endif +__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ +	protected_, loongson2_) +__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , ) +__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )  /* blast_inv_dcache_range */ -__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, ) -__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, ) +__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) +__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )  #endif /* _ASM_R4KCACHE_H */ diff --git a/arch/mips/include/asm/rm9k-ocd.h b/arch/mips/include/asm/rm9k-ocd.h deleted file mode 100644 index b0b80d9ecf9..00000000000 --- a/arch/mips/include/asm/rm9k-ocd.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - *  Copyright (C) 2004 by Basler Vision Technologies AG - *  Author: Thomas Koeller <thomas.koeller@baslerweb.com> - * - *  This program is free software; you can redistribute it and/or modify - *  it under the terms of the GNU General Public License as published by - *  the Free Software Foundation; either version 2 of the License, or - *  (at your option) any later version. 
- * - *  This program is distributed in the hope that it will be useful, - *  but WITHOUT ANY WARRANTY; without even the implied warranty of - *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - *  GNU General Public License for more details. - * - *  You should have received a copy of the GNU General Public License - *  along with this program; if not, write to the Free Software - *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ - -#if !defined(_ASM_RM9K_OCD_H) -#define _ASM_RM9K_OCD_H - -#include <linux/types.h> -#include <linux/spinlock.h> -#include <asm/io.h> - -extern volatile void __iomem * const ocd_base; -extern volatile void __iomem * const titan_base; - -#define ocd_addr(__x__)		(ocd_base + (__x__)) -#define titan_addr(__x__)	(titan_base + (__x__)) -#define scram_addr(__x__)	(scram_base + (__x__)) - -/* OCD register access */ -#define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__)) -#define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__)) -#define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__)) -#define ocd_writel(__val__, __offs__) \ -	__raw_writel((__val__), ocd_addr(__offs__)) -#define ocd_writew(__val__, __offs__) \ -	__raw_writew((__val__), ocd_addr(__offs__)) -#define ocd_writeb(__val__, __offs__) \ -	__raw_writeb((__val__), ocd_addr(__offs__)) - -/* TITAN register access - 32 bit-wide only */ -#define titan_readl(__offs__) __raw_readl(titan_addr(__offs__)) -#define titan_writel(__val__, __offs__) \ -	__raw_writel((__val__), titan_addr(__offs__)) - -/* Protect access to shared TITAN registers */ -extern spinlock_t titan_lock; -extern int titan_irqflags; -#define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags) -#define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags) - -#endif	/* !defined(_ASM_RM9K_OCD_H) */ diff --git a/arch/mips/include/asm/rtlx.h b/arch/mips/include/asm/rtlx.h index 90985b61dbd..c1020654876 100644 --- a/arch/mips/include/asm/rtlx.h 
+++ b/arch/mips/include/asm/rtlx.h @@ -1,13 +1,18 @@  /* - * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved. + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details.   * + * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd.   */ -  #ifndef __ASM_RTLX_H_  #define __ASM_RTLX_H_  #include <irq.h> +#define RTLX_MODULE_NAME "rtlx" +  #define LX_NODE_BASE 10  #define MIPS_CPU_RTLX_IRQ 0 @@ -15,18 +20,31 @@  #define RTLX_VERSION 2  #define RTLX_xID 0x12345600  #define RTLX_ID (RTLX_xID | RTLX_VERSION) +#define RTLX_BUFFER_SIZE 2048  #define RTLX_CHANNELS 8  #define RTLX_CHANNEL_STDIO	0  #define RTLX_CHANNEL_DBG	1  #define RTLX_CHANNEL_SYSIO	2 -extern int rtlx_open(int index, int can_sleep); -extern int rtlx_release(int index); -extern ssize_t rtlx_read(int index, void __user *buff, size_t count); -extern ssize_t rtlx_write(int index, const void __user *buffer, size_t count); -extern unsigned int rtlx_read_poll(int index, int can_sleep); -extern unsigned int rtlx_write_poll(int index); +void rtlx_starting(int vpe); +void rtlx_stopping(int vpe); + +int rtlx_open(int index, int can_sleep); +int rtlx_release(int index); +ssize_t rtlx_read(int index, void __user *buff, size_t count); +ssize_t rtlx_write(int index, const void __user *buffer, size_t count); +unsigned int rtlx_read_poll(int index, int can_sleep); +unsigned int rtlx_write_poll(int index); + +int __init rtlx_module_init(void); +void __exit rtlx_module_exit(void); + +void _interrupt_sp(void); + +extern struct vpe_notifications rtlx_notify; +extern const struct file_operations rtlx_fops; +extern void (*aprp_hook)(void);  enum rtlx_state {  	RTLX_STATE_UNUSED = 0, @@ -35,10 +53,15 @@ enum rtlx_state {  	RTLX_STATE_OPENED  }; -#define RTLX_BUFFER_SIZE 2048 +extern struct chan_waitqueues { +	
wait_queue_head_t rt_queue; +	wait_queue_head_t lx_queue; +	atomic_t in_open; +	struct mutex mutex; +} channel_wqs[RTLX_CHANNELS];  /* each channel supports read and write. -   linux (vpe0) reads lx_buffer	 and writes rt_buffer +   linux (vpe0) reads lx_buffer and writes rt_buffer     SP (vpe1) reads rt_buffer and writes lx_buffer  */  struct rtlx_channel { @@ -55,11 +78,11 @@ struct rtlx_channel {  	char *lx_buffer;  }; -struct rtlx_info { +extern struct rtlx_info {  	unsigned long id;  	enum rtlx_state state; +	int ap_int_pending;	/* Status of 0 or 1 for CONFIG_MIPS_CMP only */  	struct rtlx_channel channel[RTLX_CHANNELS]; -}; - +} *rtlx;  #endif /* __ASM_RTLX_H_ */ diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h index e26589ef36e..d7bfdeba9e8 100644 --- a/arch/mips/include/asm/setup.h +++ b/arch/mips/include/asm/setup.h @@ -5,6 +5,14 @@  extern void setup_early_printk(void); +#ifdef CONFIG_EARLY_PRINTK_8250 +extern void setup_8250_early_printk_port(unsigned long base, +	unsigned int reg_shift, unsigned int timeout); +#else +static inline void setup_8250_early_printk_port(unsigned long base, +	unsigned int reg_shift, unsigned int timeout) {} +#endif +  extern void set_handler(unsigned long offset, void *addr, unsigned long len);  extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); diff --git a/arch/mips/include/asm/sgi/ip22.h b/arch/mips/include/asm/sgi/ip22.h index 8db1a3588cf..87ec9eaa04e 100644 --- a/arch/mips/include/asm/sgi/ip22.h +++ b/arch/mips/include/asm/sgi/ip22.h @@ -69,6 +69,8 @@  #define SGI_EISA_IRQ	SGINT_LOCAL2 + 3	/* EISA interrupts */  #define SGI_KEYBD_IRQ	SGINT_LOCAL2 + 4	/* keyboard */  #define SGI_SERIAL_IRQ	SGINT_LOCAL2 + 5	/* onboard serial */ +#define SGI_GIOEXP0_IRQ	(SGINT_LOCAL2 + 6)	/* Indy GIO EXP0 */ +#define SGI_GIOEXP1_IRQ	(SGINT_LOCAL2 + 7)	/* Indy GIO EXP1 */  #define ip22_is_fullhouse()	(sgioc->sysid & SGIOC_SYSID_FULLHOUSE) diff --git a/arch/mips/include/asm/smp-cps.h 
b/arch/mips/include/asm/smp-cps.h new file mode 100644 index 00000000000..a06a08a9afc --- /dev/null +++ b/arch/mips/include/asm/smp-cps.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_SMP_CPS_H__ +#define __MIPS_ASM_SMP_CPS_H__ + +#ifndef __ASSEMBLY__ + +struct vpe_boot_config { +	unsigned long pc; +	unsigned long sp; +	unsigned long gp; +}; + +struct core_boot_config { +	atomic_t vpe_mask; +	struct vpe_boot_config *vpe_config; +}; + +extern struct core_boot_config *mips_cps_core_bootcfg; + +extern void mips_cps_core_entry(void); +extern void mips_cps_core_init(void); + +extern struct vpe_boot_config *mips_cps_boot_vpes(void); + +extern bool mips_cps_smp_in_use(void); + +extern void mips_cps_pm_save(void); +extern void mips_cps_pm_restore(void); + +#else /* __ASSEMBLY__ */ + +.extern mips_cps_bootcfg; + +#endif /* __ASSEMBLY__ */ +#endif /* __MIPS_ASM_SMP_CPS_H__ */ diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index ef2a8041e78..6ba1fb8b11e 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -13,6 +13,8 @@  #include <linux/errno.h> +#include <asm/mips-cm.h> +  #ifdef CONFIG_SMP  #include <linux/cpumask.h> @@ -24,7 +26,6 @@ struct plat_smp_ops {  	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);  	void (*init_secondary)(void);  	void (*smp_finish)(void); -	void (*cpus_done)(void);  	void (*boot_secondary)(int cpu, struct task_struct *idle);  	void (*smp_setup)(void);  	void (*prepare_cpus)(unsigned int max_cpus); @@ -43,6 +44,9 @@ static inline void plat_smp_setup(void)  	mp_ops->smp_setup();  } +extern void 
gic_send_ipi_single(int cpu, unsigned int action); +extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action); +  #else /* !CONFIG_SMP */  struct plat_smp_ops; @@ -76,6 +80,9 @@ static inline int register_cmp_smp_ops(void)  #ifdef CONFIG_MIPS_CMP  	extern struct plat_smp_ops cmp_smp_ops; +	if (!mips_cm_present()) +		return -ENODEV; +  	register_smp_ops(&cmp_smp_ops);  	return 0; @@ -97,4 +104,13 @@ static inline int register_vsmp_smp_ops(void)  #endif  } +#ifdef CONFIG_MIPS_CPS +extern int register_cps_smp_ops(void); +#else +static inline int register_cps_smp_ops(void) +{ +	return -ENODEV; +} +#endif +  #endif /* __ASM_SMP_OPS_H */ diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index eb600875848..b037334fca2 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -42,9 +42,13 @@ extern int __cpu_logical_map[NR_CPUS];  #define SMP_ICACHE_FLUSH	0x4  /* Used by kexec crashdump to save all cpu's state */  #define SMP_DUMP		0x8 +#define SMP_ASK_C0COUNT		0x10  extern volatile cpumask_t cpu_callin_map; +/* Mask of CPUs which are currently definitely operating coherently */ +extern cpumask_t cpu_coherent_mask; +  extern void asmlinkage smp_bootstrap(void);  /* diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h deleted file mode 100644 index e56b439b787..00000000000 --- a/arch/mips/include/asm/smtc.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef _ASM_SMTC_MT_H -#define _ASM_SMTC_MT_H - -/* - * Definitions for SMTC multitasking on MIPS MT cores - */ - -#include <asm/mips_mt.h> -#include <asm/smtc_ipi.h> - -/* - * System-wide SMTC status information - */ - -extern unsigned int smtc_status; - -#define SMTC_TLB_SHARED 0x00000001 -#define SMTC_MTC_ACTIVE 0x00000002 - -/* - * TLB/ASID Management information - */ - -#define MAX_SMTC_TLBS 2 -#define MAX_SMTC_ASIDS 256 -#if NR_CPUS <= 8 -typedef char asiduse; -#else -#if NR_CPUS <= 16 -typedef short asiduse; -#else -typedef long asiduse; -#endif 
-#endif - -/* - * VPE Management information - */ - -#define MAX_SMTC_VPES	MAX_SMTC_TLBS	/* FIXME: May not always be true. */ - -extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; - -struct mm_struct; -struct task_struct; - -void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); -void self_ipi(struct smtc_ipi *); -void smtc_flush_tlb_asid(unsigned long asid); -extern int smtc_build_cpu_map(int startslot); -extern void smtc_prepare_cpus(int cpus); -extern void smtc_smp_finish(void); -extern void smtc_boot_secondary(int cpu, struct task_struct *t); -extern void smtc_cpus_done(void); -extern void smtc_init_secondary(void); - - -/* - * Sharing the TLB between multiple VPEs means that the - * "random" index selection function is not allowed to - * select the current value of the Index register. To - * avoid additional TLB pressure, the Index registers - * are "parked" with an non-Valid value. - */ - -#define PARKED_INDEX	((unsigned int)0x80000000) - -/* - * Define low-level interrupt mask for IPIs, if necessary. - * By default, use SW interrupt 1, which requires no external - * hardware support, but which works only for single-core - * MIPS MT systems. - */ -#ifndef MIPS_CPU_IPI_IRQ -#define MIPS_CPU_IPI_IRQ 1 -#endif - -#endif /*  _ASM_SMTC_MT_H */ diff --git a/arch/mips/include/asm/smtc_ipi.h b/arch/mips/include/asm/smtc_ipi.h deleted file mode 100644 index 15278dbd7e7..00000000000 --- a/arch/mips/include/asm/smtc_ipi.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. 
- */ -#ifndef __ASM_SMTC_IPI_H -#define __ASM_SMTC_IPI_H - -#include <linux/spinlock.h> - -//#define SMTC_IPI_DEBUG - -#ifdef SMTC_IPI_DEBUG -#include <asm/mipsregs.h> -#include <asm/mipsmtregs.h> -#endif /* SMTC_IPI_DEBUG */ - -/* - * An IPI "message" - */ - -struct smtc_ipi { -	struct smtc_ipi *flink; -	int type; -	void *arg; -	int dest; -#ifdef	SMTC_IPI_DEBUG -	int sender; -	long stamp; -#endif /* SMTC_IPI_DEBUG */ -}; - -/* - * Defined IPI Types - */ - -#define LINUX_SMP_IPI 1 -#define SMTC_CLOCK_TICK 2 -#define IRQ_AFFINITY_IPI 3 - -/* - * A queue of IPI messages - */ - -struct smtc_ipi_q { -	struct smtc_ipi *head; -	spinlock_t lock; -	struct smtc_ipi *tail; -	int depth; -	int resched_flag;	/* reschedule already queued */ -}; - -static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) -{ -	unsigned long flags; - -	spin_lock_irqsave(&q->lock, flags); -	if (q->head == NULL) -		q->head = q->tail = p; -	else -		q->tail->flink = p; -	p->flink = NULL; -	q->tail = p; -	q->depth++; -#ifdef	SMTC_IPI_DEBUG -	p->sender = read_c0_tcbind(); -	p->stamp = read_c0_count(); -#endif /* SMTC_IPI_DEBUG */ -	spin_unlock_irqrestore(&q->lock, flags); -} - -static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q) -{ -	struct smtc_ipi *p; - -	if (q->head == NULL) -		p = NULL; -	else { -		p = q->head; -		q->head = q->head->flink; -		q->depth--; -		/* Arguably unnecessary, but leaves queue cleaner */ -		if (q->head == NULL) -			q->tail = NULL; -	} - -	return p; -} - -static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) -{ -	unsigned long flags; -	struct smtc_ipi *p; - -	spin_lock_irqsave(&q->lock, flags); -	p = __smtc_ipi_dq(q); -	spin_unlock_irqrestore(&q->lock, flags); - -	return p; -} - -static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) -{ -	unsigned long flags; - -	spin_lock_irqsave(&q->lock, flags); -	if (q->head == NULL) { -		q->head = q->tail = p; -		p->flink = NULL; -	} else { -		p->flink = q->head; -		q->head = p; -	
} -	q->depth++; -	spin_unlock_irqrestore(&q->lock, flags); -} - -static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) -{ -	unsigned long flags; -	int retval; - -	spin_lock_irqsave(&q->lock, flags); -	retval = q->depth; -	spin_unlock_irqrestore(&q->lock, flags); -	return retval; -} - -extern void smtc_send_ipi(int cpu, int type, unsigned int action); - -#endif /* __ASM_SMTC_IPI_H */ diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h deleted file mode 100644 index 25da651f1f5..00000000000 --- a/arch/mips/include/asm/smtc_proc.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Definitions for SMTC /proc entries - * Copyright(C) 2005 MIPS Technologies Inc. - */ -#ifndef __ASM_SMTC_PROC_H -#define __ASM_SMTC_PROC_H - -/* - * per-"CPU" statistics - */ - -struct smtc_cpu_proc { -	unsigned long timerints; -	unsigned long selfipis; -}; - -extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; - -/* Count of number of recoveries of "stolen" FPU access rights on 34K */ - -extern atomic_t smtc_fpu_recoveries; - -#endif /* __ASM_SMTC_PROC_H */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 23fc95e6567..b188c797565 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -17,23 +17,14 @@  #include <asm/asmmacro.h>  #include <asm/mipsregs.h>  #include <asm/asm-offsets.h> +#include <asm/thread_info.h> -/* - * For SMTC kernel, global IE should be left set, and interrupts - * controlled exclusively via IXMT. 
- */ -#ifdef CONFIG_MIPS_MT_SMTC -#define STATMASK 0x1e -#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)  #define STATMASK 0x3f  #else  #define STATMASK 0x1f  #endif -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#endif /* CONFIG_MIPS_MT_SMTC */ -  		.macro	SAVE_AT  		.set	push  		.set	noat @@ -93,21 +84,8 @@  		.endm  #ifdef CONFIG_SMP -#ifdef CONFIG_MIPS_MT_SMTC -#define PTEBASE_SHIFT	19	/* TCBIND */ -#define CPU_ID_REG CP0_TCBIND -#define CPU_ID_MFC0 mfc0 -#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT) -#define PTEBASE_SHIFT	48	/* XCONTEXT */ -#define CPU_ID_REG CP0_XCONTEXT -#define CPU_ID_MFC0 MFC0 -#else -#define PTEBASE_SHIFT	23	/* CONTEXT */ -#define CPU_ID_REG CP0_CONTEXT -#define CPU_ID_MFC0 MFC0 -#endif  		.macro	get_saved_sp	/* SMP variation */ -		CPU_ID_MFC0	k0, CPU_ID_REG +		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG  #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)  		lui	k1, %hi(kernelsp)  #else @@ -117,17 +95,17 @@  		daddiu	k1, %hi(kernelsp)  		dsll	k1, 16  #endif -		LONG_SRL	k0, PTEBASE_SHIFT +		LONG_SRL	k0, SMP_CPUID_PTRSHIFT  		LONG_ADDU	k1, k0  		LONG_L	k1, %lo(kernelsp)(k1)  		.endm  		.macro	set_saved_sp stackp temp temp2 -		CPU_ID_MFC0	\temp, CPU_ID_REG -		LONG_SRL	\temp, PTEBASE_SHIFT +		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG +		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT  		LONG_S	\stackp, kernelsp(\temp)  		.endm -#else +#else /* !CONFIG_SMP */  		.macro	get_saved_sp	/* Uniprocessor variation */  #ifdef CONFIG_CPU_JUMP_WORKAROUNDS  		/* @@ -198,16 +176,6 @@  		mfc0	v1, CP0_STATUS  		LONG_S	$2, PT_R2(sp)  		LONG_S	v1, PT_STATUS(sp) -#ifdef CONFIG_MIPS_MT_SMTC -		/* -		 * Ideally, these instructions would be shuffled in -		 * to cover the pipeline delay. 
-		 */ -		.set	mips32 -		mfc0	k0, CP0_TCSTATUS -		.set	mips0 -		LONG_S	k0, PT_TCSTATUS(sp) -#endif /* CONFIG_MIPS_MT_SMTC */  		LONG_S	$4, PT_R4(sp)  		mfc0	v1, CP0_CAUSE  		LONG_S	$5, PT_R5(sp) @@ -333,36 +301,6 @@  		.set	push  		.set	reorder  		.set	noat -#ifdef CONFIG_MIPS_MT_SMTC -		.set	mips32r2 -		/* -		 * We need to make sure the read-modify-write -		 * of Status below isn't perturbed by an interrupt -		 * or cross-TC access, so we need to do at least a DMT, -		 * protected by an interrupt-inhibit. But setting IXMT -		 * also creates a few-cycle window where an IPI could -		 * be queued and not be detected before potentially -		 * returning to a WAIT or user-mode loop. It must be -		 * replayed. -		 * -		 * We're in the middle of a context switch, and -		 * we can't dispatch it directly without trashing -		 * some registers, so we'll try to detect this unlikely -		 * case and program a software interrupt in the VPE, -		 * as would be done for a cross-VPE IPI.  To accommodate -		 * the handling of that case, we're doing a DVPE instead -		 * of just a DMT here to protect against other threads. -		 * This is a lot of cruft to cover a tiny window. -		 * If you can find a better design, implement it! -		 * -		 */ -		mfc0	v0, CP0_TCSTATUS -		ori	v0, TCSTATUS_IXMT -		mtc0	v0, CP0_TCSTATUS -		_ehb -		DVPE	5				# dvpe a1 -		jal	mips_ihb -#endif /* CONFIG_MIPS_MT_SMTC */  		mfc0	a0, CP0_STATUS  		ori	a0, STATMASK  		xori	a0, STATMASK @@ -374,59 +312,6 @@  		and	v0, v1  		or	v0, a0  		mtc0	v0, CP0_STATUS -#ifdef CONFIG_MIPS_MT_SMTC -/* - * Only after EXL/ERL have been restored to status can we - * restore TCStatus.IXMT. - */ -		LONG_L	v1, PT_TCSTATUS(sp) -		_ehb -		mfc0	a0, CP0_TCSTATUS -		andi	v1, TCSTATUS_IXMT -		bnez	v1, 0f - -/* - * We'd like to detect any IPIs queued in the tiny window - * above and request an software interrupt to service them - * when we ERET. 
- * - * Computing the offset into the IPIQ array of the executing - * TC's IPI queue in-line would be tedious.  We use part of - * the TCContext register to hold 16 bits of offset that we - * can add in-line to find the queue head. - */ -		mfc0	v0, CP0_TCCONTEXT -		la	a2, IPIQ -		srl	v0, v0, 16 -		addu	a2, a2, v0 -		LONG_L	v0, 0(a2) -		beqz	v0, 0f -/* - * If we have a queue, provoke dispatch within the VPE by setting C_SW1 - */ -		mfc0	v0, CP0_CAUSE -		ori	v0, v0, C_SW1 -		mtc0	v0, CP0_CAUSE -0: -		/* -		 * This test should really never branch but -		 * let's be prudent here.  Having atomized -		 * the shared register modifications, we can -		 * now EVPE, and must do so before interrupts -		 * are potentially re-enabled. -		 */ -		andi	a1, a1, MVPCONTROL_EVP -		beqz	a1, 1f -		evpe -1: -		/* We know that TCStatua.IXMT should be set from above */ -		xori	a0, a0, TCSTATUS_IXMT -		or	a0, a0, v1 -		mtc0	a0, CP0_TCSTATUS -		_ehb - -		.set	mips0 -#endif /* CONFIG_MIPS_MT_SMTC */  		LONG_L	v1, PT_EPC(sp)  		MTC0	v1, CP0_EPC  		LONG_L	$31, PT_R31(sp) @@ -447,7 +332,7 @@  		.macro	RESTORE_SP_AND_RET  		LONG_L	sp, PT_R29(sp) -		.set	mips3 +		.set	arch=r4000  		eret  		.set	mips0  		.endm @@ -479,33 +364,11 @@   * Set cp0 enable bit as sign that we're running on the kernel stack   */  		.macro	CLI -#if !defined(CONFIG_MIPS_MT_SMTC)  		mfc0	t0, CP0_STATUS  		li	t1, ST0_CU0 | STATMASK  		or	t0, t1  		xori	t0, STATMASK  		mtc0	t0, CP0_STATUS -#else /* CONFIG_MIPS_MT_SMTC */ -		/* -		 * For SMTC, we need to set privilege -		 * and disable interrupts only for the -		 * current TC, using the TCStatus register. 
-		 */ -		mfc0	t0, CP0_TCSTATUS -		/* Fortunately CU 0 is in the same place in both registers */ -		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ -		li	t1, ST0_CU0 | 0x08001c00 -		or	t0, t1 -		/* Clear TKSU, leave IXMT */ -		xori	t0, 0x00001800 -		mtc0	t0, CP0_TCSTATUS -		_ehb -		/* We need to leave the global IE bit set, but clear EXL...*/ -		mfc0	t0, CP0_STATUS -		ori	t0, ST0_EXL | ST0_ERL -		xori	t0, ST0_EXL | ST0_ERL -		mtc0	t0, CP0_STATUS -#endif /* CONFIG_MIPS_MT_SMTC */  		irq_disable_hazard  		.endm @@ -514,35 +377,11 @@   * Set cp0 enable bit as sign that we're running on the kernel stack   */  		.macro	STI -#if !defined(CONFIG_MIPS_MT_SMTC)  		mfc0	t0, CP0_STATUS  		li	t1, ST0_CU0 | STATMASK  		or	t0, t1  		xori	t0, STATMASK & ~1  		mtc0	t0, CP0_STATUS -#else /* CONFIG_MIPS_MT_SMTC */ -		/* -		 * For SMTC, we need to set privilege -		 * and enable interrupts only for the -		 * current TC, using the TCStatus register. -		 */ -		_ehb -		mfc0	t0, CP0_TCSTATUS -		/* Fortunately CU 0 is in the same place in both registers */ -		/* Set TCU0, TKSU (for later inversion) and IXMT */ -		li	t1, ST0_CU0 | 0x08001c00 -		or	t0, t1 -		/* Clear TKSU *and* IXMT */ -		xori	t0, 0x00001c00 -		mtc0	t0, CP0_TCSTATUS -		_ehb -		/* We need to leave the global IE bit set, but clear EXL...*/ -		mfc0	t0, CP0_STATUS -		ori	t0, ST0_EXL -		xori	t0, ST0_EXL -		mtc0	t0, CP0_STATUS -		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ -#endif /* CONFIG_MIPS_MT_SMTC */  		irq_enable_hazard  		.endm @@ -552,32 +391,6 @@   * Set cp0 enable bit as sign that we're running on the kernel stack   */  		.macro	KMODE -#ifdef CONFIG_MIPS_MT_SMTC -		/* -		 * This gets baroque in SMTC.  We want to -		 * protect the non-atomic clearing of EXL -		 * with DMT/EMT, but we don't want to take -		 * an interrupt while DMT is still in effect. 
-		 */ - -		/* KMODE gets invoked from both reorder and noreorder code */ -		.set	push -		.set	mips32r2 -		.set	noreorder -		mfc0	v0, CP0_TCSTATUS -		andi	v1, v0, TCSTATUS_IXMT -		ori	v0, TCSTATUS_IXMT -		mtc0	v0, CP0_TCSTATUS -		_ehb -		DMT	2				# dmt	v0 -		/* -		 * We don't know a priori if ra is "live" -		 */ -		move	t0, ra -		jal	mips_ihb -		nop	/* delay slot */ -		move	ra, t0 -#endif /* CONFIG_MIPS_MT_SMTC */  		mfc0	t0, CP0_STATUS  		li	t1, ST0_CU0 | (STATMASK & ~1)  #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) @@ -588,25 +401,6 @@  		or	t0, t1  		xori	t0, STATMASK & ~1  		mtc0	t0, CP0_STATUS -#ifdef CONFIG_MIPS_MT_SMTC -		_ehb -		andi	v0, v0, VPECONTROL_TE -		beqz	v0, 2f -		nop	/* delay slot */ -		emt -2: -		mfc0	v0, CP0_TCSTATUS -		/* Clear IXMT, then OR in previous value */ -		ori	v0, TCSTATUS_IXMT -		xori	v0, TCSTATUS_IXMT -		or	v0, v1, v0 -		mtc0	v0, CP0_TCSTATUS -		/* -		 * irq_disable_hazard below should expand to EHB -		 * on 24K/34K CPUS -		 */ -		.set pop -#endif /* CONFIG_MIPS_MT_SMTC */  		irq_disable_hazard  		.endm diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index eb0af15ac65..495c1041a2c 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -16,14 +16,29 @@  #include <asm/watch.h>  #include <asm/dsp.h>  #include <asm/cop2.h> +#include <asm/msa.h>  struct task_struct; -/* - * switch_to(n) should switch tasks to task nr n, first - * checking that n isn't the current task, in which case it does nothing. +enum { +	FP_SAVE_NONE	= 0, +	FP_SAVE_VECTOR	= -1, +	FP_SAVE_SCALAR	= 1, +}; + +/** + * resume - resume execution of a task + * @prev:	The task previously executed. + * @next:	The task to begin executing. + * @next_ti:	task_thread_info(next). + * @fp_save:	Which, if any, FP context to save for prev. + * + * This function is used whilst scheduling to save the context of prev & load + * the context of next. Returns prev.   
*/ -extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu); +extern asmlinkage struct task_struct *resume(struct task_struct *prev, +		struct task_struct *next, struct thread_info *next_ti, +		s32 fp_save);  extern unsigned int ll_bit;  extern struct task_struct *ll_task; @@ -67,7 +82,8 @@ do {									\  #define switch_to(prev, next, last)					\  do {									\ -	u32 __usedfpu, __c0_stat;					\ +	u32 __c0_stat;							\ +	s32 __fpsave = FP_SAVE_NONE;					\  	__mips_mt_fpaff_switch_to(prev);				\  	if (cpu_has_dsp)						\  		__save_dsp(prev);					\ @@ -80,8 +96,12 @@ do {									\  		write_c0_status(__c0_stat & ~ST0_CU2);			\  	}								\  	__clear_software_ll_bit();					\ -	__usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU);	\ -	(last) = resume(prev, next, task_thread_info(next), __usedfpu); \ +	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\ +		__fpsave = FP_SAVE_SCALAR;				\ +	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\ +		__fpsave = FP_SAVE_VECTOR;				\ +	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\ +	disable_msa();							\  } while (0)  #define finish_arch_switch(prev)					\ diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h new file mode 100644 index 00000000000..17960fe7a8c --- /dev/null +++ b/arch/mips/include/asm/syscall.h @@ -0,0 +1,145 @@ +/* + * Access to user system call parameters and results + * + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * See asm-generic/syscall.h for descriptions of what we must do here. 
+ * + * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org> + */ + +#ifndef __ASM_MIPS_SYSCALL_H +#define __ASM_MIPS_SYSCALL_H + +#include <linux/compiler.h> +#include <uapi/linux/audit.h> +#include <linux/elf-em.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <asm/ptrace.h> +#include <asm/unistd.h> + +#ifndef __NR_syscall /* Only defined if _MIPS_SIM == _MIPS_SIM_ABI32 */ +#define __NR_syscall 4000 +#endif + +static inline long syscall_get_nr(struct task_struct *task, +				  struct pt_regs *regs) +{ +	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ +	if ((config_enabled(CONFIG_32BIT) || +	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) && +	    (regs->regs[2] == __NR_syscall)) +		return regs->regs[4]; +	else +		return regs->regs[2]; +} + +static inline unsigned long mips_get_syscall_arg(unsigned long *arg, +	struct task_struct *task, struct pt_regs *regs, unsigned int n) +{ +	unsigned long usp __maybe_unused = regs->regs[29]; + +	switch (n) { +	case 0: case 1: case 2: case 3: +		*arg = regs->regs[4 + n]; + +		return 0; + +#ifdef CONFIG_32BIT +	case 4: case 5: case 6: case 7: +		return get_user(*arg, (int *)usp + n); +#endif + +#ifdef CONFIG_64BIT +	case 4: case 5: case 6: case 7: +#ifdef CONFIG_MIPS32_O32 +		if (test_thread_flag(TIF_32BIT_REGS)) +			return get_user(*arg, (int *)usp + n); +		else +#endif +			*arg = regs->regs[4 + n]; + +		return 0; +#endif + +	default: +		BUG(); +	} + +	unreachable(); +} + +static inline long syscall_get_return_value(struct task_struct *task, +					    struct pt_regs *regs) +{ +	return regs->regs[2]; +} + +static inline void syscall_rollback(struct task_struct *task, +				    struct pt_regs *regs) +{ +	/* Do nothing */ +} + +static inline void syscall_set_return_value(struct task_struct *task, +					    struct pt_regs *regs, +					    int error, long val) +{ +	if (error) { +		regs->regs[2] = -error; +		regs->regs[7] = -1; +	} else { +		regs->regs[2] = val; +		regs->regs[7] 
= 0; +	} +} + +static inline void syscall_get_arguments(struct task_struct *task, +					 struct pt_regs *regs, +					 unsigned int i, unsigned int n, +					 unsigned long *args) +{ +	int ret; +	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ +	if ((config_enabled(CONFIG_32BIT) || +	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) && +	    (regs->regs[2] == __NR_syscall)) { +		i++; +		n++; +	} + +	while (n--) +		ret |= mips_get_syscall_arg(args++, task, regs, i++); + +	/* +	 * No way to communicate an error because this is a void function. +	 */ +#if 0 +	return ret; +#endif +} + +extern const unsigned long sys_call_table[]; +extern const unsigned long sys32_call_table[]; +extern const unsigned long sysn32_call_table[]; + +static inline int syscall_get_arch(void) +{ +	int arch = EM_MIPS; +#ifdef CONFIG_64BIT +	if (!test_thread_flag(TIF_32BIT_REGS)) +		arch |= __AUDIT_ARCH_64BIT; +	if (test_thread_flag(TIF_32BIT_ADDR)) +		arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; +#endif +#if defined(__LITTLE_ENDIAN) +	arch |=  __AUDIT_ARCH_LE; +#endif +	return arch; +} + +#endif	/* __ASM_MIPS_SYSCALL_H */ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 61215a34acc..7de865805de 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -92,8 +92,6 @@ static inline struct thread_info *current_thread_info(void)  #define STACK_WARN	(THREAD_SIZE / 8) -#define PREEMPT_ACTIVE		0x10000000 -  /*   * thread information flags   * - these are process state flags that various assembly files may need to @@ -112,10 +110,14 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_NOHZ		19	/* in adaptive nohz mode */  #define TIF_FIXADE		20	/* Fix address errors in software */  #define TIF_LOGADE		21	/* Log address errors to syslog */ -#define TIF_32BIT_REGS		22	/* also implies 16/32 fprs */ +#define TIF_32BIT_REGS		22	/* 32-bit general purpose registers */  #define TIF_32BIT_ADDR		23	/* 
32-bit address space (o32/n32) */  #define TIF_FPUBOUND		24	/* thread bound to FPU-full CPU set */  #define TIF_LOAD_WATCH		25	/* If set, load watch registers */ +#define TIF_SYSCALL_TRACEPOINT	26	/* syscall tracepoint instrumentation */ +#define TIF_32BIT_FPREGS	27	/* 32-bit floating point registers */ +#define TIF_USEDMSA		29	/* MSA has been used this quantum */ +#define TIF_MSA_CTX_LIVE	30	/* MSA context must be preserved */  #define TIF_SYSCALL_TRACE	31	/* syscall trace active */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE) @@ -132,21 +134,49 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_32BIT_ADDR		(1<<TIF_32BIT_ADDR)  #define _TIF_FPUBOUND		(1<<TIF_FPUBOUND)  #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH) +#define _TIF_32BIT_FPREGS	(1<<TIF_32BIT_FPREGS) +#define _TIF_USEDMSA		(1<<TIF_USEDMSA) +#define _TIF_MSA_CTX_LIVE	(1<<TIF_MSA_CTX_LIVE) +#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)  #define _TIF_WORK_SYSCALL_ENTRY	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\ -				 _TIF_SYSCALL_AUDIT) +				 _TIF_SYSCALL_AUDIT | \ +				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)  /* work to do in syscall_trace_leave() */  #define _TIF_WORK_SYSCALL_EXIT	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\ -				 _TIF_SYSCALL_AUDIT) +				 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)  /* work to do on interrupt/exception return */  #define _TIF_WORK_MASK		\  	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)  /* work to do on any return to u-space */  #define _TIF_ALLWORK_MASK	(_TIF_NOHZ | _TIF_WORK_MASK |		\ -				 _TIF_WORK_SYSCALL_EXIT) +				 _TIF_WORK_SYSCALL_EXIT |		\ +				 _TIF_SYSCALL_TRACEPOINT) -#endif /* __KERNEL__ */ +/* + * We stash processor id into a COP0 register to retrieve it fast + * at kernel exception entry. 
+ */ +#if   defined(CONFIG_MIPS_PGD_C0_CONTEXT) +#define SMP_CPUID_REG		20, 0	/* XCONTEXT */ +#define ASM_SMP_CPUID_REG	$20 +#define SMP_CPUID_PTRSHIFT	48 +#else +#define SMP_CPUID_REG		4, 0	/* CONTEXT */ +#define ASM_SMP_CPUID_REG	$4 +#define SMP_CPUID_PTRSHIFT	23 +#endif +#ifdef CONFIG_64BIT +#define SMP_CPUID_REGSHIFT	(SMP_CPUID_PTRSHIFT + 3) +#else +#define SMP_CPUID_REGSHIFT	(SMP_CPUID_PTRSHIFT + 2) +#endif + +#define ASM_CPUID_MFC0		MFC0 +#define UASM_i_CPUID_MFC0	UASM_i_MFC0 + +#endif /* __KERNEL__ */  #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index 2d7b9df4542..8f3047d611e 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h @@ -52,14 +52,11 @@ extern int (*perf_irq)(void);   */  extern unsigned int __weak get_c0_compare_int(void);  extern int r4k_clockevent_init(void); -extern int smtc_clockevent_init(void);  extern int gic_clockevent_init(void);  static inline int mips_clockevent_init(void)  { -#ifdef CONFIG_MIPS_MT_SMTC -	return smtc_clockevent_init(); -#elif defined(CONFIG_CEVT_GIC) +#if   defined(CONFIG_CEVT_GIC)  	return (gic_clockevent_init() | r4k_clockevent_init());  #elif defined(CONFIG_CEVT_R4K)  	return r4k_clockevent_init(); @@ -75,7 +72,7 @@ extern int init_r4k_clocksource(void);  static inline int init_mips_clocksource(void)  { -#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC) +#ifdef CONFIG_CSRC_R4K  	return init_r4k_clocksource();  #else  	return 0; diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h index 6529704aa73..b05bb70a2e4 100644 --- a/arch/mips/include/asm/timex.h +++ b/arch/mips/include/asm/timex.h @@ -4,13 +4,19 @@   * for more details.   *   * Copyright (C) 1998, 1999, 2003 by Ralf Baechle + * Copyright (C) 2014 by Maciej W. 
Rozycki   */  #ifndef _ASM_TIMEX_H  #define _ASM_TIMEX_H  #ifdef __KERNEL__ +#include <linux/compiler.h> + +#include <asm/cpu.h> +#include <asm/cpu-features.h>  #include <asm/mipsregs.h> +#include <asm/cpu-type.h>  /*   * This is the clock rate of the i8253 PIT.  A MIPS system may not have @@ -33,10 +39,64 @@  typedef unsigned int cycles_t; +/* + * On R4000/R4400 before version 5.0 an erratum exists such that if the + * cycle counter is read in the exact moment that it is matching the + * compare register, no interrupt will be generated. + * + * There is a suggested workaround and also the erratum can't strike if + * the compare interrupt isn't being used as the clock source device. + * However for now the implementaton of this function doesn't get these + * fine details right. + */ +static inline int can_use_mips_counter(unsigned int prid) +{ +	int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY; + +	if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter) +		return 0; +	else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) +		return 1; +	else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp)) +		return 1; +	/* Make sure we don't peek at cpu_data[0].options in the fast path! */ +	if (!__builtin_constant_p(cpu_has_counter)) +		asm volatile("" : "=m" (cpu_data[0].options)); +	if (likely(cpu_has_counter && +		   prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0)))) +		return 1; +	else +		return 0; +} +  static inline cycles_t get_cycles(void)  { -	return 0; +	if (can_use_mips_counter(read_c0_prid())) +		return read_c0_count(); +	else +		return 0;	/* no usable counter */ +} + +/* + * Like get_cycles - but where c0_count is not available we desperately + * use c0_random in an attempt to get at least a little bit of entropy. + * + * R6000 and R6000A neither have a count register nor a random register. + * That leaves no entropy source in the CPU itself. 
+ */ +static inline unsigned long random_get_entropy(void) +{ +	unsigned int prid = read_c0_prid(); +	unsigned int imp = prid & PRID_IMP_MASK; + +	if (can_use_mips_counter(prid)) +		return read_c0_count(); +	else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A)) +		return read_c0_random(); +	else +		return 0;	/* no usable register */  } +#define random_get_entropy random_get_entropy  #endif /* __KERNEL__ */ diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h index c67842bc8ef..4a2349302b5 100644 --- a/arch/mips/include/asm/tlb.h +++ b/arch/mips/include/asm/tlb.h @@ -18,6 +18,10 @@   */  #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) +#define UNIQUE_ENTRYHI(idx)						\ +		((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) |		\ +		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) +  #include <asm-generic/tlb.h>  #endif /* __ASM_TLB_H */ diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 12609a17dc8..20ea4859c82 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h @@ -10,8 +10,4 @@  #include <topology.h> -#ifdef CONFIG_SMP -#define smt_capable()	(smp_num_siblings > 1) -#endif -  #endif /* __ASM_TOPOLOGY_H */ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index f3fa3750f57..a1095109023 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -6,6 +6,7 @@   * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle   * Copyright (C) 1999, 2000 Silicon Graphics, Inc.   * Copyright (C) 2007  Maciej W. Rozycki + * Copyright (C) 2014, Imagination Technologies Ltd.   */  #ifndef _ASM_UACCESS_H  #define _ASM_UACCESS_H @@ -13,6 +14,7 @@  #include <linux/kernel.h>  #include <linux/errno.h>  #include <linux/thread_info.h> +#include <asm/asm-eva.h>  /*   * The fs value determines whether argument validity checking should be @@ -222,11 +224,44 @@ struct __large_struct { unsigned long buf[100]; };   * Yuck.  
We need two variants, one for 64bit operation and one   * for 32 bit mode and old iron.   */ +#ifndef CONFIG_EVA +#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) +#else +/* + * Kernel specific functions for EVA. We need to use normal load instructions + * to read data from kernel when operating in EVA mode. We use these macros to + * avoid redefining __get_user_asm for EVA. + */ +#undef _loadd +#undef _loadw +#undef _loadh +#undef _loadb  #ifdef CONFIG_32BIT -#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr) +#define _loadd			_loadw +#else +#define _loadd(reg, addr)	"ld " reg ", " addr +#endif +#define _loadw(reg, addr)	"lw " reg ", " addr +#define _loadh(reg, addr)	"lh " reg ", " addr +#define _loadb(reg, addr)	"lb " reg ", " addr + +#define __get_kernel_common(val, size, ptr)				\ +do {									\ +	switch (size) {							\ +	case 1: __get_data_asm(val, _loadb, ptr); break;		\ +	case 2: __get_data_asm(val, _loadh, ptr); break;		\ +	case 4: __get_data_asm(val, _loadw, ptr); break;		\ +	case 8: __GET_DW(val, _loadd, ptr); break;			\ +	default: __get_user_unknown(); break;				\ +	}								\ +} while (0) +#endif + +#ifdef CONFIG_32BIT +#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)  #endif  #ifdef CONFIG_64BIT -#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr) +#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)  #endif  extern void __get_user_unknown(void); @@ -234,10 +269,10 @@ extern void __get_user_unknown(void);  #define __get_user_common(val, size, ptr)				\  do {									\  	switch (size) {							\ -	case 1: __get_user_asm(val, "lb", ptr); break;			\ -	case 2: __get_user_asm(val, "lh", ptr); break;			\ -	case 4: __get_user_asm(val, "lw", ptr); break;			\ -	case 8: __GET_USER_DW(val, ptr); break;				\ +	case 1: __get_data_asm(val, user_lb, ptr); break;		\ +	case 2: __get_data_asm(val, user_lh, ptr); break;		\ +	case 4: __get_data_asm(val, user_lw, ptr); break;		\ +	case 8: 
__GET_DW(val, user_ld, ptr); break;			\  	default: __get_user_unknown(); break;				\  	}								\  } while (0) @@ -246,8 +281,12 @@ do {									\  ({									\  	int __gu_err;							\  									\ -	__chk_user_ptr(ptr);						\ -	__get_user_common((x), size, ptr);				\ +	if (segment_eq(get_fs(), get_ds())) {				\ +		__get_kernel_common((x), size, ptr);			\ +	} else {							\ +		__chk_user_ptr(ptr);					\ +		__get_user_common((x), size, ptr);			\ +	}								\  	__gu_err;							\  }) @@ -257,18 +296,22 @@ do {									\  	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\  									\  	might_fault();							\ -	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\ -		__get_user_common((x), size, __gu_ptr);			\ +	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\ +		if (segment_eq(get_fs(), get_ds()))			\ +			__get_kernel_common((x), size, __gu_ptr);	\ +		else							\ +			__get_user_common((x), size, __gu_ptr);		\ +	}								\  									\  	__gu_err;							\  }) -#define __get_user_asm(val, insn, addr)					\ +#define __get_data_asm(val, insn, addr)					\  {									\  	long __gu_tmp;							\  									\  	__asm__ __volatile__(						\ -	"1:	" insn "	%1, %3				\n"	\ +	"1:	"insn("%1", "%3")"				\n"	\  	"2:							\n"	\  	"	.insn						\n"	\  	"	.section .fixup,\"ax\"				\n"	\ @@ -287,7 +330,7 @@ do {									\  /*   * Get a long long 64 using 32 bit registers.   
*/ -#define __get_user_asm_ll32(val, addr)					\ +#define __get_data_asm_ll32(val, insn, addr)				\  {									\  	union {								\  		unsigned long long	l;				\ @@ -295,8 +338,8 @@ do {									\  	} __gu_tmp;							\  									\  	__asm__ __volatile__(						\ -	"1:	lw	%1, (%3)				\n"	\ -	"2:	lw	%D1, 4(%3)				\n"	\ +	"1:	" insn("%1", "(%3)")"				\n"	\ +	"2:	" insn("%D1", "4(%3)")"				\n"	\  	"3:							\n"	\  	"	.insn						\n"	\  	"	.section	.fixup,\"ax\"			\n"	\ @@ -315,30 +358,73 @@ do {									\  	(val) = __gu_tmp.t;						\  } +#ifndef CONFIG_EVA +#define __put_kernel_common(ptr, size) __put_user_common(ptr, size) +#else +/* + * Kernel specific functions for EVA. We need to use normal load instructions + * to read data from kernel when operating in EVA mode. We use these macros to + * avoid redefining __get_data_asm for EVA. + */ +#undef _stored +#undef _storew +#undef _storeh +#undef _storeb +#ifdef CONFIG_32BIT +#define _stored			_storew +#else +#define _stored(reg, addr)	"ld " reg ", " addr +#endif + +#define _storew(reg, addr)	"sw " reg ", " addr +#define _storeh(reg, addr)	"sh " reg ", " addr +#define _storeb(reg, addr)	"sb " reg ", " addr + +#define __put_kernel_common(ptr, size)					\ +do {									\ +	switch (size) {							\ +	case 1: __put_data_asm(_storeb, ptr); break;			\ +	case 2: __put_data_asm(_storeh, ptr); break;			\ +	case 4: __put_data_asm(_storew, ptr); break;			\ +	case 8: __PUT_DW(_stored, ptr); break;				\ +	default: __put_user_unknown(); break;				\ +	}								\ +} while(0) +#endif +  /*   * Yuck.  We need two variants, one for 64bit operation and one   * for 32 bit mode and old iron.   
*/  #ifdef CONFIG_32BIT -#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) +#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)  #endif  #ifdef CONFIG_64BIT -#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr) +#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)  #endif +#define __put_user_common(ptr, size)					\ +do {									\ +	switch (size) {							\ +	case 1: __put_data_asm(user_sb, ptr); break;			\ +	case 2: __put_data_asm(user_sh, ptr); break;			\ +	case 4: __put_data_asm(user_sw, ptr); break;			\ +	case 8: __PUT_DW(user_sd, ptr); break;				\ +	default: __put_user_unknown(); break;				\ +	}								\ +} while (0) +  #define __put_user_nocheck(x, ptr, size)				\  ({									\  	__typeof__(*(ptr)) __pu_val;					\  	int __pu_err = 0;						\  									\ -	__chk_user_ptr(ptr);						\  	__pu_val = (x);							\ -	switch (size) {							\ -	case 1: __put_user_asm("sb", ptr); break;			\ -	case 2: __put_user_asm("sh", ptr); break;			\ -	case 4: __put_user_asm("sw", ptr); break;			\ -	case 8: __PUT_USER_DW(ptr); break;				\ -	default: __put_user_unknown(); break;				\ +	if (segment_eq(get_fs(), get_ds())) {				\ +		__put_kernel_common(ptr, size);				\ +	} else {							\ +		__chk_user_ptr(ptr);					\ +		__put_user_common(ptr, size);				\  	}								\  	__pu_err;							\  }) @@ -351,21 +437,19 @@ do {									\  									\  	might_fault();							\  	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\ -		switch (size) {						\ -		case 1: __put_user_asm("sb", __pu_addr); break;		\ -		case 2: __put_user_asm("sh", __pu_addr); break;		\ -		case 4: __put_user_asm("sw", __pu_addr); break;		\ -		case 8: __PUT_USER_DW(__pu_addr); break;		\ -		default: __put_user_unknown(); break;			\ -		}							\ +		if (segment_eq(get_fs(), get_ds()))			\ +			__put_kernel_common(__pu_addr, size);		\ +		else							\ +			__put_user_common(__pu_addr, size);		\  	}								\ +									\  	__pu_err;							\  }) -#define __put_user_asm(insn, ptr)					\ +#define __put_data_asm(insn, ptr)	
				\  {									\  	__asm__ __volatile__(						\ -	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\ +	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\  	"2:							\n"	\  	"	.insn						\n"	\  	"	.section	.fixup,\"ax\"			\n"	\ @@ -380,11 +464,11 @@ do {									\  	  "i" (-EFAULT));						\  } -#define __put_user_asm_ll32(ptr)					\ +#define __put_data_asm_ll32(insn, ptr)					\  {									\  	__asm__ __volatile__(						\ -	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\ -	"2:	sw	%D2, 4(%3)				\n"	\ +	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\ +	"2:	"insn("%D2", "4(%3)")"				\n"	\  	"3:							\n"	\  	"	.insn						\n"	\  	"	.section	.fixup,\"ax\"			\n"	\ @@ -403,6 +487,11 @@ do {									\  extern void __put_user_unknown(void);  /* + * ul{b,h,w} are macros and there are no equivalent macros for EVA. + * EVA unaligned access is handled in the ADE exception handler. + */ +#ifndef CONFIG_EVA +/*   * put_user_unaligned: - Write a simple value into user space.   * @x:	 Value to copy to user space.   * @ptr: Destination address, in user space. 
@@ -504,7 +593,7 @@ extern void __get_user_unaligned_unknown(void);  #define __get_user_unaligned_common(val, size, ptr)			\  do {									\  	switch (size) {							\ -	case 1: __get_user_asm(val, "lb", ptr); break;			\ +	case 1: __get_data_asm(val, "lb", ptr); break;			\  	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\  	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\  	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\ @@ -531,7 +620,7 @@ do {									\  	__gu_err;							\  }) -#define __get_user_unaligned_asm(val, insn, addr)			\ +#define __get_data_unaligned_asm(val, insn, addr)			\  {									\  	long __gu_tmp;							\  									\ @@ -594,19 +683,23 @@ do {									\  #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)  #endif +#define __put_user_unaligned_common(ptr, size)				\ +do {									\ +	switch (size) {							\ +	case 1: __put_data_asm("sb", ptr); break;			\ +	case 2: __put_user_unaligned_asm("ush", ptr); break;		\ +	case 4: __put_user_unaligned_asm("usw", ptr); break;		\ +	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\ +	default: __put_user_unaligned_unknown(); break;			\ +} while (0) +  #define __put_user_unaligned_nocheck(x,ptr,size)			\  ({									\  	__typeof__(*(ptr)) __pu_val;					\  	int __pu_err = 0;						\  									\  	__pu_val = (x);							\ -	switch (size) {							\ -	case 1: __put_user_asm("sb", ptr); break;			\ -	case 2: __put_user_unaligned_asm("ush", ptr); break;		\ -	case 4: __put_user_unaligned_asm("usw", ptr); break;		\ -	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\ -	default: __put_user_unaligned_unknown(); break;			\ -	}								\ +	__put_user_unaligned_common(ptr, size);				\  	__pu_err;							\  }) @@ -616,15 +709,9 @@ do {									\  	__typeof__(*(ptr)) __pu_val = (x);				\  	int __pu_err = -EFAULT;						\  									\ -	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\ -		switch (size) {						\ -		case 1: __put_user_asm("sb", __pu_addr); break;		\ -		case 2: 
__put_user_unaligned_asm("ush", __pu_addr); break; \ -		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \ -		case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break;	\ -		default: __put_user_unaligned_unknown(); break;		\ -		}							\ -	}								\ +	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\ +		__put_user_unaligned_common(__pu_addr, size);		\ +									\  	__pu_err;							\  }) @@ -669,6 +756,7 @@ do {									\  }  extern void __put_user_unaligned_unknown(void); +#endif  /*   * We're generating jump to subroutines which will be outside the range of @@ -693,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);  extern size_t __copy_user(void *__to, const void *__from, size_t __n); +#ifndef CONFIG_EVA  #define __invoke_copy_to_user(to, from, n)				\  ({									\  	register void __user *__cu_to_r __asm__("$4");			\ @@ -711,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);  	__cu_len_r;							\  }) +#define __invoke_copy_to_kernel(to, from, n)				\ +	__invoke_copy_to_user(to, from, n) + +#endif +  /*   * __copy_to_user: - Copy a block of data into user space, with less checking.   * @to:	  Destination address, in user space. 
@@ -735,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);  	__cu_from = (from);						\  	__cu_len = (n);							\  	might_fault();							\ -	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ +	if (segment_eq(get_fs(), get_ds()))				\ +		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\ +						   __cu_len);		\ +	else								\ +		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\ +						 __cu_len);		\  	__cu_len;							\  }) @@ -750,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_to = (to);							\  	__cu_from = (from);						\  	__cu_len = (n);							\ -	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ +	if (segment_eq(get_fs(), get_ds()))				\ +		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\ +						   __cu_len);		\ +	else								\ +		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\ +						 __cu_len);		\  	__cu_len;							\  }) @@ -763,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_to = (to);							\  	__cu_from = (from);						\  	__cu_len = (n);							\ -	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ -						    __cu_len);		\ +	if (segment_eq(get_fs(), get_ds()))				\ +		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\ +							      __cu_from,\ +							      __cu_len);\ +	else								\ +		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\ +							    __cu_from,	\ +							    __cu_len);	\  	__cu_len;							\  }) @@ -790,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_to = (to);							\  	__cu_from = (from);						\  	__cu_len = (n);							\ -	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\ -		might_fault();						\ -		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\ -						 __cu_len);		\ +	if (segment_eq(get_fs(), get_ds())) {				\ +		__cu_len = __invoke_copy_to_kernel(__cu_to,		\ 
+						   __cu_from,		\ +						   __cu_len);		\ +	} else {							\ +		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \ +			might_fault();                                  \ +			__cu_len = __invoke_copy_to_user(__cu_to,	\ +							 __cu_from,	\ +							 __cu_len);     \ +		}							\  	}								\  	__cu_len;							\  }) +#ifndef CONFIG_EVA +  #define __invoke_copy_from_user(to, from, n)				\  ({									\  	register void *__cu_to_r __asm__("$4");				\ @@ -821,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_len_r;							\  }) +#define __invoke_copy_from_kernel(to, from, n)				\ +	__invoke_copy_from_user(to, from, n) + +/* For userland <-> userland operations */ +#define ___invoke_copy_in_user(to, from, n)				\ +	__invoke_copy_from_user(to, from, n) + +/* For kernel <-> kernel operations */ +#define ___invoke_copy_in_kernel(to, from, n)				\ +	__invoke_copy_from_user(to, from, n) +  #define __invoke_copy_from_user_inatomic(to, from, n)			\  ({									\  	register void *__cu_to_r __asm__("$4");				\ @@ -844,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_len_r;							\  }) +#define __invoke_copy_from_kernel_inatomic(to, from, n)			\ +	__invoke_copy_from_user_inatomic(to, from, n)			\ + +#else + +/* EVA specific functions */ + +extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, +				       size_t __n); +extern size_t __copy_from_user_eva(void *__to, const void *__from, +				   size_t __n); +extern size_t __copy_to_user_eva(void *__to, const void *__from, +				 size_t __n); +extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); + +#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\ +({									\ +	register void *__cu_to_r __asm__("$4");				\ +	register const void __user *__cu_from_r __asm__("$5");		\ +	register long __cu_len_r __asm__("$6");				\ +									\ +	__cu_to_r = (to);						\ +	__cu_from_r = 
(from);						\ +	__cu_len_r = (n);						\ +	__asm__ __volatile__(						\ +	".set\tnoreorder\n\t"						\ +	__MODULE_JAL(func_ptr)						\ +	".set\tnoat\n\t"						\ +	__UA_ADDU "\t$1, %1, %2\n\t"					\ +	".set\tat\n\t"							\ +	".set\treorder"							\ +	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\ +	:								\ +	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\ +	  DADDI_SCRATCH, "memory");					\ +	__cu_len_r;							\ +}) + +#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\ +({									\ +	register void *__cu_to_r __asm__("$4");				\ +	register const void __user *__cu_from_r __asm__("$5");		\ +	register long __cu_len_r __asm__("$6");				\ +									\ +	__cu_to_r = (to);						\ +	__cu_from_r = (from);						\ +	__cu_len_r = (n);						\ +	__asm__ __volatile__(						\ +	__MODULE_JAL(func_ptr)						\ +	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\ +	:								\ +	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\ +	  DADDI_SCRATCH, "memory");					\ +	__cu_len_r;							\ +}) + +/* + * Source or destination address is in userland. We need to go through + * the TLB + */ +#define __invoke_copy_from_user(to, from, n)				\ +	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva) + +#define __invoke_copy_from_user_inatomic(to, from, n)			\ +	__invoke_copy_from_user_eva_generic(to, from, n,		\ +					    __copy_user_inatomic_eva) + +#define __invoke_copy_to_user(to, from, n)				\ +	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva) + +#define ___invoke_copy_in_user(to, from, n)				\ +	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva) + +/* + * Source or destination address in the kernel. 
We are not going through + * the TLB + */ +#define __invoke_copy_from_kernel(to, from, n)				\ +	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user) + +#define __invoke_copy_from_kernel_inatomic(to, from, n)			\ +	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic) + +#define __invoke_copy_to_kernel(to, from, n)				\ +	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user) + +#define ___invoke_copy_in_kernel(to, from, n)				\ +	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user) + +#endif /* CONFIG_EVA */ +  /*   * __copy_from_user: - Copy a block of data from user space, with less checking.   * @to:	  Destination address, in kernel space. @@ -901,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_to = (to);							\  	__cu_from = (from);						\  	__cu_len = (n);							\ -	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\ -		might_fault();						\ -		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\ -						   __cu_len);		\ +	if (segment_eq(get_fs(), get_ds())) {				\ +		__cu_len = __invoke_copy_from_kernel(__cu_to,		\ +						     __cu_from,		\ +						     __cu_len);		\ +	} else {							\ +		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\ +			might_fault();                                  \ +			__cu_len = __invoke_copy_from_user(__cu_to,	\ +							   __cu_from,	\ +							   __cu_len);   \ +		}							\  	}								\  	__cu_len;							\  }) @@ -918,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_to = (to);							\  	__cu_from = (from);						\  	__cu_len = (n);							\ -	might_fault();							\ -	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\ -					   __cu_len);			\ +	if (segment_eq(get_fs(), get_ds())) {				\ +		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\ +						    __cu_len);		\ +	} else {							\ +		might_fault();						\ +		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\ +				
		  __cu_len);		\ +	}								\  	__cu_len;							\  }) @@ -933,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);  	__cu_to = (to);							\  	__cu_from = (from);						\  	__cu_len = (n);							\ -	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\ -		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\ -		might_fault();						\ -		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\ -						   __cu_len);		\ +	if (segment_eq(get_fs(), get_ds())) {				\ +		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\ +						    __cu_len);		\ +	} else {							\ +		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\ +			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\ +			might_fault();					\ +			__cu_len = ___invoke_copy_in_user(__cu_to,	\ +							  __cu_from,	\ +							  __cu_len);	\ +		}							\  	}								\  	__cu_len;							\  }) @@ -1007,16 +1246,28 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)  {  	long res; -	might_fault(); -	__asm__ __volatile__( -		"move\t$4, %1\n\t" -		"move\t$5, %2\n\t" -		"move\t$6, %3\n\t" -		__MODULE_JAL(__strncpy_from_user_nocheck_asm) -		"move\t%0, $2" -		: "=r" (res) -		: "r" (__to), "r" (__from), "r" (__len) -		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); +	if (segment_eq(get_fs(), get_ds())) { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			"move\t$6, %3\n\t" +			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (__to), "r" (__from), "r" (__len) +			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); +	} else { +		might_fault(); +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			"move\t$6, %3\n\t" +			__MODULE_JAL(__strncpy_from_user_nocheck_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (__to), "r" (__from), "r" (__len) +			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); +	}  	return res;  } @@ -1044,16 +1295,28 @@ 
strncpy_from_user(char *__to, const char __user *__from, long __len)  {  	long res; -	might_fault(); -	__asm__ __volatile__( -		"move\t$4, %1\n\t" -		"move\t$5, %2\n\t" -		"move\t$6, %3\n\t" -		__MODULE_JAL(__strncpy_from_user_asm) -		"move\t%0, $2" -		: "=r" (res) -		: "r" (__to), "r" (__from), "r" (__len) -		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); +	if (segment_eq(get_fs(), get_ds())) { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			"move\t$6, %3\n\t" +			__MODULE_JAL(__strncpy_from_kernel_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (__to), "r" (__from), "r" (__len) +			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); +	} else { +		might_fault(); +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			"move\t$6, %3\n\t" +			__MODULE_JAL(__strncpy_from_user_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (__to), "r" (__from), "r" (__len) +			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); +	}  	return res;  } @@ -1063,14 +1326,24 @@ static inline long __strlen_user(const char __user *s)  {  	long res; -	might_fault(); -	__asm__ __volatile__( -		"move\t$4, %1\n\t" -		__MODULE_JAL(__strlen_user_nocheck_asm) -		"move\t%0, $2" -		: "=r" (res) -		: "r" (s) -		: "$2", "$4", __UA_t0, "$31"); +	if (segment_eq(get_fs(), get_ds())) { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			__MODULE_JAL(__strlen_kernel_nocheck_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s) +			: "$2", "$4", __UA_t0, "$31"); +	} else { +		might_fault(); +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			__MODULE_JAL(__strlen_user_nocheck_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s) +			: "$2", "$4", __UA_t0, "$31"); +	}  	return res;  } @@ -1093,14 +1366,24 @@ static inline long strlen_user(const char __user *s)  {  	long res; -	might_fault(); -	__asm__ __volatile__( -		"move\t$4, %1\n\t" -		__MODULE_JAL(__strlen_user_asm) -		"move\t%0, $2" -		: "=r" (res) -		: "r" (s) -		: "$2", 
"$4", __UA_t0, "$31"); +	if (segment_eq(get_fs(), get_ds())) { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			__MODULE_JAL(__strlen_kernel_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s) +			: "$2", "$4", __UA_t0, "$31"); +	} else { +		might_fault(); +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			__MODULE_JAL(__strlen_kernel_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s) +			: "$2", "$4", __UA_t0, "$31"); +	}  	return res;  } @@ -1110,15 +1393,26 @@ static inline long __strnlen_user(const char __user *s, long n)  {  	long res; -	might_fault(); -	__asm__ __volatile__( -		"move\t$4, %1\n\t" -		"move\t$5, %2\n\t" -		__MODULE_JAL(__strnlen_user_nocheck_asm) -		"move\t%0, $2" -		: "=r" (res) -		: "r" (s), "r" (n) -		: "$2", "$4", "$5", __UA_t0, "$31"); +	if (segment_eq(get_fs(), get_ds())) { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			__MODULE_JAL(__strnlen_kernel_nocheck_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s), "r" (n) +			: "$2", "$4", "$5", __UA_t0, "$31"); +	} else { +		might_fault(); +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			__MODULE_JAL(__strnlen_user_nocheck_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s), "r" (n) +			: "$2", "$4", "$5", __UA_t0, "$31"); +	}  	return res;  } @@ -1142,14 +1436,25 @@ static inline long strnlen_user(const char __user *s, long n)  	long res;  	might_fault(); -	__asm__ __volatile__( -		"move\t$4, %1\n\t" -		"move\t$5, %2\n\t" -		__MODULE_JAL(__strnlen_user_asm) -		"move\t%0, $2" -		: "=r" (res) -		: "r" (s), "r" (n) -		: "$2", "$4", "$5", __UA_t0, "$31"); +	if (segment_eq(get_fs(), get_ds())) { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			__MODULE_JAL(__strnlen_kernel_asm) +			"move\t%0, $2" +			: "=r" (res) +			: "r" (s), "r" (n) +			: "$2", "$4", "$5", __UA_t0, "$31"); +	} else { +		__asm__ __volatile__( +			"move\t$4, %1\n\t" +			"move\t$5, %2\n\t" +			__MODULE_JAL(__strnlen_user_asm) +			
"move\t%0, $2" +			: "=r" (res) +			: "r" (s), "r" (n) +			: "$2", "$4", "$5", __UA_t0, "$31"); +	}  	return res;  } diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index c33a9564fb4..708c5d41490 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -55,6 +55,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)  #define Ip_u2u1u3(op)							\  void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +#define Ip_u3u2u1(op)							\ +void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +  #define Ip_u3u1u2(op)							\  void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) @@ -64,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)  #define Ip_u2s3u1(op)							\  void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c) +#define Ip_s3s1s2(op)							\ +void ISAOPC(op)(u32 **buf, int a, int b, int c) +  #define Ip_u2u1s3(op)							\  void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) @@ -74,6 +80,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \  #define Ip_u1u2(op)							\  void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) +#define Ip_u2u1(op)							\ +void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) +  #define Ip_u1s2(op)							\  void ISAOPC(op)(u32 **buf, unsigned int a, signed int b) @@ -99,6 +108,7 @@ Ip_u2u1s3(_daddiu);  Ip_u3u1u2(_daddu);  Ip_u2u1msbu3(_dins);  Ip_u2u1msbu3(_dinsm); +Ip_u1u2(_divu);  Ip_u1u2u3(_dmfc0);  Ip_u1u2u3(_dmtc0);  Ip_u2u1u3(_drotr); @@ -114,16 +124,22 @@ Ip_u2u1msbu3(_ext);  Ip_u2u1msbu3(_ins);  Ip_u1(_j);  Ip_u1(_jal); +Ip_u2u1(_jalr);  Ip_u1(_jr); +Ip_u2s3u1(_lb);  Ip_u2s3u1(_ld);  Ip_u3u1u2(_ldx); +Ip_u2s3u1(_lh);  Ip_u2s3u1(_ll);  Ip_u2s3u1(_lld);  Ip_u1s2(_lui);  Ip_u2s3u1(_lw);  Ip_u3u1u2(_lwx);  Ip_u1u2u3(_mfc0); +Ip_u1(_mfhi); +Ip_u1(_mflo);  Ip_u1u2u3(_mtc0); +Ip_u3u1u2(_mul); 
 Ip_u3u1u2(_or);  Ip_u2u1u3(_ori);  Ip_u2s3u1(_pref); @@ -133,17 +149,26 @@ Ip_u2s3u1(_sc);  Ip_u2s3u1(_scd);  Ip_u2s3u1(_sd);  Ip_u2u1u3(_sll); +Ip_u3u2u1(_sllv); +Ip_s3s1s2(_slt); +Ip_u2u1s3(_sltiu); +Ip_u3u1u2(_sltu);  Ip_u2u1u3(_sra);  Ip_u2u1u3(_srl); +Ip_u3u2u1(_srlv);  Ip_u3u1u2(_subu);  Ip_u2s3u1(_sw); +Ip_u1(_sync);  Ip_u1(_syscall);  Ip_0(_tlbp);  Ip_0(_tlbr);  Ip_0(_tlbwi);  Ip_0(_tlbwr); +Ip_u1(_wait); +Ip_u2u1(_wsbh);  Ip_u3u1u2(_xor);  Ip_u2u1u3(_xori); +Ip_u2u1(_yield);  /* Handle labels. */ @@ -264,6 +289,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,  		   unsigned int bit, int lid);  void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,  		   unsigned int bit, int lid); +void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1, +		 unsigned int r2, int lid);  void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);  void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);  void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index 63c9c886173..e55813029d5 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -14,16 +14,21 @@  #include <uapi/asm/unistd.h> +#ifdef CONFIG_MIPS32_N32 +#define NR_syscalls  (__NR_N32_Linux + __NR_N32_Linux_syscalls) +#elif defined(CONFIG_64BIT) +#define NR_syscalls  (__NR_64_Linux + __NR_64_Linux_syscalls) +#else +#define NR_syscalls  (__NR_O32_Linux + __NR_O32_Linux_syscalls) +#endif  #ifndef __ASSEMBLY__ -#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64  #define __ARCH_WANT_OLD_READDIR  #define __ARCH_WANT_SYS_ALARM  #define __ARCH_WANT_SYS_GETHOSTNAME  #define __ARCH_WANT_SYS_IPC  #define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK  #define __ARCH_WANT_SYS_UTIME  #define __ARCH_WANT_SYS_WAITPID  #define __ARCH_WANT_SYS_SOCKETCALL diff --git a/arch/mips/include/asm/vga.h 
b/arch/mips/include/asm/vga.h index f4cff7e4fa8..f82c83749a0 100644 --- a/arch/mips/include/asm/vga.h +++ b/arch/mips/include/asm/vga.h @@ -6,6 +6,7 @@  #ifndef _ASM_VGA_H  #define _ASM_VGA_H +#include <asm/addrspace.h>  #include <asm/byteorder.h>  /* @@ -13,7 +14,7 @@   *	access the videoram directly without any black magic.   */ -#define VGA_MAP_MEM(x, s)	(0xb0000000L + (unsigned long)(x)) +#define VGA_MAP_MEM(x, s)	CKSEG1ADDR(0x10000000L + (unsigned long)(x))  #define vga_readb(x)	(*(x))  #define vga_writeb(x, y)	(*(y) = (x)) diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h index c6e1b961537..7849f3978fe 100644 --- a/arch/mips/include/asm/vpe.h +++ b/arch/mips/include/asm/vpe.h @@ -1,24 +1,95 @@  /* - * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved. - * - *  This program is free software; you can distribute it and/or modify it - *  under the terms of the GNU General Public License (Version 2) as - *  published by the Free Software Foundation. - * - *  This program is distributed in the hope it will be useful, but WITHOUT - *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License - *  for more details. - * - *  You should have received a copy of the GNU General Public License along - *  with this program; if not, write to the Free Software Foundation, Inc., - *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details.   * + * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd.   
*/ -  #ifndef _ASM_VPE_H  #define _ASM_VPE_H +#include <linux/init.h> +#include <linux/list.h> +#include <linux/smp.h> +#include <linux/spinlock.h> + +#define VPE_MODULE_NAME "vpe" +#define VPE_MODULE_MINOR 1 + +/* grab the likely amount of memory we will need. */ +#ifdef CONFIG_MIPS_VPE_LOADER_TOM +#define P_SIZE (2 * 1024 * 1024) +#else +/* add an overhead to the max kmalloc size for non-striped symbols/etc */ +#define P_SIZE (256 * 1024) +#endif + +#define MAX_VPES 16 +#define VPE_PATH_MAX 256 + +static inline int aprp_cpu_index(void) +{ +#ifdef CONFIG_MIPS_CMP +	return setup_max_cpus; +#else +	extern int tclimit; +	return tclimit; +#endif +} + +enum vpe_state { +	VPE_STATE_UNUSED = 0, +	VPE_STATE_INUSE, +	VPE_STATE_RUNNING +}; + +enum tc_state { +	TC_STATE_UNUSED = 0, +	TC_STATE_INUSE, +	TC_STATE_RUNNING, +	TC_STATE_DYNAMIC +}; + +struct vpe { +	enum vpe_state state; + +	/* (device) minor associated with this vpe */ +	int minor; + +	/* elfloader stuff */ +	void *load_addr; +	unsigned long len; +	char *pbuffer; +	unsigned long plen; +	char cwd[VPE_PATH_MAX]; + +	unsigned long __start; + +	/* tc's associated with this vpe */ +	struct list_head tc; + +	/* The list of vpe's */ +	struct list_head list; + +	/* shared symbol address */ +	void *shared_ptr; + +	/* the list of who wants to know when something major happens */ +	struct list_head notify; + +	unsigned int ntcs; +}; + +struct tc { +	enum tc_state state; +	int index; + +	struct vpe *pvpe;	/* parent VPE */ +	struct list_head tc;	/* The list of TC's with this VPE */ +	struct list_head list;	/* The global list of tc's */ +}; +  struct vpe_notifications {  	void (*start)(int vpe);  	void (*stop)(int vpe); @@ -26,12 +97,34 @@ struct vpe_notifications {  	struct list_head list;  }; +struct vpe_control { +	spinlock_t vpe_list_lock; +	struct list_head vpe_list;      /* Virtual processing elements */ +	spinlock_t tc_list_lock; +	struct list_head tc_list;       /* Thread contexts */ +}; + +extern unsigned long 
physical_memsize; +extern struct vpe_control vpecontrol; +extern const struct file_operations vpe_fops; + +int vpe_notify(int index, struct vpe_notifications *notify); + +void *vpe_get_shared(int index); +char *vpe_getcwd(int index); + +struct vpe *get_vpe(int minor); +struct tc *get_tc(int index); +struct vpe *alloc_vpe(int minor); +struct tc *alloc_tc(int index); +void release_vpe(struct vpe *v); -extern int vpe_notify(int index, struct vpe_notifications *notify); +void *alloc_progmem(unsigned long len); +void release_progmem(void *ptr); -extern void *vpe_get_shared(int index); -extern int vpe_getuid(int index); -extern int vpe_getgid(int index); -extern char *vpe_getcwd(int index); +int __weak vpe_run(struct vpe *v); +void cleanup_tc(struct tc *tc); +int __init vpe_module_init(void); +void __exit vpe_module_exit(void);  #endif /* _ASM_VPE_H */ diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild index be7196eacb8..96fe7395ed8 100644 --- a/arch/mips/include/uapi/asm/Kbuild +++ b/arch/mips/include/uapi/asm/Kbuild @@ -4,6 +4,7 @@ include include/uapi/asm-generic/Kbuild.asm  generic-y += auxvec.h  generic-y += ipcbuf.h +header-y += bitfield.h  header-y += bitsperlong.h  header-y += break.h  header-y += byteorder.h diff --git a/arch/mips/include/uapi/asm/bitfield.h b/arch/mips/include/uapi/asm/bitfield.h new file mode 100644 index 00000000000..ad9861359ce --- /dev/null +++ b/arch/mips/include/uapi/asm/bitfield.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License.  See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2014 by Ralf Baechle <ralf@linux-mips.org> + */ +#ifndef __UAPI_ASM_BITFIELD_H +#define __UAPI_ASM_BITFIELD_H + +/* + *  * Damn ...  
bitfields depend from byteorder :-( + *   */ +#ifdef __MIPSEB__ +#define __BITFIELD_FIELD(field, more)					\ +	field;								\ +	more + +#elif defined(__MIPSEL__) + +#define __BITFIELD_FIELD(field, more)					\ +	more								\ +	field; + +#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */ +#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?" +#endif + +#endif /* __UAPI_ASM_BITFIELD_H */ diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h index 31575e2fd1b..02d645d7aa9 100644 --- a/arch/mips/include/uapi/asm/errno.h +++ b/arch/mips/include/uapi/asm/errno.h @@ -102,7 +102,7 @@  #define EWOULDBLOCK	EAGAIN	/* Operation would block */  #define EALREADY	149	/* Operation already in progress */  #define EINPROGRESS	150	/* Operation now in progress */ -#define ESTALE		151	/* Stale NFS file handle */ +#define ESTALE		151	/* Stale file handle */  #define ECANCELED	158	/* AIO operation canceled */  /* diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index e5a676e3d3c..4bfdb9d4c18 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -8,10 +8,13 @@   * Copyright (C) 1996, 2000 by Ralf Baechle   * Copyright (C) 2006 by Thiemo Seufer   * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved. + * Copyright (C) 2014 Imagination Technologies Ltd.   */  #ifndef _UAPI_ASM_INST_H  #define _UAPI_ASM_INST_H +#include <asm/bitfield.h> +  /*   * Major opcodes; before MIPS IV cop1x was called cop3.   
*/ @@ -73,10 +76,17 @@ enum spec2_op {  enum spec3_op {  	ext_op, dextm_op, dextu_op, dext_op,  	ins_op, dinsm_op, dinsu_op, dins_op, -	lx_op = 0x0a, -	bshfl_op = 0x20, -	dbshfl_op = 0x24, -	rdhwr_op = 0x3b +	yield_op  = 0x09, lx_op     = 0x0a, +	lwle_op   = 0x19, lwre_op   = 0x1a, +	cachee_op = 0x1b, sbe_op    = 0x1c, +	she_op    = 0x1d, sce_op    = 0x1e, +	swe_op    = 0x1f, bshfl_op  = 0x20, +	swle_op   = 0x21, swre_op   = 0x22, +	prefe_op  = 0x23, dbshfl_op = 0x24, +	lbue_op   = 0x28, lhue_op   = 0x29, +	lbe_op    = 0x2c, lhe_op    = 0x2d, +	lle_op    = 0x2e, lwe_op    = 0x2f, +	rdhwr_op  = 0x3b  };  /* @@ -98,8 +108,9 @@ enum rt_op {   */  enum cop_op {  	mfc_op	      = 0x00, dmfc_op	    = 0x01, -	cfc_op	      = 0x02, mtc_op	    = 0x04, -	dmtc_op	      = 0x05, ctc_op	    = 0x06, +	cfc_op	      = 0x02, mfhc_op	    = 0x03, +	mtc_op        = 0x04, dmtc_op	    = 0x05, +	ctc_op	      = 0x06, mthc_op	    = 0x07,  	bc_op	      = 0x08, cop_op	    = 0x10,  	copm_op	      = 0x18  }; @@ -117,7 +128,8 @@ enum bcop_op {  enum cop0_coi_func {  	tlbr_op	      = 0x01, tlbwi_op	    = 0x02,  	tlbwr_op      = 0x06, tlbp_op	    = 0x08, -	rfe_op	      = 0x10, eret_op	    = 0x18 +	rfe_op	      = 0x10, eret_op	    = 0x18, +	wait_op       = 0x20,  };  /* @@ -162,8 +174,8 @@ enum cop1_sdw_func {   */  enum cop1x_func {  	lwxc1_op     =	0x00, ldxc1_op	   =  0x01, -	pfetch_op    =	0x07, swxc1_op	   =  0x08, -	sdxc1_op     =	0x09, madd_s_op	   =  0x20, +	swxc1_op     =  0x08, sdxc1_op	   =  0x09, +	pfetch_op    =	0x0f, madd_s_op	   =  0x20,  	madd_d_op    =	0x21, madd_e_op	   =  0x22,  	msub_s_op    =	0x28, msub_d_op	   =  0x29,  	msub_e_op    =	0x2a, nmadd_s_op   =  0x30, @@ -194,6 +206,16 @@ enum lx_func {  };  /* + * BSHFL opcodes + */ +enum bshfl_func { +	wsbh_op = 0x2, +	dshd_op = 0x5, +	seb_op  = 0x10, +	seh_op  = 0x18, +}; + +/*   * (microMIPS) Major opcodes.   
*/  enum mm_major_op { @@ -236,17 +258,23 @@ enum mm_32i_minor_op {  enum mm_32a_minor_op {  	mm_sll32_op = 0x000,  	mm_ins_op = 0x00c, +	mm_sllv32_op = 0x010,  	mm_ext_op = 0x02c,  	mm_pool32axf_op = 0x03c,  	mm_srl32_op = 0x040,  	mm_sra_op = 0x080, +	mm_srlv32_op = 0x090,  	mm_rotr_op = 0x0c0,  	mm_lwxs_op = 0x118,  	mm_addu32_op = 0x150,  	mm_subu32_op = 0x1d0, +	mm_wsbh_op = 0x1ec, +	mm_mul_op = 0x210,  	mm_and_op = 0x250,  	mm_or32_op = 0x290,  	mm_xor32_op = 0x310, +	mm_slt_op = 0x350, +	mm_sltu_op = 0x390,  };  /* @@ -286,15 +314,20 @@ enum mm_32axf_minor_op {  	mm_mfc0_op = 0x003,  	mm_mtc0_op = 0x00b,  	mm_tlbp_op = 0x00d, +	mm_mfhi32_op = 0x035,  	mm_jalr_op = 0x03c,  	mm_tlbr_op = 0x04d, +	mm_mflo32_op = 0x075,  	mm_jalrhb_op = 0x07c,  	mm_tlbwi_op = 0x08d,  	mm_tlbwr_op = 0x0cd,  	mm_jalrs_op = 0x13c,  	mm_jalrshb_op = 0x17c, +	mm_sync_op = 0x1ad,  	mm_syscall_op = 0x22d, +	mm_wait_op = 0x24d,  	mm_eret_op = 0x3cd, +	mm_divu_op = 0x5dc,  };  /* @@ -397,8 +430,10 @@ enum mm_32f_73_minor_op {  	mm_movt1_op = 0xa5,  	mm_ftruncw_op = 0xac,  	mm_fneg1_op = 0xad, +	mm_mfhc1_op = 0xc0,  	mm_froundl_op = 0xcc,  	mm_fcvtd1_op = 0xcd, +	mm_mthc1_op = 0xe0,  	mm_froundw_op = 0xec,  	mm_fcvts1_op = 0xed,  }; @@ -470,125 +505,116 @@ enum MIPS6e_i8_func {   */  #define MM_NOP16	0x0c00 -/* - * Damn ...  bitfields depend from byteorder :-( - */ -#ifdef __MIPSEB__ -#define BITFIELD_FIELD(field, more)					\ -	field;								\ -	more - -#elif defined(__MIPSEL__) - -#define BITFIELD_FIELD(field, more)					\ -	more								\ -	field; - -#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */ -#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?" 
-#endif -  struct j_format { -	BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ -	BITFIELD_FIELD(unsigned int target : 26, +	__BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ +	__BITFIELD_FIELD(unsigned int target : 26,  	;))  };  struct i_format {			/* signed immediate format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(signed int simmediate : 16, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(signed int simmediate : 16,  	;))))  };  struct u_format {			/* unsigned immediate format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int uimmediate : 16, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int uimmediate : 16,  	;))))  };  struct c_format {			/* Cache (>= R6000) format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(unsigned int c_op : 3, -	BITFIELD_FIELD(unsigned int cache : 2, -	BITFIELD_FIELD(unsigned int simmediate : 16, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(unsigned int c_op : 3, +	__BITFIELD_FIELD(unsigned int cache : 2, +	__BITFIELD_FIELD(unsigned int simmediate : 16,  	;)))))  };  struct r_format {			/* Register format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int rd : 5, -	BITFIELD_FIELD(unsigned int re : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int rd : 5, +	
__BITFIELD_FIELD(unsigned int re : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct p_format {		/* Performance counter format (R10000) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int rd : 5, -	BITFIELD_FIELD(unsigned int re : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int rd : 5, +	__BITFIELD_FIELD(unsigned int re : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct f_format {			/* FPU register format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int : 1, -	BITFIELD_FIELD(unsigned int fmt : 4, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int rd : 5, -	BITFIELD_FIELD(unsigned int re : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int : 1, +	__BITFIELD_FIELD(unsigned int fmt : 4, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int rd : 5, +	__BITFIELD_FIELD(unsigned int re : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;)))))))  };  struct ma_format {		/* FPU multiply and add format (MIPS IV) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int fr : 5, -	BITFIELD_FIELD(unsigned int ft : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int func : 4, -	BITFIELD_FIELD(unsigned int fmt : 2, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int fr : 5, +	__BITFIELD_FIELD(unsigned int ft : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int func : 4, +	__BITFIELD_FIELD(unsigned int fmt : 2,  	;)))))))  };  struct b_format {			/* BREAK and SYSCALL */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	
BITFIELD_FIELD(unsigned int code : 20, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int code : 20, +	__BITFIELD_FIELD(unsigned int func : 6,  	;)))  };  struct ps_format {			/* MIPS-3D / paired single format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(unsigned int ft : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(unsigned int ft : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct v_format {				/* MDMX vector format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int sel : 4, -	BITFIELD_FIELD(unsigned int fmt : 1, -	BITFIELD_FIELD(unsigned int vt : 5, -	BITFIELD_FIELD(unsigned int vs : 5, -	BITFIELD_FIELD(unsigned int vd : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int sel : 4, +	__BITFIELD_FIELD(unsigned int fmt : 1, +	__BITFIELD_FIELD(unsigned int vt : 5, +	__BITFIELD_FIELD(unsigned int vs : 5, +	__BITFIELD_FIELD(unsigned int vd : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;)))))))  }; +struct spec3_format {   /* SPEC3 */ +	__BITFIELD_FIELD(unsigned int opcode:6, +	__BITFIELD_FIELD(unsigned int rs:5, +	__BITFIELD_FIELD(unsigned int rt:5, +	__BITFIELD_FIELD(signed int simmediate:9, +	__BITFIELD_FIELD(unsigned int func:7, +	;))))) +}; +  /*   * microMIPS instruction formats (32-bit length)   * @@ -597,141 +623,141 @@ struct v_format {				/* MDMX vector format */   *	if it is MIPS32 instruction re-encoded for use in the microMIPS ASE.   
*/  struct fb_format {		/* FPU branch format (MIPS32) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int bc : 5, -	BITFIELD_FIELD(unsigned int cc : 3, -	BITFIELD_FIELD(unsigned int flag : 2, -	BITFIELD_FIELD(signed int simmediate : 16, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int bc : 5, +	__BITFIELD_FIELD(unsigned int cc : 3, +	__BITFIELD_FIELD(unsigned int flag : 2, +	__BITFIELD_FIELD(signed int simmediate : 16,  	;)))))  };  struct fp0_format {		/* FPU multiply and add format (MIPS32) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int fmt : 5, -	BITFIELD_FIELD(unsigned int ft : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int fmt : 5, +	__BITFIELD_FIELD(unsigned int ft : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct mm_fp0_format {		/* FPU multipy and add format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int ft : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int fmt : 3, -	BITFIELD_FIELD(unsigned int op : 2, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int ft : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int fmt : 3, +	__BITFIELD_FIELD(unsigned int op : 2, +	__BITFIELD_FIELD(unsigned int func : 6,  	;)))))))  };  struct fp1_format {		/* FPU mfc1 and cfc1 format (MIPS32) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int op : 5, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	
__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int op : 5, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct mm_fp1_format {		/* FPU mfc1 and cfc1 format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fmt : 2, -	BITFIELD_FIELD(unsigned int op : 8, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fmt : 2, +	__BITFIELD_FIELD(unsigned int op : 8, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct mm_fp2_format {		/* FPU movt and movf format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int cc : 3, -	BITFIELD_FIELD(unsigned int zero : 2, -	BITFIELD_FIELD(unsigned int fmt : 2, -	BITFIELD_FIELD(unsigned int op : 3, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int cc : 3, +	__BITFIELD_FIELD(unsigned int zero : 2, +	__BITFIELD_FIELD(unsigned int fmt : 2, +	__BITFIELD_FIELD(unsigned int op : 3, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))))  };  struct mm_fp3_format {		/* FPU abs and neg format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fmt : 3, -	BITFIELD_FIELD(unsigned int op : 7, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	
__BITFIELD_FIELD(unsigned int fmt : 3, +	__BITFIELD_FIELD(unsigned int op : 7, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct mm_fp4_format {		/* FPU c.cond format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int cc : 3, -	BITFIELD_FIELD(unsigned int fmt : 3, -	BITFIELD_FIELD(unsigned int cond : 4, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int cc : 3, +	__BITFIELD_FIELD(unsigned int fmt : 3, +	__BITFIELD_FIELD(unsigned int cond : 4, +	__BITFIELD_FIELD(unsigned int func : 6,  	;)))))))  };  struct mm_fp5_format {		/* FPU lwxc1 and swxc1 format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int index : 5, -	BITFIELD_FIELD(unsigned int base : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int op : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int index : 5, +	__BITFIELD_FIELD(unsigned int base : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int op : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct fp6_format {		/* FPU madd and msub format (MIPS IV) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int fr : 5, -	BITFIELD_FIELD(unsigned int ft : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int fr : 5, +	__BITFIELD_FIELD(unsigned int ft : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct mm_fp6_format {		/* FPU madd and msub format (microMIPS) */ -	BITFIELD_FIELD(unsigned 
int opcode : 6, -	BITFIELD_FIELD(unsigned int ft : 5, -	BITFIELD_FIELD(unsigned int fs : 5, -	BITFIELD_FIELD(unsigned int fd : 5, -	BITFIELD_FIELD(unsigned int fr : 5, -	BITFIELD_FIELD(unsigned int func : 6, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int ft : 5, +	__BITFIELD_FIELD(unsigned int fs : 5, +	__BITFIELD_FIELD(unsigned int fd : 5, +	__BITFIELD_FIELD(unsigned int fr : 5, +	__BITFIELD_FIELD(unsigned int func : 6,  	;))))))  };  struct mm_i_format {		/* Immediate format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(unsigned int rs : 5, -	BITFIELD_FIELD(signed int simmediate : 16, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(unsigned int rs : 5, +	__BITFIELD_FIELD(signed int simmediate : 16,  	;))))  };  struct mm_m_format {		/* Multi-word load/store format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rd : 5, -	BITFIELD_FIELD(unsigned int base : 5, -	BITFIELD_FIELD(unsigned int func : 4, -	BITFIELD_FIELD(signed int simmediate : 12, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rd : 5, +	__BITFIELD_FIELD(unsigned int base : 5, +	__BITFIELD_FIELD(unsigned int func : 4, +	__BITFIELD_FIELD(signed int simmediate : 12,  	;)))))  };  struct mm_x_format {		/* Scaled indexed load format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int index : 5, -	BITFIELD_FIELD(unsigned int base : 5, -	BITFIELD_FIELD(unsigned int rd : 5, -	BITFIELD_FIELD(unsigned int func : 11, +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int index : 5, +	__BITFIELD_FIELD(unsigned int base : 5, +	__BITFIELD_FIELD(unsigned int rd : 5, +	__BITFIELD_FIELD(unsigned int func : 11,  	;)))))  }; @@ -739,51 +765,51 @@ struct mm_x_format {		/* Scaled indexed load format (microMIPS) */   * microMIPS instruction formats 
(16-bit length)   */  struct mm_b0_format {		/* Unconditional branch format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(signed int simmediate : 10, -	BITFIELD_FIELD(unsigned int : 16, /* Ignored */ +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(signed int simmediate : 10, +	__BITFIELD_FIELD(unsigned int : 16, /* Ignored */  	;)))  };  struct mm_b1_format {		/* Conditional branch format (microMIPS) */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rs : 3, -	BITFIELD_FIELD(signed int simmediate : 7, -	BITFIELD_FIELD(unsigned int : 16, /* Ignored */ +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rs : 3, +	__BITFIELD_FIELD(signed int simmediate : 7, +	__BITFIELD_FIELD(unsigned int : 16, /* Ignored */  	;))))  };  struct mm16_m_format {		/* Multi-word load/store format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int func : 4, -	BITFIELD_FIELD(unsigned int rlist : 2, -	BITFIELD_FIELD(unsigned int imm : 4, -	BITFIELD_FIELD(unsigned int : 16, /* Ignored */ +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int func : 4, +	__BITFIELD_FIELD(unsigned int rlist : 2, +	__BITFIELD_FIELD(unsigned int imm : 4, +	__BITFIELD_FIELD(unsigned int : 16, /* Ignored */  	;)))))  };  struct mm16_rb_format {		/* Signed immediate format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 3, -	BITFIELD_FIELD(unsigned int base : 3, -	BITFIELD_FIELD(signed int simmediate : 4, -	BITFIELD_FIELD(unsigned int : 16, /* Ignored */ +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 3, +	__BITFIELD_FIELD(unsigned int base : 3, +	__BITFIELD_FIELD(signed int simmediate : 4, +	__BITFIELD_FIELD(unsigned int : 16, /* Ignored */  	;)))))  };  struct mm16_r3_format {		/* Load from global pointer format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 3, -	BITFIELD_FIELD(signed 
int simmediate : 7, -	BITFIELD_FIELD(unsigned int : 16, /* Ignored */ +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 3, +	__BITFIELD_FIELD(signed int simmediate : 7, +	__BITFIELD_FIELD(unsigned int : 16, /* Ignored */  	;))))  };  struct mm16_r5_format {		/* Load/store from stack pointer format */ -	BITFIELD_FIELD(unsigned int opcode : 6, -	BITFIELD_FIELD(unsigned int rt : 5, -	BITFIELD_FIELD(signed int simmediate : 5, -	BITFIELD_FIELD(unsigned int : 16, /* Ignored */ +	__BITFIELD_FIELD(unsigned int opcode : 6, +	__BITFIELD_FIELD(unsigned int rt : 5, +	__BITFIELD_FIELD(signed int simmediate : 5, +	__BITFIELD_FIELD(unsigned int : 16, /* Ignored */  	;))))  }; @@ -791,57 +817,57 @@ struct mm16_r5_format {		/* Load/store from stack pointer format */   * MIPS16e instruction formats (16-bit length)   */  struct m16e_rr { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int rx : 3, -	BITFIELD_FIELD(unsigned int nd : 1, -	BITFIELD_FIELD(unsigned int l : 1, -	BITFIELD_FIELD(unsigned int ra : 1, -	BITFIELD_FIELD(unsigned int func : 5, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int rx : 3, +	__BITFIELD_FIELD(unsigned int nd : 1, +	__BITFIELD_FIELD(unsigned int l : 1, +	__BITFIELD_FIELD(unsigned int ra : 1, +	__BITFIELD_FIELD(unsigned int func : 5,  	;))))))  };  struct m16e_jal { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int x : 1, -	BITFIELD_FIELD(unsigned int imm20_16 : 5, -	BITFIELD_FIELD(signed int imm25_21 : 5, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int x : 1, +	__BITFIELD_FIELD(unsigned int imm20_16 : 5, +	__BITFIELD_FIELD(signed int imm25_21 : 5,  	;))))  };  struct m16e_i64 { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int func : 3, -	BITFIELD_FIELD(unsigned int imm : 8, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int func : 3, +	__BITFIELD_FIELD(unsigned int imm : 8,  	;)))  
};  struct m16e_ri64 { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int func : 3, -	BITFIELD_FIELD(unsigned int ry : 3, -	BITFIELD_FIELD(unsigned int imm : 5, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int func : 3, +	__BITFIELD_FIELD(unsigned int ry : 3, +	__BITFIELD_FIELD(unsigned int imm : 5,  	;))))  };  struct m16e_ri { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int rx : 3, -	BITFIELD_FIELD(unsigned int imm : 8, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int rx : 3, +	__BITFIELD_FIELD(unsigned int imm : 8,  	;)))  };  struct m16e_rri { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int rx : 3, -	BITFIELD_FIELD(unsigned int ry : 3, -	BITFIELD_FIELD(unsigned int imm : 5, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int rx : 3, +	__BITFIELD_FIELD(unsigned int ry : 3, +	__BITFIELD_FIELD(unsigned int imm : 5,  	;))))  };  struct m16e_i8 { -	BITFIELD_FIELD(unsigned int opcode : 5, -	BITFIELD_FIELD(unsigned int func : 3, -	BITFIELD_FIELD(unsigned int imm : 8, +	__BITFIELD_FIELD(unsigned int opcode : 5, +	__BITFIELD_FIELD(unsigned int func : 3, +	__BITFIELD_FIELD(unsigned int imm : 8,  	;)))  }; @@ -860,6 +886,7 @@ union mips_instruction {  	struct b_format b_format;  	struct ps_format ps_format;  	struct v_format v_format; +	struct spec3_format spec3_format;  	struct fb_format fb_format;  	struct fp0_format fp0_format;  	struct mm_fp0_format mm_fp0_format; diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index f09ff5ae205..2c04b6d9ff8 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h @@ -106,6 +106,41 @@ struct kvm_fpu {  #define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)  #define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) +/* KVM specific control registers */ + +/* + * CP0_Count control + * DC:    Set 0: Master disable 
CP0_Count and set COUNT_RESUME to now + *        Set 1: Master re-enable CP0_Count with unchanged bias, handling timer + *               interrupts since COUNT_RESUME + *        This can be used to freeze the timer to get a consistent snapshot of + *        the CP0_Count and timer interrupt pending state, while also resuming + *        safely without losing time or guest timer interrupts. + * Other: Reserved, do not change. + */ +#define KVM_REG_MIPS_COUNT_CTL		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ +					 0x20000 | 0) +#define KVM_REG_MIPS_COUNT_CTL_DC	0x00000001 + +/* + * CP0_Count resume monotonic nanoseconds + * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master + * disable). Any reads and writes of Count related registers while + * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is + * cleared again (master enable) any timer interrupts since this time will be + * emulated. + * Modifications to times in the future are rejected. + */ +#define KVM_REG_MIPS_COUNT_RESUME	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ +					 0x20000 | 1) +/* + * CP0_Count rate in Hz + * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without + * discontinuities in CP0_Count. 
+ */ +#define KVM_REG_MIPS_COUNT_HZ		(KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ +					 0x20000 | 2) +  /*   * KVM MIPS specific structures and definitions   * diff --git a/arch/mips/include/uapi/asm/kvm_para.h b/arch/mips/include/uapi/asm/kvm_para.h index 14fab8f0b95..7e16d7c42e6 100644 --- a/arch/mips/include/uapi/asm/kvm_para.h +++ b/arch/mips/include/uapi/asm/kvm_para.h @@ -1 +1,5 @@ -#include <asm-generic/kvm_para.h> +#ifndef _UAPI_ASM_MIPS_KVM_PARA_H +#define _UAPI_ASM_MIPS_KVM_PARA_H + + +#endif /* _UAPI_ASM_MIPS_KVM_PARA_H */ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 88e292b7719..e81174432ba 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -33,6 +33,8 @@ struct siginfo;  #error _MIPS_SZLONG neither 32 nor 64  #endif +#define __ARCH_SIGSYS +  #include <asm-generic/siginfo.h>  typedef struct siginfo { @@ -97,6 +99,13 @@ typedef struct siginfo {  			__ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */  			int _fd;  		} _sigpoll; + +		/* SIGSYS */ +		struct { +			void __user *_call_addr; /* calling user insn */ +			int _syscall;	/* triggering system call number */ +			unsigned int _arch;	/* AUDIT_ARCH_* of syscall */ +		} _sigsys;  	} _sifields;  } siginfo_t; diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 61c01f054d1..a14baa218c7 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -94,4 +94,8 @@  #define SO_BUSY_POLL		46 +#define SO_MAX_PACING_RATE	47 + +#define SO_BPF_EXTENSIONS	48 +  #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/types.h b/arch/mips/include/uapi/asm/types.h index 7ac9d0baad8..f3dd9ff0cc0 100644 --- a/arch/mips/include/uapi/asm/types.h +++ b/arch/mips/include/uapi/asm/types.h @@ -14,9 +14,12 @@  /*   * We don't use int-l64.h for the kernel anymore but still use it for   * userspace to avoid code changes. 
+ * + * However, some user programs (e.g. perf) may not want this. They can + * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.   */  #ifndef __KERNEL__ -# if _MIPS_SZLONG == 64 +# if _MIPS_SZLONG == 64 && !defined(__SANE_USERSPACE_TYPES__)  #  include <asm-generic/int-l64.h>  # else  #  include <asm-generic/int-ll64.h> diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 1dee279f966..5805414777e 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -369,16 +369,19 @@  #define __NR_process_vm_writev		(__NR_Linux + 346)  #define __NR_kcmp			(__NR_Linux + 347)  #define __NR_finit_module		(__NR_Linux + 348) +#define __NR_sched_setattr		(__NR_Linux + 349) +#define __NR_sched_getattr		(__NR_Linux + 350) +#define __NR_renameat2			(__NR_Linux + 351)  /*   * Offset of the last Linux o32 flavoured syscall   */ -#define __NR_Linux_syscalls		348 +#define __NR_Linux_syscalls		351  #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */  #define __NR_O32_Linux			4000 -#define __NR_O32_Linux_syscalls		348 +#define __NR_O32_Linux_syscalls		351  #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -695,16 +698,19 @@  #define __NR_kcmp			(__NR_Linux + 306)  #define __NR_finit_module		(__NR_Linux + 307)  #define __NR_getdents64			(__NR_Linux + 308) +#define __NR_sched_setattr		(__NR_Linux + 309) +#define __NR_sched_getattr		(__NR_Linux + 310) +#define __NR_renameat2			(__NR_Linux + 311)  /*   * Offset of the last Linux 64-bit flavoured syscall   */ -#define __NR_Linux_syscalls		308 +#define __NR_Linux_syscalls		311  #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */  #define __NR_64_Linux			5000 -#define __NR_64_Linux_syscalls		308 +#define __NR_64_Linux_syscalls		311  #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1025,15 +1031,18 @@  #define __NR_process_vm_writev		(__NR_Linux + 310)  #define __NR_kcmp			(__NR_Linux + 311)  #define __NR_finit_module		(__NR_Linux + 312) +#define __NR_sched_setattr		(__NR_Linux + 313) +#define __NR_sched_getattr		
(__NR_Linux + 314) +#define __NR_renameat2			(__NR_Linux + 315)  /*   * Offset of the last N32 flavoured syscall   */ -#define __NR_Linux_syscalls		312 +#define __NR_Linux_syscalls		315  #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */  #define __NR_N32_Linux			6000 -#define __NR_N32_Linux_syscalls		312 +#define __NR_N32_Linux_syscalls		315  #endif /* _UAPI_ASM_UNISTD_H */  | 
