diff options
Diffstat (limited to 'arch/mips/mm/page.c')
| -rw-r--r-- | arch/mips/mm/page.c | 141 | 
1 files changed, 55 insertions, 86 deletions
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 36272f7d374..b611102e23b 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -6,8 +6,8 @@   * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)   * Copyright (C) 2007  Maciej W. Rozycki   * Copyright (C) 2008  Thiemo Seufer + * Copyright (C) 2012  MIPS Technologies, Inc.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/sched.h>  #include <linux/smp.h> @@ -17,12 +17,12 @@  #include <asm/bugs.h>  #include <asm/cacheops.h> +#include <asm/cpu-type.h>  #include <asm/inst.h>  #include <asm/io.h>  #include <asm/page.h>  #include <asm/pgtable.h>  #include <asm/prefetch.h> -#include <asm/system.h>  #include <asm/bootinfo.h>  #include <asm/mipsregs.h>  #include <asm/mmu_context.h> @@ -66,68 +66,29 @@ UASM_L_LA(_copy_pref_both)  UASM_L_LA(_copy_pref_store)  /* We need one branch and therefore one relocation per target label. */ -static struct uasm_label __cpuinitdata labels[5]; -static struct uasm_reloc __cpuinitdata relocs[5]; +static struct uasm_label labels[5]; +static struct uasm_reloc relocs[5];  #define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)  #define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020) -/* - * Maximum sizes: - * - * R4000 128 bytes S-cache:		0x058 bytes - * R4600 v1.7:				0x05c bytes - * R4600 v2.0:				0x060 bytes - * With prefetching, 16 word strides	0x120 bytes - */ - -static u32 clear_page_array[0x120 / 4]; - -#ifdef CONFIG_SIBYTE_DMA_PAGEOPS -void clear_page_cpu(void *page) __attribute__((alias("clear_page_array"))); -#else -void clear_page(void *page) __attribute__((alias("clear_page_array"))); -#endif - -EXPORT_SYMBOL(clear_page); - -/* - * Maximum sizes: - * - * R4000 128 bytes S-cache:		0x11c bytes - * R4600 v1.7:				0x080 bytes - * R4600 v2.0:				0x07c bytes - * With prefetching, 16 word strides	0x540 bytes - */ -static u32 copy_page_array[0x540 / 4]; - -#ifdef CONFIG_SIBYTE_DMA_PAGEOPS -void 
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array"))); -#else -void copy_page(void *to, void *from) __attribute__((alias("copy_page_array"))); -#endif +static int pref_bias_clear_store; +static int pref_bias_copy_load; +static int pref_bias_copy_store; -EXPORT_SYMBOL(copy_page); +static u32 pref_src_mode; +static u32 pref_dst_mode; +static int clear_word_size; +static int copy_word_size; -static int pref_bias_clear_store __cpuinitdata; -static int pref_bias_copy_load __cpuinitdata; -static int pref_bias_copy_store __cpuinitdata; +static int half_clear_loop_size; +static int half_copy_loop_size; -static u32 pref_src_mode __cpuinitdata; -static u32 pref_dst_mode __cpuinitdata; - -static int clear_word_size __cpuinitdata; -static int copy_word_size __cpuinitdata; - -static int half_clear_loop_size __cpuinitdata; -static int half_copy_loop_size __cpuinitdata; - -static int cache_line_size __cpuinitdata; +static int cache_line_size;  #define cache_line_mask() (cache_line_size - 1) -static inline void __cpuinit +static inline void  pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)  {  	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { @@ -147,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)  	}  } -static void __cpuinit set_prefetch_parameters(void) +static void set_prefetch_parameters(void)  {  	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)  		clear_word_size = 8; @@ -179,15 +140,6 @@ static void __cpuinit set_prefetch_parameters(void)  			pref_bias_copy_load = 256;  			break; -		case CPU_RM9000: -			/* -			 * As a workaround for erratum G105 which make the -			 * PrepareForStore hint unusable we fall back to -			 * StoreRetained on the RM9000.  Once it is known which -			 * versions of the RM9000 we'll be able to condition- -			 * alize this. 
-			 */ -  		case CPU_R10000:  		case CPU_R12000:  		case CPU_R14000: @@ -247,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)  				      4 * copy_word_size));  } -static void __cpuinit build_clear_store(u32 **buf, int off) +static void build_clear_store(u32 **buf, int off)  {  	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {  		uasm_i_sd(buf, ZERO, off, A0); @@ -256,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)  	}  } -static inline void __cpuinit build_clear_pref(u32 **buf, int off) +static inline void build_clear_pref(u32 **buf, int off)  {  	if (off & cache_line_mask())  		return; @@ -280,16 +232,26 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)  			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);  		} -		} +	}  } -void __cpuinit build_clear_page(void) +extern u32 __clear_page_start; +extern u32 __clear_page_end; +extern u32 __copy_page_start; +extern u32 __copy_page_end; + +void build_clear_page(void)  {  	int off; -	u32 *buf = (u32 *)&clear_page_array; +	u32 *buf = &__clear_page_start;  	struct uasm_label *l = labels;  	struct uasm_reloc *r = relocs;  	int i; +	static atomic_t run_once = ATOMIC_INIT(0); + +	if (atomic_xchg(&run_once, 1)) { +		return; +	}  	memset(labels, 0, sizeof(labels));  	memset(relocs, 0, sizeof(relocs)); @@ -311,10 +273,10 @@ void __cpuinit build_clear_page(void)  		uasm_i_ori(&buf, A2, A0, off);  	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) -		uasm_i_lui(&buf, AT, 0xa000); +		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));  	off = cache_line_size ? 
min(8, pref_bias_clear_store / cache_line_size) -	                        * cache_line_size : 0; +				* cache_line_size : 0;  while (off) {  		build_clear_pref(&buf, -off);  		off -= cache_line_size; @@ -357,21 +319,21 @@ void __cpuinit build_clear_page(void)  	uasm_i_jr(&buf, RA);  	uasm_i_nop(&buf); -	BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array)); +	BUG_ON(buf > &__clear_page_end);  	uasm_resolve_relocs(relocs, labels);  	pr_debug("Synthesized clear page handler (%u instructions).\n", -		 (u32)(buf - clear_page_array)); +		 (u32)(buf - &__clear_page_start));  	pr_debug("\t.set push\n");  	pr_debug("\t.set noreorder\n"); -	for (i = 0; i < (buf - clear_page_array); i++) -		pr_debug("\t.word 0x%08x\n", clear_page_array[i]); +	for (i = 0; i < (buf - &__clear_page_start); i++) +		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);  	pr_debug("\t.set pop\n");  } -static void __cpuinit build_copy_load(u32 **buf, int reg, int off) +static void build_copy_load(u32 **buf, int reg, int off)  {  	if (cpu_has_64bit_gp_regs) {  		uasm_i_ld(buf, reg, off, A1); @@ -380,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)  	}  } -static void __cpuinit build_copy_store(u32 **buf, int reg, int off) +static void build_copy_store(u32 **buf, int reg, int off)  {  	if (cpu_has_64bit_gp_regs) {  		uasm_i_sd(buf, reg, off, A0); @@ -425,13 +387,18 @@ static inline void build_copy_store_pref(u32 **buf, int off)  	}  } -void __cpuinit build_copy_page(void) +void build_copy_page(void)  {  	int off; -	u32 *buf = (u32 *)&copy_page_array; +	u32 *buf = &__copy_page_start;  	struct uasm_label *l = labels;  	struct uasm_reloc *r = relocs;  	int i; +	static atomic_t run_once = ATOMIC_INIT(0); + +	if (atomic_xchg(&run_once, 1)) { +		return; +	}  	memset(labels, 0, sizeof(labels));  	memset(relocs, 0, sizeof(relocs)); @@ -457,16 +424,16 @@ void __cpuinit build_copy_page(void)  		uasm_i_ori(&buf, A2, A0, off);  	if (R4600_V2_HIT_CACHEOP_WAR && 
cpu_is_r4600_v2_x()) -		uasm_i_lui(&buf, AT, 0xa000); +		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));  	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) * -	                        cache_line_size : 0; +				cache_line_size : 0;  	while (off) {  		build_copy_load_pref(&buf, -off);  		off -= cache_line_size;  	}  	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) * -	                        cache_line_size : 0; +				cache_line_size : 0;  	while (off) {  		build_copy_store_pref(&buf, -off);  		off -= cache_line_size; @@ -596,21 +563,23 @@ void __cpuinit build_copy_page(void)  	uasm_i_jr(&buf, RA);  	uasm_i_nop(&buf); -	BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array)); +	BUG_ON(buf > &__copy_page_end);  	uasm_resolve_relocs(relocs, labels);  	pr_debug("Synthesized copy page handler (%u instructions).\n", -		 (u32)(buf - copy_page_array)); +		 (u32)(buf - &__copy_page_start));  	pr_debug("\t.set push\n");  	pr_debug("\t.set noreorder\n"); -	for (i = 0; i < (buf - copy_page_array); i++) -		pr_debug("\t.word 0x%08x\n", copy_page_array[i]); +	for (i = 0; i < (buf - &__copy_page_start); i++) +		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);  	pr_debug("\t.set pop\n");  }  #ifdef CONFIG_SIBYTE_DMA_PAGEOPS +extern void clear_page_cpu(void *page); +extern void copy_page_cpu(void *to, void *from);  /*   * Pad descriptors to cacheline, since each is exclusively owned by a  | 
