Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/c-octeon.c          3
-rw-r--r--   arch/mips/mm/c-r3k.c             1
-rw-r--r--   arch/mips/mm/c-r4k.c           311
-rw-r--r--   arch/mips/mm/cache.c            10
-rw-r--r--   arch/mips/mm/cex-sb1.S           1
-rw-r--r--   arch/mips/mm/dma-default.c       6
-rw-r--r--   arch/mips/mm/hugetlbpage.c       6
-rw-r--r--   arch/mips/mm/init.c             99
-rw-r--r--   arch/mips/mm/page.c              5
-rw-r--r--   arch/mips/mm/sc-mips.c           4
-rw-r--r--   arch/mips/mm/sc-rm7k.c           1
-rw-r--r--   arch/mips/mm/tlb-funcs.S         6
-rw-r--r--   arch/mips/mm/tlb-r3k.c           1
-rw-r--r--   arch/mips/mm/tlb-r4k.c         176
-rw-r--r--   arch/mips/mm/tlb-r8k.c           1
-rw-r--r--   arch/mips/mm/tlbex.c           324
-rw-r--r--   arch/mips/mm/uasm-micromips.c   16
-rw-r--r--   arch/mips/mm/uasm-mips.c        17
-rw-r--r--   arch/mips/mm/uasm.c             59
19 files changed, 658 insertions, 389 deletions
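
The bulk of the churn below is in c-r4k.c and tlbex.c. In c-r4k.c, the per-line-size selection of the cache-blast routines moves from if/else chains to switch statements and gains 128-byte line support; the chosen routine is stored in a function pointer once at probe time, so the flush hot path never re-tests the line size. A minimal standalone sketch of that dispatch pattern (the names and printed strings are illustrative, not the kernel's exact symbols):

    #include <stdio.h>

    /* One blast routine per possible dcache line size; the kernel
     * generates these from templates in asm/r4kcache.h. */
    static void blast_dcache16(void)  { puts("blast 16-byte lines"); }
    static void blast_dcache32(void)  { puts("blast 32-byte lines"); }
    static void blast_dcache64(void)  { puts("blast 64-byte lines"); }
    static void blast_dcache128(void) { puts("blast 128-byte lines"); }
    static void cache_noop(void)      { /* no dcache present */ }

    /* Selected once at boot, then called through the pointer on
     * every flush. */
    static void (*r4k_blast_dcache)(void);

    static void r4k_blast_dcache_setup(unsigned long dc_lsize)
    {
            switch (dc_lsize) {
            case 0:   r4k_blast_dcache = cache_noop;      break;
            case 16:  r4k_blast_dcache = blast_dcache16;  break;
            case 32:  r4k_blast_dcache = blast_dcache32;  break;
            case 64:  r4k_blast_dcache = blast_dcache64;  break;
            case 128: r4k_blast_dcache = blast_dcache128; break;
            default:  r4k_blast_dcache = cache_noop;      break;
            }
    }

    int main(void)
    {
            r4k_blast_dcache_setup(128);    /* as probed from Config1 */
            r4k_blast_dcache();             /* no size test on the hot path */
            return 0;
    }
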
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index c8efdb5b6ee..05b1d7cf951 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -6,7 +6,6 @@   * Copyright (C) 2005-2007 Cavium Networks   */  #include <linux/export.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/sched.h>  #include <linux/smp.h> @@ -138,8 +137,10 @@ static void octeon_flush_cache_sigtramp(unsigned long addr)  {  	struct vm_area_struct *vma; +	down_read(&current->mm->mmap_sem);  	vma = find_vma(current->mm, addr);  	octeon_flush_icache_all_cores(vma); +	up_read(&current->mm->mmap_sem);  } diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 2fcde0c8ea0..135ec313c1f 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -9,7 +9,6 @@   * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov   * Copyright (C) 2001, 2004, 2007  Maciej W. Rozycki   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/sched.h>  #include <linux/smp.h> diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 627883bc6d5..f2e8302fa70 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -7,6 +7,7 @@   * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)   * Copyright (C) 1999, 2000 Silicon Graphics, Inc.   */ +#include <linux/cpu_pm.h>  #include <linux/hardirq.h>  #include <linux/init.h>  #include <linux/highmem.h> @@ -50,14 +51,14 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)  {  	preempt_disable(); -#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) +#ifndef CONFIG_MIPS_MT_SMP  	smp_call_function(func, info, 1);  #endif  	func(info);  	preempt_enable();  } -#if defined(CONFIG_MIPS_CMP) +#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)  #define cpu_has_safe_index_cacheops 0  #else  #define cpu_has_safe_index_cacheops 1 @@ -105,24 +106,61 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)  static inline void r4k_blast_dcache_page_dc64(unsigned long addr)  { -	R4600_HIT_CACHEOP_WAR_IMPL;  	blast_dcache64_page(addr);  } +static inline void r4k_blast_dcache_page_dc128(unsigned long addr) +{ +	blast_dcache128_page(addr); +} +  static void r4k_blast_dcache_page_setup(void)  {  	unsigned long  dc_lsize = cpu_dcache_line_size(); -	if (dc_lsize == 0) +	switch (dc_lsize) { +	case 0:  		r4k_blast_dcache_page = (void *)cache_noop; -	else if (dc_lsize == 16) +		break; +	case 16:  		r4k_blast_dcache_page = blast_dcache16_page; -	else if (dc_lsize == 32) +		break; +	case 32:  		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; -	else if (dc_lsize == 64) +		break; +	case 64:  		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; +		break; +	case 128: +		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128; +		break; +	default: +		break; +	} +} + +#ifndef CONFIG_EVA +#define r4k_blast_dcache_user_page  r4k_blast_dcache_page +#else + +static void (*r4k_blast_dcache_user_page)(unsigned long addr); + +static void r4k_blast_dcache_user_page_setup(void) +{ +	unsigned long  dc_lsize = cpu_dcache_line_size(); + +	if (dc_lsize == 0) +		r4k_blast_dcache_user_page = (void *)cache_noop; +	else if (dc_lsize == 16) +		r4k_blast_dcache_user_page = blast_dcache16_user_page; +	else if (dc_lsize == 32) +		r4k_blast_dcache_user_page = blast_dcache32_user_page; +	else if (dc_lsize == 64) +		r4k_blast_dcache_user_page = blast_dcache64_user_page;  } +#endif +  static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);  static void r4k_blast_dcache_page_indexed_setup(void) @@ -137,6
+175,8 @@ static void r4k_blast_dcache_page_indexed_setup(void)  		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;  	else if (dc_lsize == 64)  		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; +	else if (dc_lsize == 128) +		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;  }  void (* r4k_blast_dcache)(void); @@ -154,6 +194,8 @@ static void r4k_blast_dcache_setup(void)  		r4k_blast_dcache = blast_dcache32;  	else if (dc_lsize == 64)  		r4k_blast_dcache = blast_dcache64; +	else if (dc_lsize == 128) +		r4k_blast_dcache = blast_dcache128;  }  /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ @@ -237,12 +279,37 @@ static void r4k_blast_icache_page_setup(void)  		r4k_blast_icache_page = (void *)cache_noop;  	else if (ic_lsize == 16)  		r4k_blast_icache_page = blast_icache16_page; +	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2) +		r4k_blast_icache_page = loongson2_blast_icache32_page;  	else if (ic_lsize == 32)  		r4k_blast_icache_page = blast_icache32_page;  	else if (ic_lsize == 64)  		r4k_blast_icache_page = blast_icache64_page; +	else if (ic_lsize == 128) +		r4k_blast_icache_page = blast_icache128_page;  } +#ifndef CONFIG_EVA +#define r4k_blast_icache_user_page  r4k_blast_icache_page +#else + +static void (*r4k_blast_icache_user_page)(unsigned long addr); + +static void __cpuinit r4k_blast_icache_user_page_setup(void) +{ +	unsigned long ic_lsize = cpu_icache_line_size(); + +	if (ic_lsize == 0) +		r4k_blast_icache_user_page = (void *)cache_noop; +	else if (ic_lsize == 16) +		r4k_blast_icache_user_page = blast_icache16_user_page; +	else if (ic_lsize == 32) +		r4k_blast_icache_user_page = blast_icache32_user_page; +	else if (ic_lsize == 64) +		r4k_blast_icache_user_page = blast_icache64_user_page; +} + +#endif  static void (* r4k_blast_icache_page_indexed)(unsigned long addr); @@ -261,6 +328,9 @@ static void r4k_blast_icache_page_indexed_setup(void)  		else if (TX49XX_ICACHE_INDEX_INV_WAR)  			r4k_blast_icache_page_indexed =  				tx49_blast_icache32_page_indexed; +		else if (current_cpu_type() == CPU_LOONGSON2) +			r4k_blast_icache_page_indexed = +				loongson2_blast_icache32_page_indexed;  		else  			r4k_blast_icache_page_indexed =  				blast_icache32_page_indexed; @@ -284,10 +354,14 @@ static void r4k_blast_icache_setup(void)  			r4k_blast_icache = blast_r4600_v1_icache32;  		else if (TX49XX_ICACHE_INDEX_INV_WAR)  			r4k_blast_icache = tx49_blast_icache32; +		else if (current_cpu_type() == CPU_LOONGSON2) +			r4k_blast_icache = loongson2_blast_icache32;  		else  			r4k_blast_icache = blast_icache32;  	} else if (ic_lsize == 64)  		r4k_blast_icache = blast_icache64; +	else if (ic_lsize == 128) +		r4k_blast_icache = blast_icache128;  }  static void (* r4k_blast_scache_page)(unsigned long addr); @@ -346,14 +420,9 @@ static void r4k_blast_scache_setup(void)  static inline void local_r4k___flush_cache_all(void * args)  { -#if defined(CONFIG_CPU_LOONGSON2) -	r4k_blast_scache(); -	return; -#endif -	r4k_blast_dcache(); -	r4k_blast_icache(); -  	switch (current_cpu_type()) { +	case CPU_LOONGSON2: +	case CPU_LOONGSON3:  	case CPU_R4000SC:  	case CPU_R4000MC:  	case CPU_R4400SC: @@ -361,7 +430,18 @@ static inline void local_r4k___flush_cache_all(void * args)  	case CPU_R10000:  	case CPU_R12000:  	case CPU_R14000: +		/* +		 * These caches are inclusive caches, that is, if something +		 * is not cached in the S-cache, we know it also won't be +		 * in one of the primary caches. 
+		 */  		r4k_blast_scache(); +		break; + +	default: +		r4k_blast_dcache(); +		r4k_blast_icache(); +		break;  	}  } @@ -372,7 +452,7 @@ static void r4k___flush_cache_all(void)  static inline int has_valid_asid(const struct mm_struct *mm)  { -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) +#ifdef CONFIG_MIPS_MT_SMP  	int i;  	for_each_online_cpu(i) @@ -507,7 +587,8 @@ static inline void local_r4k_flush_cache_page(void *args)  	}  	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { -		r4k_blast_dcache_page(addr); +		vaddr ? r4k_blast_dcache_page(addr) : +			r4k_blast_dcache_user_page(addr);  		if (exec && !cpu_icache_snoops_remote_store)  			r4k_blast_scache_page(addr);  	} @@ -518,7 +599,8 @@ static inline void local_r4k_flush_cache_page(void *args)  			if (cpu_context(cpu, mm) != 0)  				drop_mmu_context(mm, cpu);  		} else -			r4k_blast_icache_page(addr); +			vaddr ? r4k_blast_icache_page(addr) : +				r4k_blast_icache_user_page(addr);  	}  	if (vaddr) { @@ -572,8 +654,28 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo  	if (end - start > icache_size)  		r4k_blast_icache(); -	else -		protected_blast_icache_range(start, end); +	else { +		switch (boot_cpu_type()) { +		case CPU_LOONGSON2: +			protected_loongson2_blast_icache_range(start, end); +			break; + +		default: +			protected_blast_icache_range(start, end); +			break; +		} +	} +#ifdef CONFIG_EVA +	/* +	 * Due to all possible segment mappings, there might cache aliases +	 * caused by the bootloader being in non-EVA mode, and the CPU switching +	 * to EVA during early kernel init. It's best to flush the scache +	 * to avoid having secondary cores fetching stale data and lead to +	 * kernel crashes. +	 */ +	bc_wback_inv(start, (end - start)); +	__sync(); +#endif  }  static inline void local_r4k_flush_icache_range_ipi(void *args) @@ -596,7 +698,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)  	instruction_hazard();  } -#ifdef CONFIG_DMA_NONCOHERENT +#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)  static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)  { @@ -609,6 +711,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)  			r4k_blast_scache();  		else  			blast_scache_range(addr, addr + size); +		preempt_enable();  		__sync();  		return;  	} @@ -650,6 +753,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)  			 */  			blast_inv_scache_range(addr, addr + size);  		} +		preempt_enable();  		__sync();  		return;  	} @@ -665,7 +769,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)  	bc_inv(addr, size);  	__sync();  } -#endif /* CONFIG_DMA_NONCOHERENT */ +#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */  /*   * While we're protected against bad userland addresses we don't care @@ -987,6 +1091,48 @@ static void probe_pcache(void)  		c->dcache.waybit = 0;  		break; +	case CPU_LOONGSON3: +		config1 = read_c0_config1(); +		lsize = (config1 >> 19) & 7; +		if (lsize) +			c->icache.linesz = 2 << lsize; +		else +			c->icache.linesz = 0; +		c->icache.sets = 64 << ((config1 >> 22) & 7); +		c->icache.ways = 1 + ((config1 >> 16) & 7); +		icache_size = c->icache.sets * +					  c->icache.ways * +					  c->icache.linesz; +		c->icache.waybit = 0; + +		lsize = (config1 >> 10) & 7; +		if (lsize) +			c->dcache.linesz = 2 << lsize; +		else +			c->dcache.linesz = 0; +		c->dcache.sets = 64 << ((config1 >> 13) & 7); +		c->dcache.ways = 
1 + ((config1 >> 7) & 7); +		dcache_size = c->dcache.sets * +					  c->dcache.ways * +					  c->dcache.linesz; +		c->dcache.waybit = 0; +		break; + +	case CPU_CAVIUM_OCTEON3: +		/* For now lie about the number of ways. */ +		c->icache.linesz = 128; +		c->icache.sets = 16; +		c->icache.ways = 8; +		c->icache.flags |= MIPS_CACHE_VTAG; +		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; + +		c->dcache.linesz = 128; +		c->dcache.ways = 8; +		c->dcache.sets = 8; +		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; +		c->options |= MIPS_CPU_PREFETCH; +		break; +  	default:  		if (!(config & MIPS_CONF_M))  			panic("Don't know how to probe P-caches on this cpu."); @@ -997,10 +1143,14 @@ static void probe_pcache(void)  		 */  		config1 = read_c0_config1(); -		if ((lsize = ((config1 >> 19) & 7))) -			c->icache.linesz = 2 << lsize; -		else -			c->icache.linesz = lsize; +		lsize = (config1 >> 19) & 7; + +		/* IL == 7 is reserved */ +		if (lsize == 7) +			panic("Invalid icache line size"); + +		c->icache.linesz = lsize ? 2 << lsize : 0; +  		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);  		c->icache.ways = 1 + ((config1 >> 16) & 7); @@ -1017,10 +1167,14 @@ static void probe_pcache(void)  		 */  		c->dcache.flags = 0; -		if ((lsize = ((config1 >> 10) & 7))) -			c->dcache.linesz = 2 << lsize; -		else -			c->dcache.linesz= lsize; +		lsize = (config1 >> 10) & 7; + +		/* DL == 7 is reserved */ +		if (lsize == 7) +			panic("Invalid dcache line size"); + +		c->dcache.linesz = lsize ? 2 << lsize : 0; +  		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);  		c->dcache.ways = 1 + ((config1 >> 7) & 7); @@ -1082,11 +1236,21 @@ static void probe_pcache(void)  	case CPU_34K:  	case CPU_74K:  	case CPU_1004K: -		if (current_cpu_type() == CPU_74K) +	case CPU_1074K: +	case CPU_INTERAPTIV: +	case CPU_P5600: +	case CPU_PROAPTIV: +	case CPU_M5150: +		if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))  			alias_74k_erratum(c); -		if ((read_c0_config7() & (1 << 16))) { -			/* effectively physically indexed dcache, -			   thus no virtual aliases. */ +		if (!(read_c0_config7() & MIPS_CONF7_IAR) && +		    (c->icache.waysize > PAGE_SIZE)) +			c->icache.flags |= MIPS_CACHE_ALIASES; +		if (read_c0_config7() & MIPS_CONF7_AR) { +			/* +			 * Effectively physically indexed dcache, +			 * thus no virtual aliases. 
+			*/  		c->dcache.flags |= MIPS_CACHE_PINDEX;  			break;  		} @@ -1107,15 +1271,14 @@ static void probe_pcache(void)  	case CPU_ALCHEMY:  		c->icache.flags |= MIPS_CACHE_IC_F_DC;  		break; -	} -#ifdef	CONFIG_CPU_LOONGSON2 -	/* -	 * LOONGSON2 has 4 way icache, but when using indexed cache op, -	 * one op will act on all 4 ways -	 */ -	c->icache.ways = 1; -#endif +	case CPU_LOONGSON2: +		/* +		 * LOONGSON2 has 4 way icache, but when using indexed cache op, +		 * one op will act on all 4 ways +		 */ +		c->icache.ways = 1; +	}  	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",  	       icache_size >> 10, @@ -1191,7 +1354,6 @@ static int probe_scache(void)  	return 1;  } -#if defined(CONFIG_CPU_LOONGSON2)  static void __init loongson2_sc_init(void)  {  	struct cpuinfo_mips *c = &current_cpu_data; @@ -1207,7 +1369,33 @@ static void __init loongson2_sc_init(void)  	c->options |= MIPS_CPU_INCLUSIVE_CACHES;  } -#endif + +static void __init loongson3_sc_init(void) +{ +	struct cpuinfo_mips *c = &current_cpu_data; +	unsigned int config2, lsize; + +	config2 = read_c0_config2(); +	lsize = (config2 >> 4) & 15; +	if (lsize) +		c->scache.linesz = 2 << lsize; +	else +		c->scache.linesz = 0; +	c->scache.sets = 64 << ((config2 >> 8) & 15); +	c->scache.ways = 1 + (config2 & 15); + +	scache_size = c->scache.sets * +				  c->scache.ways * +				  c->scache.linesz; +	/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ +	scache_size *= 4; +	c->scache.waybit = 0; +	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", +	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); +	if (scache_size) +		c->options |= MIPS_CPU_INCLUSIVE_CACHES; +	return; +}  extern int r5k_sc_init(void);  extern int rm7k_sc_init(void); @@ -1257,11 +1445,15 @@ static void setup_scache(void)  #endif  		return; -#if defined(CONFIG_CPU_LOONGSON2)  	case CPU_LOONGSON2:  		loongson2_sc_init();  		return; -#endif + +	case CPU_LOONGSON3: +		loongson3_sc_init(); +		return; + +	case CPU_CAVIUM_OCTEON3:  	case CPU_XLP:  		/* don't need to worry about L2, fully coherent */  		return; @@ -1432,6 +1624,10 @@ void r4k_cache_init(void)  	r4k_blast_scache_page_setup();  	r4k_blast_scache_page_indexed_setup();  	r4k_blast_scache_setup(); +#ifdef CONFIG_EVA +	r4k_blast_dcache_user_page_setup(); +	r4k_blast_icache_user_page_setup(); +#endif  	/*  	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
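
The probe code above reads cache geometry straight out of the CP0 Config1/Config2 fields: a line-size code (0 meaning no cache, otherwise 2 << code bytes), a sets code, and a ways code. A standalone model of the Config1 icache decode as done in the Loongson-3 case of probe_pcache() (the generic MIPS32 path instead computes sets as 32 << ((code + 1) & 7); the sample register value below is made up):

    #include <stdio.h>

    struct cache_desc { unsigned linesz, sets, ways, size; };

    /* Field positions follow the Loongson-3 branch of probe_pcache()
     * above: IL at bits 21:19, IS at 24:22, IA at 18:16. */
    static struct cache_desc decode_icache(unsigned config1)
    {
            struct cache_desc c;
            unsigned lsize = (config1 >> 19) & 7;       /* IL: line-size code */

            c.linesz = lsize ? 2u << lsize : 0;         /* 0 => no icache */
            c.sets   = 64u << ((config1 >> 22) & 7);    /* IS: sets per way */
            c.ways   = 1 + ((config1 >> 16) & 7);       /* IA: associativity */
            c.size   = c.sets * c.ways * c.linesz;
            return c;
    }

    int main(void)
    {
            /* Hypothetical Config1: IL=4 (32-byte lines), IS=2, IA=3. */
            struct cache_desc c =
                    decode_icache((4u << 19) | (2u << 22) | (3u << 16));

            printf("%u bytes: %u sets x %u ways x %u-byte lines\n",
                   c.size, c.sets, c.ways, c.linesz);
            return 0;
    }
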
@@ -1463,7 +1659,7 @@ void r4k_cache_init(void)  	flush_icache_range	= r4k_flush_icache_range;  	local_flush_icache_range	= local_r4k_flush_icache_range; -#if defined(CONFIG_DMA_NONCOHERENT) +#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)  	if (coherentio) {  		_dma_cache_wback_inv	= (void *)cache_noop;  		_dma_cache_wback	= (void *)cache_noop; @@ -1488,3 +1684,26 @@ void r4k_cache_init(void)  	coherency_setup();  	board_cache_error_setup = r4k_cache_error_setup;  } + +static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, +			       void *v) +{ +	switch (cmd) { +	case CPU_PM_ENTER_FAILED: +	case CPU_PM_EXIT: +		coherency_setup(); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block r4k_cache_pm_notifier_block = { +	.notifier_call = r4k_cache_pm_notifier, +}; + +int __init r4k_cache_init_pm(void) +{ +	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block); +} +arch_initcall(r4k_cache_init_pm); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 15f813c303b..f7b91d3a371 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -8,7 +8,6 @@   */  #include <linux/fs.h>  #include <linux/fcntl.h> -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/linkage.h>  #include <linux/module.h> @@ -30,15 +29,16 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,  void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,  	unsigned long pfn);  void (*flush_icache_range)(unsigned long start, unsigned long end); +EXPORT_SYMBOL_GPL(flush_icache_range);  void (*local_flush_icache_range)(unsigned long start, unsigned long end); +EXPORT_SYMBOL_GPL(local_flush_icache_range);  void (*__flush_cache_vmap)(void);  void (*__flush_cache_vunmap)(void);  void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); -void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size); -  EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); +void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);  /* MIPS specific cache operations */  void (*flush_cache_sigtramp)(unsigned long addr); @@ -50,7 +50,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);  EXPORT_SYMBOL(flush_data_cache_page);  EXPORT_SYMBOL(flush_icache_all); -#ifdef CONFIG_DMA_NONCOHERENT +#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)  /* DMA cache operations. */  void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); @@ -59,7 +59,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);  EXPORT_SYMBOL(_dma_cache_wback_inv); -#endif /* CONFIG_DMA_NONCOHERENT */ +#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */  /*   * We could optimize the case where the cache argument is not BCACHE but diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S index 191cf6e0c72..5d5f29681a2 100644 --- a/arch/mips/mm/cex-sb1.S +++ b/arch/mips/mm/cex-sb1.S @@ -15,7 +15,6 @@   * along with this program; if not, write to the Free Software   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.   */ -#include <linux/init.h>  #include <asm/asm.h>  #include <asm/regdef.h> diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 5f8b9551258..44b6dff5aba 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -23,6 +23,7 @@  #include <dma-coherence.h> +#ifdef CONFIG_DMA_MAYBE_COHERENT  int coherentio = 0;	/* User defined DMA coherency from command line. 
*/  EXPORT_SYMBOL_GPL(coherentio);  int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */ @@ -42,6 +43,7 @@ static int __init setnocoherentio(char *str)  	return 0;  }  early_param("nocoherentio", setnocoherentio); +#endif  static inline struct page *dma_addr_to_page(struct device *dev,  	dma_addr_t dma_addr) @@ -297,7 +299,6 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,  static void mips_dma_sync_single_for_device(struct device *dev,  	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)  { -	plat_extra_sync_for_device(dev);  	if (!plat_device_is_coherent(dev))  		__dma_sync(dma_addr_to_page(dev, dma_handle),  			   dma_handle & ~PAGE_MASK, size, direction); @@ -327,7 +328,7 @@ static void mips_dma_sync_sg_for_device(struct device *dev,  int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)  { -	return plat_dma_mapping_error(dev, dma_addr); +	return 0;  }  int mips_dma_supported(struct device *dev, u64 mask) @@ -340,7 +341,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,  {  	BUG_ON(direction == DMA_NONE); -	plat_extra_sync_for_device(dev);  	if (!plat_device_is_coherent(dev))  		__dma_sync_virtual(vaddr, size, direction);  } diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index 01fda4419ed..4ec8ee10d37 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -11,7 +11,6 @@   * Copyright (C) 2008, 2009 Cavium Networks, Inc.   */ -#include <linux/init.h>  #include <linux/fs.h>  #include <linux/mm.h>  #include <linux/hugetlb.h> @@ -85,11 +84,6 @@ int pud_huge(pud_t pud)  	return (pud_val(pud) & _PAGE_HUGE) != 0;  } -int pmd_huge_support(void) -{ -	return 1; -} -  struct page *  follow_huge_pmd(struct mm_struct *mm, unsigned long address,  		pmd_t *pmd, int write) diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index e205ef598e9..6e4413330e3 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -44,27 +44,6 @@  #include <asm/tlb.h>  #include <asm/fixmap.h> -/* Atomicity and interruptability */ -#ifdef CONFIG_MIPS_MT_SMTC - -#include <asm/mipsmtregs.h> - -#define ENTER_CRITICAL(flags) \ -	{ \ -	unsigned int mvpflags; \ -	local_irq_save(flags);\ -	mvpflags = dvpe() -#define EXIT_CRITICAL(flags) \ -	evpe(mvpflags); \ -	local_irq_restore(flags); \ -	} -#else - -#define ENTER_CRITICAL(flags) local_irq_save(flags) -#define EXIT_CRITICAL(flags) local_irq_restore(flags) - -#endif /* CONFIG_MIPS_MT_SMTC */ -  /*   * We have up to 8 empty zeroed pages so we can map one of the right colour   * when needed.	 
This is necessary only on R4000 / R4400 SC and MC versions @@ -100,21 +79,7 @@ void setup_zero_pages(void)  	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;  } -#ifdef CONFIG_MIPS_MT_SMTC -static pte_t *kmap_coherent_pte; -static void __init kmap_coherent_init(void) -{ -	unsigned long vaddr; - -	/* cache the first coherent kmap pte */ -	vaddr = __fix_to_virt(FIX_CMAP_BEGIN); -	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); -} -#else -static inline void kmap_coherent_init(void) {} -#endif - -void *kmap_coherent(struct page *page, unsigned long addr) +static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)  {  	enum fixed_addresses idx;  	unsigned long vaddr, flags, entrylo; @@ -124,62 +89,50 @@ void *kmap_coherent(struct page *page, unsigned long addr)  	BUG_ON(Page_dcache_dirty(page)); -	inc_preempt_count(); +	pagefault_disable();  	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); -#ifdef CONFIG_MIPS_MT_SMTC -	idx += FIX_N_COLOURS * smp_processor_id() + -		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0); -#else  	idx += in_interrupt() ? FIX_N_COLOURS : 0; -#endif  	vaddr = __fix_to_virt(FIX_CMAP_END - idx); -	pte = mk_pte(page, PAGE_KERNEL); +	pte = mk_pte(page, prot);  #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)  	entrylo = pte.pte_high;  #else  	entrylo = pte_to_entrylo(pte_val(pte));  #endif -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	old_ctx = read_c0_entryhi();  	write_c0_entryhi(vaddr & (PAGE_MASK << 1));  	write_c0_entrylo0(entrylo);  	write_c0_entrylo1(entrylo); -#ifdef CONFIG_MIPS_MT_SMTC -	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); -	/* preload TLB instead of local_flush_tlb_one() */ -	mtc0_tlbw_hazard(); -	tlb_probe(); -	tlb_probe_hazard(); -	tlbidx = read_c0_index(); -	mtc0_tlbw_hazard(); -	if (tlbidx < 0) -		tlb_write_random(); -	else -		tlb_write_indexed(); -#else  	tlbidx = read_c0_wired();  	write_c0_wired(tlbidx + 1);  	write_c0_index(tlbidx);  	mtc0_tlbw_hazard();  	tlb_write_indexed(); -#endif  	tlbw_use_hazard();  	write_c0_entryhi(old_ctx); -	EXIT_CRITICAL(flags); +	local_irq_restore(flags);  	return (void*) vaddr;  } -#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) +void *kmap_coherent(struct page *page, unsigned long addr) +{ +	return __kmap_pgprot(page, addr, PAGE_KERNEL); +} + +void *kmap_noncoherent(struct page *page, unsigned long addr) +{ +	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC); +}  void kunmap_coherent(void)  { -#ifndef CONFIG_MIPS_MT_SMTC  	unsigned int wired;  	unsigned long flags, old_ctx; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	old_ctx = read_c0_entryhi();  	wired = read_c0_wired() - 1;  	write_c0_wired(wired); @@ -191,10 +144,8 @@ void kunmap_coherent(void)  	tlb_write_indexed();  	tlbw_use_hazard();  	write_c0_entryhi(old_ctx); -	EXIT_CRITICAL(flags); -#endif -	dec_preempt_count(); -	preempt_check_resched(); +	local_irq_restore(flags); +	pagefault_enable();  }  void copy_user_highpage(struct page *to, struct page *from, @@ -259,7 +210,7 @@ EXPORT_SYMBOL_GPL(copy_from_user_page);  void __init fixrange_init(unsigned long start, unsigned long end,  	pgd_t *pgd_base)  { -#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) +#ifdef CONFIG_HIGHMEM  	pgd_t *pgd;  	pud_t *pud;  	pmd_t *pmd; @@ -330,8 +281,6 @@ void __init paging_init(void)  #ifdef CONFIG_HIGHMEM  	kmap_init();  #endif -	kmap_coherent_init(); -  #ifdef CONFIG_ZONE_DMA  	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;  #endif @@ -425,10 +374,20 @@ void free_initrd_mem(unsigned 
long start, unsigned long end)  }  #endif +void (*free_init_pages_eva)(void *begin, void *end) = NULL; +  void __init_refok free_initmem(void)  {  	prom_free_prom_memory(); -	free_initmem_default(POISON_FREE_INITMEM); +	/* +	 * Let the platform define a specific function to free the +	 * init section since EVA may have used any possible mapping +	 * between virtual and physical addresses. +	 */ +	if (free_init_pages_eva) +		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end); +	else +		free_initmem_default(POISON_FREE_INITMEM);  }  #ifndef CONFIG_MIPS_PGD_C0_CONTEXT diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index cbd81d17793..b611102e23b 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -8,7 +8,6 @@   * Copyright (C) 2008  Thiemo Seufer   * Copyright (C) 2012  MIPS Technologies, Inc.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/sched.h>  #include <linux/smp.h> @@ -274,7 +273,7 @@ void build_clear_page(void)  		uasm_i_ori(&buf, A2, A0, off);  	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) -		uasm_i_lui(&buf, AT, 0xa000); +		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));  	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)  				* cache_line_size : 0; @@ -425,7 +424,7 @@ void build_copy_page(void)  		uasm_i_ori(&buf, A2, A0, off);  	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) -		uasm_i_lui(&buf, AT, 0xa000); +		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));  	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *  				cache_line_size : 0; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 08d05aee878..99eb8fabab6 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -76,6 +76,10 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)  	case CPU_34K:  	case CPU_74K:  	case CPU_1004K: +	case CPU_1074K: +	case CPU_INTERAPTIV: +	case CPU_PROAPTIV: +	case CPU_P5600:  	case CPU_BMIPS5000:  		if (config2 & (1 << 12))  			return 0; diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index aaffbba3370..9ac1efcfbcc 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c @@ -6,7 +6,6 @@  #undef DEBUG -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/mm.h>  #include <linux/bitops.h> diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S index 79bca3130bd..a5427c6e975 100644 --- a/arch/mips/mm/tlb-funcs.S +++ b/arch/mips/mm/tlb-funcs.S @@ -16,12 +16,12 @@  #define FASTPATH_SIZE	128 -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT +EXPORT(tlbmiss_handler_setup_pgd_start)  LEAF(tlbmiss_handler_setup_pgd) -	.space		16 * 4 +1:	j	1b		/* Dummy, will be replaced. */ +	.space	64  END(tlbmiss_handler_setup_pgd)  EXPORT(tlbmiss_handler_setup_pgd_end) -#endif  LEAF(handle_tlbm)  	.space		FASTPATH_SIZE * 4 diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 9aca10994cd..d657493ef56 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -10,7 +10,6 @@   * Copyright (C) 2002  Ralf Baechle   * Copyright (C) 2002  Maciej W. Rozycki   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/sched.h>  #include <linux/smp.h> diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index bb3a5f643e9..3914e27456f 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -8,6 +8,7 @@   * Carsten Langgaard, carstenl@mips.com   * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.   
*/ +#include <linux/cpu_pm.h>  #include <linux/init.h>  #include <linux/sched.h>  #include <linux/smp.h> @@ -20,61 +21,40 @@  #include <asm/bootinfo.h>  #include <asm/mmu_context.h>  #include <asm/pgtable.h> +#include <asm/tlb.h>  #include <asm/tlbmisc.h>  extern void build_tlb_refill_handler(void);  /* - * Make sure all entries differ.  If they're not different - * MIPS32 will take revenge ... + * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, + * unfortunately, itlb is not totally transparent to software.   */ -#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) - -/* Atomicity and interruptability */ -#ifdef CONFIG_MIPS_MT_SMTC - -#include <asm/smtc.h> -#include <asm/mipsmtregs.h> - -#define ENTER_CRITICAL(flags) \ -	{ \ -	unsigned int mvpflags; \ -	local_irq_save(flags);\ -	mvpflags = dvpe() -#define EXIT_CRITICAL(flags) \ -	evpe(mvpflags); \ -	local_irq_restore(flags); \ +static inline void flush_itlb(void) +{ +	switch (current_cpu_type()) { +	case CPU_LOONGSON2: +	case CPU_LOONGSON3: +		write_c0_diag(4); +		break; +	default: +		break;  	} -#else - -#define ENTER_CRITICAL(flags) local_irq_save(flags) -#define EXIT_CRITICAL(flags) local_irq_restore(flags) - -#endif /* CONFIG_MIPS_MT_SMTC */ - -#if defined(CONFIG_CPU_LOONGSON2) -/* - * LOONGSON2 has a 4 entry itlb which is a subset of dtlb, - * unfortrunately, itlb is not totally transparent to software. - */ -#define FLUSH_ITLB write_c0_diag(4); - -#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); } - -#else - -#define FLUSH_ITLB -#define FLUSH_ITLB_VM(vma) +} -#endif +static inline void flush_itlb_vm(struct vm_area_struct *vma) +{ +	if (vma->vm_flags & VM_EXEC) +		flush_itlb(); +}  void local_flush_tlb_all(void)  {  	unsigned long flags;  	unsigned long old_ctx; -	int entry; +	int entry, ftlbhighset; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	/* Save old context and create impossible VPN2 value */  	old_ctx = read_c0_entryhi();  	write_c0_entrylo0(0); @@ -83,18 +63,35 @@ void local_flush_tlb_all(void)  	entry = read_c0_wired();  	/* Blast 'em all away. */ -	while (entry < current_cpu_data.tlbsize) { -		/* Make sure all entries differ. */ -		write_c0_entryhi(UNIQUE_ENTRYHI(entry)); -		write_c0_index(entry); -		mtc0_tlbw_hazard(); -		tlb_write_indexed(); -		entry++; +	if (cpu_has_tlbinv) { +		if (current_cpu_data.tlbsizevtlb) { +			write_c0_index(0); +			mtc0_tlbw_hazard(); +			tlbinvf();  /* invalidate VTLB */ +		} +		ftlbhighset = current_cpu_data.tlbsizevtlb + +			current_cpu_data.tlbsizeftlbsets; +		for (entry = current_cpu_data.tlbsizevtlb; +		     entry < ftlbhighset; +		     entry++) { +			write_c0_index(entry); +			mtc0_tlbw_hazard(); +			tlbinvf();  /* invalidate one FTLB set */ +		} +	} else { +		while (entry < current_cpu_data.tlbsize) { +			/* Make sure all entries differ. 
*/ +			write_c0_entryhi(UNIQUE_ENTRYHI(entry)); +			write_c0_index(entry); +			mtc0_tlbw_hazard(); +			tlb_write_indexed(); +			entry++; +		}  	}  	tlbw_use_hazard();  	write_c0_entryhi(old_ctx); -	FLUSH_ITLB; -	EXIT_CRITICAL(flags); +	flush_itlb(); +	local_irq_restore(flags);  }  EXPORT_SYMBOL(local_flush_tlb_all); @@ -124,11 +121,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,  	if (cpu_context(cpu, mm) != 0) {  		unsigned long size, flags; -		ENTER_CRITICAL(flags); +		local_irq_save(flags);  		start = round_down(start, PAGE_SIZE << 1);  		end = round_up(end, PAGE_SIZE << 1);  		size = (end - start) >> (PAGE_SHIFT + 1); -		if (size <= current_cpu_data.tlbsize/2) { +		if (size <= (current_cpu_data.tlbsizeftlbsets ? +			     current_cpu_data.tlbsize / 8 : +			     current_cpu_data.tlbsize / 2)) {  			int oldpid = read_c0_entryhi();  			int newpid = cpu_asid(cpu, mm); @@ -155,8 +154,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,  		} else {  			drop_mmu_context(mm, cpu);  		} -		FLUSH_ITLB; -		EXIT_CRITICAL(flags); +		flush_itlb(); +		local_irq_restore(flags);  	}  } @@ -164,10 +163,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)  {  	unsigned long size, flags; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;  	size = (size + 1) >> 1; -	if (size <= current_cpu_data.tlbsize / 2) { +	if (size <= (current_cpu_data.tlbsizeftlbsets ? +		     current_cpu_data.tlbsize / 8 : +		     current_cpu_data.tlbsize / 2)) {  		int pid = read_c0_entryhi();  		start &= (PAGE_MASK << 1); @@ -197,8 +198,8 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)  	} else {  		local_flush_tlb_all();  	} -	FLUSH_ITLB; -	EXIT_CRITICAL(flags); +	flush_itlb(); +	local_irq_restore(flags);  }  void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) @@ -211,7 +212,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)  		newpid = cpu_asid(cpu, vma->vm_mm);  		page &= (PAGE_MASK << 1); -		ENTER_CRITICAL(flags); +		local_irq_save(flags);  		oldpid = read_c0_entryhi();  		write_c0_entryhi(page | newpid);  		mtc0_tlbw_hazard(); @@ -230,8 +231,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)  	finish:  		write_c0_entryhi(oldpid); -		FLUSH_ITLB_VM(vma); -		EXIT_CRITICAL(flags); +		flush_itlb_vm(vma); +		local_irq_restore(flags);  	}  } @@ -244,7 +245,7 @@ void local_flush_tlb_one(unsigned long page)  	unsigned long flags;  	int oldpid, idx; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	oldpid = read_c0_entryhi();  	page &= (PAGE_MASK << 1);  	write_c0_entryhi(page); @@ -262,8 +263,8 @@ void local_flush_tlb_one(unsigned long page)  		tlbw_use_hazard();  	}  	write_c0_entryhi(oldpid); -	FLUSH_ITLB; -	EXIT_CRITICAL(flags); +	flush_itlb(); +	local_irq_restore(flags);  }  /* @@ -286,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)  	if (current->active_mm != vma->vm_mm)  		return; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	pid = read_c0_entryhi() & ASID_MASK;  	address &= (PAGE_MASK << 1); @@ -335,8 +336,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)  			tlb_write_indexed();  	}  	tlbw_use_hazard(); -	FLUSH_ITLB_VM(vma); -	EXIT_CRITICAL(flags); +	flush_itlb_vm(vma); +	local_irq_restore(flags);  }  void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, @@ -347,7 +348,7 @@ void 
add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,  	unsigned long old_pagemask;  	unsigned long old_ctx; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	/* Save old context and create impossible VPN2 value */  	old_ctx = read_c0_entryhi();  	old_pagemask = read_c0_pagemask(); @@ -367,7 +368,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,  	tlbw_use_hazard();	/* What is the hazard here? */  	write_c0_pagemask(old_pagemask);  	local_flush_tlb_all(); -	EXIT_CRITICAL(flags); +	local_irq_restore(flags);  }  #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -377,13 +378,13 @@ int __init has_transparent_hugepage(void)  	unsigned int mask;  	unsigned long flags; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	write_c0_pagemask(PM_HUGE_MASK);  	back_to_back_c0_hazard();  	mask = read_c0_pagemask();  	write_c0_pagemask(PM_DEFAULT_MASK); -	EXIT_CRITICAL(flags); +	local_irq_restore(flags);  	return mask == PM_HUGE_MASK;  } @@ -399,7 +400,10 @@ static int __init set_ntlb(char *str)  __setup("ntlb=", set_ntlb); -void tlb_init(void) +/* + * Configure TLB (for init or after a CPU has been powered off). + */ +static void r4k_tlb_configure(void)  {  	/*  	 * You should never change this register: @@ -431,6 +435,11 @@ void tlb_init(void)  	local_flush_tlb_all();  	/* Did I tell you that ARC SUCKS?  */ +} + +void tlb_init(void) +{ +	r4k_tlb_configure();  	if (ntlb) {  		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { @@ -444,3 +453,26 @@ void tlb_init(void)  	build_tlb_refill_handler();  } + +static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, +			       void *v) +{ +	switch (cmd) { +	case CPU_PM_ENTER_FAILED: +	case CPU_PM_EXIT: +		r4k_tlb_configure(); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block r4k_tlb_pm_notifier_block = { +	.notifier_call = r4k_tlb_pm_notifier, +}; + +static int __init r4k_tlb_init_pm(void) +{ +	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); +} +arch_initcall(r4k_tlb_init_pm); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 6a99733a444..138a2ec7cc6 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -8,7 +8,6 @@   * Carsten Langgaard, carstenl@mips.com   * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.   */ -#include <linux/init.h>  #include <linux/sched.h>  #include <linux/smp.h>  #include <linux/mm.h> diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 9bb3a9363b0..e80e10bafc8 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -26,7 +26,6 @@  #include <linux/types.h>  #include <linux/smp.h>  #include <linux/string.h> -#include <linux/init.h>  #include <linux/cache.h>  #include <asm/cacheflush.h> @@ -340,10 +339,6 @@ static struct work_registers build_get_work_registers(u32 **p)  {  	struct work_registers r; -	int smp_processor_id_reg; -	int smp_processor_id_sel; -	int smp_processor_id_shift; -  	if (scratch_reg >= 0) {  		/* Save in CPU local C0_KScratch? 
*/  		UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); @@ -354,25 +349,9 @@ static struct work_registers build_get_work_registers(u32 **p)  	}  	if (num_possible_cpus() > 1) { -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT -		smp_processor_id_shift = 51; -		smp_processor_id_reg = 20; /* XContext */ -		smp_processor_id_sel = 0; -#else -# ifdef CONFIG_32BIT -		smp_processor_id_shift = 25; -		smp_processor_id_reg = 4; /* Context */ -		smp_processor_id_sel = 0; -# endif -# ifdef CONFIG_64BIT -		smp_processor_id_shift = 26; -		smp_processor_id_reg = 4; /* Context */ -		smp_processor_id_sel = 0; -# endif -#endif  		/* Get smp_processor_id */ -		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel); -		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift); +		UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); +		UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);  		/* handler_reg_save index in K0 */  		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); @@ -530,6 +509,10 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,  		switch (current_cpu_type()) {  		case CPU_M14KC:  		case CPU_74K: +		case CPU_1074K: +		case CPU_PROAPTIV: +		case CPU_P5600: +		case CPU_M5150:  			break;  		default: @@ -599,6 +582,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,  	case CPU_BMIPS4380:  	case CPU_BMIPS5000:  	case CPU_LOONGSON2: +	case CPU_LOONGSON3:  	case CPU_R5500:  		if (m4kc_tlbp_war())  			uasm_i_nop(p); @@ -641,7 +625,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,  	default:  		panic("No TLB refill handler yet (CPU type: %d)", -		      current_cpu_data.cputype); +		      current_cpu_type());  		break;  	}  } @@ -819,11 +803,11 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,  	}  	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */ -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT  	if (pgd_reg != -1) {  		/* pgd is in pgd_reg */  		UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);  	} else { +#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)  		/*  		 * &pgd << 11 stored in CONTEXT [23..63].  		 */ @@ -835,30 +819,18 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,  		/* 1 0	1 0 1  << 6  xkphys cached */  		uasm_i_ori(p, ptr, ptr, 0x540);  		uasm_i_drotr(p, ptr, ptr, 11); -	}  #elif defined(CONFIG_SMP) -# ifdef	 CONFIG_MIPS_MT_SMTC -	/* -	 * SMTC uses TCBind value as "CPU" index -	 */ -	uasm_i_mfc0(p, ptr, C0_TCBIND); -	uasm_i_dsrl_safe(p, ptr, ptr, 19); -# else -	/* -	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3 -	 * stored in CONTEXT. 
-	 */ -	uasm_i_dmfc0(p, ptr, C0_CONTEXT); -	uasm_i_dsrl_safe(p, ptr, ptr, 23); -# endif -	UASM_i_LA_mostly(p, tmp, pgdc); -	uasm_i_daddu(p, ptr, ptr, tmp); -	uasm_i_dmfc0(p, tmp, C0_BADVADDR); -	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); +		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG); +		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT); +		UASM_i_LA_mostly(p, tmp, pgdc); +		uasm_i_daddu(p, ptr, ptr, tmp); +		uasm_i_dmfc0(p, tmp, C0_BADVADDR); +		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);  #else -	UASM_i_LA_mostly(p, ptr, pgdc); -	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr); +		UASM_i_LA_mostly(p, ptr, pgdc); +		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);  #endif +	}  	uasm_l_vmalloc_done(l, *p); @@ -953,31 +925,25 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,  static void __maybe_unused  build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)  { -	long pgdc = (long)pgd_current; +	if (pgd_reg != -1) { +		/* pgd is in pgd_reg */ +		uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg); +		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ +	} else { +		long pgdc = (long)pgd_current; -	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */ +		/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */  #ifdef CONFIG_SMP -#ifdef	CONFIG_MIPS_MT_SMTC -	/* -	 * SMTC uses TCBind value as "CPU" index -	 */ -	uasm_i_mfc0(p, ptr, C0_TCBIND); -	UASM_i_LA_mostly(p, tmp, pgdc); -	uasm_i_srl(p, ptr, ptr, 19); -#else -	/* -	 * smp_processor_id() << 2 is stored in CONTEXT. -	 */ -	uasm_i_mfc0(p, ptr, C0_CONTEXT); -	UASM_i_LA_mostly(p, tmp, pgdc); -	uasm_i_srl(p, ptr, ptr, 23); -#endif -	uasm_i_addu(p, ptr, tmp, ptr); +		uasm_i_mfc0(p, ptr, SMP_CPUID_REG); +		UASM_i_LA_mostly(p, tmp, pgdc); +		uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT); +		uasm_i_addu(p, ptr, tmp, ptr);  #else -	UASM_i_LA_mostly(p, ptr, pgdc); +		UASM_i_LA_mostly(p, ptr, pgdc);  #endif -	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ -	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); +		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */ +		uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); +	}  	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */  	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);  	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */ @@ -1290,7 +1256,7 @@ static void build_r4000_tlb_refill_handler(void)  	memset(relocs, 0, sizeof(relocs));  	memset(final_handler, 0, sizeof(final_handler)); -	if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { +	if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {  		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,  							  scratch_reg);  		vmalloc_mode = refill_scratch; @@ -1349,95 +1315,100 @@ static void build_r4000_tlb_refill_handler(void)  	 * need three, with the second nop'ed and the third being  	 * unused.  	 */ -	/* Loongson2 ebase is different than r4k, we have more space */ -#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) -	if ((p - tlb_handler) > 64) -		panic("TLB refill handler space exceeded"); -#else -	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) -	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) -		&& uasm_insn_has_bdelay(relocs, -					tlb_handler + MIPS64_REFILL_INSNS - 3))) -		panic("TLB refill handler space exceeded"); -#endif - -	/* -	 * Now fold the handler in the TLB refill handler space. 
-	 */ -#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) -	f = final_handler; -	/* Simplest case, just copy the handler. */ -	uasm_copy_handler(relocs, labels, tlb_handler, p, f); -	final_len = p - tlb_handler; -#else /* CONFIG_64BIT */ -	f = final_handler + MIPS64_REFILL_INSNS; -	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { -		/* Just copy the handler. */ -		uasm_copy_handler(relocs, labels, tlb_handler, p, f); -		final_len = p - tlb_handler; -	} else { -#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT -		const enum label_id ls = label_tlb_huge_update; -#else -		const enum label_id ls = label_vmalloc; -#endif -		u32 *split; -		int ov = 0; -		int i; - -		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) -			; -		BUG_ON(i == ARRAY_SIZE(labels)); -		split = labels[i].addr; - -		/* -		 * See if we have overflown one way or the other. -		 */ -		if (split > tlb_handler + MIPS64_REFILL_INSNS || -		    split < p - MIPS64_REFILL_INSNS) -			ov = 1; - -		if (ov) { +	switch (boot_cpu_type()) { +	default: +		if (sizeof(long) == 4) { +	case CPU_LOONGSON2: +		/* Loongson2 ebase is different than r4k, we have more space */ +			if ((p - tlb_handler) > 64) +				panic("TLB refill handler space exceeded");  			/* -			 * Split two instructions before the end.  One -			 * for the branch and one for the instruction -			 * in the delay slot. +			 * Now fold the handler in the TLB refill handler space.  			 */ -			split = tlb_handler + MIPS64_REFILL_INSNS - 2; - +			f = final_handler; +			/* Simplest case, just copy the handler. */ +			uasm_copy_handler(relocs, labels, tlb_handler, p, f); +			final_len = p - tlb_handler; +			break; +		} else { +			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) +			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) +				&& uasm_insn_has_bdelay(relocs, +							tlb_handler + MIPS64_REFILL_INSNS - 3))) +				panic("TLB refill handler space exceeded");  			/* -			 * If the branch would fall in a delay slot, -			 * we must back up an additional instruction -			 * so that it is no longer in a delay slot. +			 * Now fold the handler in the TLB refill handler space.  			 */ -			if (uasm_insn_has_bdelay(relocs, split - 1)) -				split--; -		} -		/* Copy first part of the handler. */ -		uasm_copy_handler(relocs, labels, tlb_handler, split, f); -		f += split - tlb_handler; - -		if (ov) { -			/* Insert branch. */ -			uasm_l_split(&l, final_handler); -			uasm_il_b(&f, &r, label_split); -			if (uasm_insn_has_bdelay(relocs, split)) -				uasm_i_nop(&f); -			else { -				uasm_copy_handler(relocs, labels, -						  split, split + 1, f); -				uasm_move_labels(labels, f, f + 1, -1); -				f++; -				split++; +			f = final_handler + MIPS64_REFILL_INSNS; +			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { +				/* Just copy the handler. */ +				uasm_copy_handler(relocs, labels, tlb_handler, p, f); +				final_len = p - tlb_handler; +			} else { +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT +				const enum label_id ls = label_tlb_huge_update; +#else +				const enum label_id ls = label_vmalloc; +#endif +				u32 *split; +				int ov = 0; +				int i; + +				for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) +					; +				BUG_ON(i == ARRAY_SIZE(labels)); +				split = labels[i].addr; + +				/* +				 * See if we have overflown one way or the other. +				 */ +				if (split > tlb_handler + MIPS64_REFILL_INSNS || +				    split < p - MIPS64_REFILL_INSNS) +					ov = 1; + +				if (ov) { +					/* +					 * Split two instructions before the end.  
One +					 * for the branch and one for the instruction +					 * in the delay slot. +					 */ +					split = tlb_handler + MIPS64_REFILL_INSNS - 2; + +					/* +					 * If the branch would fall in a delay slot, +					 * we must back up an additional instruction +					 * so that it is no longer in a delay slot. +					 */ +					if (uasm_insn_has_bdelay(relocs, split - 1)) +						split--; +				} +				/* Copy first part of the handler. */ +				uasm_copy_handler(relocs, labels, tlb_handler, split, f); +				f += split - tlb_handler; + +				if (ov) { +					/* Insert branch. */ +					uasm_l_split(&l, final_handler); +					uasm_il_b(&f, &r, label_split); +					if (uasm_insn_has_bdelay(relocs, split)) +						uasm_i_nop(&f); +					else { +						uasm_copy_handler(relocs, labels, +								  split, split + 1, f); +						uasm_move_labels(labels, f, f + 1, -1); +						f++; +						split++; +					} +				} + +				/* Copy the rest of the handler. */ +				uasm_copy_handler(relocs, labels, split, p, final_handler); +				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + +					    (p - split);  			}  		} - -		/* Copy the rest of the handler. */ -		uasm_copy_handler(relocs, labels, split, p, final_handler); -		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + -			    (p - split); +		break;  	} -#endif /* CONFIG_64BIT */  	uasm_resolve_relocs(relocs, labels);  	pr_debug("Wrote TLB refill handler (%u instructions).\n", @@ -1451,28 +1422,31 @@ static void build_r4000_tlb_refill_handler(void)  extern u32 handle_tlbl[], handle_tlbl_end[];  extern u32 handle_tlbs[], handle_tlbs_end[];  extern u32 handle_tlbm[], handle_tlbm_end[]; +extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[]; +extern u32 tlbmiss_handler_setup_pgd_end[]; -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT -extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; - -static void build_r4000_setup_pgd(void) +static void build_setup_pgd(void)  {  	const int a0 = 4; -	const int a1 = 5; -	u32 *p = tlbmiss_handler_setup_pgd; +	const int __maybe_unused a1 = 5; +	const int __maybe_unused a2 = 6; +	u32 *p = tlbmiss_handler_setup_pgd_start;  	const int tlbmiss_handler_setup_pgd_size = -		tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; -	struct uasm_label *l = labels; -	struct uasm_reloc *r = relocs; +		tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start; +#ifndef CONFIG_MIPS_PGD_C0_CONTEXT +	long pgdc = (long)pgd_current; +#endif  	memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *  					sizeof(tlbmiss_handler_setup_pgd[0]));  	memset(labels, 0, sizeof(labels));  	memset(relocs, 0, sizeof(relocs)); -  	pgd_reg = allocate_kscratch(); - +#ifdef CONFIG_MIPS_PGD_C0_CONTEXT  	if (pgd_reg == -1) { +		struct uasm_label *l = labels; +		struct uasm_reloc *r = relocs; +  		/* PGD << 11 in c0_Context */  		/*  		 * If it is a ckseg0 address, convert to a physical @@ -1494,6 +1468,26 @@ static void build_r4000_setup_pgd(void)  		uasm_i_jr(&p, 31);  		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);  	} +#else +#ifdef CONFIG_SMP +	/* Save PGD to pgd_current[smp_processor_id()] */ +	UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG); +	UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT); +	UASM_i_LA_mostly(&p, a2, pgdc); +	UASM_i_ADDU(&p, a2, a2, a1); +	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); +#else +	UASM_i_LA_mostly(&p, a2, pgdc); +	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); +#endif /* SMP */ +	uasm_i_jr(&p, 31); + +	/* if pgd_reg is allocated, save PGD also to scratch register */ +	if (pgd_reg != -1) +		
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); +	else +		uasm_i_nop(&p); +#endif  	if (p >= tlbmiss_handler_setup_pgd_end)  		panic("tlbmiss_handler_setup_pgd space exceeded"); @@ -1504,7 +1498,6 @@ static void build_r4000_setup_pgd(void)  	dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,  					tlbmiss_handler_setup_pgd_size);  } -#endif  static void  iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) @@ -2197,10 +2190,8 @@ static void flush_tlb_handlers(void)  			   (unsigned long)handle_tlbs_end);  	local_flush_icache_range((unsigned long)handle_tlbm,  			   (unsigned long)handle_tlbm_end); -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT  	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,  			   (unsigned long)tlbmiss_handler_setup_pgd_end); -#endif  }  void build_tlb_refill_handler(void) @@ -2232,6 +2223,7 @@ void build_tlb_refill_handler(void)  		if (!run_once) {  			if (!cpu_has_local_ebase)  				build_r3000_tlb_refill_handler(); +			build_setup_pgd();  			build_r3000_tlb_load_handler();  			build_r3000_tlb_store_handler();  			build_r3000_tlb_modify_handler(); @@ -2255,9 +2247,7 @@ void build_tlb_refill_handler(void)  	default:  		if (!run_once) {  			scratch_reg = allocate_kscratch(); -#ifdef CONFIG_MIPS_PGD_C0_CONTEXT -			build_r4000_setup_pgd(); -#endif +			build_setup_pgd();  			build_r4000_tlb_load_handler();  			build_r4000_tlb_store_handler();  			build_r4000_tlb_modify_handler(); diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 060000fa653..8399ddf03a0 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -15,7 +15,6 @@  #include <linux/kernel.h>  #include <linux/types.h> -#include <linux/init.h>  #include <asm/inst.h>  #include <asm/elf.h> @@ -64,6 +63,7 @@ static struct insn insn_table_MM[] = {  	{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },  	{ insn_daddu, 0, 0 },  	{ insn_daddiu, 0, 0 }, +	{ insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },  	{ insn_dmfc0, 0, 0 },  	{ insn_dmtc0, 0, 0 },  	{ insn_dsll, 0, 0 }, @@ -79,14 +79,20 @@ static struct insn insn_table_MM[] = {  	{ insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },  	{ insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },  	{ insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM }, +	{ insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS },  	{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, +	{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },  	{ insn_ld, 0, 0 }, +	{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },  	{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },  	{ insn_lld, 0, 0 },  	{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },  	{ insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },  	{ insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD }, +	{ insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS }, +	{ insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },  	{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD }, +	{ insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },  	{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },  	{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },  	{ insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM }, @@ -95,15 +101,23 @@ static struct insn insn_table_MM[] = {  	{ 
insn_scd, 0, 0 },  	{ insn_sd, 0, 0 },  	{ insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, +	{ insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD }, +	{ insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD }, +	{ insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, +	{ insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },  	{ insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },  	{ insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD }, +	{ insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD },  	{ insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },  	{ insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },  	{ insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, +	{ insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS },  	{ insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },  	{ insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },  	{ insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },  	{ insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, +	{ insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM }, +	{ insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS },  	{ insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },  	{ insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },  	{ insn_dins, 0, 0 }, diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 0c724589854..6708a2dbf93 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -15,7 +15,6 @@  #include <linux/kernel.h>  #include <linux/types.h> -#include <linux/init.h>  #include <asm/inst.h>  #include <asm/elf.h> @@ -68,6 +67,7 @@ static struct insn insn_table[] = {  	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },  	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },  	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, +	{ insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },  	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},  	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},  	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, @@ -83,17 +83,23 @@ static struct insn insn_table[] = {  	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },  	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },  	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),	JIMM }, +	{ insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },  	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },  	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS }, +	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },  	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },  	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, +	{ insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },  	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },  	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },  	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },  	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },  	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },  	{ insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET}, +	{ insn_mfhi,  M(spec_op, 0, 0, 0, 0, mfhi_op), RD }, +	{ insn_mflo,  M(spec_op, 0, 0, 0, 0, mflo_op), RD },  	{ insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET}, +	{ insn_mul, M(spec2_op, 0, 0, 0, 0, 
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 0c724589854..6708a2dbf93 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
-#include <linux/init.h>
 
 #include <asm/inst.h>
 #include <asm/elf.h>
@@ -68,6 +67,7 @@ static struct insn insn_table[] = {
 	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
 	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
 	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+	{ insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
 	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
 	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
 	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
@@ -83,17 +83,23 @@ static struct insn insn_table[] = {
 	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
 	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
 	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),	JIMM },
+	{ insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
 	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
 	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+	{ insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
 	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },
 	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
 	{ insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
+	{ insn_mfhi,  M(spec_op, 0, 0, 0, 0, mfhi_op), RD },
+	{ insn_mflo,  M(spec_op, 0, 0, 0, 0, mflo_op), RD },
 	{ insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
+	{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
 	{ insn_ori,  M(ori_op, 0, 0, 0, 0, 0),	RS | RT | UIMM },
 	{ insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
 	{ insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
@@ -103,17 +109,26 @@ static struct insn insn_table[] = {
 	{ insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
+	{ insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
+	{ insn_slt,  M(spec_op, 0, 0, 0, 0, slt_op),  RS | RT | RD },
+	{ insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+	{ insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
 	{ insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
 	{ insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
+	{ insn_srlv,  M(spec_op, 0, 0, 0, 0, srlv_op),  RS | RT | RD },
 	{ insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),	RS | RT | RD },
 	{ insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE },
 	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
 	{ insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
 	{ insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
 	{ insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
 	{ insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
+	{ insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM },
+	{ insn_wsbh, M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD },
 	{ insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
 	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
+	{ insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
 	{ insn_invalid, 0, 0 }
 };
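Each entry above pairs the fixed bits produced by M() with a mask naming the operand fields the emitter must still splice in. A self-contained userspace model of that mechanism follows, using the standard classic-MIPS field positions and the or entry from the table (spec_op is 0 and or_op is 0x25, so its match bits are just 0x25); the encoder itself, and everything but the field-shift constants, is a sketch rather than the kernel's code. Operands are consumed in fixed RS, RT, RD order, which is why uasm needs wrapper families like I_u3u1u2() to reorder caller arguments.

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Classic MIPS register-field bit positions. */
#define RS_SH	21
#define RT_SH	16
#define RD_SH	11

enum fields { RS = 0x1, RT = 0x2, RD = 0x4 };

struct insn {
	uint32_t match;		/* fixed bits, the table's M(...) value */
	unsigned int fields;	/* operands to splice in, e.g. RS | RT | RD */
};

/* Models { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }. */
static const struct insn insn_or = { 0x25, RS | RT | RD };

static uint32_t build_insn(const struct insn *ip, ...)
{
	uint32_t op = ip->match;
	va_list ap;

	va_start(ap, ip);
	if (ip->fields & RS)
		op |= (va_arg(ap, uint32_t) & 0x1f) << RS_SH;
	if (ip->fields & RT)
		op |= (va_arg(ap, uint32_t) & 0x1f) << RT_SH;
	if (ip->fields & RD)
		op |= (va_arg(ap, uint32_t) & 0x1f) << RD_SH;
	va_end(ap);

	return op;
}

int main(void)
{
	/* or $2, $3, $4: rs=3, rt=4, rd=2; any disassembler shows 0x00641025 */
	printf("or $2, $3, $4 = %08x\n", build_insn(&insn_or, 3, 4, 2));
	return 0;
}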
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index b9d14b6c7f5..a01b0d6cedd 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -47,14 +47,16 @@ enum opcode {
 	insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
 	insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
 	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
-	insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
+	insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
 	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
-	insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx,
-	insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0,
+	insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
+	insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
+	insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
 	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
-	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
-	insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor,
-	insn_xori,
+	insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
+	insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
+	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
+	insn_xor, insn_xori, insn_yield,
 };
 
 struct insn {
@@ -137,6 +139,13 @@ Ip_u1u2u3(op)						\
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_s3s1s2(op)					\
+Ip_s3s1s2(op)						\
+{							\
+	build_insn(buf, insn##op, b, c, a);		\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u2u1u3(op)					\
 Ip_u2u1u3(op)						\
 {							\
@@ -144,6 +153,13 @@ Ip_u2u1u3(op)						\
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u3u2u1(op)					\
+Ip_u3u2u1(op)						\
+{							\
+	build_insn(buf, insn##op, c, b, a);		\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u3u1u2(op)					\
 Ip_u3u1u2(op)						\
 {							\
@@ -200,6 +216,13 @@ Ip_u1u2(op)						\
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u2u1(op)					\
+Ip_u1u2(op)						\
+{							\
+	build_insn(buf, insn##op, b, a);		\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u1s2(op)					\
 Ip_u1s2(op)						\
 {							\
@@ -237,6 +260,7 @@ I_u1u2u3(_dmfc0)
 I_u1u2u3(_dmtc0)
 I_u2u1s3(_daddiu)
 I_u3u1u2(_daddu)
+I_u1u2(_divu)
 I_u2u1u3(_dsll)
 I_u2u1u3(_dsll32)
 I_u2u1u3(_dsra)
@@ -250,14 +274,20 @@ I_u2u1msbdu3(_ext)
 I_u2u1msbu3(_ins)
 I_u1(_j)
 I_u1(_jal)
+I_u2u1(_jalr)
 I_u1(_jr)
+I_u2s3u1(_lb)
 I_u2s3u1(_ld)
+I_u2s3u1(_lh)
 I_u2s3u1(_ll)
 I_u2s3u1(_lld)
 I_u1s2(_lui)
 I_u2s3u1(_lw)
 I_u1u2u3(_mfc0)
+I_u1(_mfhi)
+I_u1(_mflo)
 I_u1u2u3(_mtc0)
+I_u3u1u2(_mul)
 I_u2u1u3(_ori)
 I_u3u1u2(_or)
 I_0(_rfe)
@@ -265,17 +295,26 @@ I_u2s3u1(_sc)
 I_u2s3u1(_scd)
 I_u2s3u1(_sd)
 I_u2u1u3(_sll)
+I_u3u2u1(_sllv)
+I_s3s1s2(_slt)
+I_u2u1s3(_sltiu)
+I_u3u1u2(_sltu)
 I_u2u1u3(_sra)
 I_u2u1u3(_srl)
+I_u3u2u1(_srlv)
 I_u2u1u3(_rotr)
 I_u3u1u2(_subu)
 I_u2s3u1(_sw)
+I_u1(_sync)
 I_0(_tlbp)
 I_0(_tlbr)
 I_0(_tlbwi)
 I_0(_tlbwr)
+I_u1(_wait);
+I_u2u1(_wsbh)
 I_u3u1u2(_xor)
 I_u2u1u3(_xori)
+I_u2u1(_yield)
 I_u2u1msbu3(_dins);
 I_u2u1msb32u3(_dinsm);
 I_u1(_syscall);
@@ -469,6 +508,14 @@ void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 }
 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
+void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
+			  unsigned int r2, int lid)
+{
+	uasm_r_mips_pc16(r, *p, lid);
+	ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
+}
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));
+
 void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
 			   int lid)
 {
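uasm_il_beq(), added at the end of this hunk, follows uasm's two-pass label scheme: the branch is emitted with a zero offset plus an R_MIPS_PC16 relocation against a label id, and the offset is patched in once the label is bound. A sketch of typical use follows; the label id, buffer, and register numbers are hypothetical, while the uasm entry points are the real interface (compare the handler builders in tlbex.c).

#include <linux/init.h>
#include <linux/string.h>
#include <asm/uasm.h>

enum label_id {
	label_done = 1,
};

UASM_L_LA(_done)	/* defines uasm_l_done(&l, p) */

static u32 stub[32] __initdata;

static void __init build_branch_stub(void)
{
	struct uasm_label labels[4], *l = labels;
	struct uasm_reloc relocs[4], *r = relocs;
	u32 *p = stub;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* if ($8 == $9) goto done; target address not yet known here */
	uasm_il_beq(&p, &r, 8, 9, label_done);
	uasm_i_nop(&p);			/* branch delay slot */
	uasm_i_addiu(&p, 2, 0, 1);	/* $2 = 1 on the fall-through path */

	uasm_l_done(&l, p);		/* bind the label at this address */
	uasm_i_jr(&p, 31);
	uasm_i_nop(&p);

	/* Patch the recorded pc-relative branch now that labels are bound. */
	uasm_resolve_relocs(relocs, labels);
}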
