diff options
Diffstat (limited to 'arch/mips/mm/tlb-r4k.c')
-rw-r--r--	arch/mips/mm/tlb-r4k.c	177
1 files changed, 105 insertions, 72 deletions
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 00b26a67a06..3914e27456f 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -8,6 +8,7 @@   * Carsten Langgaard, carstenl@mips.com   * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.   */ +#include <linux/cpu_pm.h>  #include <linux/init.h>  #include <linux/sched.h>  #include <linux/smp.h> @@ -16,64 +17,44 @@  #include <linux/module.h>  #include <asm/cpu.h> +#include <asm/cpu-type.h>  #include <asm/bootinfo.h>  #include <asm/mmu_context.h>  #include <asm/pgtable.h> +#include <asm/tlb.h>  #include <asm/tlbmisc.h>  extern void build_tlb_refill_handler(void);  /* - * Make sure all entries differ.  If they're not different - * MIPS32 will take revenge ... + * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, + * unfortunately, itlb is not totally transparent to software.   */ -#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) - -/* Atomicity and interruptability */ -#ifdef CONFIG_MIPS_MT_SMTC - -#include <asm/smtc.h> -#include <asm/mipsmtregs.h> - -#define ENTER_CRITICAL(flags) \ -	{ \ -	unsigned int mvpflags; \ -	local_irq_save(flags);\ -	mvpflags = dvpe() -#define EXIT_CRITICAL(flags) \ -	evpe(mvpflags); \ -	local_irq_restore(flags); \ +static inline void flush_itlb(void) +{ +	switch (current_cpu_type()) { +	case CPU_LOONGSON2: +	case CPU_LOONGSON3: +		write_c0_diag(4); +		break; +	default: +		break;  	} -#else - -#define ENTER_CRITICAL(flags) local_irq_save(flags) -#define EXIT_CRITICAL(flags) local_irq_restore(flags) - -#endif /* CONFIG_MIPS_MT_SMTC */ - -#if defined(CONFIG_CPU_LOONGSON2) -/* - * LOONGSON2 has a 4 entry itlb which is a subset of dtlb, - * unfortrunately, itlb is not totally transparent to software. 
- */ -#define FLUSH_ITLB write_c0_diag(4); - -#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); } - -#else - -#define FLUSH_ITLB -#define FLUSH_ITLB_VM(vma) +} -#endif +static inline void flush_itlb_vm(struct vm_area_struct *vma) +{ +	if (vma->vm_flags & VM_EXEC) +		flush_itlb(); +}  void local_flush_tlb_all(void)  {  	unsigned long flags;  	unsigned long old_ctx; -	int entry; +	int entry, ftlbhighset; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	/* Save old context and create impossible VPN2 value */  	old_ctx = read_c0_entryhi();  	write_c0_entrylo0(0); @@ -82,18 +63,35 @@ void local_flush_tlb_all(void)  	entry = read_c0_wired();  	/* Blast 'em all away. */ -	while (entry < current_cpu_data.tlbsize) { -		/* Make sure all entries differ. */ -		write_c0_entryhi(UNIQUE_ENTRYHI(entry)); -		write_c0_index(entry); -		mtc0_tlbw_hazard(); -		tlb_write_indexed(); -		entry++; +	if (cpu_has_tlbinv) { +		if (current_cpu_data.tlbsizevtlb) { +			write_c0_index(0); +			mtc0_tlbw_hazard(); +			tlbinvf();  /* invalidate VTLB */ +		} +		ftlbhighset = current_cpu_data.tlbsizevtlb + +			current_cpu_data.tlbsizeftlbsets; +		for (entry = current_cpu_data.tlbsizevtlb; +		     entry < ftlbhighset; +		     entry++) { +			write_c0_index(entry); +			mtc0_tlbw_hazard(); +			tlbinvf();  /* invalidate one FTLB set */ +		} +	} else { +		while (entry < current_cpu_data.tlbsize) { +			/* Make sure all entries differ. 
*/ +			write_c0_entryhi(UNIQUE_ENTRYHI(entry)); +			write_c0_index(entry); +			mtc0_tlbw_hazard(); +			tlb_write_indexed(); +			entry++; +		}  	}  	tlbw_use_hazard();  	write_c0_entryhi(old_ctx); -	FLUSH_ITLB; -	EXIT_CRITICAL(flags); +	flush_itlb(); +	local_irq_restore(flags);  }  EXPORT_SYMBOL(local_flush_tlb_all); @@ -123,11 +121,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,  	if (cpu_context(cpu, mm) != 0) {  		unsigned long size, flags; -		ENTER_CRITICAL(flags); +		local_irq_save(flags);  		start = round_down(start, PAGE_SIZE << 1);  		end = round_up(end, PAGE_SIZE << 1);  		size = (end - start) >> (PAGE_SHIFT + 1); -		if (size <= current_cpu_data.tlbsize/2) { +		if (size <= (current_cpu_data.tlbsizeftlbsets ? +			     current_cpu_data.tlbsize / 8 : +			     current_cpu_data.tlbsize / 2)) {  			int oldpid = read_c0_entryhi();  			int newpid = cpu_asid(cpu, mm); @@ -154,8 +154,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,  		} else {  			drop_mmu_context(mm, cpu);  		} -		FLUSH_ITLB; -		EXIT_CRITICAL(flags); +		flush_itlb(); +		local_irq_restore(flags);  	}  } @@ -163,10 +163,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)  {  	unsigned long size, flags; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;  	size = (size + 1) >> 1; -	if (size <= current_cpu_data.tlbsize / 2) { +	if (size <= (current_cpu_data.tlbsizeftlbsets ? 
+		     current_cpu_data.tlbsize / 8 : +		     current_cpu_data.tlbsize / 2)) {  		int pid = read_c0_entryhi();  		start &= (PAGE_MASK << 1); @@ -196,8 +198,8 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)  	} else {  		local_flush_tlb_all();  	} -	FLUSH_ITLB; -	EXIT_CRITICAL(flags); +	flush_itlb(); +	local_irq_restore(flags);  }  void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) @@ -210,7 +212,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)  		newpid = cpu_asid(cpu, vma->vm_mm);  		page &= (PAGE_MASK << 1); -		ENTER_CRITICAL(flags); +		local_irq_save(flags);  		oldpid = read_c0_entryhi();  		write_c0_entryhi(page | newpid);  		mtc0_tlbw_hazard(); @@ -229,8 +231,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)  	finish:  		write_c0_entryhi(oldpid); -		FLUSH_ITLB_VM(vma); -		EXIT_CRITICAL(flags); +		flush_itlb_vm(vma); +		local_irq_restore(flags);  	}  } @@ -243,7 +245,7 @@ void local_flush_tlb_one(unsigned long page)  	unsigned long flags;  	int oldpid, idx; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	oldpid = read_c0_entryhi();  	page &= (PAGE_MASK << 1);  	write_c0_entryhi(page); @@ -261,8 +263,8 @@ void local_flush_tlb_one(unsigned long page)  		tlbw_use_hazard();  	}  	write_c0_entryhi(oldpid); -	FLUSH_ITLB; -	EXIT_CRITICAL(flags); +	flush_itlb(); +	local_irq_restore(flags);  }  /* @@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)  	if (current->active_mm != vma->vm_mm)  		return; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	pid = read_c0_entryhi() & ASID_MASK;  	address &= (PAGE_MASK << 1); @@ -334,8 +336,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)  			tlb_write_indexed();  	}  	tlbw_use_hazard(); -	FLUSH_ITLB_VM(vma); -	EXIT_CRITICAL(flags); +	flush_itlb_vm(vma); +	local_irq_restore(flags);  }  void add_wired_entry(unsigned long entrylo0, 
unsigned long entrylo1, @@ -346,7 +348,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,  	unsigned long old_pagemask;  	unsigned long old_ctx; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	/* Save old context and create impossible VPN2 value */  	old_ctx = read_c0_entryhi();  	old_pagemask = read_c0_pagemask(); @@ -366,7 +368,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,  	tlbw_use_hazard();	/* What is the hazard here? */  	write_c0_pagemask(old_pagemask);  	local_flush_tlb_all(); -	EXIT_CRITICAL(flags); +	local_irq_restore(flags);  }  #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -376,13 +378,13 @@ int __init has_transparent_hugepage(void)  	unsigned int mask;  	unsigned long flags; -	ENTER_CRITICAL(flags); +	local_irq_save(flags);  	write_c0_pagemask(PM_HUGE_MASK);  	back_to_back_c0_hazard();  	mask = read_c0_pagemask();  	write_c0_pagemask(PM_DEFAULT_MASK); -	EXIT_CRITICAL(flags); +	local_irq_restore(flags);  	return mask == PM_HUGE_MASK;  } @@ -398,7 +400,10 @@ static int __init set_ntlb(char *str)  __setup("ntlb=", set_ntlb); -void tlb_init(void) +/* + * Configure TLB (for init or after a CPU has been powered off). + */ +static void r4k_tlb_configure(void)  {  	/*  	 * You should never change this register: @@ -430,6 +435,11 @@ void tlb_init(void)  	local_flush_tlb_all();  	/* Did I tell you that ARC SUCKS?  
*/ +} + +void tlb_init(void) +{ +	r4k_tlb_configure();  	if (ntlb) {  		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { @@ -443,3 +453,26 @@ void tlb_init(void)  	build_tlb_refill_handler();  } + +static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, +			       void *v) +{ +	switch (cmd) { +	case CPU_PM_ENTER_FAILED: +	case CPU_PM_EXIT: +		r4k_tlb_configure(); +		break; +	} + +	return NOTIFY_OK; +} + +static struct notifier_block r4k_tlb_pm_notifier_block = { +	.notifier_call = r4k_tlb_pm_notifier, +}; + +static int __init r4k_tlb_init_pm(void) +{ +	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); +} +arch_initcall(r4k_tlb_init_pm);  | 
