Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--  arch/xtensa/mm/Makefile  |   1
-rw-r--r--  arch/xtensa/mm/cache.c   |  39
-rw-r--r--  arch/xtensa/mm/fault.c   |  39
-rw-r--r--  arch/xtensa/mm/highmem.c |  72
-rw-r--r--  arch/xtensa/mm/init.c    | 362
-rw-r--r--  arch/xtensa/mm/misc.S    |  55
-rw-r--r--  arch/xtensa/mm/mmu.c     |  92
-rw-r--r--  arch/xtensa/mm/pgtable.c |  72
-rw-r--r--  arch/xtensa/mm/tlb.c     | 176
9 files changed, 660 insertions(+), 248 deletions(-)
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile index f0b646d2f84..f54f78e24d7 100644 --- a/arch/xtensa/mm/Makefile +++ b/arch/xtensa/mm/Makefile @@ -4,3 +4,4 @@  obj-y			:= init.o cache.o misc.o  obj-$(CONFIG_MMU)	+= fault.o mmu.o tlb.o +obj-$(CONFIG_HIGHMEM)	+= highmem.o diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 85df4655d32..63cbb867dad 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -59,6 +59,10 @@   *   */ +#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) +#error "HIGHMEM is not supported on cores with aliasing cache." +#endif +  #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK  /* @@ -118,7 +122,7 @@ void flush_dcache_page(struct page *page)   * For now, flush the whole cache. FIXME??   */ -void flush_cache_range(struct vm_area_struct* vma,  +void local_flush_cache_range(struct vm_area_struct *vma,  		       unsigned long start, unsigned long end)  {  	__flush_invalidate_dcache_all(); @@ -132,8 +136,8 @@ void flush_cache_range(struct vm_area_struct* vma,   * alias versions of the cache flush functions.   */ -void flush_cache_page(struct vm_area_struct* vma, unsigned long address, -    		      unsigned long pfn) +void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, +		      unsigned long pfn)  {  	/* Note that we have to use the 'alias' address to avoid multi-hit */ @@ -159,31 +163,31 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)  	/* Invalidate old entry in TLBs */ -	invalidate_itlb_mapping(addr); -	invalidate_dtlb_mapping(addr); +	flush_tlb_page(vma, addr);  #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK  	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { -		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);  		unsigned long paddr = (unsigned long) page_address(page);  		unsigned long phys = page_to_phys(page); +		unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);  		__flush_invalidate_dcache_page(paddr); -		__flush_invalidate_dcache_page_alias(vaddr, phys); -		__invalidate_icache_page_alias(vaddr, phys); +		__flush_invalidate_dcache_page_alias(tmp, phys); +		__invalidate_icache_page_alias(tmp, phys);  		clear_bit(PG_arch_1, &page->flags);  	}  #else  	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)  	    && (vma->vm_flags & VM_EXEC) != 0) { -	    	unsigned long paddr = (unsigned long) page_address(page); +		unsigned long paddr = (unsigned long)kmap_atomic(page);  		__flush_dcache_page(paddr);  		__invalidate_icache_page(paddr);  		set_bit(PG_arch_1, &page->flags); +		kunmap_atomic((void *)paddr);  	}  #endif  } @@ -195,7 +199,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)  #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK -void copy_to_user_page(struct vm_area_struct *vma, struct page *page,  +void copy_to_user_page(struct vm_area_struct *vma, struct page *page,  		unsigned long vaddr, void *dst, const void *src,  		unsigned long len)  { @@ -205,8 +209,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,  	/* Flush and invalidate user page if aliased. 
*/  	if (alias) { -		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); -		__flush_invalidate_dcache_page_alias(temp, phys); +		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); +		__flush_invalidate_dcache_page_alias(t, phys);  	}  	/* Copy data */ @@ -219,12 +223,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,  	 */  	if (alias) { -		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); +		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);  		__flush_invalidate_dcache_range((unsigned long) dst, len); -		if ((vma->vm_flags & VM_EXEC) != 0) { -			__invalidate_icache_page_alias(temp, phys); -		} +		if ((vma->vm_flags & VM_EXEC) != 0) +			__invalidate_icache_page_alias(t, phys);  	} else if ((vma->vm_flags & VM_EXEC) != 0) {  		__flush_dcache_range((unsigned long)dst,len); @@ -245,8 +248,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,  	 */  	if (alias) { -		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); -		__flush_invalidate_dcache_page_alias(temp, phys); +		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); +		__flush_invalidate_dcache_page_alias(t, phys);  	}  	memcpy(dst, src, len); diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index e367e302643..b57c4f91f48 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -6,7 +6,7 @@   * License.  See the file "COPYING" in the main directory of this archive   * for more details.   * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2010 Tensilica Inc.   *   * Chris Zankel <chris@zankel.net>   * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com> @@ -19,10 +19,9 @@  #include <asm/cacheflush.h>  #include <asm/hardirq.h>  #include <asm/uaccess.h> -#include <asm/system.h>  #include <asm/pgalloc.h> -unsigned long asid_cache = ASID_USER_FIRST; +DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;  void bad_page_fault(struct pt_regs*, unsigned long, int);  #undef DEBUG_PAGE_FAULT @@ -45,6 +44,7 @@ void do_page_fault(struct pt_regs *regs)  	int is_write, is_exec;  	int fault; +	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;  	info.si_code = SEGV_MAPERR; @@ -72,6 +72,9 @@ void do_page_fault(struct pt_regs *regs)  	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");  #endif +	if (user_mode(regs)) +		flags |= FAULT_FLAG_USER; +retry:  	down_read(&mm->mmap_sem);  	vma = find_vma(mm, address); @@ -94,6 +97,7 @@ good_area:  	if (is_write) {  		if (!(vma->vm_flags & VM_WRITE))  			goto bad_area; +		flags |= FAULT_FLAG_WRITE;  	} else if (is_exec) {  		if (!(vma->vm_flags & VM_EXEC))  			goto bad_area; @@ -105,7 +109,11 @@ good_area:  	 * make sure we exit gracefully rather than endlessly redo  	 * the fault.  	 */ -	fault = handle_mm_fault(mm, vma, address, is_write ? 
FAULT_FLAG_WRITE : 0); +	fault = handle_mm_fault(mm, vma, address, flags); + +	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) +		return; +  	if (unlikely(fault & VM_FAULT_ERROR)) {  		if (fault & VM_FAULT_OOM)  			goto out_of_memory; @@ -113,10 +121,23 @@ good_area:  			goto do_sigbus;  		BUG();  	} -	if (fault & VM_FAULT_MAJOR) -		current->maj_flt++; -	else -		current->min_flt++; +	if (flags & FAULT_FLAG_ALLOW_RETRY) { +		if (fault & VM_FAULT_MAJOR) +			current->maj_flt++; +		else +			current->min_flt++; +		if (fault & VM_FAULT_RETRY) { +			flags &= ~FAULT_FLAG_ALLOW_RETRY; +			flags |= FAULT_FLAG_TRIED; + +			 /* No need to up_read(&mm->mmap_sem) as we would +			 * have already released it in __lock_page_or_retry +			 * in mm/filemap.c. +			 */ + +			goto retry; +		} +	}  	up_read(&mm->mmap_sem);  	return; @@ -167,6 +188,7 @@ do_sigbus:  	/* Kernel mode? Handle exceptions or die */  	if (!user_mode(regs))  		bad_page_fault(regs, address, SIGBUS); +	return;  vmalloc_fault:  	{ @@ -234,4 +256,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)  	die("Oops", regs, sig);  	do_exit(sig);  } - diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c new file mode 100644 index 00000000000..17a8c0d6fd1 --- /dev/null +++ b/arch/xtensa/mm/highmem.c @@ -0,0 +1,72 @@ +/* + * High memory support for Xtensa architecture + * + * This file is subject to the terms and conditions of the GNU General + * Public License.  See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2014 Cadence Design Systems Inc. + */ + +#include <linux/export.h> +#include <linux/highmem.h> +#include <asm/tlbflush.h> + +static pte_t *kmap_pte; + +void *kmap_atomic(struct page *page) +{ +	enum fixed_addresses idx; +	unsigned long vaddr; +	int type; + +	pagefault_disable(); +	if (!PageHighMem(page)) +		return page_address(page); + +	type = kmap_atomic_idx_push(); +	idx = type + KM_TYPE_NR * smp_processor_id(); +	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +#ifdef CONFIG_DEBUG_HIGHMEM +	BUG_ON(!pte_none(*(kmap_pte - idx))); +#endif +	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); + +	return (void *)vaddr; +} +EXPORT_SYMBOL(kmap_atomic); + +void __kunmap_atomic(void *kvaddr) +{ +	int idx, type; + +	if (kvaddr >= (void *)FIXADDR_START && +	    kvaddr < (void *)FIXADDR_TOP) { +		type = kmap_atomic_idx(); +		idx = type + KM_TYPE_NR * smp_processor_id(); + +		/* +		 * Force other mappings to Oops if they'll try to access this +		 * pte without first remap it.  Keeping stale mappings around +		 * is a bad idea also, in case the page changes cacheability +		 * attributes or becomes a protected page in a hypervisor. +		 */ +		pte_clear(&init_mm, kvaddr, kmap_pte - idx); +		local_flush_tlb_kernel_range((unsigned long)kvaddr, +					     (unsigned long)kvaddr + PAGE_SIZE); + +		kmap_atomic_idx_pop(); +	} + +	pagefault_enable(); +} +EXPORT_SYMBOL(__kunmap_atomic); + +void __init kmap_init(void) +{ +	unsigned long kmap_vstart; + +	/* cache the first kmap pte */ +	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); +	kmap_pte = kmap_get_fixmap_pte(kmap_vstart); +} diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index ba150e5de2e..77ed20209ca 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -8,6 +8,7 @@   * for more details.   *   * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2014 Cadence Design Systems Inc.   
*   * Chris Zankel	<chris@zankel.net>   * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com> @@ -19,6 +20,7 @@  #include <linux/errno.h>  #include <linux/bootmem.h>  #include <linux/gfp.h> +#include <linux/highmem.h>  #include <linux/swap.h>  #include <linux/mman.h>  #include <linux/nodemask.h> @@ -26,16 +28,134 @@  #include <asm/bootparam.h>  #include <asm/page.h> +#include <asm/sections.h> +#include <asm/sysmem.h> -/* References to section boundaries */ +struct sysmem_info sysmem __initdata; -extern char _ftext, _etext, _fdata, _edata, _rodata_end; -extern char __init_begin, __init_end; +static void __init sysmem_dump(void) +{ +	unsigned i; + +	pr_debug("Sysmem:\n"); +	for (i = 0; i < sysmem.nr_banks; ++i) +		pr_debug("  0x%08lx - 0x%08lx (%ldK)\n", +			 sysmem.bank[i].start, sysmem.bank[i].end, +			 (sysmem.bank[i].end - sysmem.bank[i].start) >> 10); +} + +/* + * Find bank with maximal .start such that bank.start <= start + */ +static inline struct meminfo * __init find_bank(unsigned long start) +{ +	unsigned i; +	struct meminfo *it = NULL; + +	for (i = 0; i < sysmem.nr_banks; ++i) +		if (sysmem.bank[i].start <= start) +			it = sysmem.bank + i; +		else +			break; +	return it; +} + +/* + * Move all memory banks starting at 'from' to a new place at 'to', + * adjust nr_banks accordingly. + * Both 'from' and 'to' must be inside the sysmem.bank. + * + * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank). + */ +static int __init move_banks(struct meminfo *to, struct meminfo *from) +{ +	unsigned n = sysmem.nr_banks - (from - sysmem.bank); + +	if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX) +		return -ENOMEM; +	if (to != from) +		memmove(to, from, n * sizeof(struct meminfo)); +	sysmem.nr_banks += to - from; +	return 0; +} + +/* + * Add new bank to sysmem. Resulting sysmem is the union of bytes of the + * original sysmem and the new bank. + * + * Returns: 0 (success), < 0 (error) + */ +int __init add_sysmem_bank(unsigned long start, unsigned long end) +{ +	unsigned i; +	struct meminfo *it = NULL; +	unsigned long sz; +	unsigned long bank_sz = 0; + +	if (start == end || +	    (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) { +		pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n", +			start, end - start); +		return -EINVAL; +	} + +	start = PAGE_ALIGN(start); +	end &= PAGE_MASK; +	sz = end - start; + +	it = find_bank(start); + +	if (it) +		bank_sz = it->end - it->start; + +	if (it && bank_sz >= start - it->start) { +		if (end - it->start > bank_sz) +			it->end = end; +		else +			return 0; +	} else { +		if (!it) +			it = sysmem.bank; +		else +			++it; + +		if (it - sysmem.bank < sysmem.nr_banks && +		    it->start - start <= sz) { +			it->start = start; +			if (it->end - it->start < sz) +				it->end = end; +			else +				return 0; +		} else { +			if (move_banks(it + 1, it) < 0) { +				pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n", +					start, end - start); +				return -EINVAL; +			} +			it->start = start; +			it->end = end; +			return 0; +		} +	} +	sz = it->end - it->start; +	for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i) +		if (sysmem.bank[i].start - it->start <= sz) { +			if (sz < sysmem.bank[i].end - it->start) +				it->end = sysmem.bank[i].end; +		} else { +			break; +		} + +	move_banks(it + 1, sysmem.bank + i); +	return 0; +}  /*   * mem_reserve(start, end, must_exist)   *   * Reserve some memory from the memory pool. 
+ * If must_exist is set and a part of the region being reserved does not exist + * memory map is not altered.   *   * Parameters:   *  start	Start of region, @@ -43,58 +163,74 @@ extern char __init_begin, __init_end;   *  must_exist	Must exist in memory pool.   *   * Returns: - *  0 (memory area couldn't be mapped) - * -1 (success) + *  0 (success) + *  < 0 (error)   */  int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)  { -	int i; - -	if (start == end) -		return 0; +	struct meminfo *it; +	struct meminfo *rm = NULL; +	unsigned long sz; +	unsigned long bank_sz = 0;  	start = start & PAGE_MASK;  	end = PAGE_ALIGN(end); +	sz = end - start; +	if (!sz) +		return -EINVAL; -	for (i = 0; i < sysmem.nr_banks; i++) -		if (start < sysmem.bank[i].end -		    && end >= sysmem.bank[i].start) -			break; +	it = find_bank(start); -	if (i == sysmem.nr_banks) { -		if (must_exist) -			printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) " -				"not in any region!\n", start, end); -		return 0; +	if (it) +		bank_sz = it->end - it->start; + +	if ((!it || end - it->start > bank_sz) && must_exist) { +		pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n", +			start, end); +		return -EINVAL;  	} -	if (start > sysmem.bank[i].start) { -		if (end < sysmem.bank[i].end) { -			/* split entry */ -			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) -				panic("meminfo overflow\n"); -			sysmem.bank[sysmem.nr_banks].start = end; -			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end; -			sysmem.nr_banks++; +	if (it && start - it->start <= bank_sz) { +		if (start == it->start) { +			if (end - it->start < bank_sz) { +				it->start = end; +				return 0; +			} else { +				rm = it; +			} +		} else { +			it->end = start; +			if (end - it->start < bank_sz) +				return add_sysmem_bank(end, +						       it->start + bank_sz); +			++it;  		} -		sysmem.bank[i].end = start; -	} else { -		if (end < sysmem.bank[i].end) -			sysmem.bank[i].start = end; -		else { -			/* remove entry */ -			sysmem.nr_banks--; -			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start; -			sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end; +	} + +	if (!it) +		it = sysmem.bank; + +	for (; it < sysmem.bank + sysmem.nr_banks; ++it) { +		if (it->end - start <= sz) { +			if (!rm) +				rm = it; +		} else { +			if (it->start - start < sz) +				it->start = end; +			break;  		}  	} -	return -1; + +	if (rm) +		move_banks(rm, it); + +	return 0;  }  /* - * Initialize the bootmem system and give it all the memory we have available. + * Initialize the bootmem system and give it all low memory we have available.   */  void __init bootmem_init(void) @@ -103,6 +239,7 @@ void __init bootmem_init(void)  	unsigned long bootmap_start, bootmap_size;  	int i; +	sysmem_dump();  	max_low_pfn = max_pfn = 0;  	min_low_pfn = ~0; @@ -146,28 +283,27 @@ void __init bootmem_init(void)  	/* Add all remaining memory pieces into the bootmem map */ -	for (i=0; i<sysmem.nr_banks; i++) -		free_bootmem(sysmem.bank[i].start, -			     sysmem.bank[i].end - sysmem.bank[i].start); +	for (i = 0; i < sysmem.nr_banks; i++) { +		if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { +			unsigned long end = min(max_low_pfn << PAGE_SHIFT, +						sysmem.bank[i].end); +			free_bootmem(sysmem.bank[i].start, +				     end - sysmem.bank[i].start); +		} +	}  }  void __init zones_init(void)  { -	unsigned long zones_size[MAX_NR_ZONES]; -	int i; -  	/* All pages are DMA-able, so we put them all in the DMA zone. 
*/ - -	zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET; -	for (i = 1; i < MAX_NR_ZONES; i++) -		zones_size[i] = 0; - +	unsigned long zones_size[MAX_NR_ZONES] = { +		[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,  #ifdef CONFIG_HIGHMEM -	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; +		[ZONE_HIGHMEM] = max_pfn - max_low_pfn,  #endif - +	};  	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);  } @@ -177,50 +313,38 @@ void __init zones_init(void)  void __init mem_init(void)  { -	unsigned long codesize, reservedpages, datasize, initsize; -	unsigned long highmemsize, tmp, ram; - -	max_mapnr = num_physpages = max_low_pfn - ARCH_PFN_OFFSET; -	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); -	highmemsize = 0; -  #ifdef CONFIG_HIGHMEM -#error HIGHGMEM not implemented in init.c -#endif +	unsigned long tmp; -	totalram_pages += free_all_bootmem(); +	reset_all_zones_managed_pages(); +	for (tmp = max_low_pfn; tmp < max_pfn; tmp++) +		free_highmem_page(pfn_to_page(tmp)); +#endif -	reservedpages = ram = 0; -	for (tmp = 0; tmp < max_mapnr; tmp++) { -		ram++; -		if (PageReserved(mem_map+tmp)) -			reservedpages++; -	} +	max_mapnr = max_pfn - ARCH_PFN_OFFSET; +	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); -	codesize =  (unsigned long) &_etext - (unsigned long) &_ftext; -	datasize =  (unsigned long) &_edata - (unsigned long) &_fdata; -	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin; - -	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " -	       "%ldk data, %ldk init %ldk highmem)\n", -	       nr_free_pages() << (PAGE_SHIFT-10), -	       ram << (PAGE_SHIFT-10), -	       codesize >> 10, -	       reservedpages << (PAGE_SHIFT-10), -	       datasize >> 10, -	       initsize >> 10, -	       highmemsize >> 10); -} +	free_all_bootmem(); -void -free_reserved_mem(void *start, void *end) -{ -	for (; start < end; start += PAGE_SIZE) { -		ClearPageReserved(virt_to_page(start)); -		init_page_count(virt_to_page(start)); -		free_page((unsigned long)start); -		totalram_pages++; -	} +	mem_init_print_info(NULL); +	pr_info("virtual kernel memory layout:\n" +#ifdef CONFIG_HIGHMEM +		"    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n" +		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n" +#endif +		"    vmalloc : 0x%08x - 0x%08x  (%5u MB)\n" +		"    lowmem  : 0x%08x - 0x%08lx  (%5lu MB)\n", +#ifdef CONFIG_HIGHMEM +		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, +		(LAST_PKMAP*PAGE_SIZE) >> 10, +		FIXADDR_START, FIXADDR_TOP, +		(FIXADDR_TOP - FIXADDR_START) >> 10, +#endif +		VMALLOC_START, VMALLOC_END, +		(VMALLOC_END - VMALLOC_START) >> 20, +		PAGE_OFFSET, PAGE_OFFSET + +		(max_low_pfn - min_low_pfn) * PAGE_SIZE, +		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);  }  #ifdef CONFIG_BLK_DEV_INITRD @@ -228,16 +352,62 @@ extern int initrd_is_mapped;  void free_initrd_mem(unsigned long start, unsigned long end)  { -	if (initrd_is_mapped) { -		free_reserved_mem((void*)start, (void*)end); -		printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10); -	} +	if (initrd_is_mapped) +		free_reserved_area((void *)start, (void *)end, -1, "initrd");  }  #endif  void free_initmem(void)  { -	free_reserved_mem(&__init_begin, &__init_end); -	printk("Freeing unused kernel memory: %dk freed\n", -	       (&__init_end - &__init_begin) >> 10); +	free_initmem_default(-1); +} + +static void __init parse_memmap_one(char *p) +{ +	char *oldp; +	unsigned long start_at, mem_size; + +	if (!p) +		return; + +	oldp = p; +	mem_size = memparse(p, &p); +	if (p == oldp) +		return; + +	switch 
(*p) { +	case '@': +		start_at = memparse(p + 1, &p); +		add_sysmem_bank(start_at, start_at + mem_size); +		break; + +	case '$': +		start_at = memparse(p + 1, &p); +		mem_reserve(start_at, start_at + mem_size, 0); +		break; + +	case 0: +		mem_reserve(mem_size, 0, 0); +		break; + +	default: +		pr_warn("Unrecognized memmap syntax: %s\n", p); +		break; +	} +} + +static int __init parse_memmap_opt(char *str) +{ +	while (str) { +		char *k = strchr(str, ','); + +		if (k) +			*k++ = 0; + +		parse_memmap_one(str); +		str = k; +	} + +	return 0;  } +early_param("memmap", parse_memmap_opt); diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S index b048406d875..1f68558dbcc 100644 --- a/arch/xtensa/mm/misc.S +++ b/arch/xtensa/mm/misc.S @@ -29,6 +29,7 @@   */  ENTRY(clear_page) +  	entry	a1, 16  	movi	a3, 0 @@ -45,6 +46,8 @@ ENTRY(clear_page)  	retw +ENDPROC(clear_page) +  /*   * copy_page and copy_user_page are the same for non-cache-aliased configs.   * @@ -53,6 +56,7 @@ ENTRY(clear_page)   */  ENTRY(copy_page) +  	entry	a1, 16  	__loopi a2, a4, PAGE_SIZE, 32 @@ -84,6 +88,8 @@ ENTRY(copy_page)  	retw +ENDPROC(copy_page) +  #ifdef CONFIG_MMU  /*   * If we have to deal with cache aliasing, we use temporary memory mappings @@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)   */  ENTRY(clear_user_page) +  	entry	a1, 32  	/* Mark page dirty and determine alias. */ @@ -133,7 +140,7 @@ ENTRY(clear_user_page)  	/* Setup a temporary DTLB with the color of the VPN */ -	movi	a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) +	movi	a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff  	movi	a5, TLBTEMP_BASE_1			# virt  	add	a6, a2, a4				# ppn  	add	a2, a5, a3				# add 'color' @@ -164,6 +171,8 @@ ENTRY(clear_user_page)  	retw +ENDPROC(clear_user_page) +  /*   * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)   *                    a2          a3	        a4		    a5 @@ -171,7 +180,7 @@ ENTRY(clear_user_page)  ENTRY(copy_user_page) -	entry	a1, 32  +	entry	a1, 32  	/* Mark page dirty and determine alias for destination. 
*/ @@ -185,7 +194,7 @@ ENTRY(copy_user_page)  	or	a9, a9, a8  	slli	a4, a4, PAGE_SHIFT  	s32i	a9, a5, PAGE_FLAGS -	movi	a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) +	movi	a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff  	beqz	a6, 1f @@ -262,6 +271,8 @@ ENTRY(copy_user_page)  	retw +ENDPROC(copy_user_page) +  #endif  #if (DCACHE_WAY_SIZE > PAGE_SIZE) @@ -272,6 +283,7 @@ ENTRY(copy_user_page)   */  ENTRY(__flush_invalidate_dcache_page_alias) +  	entry	sp, 16  	movi	a7, 0			# required for exception handler @@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)  	retw +ENDPROC(__flush_invalidate_dcache_page_alias)  #endif  ENTRY(__tlbtemp_mapping_itlb) @@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)  #if (ICACHE_WAY_SIZE > PAGE_SIZE)  ENTRY(__invalidate_icache_page_alias) +  	entry	sp, 16  	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) @@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)  	isync  	retw +ENDPROC(__invalidate_icache_page_alias) +  #endif  /* End of special treatment in tlb miss exception */  ENTRY(__tlbtemp_mapping_end) +  #endif /* CONFIG_MMU  /* @@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)   */  ENTRY(__invalidate_icache_page) +  	entry	sp, 16  	___invalidate_icache_page a2 a3 @@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)  	retw +ENDPROC(__invalidate_icache_page) +  /*   * void __invalidate_dcache_page(ulong start)   */  ENTRY(__invalidate_dcache_page) +  	entry	sp, 16  	___invalidate_dcache_page a2 a3 @@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)  	retw +ENDPROC(__invalidate_dcache_page) +  /*   * void __flush_invalidate_dcache_page(ulong start)   */  ENTRY(__flush_invalidate_dcache_page) +  	entry	sp, 16  	___flush_invalidate_dcache_page a2 a3 @@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)  	dsync  	retw +ENDPROC(__flush_invalidate_dcache_page) +  /*   * void __flush_dcache_page(ulong start)   */  ENTRY(__flush_dcache_page) +  	entry	sp, 16  	___flush_dcache_page a2 a3 @@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)  	dsync  	retw +ENDPROC(__flush_dcache_page) +  /*   * void __invalidate_icache_range(ulong start, ulong size)   */  ENTRY(__invalidate_icache_range) +  	entry	sp, 16  	___invalidate_icache_range a2 a3 a4 @@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)  	retw +ENDPROC(__invalidate_icache_range) +  /*   * void __flush_invalidate_dcache_range(ulong start, ulong size)   */  ENTRY(__flush_invalidate_dcache_range) +  	entry	sp, 16  	___flush_invalidate_dcache_range a2 a3 a4 @@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)  	retw +ENDPROC(__flush_invalidate_dcache_range) +  /*   * void _flush_dcache_range(ulong start, ulong size)   */  ENTRY(__flush_dcache_range) +  	entry	sp, 16  	___flush_dcache_range a2 a3 a4 @@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)  	retw +ENDPROC(__flush_dcache_range) +  /*   * void _invalidate_dcache_range(ulong start, ulong size)   */  ENTRY(__invalidate_dcache_range) +  	entry	sp, 16  	___invalidate_dcache_range a2 a3 a4  	retw +ENDPROC(__invalidate_dcache_range) +  /*   * void _invalidate_icache_all(void)   */  ENTRY(__invalidate_icache_all) +  	entry	sp, 16  	___invalidate_icache_all a2 a3 @@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)  	retw +ENDPROC(__invalidate_icache_all) +  /*   * void _flush_invalidate_dcache_all(void)   */  ENTRY(__flush_invalidate_dcache_all) +  	entry	sp, 16  	___flush_invalidate_dcache_all a2 a3 @@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)  	retw +ENDPROC(__flush_invalidate_dcache_all) +  /*   
* void _invalidate_dcache_all(void)   */  ENTRY(__invalidate_dcache_all) +  	entry	sp, 16  	___invalidate_dcache_all a2 a3 @@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)  	retw +ENDPROC(__invalidate_dcache_all) diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 4bb91a970f1..3429b483d9f 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -3,6 +3,7 @@   *   * Extracted from init.c   */ +#include <linux/bootmem.h>  #include <linux/percpu.h>  #include <linux/init.h>  #include <linux/string.h> @@ -13,33 +14,86 @@  #include <asm/tlbflush.h>  #include <asm/mmu_context.h>  #include <asm/page.h> +#include <asm/initialize_mmu.h> +#include <asm/io.h> -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); +#if defined(CONFIG_HIGHMEM) +static void * __init init_pmd(unsigned long vaddr) +{ +	pgd_t *pgd = pgd_offset_k(vaddr); +	pmd_t *pmd = pmd_offset(pgd, vaddr); + +	if (pmd_none(*pmd)) { +		unsigned i; +		pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); + +		for (i = 0; i < 1024; i++) +			pte_clear(NULL, 0, pte + i); + +		set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); +		BUG_ON(pte != pte_offset_kernel(pmd, 0)); +		pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", +			 __func__, vaddr, pmd, pte); +		return pte; +	} else { +		return pte_offset_kernel(pmd, 0); +	} +} + +static void __init fixedrange_init(void) +{ +	BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); +	init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); +} +#endif  void __init paging_init(void)  {  	memset(swapper_pg_dir, 0, PAGE_SIZE); +#ifdef CONFIG_HIGHMEM +	fixedrange_init(); +	pkmap_page_table = init_pmd(PKMAP_BASE); +	kmap_init(); +#endif  }  /*   * Flush the mmu and reset associated register to default values.   */ -void __init init_mmu(void) +void init_mmu(void)  { -	/* Writing zeros to the <t>TLBCFG special registers ensure -	 * that valid values exist in the register.  For existing -	 * PGSZID<w> fields, zero selects the first element of the -	 * page-size array.  For nonexistent PGSZID<w> fields, zero is -	 * the best value to write.  Also, when changing PGSZID<w> +#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) +	/* +	 * Writing zeros to the instruction and data TLBCFG special +	 * registers ensure that valid values exist in the register. +	 * +	 * For existing PGSZID<w> fields, zero selects the first element +	 * of the page-size array.  For nonexistent PGSZID<w> fields, +	 * zero is the best value to write.  Also, when changing PGSZID<w>  	 * fields, the corresponding TLB must be flushed.  	 */  	set_itlbcfg_register(0);  	set_dtlbcfg_register(0); -	flush_tlb_all(); +#endif +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) +	/* +	 * Update the IO area mapping in case xtensa_kio_paddr has changed +	 */ +	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK), +			XCHAL_KIO_CACHED_VADDR + 6); +	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK), +			XCHAL_KIO_CACHED_VADDR + 6); +	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), +			XCHAL_KIO_BYPASS_VADDR + 6); +	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), +			XCHAL_KIO_BYPASS_VADDR + 6); +#endif + +	local_flush_tlb_all();  	/* Set rasid register to a known value. */ -	set_rasid_register(ASID_USER_FIRST); +	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));  	/* Set PTEVADDR special register to the start of the page  	 * table, which is in kernel mappable space (ie. 
not @@ -48,23 +102,3 @@ void __init init_mmu(void)  	 */  	set_ptevaddr_register(PGTABLE_START);  } - -struct kmem_cache *pgtable_cache __read_mostly; - -static void pgd_ctor(void *addr) -{ -	pte_t *ptep = (pte_t *)addr; -	int i; - -	for (i = 0; i < 1024; i++, ptep++) -		pte_clear(NULL, 0, ptep); - -} - -void __init pgtable_cache_init(void) -{ -	pgtable_cache = kmem_cache_create("pgd", -			PAGE_SIZE, PAGE_SIZE, -			SLAB_HWCACHE_ALIGN, -			pgd_ctor); -} diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c deleted file mode 100644 index 69799273820..00000000000 --- a/arch/xtensa/mm/pgtable.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * arch/xtensa/mm/pgtable.c - * - * This file is subject to the terms and conditions of the GNU General Public - * License.  See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - * - * Chris Zankel <chris@zankel.net> - */ - -#if (DCACHE_SIZE > PAGE_SIZE) - -pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) -{ -	pte_t *pte = NULL, *p; -	int color = ADDR_COLOR(address); -	int i; - -	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER); - -	if (likely(p)) { -		split_page(virt_to_page(p), COLOR_ORDER); - -		for (i = 0; i < COLOR_SIZE; i++) { -			if (ADDR_COLOR(p) == color) -				pte = p; -			else -				free_page(p); -			p += PTRS_PER_PTE; -		} -		clear_page(pte); -	} -	return pte; -} - -#ifdef PROFILING - -int mask; -int hit; -int flush; - -#endif - -struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address) -{ -	struct page *page = NULL, *p; -	int color = ADDR_COLOR(address); - -	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); - -	if (likely(p)) { -		split_page(p, COLOR_ORDER); - -		for (i = 0; i < PAGE_ORDER; i++) { -			if (PADDR_COLOR(page_address(p)) == color) -				page = p; -			else -				__free_page(p); -			p++; -		} -		clear_highpage(page); -	} - -	return page; -} - -#endif - - - diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 239461d8ea8..5ece856c572 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -18,7 +18,6 @@  #include <asm/processor.h>  #include <asm/mmu_context.h>  #include <asm/tlbflush.h> -#include <asm/system.h>  #include <asm/cacheflush.h> @@ -49,7 +48,7 @@ static inline void __flush_dtlb_all (void)  } -void flush_tlb_all (void) +void local_flush_tlb_all(void)  {  	__flush_itlb_all();  	__flush_dtlb_all(); @@ -61,19 +60,23 @@ void flush_tlb_all (void)   * a new context will be assigned to it.   
*/ -void flush_tlb_mm(struct mm_struct *mm) +void local_flush_tlb_mm(struct mm_struct *mm)  { +	int cpu = smp_processor_id(); +  	if (mm == current->active_mm) { -		int flags; -		local_save_flags(flags); -		__get_new_mmu_context(mm); -		__load_mmu_context(mm); +		unsigned long flags; +		local_irq_save(flags); +		mm->context.asid[cpu] = NO_CONTEXT; +		activate_context(mm, cpu);  		local_irq_restore(flags); +	} else { +		mm->context.asid[cpu] = NO_CONTEXT; +		mm->context.cpu = -1;  	} -	else -		mm->context = 0;  } +  #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)  #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)  #if _ITLB_ENTRIES > _DTLB_ENTRIES @@ -82,26 +85,28 @@ void flush_tlb_mm(struct mm_struct *mm)  # define _TLB_ENTRIES _DTLB_ENTRIES  #endif -void flush_tlb_range (struct vm_area_struct *vma, -    		      unsigned long start, unsigned long end) +void local_flush_tlb_range(struct vm_area_struct *vma, +		unsigned long start, unsigned long end)  { +	int cpu = smp_processor_id();  	struct mm_struct *mm = vma->vm_mm;  	unsigned long flags; -	if (mm->context == NO_CONTEXT) +	if (mm->context.asid[cpu] == NO_CONTEXT)  		return;  #if 0  	printk("[tlbrange<%02lx,%08lx,%08lx>]\n", -			(unsigned long)mm->context, start, end); +			(unsigned long)mm->context.asid[cpu], start, end);  #endif -	local_save_flags(flags); +	local_irq_save(flags);  	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {  		int oldpid = get_rasid_register(); -		set_rasid_register (ASID_INSERT(mm->context)); + +		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));  		start &= PAGE_MASK; - 		if (vma->vm_flags & VM_EXEC) +		if (vma->vm_flags & VM_EXEC)  			while(start < end) {  				invalidate_itlb_mapping(start);  				invalidate_dtlb_mapping(start); @@ -115,23 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,  		set_rasid_register(oldpid);  	} else { -		flush_tlb_mm(mm); +		local_flush_tlb_mm(mm);  	}  	local_irq_restore(flags);  } -void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)  { +	int cpu = smp_processor_id();  	struct mm_struct* mm = vma->vm_mm;  	unsigned long flags;  	int oldpid; -	if(mm->context == NO_CONTEXT) +	if (mm->context.asid[cpu] == NO_CONTEXT)  		return; -	local_save_flags(flags); +	local_irq_save(flags); -       	oldpid = get_rasid_register(); +	oldpid = get_rasid_register(); +	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));  	if (vma->vm_flags & VM_EXEC)  		invalidate_itlb_mapping(page); @@ -142,3 +149,130 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)  	local_irq_restore(flags);  } +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ +	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET && +	    end - start < _TLB_ENTRIES << PAGE_SHIFT) { +		start &= PAGE_MASK; +		while (start < end) { +			invalidate_itlb_mapping(start); +			invalidate_dtlb_mapping(start); +			start += PAGE_SIZE; +		} +	} else { +		local_flush_tlb_all(); +	} +} + +#ifdef CONFIG_DEBUG_TLB_SANITY + +static unsigned get_pte_for_vaddr(unsigned vaddr) +{ +	struct task_struct *task = get_current(); +	struct mm_struct *mm = task->mm; +	pgd_t *pgd; +	pmd_t *pmd; +	pte_t *pte; + +	if (!mm) +		mm = task->active_mm; +	pgd = pgd_offset(mm, vaddr); +	if (pgd_none_or_clear_bad(pgd)) +		return 0; +	pmd = pmd_offset(pgd, vaddr); +	if (pmd_none_or_clear_bad(pmd)) +		return 0; +	pte = pte_offset_map(pmd, vaddr); +	if (!pte) +		return 
0; +	return pte_val(*pte); +} + +enum { +	TLB_SUSPICIOUS	= 1, +	TLB_INSANE	= 2, +}; + +static void tlb_insane(void) +{ +	BUG_ON(1); +} + +static void tlb_suspicious(void) +{ +	WARN_ON(1); +} + +/* + * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE), + * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE. + * + * Check that valid TLB entries either have the same PA as the PTE, or PTE is + * marked as non-present. Non-present PTE and the page with non-zero refcount + * and zero mapcount is normal for batched TLB flush operation. Zero refcount + * means that the page was freed prematurely. Non-zero mapcount is unusual, + * but does not necessary means an error, thus marked as suspicious. + */ +static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) +{ +	unsigned tlbidx = w | (e << PAGE_SHIFT); +	unsigned r0 = dtlb ? +		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx); +	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); +	unsigned pte = get_pte_for_vaddr(vpn); +	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK; +	unsigned tlb_asid = r0 & ASID_MASK; +	bool kernel = tlb_asid == 1; +	int rc = 0; + +	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) { +		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n", +				dtlb ? 'D' : 'I', w, e, vpn, +				kernel ? "kernel" : "user"); +		rc |= TLB_INSANE; +	} + +	if (tlb_asid == mm_asid) { +		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) : +			read_itlb_translation(tlbidx); +		if ((pte ^ r1) & PAGE_MASK) { +			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n", +					dtlb ? 'D' : 'I', w, e, r0, r1, pte); +			if (pte == 0 || !pte_present(__pte(pte))) { +				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT); +				pr_err("page refcount: %d, mapcount: %d\n", +						page_count(p), +						page_mapcount(p)); +				if (!page_count(p)) +					rc |= TLB_INSANE; +				else if (page_mapped(p)) +					rc |= TLB_SUSPICIOUS; +			} else { +				rc |= TLB_INSANE; +			} +		} +	} +	return rc; +} + +void check_tlb_sanity(void) +{ +	unsigned long flags; +	unsigned w, e; +	int bug = 0; + +	local_irq_save(flags); +	for (w = 0; w < DTLB_ARF_WAYS; ++w) +		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e) +			bug |= check_tlb_entry(w, e, true); +	for (w = 0; w < ITLB_ARF_WAYS; ++w) +		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e) +			bug |= check_tlb_entry(w, e, false); +	if (bug & TLB_INSANE) +		tlb_insane(); +	if (bug & TLB_SUSPICIOUS) +		tlb_suspicious(); +	local_irq_restore(flags); +} + +#endif /* CONFIG_DEBUG_TLB_SANITY */  | 
