Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--  arch/xtensa/mm/Makefile  |  12
-rw-r--r--  arch/xtensa/mm/cache.c   | 258
-rw-r--r--  arch/xtensa/mm/fault.c   |  81
-rw-r--r--  arch/xtensa/mm/highmem.c |  72
-rw-r--r--  arch/xtensa/mm/init.c    | 643
-rw-r--r--  arch/xtensa/mm/misc.S    | 556
-rw-r--r--  arch/xtensa/mm/mmu.c     | 104
-rw-r--r--  arch/xtensa/mm/pgtable.c |  72
-rw-r--r--  arch/xtensa/mm/tlb.c     | 563
9 files changed, 1226 insertions(+), 1135 deletions(-)
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index a5aed5932d7..f54f78e24d7 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -1,13 +1,7 @@
#
# Makefile for the Linux/Xtensa-specific parts of the memory manager.
#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definition is now in the main makefile...
-obj-y := init.o fault.o tlb.o misc.o
-obj-m :=
-obj-n :=
-obj- :=
+obj-y := init.o cache.o misc.o
+obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
new file mode 100644
index 00000000000..63cbb867dad
--- /dev/null
+++ b/arch/xtensa/mm/cache.c
@@ -0,0 +1,258 @@
+/*
+ * arch/xtensa/mm/cache.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Joe Taylor
+ * Marc Gauthier
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+//#define printd(x...) printk(x)
+#define printd(x...) do { } while (0)
+
+/*
+ * Note:
+ * The kernel provides one architecture bit PG_arch_1 in the page flags that
+ * can be used for cache coherency.
+ *
+ * I$-D$ coherency.
+ *
+ * The Xtensa architecture doesn't keep the instruction cache coherent with
+ * the data cache. We use the architecture bit to indicate if the caches
+ * are coherent. The kernel clears this bit whenever a page is added to the
+ * page cache. At that time, the caches might not be in sync. We, therefore,
+ * define this flag as 'clean' if set.
+ *
+ * D-cache aliasing.
+ *
+ * With cache aliasing, we always have to flush the cache when pages are
+ * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
+ * page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+/*
+ * This routine is called any time the kernel writes to a user page-cache
+ * page, or is about to read from one.
+ */
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+
+ /*
+ * If we have a mapping but the page is not mapped to user-space
+ * yet, we simply mark this page dirty and defer flushing the
+ * caches until update_mmu_cache().
+ */
+
+ if (mapping && !mapping_mapped(mapping)) {
+ if (!test_bit(PG_arch_1, &page->flags))
+ set_bit(PG_arch_1, &page->flags);
+ return;
+
+ } else {
+
+ unsigned long phys = page_to_phys(page);
+ unsigned long temp = page->index << PAGE_SHIFT;
+ unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
+ unsigned long virt;
+
+ /*
+ * Flush the page in kernel space and user space.
+ * Note that we can omit that step if aliasing is not
+ * an issue, but we do have to synchronize I$ and D$
+ * if we have a mapping.
+ */
+
+ if (!alias && !mapping)
+ return;
+
+ __flush_invalidate_dcache_page((long)page_address(page));
+
+ virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+ if (alias)
+ __flush_invalidate_dcache_page_alias(virt, phys);
+
+ if (mapping)
+ __invalidate_icache_page_alias(virt, phys);
+ }
+
+ /* There shouldn't be an entry in the cache for this page anymore. */
+}
+
+
+/*
+ * For now, flush the whole cache. FIXME??
+ */
+
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ __flush_invalidate_dcache_all();
+ __invalidate_icache_all();
+}
+
+/*
+ * Remove any entry in the cache for this page.
+ *
+ * Note that this function is only called for user pages, so use the
+ * alias versions of the cache flush functions.
+ */
+
+void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned long pfn)
+{
+ /* Note that we have to use the 'alias' address to avoid multi-hit */
+
+ unsigned long phys = page_to_phys(pfn_to_page(pfn));
+ unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
+ __flush_invalidate_dcache_page_alias(virt, phys);
+ __invalidate_icache_page_alias(virt, phys);
+}
+
+#endif
+
+void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+{
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+
+ /* Invalidate old entry in TLBs */
+
+ flush_tlb_page(vma, addr);
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+ if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
+
+ unsigned long paddr = (unsigned long) page_address(page);
+ unsigned long phys = page_to_phys(page);
+ unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+
+ __flush_invalidate_dcache_page(paddr);
+
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ __invalidate_icache_page_alias(tmp, phys);
+
+ clear_bit(PG_arch_1, &page->flags);
+ }
+#else
+ if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+ && (vma->vm_flags & VM_EXEC) != 0) {
+ unsigned long paddr = (unsigned long)kmap_atomic(page);
+ __flush_dcache_page(paddr);
+ __invalidate_icache_page(paddr);
+ set_bit(PG_arch_1, &page->flags);
+ kunmap_atomic((void *)paddr);
+ }
+#endif
+}
+
+/*
+ * access_process_vm() has called get_user_pages(), which has done a
+ * flush_dcache_page() on the page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ unsigned long phys = page_to_phys(page);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+ /* Flush and invalidate user page if aliased. */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(t, phys);
+ }
+
+ /* Copy data */
+
+ memcpy(dst, src, len);
+
+ /*
+ * Flush and invalidate kernel page if aliased and synchronize
+ * data and instruction caches for executable pages.
+ */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
+ __flush_invalidate_dcache_range((unsigned long) dst, len);
+ if ((vma->vm_flags & VM_EXEC) != 0)
+ __invalidate_icache_page_alias(t, phys);
+
+ } else if ((vma->vm_flags & VM_EXEC) != 0) {
+ __flush_dcache_range((unsigned long) dst, len);
+ __invalidate_icache_range((unsigned long) dst, len);
+ }
+}
+
+extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ unsigned long phys = page_to_phys(page);
+ unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+ /*
+ * Flush user page if aliased.
+ * (Note: a simple flush would be sufficient.)
+ */
+
+ if (alias) {
+ unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(t, phys);
+ }
+
+ memcpy(dst, src, len);
+}
+
+#endif
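
The aliasing logic above hinges on cache "colors": in a virtually indexed
D-cache whose way size exceeds PAGE_SIZE, two mappings of the same physical
page can land in different cache lines unless the index bits above the page
offset agree. A minimal, self-contained sketch of the DCACHE_ALIAS_EQ test
used throughout this file, assuming a hypothetical 8 KiB way size and 4 KiB
pages (the constants are illustrative, not a real Xtensa configuration):

#include <stdbool.h>

#define PAGE_SIZE         4096UL  /* assumed */
#define DCACHE_WAY_SIZE   8192UL  /* assumed: two cache colors */
#define DCACHE_ALIAS_MASK (DCACHE_WAY_SIZE - PAGE_SIZE)

/* Two addresses alias cleanly iff their color bits match. */
static bool dcache_alias_eq(unsigned long vaddr, unsigned long paddr)
{
        return ((vaddr ^ paddr) & DCACHE_ALIAS_MASK) == 0;
}

When the colors differ, flush_dcache_page() flushes both the kernel alias
and a TLBTEMP_BASE_1 mapping of the matching color, which is exactly the
virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK) computation above.
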
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index a945a33e85a..b57c4f91f48 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -6,7 +6,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2010 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -14,16 +14,18 @@
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
#include <asm/pgalloc.h>
-unsigned long asid_cache = ASID_FIRST_VERSION;
+DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
+#undef DEBUG_PAGE_FAULT
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -41,6 +43,8 @@ void do_page_fault(struct pt_regs *regs)
siginfo_t info;
int is_write, is_exec;
+ int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
info.si_code = SEGV_MAPERR;
@@ -58,16 +62,19 @@ void do_page_fault(struct pt_regs *regs)
return;
}
- is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
- is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
- exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
- exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
+ is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
+ exccause == EXCCAUSE_ITLB_MISS ||
+ exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
-#if 0
+#ifdef DEBUG_PAGE_FAULT
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -90,6 +97,7 @@ good_area:
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
} else if (is_exec) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
@@ -101,21 +109,35 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
- switch (handle_mm_fault(mm, vma, address, is_write)) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- goto do_sigbus;
- case VM_FAULT_OOM:
- goto out_of_memory;
- default:
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
BUG();
}
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;
@@ -144,15 +166,10 @@ bad_area:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (current->pid == 1) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_exit(SIGKILL);
- bad_page_fault(regs, address, SIGKILL);
+ if (!user_mode(regs))
+ bad_page_fault(regs, address, SIGKILL);
+ else
+ pagefault_out_of_memory();
return;
do_sigbus:
@@ -171,6 +188,7 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
bad_page_fault(regs, address, SIGBUS);
+ return;
vmalloc_fault:
{
@@ -220,7 +238,7 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#if 1
+#ifdef DEBUG_PAGE_FAULT
printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
current->comm, regs->pc, entry->fixup);
#endif
@@ -238,4 +256,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
die("Oops", regs, sig);
do_exit(sig);
}
-
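
The rewritten fault loop above follows the generic mm retry protocol:
handle_mm_fault() may drop mmap_sem and return VM_FAULT_RETRY, in which case
the handler retries exactly once with FAULT_FLAG_TRIED set. A self-contained
model of that single-retry policy (all names below are illustrative
stand-ins, not kernel APIs):

enum fault_result { FAULT_OK, FAULT_RETRY, FAULT_ERROR };

#define FLAG_ALLOW_RETRY 0x1
#define FLAG_TRIED       0x2

static int fault_with_retry(enum fault_result (*do_fault)(unsigned int flags))
{
        unsigned int flags = FLAG_ALLOW_RETRY;

        for (;;) {
                enum fault_result r = do_fault(flags);

                if (r == FAULT_ERROR)
                        return -1;
                if (r == FAULT_RETRY && (flags & FLAG_ALLOW_RETRY)) {
                        /* the lock was dropped inside do_fault();
                           try exactly once more */
                        flags &= ~FLAG_ALLOW_RETRY;
                        flags |= FLAG_TRIED;
                        continue;
                }
                return 0;
        }
}
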
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644
index 00000000000..17a8c0d6fd1
--- /dev/null
+++ b/arch/xtensa/mm/highmem.c
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+ int type;
+
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+ set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+ int idx, type;
+
+ if (kvaddr >= (void *)FIXADDR_START &&
+ kvaddr < (void *)FIXADDR_TOP) {
+ type = kmap_atomic_idx();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they try to access this
+ * pte without first remapping it. Keeping stale mappings around
+ * is also a bad idea, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
+ pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+ local_flush_tlb_kernel_range((unsigned long)kvaddr,
+ (unsigned long)kvaddr + PAGE_SIZE);
+
+ kmap_atomic_idx_pop();
+ }
+
+ pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
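
kmap_atomic() above carves a per-CPU stack of fixmap slots out of the fixmap
region: each CPU owns KM_TYPE_NR consecutive indices, and
kmap_atomic_idx_push()/pop() treat them as a LIFO. A sketch of the slot
arithmetic under one common fixmap layout (top-down from FIXADDR_TOP; the
constants here are assumptions, not Xtensa's actual values):

#define KM_TYPE_NR  8             /* assumed slots per CPU */
#define PAGE_SHIFT  12
#define FIXADDR_TOP 0xfffff000UL  /* assumed layout */

/* Virtual address of fixmap slot 'type' on CPU 'cpu'. */
static unsigned long kmap_slot_vaddr(int type, int cpu)
{
        int idx = type + KM_TYPE_NR * cpu;

        /* one page per index, growing down from FIXADDR_TOP */
        return FIXADDR_TOP - ((unsigned long)(idx + 1) << PAGE_SHIFT);
}

Because the slots are per-CPU and pagefaults (and with them preemption) are
disabled while a slot is held, no locking is needed; nesting depth is
bounded by KM_TYPE_NR.
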
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 660ef058c14..77ed20209ca 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -8,6 +8,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -15,50 +16,146 @@
* Kevin Chea
*/
-#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
-#include <asm/pgtable.h>
#include <asm/bootparam.h>
-#include <asm/mmu_context.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/sysmem.h>
+struct sysmem_info sysmem __initdata;
-#define DEBUG 0
+static void __init sysmem_dump(void)
+{
+ unsigned i;
+
+ pr_debug("Sysmem:\n");
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
+ sysmem.bank[i].start, sysmem.bank[i].end,
+ (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+
+ for (i = 0; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start <= start)
+ it = sysmem.bank + i;
+ else
+ break;
+ return it;
+}
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-//static DEFINE_SPINLOCK(tlb_lock);
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjust nr_banks accordingly.
+ * Both 'from' and 'to' must be inside the sysmem.bank.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+ unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+ if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+ return -ENOMEM;
+ if (to != from)
+ memmove(to, from, n * sizeof(struct meminfo));
+ sysmem.nr_banks += to - from;
+ return 0;
+}
/*
- * This flag is used to indicate that the page was mapped and modified in
- * kernel space, so the cache is probably dirty at that address.
- * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
- * synchronizes the caches if this bit is set.
+ * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
+ * original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
*/
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+ unsigned i;
+ struct meminfo *it = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
+
+ if (start == end ||
+ (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+ pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
-#define PG_cache_clean PG_arch_1
+ start = PAGE_ALIGN(start);
+ end &= PAGE_MASK;
+ sz = end - start;
-/* References to section boundaries */
+ it = find_bank(start);
-extern char _ftext, _etext, _fdata, _edata, _rodata_end;
-extern char __init_begin, __init_end;
+ if (it)
+ bank_sz = it->end - it->start;
+
+ if (it && bank_sz >= start - it->start) {
+ if (end - it->start > bank_sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (!it)
+ it = sysmem.bank;
+ else
+ ++it;
+
+ if (it - sysmem.bank < sysmem.nr_banks &&
+ it->start - start <= sz) {
+ it->start = start;
+ if (it->end - it->start < sz)
+ it->end = end;
+ else
+ return 0;
+ } else {
+ if (move_banks(it + 1, it) < 0) {
+ pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+ start, end - start);
+ return -EINVAL;
+ }
+ it->start = start;
+ it->end = end;
+ return 0;
+ }
+ }
+ sz = it->end - it->start;
+ for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+ if (sysmem.bank[i].start - it->start <= sz) {
+ if (sz < sysmem.bank[i].end - it->start)
+ it->end = sysmem.bank[i].end;
+ } else {
+ break;
+ }
+
+ move_banks(it + 1, sysmem.bank + i);
+ return 0;
+}
/*
* mem_reserve(start, end, must_exist)
*
* Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not
+ * exist, the memory map is not altered.
*
* Parameters:
* start Start of region,
@@ -66,58 +163,74 @@ extern char __init_begin, __init_end;
* must_exist Must exist in memory pool.
*
* Returns:
- * 0 (memory area couldn't be mapped)
- * -1 (success)
+ * 0 (success)
+ * < 0 (error)
*/
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
- int i;
-
- if (start == end)
- return 0;
+ struct meminfo *it;
+ struct meminfo *rm = NULL;
+ unsigned long sz;
+ unsigned long bank_sz = 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
+ sz = end - start;
+ if (!sz)
+ return -EINVAL;
- for (i = 0; i < sysmem.nr_banks; i++)
- if (start < sysmem.bank[i].end
- && end >= sysmem.bank[i].start)
- break;
+ it = find_bank(start);
+
+ if (it)
+ bank_sz = it->end - it->start;
- if (i == sysmem.nr_banks) {
- if (must_exist)
- printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
- "not in any region!\n", start, end);
- return 0;
+ if ((!it || end - it->start > bank_sz) && must_exist) {
+ pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+ start, end);
+ return -EINVAL;
}
- if (start > sysmem.bank[i].start) {
- if (end < sysmem.bank[i].end) {
- /* split entry */
- if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
- panic("meminfo overflow\n");
- sysmem.bank[sysmem.nr_banks].start = end;
- sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
- sysmem.nr_banks++;
+ if (it && start - it->start <= bank_sz) {
+ if (start == it->start) {
+ if (end - it->start < bank_sz) {
+ it->start = end;
+ return 0;
+ } else {
+ rm = it;
+ }
+ } else {
+ it->end = start;
+ if (end - it->start < bank_sz)
+ return add_sysmem_bank(end,
+ it->start + bank_sz);
+ ++it;
}
- sysmem.bank[i].end = start;
- } else {
- if (end < sysmem.bank[i].end)
- sysmem.bank[i].start = end;
- else {
- /* remove entry */
- sysmem.nr_banks--;
- sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
- sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
+ }
+
+ if (!it)
+ it = sysmem.bank;
+
+ for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+ if (it->end - start <= sz) {
+ if (!rm)
+ rm = it;
+ } else {
+ if (it->start - start < sz)
+ it->start = end;
+ break;
}
}
- return -1;
+
+ if (rm)
+ move_banks(rm, it);
+
+ return 0;
}
/*
- * Initialize the bootmem system and give it all the memory we have available.
+ * Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
@@ -126,6 +239,7 @@ void __init bootmem_init(void)
unsigned long bootmap_start, bootmap_size;
int i;
+ sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
@@ -141,12 +255,13 @@ void __init bootmem_init(void)
if (min_low_pfn > max_pfn)
panic("No memory found!\n");
- max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
- max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;
+ max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
+ max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
/* Find an area to use for the bootmem bitmap. */
- bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
+ bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn);
+ bootmap_size <<= PAGE_SHIFT;
bootmap_start = ~0;
for (i=0; i<sysmem.nr_banks; i++)
@@ -161,68 +276,35 @@ void __init bootmem_init(void)
/* Reserve the bootmem bitmap area */
mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
- bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
bootmap_start >> PAGE_SHIFT,
+ min_low_pfn,
max_low_pfn);
/* Add all remaining memory pieces into the bootmem map */
- for (i=0; i<sysmem.nr_banks; i++)
- free_bootmem(sysmem.bank[i].start,
- sysmem.bank[i].end - sysmem.bank[i].start);
+ for (i = 0; i < sysmem.nr_banks; i++) {
+ if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
+ unsigned long end = min(max_low_pfn << PAGE_SHIFT,
+ sysmem.bank[i].end);
+ free_bootmem(sysmem.bank[i].start,
+ end - sysmem.bank[i].start);
+ }
+ }
}
-void __init paging_init(void)
+void __init zones_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- int i;
-
/* All pages are DMA-able, so we put them all in the DMA zone. */
-
- zones_size[ZONE_DMA] = max_low_pfn;
- for (i = 1; i < MAX_NR_ZONES; i++)
- zones_size[i] = 0;
-
+ unsigned long zones_size[MAX_NR_ZONES] = {
+ [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+ [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
-
- /* Initialize the kernel's page tables. */
-
- memset(swapper_pg_dir, 0, PAGE_SIZE);
-
- free_area_init(zones_size);
-}
-
-/*
- * Flush the mmu and reset associated register to default values.
- */
-
-void __init init_mmu (void)
-{
- /* Writing zeros to the <t>TLBCFG special registers ensure
- * that valid values exist in the register. For existing
- * PGSZID<w> fields, zero selects the first element of the
- * page-size array. For nonexistant PGSZID<w> fields, zero is
- * the best value to write. Also, when changing PGSZID<w>
- * fields, the corresponding TLB must be flushed.
- */
- set_itlbcfg_register (0);
- set_dtlbcfg_register (0);
- flush_tlb_all ();
-
- /* Set rasid register to a known value. */
-
- set_rasid_register (ASID_ALL_RESERVED);
-
- /* Set PTEVADDR special register to the start of the page
- * table, which is in kernel mappable space (ie. not
- * statically mapped). This register's value is undefined on
- * reset.
- */
- set_ptevaddr_register (PGTABLE_START);
+ };
+ free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
/*
@@ -231,50 +313,38 @@ void __init init_mmu (void)
void __init mem_init(void)
{
- unsigned long codesize, reservedpages, datasize, initsize;
- unsigned long highmemsize, tmp, ram;
-
- max_mapnr = num_physpages = max_low_pfn;
- high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
- highmemsize = 0;
-
#ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
-#endif
+ unsigned long tmp;
- totalram_pages += free_all_bootmem();
+ reset_all_zones_managed_pages();
+ for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
+#endif
- reservedpages = ram = 0;
- for (tmp = 0; tmp < max_low_pfn; tmp++) {
- ram++;
- if (PageReserved(mem_map+tmp))
- reservedpages++;
- }
+ max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+ high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
- codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
- datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
- "%ldk data, %ldk init %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- ram << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10,
- highmemsize >> 10);
-}
+ free_all_bootmem();
-void
-free_reserved_mem(void *start, void *end)
-{
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page((unsigned long)start);
- totalram_pages++;
- }
+ mem_init_print_info(NULL);
+ pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
+#endif
+ " vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
+ " lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+ PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+ (LAST_PKMAP*PAGE_SIZE) >> 10,
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+ PAGE_OFFSET, PAGE_OFFSET +
+ (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -282,269 +352,62 @@ extern int initrd_is_mapped;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (initrd_is_mapped) {
- free_reserved_mem((void*)start, (void*)end);
- printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
- }
+ if (initrd_is_mapped)
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
void free_initmem(void)
{
- free_reserved_mem(&__init_begin, &__init_end);
- printk("Freeing unused kernel memory: %dk freed\n",
- (&__init_end - &__init_begin) >> 10);
+ free_initmem_default(-1);
}
-void show_mem(void)
+static void __init parse_memmap_one(char *p)
{
- int i, free = 0, total = 0, reserved = 0;
- int shared = 0, cached = 0;
-
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!page_count(mem_map + i))
- free++;
- else
- shared += page_count(mem_map + i) - 1;
- }
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n",cached);
- printk("%d free pages\n", free);
-}
-
-/* ------------------------------------------------------------------------- */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+ char *oldp;
+ unsigned long start_at, mem_size;
-/*
- * With cache aliasing, the page color of the page in kernel space and user
- * space might mismatch. We temporarily map the page to a different virtual
- * address with the same color and clear the page there.
- */
-
-void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
-{
-
- /* There shouldn't be any entries for this page. */
-
- __flush_invalidate_dcache_page_phys(__pa(page_address(page)));
-
- if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
- unsigned long v, p;
-
- /* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */
-
- spin_lock(&tlb_lock);
-
- p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
- kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
- v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
- __asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v));
-
- clear_page(kaddr);
-
- spin_unlock(&tlb_lock);
- } else {
- clear_page(kaddr);
- }
-
- /* We need to make sure that i$ and d$ are coherent. */
-
- clear_bit(PG_cache_clean, &page->flags);
-}
-
-/*
- * With cache aliasing, we have to make sure that the page color of the page
- * in kernel space matches that of the virtual user address before we read
- * the page. If the page color differ, we create a temporary DTLB entry with
- * the corrent page color and use this 'temporary' address as the source.
- * We then use the same approach as in clear_user_page and copy the data
- * to the kernel space and clear the PG_cache_clean bit to synchronize caches
- * later.
- *
- * Note:
- * Instead of using another 'way' for the temporary DTLB entry, we could
- * probably use the same entry that points to the kernel address (after
- * saving the original value and restoring it when we are done).
- */
-
-void copy_user_page(void* to, void* from, unsigned long vaddr,
- struct page* to_page)
-{
- /* There shouldn't be any entries for the new page. */
-
- __flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));
-
- spin_lock(&tlb_lock);
-
- if (!PAGE_COLOR_EQ(vaddr, from)) {
- unsigned long v, p, t;
-
- __asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
- : "=a"(p), "=a"(t) : "a"(from));
- from = (void*)PAGE_COLOR_MAP0(vaddr);
- v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
- __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
- }
-
- if (!PAGE_COLOR_EQ(vaddr, to)) {
- unsigned long v, p;
-
- p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
- to = (void*)PAGE_COLOR_MAP1(vaddr);
- v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
- __asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
- }
- copy_page(to, from);
-
- spin_unlock(&tlb_lock);
-
- /* We need to make sure that i$ and d$ are coherent. */
-
- clear_bit(PG_cache_clean, &to_page->flags);
-}
-
-
-
-/*
- * Any time the kernel writes to a user page cache page, or it is about to
- * read from a page cache page this routine is called.
- *
- * Note:
- * The kernel currently only provides one architecture bit in the page
- * flags that we use for I$/D$ coherency. Maybe, in future, we can
- * use a sepearte bit for deferred dcache aliasing:
- * If the page is not mapped yet, we only need to set a flag,
- * if mapped, we need to invalidate the page.
- */
-// FIXME: we probably need this for WB caches not only for Page Coloring..
-
-void flush_dcache_page(struct page *page)
-{
- unsigned long addr = __pa(page_address(page));
- struct address_space *mapping = page_mapping(page);
-
- __flush_invalidate_dcache_page_phys(addr);
-
- if (!test_bit(PG_cache_clean, &page->flags))
+ if (!p)
return;
- /* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
-#if 0
- if (mapping && !mapping_mapped(mapping))
- clear_bit(PG_cache_clean, &page->flags);
- else
-#endif
- __invalidate_icache_page_phys(addr);
-}
-
-void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
- unsigned long e)
-{
- __flush_invalidate_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
- unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
-
- /* Remove any entry for the old mapping. */
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return;
- if (current->active_mm == vma->vm_mm) {
- unsigned long addr = __pa(page_address(page));
- __flush_invalidate_dcache_page_phys(addr);
- if ((vma->vm_flags & VM_EXEC) != 0)
- __invalidate_icache_page_phys(addr);
- } else {
- BUG();
- }
-}
+ switch (*p) {
+ case '@':
+ start_at = memparse(p + 1, &p);
+ add_sysmem_bank(start_at, start_at + mem_size);
+ break;
-#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
+ case '$':
+ start_at = memparse(p + 1, &p);
+ mem_reserve(start_at, start_at + mem_size, 0);
+ break;
+ case 0:
+ mem_reserve(mem_size, 0, 0);
+ break;
-pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
-{
- pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
- if (likely(pte)) {
- pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
- int i;
- for (i = 0; i < 1024; i++, ptep++)
- pte_clear(mm, addr, ptep);
+ default:
+ pr_warn("Unrecognized memmap syntax: %s\n", p);
+ break;
}
- return pte;
}
-struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static int __init parse_memmap_opt(char *str)
{
- struct page *page;
+ while (str) {
+ char *k = strchr(str, ',');
- page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
+ if (k)
+ *k++ = 0;
- if (likely(page)) {
- pte_t* ptep = kmap_atomic(page, KM_USER0);
- int i;
-
- for (i = 0; i < 1024; i++, ptep++)
- pte_clear(mm, addr, ptep);
-
- kunmap_atomic(ptep, KM_USER0);
+ parse_memmap_one(str);
+ str = k;
}
- return page;
-}
-
-/*
- * Handle D$/I$ coherency.
- *
- * Note:
- * We only have one architecture bit for the page flags, so we cannot handle
- * cache aliasing, yet.
- */
-
-void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
-{
- unsigned long pfn = pte_pfn(pte);
- struct page *page;
- unsigned long vaddr = addr & PAGE_MASK;
-
- if (!pfn_valid(pfn))
- return;
-
- page = pfn_to_page(pfn);
-
- invalidate_itlb_mapping(addr);
- invalidate_dtlb_mapping(addr);
-
- /* We have a new mapping. Use it. */
-
- write_dtlb_entry(pte, dtlb_probe(addr));
-
- /* If the processor can execute from this page, synchronize D$/I$. */
-
- if ((vma->vm_flags & VM_EXEC) != 0) {
-
- write_itlb_entry(pte, itlb_probe(addr));
-
- /* Synchronize caches, if not clean. */
-
- if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
- __flush_dcache_page(vaddr);
- __invalidate_icache_page(vaddr);
- }
- }
+ return 0;
}
-
+early_param("memmap", parse_memmap_opt);
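
parse_memmap_one() above gives the kernel command line three forms, which
may be chained with commas. Assuming the usual memparse() size suffixes, the
accepted syntax looks like this (addresses and sizes are examples only):

    memmap=64M@0x80000000     add a 64 MB bank at physical 0x80000000
    memmap=1M$0x9f000000      reserve (remove) 1 MB at 0x9f000000
    memmap=128M               cap usable memory at 128 MB

The '@' form feeds add_sysmem_bank(), the '$' form feeds mem_reserve(), and
the bare-size form reserves from that size upward: the unsigned wrap-around
in mem_reserve(mem_size, 0, 0) effectively extends the reserved region to
the end of the address space.
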
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 327c0f17187..1f68558dbcc 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -7,30 +7,34 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
-/* Note: we might want to implement some of the loops as zero-overhead-loops,
- * where applicable and if supported by the processor.
- */
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheasm.h>
+#include <asm/tlbflush.h>
-#include <xtensa/cacheasm.h>
-#include <xtensa/cacheattrasm.h>
-/* clear_page (page) */
+/*
+ * clear_page and clear_user_page are the same for non-cache-aliased configs.
+ *
+ * clear_page (unsigned long page)
+ * a2
+ */
ENTRY(clear_page)
+
entry a1, 16
- addi a4, a2, PAGE_SIZE
- movi a3, 0
-1: s32i a3, a2, 0
+ movi a3, 0
+ __loopi a2, a7, PAGE_SIZE, 32
+ s32i a3, a2, 0
s32i a3, a2, 4
s32i a3, a2, 8
s32i a3, a2, 12
@@ -38,337 +42,455 @@ ENTRY(clear_page)
s32i a3, a2, 20
s32i a3, a2, 24
s32i a3, a2, 28
- addi a2, a2, 32
- blt a2, a4, 1b
+ __endla a2, a7, 32
retw
+ENDPROC(clear_page)
+
/*
+ * copy_page and copy_user_page are the same for non-cache-aliased configs.
+ *
* copy_page (void *to, void *from)
- * a2 a3
+ * a2 a3
*/
ENTRY(copy_page)
+
entry a1, 16
- addi a4, a2, PAGE_SIZE
-
-1: l32i a5, a3, 0
- l32i a6, a3, 4
- l32i a7, a3, 8
- s32i a5, a2, 0
- s32i a6, a2, 4
- s32i a7, a2, 8
- l32i a5, a3, 12
- l32i a6, a3, 16
- l32i a7, a3, 20
- s32i a5, a2, 12
- s32i a6, a2, 16
- s32i a7, a2, 20
- l32i a5, a3, 24
- l32i a6, a3, 28
- s32i a5, a2, 24
- s32i a6, a2, 28
- addi a2, a2, 32
- addi a3, a3, 32
- blt a2, a4, 1b
- retw
+ __loopi a2, a4, PAGE_SIZE, 32
+ l32i a8, a3, 0
+ l32i a9, a3, 4
+ s32i a8, a2, 0
+ s32i a9, a2, 4
-/*
- * void __flush_invalidate_cache_all(void)
- */
+ l32i a8, a3, 8
+ l32i a9, a3, 12
+ s32i a8, a2, 8
+ s32i a9, a2, 12
-ENTRY(__flush_invalidate_cache_all)
- entry sp, 16
- dcache_writeback_inv_all a2, a3
- icache_invalidate_all a2, a3
- retw
+ l32i a8, a3, 16
+ l32i a9, a3, 20
+ s32i a8, a2, 16
+ s32i a9, a2, 20
-/*
- * void __invalidate_icache_all(void)
- */
+ l32i a8, a3, 24
+ l32i a9, a3, 28
+ s32i a8, a2, 24
+ s32i a9, a2, 28
-ENTRY(__invalidate_icache_all)
- entry sp, 16
- icache_invalidate_all a2, a3
- retw
+ addi a2, a2, 32
+ addi a3, a3, 32
-/*
- * void __flush_invalidate_dcache_all(void)
- */
+ __endl a2, a4
-ENTRY(__flush_invalidate_dcache_all)
- entry sp, 16
- dcache_writeback_inv_all a2, a3
retw
+ENDPROC(copy_page)
+#ifdef CONFIG_MMU
/*
- * void __flush_invalidate_cache_range(ulong start, ulong size)
+ * If we have to deal with cache aliasing, we use temporary memory mappings
+ * to ensure that the source and destination pages have the same color as
+ * the virtual address. We use ways 0 and 1 for temporary mappings in such
+ * cases.
+ *
+ * The temporary DTLB entries shouldn't be flushed by interrupts, but are
+ * flushed by preemptive task switches. Special code in the
+ * fast_second_level_miss handler re-establishes the temporary mapping.
+ * It requires that the PPNs for the destination and source addresses are
+ * in a6 and a7, respectively.
*/
-ENTRY(__flush_invalidate_cache_range)
- entry sp, 16
- mov a4, a2
- mov a5, a3
- dcache_writeback_inv_region a4, a5, a6
- icache_invalidate_region a2, a3, a4
- retw
+/* TLB miss exceptions are treated specially in the following region */
+
+ENTRY(__tlbtemp_mapping_start)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
- * void __invalidate_icache_page(ulong start)
+ * clear_user_page (void *addr, unsigned long vaddr, struct page *page)
+ * a2 a3 a4
*/
-ENTRY(__invalidate_icache_page)
- entry sp, 16
- movi a3, PAGE_SIZE
- icache_invalidate_region a2, a3, a4
+ENTRY(clear_user_page)
+
+ entry a1, 32
+
+ /* Mark page dirty and determine alias. */
+
+ movi a7, (1 << PG_ARCH_1)
+ l32i a5, a4, PAGE_FLAGS
+ xor a6, a2, a3
+ extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+ extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+ or a5, a5, a7
+ slli a3, a3, PAGE_SHIFT
+ s32i a5, a4, PAGE_FLAGS
+
+ /* Skip setting up a temporary DTLB if not aliased. */
+
+ beqz a6, 1f
+
+ /* Invalidate kernel page. */
+
+ mov a10, a2
+ call8 __invalidate_dcache_page
+
+ /* Setup a temporary DTLB with the color of the VPN */
+
+ movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
+ movi a5, TLBTEMP_BASE_1 # virt
+ add a6, a2, a4 # ppn
+ add a2, a5, a3 # add 'color'
+
+ wdtlb a6, a2
+ dsync
+
+1: movi a3, 0
+ __loopi a2, a7, PAGE_SIZE, 32
+ s32i a3, a2, 0
+ s32i a3, a2, 4
+ s32i a3, a2, 8
+ s32i a3, a2, 12
+ s32i a3, a2, 16
+ s32i a3, a2, 20
+ s32i a3, a2, 24
+ s32i a3, a2, 28
+ __endla a2, a7, 32
+
+ bnez a6, 1f
retw
-/*
- * void __invalidate_dcache_page(ulong start)
- */
+ /* We need to invalidate the temporary idtlb entry, if any. */
+
+1: addi a2, a2, -PAGE_SIZE
+ idtlb a2
+ dsync
-ENTRY(__invalidate_dcache_page)
- entry sp, 16
- movi a3, PAGE_SIZE
- dcache_invalidate_region a2, a3, a4
retw
+ENDPROC(clear_user_page)
+
/*
- * void __invalidate_icache_range(ulong start, ulong size)
+ * copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
+ * a2 a3 a4 a5
*/
-ENTRY(__invalidate_icache_range)
- entry sp, 16
- icache_invalidate_region a2, a3, a4
+ENTRY(copy_user_page)
+
+ entry a1, 32
+
+ /* Mark page dirty and determine alias for destination. */
+
+ movi a8, (1 << PG_ARCH_1)
+ l32i a9, a5, PAGE_FLAGS
+ xor a6, a2, a4
+ xor a7, a3, a4
+ extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+ extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+ extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
+ or a9, a9, a8
+ slli a4, a4, PAGE_SHIFT
+ s32i a9, a5, PAGE_FLAGS
+ movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
+
+ beqz a6, 1f
+
+ /* Invalidate dcache */
+
+ mov a10, a2
+ call8 __invalidate_dcache_page
+
+ /* Setup a temporary DTLB with a matching color. */
+
+ movi a8, TLBTEMP_BASE_1 # base
+ add a6, a2, a5 # ppn
+ add a2, a8, a4 # add 'color'
+
+ wdtlb a6, a2
+ dsync
+
+ /* Skip setting up a temporary DTLB for destination if not aliased. */
+
+1: beqz a7, 1f
+
+ /* Setup a temporary DTLB with a matching color. */
+
+ movi a8, TLBTEMP_BASE_2 # base
+ add a7, a3, a5 # ppn
+ add a3, a8, a4
+ addi a8, a3, 1 # way1
+
+ wdtlb a7, a8
+ dsync
+
+1: __loopi a2, a4, PAGE_SIZE, 32
+
+ l32i a8, a3, 0
+ l32i a9, a3, 4
+ s32i a8, a2, 0
+ s32i a9, a2, 4
+
+ l32i a8, a3, 8
+ l32i a9, a3, 12
+ s32i a8, a2, 8
+ s32i a9, a2, 12
+
+ l32i a8, a3, 16
+ l32i a9, a3, 20
+ s32i a8, a2, 16
+ s32i a9, a2, 20
+
+ l32i a8, a3, 24
+ l32i a9, a3, 28
+ s32i a8, a2, 24
+ s32i a9, a2, 28
+
+ addi a2, a2, 32
+ addi a3, a3, 32
+
+ __endl a2, a4
+
+ /* We need to invalidate any temporary mapping! */
+
+ bnez a6, 1f
+ bnez a7, 2f
retw
-/*
- * void __invalidate_dcache_range(ulong start, ulong size)
- */
+1: addi a2, a2, -PAGE_SIZE
+ idtlb a2
+ dsync
+ bnez a7, 2f
+ retw
+
+2: addi a3, a3, -PAGE_SIZE+1
+ idtlb a3
+ dsync
-ENTRY(__invalidate_dcache_range)
- entry sp, 16
- dcache_invalidate_region a2, a3, a4
retw
+ENDPROC(copy_user_page)
+
+#endif
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
/*
- * void __flush_dcache_page(ulong start)
+ * void __flush_invalidate_dcache_page_alias (addr, phys)
+ * a2 a3
*/
-ENTRY(__flush_dcache_page)
+ENTRY(__flush_invalidate_dcache_page_alias)
+
entry sp, 16
- movi a3, PAGE_SIZE
- dcache_writeback_region a2, a3, a4
+
+ movi a7, 0 # required for exception handler
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ ___flush_invalidate_dcache_page a2 a3
+
+ idtlb a4
+ dsync
+
retw
-/*
- * void __flush_invalidate_dcache_page(ulong start)
- */
+ENDPROC(__flush_invalidate_dcache_page_alias)
+#endif
+
+ENTRY(__tlbtemp_mapping_itlb)
+
+#if (ICACHE_WAY_SIZE > PAGE_SIZE)
+
+ENTRY(__invalidate_icache_page_alias)
-ENTRY(__flush_invalidate_dcache_page)
entry sp, 16
- movi a3, PAGE_SIZE
- dcache_writeback_inv_region a2, a3, a4
+
+ addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
+ mov a4, a2
+ witlb a6, a2
+ isync
+
+ ___invalidate_icache_page a2 a3
+
+ iitlb a4
+ isync
retw
+ENDPROC(__invalidate_icache_page_alias)
+
+#endif
+
+/* End of special treatment in TLB miss exceptions */
+
+ENTRY(__tlbtemp_mapping_end)
+
+#endif /* CONFIG_MMU */
+
/*
- * void __flush_invalidate_dcache_range(ulong start, ulong size)
+ * void __invalidate_icache_page(ulong start)
*/
-ENTRY(__flush_invalidate_dcache_range)
+ENTRY(__invalidate_icache_page)
+
entry sp, 16
- dcache_writeback_inv_region a2, a3, a4
+
+ ___invalidate_icache_page a2 a3
+ isync
+
retw
+ENDPROC(__invalidate_icache_page)
+
/*
- * void __invalidate_dcache_all(void)
+ * void __invalidate_dcache_page(ulong start)
*/
-ENTRY(__invalidate_dcache_all)
+ENTRY(__invalidate_dcache_page)
+
entry sp, 16
- dcache_invalidate_all a2, a3
+
+ ___invalidate_dcache_page a2 a3
+ dsync
+
retw
+ENDPROC(__invalidate_dcache_page)
+
/*
- * void __flush_invalidate_dcache_page_phys(ulong start)
+ * void __flush_invalidate_dcache_page(ulong start)
*/
-ENTRY(__flush_invalidate_dcache_page_phys)
- entry sp, 16
+ENTRY(__flush_invalidate_dcache_page)
- movi a3, XCHAL_DCACHE_SIZE
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ entry sp, 16
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+ ___flush_invalidate_dcache_page a2 a3
- ldct a6, a3
dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a3, 2, 1b
retw
-2: diwbi a3, 0
- bgeui a3, 2, 1b
- retw
+ENDPROC(__flush_invalidate_dcache_page)
-ENTRY(check_dcache_low0)
- entry sp, 16
+/*
+ * void __flush_dcache_page(ulong start)
+ */
- movi a3, XCHAL_DCACHE_SIZE / 4
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ENTRY(__flush_dcache_page)
+
+ entry sp, 16
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
+ ___flush_dcache_page a2 a3
- ldct a6, a3
dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a3, 2, 1b
retw
-2: j 2b
+ENDPROC(__flush_dcache_page)
-ENTRY(check_dcache_high0)
- entry sp, 16
+/*
+ * void __invalidate_icache_range(ulong start, ulong size)
+ */
- movi a5, XCHAL_DCACHE_SIZE / 4
- movi a3, XCHAL_DCACHE_SIZE / 2
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ENTRY(__invalidate_icache_range)
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
- addi a5, a5, -XCHAL_DCACHE_LINESIZE
+ entry sp, 16
+
+ ___invalidate_icache_range a2 a3 a4
+ isync
- ldct a6, a3
- dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a5, 2, 1b
retw
-2: j 2b
+ENDPROC(__invalidate_icache_range)
-ENTRY(check_dcache_low1)
- entry sp, 16
+/*
+ * void __flush_invalidate_dcache_range(ulong start, ulong size)
+ */
- movi a5, XCHAL_DCACHE_SIZE / 4
- movi a3, XCHAL_DCACHE_SIZE * 3 / 4
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ENTRY(__flush_invalidate_dcache_range)
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
- addi a5, a5, -XCHAL_DCACHE_LINESIZE
+ entry sp, 16
- ldct a6, a3
+ ___flush_invalidate_dcache_range a2 a3 a4
dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a5, 2, 1b
+
retw
-2: j 2b
+ENDPROC(__flush_invalidate_dcache_range)
-ENTRY(check_dcache_high1)
- entry sp, 16
+/*
+ * void __flush_dcache_range(ulong start, ulong size)
+ */
- movi a5, XCHAL_DCACHE_SIZE / 4
- movi a3, XCHAL_DCACHE_SIZE
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ENTRY(__flush_dcache_range)
-1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
- addi a5, a5, -XCHAL_DCACHE_LINESIZE
+ entry sp, 16
- ldct a6, a3
+ ___flush_dcache_range a2 a3 a4
dsync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a5, 2, 1b
- retw
-2: j 2b
+ retw
+ENDPROC(__flush_dcache_range)
/*
- * void __invalidate_icache_page_phys(ulong start)
+ * void __invalidate_dcache_range(ulong start, ulong size)
*/
-ENTRY(__invalidate_icache_page_phys)
- entry sp, 16
-
- movi a3, XCHAL_ICACHE_SIZE
- movi a4, PAGE_MASK | 1
- addi a2, a2, 1
+ENTRY(__invalidate_dcache_range)
-1: addi a3, a3, -XCHAL_ICACHE_LINESIZE
+ entry sp, 16
- lict a6, a3
- isync
- and a6, a6, a4
- beq a6, a2, 2f
- bgeui a3, 2, 1b
- retw
+ ___invalidate_dcache_range a2 a3 a4
-2: iii a3, 0
- bgeui a3, 2, 1b
retw
+ENDPROC(__invalidate_dcache_range)
-#if 0
-
- movi a3, XCHAL_DCACHE_WAYS - 1
- movi a4, PAGE_SIZE
-
-1: mov a5, a2
- add a6, a2, a4
+/*
+ * void __invalidate_icache_all(void)
+ */
-2: diwbi a5, 0
- diwbi a5, XCHAL_DCACHE_LINESIZE
- diwbi a5, XCHAL_DCACHE_LINESIZE * 2
- diwbi a5, XCHAL_DCACHE_LINESIZE * 3
+ENTRY(__invalidate_icache_all)
- addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
- blt a5, a6, 2b
+ entry sp, 16
- addi a3, a3, -1
- addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
- bgez a3, 1b
+ ___invalidate_icache_all a2 a3
+ isync
retw
-ENTRY(__invalidate_icache_page_index)
- entry sp, 16
-
- movi a3, XCHAL_ICACHE_WAYS - 1
- movi a4, PAGE_SIZE
+ENDPROC(__invalidate_icache_all)
-1: mov a5, a2
- add a6, a2, a4
+/*
+ * void __flush_invalidate_dcache_all(void)
+ */
-2: iii a5, 0
- iii a5, XCHAL_ICACHE_LINESIZE
- iii a5, XCHAL_ICACHE_LINESIZE * 2
- iii a5, XCHAL_ICACHE_LINESIZE * 3
+ENTRY(__flush_invalidate_dcache_all)
- addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
- blt a5, a6, 2b
+ entry sp, 16
- addi a3, a3, -1
- addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
- bgez a3, 2b
+ ___flush_invalidate_dcache_all a2 a3
+ dsync
retw
-#endif
+ENDPROC(__flush_invalidate_dcache_all)
+/*
+ * void __invalidate_dcache_all(void)
+ */
+ENTRY(__invalidate_dcache_all)
+ entry sp, 16
+ ___invalidate_dcache_all a2 a3
+ dsync
+ retw
+ENDPROC(__invalidate_dcache_all)
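
For reference, the aliasing paths in clear_user_page/copy_user_page above
reduce to a scheme that is easier to follow in C. This is an illustrative
model only: the real routines are assembly and program DTLB ways 0 and 1
directly, and map_tlbtemp(), unmap_tlbtemp() and dcache_alias_eq() below
are invented stand-ins, not kernel functions.

/* Model of clear_user_page() for an aliasing D-cache. */
void clear_user_page_model(void *kaddr, unsigned long vaddr,
                           struct page *page)
{
        set_bit(PG_arch_1, &page->flags);       /* mark page dirty */

        if (!dcache_alias_eq((unsigned long)kaddr, vaddr)) {
                /* drop stale lines at the kernel color ... */
                invalidate_dcache_page(kaddr);
                /* ... then write through a temp mapping of the user color */
                kaddr = map_tlbtemp(page_to_phys(page),
                                    vaddr & DCACHE_ALIAS_MASK);
                memset(kaddr, 0, PAGE_SIZE);
                unmap_tlbtemp(kaddr);           /* idtlb + dsync */
        } else {
                memset(kaddr, 0, PAGE_SIZE);
        }
}
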
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
new file mode 100644
index 00000000000..3429b483d9f
--- /dev/null
+++ b/arch/xtensa/mm/mmu.c
@@ -0,0 +1,104 @@
+/*
+ * Xtensa MMU support
+ *
+ * Extracted from init.c
+ */
+#include <linux/bootmem.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/initialize_mmu.h>
+#include <asm/io.h>
+
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+ if (pmd_none(*pmd)) {
+ unsigned i;
+ pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+ for (i = 0; i < 1024; i++)
+ pte_clear(NULL, 0, pte + i);
+
+ set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+ __func__, vaddr, pmd, pte);
+ return pte;
+ } else {
+ return pte_offset_kernel(pmd, 0);
+ }
+}
+
+static void __init fixedrange_init(void)
+{
+ BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+ init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
+
+void __init paging_init(void)
+{
+ memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+ fixedrange_init();
+ pkmap_page_table = init_pmd(PKMAP_BASE);
+ kmap_init();
+#endif
+}
+
+/*
+ * Flush the mmu and reset associated register to default values.
+ */
+void init_mmu(void)
+{
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+ /*
+ * Writing zeros to the instruction and data TLBCFG special
+ * registers ensure that valid values exist in the register.
+ *
+ * For existing PGSZID<w> fields, zero selects the first element
+ * of the page-size array. For nonexistent PGSZID<w> fields,
+ * zero is the best value to write. Also, when changing PGSZID<w>
+ * fields, the corresponding TLB must be flushed.
+ */
+ set_itlbcfg_register(0);
+ set_dtlbcfg_register(0);
+#endif
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+ /*
+ * Update the IO area mapping in case xtensa_kio_paddr has changed
+ */
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
+ XCHAL_KIO_CACHED_VADDR + 6);
+ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+ write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
+ XCHAL_KIO_BYPASS_VADDR + 6);
+#endif
+
+ local_flush_tlb_all();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+ /* Set PTEVADDR special register to the start of the page
+ * table, which is in kernel mappable space (ie. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register(PGTABLE_START);
+}
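
The "+ 6" in the write_{d,i}tlb_entry() calls above is the TLB way selector:
as in the __flush_*tlb_all() loops in tlb.c, the entry operand of
wdtlb/witlb packs the way number into the low bits and the page index into
the upper bits, so the KIO mappings land in way 6. A hedged sketch of that
encoding (the helper name is invented, and the PAGE_MASK granularity is
illustrative; the KIO ways actually use large pages):

/* Entry operand for wdtlb/witlb: VPN in the high bits, way in the low. */
static inline unsigned long tlb_entry_at_way(unsigned long vaddr, int way)
{
        return (vaddr & PAGE_MASK) | (unsigned long)way;
}

/* e.g. tlb_entry_at_way(XCHAL_KIO_CACHED_VADDR, 6) */
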
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
deleted file mode 100644
index 7d28914d11c..00000000000
--- a/arch/xtensa/mm/pgtable.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * arch/xtensa/mm/fault.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- *
- * Chris Zankel <chris@zankel.net>
- */
-
-#if (DCACHE_SIZE > PAGE_SIZE)
-
-pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
- pte_t *pte = NULL, *p;
- int color = ADDR_COLOR(address);
- int i;
-
- p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
-
- if (likely(p)) {
- split_page(virt_to_page(p), COLOR_ORDER);
-
- for (i = 0; i < COLOR_SIZE; i++) {
- if (ADDR_COLOR(p) == color)
- pte = p;
- else
- free_page(p);
- p += PTRS_PER_PTE;
- }
- clear_page(pte);
- }
- return pte;
-}
-
-#ifdef PROFILING
-
-int mask;
-int hit;
-int flush;
-
-#endif
-
-struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- struct page *page = NULL, *p;
- int color = ADDR_COLOR(address);
-
- p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
-
- if (likely(p)) {
- split_page(p, COLOR_ORDER);
-
- for (i = 0; i < PAGE_ORDER; i++) {
- if (PADDR_COLOR(page_address(p)) == color)
- page = p;
- else
- __free_page(p);
- p++;
- }
- clear_highpage(page);
- }
-
- return page;
-}
-
-#endif
-
-
-
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index d3bd3bfc3b3..5ece856c572 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -1,5 +1,5 @@
/*
- * arch/xtensa/mm/mmu.c
+ * arch/xtensa/mm/tlb.c
*
* Logic that manipulates the Xtensa MMU. Derived from MIPS.
*
@@ -18,18 +18,17 @@
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-#include <asm/system.h>
#include <asm/cacheflush.h>
static inline void __flush_itlb_all (void)
{
- int way, index;
+ int w, i;
- for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
- for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
- int entry = way + (index << PAGE_SHIFT);
- invalidate_itlb_entry_no_isync (entry);
+ for (w = 0; w < ITLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_itlb_entry_no_isync(e);
}
}
asm volatile ("isync\n");
@@ -37,19 +36,19 @@ static inline void __flush_itlb_all (void)
static inline void __flush_dtlb_all (void)
{
- int way, index;
+ int w, i;
- for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
- for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
- int entry = way + (index << PAGE_SHIFT);
- invalidate_dtlb_entry_no_isync (entry);
+ for (w = 0; w < DTLB_ARF_WAYS; w++) {
+ for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
+ int e = w + (i << PAGE_SHIFT);
+ invalidate_dtlb_entry_no_isync(e);
}
}
asm volatile ("isync\n");
}
-void flush_tlb_all (void)
+void local_flush_tlb_all(void)
{
__flush_itlb_all();
__flush_dtlb_all();
@@ -61,43 +60,53 @@ void flush_tlb_all (void)
* a new context will be assigned to it.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
{
-#if 0
- printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
-#endif
+ int cpu = smp_processor_id();
if (mm == current->active_mm) {
- int flags;
- local_save_flags(flags);
- get_new_mmu_context(mm, asid_cache);
- set_rasid_register(ASID_INSERT(mm->context));
+ unsigned long flags;
+ local_irq_save(flags);
+ mm->context.asid[cpu] = NO_CONTEXT;
+ activate_context(mm, cpu);
local_irq_restore(flags);
+ } else {
+ mm->context.asid[cpu] = NO_CONTEXT;
+ mm->context.cpu = -1;
}
- else
- mm->context = 0;
}
-void flush_tlb_range (struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+
+#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
+#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
+#if _ITLB_ENTRIES > _DTLB_ENTRIES
+# define _TLB_ENTRIES _ITLB_ENTRIES
+#else
+# define _TLB_ENTRIES _DTLB_ENTRIES
+#endif
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
+ int cpu = smp_processor_id();
struct mm_struct *mm = vma->vm_mm;
unsigned long flags;
- if (mm->context == NO_CONTEXT)
+ if (mm->context.asid[cpu] == NO_CONTEXT)
return;
#if 0
printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
- (unsigned long)mm->context, start, end);
+ (unsigned long)mm->context.asid[cpu], start, end);
#endif
- local_save_flags(flags);
+ local_irq_save(flags);
- if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
+ if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
int oldpid = get_rasid_register();
- set_rasid_register (ASID_INSERT(mm->context));
+
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
start &= PAGE_MASK;
- if (vma->vm_flags & VM_EXEC)
+ if (vma->vm_flags & VM_EXEC)
while(start < end) {
invalidate_itlb_mapping(start);
invalidate_dtlb_mapping(start);
@@ -111,29 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,
set_rasid_register(oldpid);
} else {
- get_new_mmu_context(mm, asid_cache);
- if (mm == current->active_mm)
- set_rasid_register(ASID_INSERT(mm->context));
+ local_flush_tlb_mm(mm);
}
local_irq_restore(flags);
}
-void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ int cpu = smp_processor_id();
struct mm_struct* mm = vma->vm_mm;
unsigned long flags;
int oldpid;
-#if 0
- printk("[tlbpage<%02lx,%08lx>]\n",
- (unsigned long)mm->context, page);
-#endif
- if(mm->context == NO_CONTEXT)
+ if (mm->context.asid[cpu] == NO_CONTEXT)
return;
- local_save_flags(flags);
+ local_irq_save(flags);
- oldpid = get_rasid_register();
+ oldpid = get_rasid_register();
+ set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
if (vma->vm_flags & VM_EXEC)
invalidate_itlb_mapping(page);
@@ -142,404 +147,132 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
set_rasid_register(oldpid);
local_irq_restore(flags);
-
-#if 0
- flush_tlb_all();
- return;
-#endif
-}
-
-
-#ifdef DEBUG_TLB
-
-#define USE_ITLB 0
-#define USE_DTLB 1
-
-struct way_config_t {
- int indicies;
- int indicies_log2;
- int pgsz_log2;
- int arf;
-};
-
-static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
-{
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
- },
- { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
- }
-};
-
-static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
-{
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
- },
- { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
- }
-};
-
-/* Total number of entries: */
-#define ITLB_TOTAL_ENTRIES \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
- XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
-#define DTLB_TOTAL_ENTRIES \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
- XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
-
-
-typedef struct {
- unsigned va;
- unsigned pa;
- unsigned char asid;
- unsigned char ca;
- unsigned char way;
- unsigned char index;
- unsigned char pgsz_log2; /* 0 .. 32 */
- unsigned char type; /* 0=ITLB 1=DTLB */
-} tlb_dump_entry_t;
-
-/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
-int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
-{
- if (a->asid < b->asid) return -1;
- if (a->asid > b->asid) return 1;
- if (a->va < b->va) return -1;
- if (a->va > b->va) return 1;
- if (a->pa < b->pa) return -1;
- if (a->pa > b->pa) return 1;
- if (a->ca < b->ca) return -1;
- if (a->ca > b->ca) return 1;
- if (a->way < b->way) return -1;
- if (a->way > b->way) return 1;
- if (a->index < b->index) return -1;
- if (a->index > b->index) return 1;
- return 0;
-}
-
-void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
-{
- int i, j;
- /* Simple O(n*n) sort: */
- for (i = 0; i < n-1; i++)
- for (j = i+1; j < n; j++)
- if (cmp_tlb_dump_info(t+i, t+j) > 0) {
- tlb_dump_entry_t tmp = t[i];
- t[i] = t[j];
- t[j] = tmp;
- }
-}
-
-
-static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
-static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
-
-
-static inline char *way_type (int type)
-{
- return type ? "autorefill" : "non-autorefill";
-}
-
-void print_entry (struct way_config_t *way_info,
- unsigned int way,
- unsigned int index,
- unsigned int virtual,
- unsigned int translation)
-{
- char valid_chr;
- unsigned int va, pa, asid, ca;
-
- va = virtual &
- ~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
- asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
- pa = translation & ~((1 << way_info->pgsz_log2) - 1);
- ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
- valid_chr = asid ? 'V' : 'I';
-
- /* Compute and incorporate the effect of the index bits on the
- * va. It's more useful for kernel debugging, since we always
- * want to know the effective va anyway. */
-
- va += index << way_info->pgsz_log2;
-
- printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
- way, index, valid_chr, va, pa, asid, ca);
-}
-
-void print_itlb_entry (struct way_config_t *way_info, int way, int index)
-{
- print_entry (way_info, way, index,
- read_itlb_virtual (way + (index << way_info->pgsz_log2)),
- read_itlb_translation (way + (index << way_info->pgsz_log2)));
-}
-
-void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
-{
- print_entry (way_info, way, index,
- read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
- read_dtlb_translation (way + (index << way_info->pgsz_log2)));
-}
-
-void dump_itlb (void)
-{
- int way, index;
-
- printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
-
- for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
- printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
- way, itlb[way].indicies,
- itlb[way].pgsz_log2, way_type(itlb[way].arf));
- for (index = 0; index < itlb[way].indicies; index++) {
- print_itlb_entry(&itlb[way], way, index);
- }
- }
}
-void dump_dtlb (void)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- int way, index;
-
- printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
-
- for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
- printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
- way, dtlb[way].indicies,
- dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
- for (index = 0; index < dtlb[way].indicies; index++) {
- print_dtlb_entry(&dtlb[way], way, index);
+ if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+ end - start < _TLB_ENTRIES << PAGE_SHIFT) {
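+		/*
+		 * Only addresses between TASK_SIZE and PAGE_OFFSET (the
+		 * page-mapped kernel area, e.g. vmalloc) can be flushed
+		 * page by page; anything else needs a full flush.
+		 */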
+ start &= PAGE_MASK;
+ while (start < end) {
+ invalidate_itlb_mapping(start);
+ invalidate_dtlb_mapping(start);
+ start += PAGE_SIZE;
}
+ } else {
+ local_flush_tlb_all();
}
}
-void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
- int entries, int ways, int type, int show_invalid)
-{
- tlb_dump_entry_t *e = tinfo;
- int way, i;
-
- /* Gather all info: */
- for (way = 0; way < ways; way++) {
- struct way_config_t *cfg = config + way;
- for (i = 0; i < cfg->indicies; i++) {
- unsigned wayindex = way + (i << cfg->pgsz_log2);
- unsigned vv = (type ? read_dtlb_virtual (wayindex)
- : read_itlb_virtual (wayindex));
- unsigned pp = (type ? read_dtlb_translation (wayindex)
- : read_itlb_translation (wayindex));
-
- /* Compute and incorporate the effect of the index bits on the
- * va. It's more useful for kernel debugging, since we always
- * want to know the effective va anyway. */
-
- e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
- e->va += (i << cfg->pgsz_log2);
- e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
- e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
- e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
- e->way = way;
- e->index = i;
- e->pgsz_log2 = cfg->pgsz_log2;
- e->type = type;
- e++;
- }
- }
-#if 1
- /* Sort by ASID and VADDR: */
- sort_tlb_dump_info (tinfo, entries);
-#endif
-
- /* Display all sorted info: */
- printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
- for (e = tinfo, i = 0; i < entries; i++, e++) {
-#if 0
- if (e->asid == 0 && !show_invalid)
- continue;
-#endif
- printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
- (e->type ? 'D' : 'I'), e->way, e->index,
- e->asid, e->va, e->pa, e->ca,
- (1 << (e->pgsz_log2 % 10)),
- " kMG"[e->pgsz_log2 / 10]
- );
- }
-}
+#ifdef CONFIG_DEBUG_TLB_SANITY
-void dump_tlbs2 (int showinv)
+static unsigned get_pte_for_vaddr(unsigned vaddr)
{
- dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
- dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
+ struct task_struct *task = get_current();
+ struct mm_struct *mm = task->mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
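+	/* Kernel threads have no mm of their own; fall back to active_mm. */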
+ if (!mm)
+ mm = task->active_mm;
+ pgd = pgd_offset(mm, vaddr);
+ if (pgd_none_or_clear_bad(pgd))
+ return 0;
+ pmd = pmd_offset(pgd, vaddr);
+ if (pmd_none_or_clear_bad(pmd))
+ return 0;
+ pte = pte_offset_map(pmd, vaddr);
+ if (!pte)
+ return 0;
+ return pte_val(*pte);
}
-void dump_all_tlbs (void)
-{
- dump_tlbs2 (1);
-}
+enum {
+ TLB_SUSPICIOUS = 1,
+ TLB_INSANE = 2,
+};
-void dump_valid_tlbs (void)
+static void tlb_insane(void)
{
- dump_tlbs2 (0);
+ BUG_ON(1);
}
-
-void dump_tlbs (void)
+static void tlb_suspicious(void)
{
- dump_itlb();
- dump_dtlb();
+ WARN_ON(1);
}
-void dump_cache_tag(int dcache, int idx)
+/*
+ * Check that TLB entries with the kernel ASID (1) map kernel addresses
+ * (>= TASK_SIZE), and that TLB entries with user ASIDs (>= 4) map
+ * addresses below TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or that
+ * the PTE is marked non-present. A non-present PTE with a page that has a
+ * non-zero refcount and a zero mapcount is normal for a batched TLB flush
+ * operation. A zero refcount means the page was freed prematurely; a
+ * non-zero mapcount is unusual but does not necessarily mean an error,
+ * so it is only marked as suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
- int w, i, s, e;
- unsigned long tag, index;
- unsigned long num_lines, num_ways, cache_size, line_size;
-
- num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
- cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
- line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;
-
- num_lines = cache_size / num_ways;
-
- s = 0; e = num_lines;
-
- if (idx >= 0)
- e = (s = idx * line_size) + 1;
-
- for (i = s; i < e; i+= line_size) {
- printk("\nline %#08x:", i);
- for (w = 0; w < num_ways; w++) {
- index = w * num_lines + i;
- if (dcache)
- __asm__ __volatile__("ldct %0, %1\n\t"
- : "=a"(tag) : "a"(index));
- else
- __asm__ __volatile__("lict %0, %1\n\t"
- : "=a"(tag) : "a"(index));
-
- printk(" %#010lx", tag);
- }
+ unsigned tlbidx = w | (e << PAGE_SHIFT);
+ unsigned r0 = dtlb ?
+ read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
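+	/* The low VPN bits are implied by the entry index within the way. */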
+ unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+ unsigned pte = get_pte_for_vaddr(vpn);
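+	/* The current mm's ASID lives in bits 8..15 of RASID (ring 1). */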
+ unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+ unsigned tlb_asid = r0 & ASID_MASK;
+ bool kernel = tlb_asid == 1;
+ int rc = 0;
+
+ if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+ pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+ dtlb ? 'D' : 'I', w, e, vpn,
+ kernel ? "kernel" : "user");
+ rc |= TLB_INSANE;
}
- printk ("\n");
-}
-
-void dump_icache(int index)
-{
- unsigned long data, addr;
- int w, i;
-
- const unsigned long num_ways = XCHAL_ICACHE_WAYS;
- const unsigned long cache_size = XCHAL_ICACHE_SIZE;
- const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
- const unsigned long num_lines = cache_size / num_ways / line_size;
- for (w = 0; w < num_ways; w++) {
- printk ("\nWay %d", w);
-
- for (i = 0; i < line_size; i+= 4) {
- addr = w * num_lines + index * line_size + i;
- __asm__ __volatile__("licw %0, %1\n\t"
- : "=a"(data) : "a"(addr));
- printk(" %#010lx", data);
+ if (tlb_asid == mm_asid) {
+ unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+ read_itlb_translation(tlbidx);
+ if ((pte ^ r1) & PAGE_MASK) {
+ pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+ dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+ if (pte == 0 || !pte_present(__pte(pte))) {
+ struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+ pr_err("page refcount: %d, mapcount: %d\n",
+ page_count(p),
+ page_mapcount(p));
+ if (!page_count(p))
+ rc |= TLB_INSANE;
+ else if (page_mapped(p))
+ rc |= TLB_SUSPICIOUS;
+ } else {
+ rc |= TLB_INSANE;
+ }
}
}
- printk ("\n");
+ return rc;
}
-void dump_cache_tags(void)
+void check_tlb_sanity(void)
{
- printk("Instruction cache\n");
- dump_cache_tag(0, -1);
- printk("Data cache\n");
- dump_cache_tag(1, -1);
+ unsigned long flags;
+ unsigned w, e;
+ int bug = 0;
+
+ local_irq_save(flags);
+ for (w = 0; w < DTLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, true);
+ for (w = 0; w < ITLB_ARF_WAYS; ++w)
+ for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+ bug |= check_tlb_entry(w, e, false);
+ if (bug & TLB_INSANE)
+ tlb_insane();
+ if (bug & TLB_SUSPICIOUS)
+ tlb_suspicious();
+ local_irq_restore(flags);
}
-#endif
+#endif /* CONFIG_DEBUG_TLB_SANITY */