Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r--  arch/mn10300/mm/Makefile                  14
-rw-r--r--  arch/mn10300/mm/cache-flush-mn10300.S    192
-rw-r--r--  arch/mn10300/mm/cache-mn10300.S          289
-rw-r--r--  arch/mn10300/mm/cache.c                  121
-rw-r--r--  arch/mn10300/mm/dma-alloc.c               56
-rw-r--r--  arch/mn10300/mm/extable.c                 26
-rw-r--r--  arch/mn10300/mm/fault.c                  405
-rw-r--r--  arch/mn10300/mm/init.c                   160
-rw-r--r--  arch/mn10300/mm/misalignment.c           661
-rw-r--r--  arch/mn10300/mm/mmu-context.c             80
-rw-r--r--  arch/mn10300/mm/pgtable.c                197
-rw-r--r--  arch/mn10300/mm/tlb-mn10300.S            207
12 files changed, 2408 insertions, 0 deletions
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
new file mode 100644
index 00000000000..28b9d983db0
--- /dev/null
+++ b/arch/mn10300/mm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the MN10300-specific memory management code
+#
+
+obj-y := \
+ init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
+ misalignment.o dma-alloc.o
+
+ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+obj-y += cache.o cache-mn10300.o
+ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
+obj-y += cache-flush-mn10300.o
+endif
+endif
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
new file mode 100644
index 00000000000..c8ed1cbac10
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-mn10300.S
@@ -0,0 +1,192 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+ .am33_2
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+###############################################################################
+#
+# void mn10300_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_end
+
+ # read the addresses tagged in the cache's tag RAM and attempt to flush
+ # those addresses specifically
+ # - we rely on the hardware to filter out invalid tag entry addresses
+ mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_dcache_flush_loop:
+ mov (a0),d0
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
+ # cache
+ mov d0,(a1) # conditional purge
+
+mn10300_dcache_flush_skip:
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_loop
+
+mn10300_dcache_flush_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_flush_page(unsigned start)
+# void mn10300_dcache_flush_range(unsigned start, unsigned end)
+# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush_page:
+ mov PAGE_SIZE,d1
+mn10300_dcache_flush_range2:
+ add d0,d1
+mn10300_dcache_flush_range:
+ movm [d2,d3],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_dcache_flush_range_end
+
+ # round start addr down
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush all instances of an address from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
+ # cache
+
+mn10300_dcache_flush_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_dcache_flush_range_loop
+
+mn10300_dcache_flush_range_end:
+ ret [d2,d3],8
+
+###############################################################################
+#
+# void mn10300_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_inv_end
+
+ # hit each line in the dcache with an unconditional purge
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_dcache_flush_inv_loop:
+ mov (a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_flush_inv_page(unsigned start)
+# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
+# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_flush_inv_page:
+ mov PAGE_SIZE,d1
+mn10300_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_dcache_flush_inv_range:
+ movm [d2,d3],(sp)
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_dcache_flush_inv_range_end
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush and invalidate all instances of an address
+ # from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+mn10300_dcache_flush_inv_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_dcache_flush_inv_range_loop
+
+mn10300_dcache_flush_inv_range_end:
+ ret [d2,d3],8
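
The flush-range entry points above round the start address down and the end address up to cache-line boundaries and derive the number of purge requests from the difference. A minimal C sketch of that arithmetic follows, assuming a 16-byte line size for illustration and using a plain line mask where the assembly uses L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY.

        #include <stdio.h>

        #define L1_CACHE_SHIFT  4                       /* assumed: 16-byte cache lines */
        #define L1_CACHE_BYTES  (1UL << L1_CACHE_SHIFT)

        /* mirror the start/end rounding done before the purge loop */
        static unsigned long flush_range_lines(unsigned long start, unsigned long end)
        {
                unsigned long mask = ~(L1_CACHE_BYTES - 1);

                start &= mask;                          /* round start addr down */
                end = (end + L1_CACHE_BYTES) & mask;    /* round end addr up */

                return (end - start) >> L1_CACHE_SHIFT; /* entries to examine */
        }

        int main(void)
        {
                /* a 40-byte buffer at 0x1008 spans three lines; the round-up
                 * purges a fourth when the end address is already aligned */
                printf("%lu lines purged\n", flush_range_lines(0x1008, 0x1008 + 40));
                return 0;
        }
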
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
new file mode 100644
index 00000000000..e839d0aedd6
--- /dev/null
+++ b/arch/mn10300/mm/cache-mn10300.S
@@ -0,0 +1,289 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+
+#define mn10300_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+ .globl mn10300_icache_inv
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+ .globl mn10300_dcache_inv_page
+
+###############################################################################
+#
+# void mn10300_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+mn10300_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_icache_inv_end
+
+ mov epsw,d1
+ and ~EPSW_IE,epsw
+ nop
+ nop
+
+ # disable the icache
+ and ~CHCTR_ICEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_ICINV,d0
+ or CHCTR_ICEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ mov d1,epsw
+
+mn10300_icache_inv_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_inv_end
+
+ mov epsw,d1
+ and ~EPSW_IE,epsw
+ nop
+ nop
+
+ # disable the dcache
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_DCINV,d0
+ or CHCTR_DCEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ mov d1,epsw
+
+mn10300_dcache_inv_end:
+ ret [],0
+
+###############################################################################
+#
+# void mn10300_dcache_inv_range(unsigned start, unsigned end)
+# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
+# void mn10300_dcache_inv_page(unsigned start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+mn10300_dcache_inv_page:
+ mov PAGE_SIZE,d1
+mn10300_dcache_inv_range2:
+ add d0,d1
+mn10300_dcache_inv_range:
+ movm [d2,d3,a2],(sp)
+ mov CHCTR,a2
+
+ movhu (a2),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_dcache_inv_range_end
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ clr d2 # we're going to clear tag ram
+ # entries
+
+ # read the tags from the tag RAM, and if they indicate a valid dirty
+ # cache line then invalidate that line
+ mov DCACHE_TAG(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache tag RAM
+ # access address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
+
+mn10300_dcache_inv_range_outer_loop:
+ # disable interrupts
+ mov epsw,d3
+ and ~EPSW_IE,epsw
+ nop # note that reading CHCTR and
+ # AND'ing D0 occupy two delay
+ # slots after disabling
+ # interrupts
+
+ # disable the dcache
+ movhu (a2),d0
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a2)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+mn10300_dcache_inv_range_loop:
+
+ # process the way 0 slot
+ mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_0:
+
+ # process the way 1 slot
+ mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_1:
+
+ # process the way 2 slot
+ mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_2:
+
+ # process the way 3 slot
+ mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
+ # valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
+
+ mov d2,(a0) # kill the tag
+
+mn10300_dcache_inv_range_skip_3:
+
+ # approx every N steps we re-enable the cache and see if there are any
+ # interrupts to be processed
+ # we also break out if we've reached the end of the loop
+ # (the bottom nibble of the count is zero in both cases)
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ btst mn10300_dcache_inv_range_intr_interval,d1
+ bne mn10300_dcache_inv_range_loop
+
+ # wait for the cache to finish what it's doing
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ or CHCTR_DCEN,d0
+ movhu d0,(a2)
+ movhu (a2),d0
+
+ # re-enable interrupts
+ # - we don't bother with delay NOPs as we'll have enough instructions
+ # before we disable interrupts again to give the interrupts a chance
+ # to happen
+ mov d3,epsw
+
+ # go around again if the counter hasn't yet reached zero
+ add 0,d1
+ bne mn10300_dcache_inv_range_outer_loop
+
+mn10300_dcache_inv_range_end:
+ ret [d2,d3,a2],12
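
The inner loop of mn10300_dcache_inv_range reads the tag RAM slot for each of ways 0-3, skips entries that are not valid or that tag a different address, and writes zero to kill matching tags; every 2^MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL entries it re-enables the cache and interrupts before carrying on. A C model of the per-slot test, in which the slot pointer and the TAG_VALID value are assumptions of the sketch rather than the kernel's definitions:

        #define TAG_VALID       0x00000001UL    /* assumed valid-bit mask */

        /* illustrative model of one tag-RAM slot check in the loop above */
        static void maybe_kill_tag(unsigned long *slot, unsigned long match)
        {
                unsigned long tag = *slot;

                if (!(tag & TAG_VALID))
                        return;         /* line not valid, nothing to invalidate */

                if ((tag ^ match) >> 12)
                        return;         /* tag belongs to a different address */

                *slot = 0;              /* kill the tag, as "mov d2,(a0)" does */
        }
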
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
new file mode 100644
index 00000000000..1b76719ec1c
--- /dev/null
+++ b/arch/mn10300/mm/cache.c
@@ -0,0 +1,121 @@
+/* MN10300 Cache flushing routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_dcache_inv);
+EXPORT_SYMBOL(mn10300_dcache_inv_range);
+EXPORT_SYMBOL(mn10300_dcache_inv_range2);
+EXPORT_SYMBOL(mn10300_dcache_inv_page);
+
+#ifdef CONFIG_MN10300_CACHE_WBACK
+EXPORT_SYMBOL(mn10300_dcache_flush);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv_range);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2);
+EXPORT_SYMBOL(mn10300_dcache_flush_inv_page);
+EXPORT_SYMBOL(mn10300_dcache_flush_range);
+EXPORT_SYMBOL(mn10300_dcache_flush_range2);
+EXPORT_SYMBOL(mn10300_dcache_flush_page);
+#endif
+
+/*
+ * write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ mn10300_dcache_flush_page(page_to_phys(page));
+ mn10300_icache_inv();
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/*
+ * write some code we've just written back from the dcache and invalidate the
+ * icache so that we can run that code
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ for (; start < end; start += size) {
+ /* work out how much of the page to flush */
+ off = start & (PAGE_SIZE - 1);
+
+ size = end - start;
+ if (size > PAGE_SIZE - off)
+ size = PAGE_SIZE - off;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ continue;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ continue;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ continue;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ continue;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ continue;
+
+ page = pte_page(pte);
+ if (!page)
+ continue;
+
+ addr = page_to_phys(page);
+
+ /* flush the dcache and invalidate the icache coverage on that
+ * region */
+ mn10300_dcache_flush_range2(addr + off, size);
+ }
+#endif
+
+ mn10300_icache_inv();
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * allow userspace to flush the instruction cache
+ */
+asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
+{
+ if (end < start)
+ return -EINVAL;
+
+ flush_icache_range(start, end);
+ return 0;
+}
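
sys_cacheflush() exposes the icache invalidation above to userspace, which is what a program generating code at run time needs before executing it. A hypothetical caller is sketched below; __NR_cacheflush is assumed to come from the architecture's unistd.h and is not defined in this diff.

        #include <unistd.h>
        #include <sys/syscall.h>

        /* hypothetical helper: flush a buffer of generated code before jumping to it */
        static int flush_generated_code(void *buf, unsigned long len)
        {
                unsigned long start = (unsigned long) buf;

                /* sys_cacheflush() rejects end < start with -EINVAL */
                return syscall(__NR_cacheflush, start, start + len);
        }
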
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
new file mode 100644
index 00000000000..f3649d8f50e
--- /dev/null
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -0,0 +1,56 @@
+/* MN10300 Dynamic DMA mapping support
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * Derived from: arch/i386/kernel/pci-dma.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+{
+ unsigned long addr;
+ void *ret;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
+ gfp |= GFP_DMA;
+
+ addr = __get_free_pages(gfp, get_order(size));
+ if (!addr)
+ return NULL;
+
+ /* map the coherent memory through the uncached memory window */
+ ret = (void *) (addr | 0x20000000);
+
+ /* fill the memory with obvious rubbish */
+ memset((void *) addr, 0xfb, size);
+
+ /* write back and evict all cache lines covering this region */
+ mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);
+
+ *dma_handle = virt_to_bus((void *) addr);
+ return ret;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long addr = (unsigned long) vaddr & ~0x20000000;
+
+ free_pages(addr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
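
The allocator above hands the CPU an alias of the pages through the 0x20000000 uncached window while the device is given the physical bus address. A hedged driver-side sketch of typical use follows; the mydev naming and the 256-byte descriptor ring are purely illustrative.

        #include <linux/device.h>
        #include <linux/dma-mapping.h>
        #include <linux/errno.h>

        static void *ring;              /* CPU view, via the uncached window */
        static dma_addr_t ring_dma;     /* device view, from virt_to_bus() */

        static int mydev_alloc_ring(struct device *dev)
        {
                ring = dma_alloc_coherent(dev, 256, &ring_dma, GFP_KERNEL);
                if (!ring)
                        return -ENOMEM;
                return 0;
        }

        static void mydev_free_ring(struct device *dev)
        {
                dma_free_coherent(dev, 256, ring, ring_dma);
        }
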
diff --git a/arch/mn10300/mm/extable.c b/arch/mn10300/mm/extable.c
new file mode 100644
index 00000000000..25e5485ab87
--- /dev/null
+++ b/arch/mn10300/mm/extable.c
@@ -0,0 +1,26 @@
+/* MN10300 In-kernel exception handling
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return 1;
+ }
+
+ return 0;
+}
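
fixup_exception() leans on the generic exception table: each entry pairs the address of an instruction that is allowed to fault with the address at which execution should resume. A minimal sketch of that lookup, assuming the conventional sorted insn/fixup layout (the real search lives in the generic kernel code, not in this diff):

        struct exception_table_entry {
                unsigned long insn;     /* address of the instruction allowed to fault */
                unsigned long fixup;    /* address execution resumes at on a fault */
        };

        /* binary search over a table sorted by ->insn, as the generic
         * search_exception_tables() machinery ultimately performs */
        static const struct exception_table_entry *
        search_table(const struct exception_table_entry *first,
                     const struct exception_table_entry *last, unsigned long pc)
        {
                while (first <= last) {
                        const struct exception_table_entry *mid =
                                first + (last - first) / 2;

                        if (mid->insn == pc)
                                return mid;
                        if (mid->insn < pc)
                                first = mid + 1;
                        else
                                last = mid - 1;
                }
                return NULL;    /* no fixup registered: the fault is fatal */
        }
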
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
new file mode 100644
index 00000000000..78f092ca031
--- /dev/null
+++ b/arch/mn10300/mm/fault.c
@@ -0,0 +1,405 @@
+/* MN10300 MMU Fault handler
+ *
+ * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Modified by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/vt_kern.h> /* For unblank_screen() */
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/hardirq.h>
+#include <asm/gdb-stub.h>
+#include <asm/cpu-regs.h>
+
+/*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out
+ */
+void bust_spinlocks(int yes)
+{
+ if (yes) {
+ oops_in_progress = 1;
+#ifdef CONFIG_SMP
+ /* Many serial drivers do __global_cli() */
+ global_irq_lock = 0;
+#endif
+ } else {
+ int loglevel_save = console_loglevel;
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console
+ * up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+}
+
+void do_BUG(const char *file, int line)
+{
+ bust_spinlocks(1);
+ printk(KERN_EMERG "------------[ cut here ]------------\n");
+ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
+}
+
+#if 0
+static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgdir + __pgd_offset(address);
+ printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
+ pgd, (long long) pgd_val(*pgd));
+
+ if (!pgd_present(*pgd)) {
+ printk(KERN_DEBUG "... pgd not present!\n");
+ return;
+ }
+ pmd = pmd_offset(pgd, address);
+ printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
+ pmd, (long long)pmd_val(*pmd));
+
+ if (!pmd_present(*pmd)) {
+ printk(KERN_DEBUG "... pmd not present!\n");
+ return;
+ }
+ pte = pte_offset(pmd, address);
+ printk(KERN_DEBUG "pte entry %p: %016Lx\n",
+ pte, (long long) pte_val(*pte));
+
+ if (!pte_present(*pte))
+ printk(KERN_DEBUG "... pte not present!\n");
+}
+#endif
+
+asmlinkage void monitor_signal(struct pt_regs *);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * fault_code:
+ * - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
+ * - MSW: 0 if data access, 1 if instruction access
+ * - bit 0: TLB miss flag
+ * - bit 1: initial write
+ * - bit 2: page invalid
+ * - bit 3: protection violation
+ * - bit 4: accessor (0=user 1=kernel)
+ * - bit 5: 0=read 1=write
+ * - bit 6-8: page protection spec
+ * - bit 9: illegal address
+ * - bit 16: 0=data 1=ins
+ *
+ */
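
The fault_code layout documented in the comment above can be decoded with a few masks; the shifts below follow the listed bit numbers, but the names are ad hoc for this sketch rather than the kernel's MMUFCR_* definitions from asm/cpu-regs.h.

        #define FC_TLB_MISS     (1 << 0)        /* TLB miss flag */
        #define FC_INIT_WRITE   (1 << 1)        /* initial write */
        #define FC_PGINVAL      (1 << 2)        /* page invalid */
        #define FC_PROTVIOL     (1 << 3)        /* protection violation */
        #define FC_KERNEL       (1 << 4)        /* accessor: 0=user, 1=kernel */
        #define FC_WRITE        (1 << 5)        /* 0=read, 1=write */
        #define FC_ILLADDR      (1 << 9)        /* illegal address */
        #define FC_INSN         (1 << 16)       /* 0=data access, 1=instruction fetch */

        /* e.g. a user-mode write to an unmapped data page would show
         * FC_PGINVAL | FC_WRITE with neither FC_KERNEL nor FC_INSN set */
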
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
+ unsigned long address)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ unsigned long page;
+ siginfo_t info;
+ int write, fault;
+
+#ifdef CONFIG_GDBSTUB
+ /* handle GDB stub causing a fault */
+ if (gdbstub_busy) {
+ gdbstub_exception(regs, TBR & TBR_INT_CODE);
+ return;
+ }
+#endif
+
+#if 0
+ printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
+ regs,
+ fault_code & 0x10000 ? "ins" : "data",
+ fault_code & 0xffff, address);
+#endif
+
+ tsk = current;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * and that the fault was a page not present (invalid) error
+ */
+ if (address >= VMALLOC_START && address < VMALLOC_END &&
+ (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
+ (fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
+ )
+ goto vmalloc_fault;
+
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+ /* accessing the stack below the stack pointer is always a
+ * bug */
+ if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
+#if 0
+ printk(KERN_WARNING
+ "[%d] ### Access below stack @%lx (sp=%lx)\n",
+ current->pid, address, regs->sp);
+ printk(KERN_WARNING
+ "vma [%08x - %08x]\n",
+ vma->vm_start, vma->vm_end);
+ show_registers(regs);
+ printk(KERN_WARNING
+ "[%d] ### Code: [%08lx]"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ current->pid,
+ regs->pc,
+ ((u8 *) regs->pc)[0],
+ ((u8 *) regs->pc)[1],
+ ((u8 *) regs->pc)[2],
+ ((u8 *) regs->pc)[3],
+ ((u8 *) regs->pc)[4],
+ ((u8 *) regs->pc)[5],
+ ((u8 *) regs->pc)[6],
+ ((u8 *) regs->pc)[7]
+ );
+#endif
+ goto bad_area;
+ }
+ }
+
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
+ default: /* 3: write, present */
+ case MMUFCR_xFC_TYPE_WRITE:
+#ifdef TEST_VERIFY_AREA
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
+ printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
+#endif
+ /* write to absent page */
+ case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+
+ /* read from protected page */
+ case MMUFCR_xFC_TYPE_READ:
+ goto bad_area;
+
+ /* read from absent page */
+ case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ break;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, write);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+ monitor_signal(regs);
+
+ /* User mode accesses just cause a SIGSEGV */
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ monitor_signal(regs);
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT
+ "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n", address);
+ printk(" printing pc:\n");
+ printk(KERN_ALERT "%08lx\n", regs->pc);
+
+#ifdef CONFIG_GDBSTUB
+ gdbstub_intercept(
+ regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
+#endif
+
+ page = PTBR;
+ page = ((unsigned long *) __va(page))[address >> 22];
+ printk(KERN_ALERT "*pde = %08lx\n", page);
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte = %08lx\n", page);
+ }
+
+ die("Oops", regs, fault_code);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ monitor_signal(regs);
+ printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+ monitor_signal(regs);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd_k = pmd_offset(pud_k, address);