author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/frv/mm
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/frv/mm')
-rw-r--r--  arch/frv/mm/Makefile         9
-rw-r--r--  arch/frv/mm/cache-page.c    66
-rw-r--r--  arch/frv/mm/dma-alloc.c    188
-rw-r--r--  arch/frv/mm/elf-fdpic.c    123
-rw-r--r--  arch/frv/mm/extable.c       91
-rw-r--r--  arch/frv/mm/fault.c        325
-rw-r--r--  arch/frv/mm/highmem.c       33
-rw-r--r--  arch/frv/mm/init.c         241
-rw-r--r--  arch/frv/mm/kmap.c          62
-rw-r--r--  arch/frv/mm/mmu-context.c  208
-rw-r--r--  arch/frv/mm/pgalloc.c      159
-rw-r--r--  arch/frv/mm/tlb-flush.S    185
-rw-r--r--  arch/frv/mm/tlb-miss.S     631
-rw-r--r--  arch/frv/mm/unaligned.c    218
14 files changed, 2539 insertions(+), 0 deletions(-)
diff --git a/arch/frv/mm/Makefile b/arch/frv/mm/Makefile
new file mode 100644
index 00000000000..fb8b1d860f4
--- /dev/null
+++ b/arch/frv/mm/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the arch-specific parts of the memory manager.
+#
+
+obj-y := init.o kmap.o
+
+obj-$(CONFIG_MMU) += \
+ pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
+ mmu-context.o dma-alloc.o unaligned.o elf-fdpic.o
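For reference, kbuild accumulates these obj-y / obj-$(CONFIG_...) lists at build time; a hypothetical further entry (not in this patch; CONFIG_FOO and foo.o are placeholders) would follow the same pattern:

# foo.o is built in only when CONFIG_FOO=y; when the option is unset the
# line appends to the never-used list "obj-" and is simply ignored
obj-$(CONFIG_FOO) += foo.o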
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
new file mode 100644
index 00000000000..683b5e34431
--- /dev/null
+++ b/arch/frv/mm/cache-page.c
@@ -0,0 +1,66 @@
+/* cache-page.c: whole-page cache wrangling functions for MMU linux
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <asm/pgalloc.h>
+
+/*****************************************************************************/
+/*
+ * DCF (data cache flush) takes a virtual address and the page may not currently have one
+ * - temporarily hijack a kmap_atomic() slot and attach the page to it
+ */
+void flush_dcache_page(struct page *page)
+{
+ unsigned long dampr2;
+ void *vaddr;
+
+ dampr2 = __get_DAMPR(2);
+
+ vaddr = kmap_atomic(page, __KM_CACHE);
+
+ frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
+
+ kunmap_atomic(vaddr, __KM_CACHE);
+
+ if (dampr2) {
+ __set_DAMPR(2, dampr2);
+ __set_IAMPR(2, dampr2);
+ }
+
+} /* end flush_dcache_page() */
+
+/*****************************************************************************/
+/*
+ * ICI (instruction cache invalidate) takes a virtual address and the page may not currently have one
+ * - so we temporarily attach the page to a bit of virtual space so that it can be flushed
+ */
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long start, unsigned long len)
+{
+ unsigned long dampr2;
+ void *vaddr;
+
+ dampr2 = __get_DAMPR(2);
+
+ vaddr = kmap_atomic(page, __KM_CACHE);
+
+ start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
+ frv_cache_wback_inv(start, start + len);
+
+ kunmap_atomic(vaddr, __KM_CACHE);
+
+ if (dampr2) {
+ __set_DAMPR(2, dampr2);
+ __set_IAMPR(2, dampr2);
+ }
+
+} /* end flush_icache_user_range() */
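For context, a hedged sketch (not part of this patch) of how a caller such as the ptrace text-poking path would pair a write to a user page with flush_icache_user_range(); the helper name and the values it receives are hypothetical:

/* hypothetical caller: patch user text through a kernel mapping of the
 * page, then make the I-cache coherent with the new contents
 */
static void poke_user_text(struct vm_area_struct *vma, struct page *page,
			   unsigned long uaddr, void *kaddr,
			   const void *insns, size_t len)
{
	memcpy(kaddr, insns, len);			/* modify the page's data */
	flush_icache_user_range(vma, page, uaddr, len);	/* sync the I-cache */
}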
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
new file mode 100644
index 00000000000..4b38d45435f
--- /dev/null
+++ b/arch/frv/mm/dma-alloc.c
@@ -0,0 +1,188 @@
+/* dma-alloc.c: consistent DMA memory allocation
+ *
+ * Derived from arch/ppc/mm/cachemap.c
+ *
+ * PowerPC version derived from arch/arm/mm/consistent.c
+ * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ *
+ * linux/arch/arm/mm/consistent.c
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * Consistent memory allocators. Used for DMA devices that want to
+ * share uncached memory with the processor core. The function returns
+ * the virtual address and sets 'dma_handle' to the physical address.
+ * Mostly stolen from the ARM port, with some changes for PowerPC.
+ * -- Dan
+ * Modified for 36-bit support. -Matt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+
+static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
+{
+ pgd_t *pge;
+ pud_t *pue;
+ pmd_t *pme;
+ pte_t *pte;
+ int err = -ENOMEM;
+
+ spin_lock(&init_mm.page_table_lock);
+
+ /* Use upper 10 bits of VA to index the first level map */
+ pge = pgd_offset_k(va);
+ pue = pud_offset(pge, va);
+ pme = pmd_offset(pue, va);
+
+ /* Use middle 10 bits of VA to index the second-level map */
+ pte = pte_alloc_kernel(&init_mm, pme, va);
+ if (pte != 0) {
+ err = 0;
+ set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
+ }
+
+ spin_unlock(&init_mm.page_table_lock);
+ return err;
+}
+
+/*
+ * This function will allocate the requested contiguous pages and
+ * map them into the kernel's vmalloc() space. This is done so we
+ * get unique mapping for these pages, outside of the kernel's 1:1
+ * virtual:physical mapping. This is necessary so we can cover large
+ * portions of the kernel with single large page TLB entries, and
+ * still get unique uncached pages for consistent DMA.
+ */
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+{
+ struct vm_struct *area;
+ unsigned long page, va, pa;
+ void *ret;
+ int order, err, i;
+
+ if (in_interrupt())
+ BUG();
+
+ /* only allocate page size areas */
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ page = __get_free_pages(gfp, order);
+ if (!page) {
+ BUG();
+ return NULL;
+ }
+
+ /* allocate some common virtual space to map the new pages */
+ area = get_vm_area(size, VM_ALLOC);
+ if (area == 0) {
+ free_pages(page, order);
+ return NULL;
+ }
+ va = VMALLOC_VMADDR(area->addr);
+ ret = (void *) va;
+
+ /* this gives us the real physical address of the first page */
+ *dma_handle = pa = virt_to_bus((void *) page);
+
+ /* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free
+ * all pages that were allocated.
+ */
+ if (order > 0) {
+ struct page *rpage = virt_to_page(page);
+
+ for (i = 1; i < (1 << order); i++)
+ set_page_count(rpage + i, 1);
+ }
+
+ err = 0;
+ for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+ err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);
+
+ if (err) {
+ vfree((void *) va);
+ return NULL;
+ }
+
+	/* we need to ensure that there are no cache lines in use, or worse, dirty in this area
+	 * - this can't be done until after the virtual address mappings are created
+	 */
+ frv_cache_invalidate(va, va + size);
+
+ return ret;
+}
+
+/*
+ * free page(s) as defined by the above mapping.
+ */
+void consistent_free(void *vaddr)
+{
+ if (in_interrupt())
+ BUG();
+ vfree(vaddr);
+}
+
+/*
+ * make an area consistent.
+ */
+void consistent_sync(void *vaddr, size_t size, int direction)
+{
+ unsigned long start = (unsigned long) vaddr;
+ unsigned long end = start + size;
+
+ switch (direction) {
+ case PCI_DMA_NONE:
+ BUG();
+ case PCI_DMA_FROMDEVICE: /* invalidate only */
+ frv_cache_invalidate(start, end);
+ break;
+ case PCI_DMA_TODEVICE: /* writeback only */
+ frv_dcache_writeback(start, end);
+ break;
+	case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
+		frv_cache_wback_inv(start, end);
+		break;
+ }
+}
+
+/*
+ * consistent_sync_page() makes an area of a page consistent; identical
+ * to consistent_sync(), but takes a struct page instead of a virtual address
+ */
+
+void consistent_sync_page(struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ void *start;
+
+ start = page_address(page) + offset;
+ consistent_sync(start, size, direction);
+}
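A hedged usage sketch of the allocator above (not from this patch): a driver obtains an uncached ring buffer and programs its bus address into a device register; the iomem cookie and the register offset are hypothetical:

static int example_setup_ring(void __iomem *dev_regs)
{
	dma_addr_t handle;
	void *ring;

	ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &handle);
	if (!ring)
		return -ENOMEM;

	memset(ring, 0, PAGE_SIZE);		/* mapping is uncached, plain stores suffice */
	writel(handle, dev_regs + 0x10);	/* hypothetical ring-base register */
	return 0;
}

The buffer would later be released with consistent_free(ring); streaming buffers would be passed through consistent_sync() around device accesses.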
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
new file mode 100644
index 00000000000..f5a653033fe
--- /dev/null
+++ b/arch/frv/mm/elf-fdpic.c
@@ -0,0 +1,123 @@
+/* elf-fdpic.c: ELF FDPIC memory layout management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/elf-fdpic.h>
+
+/*****************************************************************************/
+/*
+ * lay out the userspace VM according to our grand design
+ */
+#ifdef CONFIG_MMU
+void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params,
+ struct elf_fdpic_params *interp_params,
+ unsigned long *start_stack,
+ unsigned long *start_brk)
+{
+ *start_stack = 0x02200000UL;
+
+ /* if the only executable is a shared object, assume that it is an interpreter rather than
+ * a true executable, and map it such that "ld.so --list" comes out right
+ */
+ if (!(interp_params->flags & ELF_FDPIC_FLAG_PRESENT) &&
+ exec_params->hdr.e_type != ET_EXEC
+ ) {
+ exec_params->load_addr = PAGE_SIZE;
+
+ *start_brk = 0x80000000UL;
+ }
+ else {
+ exec_params->load_addr = 0x02200000UL;
+
+ if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) ==
+ ELF_FDPIC_FLAG_INDEPENDENT
+ ) {
+ exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT;
+ exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP;
+ }
+ }
+
+} /* end elf_fdpic_arch_lay_out_mm() */
+#endif
+
+/*****************************************************************************/
+/*
+ * place non-fixed mmaps first in the bottom part of memory, working up, and
+ * then in the top part of memory, working down
+ */
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ unsigned long limit;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /* only honour a hint if we're not going to clobber something doing so */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ goto success;
+ }
+
+ /* search between the bottom of user VM and the stack grow area */
+ addr = PAGE_SIZE;
+ limit = (current->mm->start_stack - 0x00200000);
+ if (addr + len <= limit) {
+ limit -= len;
+
+ if (addr <= limit) {
+ vma = find_vma(current->mm, PAGE_SIZE);
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+ if (addr + len <= vma->vm_start)
+ goto success;
+ addr = vma->vm_end;
+ }
+ }
+ }
+
+ /* search from just above the WorkRAM area to the top of memory */
+ addr = PAGE_ALIGN(0x80000000);
+ limit = TASK_SIZE - len;
+ if (addr <= limit) {
+ vma = find_vma(current->mm, addr);
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+ if (addr + len <= vma->vm_start)
+ goto success;
+ addr = vma->vm_end;
+ }
+
+ if (!vma && addr <= limit)
+ goto success;
+ }
+
+#if 0
+ printk("[area] l=%lx (ENOMEM) f='%s'\n",
+ len, filp ? filp->f_dentry->d_name.name : "");
+#endif
+ return -ENOMEM;
+
+ success:
+#if 0
+ printk("[area] l=%lx ad=%lx f='%s'\n",
+ len, addr, filp ? filp->f_dentry->d_name.name : "");
+#endif
+ return addr;
+} /* end arch_get_unmapped_area() */
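The two loops above are first-fit walks of the sorted VMA list; the same idea as a standalone sketch over an array of [start, end) ranges, purely illustrative and not part of the patch:

struct range { unsigned long start, end; };	/* sorted, non-overlapping */

/* return the lowest address >= addr at which [addr, addr + len) fits,
 * with the start address never exceeding limit; 0 means no gap found
 */
static unsigned long find_gap(const struct range *r, int n,
			      unsigned long addr, unsigned long len,
			      unsigned long limit)
{
	int i;

	for (i = 0; i < n && addr <= limit; i++) {
		if (addr + len <= r[i].start)
			return addr;	/* the hole before this range is big enough */
		addr = r[i].end;	/* skip past the occupied range */
	}
	return addr <= limit ? addr : 0;	/* room above the last range? */
}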
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
new file mode 100644
index 00000000000..41be1128dc6
--- /dev/null
+++ b/arch/frv/mm/extable.c
@@ -0,0 +1,91 @@
+/*
+ * linux/arch/frv/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __attribute__((aligned(8))) __start___ex_table[];
+extern const struct exception_table_entry __attribute__((aligned(8))) __stop___ex_table[];
+extern const void __memset_end, __memset_user_error_lr, __memset_user_error_handler;
+extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
+extern spinlock_t modlist_lock;
+
+/*****************************************************************************/
+/*
+ * binary-search one exception table for a fixup matching a faulting address
+ */
+static inline unsigned long search_one_table(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry __attribute__((aligned(8))) *mid;
+ long diff;
+
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0)
+ return mid->fixup;
+ else if (diff < 0)
+ first = mid + 1;
+ else
+ last = mid - 1;
+ }
+ return 0;
+} /* end search_one_table() */
+
+/*****************************************************************************/
+/*
+ * see if there's a fixup handler available to deal with a kernel fault
+ */
+unsigned long search_exception_table(unsigned long pc)
+{
+ unsigned long ret = 0;
+
+ /* determine if the fault lay during a memcpy_user or a memset_user */
+ if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
+ (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
+ ) {
+ /* the fault occurred in a protected memset
+ * - we search for the return address (in LR) instead of the program counter
+ * - it was probably during a clear_user()
+ */
+ return (unsigned long) &__memset_user_error_handler;
+ }
+ else if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
+ (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
+ ) {
+		/* the fault occurred in a protected memcpy
+ * - we search for the return address (in LR) instead of the program counter
+ * - it was probably during a copy_to/from_user()
+ */
+ return (unsigned long) &__memcpy_user_error_handler;
+ }
+
+#ifndef CONFIG_MODULES
+ /* there is only the kernel to search. */
+ ret = search_one_table(__start___ex_table, __stop___ex_table - 1, pc);
+ return ret;
+
+#else
+ /* the kernel is the last "module" -- no need to treat it special */
+ unsigned long flags;
+ struct module *mp;
+
+ spin_lock_irqsave(&modlist_lock, flags);
+
+ for (mp = module_list; mp != NULL; mp = mp->next) {
+ if (mp->ex_table_start == NULL || !(mp->flags & (MOD_RUNNING | MOD_INITIALIZING)))
+ continue;
+ ret = search_one_table(mp->ex_table_start, mp->ex_table_end - 1, pc);
+ if (ret)
+ break;
+ }
+
+ spin_unlock_irqrestore(&modlist_lock, flags);
+ return ret;
+#endif
+} /* end search_exception_table() */
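For background, the entries being searched are (insn, fixup) address pairs collected into the __ex_table section wherever kernel code touches user memory; a hedged sketch of that convention follows (the FRV mnemonics, labels and operands here are illustrative assumptions, not taken from this patch):

1:	ldi	@(gr8,#0),gr9		/* a load from a user pointer that may fault */
	.section __ex_table,"a"
	.balign	8
	.long	1b			/* insn: address of the faulting load */
	.long	2f			/* fixup: where to resume after a fault */
	.previous
	...
2:	setlos	#-14,gr8		/* recovery path: return -EFAULT */

do_page_fault() consults search_exception_table() with the faulting pc and, if an entry matches, simply restarts execution at the fixup address.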
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
new file mode 100644
index 00000000000..41d02ac4823
--- /dev/null
+++ b/arch/frv/mm/fault.c
@@ -0,0 +1,325 @@
+/*
+ * linux/arch/frv/mm/fault.c
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * - Written by David Howells (dhowells@redhat.com)
+ * - Derived from arch/m68knommu/mm/fault.c
+ * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
+ * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
+ *
+ * Based on:
+ *
+ * linux/arch/m68k/mm/fault.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ */
+
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/gdb-stub.h>
+
+/*****************************************************************************/
+/*
+ * This routine handles page faults. It determines the problem, and
+ * then passes it off to one of the appropriate routines.
+ */
+asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long _pme, lrai, lrad, fixup;
+ siginfo_t info;
+ pgd_t *pge;
+ pud_t *pue;
+ pte_t *pte;
+ int write;
+
+#if 0
+ const char *atxc[16] = {
+ [0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat",
+ [0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot",
+ };
+
+ printk("do_page_fault(%d,%lx [%s],%lx)\n",
+ datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0);
+#endif
+
+ mm = current->mm;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * and that the fault was a page not present (invalid) error
+ */
+ if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
+ if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
+ goto kernel_pte_fault;
+ if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
+ goto kernel_pte_fault;
+ }
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, ear0);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= ear0)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if (user_mode(__frame)) {
+ /*
+		 * accessing the stack below the stack pointer is always a
+		 * bug. Some slack (two pages here) is allowed because some
+		 * instructions do post-decrement on the stack and the
+		 * access doesn't show up until later..
+ */
+ if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) {
+#if 0
+ printk("[%d] ### Access below stack @%lx (sp=%lx)\n",
+ current->pid, ear0, __frame->sp);
+ show_registers(__frame);
+ printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ current->pid,
+ __frame->pc,
+ ((u8*)__frame->pc)[0],
+ ((u8*)__frame->pc)[1],
+ ((u8*)__frame->pc)[2],
+ ((u8*)__frame->pc)[3],
+ ((u8*)__frame->pc)[4],
+ ((u8*)__frame->pc)[5],
+ ((u8*)__frame->pc)[6],
+ ((u8*)__frame->pc)[7]
+ );
+#endif
+ goto bad_area;
+ }
+ }
+
+ if (expand_stack(vma, ear0))
+ goto bad_area;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+ good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (esr0 & ESR0_ATXC) {
+ default:
+ /* handle write to write protected page */
+ case ESR0_ATXC_WP_EXCEP:
+#ifdef TEST_VERIFY_AREA
+ if (!(user_mode(__frame)))
+ printk("WP fault at %08lx\n", __frame->pc);
+#endif
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write = 1;
+ break;
+
+ /* handle read from protected page */
+ case ESR0_ATXC_PRIV_EXCEP:
+ goto bad_area;
+
+ /* handle read, write or exec on absent page
+ * - can't support write without permitting read
+ * - don't support execute without permitting read and vice-versa
+ */
+ case ESR0_ATXC_AMRTLB_MISS:
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ goto bad_area;
+ break;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, ear0, write)) {
+ case 1:
+ current->min_flt++;
+ break;
+ case 2:
+ current->maj_flt++;
+ break;
+ case 0:
+ goto do_sigbus;
+ default:
+ goto out_of_memory;
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+ bad_area:
+ up_read(&mm->mmap_sem);
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(__frame)) {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *) ear0;
+ force_sig_info(SIGSEGV, &info, current);
+ return;
+ }
+
+ no_context:
+ /* are we prepared to handle this kernel fault? */
+ if ((fixup = search_exception_table(__frame->pc)) != 0) {
+ __frame->pc = fixup;
+ return;
+ }
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+ if (ear0 < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual addr %08lx\n", ear0);
+ printk(" PC : %08lx\n", __frame->pc);
+ printk(" EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);
+
+ asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
+ asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));
+
+ printk(KERN_ALERT " LRAI: %08lx\n", lrai);
+ printk(KERN_ALERT " LRAD: %08lx\n", lrad);
+
+ __break_hijack_kernel_event();
+
+ pge = pgd_offset(current->mm, ear0);
+ pue = pud_offset(pge, ear0);
+ _pme = pue->pue[0].ste[0];
+
+ printk(KERN_ALERT " PGE : %8p { PME %08lx }\n", pge, _pme);
+
+ if (_pme & xAMPRx_V) {
+ unsigned long dampr, damlr, val;
+
+ asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
+ : "=&r"(dampr), "=r"(damlr)
+ : "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V)
+ );
+
+ pte = (pte_t *) damlr + __pte_index(ear0);
+ val = pte_val(*pte);
+
+ asm volatile("movgs %0,dampr2" :: "r" (dampr));
+
+ printk(KERN_ALERT " PTE : %8p { %08lx }\n", pte, val);
+ }
+
+ die_if_kernel("Oops\n");
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+ out_of_memory:
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", current->comm);
+ if (user_mode(__frame))
+ do_exit(SIGKILL);
+ goto no_context;
+
+ do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *) ear0;
+ force_sig_info(SIGBUS, &info, current);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(__frame))
+ goto no_context;
+ return;
+
+/*
+ * The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
+ */
+ kernel_pte_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int index = pgd_index(ear0);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+ pgd = (pgd_t *) __get_TTBR();
+ pgd = (pgd_t *)__va(pgd) + index;
+ pgd_k = ((pgd_t *)(init_mm.pgd)) + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ //set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line
+
+ pud_k = pud_offset(pgd_k, ear0);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd_k = pmd_offset(pud_k, ear0);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+
+ pud = pud_offset(pgd, ear0);
+ pmd = pmd_offset(pud, ear0);
+ set_pmd(pmd, *pmd_k);
+
+ pte_k = pte_offset_kernel(pmd_k, ear0);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+} /* end do_page_fault() */
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
new file mode 100644
index 00000000000..7dc8fbf3af9
--- /dev/null
+++ b/arch/frv/mm/highmem.c
@@ -0,0 +1,33 @@
+/* highmem.c: arch-specific highmem stuff
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/highmem.h>
+
+void *kmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_high(page);
+}
+
+void kunmap(struct page *page)
+{
+ if (in_interrupt())
+ BUG();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+struct page *kmap_atomic_to_page(void *ptr)
+{
+ return virt_to_page(ptr);
+}
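A minimal usage sketch (not part of the patch) of the functions above, zeroing a possibly-highmem page from process context:

static void zero_page_slow(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep, so process context only */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* drop the temporary mapping */
}

In atomic context the kmap_atomic() variant seen in cache-page.c above would be used instead.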
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
new file mode 100644
index 00000000000..41958f57c83
--- /dev/null
+++ b/arch/frv/mm/init.c
@@ -0,0 +1,241 @@
+/* init.c: memory initialisation for FRV
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from:
+ * - linux/arch/m68knommu/mm/init.c
+ * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, Kenneth Albanowski <kjahds@kjahds.com>,
+ * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
+ * - linux/arch/m68k/mm/init.c
+ * - Copyright (C) 1995 Hamish Macdonald
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/virtconvert.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+
+#undef DEBUG
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * of a process dying in kernel mode, possibly leaving an inode
+ * unused etc..
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+static unsigned long empty_bad_page_table;
+static unsigned long empty_bad_page;
+unsigned long empty_zero_page;
+
+/*****************************************************************************/
+/*
+ * walk mem_map[] and print a summary of memory usage
+ */
+void show_mem(void)
+{
+ unsigned long i;
+ int free = 0, total = 0, reserved = 0, shared = 0;
+
+ printk("\nMem-info:\n");
+ show_free_areas();
+ i = max_mapnr;
+ while (i-- > 0) {
+ struct page *page = &mem_map[i];
+
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (!page_count(page))
+ free++;
+ else
+ shared += page_count(page) - 1;
+ }
+
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+
+} /* end show_mem() */
+
+/*****************************************************************************/
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S: it allocates the housekeeping
+ * pages, sets up the highmem pkmap page table if needed, and hands the
+ * allocatable pages to the zone allocator
+ */
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+
+ /* allocate some pages for kernel housekeeping tasks */
+ empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+
+ memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+#ifdef CONFIG_HIGHMEM
+ if (num_physpages - num_mappedpages) {
+ pgd_t *pge;
+ pud_t *pue;
+ pmd_t *pme;
+
+ pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);
+
+ memset(pkmap_page_table, 0, PAGE_SIZE);
+
+ pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
+ pue = pud_offset(pge, PKMAP_BASE);
+ pme = pmd_offset(pue, PKMAP_BASE);
+ __set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
+ }
+#endif
+
+ /* distribute the allocatable pages across the various zones and pass them to the allocator
+ */
+ zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
+ zones_size[ZONE_NORMAL] = 0;
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
+#endif
+
+ free_area_init(zones_size);
+
+#ifdef CONFIG_MMU
+ /* initialise init's MMU context */
+ init_new_context(&init_task, &init_mm);
+#endif
+
+} /* end paging_init() */
+
+/*****************************************************************************/
+/*
+ * release the bootmem pages to the page allocator and tot up memory statistics
+ */
+void __init mem_init(void)
+{
+ unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
+ unsigned long tmp;
+#ifdef CONFIG_MMU
+ unsigned long loop, pfn;
+ int datapages = 0;
+#endif
+ int codek = 0, datak = 0;
+
+ /* this will put all memory onto the freelists */
+ totalram_pages = free_all_bootmem();
+
+#ifdef CONFIG_MMU
+ for (loop = 0 ; loop < npages ; loop++)
+ if (PageReserved(&mem_map[loop]))
+ datapages++;
+
+#ifdef CONFIG_HIGHMEM
+ for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
+ struct page *page = &mem_map[pfn];
+
+ ClearPageReserved(page);
+ set_bit(PG_highmem, &page->fl