Diffstat (limited to 'arch/m68k/mm')
 -rw-r--r--  arch/m68k/mm/Makefile    |   9
 -rw-r--r--  arch/m68k/mm/cache.c     | 136
 -rw-r--r--  arch/m68k/mm/fault.c     | 119
 -rw-r--r--  arch/m68k/mm/init.c      | 194
 -rw-r--r--  arch/m68k/mm/kmap.c      |  50
 -rw-r--r--  arch/m68k/mm/mcfmmu.c    | 195
 -rw-r--r--  arch/m68k/mm/memory.c    | 199
 -rw-r--r--  arch/m68k/mm/motorola.c  | 135
 -rw-r--r--  arch/m68k/mm/sun3kmap.c  |  13
 -rw-r--r--  arch/m68k/mm/sun3mmu.c   |  18
10 files changed, 646 insertions(+), 422 deletions(-)
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 90f1c735c11..cfbf3205724 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -2,7 +2,10 @@
# Makefile for the linux m68k-specific parts of the memory manager.
#
-obj-y := init.o fault.o hwtest.o
-obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
+obj-y := init.o
+
+obj-$(CONFIG_MMU) += cache.o fault.o
+obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
+obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o hwtest.o
+obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
new file mode 100644
index 00000000000..3d84c1f2ffb
--- /dev/null
+++ b/arch/m68k/mm/cache.c
@@ -0,0 +1,136 @@
+/*
+ * linux/arch/m68k/mm/cache.c
+ *
+ * Instruction cache handling
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ */
+
+#include <linux/module.h>
+#include <asm/pgalloc.h>
+#include <asm/traps.h>
+
+
+static unsigned long virt_to_phys_slow(unsigned long vaddr)
+{
+ if (CPU_IS_060) {
+ unsigned long paddr;
+
+ /* The PLPAR instruction causes an access error if the translation
+ * is not possible. To catch this we use the same exception mechanism
+ * as for user space accesses in <asm/uaccess.h>. */
+ asm volatile (".chip 68060\n"
+ "1: plpar (%0)\n"
+ ".chip 68k\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ " .even\n"
+ "3: sub.l %0,%0\n"
+ " jra 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
+ ".previous"
+ : "=a" (paddr)
+ : "0" (vaddr));
+ return paddr;
+ } else if (CPU_IS_040) {
+ unsigned long mmusr;
+
+ asm volatile (".chip 68040\n\t"
+ "ptestr (%1)\n\t"
+ "movec %%mmusr, %0\n\t"
+ ".chip 68k"
+ : "=r" (mmusr)
+ : "a" (vaddr));
+
+ if (mmusr & MMU_R_040)
+ return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
+ } else {
+ unsigned short mmusr;
+ unsigned long *descaddr;
+
+ asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+ "pmove %%psr,%1"
+ : "=a&" (descaddr), "=m" (mmusr)
+ : "a" (vaddr), "d" (get_fs().seg));
+ if (mmusr & (MMU_I|MMU_B|MMU_L))
+ return 0;
+ descaddr = phys_to_virt((unsigned long)descaddr);
+ switch (mmusr & MMU_NUM) {
+ case 1:
+ return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
+ case 2:
+ return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
+ case 3:
+ return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
+ }
+ }
+ return 0;
+}
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+void flush_icache_range(unsigned long address, unsigned long endaddr)
+{
+ if (CPU_IS_COLDFIRE) {
+ unsigned long start, end;
+ start = address & ICACHE_SET_MASK;
+ end = endaddr & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_icache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_icache(start, end);
+ } else if (CPU_IS_040_OR_060) {
+ address &= PAGE_MASK;
+
+ do {
+ asm volatile ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushp %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (virt_to_phys_slow(address)));
+ address += PAGE_SIZE;
+ } while (address < endaddr);
+ } else {
+ unsigned long tmp;
+ asm volatile ("movec %%cacr,%0\n\t"
+ "orw %1,%0\n\t"
+ "movec %0,%%cacr"
+ : "=&d" (tmp)
+ : "di" (FLUSH_I));
+ }
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ if (CPU_IS_COLDFIRE) {
+ unsigned long start, end;
+ start = addr & ICACHE_SET_MASK;
+ end = (addr + len) & ICACHE_SET_MASK;
+ if (start > end) {
+ flush_cf_icache(0, end);
+ end = ICACHE_MAX_ADDR;
+ }
+ flush_cf_icache(start, end);
+
+ } else if (CPU_IS_040_OR_060) {
+ asm volatile ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushp %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (page_to_phys(page)));
+ } else {
+ unsigned long tmp;
+ asm volatile ("movec %%cacr,%0\n\t"
+ "orw %1,%0\n\t"
+ "movec %0,%%cacr"
+ : "=&d" (tmp)
+ : "di" (FLUSH_I));
+ }
+}
+
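Note on the new flush_icache_range(): the typical consumer is code that writes instructions through the data side and must then invalidate the instruction side. A minimal sketch of such a caller (hypothetical helper, not part of this patch):

/* Hypothetical example: patch one opcode in kernel text, then
 * make sure no stale copy survives in the icache. */
static void patch_insn(unsigned short *insn, unsigned short opcode)
{
	*insn = opcode;		/* store goes through the data cache */
	flush_icache_range((unsigned long)insn,
			   (unsigned long)insn + sizeof(*insn));
}

The ColdFire branch also shows why start and end are masked with ICACHE_SET_MASK: when the masked start exceeds the masked end the set index range wraps, so the flush is split into two passes.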
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index aec15270d33..2bd7487440c 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -13,12 +13,10 @@
#include <asm/setup.h>
#include <asm/traps.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
extern void die_if_kernel(char *, struct pt_regs *, long);
-extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */
int send_fault_sig(struct pt_regs *regs)
{
@@ -27,29 +25,15 @@ int send_fault_sig(struct pt_regs *regs)
siginfo.si_signo = current->thread.signo;
siginfo.si_code = current->thread.code;
siginfo.si_addr = (void *)current->thread.faddr;
-#ifdef DEBUG
- printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
-#endif
+ pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
+ siginfo.si_signo, siginfo.si_code);
if (user_mode(regs)) {
force_sig_info(siginfo.si_signo,
&siginfo, current);
} else {
- const struct exception_table_entry *fixup;
-
- /* Are we prepared to handle this kernel fault? */
- if ((fixup = search_exception_tables(regs->pc))) {
- struct pt_regs *tregs;
- /* Create a new four word stack frame, discarding the old
- one. */
- regs->stkadj = frame_extra_sizes[regs->format];
- tregs = (struct pt_regs *)((ulong)regs + regs->stkadj);
- tregs->vector = regs->vector;
- tregs->format = 0;
- tregs->pc = fixup->fixup;
- tregs->sr = regs->sr;
+ if (handle_kernel_fault(regs))
return -1;
- }
//if (siginfo.si_signo == SIGBUS)
// force_sig_info(siginfo.si_signo,
@@ -60,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs)
* terminate things with extreme prejudice.
*/
if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ pr_alert("Unable to handle kernel NULL pointer dereference");
else
- printk(KERN_ALERT "Unable to handle kernel access");
- printk(" at virtual address %p\n", siginfo.si_addr);
+ pr_alert("Unable to handle kernel access");
+ pr_cont(" at virtual address %p\n", siginfo.si_addr);
die_if_kernel("Oops", regs, 0 /*error_code*/);
do_exit(SIGKILL);
}
@@ -87,21 +71,22 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
- int write, fault;
+ int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
-#ifdef DEBUG
- printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
- regs->sr, regs->pc, address, error_code,
- current->mm->pgd);
-#endif
+ pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
+ regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -129,22 +114,19 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* we can handle it..
*/
good_area:
-#ifdef DEBUG
- printk("do_page_fault: good_area\n");
-#endif
- write = 0;
+ pr_debug("do_page_fault: good_area\n");
switch (error_code & 3) {
default: /* 3: write, present */
/* fall through */
case 2: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto acc_err;
- write++;
+ flags |= FAULT_FLAG_WRITE;
break;
case 1: /* read, present */
goto acc_err;
case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto acc_err;
}
@@ -154,22 +136,44 @@ good_area:
* the fault.
*/
- survive:
- fault = handle_mm_fault(mm, vma, address, write);
-#ifdef DEBUG
- printk("handle_mm_fault returns %d\n",fault);
-#endif
- switch (fault) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- goto bus_err;
- default:
- goto out_of_memory;
+ fault = handle_mm_fault(mm, vma, address, flags);
+ pr_debug("handle_mm_fault returns %d\n", fault);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto bus_err;
+ BUG();
+ }
+
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
}
up_read(&mm->mmap_sem);
@@ -181,15 +185,10 @@ good_area:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (current->pid == 1) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
-
- printk("VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_exit(SIGKILL);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return 0;
no_context:
current->thread.signo = SIGBUS;
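For readers of the good_area switch above: the low two bits of error_code encode the access, bit 0 meaning the page was present and bit 1 meaning the access was a write. The same decision, written out as a standalone predicate (hypothetical helper name, illustrative only):

/* Illustrative only; mirrors the switch in do_page_fault() above. */
static bool fault_allowed(unsigned long error_code, unsigned long vm_flags)
{
	if (error_code & 2)	/* write, whether present or not */
		return vm_flags & VM_WRITE;
	if (error_code & 1)	/* read on a present page */
		return false;	/* pure protection violation */
	/* read, not present */
	return vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
}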
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index c45beb95594..acaff6a49e3 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -7,7 +7,7 @@
* to motorola.c and sun3mmu.c
*/
-#include <linux/config.h>
+#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -17,100 +17,140 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/system.h>
+#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
+#include <asm/sections.h>
#include <asm/tlb.h>
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
-
void *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
-void show_mem(void)
-{
- unsigned long i;
- int free = 0, total = 0, reserved = 0, shared = 0;
- int cached = 0;
-
- printk("\nMem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!page_count(mem_map+i))
- free++;
- else
- shared += page_count(mem_map+i) - 1;
- }
- printk("%d pages of RAM\n",total);
- printk("%d free pages\n",free);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
-}
-
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
extern void init_pointer_table(unsigned long ptable);
+extern pmd_t *zero_pgtable;
+#endif
-/* References to section boundaries */
+#ifdef CONFIG_MMU
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
+pg_data_t pg_data_map[MAX_NUMNODES];
+EXPORT_SYMBOL(pg_data_map);
-extern pmd_t *zero_pgtable;
+int m68k_virt_to_node_shift;
-void __init mem_init(void)
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+pg_data_t *pg_data_table[65];
+EXPORT_SYMBOL(pg_data_table);
+#endif
+
+void __init m68k_setup_node(int node)
{
- int codepages = 0;
- int datapages = 0;
- int initpages = 0;
- unsigned long tmp;
-#ifndef CONFIG_SUN3
- int i;
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+ struct m68k_mem_info *info = m68k_memory + node;
+ int i, end;
+
+ i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
+ end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
+ for (; i <= end; i++) {
+ if (pg_data_table[i])
+ printk("overlap at %u for chunk %u\n", i, node);
+ pg_data_table[i] = pg_data_map + node;
+ }
#endif
+ pg_data_map[node].bdata = bootmem_node_data + node;
+ node_set_online(node);
+}
- max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);
+#else /* CONFIG_MMU */
-#ifdef CONFIG_ATARI
- if (MACH_IS_ATARI)
- atari_stram_mem_init_hook();
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+ /*
+ * Make sure start_mem is page aligned, otherwise bootmem and
+ * page_alloc get different views of the world.
+ */
+ unsigned long end_mem = memory_end & PAGE_MASK;
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+
+ high_memory = (void *) end_mem;
+
+ empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ /*
+ * Set up SFC/DFC registers (user data space).
+ */
+ set_fs (USER_DS);
+
+ zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
+ free_area_init(zones_size);
+}
+
+#endif /* CONFIG_MMU */
+
+void free_initmem(void)
+{
+#ifndef CONFIG_MMU_SUN3
+ free_initmem_default(-1);
+#endif /* CONFIG_MMU_SUN3 */
+}
+
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS &vectors[0]
+#else
+#define VECTORS _ramvec
#endif
- /* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
-
- for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
- if (PageReserved(virt_to_page(tmp))) {
- if (tmp >= (unsigned long)&_text
- && tmp < (unsigned long)&_etext)
- codepages++;
- else if (tmp >= (unsigned long) &__init_begin
- && tmp < (unsigned long) &__init_end)
- initpages++;
- else
- datapages++;
- continue;
- }
- }
+void __init print_memmap(void)
+{
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+ pr_notice("Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld KiB)\n"
+ " kmap : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MiB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d KiB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d KiB)\n",
+ MLK(VECTORS, VECTORS + 256),
+ MLM(KMAP_START, KMAP_END),
+ MLM(VMALLOC_START, VMALLOC_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_stext, _etext),
+ MLK_ROUNDUP(_sdata, _edata),
+ MLK_ROUNDUP(__bss_start, __bss_stop));
+}
+
+static inline void init_pointer_tables(void)
+{
+#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ int i;
-#ifndef CONFIG_SUN3
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -122,26 +162,20 @@ void __init mem_init(void)
if (zero_pgtable)
init_pointer_table((unsigned long)zero_pgtable);
#endif
+}
- printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
- (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10));
+void __init mem_init(void)
+{
+ /* this will put all memory onto the freelists */
+ free_all_bootmem();
+ init_pointer_tables();
+ mem_init_print_info(NULL);
+ print_memmap();
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
- free_page(start);
- totalram_pages++;
- pages++;
- }
- printk ("Freeing initrd memory: %dk freed\n", pages);
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
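The pg_data_table introduced above exists so a kernel virtual address can be resolved to its node with one shift and one array lookup. A sketch of the idea, assuming the variables defined in this file (the in-tree lookup lives in the m68k page headers):

/* Sketch only: O(1) virtual-address-to-node resolution. */
static inline pg_data_t *va_to_node(unsigned long vaddr)
{
	return pg_data_table[vaddr >> m68k_virt_to_node_shift];
}

m68k_setup_node() fills every table slot a memory chunk spans, so an overlapping chunk is at least reported ("overlap at ...") before its slots are reassigned.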
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 5dcb3fa35ea..6e4955bc542 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -7,7 +7,7 @@
* used by other architectures /Roman Zippel
*/
-#include <linux/config.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -20,7 +20,6 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
-#include <asm/system.h>
#undef DEBUG
@@ -28,9 +27,9 @@
/*
* For 040/060 we can use the virtual memory area like other architectures,
- * but for 020/030 we want to use early termination page descriptor and we
+ * but for 020/030 we want to use early termination page descriptors and we
* can't mix this with normal page descriptors, so we have to copy that code
- * (mm/vmalloc.c) and return appriorate aligned addresses.
+ * (mm/vmalloc.c) and return appropriately aligned addresses.
*/
#ifdef CPU_M68040_OR_M68060_ONLY
@@ -59,15 +58,17 @@ static struct vm_struct *get_io_area(unsigned long size)
unsigned long addr;
struct vm_struct **p, *tmp, *area;
- area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL);
+ area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
return NULL;
addr = KMAP_START;
for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
if (size + addr < (unsigned long)tmp->addr)
break;
- if (addr > KMAP_END-size)
+ if (addr > KMAP_END-size) {
+ kfree(area);
return NULL;
+ }
addr = tmp->size + (unsigned long)tmp->addr;
}
area->addr = (void *)addr;
@@ -97,12 +98,11 @@ static inline void free_io_area(void *addr)
#endif
/*
- * Map some physical address range into the kernel address space. The
- * code is copied and adapted from map_chunk().
+ * Map some physical address range into the kernel address space.
*/
/* Rewritten by Andreas Schwab to remove all races. */
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
struct vm_struct *area;
unsigned long virtaddr, retaddr;
@@ -114,14 +114,14 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
/*
* Don't allow mappings that wrap..
*/
- if (!size || size > physaddr + size)
+ if (!size || physaddr > (unsigned long)(-size))
return NULL;
#ifdef CONFIG_AMIGA
if (MACH_IS_AMIGA) {
if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
&& (cacheflag == IOMAP_NOCACHE_SER))
- return (void *)physaddr;
+ return (void __iomem *)physaddr;
}
#endif
@@ -170,7 +170,8 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
break;
}
} else {
- physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
+ _PAGE_DIRTY | _PAGE_READWRITE);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
@@ -201,7 +202,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
- pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
+ pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
return NULL;
@@ -218,28 +219,30 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
#endif
flush_tlb_all();
- return (void *)retaddr;
+ return (void __iomem *)retaddr;
}
+EXPORT_SYMBOL(__ioremap);
/*
- * Unmap a ioremap()ed region again
+ * Unmap an ioremap()ed region again
*/
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
if ((!MACH_IS_AMIGA) ||
(((unsigned long)addr < 0x40000000) ||
((unsigned long)addr > 0x60000000)))
- free_io_area(addr);
+ free_io_area((__force void *)addr);
#else
- free_io_area(addr);
+ free_io_area((__force void *)addr);
#endif
}
+EXPORT_SYMBOL(iounmap);
/*
* __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
+ * Currently it doesn't free pointer/page tables anymore but this
+ * wasn't used anyway and might be added later.
*/
void __iounmap(void *addr, unsigned long size)
{
@@ -259,13 +262,15 @@ void __iounmap(void *addr, unsigned long size)
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+ int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
- if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+ if (pmd_type == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
- }
+ } else if (pmd_type == 0)
+ continue;
}
if (pmd_bad(*pmd_dir)) {
@@ -359,3 +364,4 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
flush_tlb_all();
}
+EXPORT_SYMBOL(kernel_set_cachemode);
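With __ioremap(), iounmap() and kernel_set_cachemode() now exported, modules can map device registers themselves. A hedged usage sketch (hypothetical physical address and register layout; in_be32() is the raw big-endian accessor from <asm/raw_io.h>):

/* Illustrative only: map 4 KiB of registers uncached, read an ID
 * register, and tear the mapping down again. */
static int probe_board(void)
{
	void __iomem *regs = __ioremap(0x02000000, 0x1000, IOMAP_NOCACHE_SER);

	if (!regs)
		return -ENOMEM;
	pr_info("board id: %08x\n", in_be32(regs));
	iounmap(regs);
	return 0;
}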
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644
index 00000000000..f58fafe7e4c
--- /dev/null
+++ b/arch/m68k/mm/mcfmmu.c
@@ -0,0 +1,195 @@
+/*
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the Coldfire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/mcf_pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern unsigned long num_pages;
+
+/*
+ * ColdFire paging_init derived from sun3.
+ */
+void __init paging_init(void)
+{
+ pgd_t *pg_dir;
+ pte_t *pg_table;
+ unsigned long address, size;
+ unsigned long next_pgtable, bootmem_end;
+ unsigned long zones_size[MAX_NR_ZONES];
+ enum zone_type zone;
+ int i;
+
+ empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+ pg_dir = swapper_pg_dir;
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+ size = num_pages * sizeof(pte_t);
+ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+ next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+
+ bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+ pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+ address = PAGE_OFFSET;
+ while (address < (unsigned long)high_memory) {
+ pg_table = (pte_t *) next_pgtable;
+ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+ pgd_val(*pg_dir) = (unsigned long) pg_table;
+ pg_dir++;
+
+ /* now change pg_table to kernel virtual addresses */
+ for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
+ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ if (address >= (unsigned long) high_memory)
+ pte_val(pte) = 0;
+
+ set_pte(pg_table, pte);
+ address += PAGE_SIZE;
+ }
+ }
+
+ current->mm = NULL;
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ zones_size[zone] = 0x0;
+ zones_size[ZONE_DMA] = num_pages;
+ free_area_init(zones_size);
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+ unsigned long flags, mmuar, mmutr;
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int asid;
+
+ local_irq_save(flags);
+
+ mmuar = (dtlb) ? mmu_read(MMUAR) :
+ regs->pc + (extension_word * sizeof(long));
+
+ mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+ if (!mm) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pgd = pgd_offset(mm, mmuar);
+ if (pgd_none(*pgd)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pmd = pmd_offset(pgd, mmuar);
+ if (pmd_none(*pmd)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+ : pte_offset_map(pmd, mmuar);
+ if (pte_none(*pte) || !pte_present(*pte)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+
+ if (write) {
+ if (!pte_write(*pte)) {
+ local_irq_restore(flags);
+ return -1;
+ }
+ set_pte(pte, pte_mkdirty(*pte));
+ }
+
+ set_pte(pte, pte_mkyoung(*pte));
+ asid = mm->context & 0xff;
+ if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+ set_pte(pte, pte_wrprotect(*pte));
+
+ mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
+ if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
+ mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
+ mmu_write(MMUTR, mmutr);
+
+ mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+ ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+ if (dtlb)
+ mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+ else
+ mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
+
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*
+ * Initialize the context management stuff.
+ * The following was taken from arch/ppc/mmu_context.c
+ */
+void __init mmu_context_init(void)
+{
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes FIRST_CONTEXT < 32.
+ */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+ atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP. If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ */
+void steal_context(void)
+{
+ struct mm_struct *mm;
+ /*
+ * free up context `next_mmu_context'
+ * if we shouldn't free context 0, don't...
+ */
+ if (next_mmu_context < FIRST_CONTEXT)
+ next_mmu_context = FIRST_CONTEXT;
+ mm = context_mm[next_mmu_context];
+ flush_tlb_mm(mm);
+ destroy_context(mm);
+}
+
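The context bookkeeping above (context_map, next_mmu_context, nr_free_contexts, context_mm) follows the ppc scheme the comment credits; the allocator itself is not in this file. A hedged sketch of how a context would be handed out under that scheme (illustrative only; assumes LAST_CONTEXT + 1 is a power of two):

/* Sketch of a context allocator over the globals defined above. */
static void get_context(struct mm_struct *mm)
{
	mm_context_t ctx = next_mmu_context;

	if (atomic_read(&nr_free_contexts) == 0)
		steal_context();		/* evict one victim mm */

	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = FIRST_CONTEXT;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	atomic_dec(&nr_free_contexts);
	mm->context = ctx;
	context_mm[ctx] = mm;
}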
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 1453a601372..51bc9d258ed 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -4,20 +4,19 @@
* Copyright (C) 1995 Hamish Macdonald
*/
-#include <linux/config.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>
@@ -54,7 +53,7 @@ void __init init_pointer_table(unsigned long ptable)
/* unreserve the page so it's possible to free that page */
PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
- set_page_count(PD_PAGE(dp), 1);
+ init_page_count(PD_PAGE(dp));
return;
}
@@ -94,8 +93,7 @@ pmd_t *get_pointer_table (void)
PD_MARKBITS(dp) = mask & ~tmp;
if (!PD_MARKBITS(dp)) {
/* move to end of list */
- list_del(dp);
- list_add_tail(dp, &ptable_list);
+ list_move_tail(dp, &ptable_list);
}
return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
@@ -123,73 +121,11 @@ int free_pointer_table (pmd_t *ptable)
* move this descriptor to the front of the list, since
* it has one or more free tables.
*/
- list_del(dp);
- list_add(dp, &ptable_list);
+ list_move(dp, &ptable_list);
}
return 0;
}
-#ifdef DEBUG_INVALID_PTOV
-int mm_inv_cnt = 5;
-#endif
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-/*
- * The following two routines map from a physical address to a kernel
- * virtual address and vice versa.
- */
-unsigned long mm_vtop(unsigned long vaddr)
-{
- int i=0;
- unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
-
- do {
- if (voff < m68k_memory[i].size) {
-#ifdef DEBUGPV
- printk ("VTOP(%p)=%lx\n", vaddr,
- m68k_memory[i].addr + voff);
-#endif
- return m68k_memory[i].addr + voff;
- }
- voff -= m68k_memory[i].size;
- } while (++i < m68k_num_memory);
-
- /* As a special case allow `__pa(high_memory)'. */
- if (voff == 0)
- return m68k_memory[i-1].addr + m68k_memory[i-1].size;
-
- return -1;
-}
-#endif
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-unsigned long mm_ptov (unsigned long paddr)
-{
- int i = 0;
- unsigned long poff, voff = PAGE_OFFSET;
-
- do {
- poff = paddr - m68k_memory[i].addr;
- if (poff < m68k_memory[i].size) {
-#ifdef DEBUGPV
- printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
-#endif
- return poff + voff;
- }
- voff += m68k_memory[i].size;
- } while (++i < m68k_num_memory);
-
-#ifdef DEBUG_INVALID_PTOV
- if (mm_inv_cnt > 0) {
- mm_inv_cnt--;
- printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
- paddr, __builtin_return_address(0));
- }
-#endif
- return -1;
-}
-#endif
-
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
@@ -266,7 +202,9 @@ static inline void pushcl040(unsigned long paddr)
void cache_clear (unsigned long paddr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ clear_cf_bcache(0, DCACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
int tmp;
/*
@@ -301,6 +239,7 @@ void cache_clear (unsigned long paddr, int len)
mach_l2_flush(0);
#endif
}
+EXPORT_SYMBOL(cache_clear);
/*
@@ -312,7 +251,9 @@ void cache_clear (unsigned long paddr, int len)
void cache_push (unsigned long paddr, int len)
{
- if (CPU_IS_040_OR_060) {
+ if (CPU_IS_COLDFIRE) {
+ flush_cf_bcache(0, DCACHE_MAX_ADDR);
+ } else if (CPU_IS_040_OR_060) {
int tmp = PAGE_SIZE;
/*
@@ -353,119 +294,5 @@ void cache_push (unsigned long paddr, int len)
mach_l2_flush(1);
#endif
}
+EXPORT_SYMBOL(cache_push);
-static unsigned long virt_to_phys_slow(unsigned long vaddr)
-{
- if (CPU_IS_060) {
- mm_segment_t fs = get_fs();
- unsigned long paddr;
-
- set_fs(get_ds());
-
- /* The PLPAR instruction causes an access error if the translation
- * is not possible. To catch this we use the same exception mechanism
- * as for user space accesses in <asm/uaccess.h>. */
- asm volatile (".chip 68060\n"
- "1: plpar (%0)\n"
- ".chip 68k\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: sub.l %0,%0\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a" (paddr)
- : "0" (vaddr));
- set_fs(fs);
- return paddr;
- } else if (CPU_IS_040) {
- mm_segment_t fs = get_fs();
- unsigned long mmusr;
-
- set_fs(get_ds());
-
- asm volatile (".chip 68040\n\t"
- "ptestr (%1)\n\t"
- "movec %%mmusr, %0\n\t"
- ".chip 68k"
- : "=r" (mmusr)
- : "a" (vaddr));
- set_fs(fs);
-
- if (mmusr & MMU_R_040)
- return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
- } else {
- unsigned short mmusr;
- unsigned long *descaddr;
-
- asm volatile ("ptestr #5,%2@,#7,%0\n\t"
- "pmove %%psr,%1@"
- : "=a&" (descaddr)
- : "a" (&mmusr), "a" (vaddr));
- if (mmusr & (MMU_I|MMU_B|MMU_L))
- return 0;
- descaddr = phys_to_virt((unsigned long)descaddr);
- switch (mmusr & MMU_NUM) {
- case 1:
- return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
- case 2:
- return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
- case 3:
- return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
- }
- }
- return 0;
-}
-
-/* Push n pages at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-void flush_icache_range(unsigned long address, unsigned long endaddr)
-{
- if (CPU_IS_040_OR_060) {
- address &= PAGE_MASK;
-
- if (address >= PAGE_OFFSET && address < (unsigned long)high_memory) {
- do {
- asm volatile ("nop\n\t"
- ".chip 68040\n\t"
- "cpushp %%bc,(%0)\n\t"
- ".chip 68k"
- : : "a" (virt_to_phys((void *)address)));
- address += PAGE_SIZE;
- } while (address < endaddr);
- } else {
- do {
- asm volatile ("nop\n\t"
- ".chip 68040\n\t"
- "cpushp %%bc,(%0)\n\t"
- ".chip 68k"
- : : "a" (virt_to_phys_slow(address)));
- address += PAGE_SIZE;
- } while (address < endaddr);
- }
- } else {
- unsigned long tmp;
- asm volatile ("movec %%cacr,%0\n\t"
- "orw %1,%0\n\t"
- "movec %0,%%cacr"
- : "=&d" (tmp)
- : "di" (FLUSH_I));
- }
-}
-
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-int mm_end_of_chunk (unsigned long addr, int len)
-{
- int i;
-
- for (i = 0; i < m68k_num_memory; i++)
- if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
- return 1;
- return 0;
-}
-#endif
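cache_clear() and cache_push() are now exported, so drivers that manage their own DMA buffers can keep the caches coherent by hand. A hedged sketch of the idiom (illustrative only):

/* Illustrative only: coherency around a device DMA transfer. */
static void dma_roundtrip(void *buf, int len)
{
	unsigned long paddr = virt_to_phys(buf);

	cache_push(paddr, len);	 /* write back dirty lines before the
				  * device reads the buffer */
	/* ... device DMA in both directions runs here ... */
	cache_clear(paddr, len); /* invalidate before the CPU reads
				  * what the device wrote */
}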
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index d855fec2631..b958916e5ea 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/m68k/motorola.c
+ * linux/arch/m68k/mm/motorola.c
*
* Routines specific to the Motorola MMU, originally from:
* linux/arch/m68k/init.c
@@ -8,7 +8,6 @@
* Moved 8/20/1999 Sam Creasey
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -19,18 +18,19 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
+#include <asm/sections.h>
#undef DEBUG
@@ -44,6 +44,11 @@ unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
+/* size of memory already mapped in head.S */
+extern __initdata unsigned long m68k_init_mapped_size;
+
+extern unsigned long availmem;
+
static pte_t * __init kernel_page_table(void)
{
pte_t *ptablep;
@@ -99,19 +104,20 @@ static pmd_t * __init kernel_ptr_table(void)
return last_pgtable;
}
-static unsigned long __init
-map_chunk (unsigned long addr, long size)
+static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
- static unsigned long virtaddr = PAGE_OFFSET;
- unsigned long physaddr;
+ unsigned long physaddr, virtaddr, size;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
- physaddr = (addr | m68k_supervisor_cachemode |
- _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ size = m68k_memory[node].size;
+ physaddr = m68k_memory[node].addr;
+ virtaddr = (unsigned long)phys_to_virt(physaddr);
+ physaddr |= m68k_supervisor_cachemode |
+ _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
if (CPU_IS_040_OR_060)
physaddr |= _PAGE_GLOBAL040;
@@ -191,8 +197,6 @@ map_chunk (unsigned long addr, long size)
#ifdef DEBUG
printk("\n");
#endif
-
- return virtaddr;
}
/*
@@ -201,16 +205,13 @@ map_chunk (unsigned long addr, long size)
*/
void __init paging_init(void)
{
- int chunk;
- unsigned long mem_avail = 0;
- unsigned long zones_size[3] = { 0, };
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+ unsigned long min_addr, max_addr;
+ unsigned long addr, size, end;
+ int i;
#ifdef DEBUG
- {
- extern unsigned long availmem;
- printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
- kernel_pg_dir, availmem, start_mem, end_mem);
- }
+ printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif
/* Fix the cache mode in the page descriptors for the 680[46]0. */
@@ -223,31 +224,70 @@ void __init paging_init(void)
pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
}
+ min_addr = m68k_memory[0].addr;
+ max_addr = min_addr + m68k_memory[0].size;
+ for (i = 1; i < m68k_num_memory;) {
+ if (m68k_memory[i].addr < min_addr) {
+ printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
+ m68k_memory[i].addr, m68k_memory[i].size);
+ printk("Fix your bootloader or use a memfile to make use of this area!\n");
+ m68k_num_memory--;
+ memmove(m68k_memory + i, m68k_memory + i + 1,
+ (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
+ continue;
+ }
+ addr = m68k_memory[i].addr + m68k_memory[i].size;
+ if (addr > max_addr)
+ max_addr = addr;
+ i++;
+ }
+ m68k_memoffset = min_addr - PAGE_OFFSET;
+ m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+
+ module_fixup(NULL, __start_fixup, __stop_fixup);
+ flush_icache();
+
+ high_memory = phys_to_virt(max_addr);
+
+ min_low_pfn = availmem >> PAGE_SHIFT;
+ max_low_pfn = max_addr >> PAGE_SHIFT;
+
+ for (i = 0; i < m68k_num_memory; i++) {
+ addr = m68k_memory[i].addr;
+ end = addr + m68k_memory[i].size;
+ m68k_setup_node(i);
+ availmem = PAGE_ALIGN(availmem);
+ availmem += init_bootmem_node(NODE_DATA(i),
+ availmem >> PAGE_SHIFT,
+ addr >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+ }
+
/*
* Map the physical memory available into the kernel virtual
- * address space. It may allocate some memory for page
- * tables and thus modify availmem.
+ * address space. First initialize the bootmem allocator with
+ * the memory we already mapped, so map_node() has something
+ * to allocate.
*/
-
- for (chunk = 0; chunk < m68k_num_memory; chunk++) {
- mem_avail = map_chunk (m68k_memory[chunk].addr,
- m68k_memory[chunk].size);
-
- }
+ addr = m68k_memory[0].addr;
+ size = m68k_memory[0].size;
+ free_bootmem_node(NODE_DATA(0), availmem,
+ min(m68k_init_mapped_size, size) - (availmem - addr));
+ map_node(0);
+ if (size > m68k_init_mapped_size)
+ free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
+ size - m68k_init_mapped_size);
+
+ for (i = 1; i < m68k_num_memory; i++)
+ map_node(i);
flush_tlb_all();
-#ifdef DEBUG
- printk ("memory available is %ldKB\n", mem_avail >> 10);
- printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
-#endif
/*
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
/*
* Set up SFC/DFC registers
@@ -257,29 +297,12 @@ void __init paging_init(void)
#ifdef DEBUG
printk ("before free_area_init\n");
#endif
- zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
- (mach_max_dma_address+1) : (unsigned long)high_memory);
- zones_size[1] = (unsigned long)high_memory - zones_size[0];
-
- zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[1] >>= PAGE_SHIFT;
-
- free_area_init(zones_size);
-}
-
-extern char __init_begin, __init_end;
-
-void free_initmem(void)
-{
- unsigned long addr;
-
- addr = (unsigned long)&__init_begin;
- for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
- virt_to_page(addr)->flags &= ~(1 << PG_reserved);
- set_page_count(virt_to_page(addr), 1);
- free_page(addr);
- totalram_pages++;
+ for (i = 0; i < m68k_num_memory; i++) {
+ zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
+ free_area_init_node(i, zones_size,
+ m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+ if (node_present_pages(i))
+ node_set_state(i, N_NORMAL_MEMORY);
}
}
-
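map_node() inherits map_chunk()'s strategy of using the largest descriptor that fits: on 020/030, a 32 MiB early-termination root entry (ROOTTREESIZE) where alignment allows, otherwise 256 KiB pointer-table slots (PTRTREESIZE). A hedged back-of-envelope sketch of that arithmetic (illustrative only; assumes the chunk is 256 KiB aligned):

/* Sketch: how many descriptors of each granularity a chunk needs. */
static void count_slots(unsigned long addr, unsigned long size)
{
	unsigned long roots = 0;

	while (size >= ROOTTREESIZE && !(addr & (ROOTTREESIZE - 1))) {
		roots++;
		addr += ROOTTREESIZE;
		size -= ROOTTREESIZE;
	}
	pr_debug("%lu 32MiB root slots, %lu 256KiB pointer slots\n",
		 roots, size / PTRTREESIZE);
}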
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c
index 7f0d86f3fe7..3dc41158c05 100644
--- a/arch/m68k/mm/sun3kmap.c
+++ b/arch/m68k/mm/sun3kmap.c
@@ -8,6 +8,7 @@
* for more details.
*/
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -59,7 +60,7 @@ static inline void do_pmeg_mapin(unsigned long phys, unsigned long virt,
}
}
-void *sun3_ioremap(unsigned long phys, unsigned long size,
+void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
unsigned long type)
{
struct vm_struct *area;
@@ -101,22 +102,25 @@ void *sun3_ioremap(unsigned long phys, unsigned long size,
virt += seg_pages * PAGE_SIZE;
}
- return (void *)ret;
+ return (void __iomem *)ret;
}
+EXPORT_SYMBOL(sun3_ioremap);
-void *__ioremap(unsigned long phys, unsigned long size, int cache)
+void __iomem *__ioremap(unsigned long phys, unsigned long size, int cache)
{
return sun3_ioremap(phys, size, SUN3_PAGE_TYPE_IO);
}
+EXPORT_SYMBOL(__ioremap);
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
{
vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
+EXPORT_SYMBOL(iounmap);
/* sun3_map_test(addr, val) -- Reads a byte from addr, storing to val,
* trapping the potential read fault. Returns 0 if the access faulted,
@@ -154,3 +158,4 @@ int sun3_map_test(unsigned long addr, char *val)
return ret;
}
+EXPORT_SYMBOL(sun3_map_test);
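sun3_map_test() is exported too; per its comment it reads one byte through a fault-trapping path and returns 0 if the access faulted. A hedged probe idiom built on it (hypothetical helper):

/* Illustrative only: check whether an address is safely readable. */
static int addr_responds(unsigned long addr)
{
	char val;

	return sun3_map_test(addr, &val);	/* 0 means the read faulted */
}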
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index a47be196a47..269f81158a3 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -21,7 +21,6 @@
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
@@ -31,10 +30,6 @@ const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
extern unsigned long num_pages;
-void free_initmem(void)
-{
-}
-
/* For the sun3 we try to follow the i386 paging_init() more closely */
/* start_mem and end_mem have PAGE_OFFSET added already */
/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
@@ -46,15 +41,13 @@ void __init paging_init(void)
unsigned long address;
unsigned long next_pgtable;
unsigned long bootmem_end;
- unsigned long zones_size[3] = {0, 0, 0};
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long size;
-
#ifdef TEST_VERIFY_AREA
wp_works_ok = 0;
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -92,10 +85,13 @@ void __init paging_init(void)
current->mm = NULL;
/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
- zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[1] = 0;
+ zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+
+ /* I really wish I knew why the following change made things better... -- Sam */
+/* free_area_init(zones_size); */
+ free_area_init_node(0, zones_size,
+ (__pa(PAGE_OFFSET) >> PAGE_SHIFT) + 1, NULL);
- free_area_init(zones_size);
}