Diffstat (limited to 'arch/m68k/mm')

 -rw-r--r--  arch/m68k/mm/Makefile   |   9
 -rw-r--r--  arch/m68k/mm/cache.c    |  30
 -rw-r--r--  arch/m68k/mm/fault.c    |  88
 -rw-r--r--  arch/m68k/mm/init.c     | 145
 -rw-r--r--  arch/m68k/mm/kmap.c     |  14
 -rw-r--r--  arch/m68k/mm/mcfmmu.c   | 195
 -rw-r--r--  arch/m68k/mm/memory.c   |   9
 -rw-r--r--  arch/m68k/mm/motorola.c |  29
 -rw-r--r--  arch/m68k/mm/sun3mmu.c  |   5

9 files changed, 382 insertions(+), 142 deletions(-)
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 5eaa43c4cb3..cfbf3205724 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -2,7 +2,10 @@
 # Makefile for the linux m68k-specific parts of the memory manager.
 #
 
-obj-y		:= cache.o init.o fault.o hwtest.o
+obj-y	:= init.o
+
+obj-$(CONFIG_MMU)		+= cache.o fault.o
+obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o hwtest.o
+obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o hwtest.o
+obj-$(CONFIG_MMU_COLDFIRE)	+= kmap.o memory.o mcfmmu.o
 
-obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3)		+= sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 5437fff5fe0..3d84c1f2ffb 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -52,9 +52,9 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
 		unsigned long *descaddr;
 
 		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-			      "pmove %%psr,%1@"
-			      : "=a&" (descaddr)
-			      : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));
+			      "pmove %%psr,%1"
+			      : "=a&" (descaddr), "=m" (mmusr)
+			      : "a" (vaddr), "d" (get_fs().seg));
 		if (mmusr & (MMU_I|MMU_B|MMU_L))
 			return 0;
 		descaddr = phys_to_virt((unsigned long)descaddr);
@@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long start, end;
+		start = address & ICACHE_SET_MASK;
+		end = endaddr & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_icache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_icache(start, end);
+	} else if (CPU_IS_040_OR_060) {
 		address &= PAGE_MASK;
 
 		do {
@@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len)
 {
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long start, end;
+		start = addr & ICACHE_SET_MASK;
+		end = (addr + len) & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_icache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_icache(start, end);
+
+	} else if (CPU_IS_040_OR_060) {
 		asm volatile ("nop\n\t"
 			      ".chip 68040\n\t"
 			      "cpushp %%bc,(%0)\n\t"
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index a96394a0333..2bd7487440c 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -13,12 +13,10 @@
 
 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
 extern void die_if_kernel(char *, struct pt_regs *, long);
-extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */
 
 int send_fault_sig(struct pt_regs *regs)
 {
@@ -27,29 +25,15 @@ int send_fault_sig(struct pt_regs *regs)
 	siginfo.si_signo = current->thread.signo;
 	siginfo.si_code = current->thread.code;
 	siginfo.si_addr = (void *)current->thread.faddr;
-#ifdef DEBUG
-	printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code);
-#endif
+	pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr,
+		 siginfo.si_signo, siginfo.si_code);
 
 	if (user_mode(regs)) {
 		force_sig_info(siginfo.si_signo, &siginfo, current);
 	} else {
-		const struct exception_table_entry *fixup;
-
-		/* Are we prepared to handle this kernel fault? */
-		if ((fixup = search_exception_tables(regs->pc))) {
-			struct pt_regs *tregs;
-			/* Create a new four word stack frame, discarding the old
-			   one. */
-			regs->stkadj = frame_extra_sizes[regs->format];
-			tregs = (struct pt_regs *)((ulong)regs + regs->stkadj);
-			tregs->vector = regs->vector;
-			tregs->format = 0;
-			tregs->pc = fixup->fixup;
-			tregs->sr = regs->sr;
+		if (handle_kernel_fault(regs))
 			return -1;
-		}
 
 		//if (siginfo.si_signo == SIGBUS)
 		//	force_sig_info(siginfo.si_signo,
@@ -60,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs)
 		 * terminate things with extreme prejudice.
 		 */
 		if ((unsigned long)siginfo.si_addr < PAGE_SIZE)
-			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+			pr_alert("Unable to handle kernel NULL pointer dereference");
 		else
-			printk(KERN_ALERT "Unable to handle kernel access");
-		printk(" at virtual address %p\n", siginfo.si_addr);
+			pr_alert("Unable to handle kernel access");
+		pr_cont(" at virtual address %p\n", siginfo.si_addr);
 		die_if_kernel("Oops", regs, 0 /*error_code*/);
 		do_exit(SIGKILL);
 	}
@@ -87,13 +71,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
-	int write, fault;
+	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-#ifdef DEBUG
-	printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
-		regs->sr, regs->pc, address, error_code,
-		current->mm->pgd);
-#endif
+	pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
+		regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -102,6 +84,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, address);
@@ -129,17 +114,14 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
  * we can handle it..
  */
 good_area:
-#ifdef DEBUG
-	printk("do_page_fault: good_area\n");
-#endif
-	write = 0;
+	pr_debug("do_page_fault: good_area\n");
 	switch (error_code & 3) {
 		default:	/* 3: write, present */
 			/* fall through */
 		case 2:		/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto acc_err;
-			write++;
+			flags |= FAULT_FLAG_WRITE;
 			break;
 		case 1:		/* read, present */
 			goto acc_err;
@@ -154,10 +136,12 @@ good_area:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-#ifdef DEBUG
-	printk("handle_mm_fault returns %d\n",fault);
-#endif
+	fault = handle_mm_fault(mm, vma, address, flags);
+	pr_debug("handle_mm_fault returns %d\n", fault);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -165,10 +149,32 @@ good_area:
 			goto bus_err;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return 0;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8bc842554e5..acaff6a49e3 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -23,7 +23,7 @@
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/system.h>
+#include <asm/traps.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #ifdef CONFIG_ATARI
@@ -32,7 +32,19 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+void *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+extern void init_pointer_table(unsigned long ptable);
+extern pmd_t *zero_pgtable;
+#endif
+
+#ifdef CONFIG_MMU
 
 pg_data_t pg_data_map[MAX_NUMNODES];
 EXPORT_SYMBOL(pg_data_map);
@@ -47,7 +59,7 @@ EXPORT_SYMBOL(pg_data_table);
 void __init m68k_setup_node(int node)
 {
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
-	struct mem_info *info = m68k_memory + node;
+	struct m68k_mem_info *info = m68k_memory + node;
 	int i, end;
 
 	i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
@@ -62,58 +74,83 @@ void __init m68k_setup_node(int node)
 	node_set_online(node);
 }
 
+#else /* CONFIG_MMU */
+
 /*
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
  */
+void __init paging_init(void)
+{
+	/*
+	 * Make sure start_mem is page aligned, otherwise bootmem and
+	 * page_alloc get different views of the world.
+	 */
+	unsigned long end_mem = memory_end & PAGE_MASK;
+	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
 
-void *empty_zero_page;
-EXPORT_SYMBOL(empty_zero_page);
+	high_memory = (void *) end_mem;
 
-extern void init_pointer_table(unsigned long ptable);
+	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	memset(empty_zero_page, 0, PAGE_SIZE);
 
-/* References to section boundaries */
+	/*
+	 * Set up SFC/DFC registers (user data space).
+	 */
+	set_fs (USER_DS);
 
-extern pmd_t *zero_pgtable;
+	zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
+	free_area_init(zones_size);
+}
 
-void __init mem_init(void)
+#endif /* CONFIG_MMU */
+
+void free_initmem(void)
 {
-	pg_data_t *pgdat;
-	int codepages = 0;
-	int datapages = 0;
-	int initpages = 0;
-	int i;
+#ifndef CONFIG_MMU_SUN3
+	free_initmem_default(-1);
+#endif /* CONFIG_MMU_SUN3 */
+}
 
-#ifdef CONFIG_ATARI
-	if (MACH_IS_ATARI)
-		atari_stram_mem_init_hook();
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS	&vectors[0]
+#else
+#define VECTORS	_ramvec
 #endif
 
-	/* this will put all memory onto the freelists */
-	totalram_pages = num_physpages = 0;
-	for_each_online_pgdat(pgdat) {
-		num_physpages += pgdat->node_present_pages;
-
-		totalram_pages += free_all_bootmem_node(pgdat);
-		for (i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page = pgdat->node_mem_map + i;
-			char *addr = page_to_virt(page);
-
-			if (!PageReserved(page))
-				continue;
-			if (addr >= _text &&
-			    addr < _etext)
-				codepages++;
-			else if (addr >= __init_begin &&
-				 addr < __init_end)
-				initpages++;
-			else
-				datapages++;
-		}
-	}
+void __init print_memmap(void)
+{
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+	pr_notice("Virtual kernel memory layout:\n"
+		"    vector  : 0x%08lx - 0x%08lx   (%4ld KiB)\n"
+		"    kmap    : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+		"      .init : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+		"      .text : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+		"      .data : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+		"      .bss  : 0x%p" " - 0x%p" "   (%4d KiB)\n",
+		MLK(VECTORS, VECTORS + 256),
+		MLM(KMAP_START, KMAP_END),
+		MLM(VMALLOC_START, VMALLOC_END),
+		MLM(PAGE_OFFSET, (unsigned long)high_memory),
+		MLK_ROUNDUP(__init_begin, __init_end),
+		MLK_ROUNDUP(_stext, _etext),
+		MLK_ROUNDUP(_sdata, _edata),
+		MLK_ROUNDUP(__bss_start, __bss_stop));
+}
+
+static inline void init_pointer_tables(void)
+{
+#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+	int i;
 
-#ifndef CONFIG_SUN3
 	/* insert pointer tables allocated so far into the tablelist */
 	init_pointer_table((unsigned long)kernel_pg_dir);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -125,26 +162,20 @@ void __init mem_init(void)
 	if (zero_pgtable)
 		init_pointer_table((unsigned long)zero_pgtable);
 #endif
+}
 
-	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
-	       nr_free_pages() << (PAGE_SHIFT-10),
-	       totalram_pages << (PAGE_SHIFT-10),
-	       codepages << (PAGE_SHIFT-10),
-	       datapages << (PAGE_SHIFT-10),
-	       initpages << (PAGE_SHIFT-10));
+void __init mem_init(void)
+{
+	/* this will put all memory onto the freelists */
+	free_all_bootmem();
+	init_pointer_tables();
+	mem_init_print_info(NULL);
+	print_memmap();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	int pages = 0;
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-		pages++;
-	}
-	printk ("Freeing initrd memory: %dk freed\n", pages);
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 69345849454..6e4955bc542 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -20,7 +20,6 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/io.h>
-#include <asm/system.h>
 
 #undef DEBUG
 
@@ -28,9 +27,9 @@
 
 /*
  * For 040/060 we can use the virtual memory area like other architectures,
- * but for 020/030 we want to use early termination page descriptor and we
+ * but for 020/030 we want to use early termination page descriptors and we
  * can't mix this with normal page descriptors, so we have to copy that code
- * (mm/vmalloc.c) and return appriorate aligned addresses.
+ * (mm/vmalloc.c) and return appropriately aligned addresses.
  */
 
 #ifdef CPU_M68040_OR_M68060_ONLY
@@ -171,7 +170,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 			break;
 		}
 	} else {
-		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
+			     _PAGE_DIRTY | _PAGE_READWRITE);
 		switch (cacheflag) {
 		case IOMAP_NOCACHE_SER:
 		case IOMAP_NOCACHE_NONSER:
@@ -224,7 +224,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 EXPORT_SYMBOL(__ioremap);
 
 /*
- * Unmap a ioremap()ed region again
+ * Unmap an ioremap()ed region again
 */
 void iounmap(void __iomem *addr)
 {
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(iounmap);
 
 /*
  * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
+ * Currently it doesn't free pointer/page tables anymore but this
+ * wasn't used anyway and might be added later.
 */
 void __iounmap(void *addr, unsigned long size)
 {
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644
index 00000000000..f58fafe7e4c
--- /dev/null
+++ b/arch/m68k/mm/mcfmmu.c
@@ -0,0 +1,195 @@
+/*
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the Coldfire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/mcf_pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern unsigned long num_pages;
+
+/*
+ * ColdFire paging_init derived from sun3.
+ */
+void __init paging_init(void)
+{
+	pgd_t *pg_dir;
+	pte_t *pg_table;
+	unsigned long address, size;
+	unsigned long next_pgtable, bootmem_end;
+	unsigned long zones_size[MAX_NR_ZONES];
+	enum zone_type zone;
+	int i;
+
+	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+	memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+	pg_dir = swapper_pg_dir;
+	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+	size = num_pages * sizeof(pte_t);
+	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+	next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+
+	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+	address = PAGE_OFFSET;
+	while (address < (unsigned long)high_memory) {
+		pg_table = (pte_t *) next_pgtable;
+		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+		pgd_val(*pg_dir) = (unsigned long) pg_table;
+		pg_dir++;
+
+		/* now change pg_table to kernel virtual addresses */
+		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
+			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+			if (address >= (unsigned long) high_memory)
+				pte_val(pte) = 0;
+
+			set_pte(pg_table, pte);
+			address += PAGE_SIZE;
+		}
+	}
+
+	current->mm = NULL;
+
+	for (zone = 0; zone < MAX_NR_ZONES; zone++)
+		zones_size[zone] = 0x0;
+	zones_size[ZONE_DMA] = num_pages;
+	free_area_init(zones_size);
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+	unsigned long flags, mmuar, mmutr;
+	struct mm_struct *mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int asid;
+
+	local_irq_save(flags);
+
+	mmuar = (dtlb) ? mmu_read(MMUAR) :
+		regs->pc + (extension_word * sizeof(long));
+
+	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+	if (!mm) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pgd = pgd_offset(mm, mmuar);
+	if (pgd_none(*pgd)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pmd = pmd_offset(pgd, mmuar);
+	if (pmd_none(*pmd)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+				: pte_offset_map(pmd, mmuar);
+	if (pte_none(*pte) || !pte_present(*pte)) {
+		local_irq_restore(flags);
+		return -1;
+	}
+
+	if (write) {
+		if (!pte_write(*pte)) {
+			local_irq_restore(flags);
+			return -1;
+		}
+		set_pte(pte, pte_mkdirty(*pte));
+	}
+
+	set_pte(pte, pte_mkyoung(*pte));
+	asid = mm->context & 0xff;
+	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+		set_pte(pte, pte_wrprotect(*pte));
+
+	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
+	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
+		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
+	mmu_write(MMUTR, mmutr);
+
+	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+	if (dtlb)
+		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+	else
+		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+/*
+ * Initialize the context management stuff.
+ * The following was taken from arch/ppc/mmu_context.c
+ */
+void __init mmu_context_init(void)
+{
+	/*
+	 * Some processors have too few contexts to reserve one for
+	 * init_mm, and require using context 0 for a normal task.
+	 * Other processors reserve the use of context zero for the kernel.
+	 * This code assumes FIRST_CONTEXT < 32.
+	 */
+	context_map[0] = (1 << FIRST_CONTEXT) - 1;
+	next_mmu_context = FIRST_CONTEXT;
+	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP. If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ *  -- paulus
+ */
+void steal_context(void)
+{
+	struct mm_struct *mm;
+	/*
+	 * free up context `next_mmu_context'
+	 * if we shouldn't free context 0, don't...
+	 */
+	if (next_mmu_context < FIRST_CONTEXT)
+		next_mmu_context = FIRST_CONTEXT;
+	mm = context_mm[next_mmu_context];
+	flush_tlb_mm(mm);
+	destroy_context(mm);
+}
+
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 34c77ce24fb..51bc9d258ed 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -17,7 +17,6 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/machdep.h>
 
@@ -203,7 +202,9 @@ static inline void pushcl040(unsigned long paddr)
 
 void cache_clear (unsigned long paddr, int len)
 {
-    if (CPU_IS_040_OR_060) {
+    if (CPU_IS_COLDFIRE) {
+	clear_cf_bcache(0, DCACHE_MAX_ADDR);
+    } else if (CPU_IS_040_OR_060) {
 	int tmp;
 
 	/*
@@ -250,7 +251,9 @@ EXPORT_SYMBOL(cache_clear);
 
 void cache_push (unsigned long paddr, int len)
 {
-    if (CPU_IS_040_OR_060) {
+    if (CPU_IS_COLDFIRE) {
+	flush_cf_bcache(0, DCACHE_MAX_ADDR);
+    } else if (CPU_IS_040_OR_060) {
 	int tmp = PAGE_SIZE;
 
 	/*
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 02b7a03e422..b958916e5ea 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -24,7 +24,6 @@
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/system.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -46,7 +45,7 @@ EXPORT_SYMBOL(mm_cachebits);
 #endif
 
 /* size of memory already mapped in head.S */
-#define INIT_MAPPED_SIZE	(4UL<<20)
+extern __initdata unsigned long m68k_init_mapped_size;
 
 extern unsigned long availmem;
 
@@ -234,7 +233,7 @@ void __init paging_init(void)
 			printk("Fix your bootloader or use a memfile to make use of this area!\n");
 			m68k_num_memory--;
 			memmove(m68k_memory + i, m68k_memory + i + 1,
-				(m68k_num_memory - i) * sizeof(struct mem_info));
+				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
 			continue;
 		}
 		addr = m68k_memory[i].addr + m68k_memory[i].size;
@@ -272,10 +271,12 @@ void __init paging_init(void)
 	 */
 	addr = m68k_memory[0].addr;
 	size = m68k_memory[0].size;
-	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
+	free_bootmem_node(NODE_DATA(0), availmem,
+			  min(m68k_init_mapped_size, size) - (availmem - addr));
 	map_node(0);
-	if (size > INIT_MAPPED_SIZE)
-		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);
+	if (size > m68k_init_mapped_size)
+		free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
+				  size - m68k_init_mapped_size);
 
 	for (i = 1; i < m68k_num_memory; i++)
 		map_node(i);
@@ -300,20 +301,8 @@ void __init paging_init(void)
 		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
 		free_area_init_node(i, zones_size,
 				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+		if (node_present_pages(i))
+			node_set_state(i, N_NORMAL_MEMORY);
 	}
 }
 
-void free_initmem(void)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)__init_begin;
-	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
-		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
-		init_page_count(virt_to_page(addr));
-		free_page(addr);
-		totalram_pages++;
-	}
-}
-
-
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 1b902dbd437..269f81158a3 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -21,7 +21,6 @@
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 
@@ -31,10 +30,6 @@ const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
 
 extern unsigned long num_pages;
 
-void free_initmem(void)
-{
-}
-
 /* For the sun3 we try to follow the i386 paging_init() more closely */
 /* start_mem and end_mem have PAGE_OFFSET added already */
 /* now sets up tables using sun3 PTEs rather than i386 as before. --m */
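Two of the new code paths above are easy to study in isolation. First, the ColdFire icache flush in the cache.c hunks: flush_cf_icache() works on cache-set indices, so flush_icache_range() masks both ends of the address range with ICACHE_SET_MASK, and a masked start greater than the masked end means the range has wrapped past the top of the set-index space and must be flushed in two pieces. The standalone C sketch below mirrors that split; the mask value, the limit, and the stub flush function are hypothetical stand-ins for illustration (the kernel's real values are CPU-model specific), not the kernel's definitions.

#include <stdio.h>

/* Hypothetical stand-ins for ICACHE_SET_MASK / ICACHE_MAX_ADDR;
 * the kernel derives the real values from the CPU model. */
#define ICACHE_SET_MASK	0x0fffUL
#define ICACHE_MAX_ADDR	ICACHE_SET_MASK

/* Stub for flush_cf_icache(): just report which set range is flushed. */
static void flush_cf_icache(unsigned long start, unsigned long end)
{
	printf("flush sets 0x%03lx..0x%03lx\n", start, end);
}

/* Same structure as the patched flush_icache_range(): mask both ends
 * down to set indices and split the flush when the range wraps. */
static void flush_icache_range_sketch(unsigned long address, unsigned long endaddr)
{
	unsigned long start = address & ICACHE_SET_MASK;
	unsigned long end = endaddr & ICACHE_SET_MASK;

	if (start > end) {
		flush_cf_icache(0, end);	/* low piece up to the wrap point */
		end = ICACHE_MAX_ADDR;		/* then the high piece to the top */
	}
	flush_cf_icache(start, end);
}

int main(void)
{
	flush_icache_range_sketch(0x100, 0x200);	/* no wrap: one flush */
	flush_icache_range_sketch(0xe00, 0x1100);	/* wraps: two flushes */
	return 0;
}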
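Second, the context bitmap seeding in the new mcfmmu.c: mmu_context_init() sets context_map[0] = (1 << FIRST_CONTEXT) - 1, a mask with bits 0 through FIRST_CONTEXT - 1 set, which reserves the low context numbers (typically context 0, used by the kernel) before any allocation runs. A minimal sketch, with hypothetical FIRST_CONTEXT/LAST_CONTEXT values in place of the CPU-specific ones the kernel takes from <asm/mmu_context.h>:

#include <stdio.h>
#include <limits.h>

/* Hypothetical context-numbering bounds for illustration only. */
#define FIRST_CONTEXT	1
#define LAST_CONTEXT	255

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
static unsigned long nr_free_contexts;

/* Mirrors mmu_context_init(): (1UL << FIRST_CONTEXT) - 1 sets the low
 * FIRST_CONTEXT bits, marking contexts 0..FIRST_CONTEXT-1 as
 * permanently in use before any task allocates a context. */
static void mmu_context_init_sketch(void)
{
	context_map[0] = (1UL << FIRST_CONTEXT) - 1;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}

int main(void)
{
	mmu_context_init_sketch();
	printf("map word 0 = %#lx, %lu contexts free\n",
	       context_map[0], nr_free_contexts);
	return 0;
}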