Diffstat (limited to 'arch/avr32/mm')
-rw-r--r--  arch/avr32/mm/cache.c         |  49
-rw-r--r--  arch/avr32/mm/dma-coherent.c  |  29
-rw-r--r--  arch/avr32/mm/fault.c         | 229
-rw-r--r--  arch/avr32/mm/init.c          | 391
-rw-r--r--  arch/avr32/mm/ioremap.c       | 124
-rw-r--r--  arch/avr32/mm/tlb.c           | 187
6 files changed, 261 insertions, 748 deletions
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
index 450515b245a..85d635cd7b2 100644
--- a/arch/avr32/mm/cache.c
+++ b/arch/avr32/mm/cache.c
@@ -13,6 +13,7 @@
 #include <asm/cachectl.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/syscalls.h>
 
 /*
  * If you attempt to flush anything more than this, you need superuser
@@ -22,18 +23,30 @@
 
 void invalidate_dcache_region(void *start, size_t size)
 {
-	unsigned long v, begin, end, linesz;
+	unsigned long v, begin, end, linesz, mask;
 
 	linesz = boot_cpu_data.dcache.linesz;
+	mask = linesz - 1;
 
-	//printk("invalidate dcache: %p + %u\n", start, size);
+	/* when first and/or last cachelines are shared, flush them
+	 * instead of invalidating ... never discard valid data!
+	 */
+	begin = (unsigned long)start;
+	end = begin + size;
 
-	/* You asked for it, you got it */
-	begin = (unsigned long)start & ~(linesz - 1);
-	end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
+	if (begin & mask) {
+		flush_dcache_line(start);
+		begin += linesz;
+	}
+	if (end & mask) {
+		flush_dcache_line((void *)end);
+		end &= ~mask;
+	}
 
+	/* remaining cachelines only need invalidation */
 	for (v = begin; v < end; v += linesz)
 		invalidate_dcache_line((void *)v);
+	flush_write_buffer();
 }
 
 void clean_dcache_region(void *start, size_t size)
@@ -98,29 +111,19 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	__flush_icache_range(start & ~(linesz - 1),
 			     (end + linesz - 1) & ~(linesz - 1));
 }
+EXPORT_SYMBOL(flush_icache_range);
 
 /*
- * This one is called from do_no_page(), do_swap_page() and install_page().
+ * This one is called from __do_fault() and do_swap_page().
  */
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
 	if (vma->vm_flags & VM_EXEC) {
-		void *v = kmap(page);
+		void *v = page_address(page);
 		__flush_icache_range((unsigned long)v,
 				     (unsigned long)v + PAGE_SIZE);
-		kunmap(v);
 	}
 }
 
-/*
- * This one is used by copy_to_user_page()
- */
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long addr, int len)
-{
-	if (vma->vm_flags & VM_EXEC)
-		flush_icache_range(addr, addr + len);
-}
-
 asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
 {
 	int ret;
@@ -148,3 +151,13 @@ asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
 out:
 	return ret;
 }
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+	memcpy(dst, src, len);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_range((unsigned long)dst,
+				   (unsigned long)dst + len);
+}
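Note on the invalidate_dcache_region() change above: a partially covered first or last cache line is flushed (written back, then invalidated) rather than invalidated outright, so valid data sharing those lines is never discarded. Below is a minimal, compilable sketch of that alignment logic; flush_line() and invalidate_line() are hypothetical stand-ins for the AVR32 primitives, each assumed to act on the whole cache line containing the given address.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the hardware cache operations. */
static void flush_line(unsigned long addr)      { printf("flush      %#lx\n", addr); }
static void invalidate_line(unsigned long addr) { printf("invalidate %#lx\n", addr); }

/* linesz must be a power of two. */
static void invalidate_region(void *start, size_t size, unsigned long linesz)
{
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + size;
	unsigned long mask = linesz - 1;
	unsigned long v;

	/* A partial first line may hold unrelated valid data: flush it. */
	if (begin & mask) {
		flush_line(begin);
		begin = (begin & ~mask) + linesz;
	}
	/* Same for a partial last line. */
	if (end & mask) {
		flush_line(end);
		end &= ~mask;
	}
	/* Fully covered lines can simply be discarded. */
	for (v = begin; v < end; v += linesz)
		invalidate_line(v);
}

int main(void)
{
	invalidate_region((void *)0x1010, 0x75, 0x20);	/* unaligned both ends */
	return 0;
}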
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 44ab8a7bdae..50cdb5b10f0 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -7,11 +7,13 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
 
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
 	/*
 	 * No need to sync an uncached area
@@ -21,13 +23,13 @@ void dma_cache_sync(void *vaddr, size_t size, int direction)
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dma_cache_inv(vaddr, size);
+		invalidate_dcache_region(vaddr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dma_cache_wback(vaddr, size);
+		clean_dcache_region(vaddr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dma_cache_wback_inv(vaddr, size);
+		flush_dcache_region(vaddr, size);
 		break;
 	default:
 		BUG();
@@ -41,6 +43,13 @@ static struct page *__dma_alloc(struct device *dev, size_t size,
 	struct page *page, *free, *end;
 	int order;
 
+	/* Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on AVR32 as it is not supported on this
+	 * platform--see CONFIG_HUGETLB_PAGE. */
+	gfp &= ~(__GFP_COMP);
+
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
@@ -112,16 +121,21 @@ void dma_free_coherent(struct device *dev, size_t size,
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
-#if 0
 void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
+	dma_addr_t phys;
 
 	page = __dma_alloc(dev, size, handle, gfp);
+	if (!page)
+		return NULL;
+
+	phys = page_to_phys(page);
+	*handle = phys;
 
 	/* Now, map the page into P3 with write-combining turned on */
-	return __ioremap(page_to_phys(page), size, _PAGE_BUFFER);
+	return __ioremap(phys, size, _PAGE_BUFFER);
 }
 EXPORT_SYMBOL(dma_alloc_writecombine);
 
@@ -132,8 +146,7 @@ void dma_free_writecombine(struct device *dev, size_t size,
 
 	iounmap(cpu_addr);
 
-	page = bus_to_page(handle);
+	page = phys_to_page(handle);
 	__dma_free(dev, size, page, handle);
 }
 EXPORT_SYMBOL(dma_free_writecombine);
-#endif
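Note on dma_cache_sync() above: each DMA direction maps to exactly one of the three region helpers from cache.c. Here is the same mapping restated as a standalone kernel-style sketch; the name sync_for_device() is invented, while the helpers and the BUG() on an unknown direction follow the diff.

#include <linux/dma-mapping.h>
#include <linux/bug.h>

#include <asm/cacheflush.h>	/* AVR32 *_dcache_region() helpers */

static void sync_for_device(void *vaddr, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:	/* device writes: discard stale CPU lines */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:	/* device reads: make dirty lines visible */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:	/* both: write back, then discard */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}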
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 678557260a3..0eca9332719 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -12,73 +12,46 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
-#include <asm/kdebug.h>
 #include <asm/mmu_context.h>
 #include <asm/sysreg.h>
-#include <asm/uaccess.h>
 #include <asm/tlb.h>
-
-#ifdef DEBUG
-static void dump_code(unsigned long pc)
-{
-	char *p = (char *)pc;
-	char val;
-	int i;
-
-
-	printk(KERN_DEBUG "Code:");
-	for (i = 0; i < 16; i++) {
-		if (__get_user(val, p + i))
-			break;
-		printk(" %02x", val);
-	}
-	printk("\n");
-}
-#endif
+#include <asm/uaccess.h>
 
 #ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
+	int ret = 0;
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
+	if (!user_mode(regs)) {
+		if (kprobe_running() && kprobe_fault_handler(regs, trap))
+			ret = 1;
+	}
 
-static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
-				    int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.trapnr = trap,
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return ret;
 }
 #else
-static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
-				    int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
-	return NOTIFY_DONE;
+	return 0;
 }
 #endif
 
+int exception_trace = 1;
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
  *
  * ecr is the Exception Cause Register. Possible values are:
- *   5: Page not found (instruction access)
  *   6: Protection fault (instruction access)
- *  12: Page not found (read access)
- *  13: Page not found (write access)
- *  14: Protection fault (read access)
- *  15: Protection fault (write access)
+ *  15: Protection fault (read access)
+ *  16: Protection fault (write access)
+ *  20: Page not found (instruction access)
+ *  24: Page not found (read access)
+ *  28: Page not found (write access)
  */
 asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 {
@@ -88,10 +61,12 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	const struct exception_table_entry *fixup;
 	unsigned long address;
 	unsigned long page;
-	int writeaccess = 0;
+	long signr;
+	int code;
+	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	if (notify_page_fault(DIE_PAGE_FAULT, regs,
-			      ecr, SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs, ecr))
 		return;
 
 	address = sysreg_read(TLBEAR);
@@ -99,6 +74,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	tsk = current;
 	mm = tsk->mm;
 
+	signr = SIGSEGV;
+	code = SEGV_MAPERR;
+
 	/*
 	 * If we're in an interrupt or have no user context, we must
 	 * not take the fault...
@@ -108,6 +86,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
 	local_irq_enable();
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, address);
@@ -125,7 +106,8 @@
 	 * can handle it...
 	 */
 good_area:
-	//pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags);
+	code = SEGV_ACCERR;
+
 	switch (ecr) {
 	case ECR_PROTECTION_X:
 	case ECR_TLB_MISS_X:
@@ -141,7 +123,7 @@ good_area:
 	case ECR_TLB_MISS_W:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		writeaccess = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 	default:
 		panic("Unhandled case %lu in do_page_fault!", ecr);
@@ -152,22 +134,37 @@ good_area:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would have
+			 * already released it in __lock_page_or_retry() in
+			 * mm/filemap.c.
+			 */
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
@@ -176,46 +173,24 @@ survive:
 	 * map. Fix it, but check if it's kernel or user first...
 	 */
 bad_area:
-	pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
-		 tsk->comm, tsk->pid, address, ecr);
-
 	up_read(&mm->mmap_sem);
 
 	if (user_mode(regs)) {
-		/* Hmm...we have to pass address and ecr somehow... */
-		/* tsk->thread.address = address;
-		   tsk->thread.error_code = ecr; */
-#ifdef DEBUG
-		show_regs(regs);
-		dump_code(regs->pc);
-
-		page = sysreg_read(PTBR);
-		printk("ptbr = %08lx", page);
-		if (page) {
-			page = ((unsigned long *)page)[address >> 22];
-			printk(" pgd = %08lx", page);
-			if (page & _PAGE_PRESENT) {
-				page &= PAGE_MASK;
-				address &= 0x003ff000;
-				page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
-				printk(" pte = %08lx\n", page);
-			}
-		}
-#endif
-		pr_debug("Sending SIGSEGV to PID %d...\n",
-			 tsk->pid);
-		force_sig(SIGSEGV, tsk);
+		if (exception_trace && printk_ratelimit())
+			printk("%s%s[%d]: segfault at %08lx pc %08lx "
+			       "sp %08lx ecr %lu\n",
+			       is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
+			       tsk->comm, tsk->pid, address, regs->pc,
+			       regs->sp, ecr);
+		_exception(SIGSEGV, regs, code, address);
 		return;
 	}
 
 no_context:
-	pr_debug("No context\n");
-
 	/* Are we prepared to handle this kernel fault? */
 	fixup = search_exception_tables(regs->pc);
 	if (fixup) {
 		regs->pc = fixup->fixup;
-		pr_debug("Found fixup at %08lx\n", fixup->fixup);
 		return;
 	}
 
@@ -230,10 +205,11 @@ no_context:
 	printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
-	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
 
 	page = sysreg_read(PTBR);
 	printk(KERN_ALERT "ptbr = %08lx", page);
+	if (address >= TASK_SIZE)
+		page = (unsigned long)swapper_pg_dir;
 	if (page) {
 		page = ((unsigned long *)page)[address >> 22];
 		printk(" pgd = %08lx", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
 			address &= 0x003ff000;
 			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
-			printk(" pte = %08lx\n", page);
+			printk(" pte = %08lx", page);
 		}
 	}
-	die("\nOops", regs, ecr);
-	do_exit(SIGKILL);
+	printk("\n");
+	die("Kernel access of bad area", regs, signr);
+	return;
 
 /*
  * We ran out of memory, or some other thing happened to us
  * that made us unable to handle the page fault gracefully.
  */
 out_of_memory:
-	printk("Out of memory\n");
 	up_read(&mm->mmap_sem);
-	if (current->pid == 1) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: Killing process %s\n", tsk->comm);
-	if (user_mode(regs))
-		do_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
 
-	/*
-	 * Send a sigbus, regardless of whether we were in kernel or
-	 * user mode.
-	 */
-	/* address, error_code, trap_no, ... */
-#ifdef DEBUG
-	show_regs(regs);
-	dump_code(regs->pc);
-#endif
-	pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
-	force_sig(SIGBUS, tsk);
-
 	/* Kernel mode? Handle exceptions or die */
+	signr = SIGBUS;
+	code = BUS_ADRERR;
 	if (!user_mode(regs))
 		goto no_context;
+
+	if (exception_trace)
+		printk("%s%s[%d]: bus error at %08lx pc %08lx "
+		       "sp %08lx ecr %lu\n",
+		       is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
+		       tsk->comm, tsk->pid, address, regs->pc,
+		       regs->sp, ecr);
+
+	_exception(SIGBUS, regs, BUS_ADRERR, address);
 }
 
 asmlinkage void do_bus_error(unsigned long addr, int write_access,
@@ -292,24 +262,5 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
 	       addr, write_access ? "write" : "read");
 	printk(KERN_INFO "DTLB dump:\n");
 	dump_dtlb();
-	die("Bus Error", regs, write_access);
-	do_exit(SIGKILL);
+	die("Bus Error", regs, SIGKILL);
 }
-
-/*
- * This functionality is currently not possible to implement because
- * we're using segmentation to ensure a fixed mapping of the kernel
- * virtual address space.
- *
- * It would be possible to implement this, but it would require us to
- * disable segmentation at startup and load the kernel mappings into
- * the TLB like any other pages. There will be lots of trickery to
- * avoid recursive invocation of the TLB miss handler, though...
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
-
-}
-EXPORT_SYMBOL(kernel_map_pages);
-#endif
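Note on the do_page_fault() rewrite above: the old survive:/VM_FAULT_MINOR switch is replaced by the generic retry protocol. The first attempt passes FAULT_FLAG_ALLOW_RETRY; when handle_mm_fault() returns VM_FAULT_RETRY it has already dropped mmap_sem, and the handler retries exactly once with FAULT_FLAG_TRIED set. Below is a compilable userspace model of that flag dance, using stand-in constants and a fake fault function rather than kernel headers.

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x04	/* stand-in values, not the kernel's */
#define FAULT_FLAG_TRIED	0x10
#define VM_FAULT_RETRY		0x0400

/* stand-in for handle_mm_fault(): pretend the page needs I/O once */
static int fake_fault(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) ? VM_FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = fake_fault(flags);
	if (fault & VM_FAULT_RETRY) {
		flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry only once */
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	printf("fault completed with flags %#x\n", flags);
	return 0;
}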
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 3e6c4103980..def5391d927 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -7,14 +7,14 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/init.h>
-#include <linux/initrd.h>
 #include <linux/mmzone.h>
+#include <linux/module.h>
 #include <linux/bootmem.h>
 #include <linux/pagemap.h>
-#include <linux/pfn.h>
 #include <linux/nodemask.h>
 
 #include <asm/page.h>
@@ -25,295 +25,16 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
 
 struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 /*
  * Cache of MMU context last used.
  */
 unsigned long mmu_context_cache = NO_CONTEXT;
 
-#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
-#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
-
-void show_mem(void)
-{
-	int total = 0, reserved = 0, cached = 0;
-	int slab = 0, free = 0, shared = 0;
-	pg_data_t *pgdat;
-
-	printk("Mem-info:\n");
-	show_free_areas();
-
-	for_each_online_pgdat(pgdat) {
-		struct page *page, *end;
-
-		page = pgdat->node_mem_map;
-		end = page + pgdat->node_spanned_pages;
-
-		do {
-			total++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (PageSlab(page))
-				slab++;
-			else if (!page_count(page))
-				free++;
-			else
-				shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
-	}
-
-	printk ("%d pages of RAM\n", total);
-	printk ("%d free pages\n", free);
-	printk ("%d reserved pages\n", reserved);
-	printk ("%d slab pages\n", slab);
-	printk ("%d pages shared\n", shared);
-	printk ("%d pages swap cached\n", cached);
-}
-
-static void __init print_memory_map(const char *what,
-				    struct tag_mem_range *mem)
-{
-	printk ("%s:\n", what);
-	for (; mem; mem = mem->next) {
-		printk ("  %08lx - %08lx\n",
-			(unsigned long)mem->addr,
-			(unsigned long)(mem->addr + mem->size));
-	}
-}
-
-#define MAX_LOWMEM	HIGHMEM_START
-#define MAX_LOWMEM_PFN	PFN_DOWN(MAX_LOWMEM)
-
-/*
- * Sort a list of memory regions in-place by ascending address.
- *
- * We're using bubble sort because we only have singly linked lists
- * with few elements.
- */
-static void __init sort_mem_list(struct tag_mem_range **pmem)
-{
-	int done;
-	struct tag_mem_range **a, **b;
-
-	if (!*pmem)
-		return;
-
-	do {
-		done = 1;
-		a = pmem, b = &(*pmem)->next;
-		while (*b) {
-			if ((*a)->addr > (*b)->addr) {
-				struct tag_mem_range *tmp;
-				tmp = (*b)->next;
-				(*b)->next = *a;
-				*a = *b;
-				*b = tmp;
-				done = 0;
-			}
-			a = &(*a)->next;
-			b = &(*a)->next;
-		}
-	} while (!done);
-}
-
-/*
- * Find a free memory region large enough for storing the
- * bootmem bitmap.
- */
-static unsigned long __init
-find_bootmap_pfn(const struct tag_mem_range *mem)
-{
-	unsigned long bootmap_pages, bootmap_len;
-	unsigned long node_pages = PFN_UP(mem->size);
-	unsigned long bootmap_addr = mem->addr;
-	struct tag_mem_range *reserved = mem_reserved;
-	struct tag_mem_range *ramdisk = mem_ramdisk;
-	unsigned long kern_start = virt_to_phys(_stext);
-	unsigned long kern_end = virt_to_phys(_end);
-
-	bootmap_pages = bootmem_bootmap_pages(node_pages);
-	bootmap_len = bootmap_pages << PAGE_SHIFT;
-
-	/*
-	 * Find a large enough region without reserved pages for
-	 * storing the bootmem bitmap. We can take advantage of the
-	 * fact that all lists have been sorted.
-	 *
-	 * We have to check explicitly reserved regions as well as the
-	 * kernel image and any RAMDISK images...
-	 *
-	 * Oh, and we have to make sure we don't overwrite the taglist
-	 * since we're going to use it until the bootmem allocator is
-	 * fully up and running.
-	 */
-	while (1) {
-		if ((bootmap_addr < kern_end) &&
-		    ((bootmap_addr + bootmap_len) > kern_start))
-			bootmap_addr = kern_end;
-
-		while (reserved &&
-		       (bootmap_addr >= (reserved->addr + reserved->size)))
-			reserved = reserved->next;
-
-		if (reserved &&
-		    ((bootmap_addr + bootmap_len) >= reserved->addr)) {
-			bootmap_addr = reserved->addr + reserved->size;
-			continue;
-		}
-
-		while (ramdisk &&
-		       (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
-			ramdisk = ramdisk->next;
-
-		if (!ramdisk ||
-		    ((bootmap_addr + bootmap_len) < ramdisk->addr))
-			break;
-
-		bootmap_addr = ramdisk->addr + ramdisk->size;
-	}
-
-	if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
-		return ~0UL;
-
-	return PFN_UP(bootmap_addr);
-}
-
-void __init setup_bootmem(void)
-{
-	unsigned bootmap_size;
-	unsigned long first_pfn, bootmap_pfn, pages;
-	unsigned long max_pfn, max_low_pfn;
-	unsigned long kern_start = virt_to_phys(_stext);
-	unsigned long kern_end = virt_to_phys(_end);
-	unsigned node = 0;
-	struct tag_mem_range *bank, *res;
-
-	sort_mem_list(&mem_phys);
-	sort_mem_list(&mem_reserved);
-
-	print_memory_map("Physical memory", mem_phys);
-	print_memory_map("Reserved memory", mem_reserved);
-
-	nodes_clear(node_online_map);
-
-	if (mem_ramdisk) {
-#ifdef CONFIG_BLK_DEV_INITRD
-		initrd_start = __va(mem_ramdisk->addr);
-		initrd_end = initrd_start + mem_ramdisk->size;
-
-		print_memory_map("RAMDISK images", mem_ramdisk);
-		if (mem_ramdisk->next)
-			printk(KERN_WARNING
-			       "Warning: Only the first RAMDISK image "
-			       "will be used\n");
-		sort_mem_list(&mem_ramdisk);
-#else
-		printk(KERN_WARNING "RAM disk image present, but "
-		       "no initrd support in kernel!\n");
-#endif
-	}
-
-	if (mem_phys->next)
-		printk(KERN_WARNING "Only using first memory bank\n");
-
-	for (bank = mem_phys; bank; bank = NULL) {
-		first_pfn = PFN_UP(bank->addr);
-		max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
-		bootmap_pfn = find_bootmap_pfn(bank);
-		if (bootmap_pfn > max_pfn)
-			panic("No space for bootmem bitmap!\n");
-
-		if (max_low_pfn > MAX_LOWMEM_PFN) {
-			max_low_pfn = MAX_LOWMEM_PFN;
-#ifndef CONFIG_HIGHMEM
-			/*
-			 * Lowmem is memory that can be addressed
-			 * directly through P1/P2
-			 */
-			printk(KERN_WARNING
-			       "Node %u: Only %ld MiB of memory will be used.\n",
-			       node, MAX_LOWMEM >> 20);
-			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-#else
-#error HIGHMEM is not supported by AVR32 yet
-#endif
-		}
-
-		/* Initialize the boot-time allocator with low memory only. */
-		bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
-						 first_pfn, max_low_pfn);
-
-		printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
-		       node, NODE_DATA(node)->bdata,
-		       NODE_DATA(node)->bdata->node_bootmem_map);
-
-		/*
-		 * Register fully available RAM pages with the bootmem
-		 * allocator.
-		 */
-		pages = max_low_pfn - first_pfn;
-		free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
-				   PFN_PHYS(pages));
-
-		/*
-		 * Reserve space for the kernel image (if present in
-		 * this node)...
-		 */
-		if ((kern_start >= PFN_PHYS(first_pfn)) &&
-		    (kern_start < PFN_PHYS(max_pfn))) {
-			printk("Node %u: Kernel image %08lx - %08lx\n",
-			       node, kern_start, kern_end);
-			reserve_bootmem_node(NODE_DATA(node), kern_start,
-					     kern_end - kern_start);
-		}
-
-		/* ...the bootmem bitmap... */
-		reserve_bootmem_node(NODE_DATA(node),
-				     PFN_PHYS(bootmap_pfn),
-				     bootmap_size);
-
-		/* ...any RAMDISK images... */
-		for (res = mem_ramdisk; res; res = res->next) {
-			if (res->addr > PFN_PHYS(max_pfn))
-				break;
-
-			if (res->addr >= PFN_PHYS(first_pfn)) {
-				printk("Node %u: RAMDISK %08lx - %08lx\n",
-				       node,
-				       (unsigned long)res->addr,
-				       (unsigned long)(res->addr + res->size));
-				reserve_bootmem_node(NODE_DATA(node),
-						     res->addr, res->size);
-			}
-		}
-
-		/* ...and any other reserved regions. */
-		for (res = mem_reserved; res; res = res->next) {
-			if (res->addr > PFN_PHYS(max_pfn))
-				break;
-
-			if (res->addr >= PFN_PHYS(first_pfn)) {
-				printk("Node %u: Reserved %08lx - %08lx\n",
-				       node,
-				       (unsigned long)res->addr,
-				       (unsigned long)(res->addr + res->size));
-				reserve_bootmem_node(NODE_DATA(node),
-						     res->addr, res->size);
-			}
-		}
-
-		node_set_online(node);
-	}
-}
-
 /*
  * paging_init() sets up the page tables
  *
@@ -347,27 +68,16 @@ void __init paging_init(void)
 
 	zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
 
-	{
-		pgd_t *pg_dir;
-		int i;
-
-		pg_dir = swapper_pg_dir;
-		sysreg_write(PTBR, (unsigned long)pg_dir);
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
-
-		enable_mmu();
-		printk ("CPU: Paging enabled\n");
-	}
+	sysreg_write(PTBR, (unsigned long)swapper_pg_dir);
+	enable_mmu();
+	printk ("CPU: Paging enabled\n");
 
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long low, start_pfn;
 
-		start_pfn = pgdat->bdata->node_boot_start;
-		start_pfn >>= PAGE_SHIFT;
+		start_pfn = pgdat->bdata->node_min_pfn;
 		low = pgdat->bdata->node_low_pfn;
 
 		memset(zones_size, 0, sizeof(zones_size));
@@ -376,7 +86,7 @@ void __init paging_init(void)
 		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
 		       nid, start_pfn, low);
 
-		free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL);
+		free_area_init_node(nid, zones_size, start_pfn, NULL);
 
 		printk("Node %u: mem_map starts at %p\n",
 		       pgdat->node_id, pgdat->node_mem_map);
@@ -384,97 +94,32 @@ void __init paging_init(void)
 	}
 
 	mem_map = NODE_DATA(0)->node_mem_map;
 
-	memset(zero_page, 0, PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
 	flush_dcache_page(empty_zero_page);
 }
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
-	int nid, i;
+	pg_data_t *pgdat;
 
-	reservedpages = 0;
 	high_memory = NULL;
+	for_each_online_pgdat(pgdat)
+		high_memory = max_t(void *, high_memory,
+				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
 
-	/* this will put all low memory onto the freelists */
-	for_each_online_node(nid) {
-		pg_data_t *pgdat = NODE_DATA(nid);
-		unsigned long node_pages = 0;
-		void *node_high_memory;
-
-		num_physpages += pgdat->node_present_pages;
-
-		if (pgdat->node_spanned_pages != 0)
-			node_pages = free_all_bootmem_node(pgdat);
-
-		totalram_pages += node_pages;
-
-		for (i = 0; i < node_pages; i++)
-			if (PageReserved(pgdat->node_mem_map + i))
-				reservedpages++;
-
-		node_high_memory = (void *)((pgdat->node_start_pfn
-					     + pgdat->node_spanned_pages)
-					    << PAGE_SHIFT);
-		if (node_high_memory > high_memory)
-			high_memory = node_high_memory;
-	}
-
-	max_mapnr = MAP_NR(high_memory);
-
-	codesize = (unsigned long)_etext - (unsigned long)_text;
-	datasize = (unsigned long)_edata - (unsigned long)_data;
-	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
-
-	printk ("Memory: %luk/%luk available (%dk kernel code, "
-		"%dk reserved, %dk data, %dk init)\n",
-		(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
-		totalram_pages << (PAGE_SHIFT - 10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT - 10),
-		datasize >> 10,
-		initsize >> 10);
-}
-
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
-{
-	unsigned int size = (end - addr) >> 10;
-
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
-		ClearPageReserved(page);
-		init_page_count(page);
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
-		       s, size, end - (size << 10), end);
+	set_max_mapnr(MAP_NR(high_memory));
+	free_all_bootmem();
+	mem_init_print_info(NULL);
 }
 
 void free_initmem(void)
 {
-	free_area((unsigned long)__init_begin, (unsigned long)__init_end,
-		  "init");
+	free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		free_area(start, end, "initrd");
-}
-
-static int __init keepinitrd_setup(char *__unused)
-{
-	keep_initrd = 1;
-	return 1;
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
-
-__setup("keepinitrd", keepinitrd_setup);
 #endif
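Note on the new mem_init() above: high_memory becomes the virtual end of the highest online node, computed from pgdat_end_pfn() (node_start_pfn + node_spanned_pages), replacing the old per-node bootmem walk. A small userspace model of just that computation follows; the struct, the node values and PAGE_SHIFT here are invented for illustration.

#include <stdio.h>

struct pgdat { unsigned long start_pfn, spanned; };

#define PAGE_SHIFT 12

/* mirrors the kernel's pgdat_end_pfn() */
static unsigned long pgdat_end_pfn(const struct pgdat *p)
{
	return p->start_pfn + p->spanned;
}

int main(void)
{
	struct pgdat nodes[] = { { 0x10000, 0x2000 } };	/* one 32 MiB node */
	unsigned long high = 0;

	for (unsigned int i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		unsigned long end = pgdat_end_pfn(&nodes[i]) << PAGE_SHIFT;

		if (end > high)
			high = end;
	}
	printf("high_memory (as a physical offset) = %#lx\n", high);
	return 0;
}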
diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c
index 536021877df..7def0d84cec 100644
--- a/arch/avr32/mm/ioremap.c
+++ b/arch/avr32/mm/ioremap.c
@@ -6,120 +6,14 @@
  * published by the Free Software Foundation.
  */
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
 
-#include <asm/io.h>
 #include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 #include <asm/addrspace.h>
 
-static inline int remap_area_pte(pte_t *pte, unsigned long address,
-				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
-{
-	unsigned long pfn;
-
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		WARN_ON(!pte_none(*pte));
-
-		set_pte(pte, pfn_pte(pfn, prot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-
-	return 0;
-}
-
-static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
-				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
-{
-	unsigned long next;
-
-	phys_addr -= address;
-
-	do {
-		pte_t *pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-
-		next = (address + PMD_SIZE) & PMD_MASK;
-		if (remap_area_pte(pte, address, next,
-				   address + phys_addr, prot))
-			return -ENOMEM;
-
-		address = next;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pud(pud_t *pud, unsigned long address,
-			  unsigned long end, unsigned long phys_addr,
-			  pgprot_t prot)
-{
-	unsigned long next;
-
-	phys_addr -= address;
-
-	do {
-		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			return -ENOMEM;
-		next = (address + PUD_SIZE) & PUD_MASK;
-		if (remap_area_pmd(pmd, address, next,
-				   phys_addr + address, prot))
-			return -ENOMEM;
-
-		address = next;
-		pud++;
-	} while (address && address < end);
-
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-			    size_t size, pgprot_t prot)
-{
-	unsigned long end = address + size;
-	unsigned long next;
-	pgd_t *pgd;
-	int err = 0;
-
-	phys_addr -= address;
-
-	pgd = pgd_offset_k(address);
-	flush_cache_all();
-	BUG_ON(address >= end);
-
-	spin_lock(&init_mm.page_table_lock);
-	do {
-		pud_t *pud = pud_alloc(&init_mm, pgd, address);
-
-		err = -ENOMEM;
-		if (!pud)
-			break;
-
-		next = (address + PGDIR_SIZE) & PGDIR_MASK;
-		if (next < address || next > end)
-			next = end;
-		err = remap_area_pud(pud, address, next,
-				     phys_addr + address, prot);
-		if (err)
-			break;
-
-		address = next;
-		pgd++;
-	} while (address && (address < end));
-
-	spin_unlock(&init_mm.page_table_lock);
-	flush_tlb_all();
-	return err;
-}
-
 /*
  * Re-map an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access physical
@@ -128,7 +22,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 void __iomem *__ioremap(unsigned long phys_addr, size_t size,
 			unsigned long flags)
 {
-	void *addr;
+	unsigned long addr;
 	struct vm_struct *area;
 	unsigned long offset, last_addr;
 	pgprot_t prot;
@@ -159,7 +53,7 @@ void __iomem *__ioremap(unsigned long phys_addr, size_t size,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 
-	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
+	prot = __pgprot(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_RW | _PAGE_DIRTY
 			| _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);
 
 	/*
@@ -169,9 +63,9 @@ void __iomem *__ioremap(unsigned long phys_addr, size_t size,
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long)addr, phys_addr, size, prot)) {
-		vunmap(addr);
+	addr = (unsigned long)area->addr;
+	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
+		vunmap((void *)addr);
 		return NULL;
 	}
 
@@ -185,6 +79,8 @@ void __iounmap(void __iomem *addr)
 
 	if ((unsigned long)addr >= P4SEG)
 		return;
+	if (PXSEG(addr) == P2SEG)
+		return;
 
 	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
 	if (unlikely(!p)) {
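Note on the ioremap.c change above: roughly a hundred lines of hand-rolled pte/pmd/pud walking are replaced by one call to the generic ioremap_page_range(). The sketch below shows the typical shape of an ioremap built on that helper; it mirrors the new __ioremap() but leaves out the AVR32-specific segment shortcuts and the exact pgprot flag set, so treat the details as illustrative.

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void __iomem *simple_ioremap(phys_addr_t phys, size_t size,
				    pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;
	unsigned long offset = phys & ~PAGE_MASK;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);	/* reserve kernel VA */
	if (!area)
		return NULL;
	area->phys_addr = phys;
	addr = (unsigned long)area->addr;

	/* populate the page tables for the whole range in one call */
	if (ioremap_page_range(addr, addr + size, phys, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void __iomem *)(addr + offset);
}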
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index 7b073052203..0da23109f81 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -11,21 +11,21 @@
 
 #include <asm/mmu_context.h>
 
-#define _TLBEHI_I	0x100
+/* TODO: Get the correct number from the CONFIG1 system register */
+#define NR_TLB_ENTRIES	32
 
-void show_dtlb_entry(unsigned int index)
+static void show_dtlb_entry(unsigned int index)
 {
-	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+	u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	mmucr_save = sysreg_read(MMUCR);
 	tlbehi_save = sysreg_read(TLBEHI);
-	mmucr = mmucr_save & 0x13;
-	mmucr |= index << 14;
+	mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
 	sysreg_write(MMUCR, mmucr);
 
-	asm volatile("tlbr" : : : "memory");
+	__builtin_tlbr();
 	cpu_sync_pipeline();
 
 	tlbehi = sysreg_read(TLBEHI);
@@ -33,15 +33,17 @@ void show_dtlb_entry(unsigned int index)
 
 	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
 	       index,
-	       (tlbehi & 0x200)?'1':'0',
-	       (tlbelo & 0x100)?'1':'0',
-	       (tlbehi & 0xff),
-	       (tlbehi >> 12), (tlbelo >> 12),
-	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
-	       (tlbelo & 0x200)?'1':'0',
-	       (tlbelo & 0x080)?'1':'0',
-	       (tlbelo & 0x001)?'1':'0',
-	       (tlbelo & 0x002)?'1':'0');
+	       SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+	       SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+	       SYSREG_BFEXT(ASID, tlbehi),
+	       SYSREG_BFEXT(VPN, tlbehi) >> 2,
+	       SYSREG_BFEXT(PFN, tlbelo) >> 2,
+	       SYSREG_BFEXT(AP, tlbelo),
+	       SYSREG_BFEXT(SZ, tlbelo),
+	       SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
+	       SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
+	       SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
+	       SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');
 
 	sysreg_write(MMUCR, mmucr_save);
 	sysreg_write(TLBEHI, tlbehi_save);
@@ -54,29 +56,33 @@ void dump_dtlb(void)
 	unsigned int i;
 
 	printk("ID  V G ASID VPN   PFN   AP SZ C B W D\n");
-	for (i = 0; i < 32; i++)
+	for (i = 0; i < NR_TLB_ENTRIES; i++)
 		show_dtlb_entry(i);
 }
 
-static unsigned long last_mmucr;
-
-static inline void set_replacement_pointer(unsigned shift)
+static void update_dtlb(unsigned long address, pte_t pte)
 {
-	unsigned long mmucr, mmucr_save;
+	u32 tlbehi;
+	u32 mmucr;
 
-	mmucr = mmucr_save = sysreg_read(MMUCR);
+	/*
+	 * We're not changing the ASID here, so no need to flush the
+	 * pipeline.
+	 */
+	tlbehi = sysreg_read(TLBEHI);
+	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
+	tlbehi |= address & MMU_VPN_MASK;
+	tlbehi |= SYSREG_BIT(TLBEHI_V);
+	sysreg_write(TLBEHI, tlbehi);
 
 	/* Does this mapping already exist? */
-	__asm__ __volatile__(
-		"	tlbs\n"
-		"	mfsr	%0, %1"
-		: "=r"(mmucr)
-		: "i"(SYSREG_MMUCR));
+	__builtin_tlbs();
+	mmucr = sysreg_read(MMUCR);
 
 	if (mmucr & SYSREG_BIT(MMUCR_N)) {
 		/* Not found -- pick a not-recently-accessed entry */
-		unsigned long rp;
-		unsigned long tlbar = sysreg_read(TLBARLO);
+		unsigned int rp;
+		u32 tlbar = sysreg_read(TLBARLO);
 
 		rp = 32 - fls(tlbar);
 		if (rp == 32) {
@@ -84,34 +90,18 @@ static inline void set_replacement_pointer(unsigned shift)
 			sysreg_write(TLBARLO, -1L);
 		}
 
-		mmucr &= 0x13;
-		mmucr |= (rp << shift);
-
+		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
 		sysreg_write(MMUCR, mmucr);
 	}
 
-	last_mmucr = mmucr;
-}
-
-static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
-{
-	unsigned long vpn;
-
-	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
-	sysreg_write(TLBEHI, vpn);
-	cpu_sync_pipeline();
-
-	set_replacement_pointer(14);
-
 	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
 
 	/* Let's go */
-	asm volatile("nop\n\ttlbw" : : : "memory");
-	cpu_sync_pipeline();
+	__builtin_tlbw();
 }
 
 void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t pte)
+		      unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 
@@ -120,39 +110,40 @@ void update_mmu_cache(struct vm_area_struct *vma,
 		return;
 
 	local_irq_save(flags);
-	update_dtlb(address, pte, get_asid());
+	update_dtlb(address, *ptep);
 	local_irq_restore(flags);
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+static void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
-	unsigned long mmucr, tlbehi;
+	u32 mmucr, tlbehi;
 
-	page |= asid;
-	sysreg_write(TLBEHI, page);
-	cpu_sync_pipeline();
-	asm volatile("tlbs");
+	/*
+	 * Caller is responsible for masking out non-PFN bits in page
+	 * and changing the current ASID if necessary. This means that
+	 * we don't need to flush the pipeline after writing TLBEHI.
+	 */
+	tlbehi = page | asid;
+	sysreg_write(TLBEHI, tlbehi);
+
+	__builtin_tlbs();
 	mmucr = sysreg_read(MMUCR);
 
 	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
-		unsigned long tlbarlo;
-		unsigned long entry;
+		unsigned int entry;
+		u32 tlbarlo;
 
 		/* Clear the "valid" bit */
-		tlbehi = sysreg_read(TLBEHI);
-		tlbehi &= ~_TLBEHI_VALID;
 		sysreg_write(TLBEHI, tlbehi);
-		cpu_sync_pipeline();
 
 		/* mark the entry as "not accessed" */
-		entry = (mmucr >> 14) & 0x3f;
+		entry = SYSREG_BFEXT(DRP, mmucr);
 		tlbarlo = sysreg_read(TLBARLO);
-		tlbarlo |= (0x80000000 >> entry);
+		tlbarlo |= (0x80000000UL >> entry);
 		sysreg_write(TLBARLO, tlbarlo);
 
 		/* update the entry with valid bit clear */
-		asm volatile("tlbw");
-		cpu_sync_pipeline();
+		__builtin_tlbw();
 	}
 }
 
@@ -190,17 +181,22 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
 	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
 		mm->context = NO_CONTEXT;
 		if (mm == current->mm)
 			activate_context(mm);
 	} else {
-		unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = MMU_NO_ASID;
+		unsigned long asid;
+		unsigned long saved_asid;
+
+		asid = mm->context & MMU_CONTEXT_ASID_MASK;
+		saved_asid = MMU_NO_ASID;
 
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
+
 		if (mm != current->mm) {
 			saved_asid = get_asid();
 			set_asid(asid);
@@ -218,33 +214,34 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 }
 
 /*
- * TODO: If this is only called for addresses > TASK_SIZE, we can probably
- * skip the ASID stuff and just use the Global bit...
+ * This function depends on the pages to be flushed having the G
+ * (global) bit set in their pte. This is true for all
+ * PAGE_KERNEL(_RO) pages.
  */
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
+		unsigned long asid;
+
+		local_irq_save(flags);
+		asid = get_asid();
 
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
-		set_asid(asid);
+
 		while (start < end) {
 			__flush_tlb_page(asid, start);
 			start += PAGE_SIZE;
 		}
-		set_asid(saved_asid);
+		local_irq_restore(flags);
 	}
-	local_irq_restore(flags);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -280,7 +277,7 @@ static void *tlb_start(struct seq_file *tlb, loff_t *pos)
 {
 	static unsigned long tlb_index;
 
-	if (*pos >= 32)
+	if (*pos >= NR_TLB_ENTRIES)
 		return NULL;
 
 	tlb_index = 0;
@@ -291,7 +288,7 @@ static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
 {
 	unsigned long *index = v;
 
-	if (*index >= 31)
+	if (*index >= NR_TLB_ENTRIES - 1)
 		return NULL;
 
 	++*pos;
@@ -313,16 +310,16 @@ static int tlb_show(struct seq_file *tlb, void *v)
 	if (*index == 0)
 		seq_puts(tlb, "ID  V G ASID VPN   PFN   AP SZ C B W D\n");
 
-	BUG_ON(*index >= 32);
+	BUG_ON(*index >= NR_TLB_ENTRIES);
 
 	local_irq_save(flags);
 	mmucr_save = sysreg_read(MMUCR);
 	tlbehi_save = sysreg_read(TLBEHI);
-	mmucr = mmucr_save & 0x13;
-	mmucr |= *index << 14;
+	mmucr = SYSREG_BFINS(DRP, *index, mmucr_save);
 	sysreg_write(MMUCR, mmucr);
 
-	asm volatile("tlbr" : : : "memory");
+	/* TLBR might change the ASID */
+	__builtin_tlbr();
 	cpu_sync_pipeline();
 
 	tlbehi = sysreg_read(TLBEHI);
@@ -334,21 +331,23 @@ static int tlb_show(struct seq_file *tlb, void *v)
 	local_irq_restore(flags);
 
 	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
-		   *index,
-		   (tlbehi & 0x200)?'1':'0',
-		   (tlbelo & 0x100)?'1':'0',
-		   (tlbehi & 0xff),
-		   (tlbehi >> 12), (tlbelo >> 12),
-		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
-		   (tlbelo & 0x200)?'1':'0',
-		   (tlbelo & 0x080)?'1':'0',
-		   (tlbelo & 0x001)?'1':'0',
-		   (tlbelo & 0x002)?'1':'0');
+		   *index,
+		   SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+		   SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(ASID, tlbehi),
+		   SYSREG_BFEXT(VPN, tlbehi) >> 2,
+		   SYSREG_BFEXT(PFN, tlbelo) >> 2,
+		   SYSREG_BFEXT(AP, tlbelo),
+		   SYSREG_BFEXT(SZ, tlbelo),
+		   SYSREG_BFEXT(TLBELO_C, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(B, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(W, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(TLBELO_D, tlbelo) ? '1' : '0');
 
 	return 0;
 }
 
-static struct seq_operations tlb_ops = {
+static const struct seq_operations tlb_ops = {
 	.start		= tlb_start,
 	.next		= tlb_next,
 	.stop		= tlb_stop,
@@ -360,7 +359,7 @@ static int tlb_open(struct inode *inode, struct file *file)
 {
 	return seq_open(file, &tlb_ops);
 }
 
-static struct file_operations proc_tlb_operations = {
+static const struct file_operations proc_tlb_operations = {
 	.open		= tlb_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -369,11 +368,7 @@ static struct file_operations proc_tlb_operations = {
 
 static int __init proctlb_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("tlb", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_tlb_operations;
+	proc_create("tlb", 0, NULL, &proc_tlb_operations);
 	return 0;
 }
 late_initcall(proctlb_init);
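Note on update_dtlb() above: the victim entry is chosen from TLBARLO, where a set bit marks a not-recently-accessed entry (compare __flush_tlb_page(), which sets an entry's bit as it invalidates it). rp = 32 - fls(tlbar) therefore selects the lowest-numbered such entry, and when every entry has been accessed the bits are reset by writing -1. Below is a compilable userspace model of that pick, with fls() expressed through __builtin_clz().

#include <stdio.h>

/* 1-based position of the highest set bit, 0 for x == 0 (kernel fls()) */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* tlbar: bit 31 belongs to entry 0, bit 30 to entry 1, and so on;
 * a set bit means "not recently accessed". */
static unsigned int pick_victim(unsigned int *tlbar)
{
	unsigned int rp = 32 - fls32(*tlbar);

	if (rp == 32) {			/* every entry recently accessed */
		rp = 0;
		*tlbar = 0xffffffffu;	/* kernel writes -1L to TLBARLO */
	}
	return rp;
}

int main(void)
{
	unsigned int tlbar = 0x3fffffff;	/* entries 0 and 1 accessed */

	printf("victim = %u\n", pick_victim(&tlbar));	/* prints 2 */

	tlbar = 0;				/* all accessed: reset case */
	printf("victim = %u\n", pick_victim(&tlbar));	/* prints 0 */
	return 0;
}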
