Diffstat (limited to 'arch/avr32/mm')
| -rw-r--r-- | arch/avr32/mm/cache.c        |  38 |
| -rw-r--r-- | arch/avr32/mm/dma-coherent.c |  15 |
| -rw-r--r-- | arch/avr32/mm/fault.c        |  89 |
| -rw-r--r-- | arch/avr32/mm/init.c         | 153 |
| -rw-r--r-- | arch/avr32/mm/ioremap.c      |   2 |
| -rw-r--r-- | arch/avr32/mm/tlb.c          | 185 |
6 files changed, 183 insertions(+), 299 deletions(-)
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
index 8f7b1c3cd0f..85d635cd7b2 100644
--- a/arch/avr32/mm/cache.c
+++ b/arch/avr32/mm/cache.c
@@ -13,6 +13,7 @@
 #include <asm/cachectl.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/syscalls.h>
 
 /*
  * If you attempt to flush anything more than this, you need superuser
@@ -23,7 +24,6 @@ void invalidate_dcache_region(void *start, size_t size)
 {
 	unsigned long v, begin, end, linesz, mask;
-	int flush = 0;
 
 	linesz = boot_cpu_data.dcache.linesz;
 	mask = linesz - 1;
@@ -32,24 +32,21 @@ void invalidate_dcache_region(void *start, size_t size)
 	 * instead of invalidating ... never discard valid data!
 	 */
 	begin = (unsigned long)start;
-	end = begin + size - 1;
+	end = begin + size;
 
 	if (begin & mask) {
 		flush_dcache_line(start);
 		begin += linesz;
-		flush = 1;
 	}
-	if ((end & mask) != mask) {
+	if (end & mask) {
 		flush_dcache_line((void *)end);
-		end -= linesz;
-		flush = 1;
+		end &= ~mask;
 	}
 
 	/* remaining cachelines only need invalidation */
-	for (v = begin; v <= end; v += linesz)
+	for (v = begin; v < end; v += linesz)
 		invalidate_dcache_line((void *)v);
-	if (flush)
-		flush_write_buffer();
+	flush_write_buffer();
 }
 
 void clean_dcache_region(void *start, size_t size)
@@ -114,9 +111,10 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	__flush_icache_range(start & ~(linesz - 1),
 			     (end + linesz - 1) & ~(linesz - 1));
 }
+EXPORT_SYMBOL(flush_icache_range);
 
 /*
- * This one is called from do_no_page(), do_swap_page() and install_page().
+ * This one is called from __do_fault() and do_swap_page().
  */
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
@@ -126,16 +124,6 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 	}
 }
 
-/*
- * This one is used by copy_to_user_page()
- */
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long addr, int len)
-{
-	if (vma->vm_flags & VM_EXEC)
-		flush_icache_range(addr, addr + len);
-}
-
 asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
 {
 	int ret;
@@ -163,3 +151,13 @@ asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len)
 out:
 	return ret;
 }
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		unsigned long vaddr, void *dst, const void *src,
+		unsigned long len)
+{
+	memcpy(dst, src, len);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_range((unsigned long)dst,
+				(unsigned long)dst + len);
+}
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 099212d4567..50cdb5b10f0 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -7,6 +7,8 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
 
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
@@ -21,13 +23,13 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dma_cache_inv(vaddr, size);
+		invalidate_dcache_region(vaddr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dma_cache_wback(vaddr, size);
+		clean_dcache_region(vaddr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dma_cache_wback_inv(vaddr, size);
+		flush_dcache_region(vaddr, size);
 		break;
 	default:
 		BUG();
@@ -41,6 +43,13 @@ static struct page *__dma_alloc(struct device *dev, size_t size,
 	struct page *page, *free, *end;
 	int order;
 
+	/* Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on AVR32 as it is not supported on this
+	 * platform--see CONFIG_HUGETLB_PAGE. */
+	gfp &= ~(__GFP_COMP);
+
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
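The reworked invalidate_dcache_region() above switches from an inclusive to an exclusive end address: partial lines at either edge are flushed (written back) so valid data is never discarded, and only the fully covered lines in between are invalidated. A minimal userspace sketch of that head/body/tail arithmetic follows; the 32-byte line size and the demo addresses are assumptions, and this is plain C, not the kernel code itself.

#include <stdio.h>

#define LINESZ 32UL			/* assumed D-cache line size */

static void split_region(unsigned long begin, unsigned long size)
{
	unsigned long end = begin + size;	/* exclusive, as in the patch */
	unsigned long mask = LINESZ - 1;
	unsigned long v;

	if (begin & mask) {			/* partial line at the head */
		printf("flush line %#lx\n", begin & ~mask);
		begin += LINESZ;
	}
	if (end & mask) {			/* partial line at the tail */
		printf("flush line %#lx\n", end & ~mask);
		end &= ~mask;
	}
	/* full lines in between only need invalidation; the hardware op
	 * acts on the line containing v, so v itself need not be aligned */
	for (v = begin; v < end; v += LINESZ)
		printf("invalidate line %#lx\n", v & ~mask);
}

int main(void)
{
	split_region(0x1010, 0x70);	/* unaligned head, aligned end */
	split_region(0x2000, 0x40);	/* fully aligned: no flushes */
	return 0;
}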
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index e011f1ce187..0eca9332719 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -61,9 +61,10 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	const struct exception_table_entry *fixup;
 	unsigned long address;
 	unsigned long page;
-	int writeaccess;
 	long signr;
 	int code;
+	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	if (notify_page_fault(regs, ecr))
 		return;
@@ -85,6 +86,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
 	local_irq_enable();
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, address);
@@ -103,7 +107,6 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
  */
good_area:
 	code = SEGV_ACCERR;
-	writeaccess = 0;
 
 	switch (ecr) {
 	case ECR_PROTECTION_X:
@@ -120,7 +123,7 @@ good_area:
 	case ECR_TLB_MISS_W:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		writeaccess = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 	default:
 		panic("Unhandled case %lu in do_page_fault!", ecr);
@@ -131,22 +134,37 @@ good_area:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-	case VM_FAULT_MINOR:
-		tsk->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		tsk->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	case VM_FAULT_OOM:
-		goto out_of_memory;
-	default:
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
 		BUG();
 	}
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would have
+			 * already released it in __lock_page_or_retry() in
+			 * mm/filemap.c.
+			 */
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
@@ -158,10 +176,10 @@ bad_area:
 	up_read(&mm->mmap_sem);
 
 	if (user_mode(regs)) {
-		if (exception_trace)
+		if (exception_trace && printk_ratelimit())
 			printk("%s%s[%d]: segfault at %08lx pc %08lx "
 			       "sp %08lx ecr %lu\n",
-			       is_init(tsk) ? KERN_EMERG : KERN_INFO,
+			       is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
 			       tsk->comm, tsk->pid, address, regs->pc,
 			       regs->sp, ecr);
 		_exception(SIGSEGV, regs, code, address);
@@ -190,6 +208,8 @@ no_context:
 
 	page = sysreg_read(PTBR);
 	printk(KERN_ALERT "ptbr = %08lx", page);
+	if (address >= TASK_SIZE)
+		page = (unsigned long)swapper_pg_dir;
 	if (page) {
 		page = ((unsigned long *)page)[address >> 22];
 		printk(" pgd = %08lx", page);
@@ -210,15 +230,10 @@ no_context:
  */
out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: Killing process %s\n", tsk->comm);
-	if (user_mode(regs))
-		do_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
@@ -232,7 +247,7 @@ do_sigbus:
 	if (exception_trace)
 		printk("%s%s[%d]: bus error at %08lx pc %08lx "
 		       "sp %08lx ecr %lu\n",
-		       is_init(tsk) ? KERN_EMERG : KERN_INFO,
+		       is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
 		       tsk->comm, tsk->pid, address, regs->pc,
 		       regs->sp, ecr);
 
@@ -249,21 +264,3 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
 	dump_dtlb();
 	die("Bus Error", regs, SIGKILL);
 }
-
-/*
- * This functionality is currently not possible to implement because
- * we're using segmentation to ensure a fixed mapping of the kernel
- * virtual address space.
- *
- * It would be possible to implement this, but it would require us to
- * disable segmentation at startup and load the kernel mappings into
- * the TLB like any other pages.  There will be lots of trickery to
- * avoid recursive invocation of the TLB miss handler, though...
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
-
-}
-EXPORT_SYMBOL(kernel_map_pages);
-#endif
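The new do_page_fault() above follows the retry protocol of the modern handle_mm_fault(): check VM_FAULT_ERROR first, account maj_flt/min_flt only while FAULT_FLAG_ALLOW_RETRY is set, and retry at most once after replacing ALLOW_RETRY with TRIED. A standalone sketch of that control flow; the flag values and the stubbed fault function are made up for illustration (the real constants live in <linux/mm.h>), and the fatal-signal early exit is omitted.

#include <stdio.h>

#define VM_FAULT_OOM		0x01
#define VM_FAULT_SIGBUS		0x02
#define VM_FAULT_MAJOR		0x04
#define VM_FAULT_RETRY		0x08
#define VM_FAULT_ERROR		(VM_FAULT_OOM | VM_FAULT_SIGBUS)

#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_ALLOW_RETRY	0x02
#define FAULT_FLAG_KILLABLE	0x04
#define FAULT_FLAG_TRIED	0x08

/* Stub: first call asks the caller to retry; second call succeeds
 * as a major fault. */
static int fake_handle_mm_fault(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) ? VM_FAULT_RETRY
						: VM_FAULT_MAJOR;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE
			     | FAULT_FLAG_WRITE;
	unsigned long maj_flt = 0, min_flt = 0;
	int fault;

retry:
	fault = fake_handle_mm_fault(flags);

	if (fault & VM_FAULT_ERROR) {
		fprintf(stderr, "unrecoverable fault\n");
		return 1;
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* counters are bumped before the retry check, as in
		 * the patched function */
		if (fault & VM_FAULT_MAJOR)
			maj_flt++;
		else
			min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* one retry only: drop ALLOW_RETRY, mark TRIED */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	printf("done: maj_flt=%lu min_flt=%lu\n", maj_flt, min_flt);
	return 0;
}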
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 82cf70854b9..def5391d927 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -7,10 +7,12 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/mmzone.h>
+#include <linux/module.h>
 #include <linux/bootmem.h>
 #include <linux/pagemap.h>
 #include <linux/nodemask.h>
@@ -23,59 +25,16 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
 
 struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 /*
  * Cache of MMU context last used.
  */
 unsigned long mmu_context_cache = NO_CONTEXT;
 
-#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
-#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
-
-void show_mem(void)
-{
-	int total = 0, reserved = 0, cached = 0;
-	int slab = 0, free = 0, shared = 0;
-	pg_data_t *pgdat;
-
-	printk("Mem-info:\n");
-	show_free_areas();
-
-	for_each_online_pgdat(pgdat) {
-		struct page *page, *end;
-
-		page = pgdat->node_mem_map;
-		end = page + pgdat->node_spanned_pages;
-
-		do {
-			total++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (PageSlab(page))
-				slab++;
-			else if (!page_count(page))
-				free++;
-			else
-				shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
-	}
-
-	printk ("%d pages of RAM\n", total);
-	printk ("%d free pages\n", free);
-	printk ("%d reserved pages\n", reserved);
-	printk ("%d slab pages\n", slab);
-	printk ("%d pages shared\n", shared);
-	printk ("%d pages swap cached\n", cached);
-}
-
 /*
  * paging_init() sets up the page tables
  *
@@ -109,27 +68,16 @@ void __init paging_init(void)
 
 	zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
 						 PAGE_SIZE);
 
-	{
-		pgd_t *pg_dir;
-		int i;
-
-		pg_dir = swapper_pg_dir;
-		sysreg_write(PTBR, (unsigned long)pg_dir);
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
-
-		enable_mmu();
-		printk ("CPU: Paging enabled\n");
-	}
+	sysreg_write(PTBR, (unsigned long)swapper_pg_dir);
+	enable_mmu();
+	printk ("CPU: Paging enabled\n");
 
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		unsigned long zones_size[MAX_NR_ZONES];
 		unsigned long low, start_pfn;
 
-		start_pfn = pgdat->bdata->node_boot_start;
-		start_pfn >>= PAGE_SHIFT;
+		start_pfn = pgdat->bdata->node_min_pfn;
 		low = pgdat->bdata->node_low_pfn;
 
 		memset(zones_size, 0, sizeof(zones_size));
@@ -138,7 +86,7 @@ void __init paging_init(void)
 		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
 		       nid, start_pfn, low);
 
-		free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL);
+		free_area_init_node(nid, zones_size, start_pfn, NULL);
 
 		printk("Node %u: mem_map starts at %p\n",
 		       pgdat->node_id, pgdat->node_mem_map);
@@ -146,97 +94,32 @@ void __init paging_init(void)
 
 	mem_map = NODE_DATA(0)->node_mem_map;
 
-	memset(zero_page, 0, PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
 	flush_dcache_page(empty_zero_page);
 }
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
-	int nid, i;
+	pg_data_t *pgdat;
 
-	reservedpages = 0;
 	high_memory = NULL;
+	for_each_online_pgdat(pgdat)
+		high_memory = max_t(void *, high_memory,
+				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
 
-	/* this will put all low memory onto the freelists */
-	for_each_online_node(nid) {
-		pg_data_t *pgdat = NODE_DATA(nid);
-		unsigned long node_pages = 0;
-		void *node_high_memory;
-
-		num_physpages += pgdat->node_present_pages;
-
-		if (pgdat->node_spanned_pages != 0)
-			node_pages = free_all_bootmem_node(pgdat);
-
-		totalram_pages += node_pages;
-
-		for (i = 0; i < node_pages; i++)
-			if (PageReserved(pgdat->node_mem_map + i))
-				reservedpages++;
-
-		node_high_memory = (void *)((pgdat->node_start_pfn
-					     + pgdat->node_spanned_pages)
-					    << PAGE_SHIFT);
-		if (node_high_memory > high_memory)
-			high_memory = node_high_memory;
-	}
-
-	max_mapnr = MAP_NR(high_memory);
-
-	codesize = (unsigned long)_etext - (unsigned long)_text;
-	datasize = (unsigned long)_edata - (unsigned long)_data;
-	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
-
-	printk ("Memory: %luk/%luk available (%dk kernel code, "
-		"%dk reserved, %dk data, %dk init)\n",
-		(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
-		totalram_pages << (PAGE_SHIFT - 10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT - 10),
-		datasize >> 10,
-		initsize >> 10);
-}
-
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
-{
-	unsigned int size = (end - addr) >> 10;
-
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
-		ClearPageReserved(page);
-		init_page_count(page);
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
-		       s, size, end - (size << 10), end);
+	set_max_mapnr(MAP_NR(high_memory));
+	free_all_bootmem();
+	mem_init_print_info(NULL);
 }
 
 void free_initmem(void)
 {
-	free_area((unsigned long)__init_begin, (unsigned long)__init_end,
-		  "init");
+	free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		free_area(start, end, "initrd");
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
-
-static int __init keepinitrd_setup(char *__unused)
-{
-	keep_initrd = 1;
-	return 1;
-}
-
-__setup("keepinitrd", keepinitrd_setup);
 #endif
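The rewritten mem_init() above computes high_memory as the largest __va(end_pfn << PAGE_SHIFT) over all online nodes and derives max_mapnr from it via MAP_NR(). A worked example of just that arithmetic; the PAGE_OFFSET value (AVR32's 0x80000000 kernel segment is assumed here) and the node end PFNs are made up.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x80000000UL		/* assumed kernel segment base */
#define __va(phys)	((phys) + PAGE_OFFSET)
#define MAP_NR(va)	(((va) - PAGE_OFFSET) >> PAGE_SHIFT)

int main(void)
{
	/* assumed per-node end PFNs (first page past each node) */
	unsigned long end_pfn[] = { 0x2000, 0x1800 };
	unsigned long high = 0;
	unsigned int i;

	/* same max-over-nodes reduction as the for_each_online_pgdat() loop */
	for (i = 0; i < sizeof(end_pfn) / sizeof(end_pfn[0]); i++) {
		unsigned long va = __va(end_pfn[i] << PAGE_SHIFT);
		if (va > high)
			high = va;
	}

	/* prints high_memory = 0x82000000, max_mapnr = 8192 */
	printf("high_memory = %#lx, max_mapnr = %lu\n", high, MAP_NR(high));
	return 0;
}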
diff --git a/arch/avr32/mm/ioremap.c b/arch/avr32/mm/ioremap.c
index 3437c82434a..7def0d84cec 100644
--- a/arch/avr32/mm/ioremap.c
+++ b/arch/avr32/mm/ioremap.c
@@ -6,8 +6,10 @@
  * published by the Free Software Foundation.
  */
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/addrspace.h>
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index 56672018e42..0da23109f81 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -11,21 +11,21 @@
 
 #include <asm/mmu_context.h>
 
-#define _TLBEHI_I	0x100
+/* TODO: Get the correct number from the CONFIG1 system register */
+#define NR_TLB_ENTRIES	32
 
-void show_dtlb_entry(unsigned int index)
+static void show_dtlb_entry(unsigned int index)
 {
-	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
+	u32 tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	mmucr_save = sysreg_read(MMUCR);
 	tlbehi_save = sysreg_read(TLBEHI);
-	mmucr = mmucr_save & 0x13;
-	mmucr |= index << 14;
+	mmucr = SYSREG_BFINS(DRP, index, mmucr_save);
 	sysreg_write(MMUCR, mmucr);
 
-	asm volatile("tlbr" : : : "memory");
+	__builtin_tlbr();
 	cpu_sync_pipeline();
 
 	tlbehi = sysreg_read(TLBEHI);
@@ -33,15 +33,17 @@ void show_dtlb_entry(unsigned int index)
 
 	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
 	       index,
-	       (tlbehi & 0x200)?'1':'0',
-	       (tlbelo & 0x100)?'1':'0',
-	       (tlbehi & 0xff),
-	       (tlbehi >> 12), (tlbelo >> 12),
-	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
-	       (tlbelo & 0x200)?'1':'0',
-	       (tlbelo & 0x080)?'1':'0',
-	       (tlbelo & 0x001)?'1':'0',
-	       (tlbelo & 0x002)?'1':'0');
+	       SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+	       SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+	       SYSREG_BFEXT(ASID, tlbehi),
+	       SYSREG_BFEXT(VPN, tlbehi) >> 2,
+	       SYSREG_BFEXT(PFN, tlbelo) >> 2,
+	       SYSREG_BFEXT(AP, tlbelo),
+	       SYSREG_BFEXT(SZ, tlbelo),
+	       SYSREG_BFEXT(TLBELO_C, tlbelo) ? 'C' : ' ',
+	       SYSREG_BFEXT(B, tlbelo) ? 'B' : ' ',
+	       SYSREG_BFEXT(W, tlbelo) ? 'W' : ' ',
+	       SYSREG_BFEXT(TLBELO_D, tlbelo) ? 'D' : ' ');
 
 	sysreg_write(MMUCR, mmucr_save);
 	sysreg_write(TLBEHI, tlbehi_save);
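The tlb.c changes above replace open-coded shifts and masks for the MMUCR DRP field with SYSREG_BFEXT/SYSREG_BFINS. A plain-C model of what such bitfield macros do; the offset (14) and width (6 bits) are taken from the old open-coded form in this diff ((mmucr >> 14) & 0x3f), while the generic bfext()/bfins() helpers are assumptions, not the kernel macros themselves.

#include <assert.h>
#include <stdint.h>

#define DRP_OFFSET	14	/* from the old "mmucr |= index << 14" */
#define DRP_SIZE	6	/* from the old "(mmucr >> 14) & 0x3f" */

/* extract a bitfield of `size` bits starting at `offset` */
static uint32_t bfext(uint32_t reg, unsigned int offset, unsigned int size)
{
	return (reg >> offset) & ((1u << size) - 1);
}

/* insert `value` into that bitfield, leaving all other bits intact */
static uint32_t bfins(uint32_t reg, uint32_t value, unsigned int offset,
		      unsigned int size)
{
	uint32_t mask = ((1u << size) - 1) << offset;

	return (reg & ~mask) | ((value << offset) & mask);
}

int main(void)
{
	uint32_t mmucr = 0x13;	/* low enable bits, as the old mask kept */

	mmucr = bfins(mmucr, 25, DRP_OFFSET, DRP_SIZE);
	assert(bfext(mmucr, DRP_OFFSET, DRP_SIZE) == 25);
	assert((mmucr & 0x13) == 0x13);	/* other bits untouched */
	return 0;
}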
@@ -54,29 +56,33 @@ void dump_dtlb(void)
 	unsigned int i;
 
 	printk("ID  V G ASID VPN   PFN   AP SZ C B W D\n");
-	for (i = 0; i < 32; i++)
+	for (i = 0; i < NR_TLB_ENTRIES; i++)
 		show_dtlb_entry(i);
 }
 
-static unsigned long last_mmucr;
-
-static inline void set_replacement_pointer(unsigned shift)
+static void update_dtlb(unsigned long address, pte_t pte)
 {
-	unsigned long mmucr, mmucr_save;
+	u32 tlbehi;
+	u32 mmucr;
 
-	mmucr = mmucr_save = sysreg_read(MMUCR);
+	/*
+	 * We're not changing the ASID here, so no need to flush the
+	 * pipeline.
+	 */
+	tlbehi = sysreg_read(TLBEHI);
+	tlbehi = SYSREG_BF(ASID, SYSREG_BFEXT(ASID, tlbehi));
+	tlbehi |= address & MMU_VPN_MASK;
+	tlbehi |= SYSREG_BIT(TLBEHI_V);
+	sysreg_write(TLBEHI, tlbehi);
 
 	/* Does this mapping already exist? */
-	__asm__ __volatile__(
-		"	tlbs\n"
-		"	mfsr	%0, %1"
-		: "=r"(mmucr)
-		: "i"(SYSREG_MMUCR));
+	__builtin_tlbs();
+	mmucr = sysreg_read(MMUCR);
 
 	if (mmucr & SYSREG_BIT(MMUCR_N)) {
 		/* Not found -- pick a not-recently-accessed entry */
-		unsigned long rp;
-		unsigned long tlbar = sysreg_read(TLBARLO);
+		unsigned int rp;
+		u32 tlbar = sysreg_read(TLBARLO);
 
 		rp = 32 - fls(tlbar);
 		if (rp == 32) {
@@ -84,34 +90,18 @@
 			sysreg_write(TLBARLO, -1L);
 		}
 
-		mmucr &= 0x13;
-		mmucr |= (rp << shift);
-
+		mmucr = SYSREG_BFINS(DRP, rp, mmucr);
 		sysreg_write(MMUCR, mmucr);
 	}
 
-	last_mmucr = mmucr;
-}
-
-static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
-{
-	unsigned long vpn;
-
-	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
-	sysreg_write(TLBEHI, vpn);
-	cpu_sync_pipeline();
-
-	set_replacement_pointer(14);
-
 	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);
 
 	/* Let's go */
-	asm volatile("nop\n\ttlbw" : : : "memory");
-	cpu_sync_pipeline();
+	__builtin_tlbw();
 }
 
 void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t pte)
+		      unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
 
@@ -120,39 +110,40 @@ void update_mmu_cache(struct vm_area_struct *vma,
 		return;
 
 	local_irq_save(flags);
-	update_dtlb(address, pte, get_asid());
+	update_dtlb(address, *ptep);
 	local_irq_restore(flags);
 }
 
-void __flush_tlb_page(unsigned long asid, unsigned long page)
+static void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
-	unsigned long mmucr, tlbehi;
+	u32 mmucr, tlbehi;
 
-	page |= asid;
-	sysreg_write(TLBEHI, page);
-	cpu_sync_pipeline();
-	asm volatile("tlbs");
+	/*
+	 * Caller is responsible for masking out non-PFN bits in page
+	 * and changing the current ASID if necessary. This means that
+	 * we don't need to flush the pipeline after writing TLBEHI.
+	 */
+	tlbehi = page | asid;
+	sysreg_write(TLBEHI, tlbehi);
+
+	__builtin_tlbs();
 	mmucr = sysreg_read(MMUCR);
 
 	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
-		unsigned long tlbarlo;
-		unsigned long entry;
+		unsigned int entry;
+		u32 tlbarlo;
 
 		/* Clear the "valid" bit */
-		tlbehi = sysreg_read(TLBEHI);
-		tlbehi &= ~_TLBEHI_VALID;
 		sysreg_write(TLBEHI, tlbehi);
-		cpu_sync_pipeline();
 
 		/* mark the entry as "not accessed" */
-		entry = (mmucr >> 14) & 0x3f;
+		entry = SYSREG_BFEXT(DRP, mmucr);
 		tlbarlo = sysreg_read(TLBARLO);
-		tlbarlo |= (0x80000000 >> entry);
+		tlbarlo |= (0x80000000UL >> entry);
 		sysreg_write(TLBARLO, tlbarlo);
 
 		/* update the entry with valid bit clear */
-		asm volatile("tlbw");
-		cpu_sync_pipeline();
+		__builtin_tlbw();
 	}
 }
@@ -190,17 +181,22 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
 		if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
 			mm->context = NO_CONTEXT;
 			if (mm == current->mm)
 				activate_context(mm);
 		} else {
-			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
-			unsigned long saved_asid = MMU_NO_ASID;
+			unsigned long asid;
+			unsigned long saved_asid;
+
+			asid = mm->context & MMU_CONTEXT_ASID_MASK;
+			saved_asid = MMU_NO_ASID;
 
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
+
 			if (mm != current->mm) {
 				saved_asid = get_asid();
 				set_asid(asid);
@@ -218,33 +214,34 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 }
 
 /*
- * TODO: If this is only called for addresses > TASK_SIZE, we can probably
- * skip the ASID stuff and just use the Global bit...
+ * This function depends on the pages to be flushed having the G
+ * (global) bit set in their pte. This is true for all
+ * PAGE_KERNEL(_RO) pages.
  */
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
-		unsigned long saved_asid = get_asid();
+		unsigned long asid;
+
+		local_irq_save(flags);
+		asid = get_asid();
 
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
-		set_asid(asid);
+
 		while (start < end) {
 			__flush_tlb_page(asid, start);
 			start += PAGE_SIZE;
 		}
-		set_asid(saved_asid);
+
+		local_irq_restore(flags);
 	}
-	local_irq_restore(flags);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -280,7 +277,7 @@ static void *tlb_start(struct seq_file *tlb, loff_t *pos)
 {
 	static unsigned long tlb_index;
 
-	if (*pos >= 32)
+	if (*pos >= NR_TLB_ENTRIES)
 		return NULL;
 
 	tlb_index = 0;
@@ -291,7 +288,7 @@ static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
 {
 	unsigned long *index = v;
 
-	if (*index >= 31)
+	if (*index >= NR_TLB_ENTRIES - 1)
 		return NULL;
 
 	++*pos;
@@ -313,16 +310,16 @@ static int tlb_show(struct seq_file *tlb, void *v)
 	if (*index == 0)
 		seq_puts(tlb, "ID  V G ASID VPN   PFN   AP SZ C B W D\n");
 
-	BUG_ON(*index >= 32);
+	BUG_ON(*index >= NR_TLB_ENTRIES);
 
 	local_irq_save(flags);
 	mmucr_save = sysreg_read(MMUCR);
 	tlbehi_save = sysreg_read(TLBEHI);
-	mmucr = mmucr_save & 0x13;
-	mmucr |= *index << 14;
+	mmucr = SYSREG_BFINS(DRP, *index, mmucr_save);
 	sysreg_write(MMUCR, mmucr);
 
-	asm volatile("tlbr" : : : "memory");
+	/* TLBR might change the ASID */
+	__builtin_tlbr();
 	cpu_sync_pipeline();
 
 	tlbehi = sysreg_read(TLBEHI);
@@ -334,21 +331,23 @@ static int tlb_show(struct seq_file *tlb, void *v)
 	local_irq_restore(flags);
 
 	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
-		   *index,
-		   (tlbehi & 0x200)?'1':'0',
-		   (tlbelo & 0x100)?'1':'0',
-		   (tlbehi & 0xff),
-		   (tlbehi >> 12), (tlbelo >> 12),
-		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
-		   (tlbelo & 0x200)?'1':'0',
-		   (tlbelo & 0x080)?'1':'0',
-		   (tlbelo & 0x001)?'1':'0',
-		   (tlbelo & 0x002)?'1':'0');
+		   *index,
+		   SYSREG_BFEXT(TLBEHI_V, tlbehi) ? '1' : '0',
+		   SYSREG_BFEXT(G, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(ASID, tlbehi),
+		   SYSREG_BFEXT(VPN, tlbehi) >> 2,
+		   SYSREG_BFEXT(PFN, tlbelo) >> 2,
+		   SYSREG_BFEXT(AP, tlbelo),
+		   SYSREG_BFEXT(SZ, tlbelo),
+		   SYSREG_BFEXT(TLBELO_C, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(B, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(W, tlbelo) ? '1' : '0',
+		   SYSREG_BFEXT(TLBELO_D, tlbelo) ? '1' : '0');
 
 	return 0;
 }
 
-static struct seq_operations tlb_ops = {
+static const struct seq_operations tlb_ops = {
 	.start		= tlb_start,
 	.next		= tlb_next,
 	.stop		= tlb_stop,
@@ -369,11 +368,7 @@ static const struct file_operations proc_tlb_operations = {
 
 static int __init proctlb_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("tlb", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_tlb_operations;
+	proc_create("tlb", 0, NULL, &proc_tlb_operations);
 	return 0;
 }
 late_initcall(proctlb_init);
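update_dtlb() above picks its victim entry with rp = 32 - fls(tlbar): a set bit in TLBARLO means "not recently accessed", bit 31 maps to entry 0, so the expression selects the lowest-numbered such entry. A standalone model of that pick follows; the reset-to-entry-0 choice when every entry was recently used is hidden by the hunk context here and is an assumption, and fls() is emulated with a GCC/Clang builtin.

#include <stdio.h>

/* fls() equivalent: 1-based position of the highest set bit, 0 if none */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int pick_entry(unsigned int *tlbar)
{
	unsigned int rp = 32 - fls32(*tlbar);

	if (rp == 32) {
		/* all entries recently used: restart the accessed history
		 * (entry 0 as the fallback is an assumption here) */
		rp = 0;
		*tlbar = 0xffffffffU;	/* mirrors TLBARLO = -1 */
	}
	return rp;
}

int main(void)
{
	unsigned int tlbar;

	tlbar = 0x000000ff;	/* bits 0..7 set: entries 24..31 unused */
	printf("replace entry %u\n", pick_entry(&tlbar));	/* -> 24 */

	tlbar = 0;		/* every entry recently used */
	printf("replace entry %u\n", pick_entry(&tlbar));	/* -> 0 */
	return 0;
}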