Diffstat (limited to 'arch/frv/mm')
-rw-r--r--  arch/frv/mm/Makefile      |   2
-rw-r--r--  arch/frv/mm/cache-page.c  |  13
-rw-r--r--  arch/frv/mm/dma-alloc.c   |  15
-rw-r--r--  arch/frv/mm/elf-fdpic.c   |  58
-rw-r--r--  arch/frv/mm/extable.c     |  35
-rw-r--r--  arch/frv/mm/fault.c       |  42
-rw-r--r--  arch/frv/mm/highmem.c     |  56
-rw-r--r--  arch/frv/mm/init.c        | 133
-rw-r--r--  arch/frv/mm/kmap.c        |  17
-rw-r--r--  arch/frv/mm/mmu-context.c |   8
-rw-r--r--  arch/frv/mm/pgalloc.c     |  56
-rw-r--r--  arch/frv/mm/tlb-flush.S   |   1
-rw-r--r--  arch/frv/mm/tlb-miss.S    |   4
-rw-r--r--  arch/frv/mm/unaligned.c   | 218
14 files changed, 186 insertions(+), 472 deletions(-)
diff --git a/arch/frv/mm/Makefile b/arch/frv/mm/Makefile
index fb8b1d860f4..1bca5ab8a6a 100644
--- a/arch/frv/mm/Makefile
+++ b/arch/frv/mm/Makefile
@@ -6,4 +6,4 @@ obj-y := init.o kmap.o
 
 obj-$(CONFIG_MMU) += \
 	pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
-	mmu-context.o dma-alloc.o unaligned.o elf-fdpic.o
+	mmu-context.o dma-alloc.o elf-fdpic.o
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 683b5e34431..8e09dae0ec3 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -11,6 +11,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 #include <asm/pgalloc.h>
 
 /*****************************************************************************/
@@ -25,11 +26,11 @@ void flush_dcache_page(struct page *page)
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page);
 
 	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -38,6 +39,8 @@ void flush_dcache_page(struct page *page)
 
 } /* end flush_dcache_page() */
 
+EXPORT_SYMBOL(flush_dcache_page);
+
 /*****************************************************************************/
 /*
  * ICI takes a virtual address and the page may not currently have one
@@ -51,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page);
 
 	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
 	frv_cache_wback_inv(start, start + len);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -64,3 +67,5 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 	}
 
 } /* end flush_icache_user_range() */
+
+EXPORT_SYMBOL(flush_icache_user_range);
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index 4b38d45435f..7a73aaeae3a 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -21,7 +21,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -37,10 +36,11 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
 #include <asm/io.h>
-#include <asm/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -55,21 +55,18 @@ static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
 	pte_t *pte;
 	int err = -ENOMEM;
 
-	spin_lock(&init_mm.page_table_lock);
-
 	/* Use upper 10 bits of VA to index the first level map */
 	pge = pgd_offset_k(va);
 	pue = pud_offset(pge, va);
 	pme = pmd_offset(pue, va);
 
 	/* Use middle 10 bits of VA to index the second-level map */
-	pte = pte_alloc_kernel(&init_mm, pme, va);
+	pte = pte_alloc_kernel(pme, va);
 	if (pte != 0) {
 		err = 0;
 		set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
 	}
 
-	spin_unlock(&init_mm.page_table_lock);
 	return err;
 }
 
@@ -81,7 +78,7 @@ static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
  * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
  */
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 {
 	struct vm_struct *area;
 	unsigned long page, va, pa;
@@ -118,9 +115,7 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 	 */
 	if (order > 0) {
 		struct page *rpage = virt_to_page(page);
-
-		for (i = 1; i < (1 << order); i++)
-			set_page_count(rpage + i, 1);
+		split_page(rpage, order);
 	}
 
 	err = 0;
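The consistent_alloc() change above swaps the open-coded reference-count loop for split_page(), which turns one order-N allocation into 1 << N independent order-0 pages so that each page can later be released on its own with free_page(). A minimal sketch of that pattern (illustrative only, not taken from the patch):

	/* Allocate a high-order block, then split it so the individual
	 * pages can be remapped and freed one at a time. */
	unsigned long vaddr = __get_free_pages(GFP_KERNEL, order);

	if (vaddr) {
		split_page(virt_to_page(vaddr), order);
		/* ... remap the 1 << order pages uncached; each one may
		 * now be returned individually with free_page() ... */
	}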
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index f5a653033fe..836f14707a6 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/elf-fdpic.h>
+#include <asm/mman.h>
 
 /*****************************************************************************/
 /*
@@ -59,11 +60,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			       unsigned long pgoff, unsigned long flags)
 {
 	struct vm_area_struct *vma;
-	unsigned long limit;
+	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+	/* handle MAP_FIXED */
+	if (flags & MAP_FIXED)
+		return addr;
+
 	/* only honour a hint if we're not going to clobber something doing so */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
@@ -74,50 +79,35 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	}
 
 	/* search between the bottom of user VM and the stack grow area */
-	addr = PAGE_SIZE;
-	limit = (current->mm->start_stack - 0x00200000);
-	if (addr + len <= limit) {
-		limit -= len;
-
-		if (addr <= limit) {
-			vma = find_vma(current->mm, PAGE_SIZE);
-			for (; vma; vma = vma->vm_next) {
-				if (addr > limit)
-					break;
-				if (addr + len <= vma->vm_start)
-					goto success;
-				addr = vma->vm_end;
-			}
-		}
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = (current->mm->start_stack - 0x00200000);
+	info.align_mask = 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 	/* search from just above the WorkRAM area to the top of memory */
-	addr = PAGE_ALIGN(0x80000000);
-	limit = TASK_SIZE - len;
-	if (addr <= limit) {
-		vma = find_vma(current->mm, addr);
-		for (; vma; vma = vma->vm_next) {
-			if (addr > limit)
-				break;
-			if (addr + len <= vma->vm_start)
-				goto success;
-			addr = vma->vm_end;
-		}
-
-		if (!vma && addr <= limit)
-			goto success;
-	}
+	info.low_limit = PAGE_ALIGN(0x80000000);
+	info.high_limit = TASK_SIZE;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 #if 0
 	printk("[area] l=%lx (ENOMEM) f='%s'\n",
-	       len, filp ? filp->f_dentry->d_name.name : "");
+	       len, filp ? filp->f_path.dentry->d_name.name : "");
 #endif
 	return -ENOMEM;
 
 success:
 #if 0
 	printk("[area] l=%lx ad=%lx f='%s'\n",
-	       len, addr, filp ? filp->f_dentry->d_name.name : "");
+	       len, addr, filp ? filp->f_path.dentry->d_name.name : "");
 #endif
 	return addr;
 
 } /* end arch_get_unmapped_area() */
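The rewritten arch_get_unmapped_area() above delegates the linear VMA walk to the generic vm_unmapped_area() helper, which searches the window described by a struct vm_unmapped_area_info and returns either a page-aligned address or a negative errno cast to unsigned long. A hedged sketch of the calling convention (the field values mirror the first search window above):

	struct vm_unmapped_area_info info = {
		.flags        = 0,		/* bottom-up search */
		.length       = len,
		.low_limit    = PAGE_SIZE,
		.high_limit   = current->mm->start_stack - 0x00200000,
		.align_mask   = 0,		/* no special alignment */
		.align_offset = 0,
	};
	unsigned long addr = vm_unmapped_area(&info);

	if (IS_ERR_VALUE(addr))			/* only -ENOMEM expected */
		return -ENOMEM;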
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
index 41be1128dc6..6aea124f574 100644
--- a/arch/frv/mm/extable.c
+++ b/arch/frv/mm/extable.c
@@ -2,7 +2,6 @@
  * linux/arch/frv/mm/extable.c
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/uaccess.h>
@@ -43,7 +42,7 @@ static inline unsigned long search_one_table(const struct exception_table_entry
  */
 unsigned long search_exception_table(unsigned long pc)
 {
-	unsigned long ret = 0;
+	const struct exception_table_entry *extab;
 
 	/* determine if the fault lay during a memcpy_user or a memset_user */
 	if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
@@ -55,9 +54,10 @@ unsigned long search_exception_table(unsigned long pc)
 		 */
 		return (unsigned long) &__memset_user_error_handler;
 	}
-	else if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
-		 (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
-		 ) {
+
+	if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
+	    (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
+	    ) {
 		/* the fault occurred in a protected memset
 		 * - we search for the return address (in LR) instead of the program counter
 		 * - it was probably during a copy_to/from_user()
@@ -65,27 +65,10 @@ unsigned long search_exception_table(unsigned long pc)
 		return (unsigned long) &__memcpy_user_error_handler;
 	}
 
-#ifndef CONFIG_MODULES
-	/* there is only the kernel to search.  */
-	ret = search_one_table(__start___ex_table, __stop___ex_table - 1, pc);
-	return ret;
-
-#else
-	/* the kernel is the last "module" -- no need to treat it special */
-	unsigned long flags;
-	struct module *mp;
+	extab = search_exception_tables(pc);
+	if (extab)
+		return extab->fixup;
 
-	spin_lock_irqsave(&modlist_lock, flags);
-
-	for (mp = module_list; mp != NULL; mp = mp->next) {
-		if (mp->ex_table_start == NULL || !(mp->flags & (MOD_RUNNING | MOD_INITIALIZING)))
-			continue;
-		ret = search_one_table(mp->ex_table_start, mp->ex_table_end - 1, pc);
-		if (ret)
-			break;
-	}
+	return 0;
 
-	spin_unlock_irqrestore(&modlist_lock, flags);
-	return ret;
-#endif
 } /* end search_exception_table() */
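For reference, the generic search_exception_tables() used above replaces the old hand-rolled module-list walk: it searches the core kernel's __ex_table and every loaded module's table under the proper locking, and returns the matching entry or NULL, which is why the function can reduce to a lookup plus "return extab->fixup". The tables are arrays of (insn, fixup) pairs kept sorted so the lookup can binary-search (sketch of the era's generic entry layout, shown for orientation):

	/* One entry per fault-capable instruction; insn is the address
	 * of the instruction that may fault, fixup is where execution
	 * resumes if it does. */
	struct exception_table_entry {
		unsigned long insn;
		unsigned long fixup;
	};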
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 41d02ac4823..9a66372fc7c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -20,7 +20,6 @@
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
 
-#include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/gdb-stub.h>
@@ -35,11 +34,12 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
 	pud_t *pue;
 	pte_t *pte;
-	int write;
+	int fault;
 
 #if 0
 	const char *atxc[16] = {
@@ -78,9 +78,12 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(__frame))
+		flags |= FAULT_FLAG_USER;
+
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
  */
 good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (esr0 & ESR0_ATXC) {
 	default:
 		/* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 
 		/* handle read from protected page */
@@ -162,18 +164,18 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, ear0, write)) {
-	case 1:
-		current->min_flt++;
-		break;
-	case 2:
-		current->maj_flt++;
-		break;
-	case 0:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, ear0, flags);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
@@ -256,10 +258,10 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk("VM: killing process %s\n", current->comm);
-	if (user_mode(__frame))
-		do_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(__frame))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index 7dc8fbf3af9..bed9a9bd3c1 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/highmem.h>
+#include <linux/module.h>
 
 void *kmap(struct page *page)
 {
@@ -18,6 +19,8 @@ void *kmap(struct page *page)
 	return kmap_high(page);
 }
 
+EXPORT_SYMBOL(kmap);
+
 void kunmap(struct page *page)
 {
 	if (in_interrupt())
@@ -27,7 +30,60 @@ void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
+EXPORT_SYMBOL(kunmap);
+
 struct page *kmap_atomic_to_page(void *ptr)
 {
 	return virt_to_page(ptr);
 }
+
+void *kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(0, paddr, 6);
+	case 1:		return __kmap_atomic_primary(0, paddr, 7);
+	case 2:		return __kmap_atomic_primary(0, paddr, 8);
+	case 3:		return __kmap_atomic_primary(0, paddr, 9);
+	case 4:		return __kmap_atomic_primary(0, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx();
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(0, 6);	break;
+	case 1:		__kunmap_atomic_primary(0, 7);	break;
+	case 2:		__kunmap_atomic_primary(0, 8);	break;
+	case 3:		__kunmap_atomic_primary(0, 9);	break;
+	case 4:		__kunmap_atomic_primary(0, 10);	break;
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	kmap_atomic_idx_pop();
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
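The new kmap_atomic()/__kunmap_atomic() pair above is built on the stack-like kmap_atomic_idx_push()/kmap_atomic_idx_pop() bookkeeping, so nested atomic mappings must be released in the reverse order of creation, with pagefaults disabled for the whole window. A typical caller looks like this (sketch; src_page and dst_page are illustrative names):

	void *src = kmap_atomic(src_page);
	void *dst = kmap_atomic(dst_page);

	memcpy(dst, src, PAGE_SIZE);

	kunmap_atomic(dst);	/* most recently mapped, unmapped first */
	kunmap_atomic(src);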
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 79433159b5f..88a15974352 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -16,10 +16,10 @@
  *  - Copyright (C) 1995  Hamish Macdonald
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 #include <linux/swap.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
@@ -27,12 +27,12 @@
 #include <linux/types.h>
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 
 #include <asm/setup.h>
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/virtconvert.h>
 #include <asm/sections.h>
@@ -40,8 +40,6 @@
 
 #undef DEBUG
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -57,38 +55,9 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  */
 static unsigned long empty_bad_page_table;
 static unsigned long empty_bad_page;
-unsigned long empty_zero_page;
-
-/*****************************************************************************/
-/*
- *
- */
-void show_mem(void)
-{
-	unsigned long i;
-	int free = 0, total = 0, reserved = 0, shared = 0;
-
-	printk("\nMem-info:\n");
-	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		struct page *page = &mem_map[i];
-
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (!page_count(page))
-			free++;
-		else
-			shared += page_count(page) - 1;
-	}
-	printk("%d pages of RAM\n",total);
-	printk("%d free pages\n",free);
-	printk("%d reserved pages\n",reserved);
-	printk("%d pages shared\n",shared);
-
-} /* end show_mem() */
+
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 /*****************************************************************************/
 /*
@@ -99,7 +68,7 @@ void show_mem(void)
  */
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+	unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
 	/* allocate some pages for kernel housekeeping tasks */
 	empty_bad_page_table	= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
@@ -108,16 +77,14 @@ void __init paging_init(void)
 
 	memset((void *) empty_zero_page, 0, PAGE_SIZE);
 
-#if CONFIG_HIGHMEM
-	if (num_physpages - num_mappedpages) {
+#ifdef CONFIG_HIGHMEM
+	if (get_num_physpages() - num_mappedpages) {
 		pgd_t *pge;
 		pud_t *pue;
 		pmd_t *pme;
 
 		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);
 
-		memset(pkmap_page_table, 0, PAGE_SIZE);
-
 		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
 		pue = pud_offset(pge, PKMAP_BASE);
 		pme = pmd_offset(pue, PKMAP_BASE);
@@ -127,10 +94,9 @@ void __init paging_init(void)
 
 	/* distribute the allocatable pages across the various zones and pass them to the allocator
 	 */
-	zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
-	zones_size[ZONE_NORMAL]	= 0;
+	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
+	zones_size[ZONE_HIGHMEM] = get_num_physpages() - num_mappedpages;
 #endif
 
 	free_area_init(zones_size);
@@ -148,75 +114,34 @@ void __init paging_init(void)
  */
 void __init mem_init(void)
 {
-	unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
-	unsigned long tmp;
-#ifdef CONFIG_MMU
-	unsigned long loop, pfn;
-	int datapages = 0;
-#endif
-	int codek = 0, datak = 0;
-
-	/* this will put all memory onto the freelists */
-	totalram_pages = free_all_bootmem();
+	unsigned long code_size = _etext - _stext;
 
-#ifdef CONFIG_MMU
-	for (loop = 0 ; loop < npages ; loop++)
-		if (PageReserved(&mem_map[loop]))
-			datapages++;
+	/* this will put all low memory onto the freelists */
+	free_all_bootmem();
+#if defined(CONFIG_MMU) && defined(CONFIG_HIGHMEM)
+	{
+		unsigned long pfn;
 
-#ifdef CONFIG_HIGHMEM
-	for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
-		struct page *page = &mem_map[pfn];
-
-		ClearPageReserved(page);
-		set_page_count(page, 1);
-		__free_page(page);
-		totalram_pages++;
+		for (pfn = get_num_physpages() - 1;
+		     pfn >= num_mappedpages; pfn--)
+			free_highmem_page(&mem_map[pfn]);
 	}
 #endif
 
-	codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
-	datak = datapages << (PAGE_SHIFT - 10);
-
-#else
-	codek = (_etext - _stext) >> 10;
-	datak = 0; //(_ebss - _sdata) >> 10;
-#endif
-
-	tmp = nr_free_pages() << PAGE_SHIFT;
-	printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
-	       tmp >> 10,
-	       npages << (PAGE_SHIFT - 10),
-	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
-	       rom_length >> 10,
-	       codek,
-	       datak
-	       );
-
+	mem_init_print_info(NULL);
+	if (rom_length > 0 && rom_length >= code_size)
+		printk("Memory available: %luKiB/%luKiB ROM\n",
+		       (rom_length - code_size) >> 10, rom_length >> 10);
 } /* end mem_init() */
 
 /*****************************************************************************/
 /*
  * free the memory that was only required for initialisation
  */
-void __init free_initmem(void)
+void free_initmem(void)
 {
 #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
-	unsigned long start, end, addr;
-
-	start = PAGE_ALIGN((unsigned long) &__init_begin);	/* round up */
-	end = ((unsigned long) &__init_end) & PAGE_MASK;	/* round down */
-
-	/* next to check that the page we free is not a partial page */
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		set_page_count(virt_to_page(addr), 1);
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
-	       (end - start) >> 10, start, end);
+	free_initmem_default(-1);
 #endif
 } /* end free_initmem() */
 
@@ -227,14 +152,6 @@ void __init free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	int pages = 0;
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		set_page_count(virt_to_page(start), 1);
-		free_page(start);
-		totalram_pages++;
-		pages++;
-	}
-	printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 } /* end free_initrd_mem() */
 #endif
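The helpers adopted above (free_highmem_page(), free_initmem_default(), free_reserved_area()) centralise the ClearPageReserved/init-refcount/__free_page/totalram_pages bookkeeping that this file previously open-coded. As a point of reference, free_initmem_default(-1) expands to roughly the following, per the generic <linux/mm.h> definition of that era (-1 means "no poisoning"):

	extern char __init_begin[], __init_end[];

	/* releases [__init_begin, __init_end) to the page allocator and
	 * prints the familiar "Freeing unused kernel memory" line */
	free_reserved_area(&__init_begin, &__init_end, -1, "unused kernel");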
diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c
index 539f45e6d15..e9217e605aa 100644
--- a/arch/frv/mm/kmap.c
+++ b/arch/frv/mm/kmap.c
@@ -10,7 +10,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -22,7 +21,6 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/io.h>
-#include <asm/system.h>
 
 #undef DEBUG
 
@@ -31,24 +29,15 @@
 /*
  * Map some physical address range into the kernel address space.
  */
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
 {
-	return (void *)physaddr;
+	return (void __iomem *)physaddr;
 }
 
 /*
  * Unmap a ioremap()ed region again
 */
-void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
+void iounmap(void volatile __iomem *addr)
 {
 }
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index f2c6866fc88..81757d55a5b 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx)
 	/* find the first unallocated context number
 	 * - 0 is reserved for the kernel
 	 */
-	cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1);
+	cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1);
 	if (cxn < NR_CXN) {
-		set_bit(cxn, &cxn_bitmap);
+		set_bit(cxn, cxn_bitmap);
 	}
 	else {
 		/* none remaining - need to steal someone else's cxn */
@@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm)
 		cxn_pinned = -1;
 
 		list_del_init(&ctx->id_link);
-		clear_bit(ctx->id, &cxn_bitmap);
+		clear_bit(ctx->id, cxn_bitmap);
 		__flush_tlb_mm(ctx->id);
 		ctx->id = 0;
 	}
@@ -181,7 +181,7 @@ int cxn_pin_by_pid(pid_t pid)
 
 	/* get a handle on the mm_struct */
 	read_lock(&tasklist_lock);
-	tsk = find_task_by_pid(pid);
+	tsk = find_task_by_vpid(pid);
 	if (tsk) {
 		ret = -EINVAL;
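The mmu-context.c hunks drop a stray '&': cxn_bitmap is an array of unsigned long, which already decays to the unsigned long * that the bitops expect, so &cxn_bitmap had the wrong pointer type even though it yields the same address. The allocation idiom itself is unchanged; sketched here with hypothetical names:

	#define NR_IDS 64
	static unsigned long id_bitmap[BITS_TO_LONGS(NR_IDS)];

	/* first-fit allocation; bit 0 stays reserved (like the kernel CXN) */
	unsigned id = find_next_zero_bit(id_bitmap, NR_IDS, 1);

	if (id < NR_IDS)
		set_bit(id, id_bitmap);		/* claim it */
	/* id == NR_IDS means none free - steal one or fail */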
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 4eaec0f3525..41907d25ed3 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -10,15 +10,15 @@
  */
 
 #include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/quicklist.h>
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -28,7 +28,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *page;
 
@@ -37,8 +37,14 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 #else
 	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
-	if (page)
-		clear_highpage(page);
+	if (!page)
+		return NULL;
+
+	clear_highpage(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	flush_dcache_page(page);
 	return page;
 }
@@ -75,7 +81,7 @@ void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
 */
 DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
@@ -85,22 +91,22 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 	page->index = (unsigned long) pgd_list;
 	if (pgd_list)
-		pgd_list->private = (unsigned long) &page->index;
+		set_page_private(pgd_list, (unsigned long) &page->index);
 	pgd_list = page;
-	page->private = (unsigned long) &pgd_list;
+	set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *) page->index;
-	pprev = (struct page **) page->private;
+	pprev = (struct page **) page_private(page);
 	*pprev = next;
 	if (next)
-		next->private = (unsigned long) pprev;
+		set_page_private(next, (unsigned long) pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	unsigned long flags;
 
@@ -120,7 +126,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
@@ -131,29 +137,21 @@ void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd;
-
-	pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-	if (!pgd)
-		return pgd;
-
-	return pgd;
+	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
 }
 
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 }
 
 void __init pgtable_cache_init(void)
 {
-	pgd_cache = kmem_cache_create("pgd",
-				      PTRS_PER_PGD * sizeof(pgd_t),
-				      PTRS_PER_PGD * sizeof(pgd_t),
-				      0,
-				      pgd_ctor,
-				      pgd_dtor);
-	if (!pgd_cache)
-		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
+
+void check_pgt_cache(void)
+{
+	quicklist_trim(0, pgd_dtor, 25, 16);
+}
+
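quicklist_alloc()/quicklist_free() (<linux/quicklist.h>) replace the dedicated pgd slab above: freed pgds are parked on a small per-CPU list still in constructed form, so a recycled page skips pgd_ctor(), and check_pgt_cache() periodically trims the list back. The lifecycle, sketched:

	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
						/* ctor runs only for pages
						 * fresh from the allocator */
	/* ... use the page directory ... */
	quicklist_free(0, pgd_dtor, pgd);	/* parked per-CPU; the dtor
						 * is deferred until trim */

	/* from check_pgt_cache(): keep up to 25 cached pages, freeing
	 * at most 16 back to the buddy allocator per call */
	quicklist_trim(0, pgd_dtor, 25, 16);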
diff --git a/arch/frv/mm/tlb-flush.S b/arch/frv/mm/tlb-flush.S
index 6f43c74c5d9..79b3c70910a 100644
--- a/arch/frv/mm/tlb-flush.S
+++ b/arch/frv/mm/tlb-flush.S
@@ -10,7 +10,6 @@
  */
 
 #include <linux/sys.h>
-#include <linux/config.h>
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S
index 8729f7d7c6e..f3ac019bb18 100644
--- a/arch/frv/mm/tlb-miss.S
+++ b/arch/frv/mm/tlb-miss.S
@@ -10,14 +10,12 @@
  */
 
 #include <linux/sys.h>
-#include <linux/config.h>
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/highmem.h>
 #include <asm/spr-regs.h>
 
-	.section	.text
+	.section	.text..tlbmiss
 	.balign		4
 
 	.globl		__entry_insn_mmu_miss
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c
deleted file mode 100644
index 09b361443fc..00000000000
--- a/arch/frv/mm/unaligned.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/* unaligned.c: unalignment fixup handler for CPUs on which it is supported (FR451 only)
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#if 0
-#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
-#else
-#define kdebug(fmt, ...) do {} while(0)
-#endif
-
-#define _MA_SIGNED	0x01
-#define _MA_HALF	0x02
-#define _MA_WORD	0x04
-#define _MA_DWORD	0x08
-#define _MA_SZ_MASK	0x0e
-#define _MA_LOAD	0x10
-#define _MA_STORE	0x20
-#define _MA_UPDATE	0x40
-#define _MA_IMM		0x80
-
-#define _MA_LDxU	_MA_LOAD | _MA_UPDATE
-#define _MA_LDxI	_MA_LOAD | _MA_IMM
-#define _MA_STxU	_MA_STORE | _MA_UPDATE
-#define _MA_STxI	_MA_STORE | _MA_IMM
-
-static const uint8_t tbl_LDGRk_reg[0x40] = {
-	[0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED,	/* LDSH  @(GRi,GRj),GRk */
-	[0x03] = _MA_LOAD | _MA_HALF,			/* LDUH  @(GRi,GRj),GRk */
-	[0x04] = _MA_LOAD | _MA_WORD,			/* LD    @(GRi,GRj),GRk */
-	[0x05] = _MA_LOAD | _MA_DWORD,			/* LDD   @(GRi,GRj),GRk */
-	[0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED,	/* LDSHU @(GRi,GRj),GRk */
-	[0x13] = _MA_LDxU | _MA_HALF,			/* LDUHU @(GRi,GRj),GRk */
-	[0x14] = _MA_LDxU | _MA_WORD,			/* LDU   @(GRi,GRj),GRk */
-	[0x15] = _MA_LDxU | _MA_DWORD,			/* LDDU  @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_STGRk_reg[0x40] = {
-	[0x01] = _MA_STORE | _MA_HALF,			/* STH  @(GRi,GRj),GRk */
-	[0x02] = _MA_STORE | _MA_WORD,			/* ST   @(GRi,GRj),GRk */
-	[0x03] = _MA_STORE | _MA_DWORD,			/* STD  @(GRi,GRj),GRk */
-	[0x11] = _MA_STxU  | _MA_HALF,			/* STHU @(GRi,GRj),GRk */
-	[0x12] = _MA_STxU  | _MA_WORD,			/* STU  @(GRi,GRj),GRk */
-	[0x13] = _MA_STxU  | _MA_DWORD,			/* STDU @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_LDSTGRk_imm[0x80] = {
-	[0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED,	/* LDSHI @(GRi,d12),GRk */
-	[0x32] = _MA_LDxI | _MA_WORD,			/* LDI   @(GRi,d12),GRk */
-	[0x33] = _MA_LDxI | _MA_DWORD,			/* LDDI  @(GRi,d12),GRk */
-	[0x36] = _MA_LDxI | _MA_HALF,			/* LDUHI @(GRi,d12),GRk */
-	[0x51] = _MA_STxI | _MA_HALF,			/* STHI  @(GRi,d12),GRk */
-	[0x52] = _MA_STxI | _MA_WORD,			/* STI   @(GRi,d12),GRk */
-	[0x53] = _MA_STxI | _MA_DWORD,			/* STDI  @(GRi,d12),GRk */
-};
-
-
-/*****************************************************************************/
-/*
- * see if we can handle the exception by fixing up a misaligned memory access
- */
-int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0)
-{
-	unsigned long insn, addr, *greg;
-	int GRi, GRj, GRk, D12, op;
-
-	union {
-		uint64_t _64;
-		uint32_t _32[2];
-		uint16_t _16;
-		uint8_t _8[8];
-	} x;
-
-	if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7))
-		return -EAGAIN;
-
-	epcr0 &= EPCR0_PC;
-
-	if (__frame->pc != epcr0) {
-		kdebug("MISALIGN: Execution not halted on excepting instruction\n");
-		BUG();
-	}
-
-	if (__get_user(insn, (unsigned long *) epcr0) < 0)
-		return -EFAULT;
-
-	/* determine the instruction type first */
-	switch ((insn >> 18) & 0x7f) {
-	case 0x2:
-		/* LDx @(GRi,GRj),GRk */
-		op = tbl_LDGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	case 0x3:
-		/* STx GRk,@(GRi,GRj) */
-		op = tbl_STGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	default:
-		op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f];
-		break;
-	}
-
-	if (!op)
-		return -EAGAIN;
-
-	kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op);
-
-	memset(&x, 0xba, 8);
-
-	/* validate the instruction parameters */
-	greg = (unsigned long *) &__frame->tbr;
-
-	GRi = (insn >> 12) & 0x3f;
-	GRk = (insn >> 25) & 0x3f;
-
-	if (GRi > 31 || GRk > 31)
-		return -ENOENT;
-
-	if (op & _MA_DWORD && GRk & 1)
-		return -EINVAL;
-
-	if (op & _MA_IMM) {
-		D12 = insn & 0xfff;
-		asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */
-		addr = (GRi ? greg[GRi] : 0) + D12;
-	}
-	else {
-		GRj = (insn >> 0) & 0x3f;
-		if (GRj > 31)
-			return -ENOENT;
-		addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0);
-	}
-
-	if (addr != ear0) {
-		kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n",
-		       addr, ear0);
-		return -EFAULT;
-	}
-
-	/* check the address is okay */
-	if (user_mode(__frame) && ___range_ok(ear0, 8) < 0)
-		return -EFAULT;
-
-	/* perform the memory op */
-	if (op & _MA_STORE) {
-		/* perform a store */
-		x._32[0] = 0;
-		if (GRk != 0) {
-			if (op & _MA_HALF) {
-				x._16 = greg[GRk];
-			}
-			else {
-				x._32[0] = greg[GRk];
-			}
-		}
-		if (op & _MA_DWORD)
-			x._32[1] = greg[GRk + 1];
-
-		kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n",
-		       GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK);
-
-		if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-	}
-	else {
-		/* perform a load */
-		if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-
-		if (op & _MA_HALF) {
-			if (op & _MA_SIGNED)
-				asm ("slli %0,#16,%0 ! srai %0,#16,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-			else
-				asm ("sethi #0,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-		}
-
-		kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n",
-		       addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]);
-
-		if (GRk != 0)
-			greg[GRk] = x._32[0];
-		if (op & _MA_DWORD)
-			greg[GRk + 1] = x._32[1];
-	}
-
-	/* update the base pointer if required */
-	if (op & _MA_UPDATE)
-		greg[GRi] = addr;
-
-	/* well... we've done that insn */
-	__frame->pc = __frame->pc + 4;
-
-	return 0;
-} /* end handle_misalignment() */
