Diffstat (limited to 'arch/frv/mm')
-rw-r--r--   arch/frv/mm/Makefile       |    2
-rw-r--r--   arch/frv/mm/cache-page.c   |    8
-rw-r--r--   arch/frv/mm/dma-alloc.c    |    3
-rw-r--r--   arch/frv/mm/elf-fdpic.c    |   58
-rw-r--r--   arch/frv/mm/fault.c        |   42
-rw-r--r--   arch/frv/mm/highmem.c      |   50
-rw-r--r--   arch/frv/mm/init.c         |  128
-rw-r--r--   arch/frv/mm/kmap.c         |    1
-rw-r--r--   arch/frv/mm/mmu-context.c  |    2
-rw-r--r--   arch/frv/mm/pgalloc.c      |   48
-rw-r--r--   arch/frv/mm/tlb-miss.S     |    3
-rw-r--r--   arch/frv/mm/unaligned.c    |  217
12 files changed, 150 insertions, 412 deletions
diff --git a/arch/frv/mm/Makefile b/arch/frv/mm/Makefile
index fb8b1d860f4..1bca5ab8a6a 100644
--- a/arch/frv/mm/Makefile
+++ b/arch/frv/mm/Makefile
@@ -6,4 +6,4 @@ obj-y := init.o kmap.o
 
 obj-$(CONFIG_MMU) += \
 	pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
-	mmu-context.o dma-alloc.o unaligned.o elf-fdpic.o
+	mmu-context.o dma-alloc.o elf-fdpic.o
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 0261cbe153b..8e09dae0ec3 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page);
 
 	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
 	dampr2 = __get_DAMPR(2);
 
-	vaddr = kmap_atomic(page, __KM_CACHE);
+	vaddr = kmap_atomic_primary(page);
 
 	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
 	frv_cache_wback_inv(start, start + len);
 
-	kunmap_atomic(vaddr, __KM_CACHE);
+	kunmap_atomic_primary(vaddr);
 
 	if (dampr2) {
 		__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index dc6522c464d..7a73aaeae3a 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -36,10 +36,11 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
 #include <asm/io.h>
-#include <asm/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index f5a653033fe..836f14707a6 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/elf-fdpic.h>
+#include <asm/mman.h>
 
 /*****************************************************************************/
 /*
@@ -59,11 +60,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 				     unsigned long pgoff, unsigned long flags)
 {
 	struct vm_area_struct *vma;
-	unsigned long limit;
+	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+	/* handle MAP_FIXED */
+	if (flags & MAP_FIXED)
+		return addr;
+
 	/* only honour a hint if we're not going to clobber something doing so */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
@@ -74,50 +79,35 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	}
 
 	/* search between the bottom of user VM and the stack grow area */
-	addr = PAGE_SIZE;
-	limit = (current->mm->start_stack - 0x00200000);
-	if (addr + len <= limit) {
-		limit -= len;
-
-		if (addr <= limit) {
-			vma = find_vma(current->mm, PAGE_SIZE);
-			for (; vma; vma = vma->vm_next) {
-				if (addr > limit)
-					break;
-				if (addr + len <= vma->vm_start)
-					goto success;
-				addr = vma->vm_end;
-			}
-		}
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = (current->mm->start_stack - 0x00200000);
+	info.align_mask = 0;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 	/* search from just above the WorkRAM area to the top of memory */
-	addr = PAGE_ALIGN(0x80000000);
-	limit = TASK_SIZE - len;
-	if (addr <= limit) {
-		vma = find_vma(current->mm, addr);
-		for (; vma; vma = vma->vm_next) {
-			if (addr > limit)
-				break;
-			if (addr + len <= vma->vm_start)
-				goto success;
-			addr = vma->vm_end;
-		}
-
-		if (!vma && addr <= limit)
-			goto success;
-	}
+	info.low_limit = PAGE_ALIGN(0x80000000);
+	info.high_limit = TASK_SIZE;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto success;
+	VM_BUG_ON(addr != -ENOMEM);
 
 #if 0
 	printk("[area] l=%lx (ENOMEM) f='%s'\n",
-	       len, filp ? filp->f_dentry->d_name.name : "");
+	       len, filp ? filp->f_path.dentry->d_name.name : "");
 #endif
 	return -ENOMEM;
 
 success:
 #if 0
 	printk("[area] l=%lx ad=%lx f='%s'\n",
-	       len, addr, filp ? filp->f_dentry->d_name.name : "");
+	       len, addr, filp ? filp->f_path.dentry->d_name.name : "");
 #endif
 	return addr;
 } /* end arch_get_unmapped_area() */
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 8b3eb50c510..9a66372fc7c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -20,7 +20,6 @@
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
 
-#include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 #include <asm/gdb-stub.h>
@@ -35,11 +34,12 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
 	pud_t *pue;
 	pte_t *pte;
-	int write;
+	int fault;
 
 #if 0
 	const char *atxc[16] = {
@@ -78,9 +78,12 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(__frame))
+		flags |= FAULT_FLAG_USER;
+
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 */
  good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (esr0 & ESR0_ATXC) {
 	default:
 		/* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write = 1;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 
 		/* handle read from protected page */
@@ -162,18 +164,18 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	switch (handle_mm_fault(mm, vma, ear0, write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		goto do_sigbus;
-	default:
-		goto out_of_memory;
+	fault = handle_mm_fault(mm, vma, ear0, flags);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
@@ -256,10 +258,10 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk("VM: killing process %s\n", current->comm);
-	if (user_mode(__frame))
-		do_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(__frame))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index 7f77db7fabc..bed9a9bd3c1 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -37,5 +37,53 @@ struct page *kmap_atomic_to_page(void *ptr)
 	return virt_to_page(ptr);
 }
 
+void *kmap_atomic(struct page *page)
+{
+	unsigned long paddr;
+	int type;
+
+	pagefault_disable();
+	type = kmap_atomic_idx_push();
+	paddr = page_to_phys(page);
+
+	switch (type) {
+	/*
+	 * The first 4 primary maps are reserved for architecture code
+	 */
+	case 0:		return __kmap_atomic_primary(0, paddr, 6);
+	case 1:		return __kmap_atomic_primary(0, paddr, 7);
+	case 2:		return __kmap_atomic_primary(0, paddr, 8);
+	case 3:		return __kmap_atomic_primary(0, paddr, 9);
+	case 4:		return __kmap_atomic_primary(0, paddr, 10);
+
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		return __kmap_atomic_secondary(type - 5, paddr);
+
+	default:
+		BUG();
+		return NULL;
+	}
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	int type = kmap_atomic_idx();
+	switch (type) {
+	case 0:		__kunmap_atomic_primary(0, 6);	break;
+	case 1:		__kunmap_atomic_primary(0, 7);	break;
+	case 2:		__kunmap_atomic_primary(0, 8);	break;
+	case 3:		__kunmap_atomic_primary(0, 9);	break;
+	case 4:		__kunmap_atomic_primary(0, 10);	break;
 
-EXPORT_SYMBOL(kmap_atomic_to_page);
+	case 5 ... 5 + NR_TLB_LINES - 1:
+		__kunmap_atomic_secondary(type - 5, kvaddr);
+		break;
+
+	default:
+		BUG();
+	}
+	kmap_atomic_idx_pop();
+	pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 3f3a0ed3539..88a15974352 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 #include <linux/swap.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
@@ -26,12 +27,12 @@
 #include <linux/types.h>
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 
 #include <asm/setup.h>
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/virtconvert.h>
 #include <asm/sections.h>
@@ -39,8 +40,6 @@
 
 #undef DEBUG
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -56,38 +55,9 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  */
 static unsigned long empty_bad_page_table;
 static unsigned long empty_bad_page;
-unsigned long empty_zero_page;
-
-/*****************************************************************************/
-/*
- *
- */
-void show_mem(void)
-{
-	unsigned long i;
-	int free = 0, total = 0, reserved = 0, shared = 0;
-
-	printk("\nMem-info:\n");
-	show_free_areas();
-	i = max_mapnr;
-	while (i-- > 0) {
-		struct page *page = &mem_map[i];
-
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (!page_count(page))
-			free++;
-		else
-			shared += page_count(page) - 1;
-	}
-
-	printk("%d pages of RAM\n",total);
-	printk("%d free pages\n",free);
-	printk("%d reserved pages\n",reserved);
-	printk("%d pages shared\n",shared);
-
-} /* end show_mem() */
+
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 /*****************************************************************************/
 /*
@@ -108,15 +78,13 @@ void __init paging_init(void)
 	memset((void *) empty_zero_page, 0, PAGE_SIZE);
 
 #ifdef CONFIG_HIGHMEM
-	if (num_physpages - num_mappedpages) {
+	if (get_num_physpages() - num_mappedpages) {
 		pgd_t *pge;
 		pud_t *pue;
 		pmd_t *pme;
 
 		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);
 
-		memset(pkmap_page_table, 0, PAGE_SIZE);
-
 		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
 		pue = pud_offset(pge, PKMAP_BASE);
 		pme = pmd_offset(pue, PKMAP_BASE);
@@ -126,10 +94,9 @@ void __init paging_init(void)
 	/* distribute the allocatable pages across the various zones and pass them to the
 	 * allocator
 	 */
-	zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
-	zones_size[ZONE_NORMAL] = 0;
+	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
+	zones_size[ZONE_HIGHMEM] = get_num_physpages() - num_mappedpages;
 #endif
 
 	free_area_init(zones_size);
@@ -147,75 +114,34 @@ void __init paging_init(void)
  */
 void __init mem_init(void)
 {
-	unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
-	unsigned long tmp;
-#ifdef CONFIG_MMU
-	unsigned long loop, pfn;
-	int datapages = 0;
-#endif
-	int codek = 0, datak = 0;
+	unsigned long code_size = _etext - _stext;
 
-	/* this will put all memory onto the freelists */
-	totalram_pages = free_all_bootmem();
+	/* this will put all low memory onto the freelists */
+	free_all_bootmem();
+#if defined(CONFIG_MMU) && defined(CONFIG_HIGHMEM)
+	{
+		unsigned long pfn;
 
-#ifdef CONFIG_MMU
-	for (loop = 0 ; loop < npages ; loop++)
-		if (PageReserved(&mem_map[loop]))
-			datapages++;
-
-#ifdef CONFIG_HIGHMEM
-	for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
-		struct page *page = &mem_map[pfn];
-
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-		totalram_pages++;
+		for (pfn = get_num_physpages() - 1;
+		     pfn >= num_mappedpages; pfn--)
+			free_highmem_page(&mem_map[pfn]);
 	}
 #endif
 
-	codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
-	datak = datapages << (PAGE_SHIFT - 10);
-
-#else
-	codek = (_etext - _stext) >> 10;
-	datak = 0; //(_ebss - _sdata) >> 10;
-#endif
-
-	tmp = nr_free_pages() << PAGE_SHIFT;
-	printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
-	       tmp >> 10,
-	       npages << (PAGE_SHIFT - 10),
-	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
-	       rom_length >> 10,
-	       codek,
-	       datak
-	       );
-
+	mem_init_print_info(NULL);
+	if (rom_length > 0 && rom_length >= code_size)
+		printk("Memory available: %luKiB/%luKiB ROM\n",
+			(rom_length - code_size) >> 10, rom_length >> 10);
 } /* end mem_init() */
 
 /*****************************************************************************/
 /*
  * free the memory that was only required for initialisation
  */
-void __init free_initmem(void)
+void free_initmem(void)
 {
 #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
-	unsigned long start, end, addr;
-
-	start = PAGE_ALIGN((unsigned long) &__init_begin);	/* round up */
-	end = ((unsigned long) &__init_end) & PAGE_MASK;	/* round down */
-
-	/* next to check that the page we free is not a partial page */
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		free_page(addr);
-		totalram_pages++;
-	}
-
-	printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
-	       (end - start) >> 10, start, end);
+	free_initmem_default(-1);
 #endif
 } /* end free_initmem() */
 
@@ -226,14 +152,6 @@ void __init free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	int pages = 0;
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-		pages++;
-	}
-	printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 } /* end free_initrd_mem() */
 #endif
diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c
index fb78be38ea0..e9217e605aa 100644
--- a/arch/frv/mm/kmap.c
+++ b/arch/frv/mm/kmap.c
@@ -21,7 +21,6 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/io.h>
-#include <asm/system.h>
 
 #undef DEBUG
 
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index 1530a4111e6..81757d55a5b 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -181,7 +181,7 @@ int cxn_pin_by_pid(pid_t pid)
 
 	/* get a handle on the mm_struct */
 	read_lock(&tasklist_lock);
-	tsk = find_task_by_pid(pid);
+	tsk = find_task_by_vpid(pid);
 	if (tsk) {
 		ret = -EINVAL;
 
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index f76dd03ddd9..41907d25ed3 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -10,15 +10,15 @@
  */
 
 #include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/quicklist.h>
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -28,7 +28,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *page;
 
@@ -37,8 +37,14 @@
 #else
 	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
-	if (page)
-		clear_highpage(page);
+	if (!page)
+		return NULL;
+
+	clear_highpage(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
 	flush_dcache_page(page);
 	return page;
 }
@@ -75,7 +81,7 @@ void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
  * checks at dup_mmap(), exec(), and other mmlist addition points
  * could be used. The locking scheme was chosen on the basis of
  * manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
  */
 DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
@@ -100,7 +106,7 @@ static inline void pgd_list_del(pgd_t *pgd)
 		set_page_private(next, (unsigned long) pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
 	unsigned long flags;
 
@@ -120,7 +126,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd)
 {
 	unsigned long flags; /* can be called from interrupt context */
 
@@ -131,29 +137,21 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd;
-
-	pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-	if (!pgd)
-		return pgd;
-
-	return pgd;
+	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
 }
 
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
 	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
-	kmem_cache_free(pgd_cache, pgd);
+	quicklist_free(0, pgd_dtor, pgd);
 }
 
 void __init pgtable_cache_init(void)
 {
-	pgd_cache = kmem_cache_create("pgd",
-				      PTRS_PER_PGD * sizeof(pgd_t),
-				      PTRS_PER_PGD * sizeof(pgd_t),
-				      0,
-				      pgd_ctor,
-				      pgd_dtor);
-	if (!pgd_cache)
-		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
+
+void check_pgt_cache(void)
+{
+	quicklist_trim(0, pgd_dtor, 25, 16);
+}
+
diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S
index 04da6746837..f3ac019bb18 100644
--- a/arch/frv/mm/tlb-miss.S
+++ b/arch/frv/mm/tlb-miss.S
@@ -13,10 +13,9 @@
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/highmem.h>
 #include <asm/spr-regs.h>
 
-	.section	.text
+	.section	.text..tlbmiss
 	.balign		4
 
 	.globl		__entry_insn_mmu_miss
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c
deleted file mode 100644
index 8f0375fc15a..00000000000
--- a/arch/frv/mm/unaligned.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* unaligned.c: unalignment fixup handler for CPUs on which it is supported (FR451 only)
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#if 0
-#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
-#else
-#define kdebug(fmt, ...) do {} while(0)
-#endif
-
-#define _MA_SIGNED	0x01
-#define _MA_HALF	0x02
-#define _MA_WORD	0x04
-#define _MA_DWORD	0x08
-#define _MA_SZ_MASK	0x0e
-#define _MA_LOAD	0x10
-#define _MA_STORE	0x20
-#define _MA_UPDATE	0x40
-#define _MA_IMM		0x80
-
-#define _MA_LDxU	_MA_LOAD | _MA_UPDATE
-#define _MA_LDxI	_MA_LOAD | _MA_IMM
-#define _MA_STxU	_MA_STORE | _MA_UPDATE
-#define _MA_STxI	_MA_STORE | _MA_IMM
-
-static const uint8_t tbl_LDGRk_reg[0x40] = {
-	[0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED,	/* LDSH  @(GRi,GRj),GRk */
-	[0x03] = _MA_LOAD | _MA_HALF,			/* LDUH  @(GRi,GRj),GRk */
-	[0x04] = _MA_LOAD | _MA_WORD,			/* LD    @(GRi,GRj),GRk */
-	[0x05] = _MA_LOAD | _MA_DWORD,			/* LDD   @(GRi,GRj),GRk */
-	[0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED,	/* LDSHU @(GRi,GRj),GRk */
-	[0x13] = _MA_LDxU | _MA_HALF,			/* LDUHU @(GRi,GRj),GRk */
-	[0x14] = _MA_LDxU | _MA_WORD,			/* LDU   @(GRi,GRj),GRk */
-	[0x15] = _MA_LDxU | _MA_DWORD,			/* LDDU  @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_STGRk_reg[0x40] = {
-	[0x01] = _MA_STORE | _MA_HALF,			/* STH  @(GRi,GRj),GRk */
-	[0x02] = _MA_STORE | _MA_WORD,			/* ST   @(GRi,GRj),GRk */
-	[0x03] = _MA_STORE | _MA_DWORD,			/* STD  @(GRi,GRj),GRk */
-	[0x11] = _MA_STxU | _MA_HALF,			/* STHU @(GRi,GRj),GRk */
-	[0x12] = _MA_STxU | _MA_WORD,			/* STU  @(GRi,GRj),GRk */
-	[0x13] = _MA_STxU | _MA_DWORD,			/* STDU @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_LDSTGRk_imm[0x80] = {
-	[0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED,	/* LDSHI @(GRi,d12),GRk */
-	[0x32] = _MA_LDxI | _MA_WORD,			/* LDI   @(GRi,d12),GRk */
-	[0x33] = _MA_LDxI | _MA_DWORD,			/* LDDI  @(GRi,d12),GRk */
-	[0x36] = _MA_LDxI | _MA_HALF,			/* LDUHI @(GRi,d12),GRk */
-	[0x51] = _MA_STxI | _MA_HALF,			/* STHI  @(GRi,d12),GRk */
-	[0x52] = _MA_STxI | _MA_WORD,			/* STI   @(GRi,d12),GRk */
-	[0x53] = _MA_STxI | _MA_DWORD,			/* STDI  @(GRi,d12),GRk */
-};
-
-
-/*****************************************************************************/
-/*
- * see if we can handle the exception by fixing up a misaligned memory access
- */
-int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0)
-{
-	unsigned long insn, addr, *greg;
-	int GRi, GRj, GRk, D12, op;
-
-	union {
-		uint64_t _64;
-		uint32_t _32[2];
-		uint16_t _16;
-		uint8_t _8[8];
-	} x;
-
-	if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7))
-		return -EAGAIN;
-
-	epcr0 &= EPCR0_PC;
-
-	if (__frame->pc != epcr0) {
-		kdebug("MISALIGN: Execution not halted on excepting instruction\n");
-		BUG();
-	}
-
-	if (__get_user(insn, (unsigned long *) epcr0) < 0)
-		return -EFAULT;
-
-	/* determine the instruction type first */
-	switch ((insn >> 18) & 0x7f) {
-	case 0x2:
-		/* LDx @(GRi,GRj),GRk */
-		op = tbl_LDGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	case 0x3:
-		/* STx GRk,@(GRi,GRj) */
-		op = tbl_STGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	default:
-		op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f];
-		break;
-	}
-
-	if (!op)
-		return -EAGAIN;
-
-	kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op);
-
-	memset(&x, 0xba, 8);
-
-	/* validate the instruction parameters */
-	greg = (unsigned long *) &__frame->tbr;
-
-	GRi = (insn >> 12) & 0x3f;
-	GRk = (insn >> 25) & 0x3f;
-
-	if (GRi > 31 || GRk > 31)
-		return -ENOENT;
-
-	if (op & _MA_DWORD && GRk & 1)
-		return -EINVAL;
-
-	if (op & _MA_IMM) {
-		D12 = insn & 0xfff;
-		asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */
-		addr = (GRi ? greg[GRi] : 0) + D12;
-	}
-	else {
-		GRj = (insn >> 0) & 0x3f;
-		if (GRj > 31)
-			return -ENOENT;
-		addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0);
-	}
-
-	if (addr != ear0) {
-		kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n",
-		       addr, ear0);
-		return -EFAULT;
-	}
-
-	/* check the address is okay */
-	if (user_mode(__frame) && ___range_ok(ear0, 8) < 0)
-		return -EFAULT;
-
-	/* perform the memory op */
-	if (op & _MA_STORE) {
-		/* perform a store */
-		x._32[0] = 0;
-		if (GRk != 0) {
-			if (op & _MA_HALF) {
-				x._16 = greg[GRk];
-			}
-			else {
-				x._32[0] = greg[GRk];
-			}
-		}
-		if (op & _MA_DWORD)
-			x._32[1] = greg[GRk + 1];
-
-		kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n",
-		       GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK);
-
-		if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-	}
-	else {
-		/* perform a load */
-		if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-
-		if (op & _MA_HALF) {
-			if (op & _MA_SIGNED)
-				asm ("slli %0,#16,%0 ! srai %0,#16,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-			else
-				asm ("sethi #0,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-		}
-
-		kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n",
-		       addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]);
-
-		if (GRk != 0)
-			greg[GRk] = x._32[0];
-		if (op & _MA_DWORD)
-			greg[GRk + 1] = x._32[1];
-	}
-
-	/* update the base pointer if required */
-	if (op & _MA_UPDATE)
-		greg[GRi] = addr;
-
-	/* well... we've done that insn */
-	__frame->pc = __frame->pc + 4;
-
-	return 0;
-} /* end handle_misalignment() */
