Diffstat (limited to 'arch/unicore32/mm')
-rw-r--r--   arch/unicore32/mm/alignment.c    |  3
-rw-r--r--   arch/unicore32/mm/dma-swiotlb.c  | 18
-rw-r--r--   arch/unicore32/mm/fault.c        | 56
-rw-r--r--   arch/unicore32/mm/flush.c        |  1
-rw-r--r--   arch/unicore32/mm/init.c         | 79
-rw-r--r--   arch/unicore32/mm/ioremap.c      | 21
-rw-r--r--   arch/unicore32/mm/mm.h           |  5
-rw-r--r--   arch/unicore32/mm/mmu.c          |  2
-rw-r--r--   arch/unicore32/mm/proc-syms.c    |  2
9 files changed, 78 insertions, 109 deletions
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
index 28f576d733e..24e836023e6 100644
--- a/arch/unicore32/mm/alignment.c
+++ b/arch/unicore32/mm/alignment.c
@@ -21,9 +21,12 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/unaligned.h>
 
+#include "mm.h"
+
 #define CODING_BITS(i)	(i & 0xe0000120)
 
 #define LDST_P_BIT(i)	(i & (1 << 28))	/* Preindex		*/
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
index bfa9fbb2bbb..16c08b2143a 100644
--- a/arch/unicore32/mm/dma-swiotlb.c
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -17,9 +17,23 @@
 
 #include <asm/dma.h>
 
+static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flags,
+					    struct dma_attrs *attrs)
+{
+	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+}
+
+static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
+					  void *vaddr, dma_addr_t dma_addr,
+					  struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 struct dma_map_ops swiotlb_dma_map_ops = {
-	.alloc_coherent = swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = unicore_swiotlb_alloc_coherent,
+	.free = unicore_swiotlb_free_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.dma_supported = swiotlb_dma_supported,
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 283aa4b50b7..0dc922dba91 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -20,7 +20,6 @@
 #include <linux/sched.h>
 #include <linux/io.h>
 
-#include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
@@ -169,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 }
 
 static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		struct task_struct *tsk)
+		unsigned int flags, struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
 	int fault;
@@ -195,14 +194,7 @@ good_area:
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
-			(!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR))
-		return fault;
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
@@ -217,6 +209,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -228,6 +221,11 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+	if (!(fsr ^ 0x12))
+		flags |= FAULT_FLAG_WRITE;
+
 	/*
 	 * As per x86, we may deadlock here.  However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -237,6 +235,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		if (!user_mode(regs)
 		    && !search_exception_tables(regs->UCreg_pc))
 			goto no_context;
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -252,7 +251,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_pf(mm, addr, fsr, tsk);
+	fault = __do_pf(mm, addr, fsr, flags, tsk);
+
+	/* If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because
+	 * it would already be released in __lock_page_or_retry in
+	 * mm/filemap.c. */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
+	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 
 	/*
@@ -262,6 +282,13 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 			(VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+	/*
+	 * If we are in kernel mode at this point, we
+	 * have no context to handle this fault with.
+	 */
+	if (!user_mode(regs))
+		goto no_context;
+
 	if (fault & VM_FAULT_OOM) {
 		/*
 		 * We ran out of memory, call the OOM killer, and return to
@@ -272,13 +299,6 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		return 0;
 	}
 
-	/*
-	 * If we are in kernel mode at this point, we
-	 * have no context to handle this fault with.
-	 */
-	if (!user_mode(regs))
-		goto no_context;
-
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
diff --git a/arch/unicore32/mm/flush.c b/arch/unicore32/mm/flush.c
index 93478cc8b26..6d4c096ffa2 100644
--- a/arch/unicore32/mm/flush.c
+++ b/arch/unicore32/mm/flush.c
@@ -14,7 +14,6 @@
 #include <linux/pagemap.h>
 
 #include <asm/cacheflush.h>
-#include <asm/system.h>
 #include <asm/tlbflush.h>
 
 void flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index de186bde897..be2bde9b07c 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -313,24 +313,6 @@ void __init bootmem_init(void)
 	max_pfn = max_high - PHYS_PFN_OFFSET;
 }
 
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
-{
-	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
-	for (; pfn < end; pfn++) {
-		struct page *page = pfn_to_page(pfn);
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-		pages++;
-	}
-
-	if (size && s)
-		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
-
-	return pages;
-}
-
 static inline void
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -398,59 +380,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
  */
 void __init mem_init(void)
 {
-	unsigned long reserved_pages, free_pages;
-	struct memblock_region *reg;
-	int i;
-
 	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 
-	/* this will put all unused low memory onto the freelists */
 	free_unused_memmap(&meminfo);
 
-	totalram_pages += free_all_bootmem();
-
-	reserved_pages = free_pages = 0;
-
-	for_each_bank(i, &meminfo) {
-		struct membank *bank = &meminfo.bank[i];
-		unsigned int pfn1, pfn2;
-		struct page *page, *end;
-
-		pfn1 = bank_pfn_start(bank);
-		pfn2 = bank_pfn_end(bank);
-
-		page = pfn_to_page(pfn1);
-		end = pfn_to_page(pfn2 - 1) + 1;
-
-		do {
-			if (PageReserved(page))
-				reserved_pages++;
-			else if (!page_count(page))
-				free_pages++;
-			page++;
-		} while (page < end);
-	}
-
-	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
-	 */
-	printk(KERN_INFO "Memory:");
-	num_physpages = 0;
-	for_each_memblock(memory, reg) {
-		unsigned long pages = memblock_region_memory_end_pfn(reg) -
-			memblock_region_memory_base_pfn(reg);
-		num_physpages += pages;
-		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
-	}
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-
-	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		free_pages << (PAGE_SHIFT-10),
-		reserved_pages << (PAGE_SHIFT-10),
-		totalhigh_pages << (PAGE_SHIFT-10));
+	/* this will put all unused low memory onto the freelists */
+	free_all_bootmem();
+
+	mem_init_print_info(NULL);
 	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
 		"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
@@ -479,7 +416,7 @@ void __init mem_init(void)
 	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
 	BUG_ON(TASK_SIZE				> MODULES_VADDR);
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		/*
 		 * On a machine this small we won't get
 		 * anywhere without overcommit, so turn
@@ -491,9 +428,7 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
-				    __phys_to_pfn(__pa(__init_end)),
-				    "init");
+	free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -503,9 +438,7 @@ static int keep_initrd;
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
-		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
-					    __phys_to_pfn(__pa(end)),
-					    "initrd");
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 
 static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a605597b0..bf012b2b71a 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -144,11 +144,11 @@ void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
 	 * Don't allow RAM to be mapped
 	 */
 	if (pfn_valid(pfn)) {
-		printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
+		WARN(1, "BUG: Your driver calls ioremap() on\n"
 			"system memory.  This leads to architecturally\n"
 			"unpredictable behaviour, and ioremap() will fail in\n"
 			"the next kernel release.  Please fix your driver.\n");
-		WARN_ON(1);
+		return NULL;
 	}
 
 	type = get_mem_type(mtype);
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
 void __uc32_iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
 	 * all the mappings before the area can be reclaimed
 	 * by someone else.
 	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
-			}
-			break;
-		}
-	}
-	write_unlock(&vmlist_lock);
+	vm = find_vm_area(addr);
+	if (vm && (vm->flags & VM_IOREMAP) &&
+		(vm->flags & VM_UNICORE_SECTION_MAPPING))
+		unmap_area_sections((unsigned long)vm->addr, vm->size);
 
 	vunmap(addr);
 }
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
index 3296bca0f1f..05c7f532eee 100644
--- a/arch/unicore32/mm/mm.h
+++ b/arch/unicore32/mm/mm.h
@@ -9,6 +9,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <asm/hwdef-copro.h>
+
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
 extern int sysctl_overcommit_memory;
@@ -34,6 +36,9 @@ struct mem_type {
 const struct mem_type *get_mem_type(unsigned int type);
 
 extern void __flush_dcache_page(struct address_space *, struct page *);
+extern void hook_fault_code(int nr, int (*fn)
+				(unsigned long, unsigned int, struct pt_regs *),
+		int sig, int code, const char *name);
 
 void __init bootmem_init(void);
 void uc32_mm_memblock_reserve(void);
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index 43c20b40e44..4f5a532bee1 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -445,7 +445,7 @@ void __init paging_init(void)
  * the user-mode pages.  This will then ensure that we have predictable
  * results when turning the mmu off
  */
-void setup_mm_for_reboot(char mode)
+void setup_mm_for_reboot(void)
 {
 	unsigned long base_pmdval;
 	pgd_t *pgd;
diff --git a/arch/unicore32/mm/proc-syms.c b/arch/unicore32/mm/proc-syms.c
index f30071e3665..21c00fc85c9 100644
--- a/arch/unicore32/mm/proc-syms.c
+++ b/arch/unicore32/mm/proc-syms.c
@@ -19,5 +19,7 @@
 EXPORT_SYMBOL(cpu_dcache_clean_area);
 EXPORT_SYMBOL(cpu_set_pte);
 
+EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+
 EXPORT_SYMBOL(__cpuc_dma_flush_range);
 EXPORT_SYMBOL(__cpuc_dma_clean_range);
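The largest change above is the fault.c conversion to the generic page-fault retry protocol: do_pf() now starts with FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, adds FAULT_FLAG_WRITE for write faults, and when handle_mm_fault() reports VM_FAULT_RETRY it clears the retry flag and jumps back to retake mmap_sem, so a fault is retried at most once. The stand-alone C sketch below models only that control flow; the flag values, the fake handler, and model_do_pf() are illustrative placeholders, not the kernel's definitions.

/*
 * Minimal, user-space model of the retry logic in do_pf() above.
 * All constants and helpers here are placeholders for illustration;
 * they are not the kernel's flag values or APIs.
 */
#include <stdio.h>

#define FAULT_FLAG_WRITE        0x01
#define FAULT_FLAG_ALLOW_RETRY  0x02
#define FAULT_FLAG_KILLABLE     0x04

#define VM_FAULT_MAJOR          0x01
#define VM_FAULT_RETRY          0x02
#define VM_FAULT_ERROR          0x04

static unsigned long maj_flt, min_flt;

/* Fake handle_mm_fault(): asks for one retry, then completes the fault. */
static int fake_handle_mm_fault(unsigned int flags)
{
	static int calls;

	if (calls++ == 0 && (flags & FAULT_FLAG_ALLOW_RETRY))
		return VM_FAULT_RETRY;
	return VM_FAULT_MAJOR;
}

static int model_do_pf(int is_write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;

retry:
	/* down_read(&mm->mmap_sem) would be taken here */
	fault = fake_handle_mm_fault(flags);

	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		if (fault & VM_FAULT_MAJOR)
			maj_flt++;
		else
			min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Allow only one retry to avoid starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}
	/* up_read(&mm->mmap_sem) would be dropped here */
	return fault;
}

int main(void)
{
	int fault = model_do_pf(1);

	printf("final fault=%#x maj_flt=%lu min_flt=%lu\n",
	       fault, maj_flt, min_flt);
	return 0;
}

Run as a normal C program, this walks the same path as the patched do_pf(): the first call returns VM_FAULT_RETRY, the retry flag is cleared, and the second attempt completes, mirroring how the real handler retakes mmap_sem exactly once.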
