Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r--  arch/mn10300/mm/cache-dbg-flush-by-reg.S |  4
-rw-r--r--  arch/mn10300/mm/cache-smp.c              |  8
-rw-r--r--  arch/mn10300/mm/dma-alloc.c              |  1
-rw-r--r--  arch/mn10300/mm/fault.c                  | 43
-rw-r--r--  arch/mn10300/mm/init.c                   | 55
-rw-r--r--  arch/mn10300/mm/misalignment.c           |  3
-rw-r--r--  arch/mn10300/mm/pgtable.c                | 12
-rw-r--r--  arch/mn10300/mm/tlb-smp.c                | 37
8 files changed, 71 insertions, 92 deletions
diff --git a/arch/mn10300/mm/cache-dbg-flush-by-reg.S b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
index 665919f2ab6..a775ea5d7ce 100644
--- a/arch/mn10300/mm/cache-dbg-flush-by-reg.S
+++ b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
@@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one:
 	# conditionally purge this line in all ways
 	mov	d1,(L1_CACHE_WAYDISP*0,a0)
 
-debugger_local_cache_flushinv_no_dcache:
+debugger_local_cache_flushinv_one_no_dcache:
 	#
 	# now try to flush the icache
 	#
 	mov	CHCTR,a0
 	movhu	(a0),d0
 	btst	CHCTR_ICEN,d0
-	beq	mn10300_local_icache_inv_range_reg_end
+	beq	debugger_local_cache_flushinv_one_end
 
 	LOCAL_CLI_SAVE(d1)
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
index 4a6e9a4b5b2..2d23b9eeee6 100644
--- a/arch/mn10300/mm/cache-smp.c
+++ b/arch/mn10300/mm/cache-smp.c
@@ -74,7 +74,7 @@ void smp_cache_interrupt(void)
 		break;
 	}
 
-	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+	cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
 }
 
 /**
@@ -94,12 +94,12 @@ void smp_cache_call(unsigned long opr_mask,
 	smp_cache_mask = opr_mask;
 	smp_cache_start = start;
 	smp_cache_end = end;
-	smp_cache_ipi_map = cpu_online_map;
-	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+	cpumask_copy(&smp_cache_ipi_map, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
 
 	send_IPI_allbutself(FLUSH_CACHE_IPI);
 
-	while (!cpus_empty(smp_cache_ipi_map))
+	while (!cpumask_empty(&smp_cache_ipi_map))
 		/* nothing. lockup detection does not belong here */
 		mb();
 }
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 159acb02cfd..e244ebe637e 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 static unsigned long pci_sram_allocated = 0xbc000000;
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 0945409a802..3516cbdf1ee 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -24,7 +24,6 @@
 #include <linux/init.h>
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/hardirq.h>
@@ -124,7 +123,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
 	struct mm_struct *mm;
 	unsigned long page;
 	siginfo_t info;
-	int write, fault;
+	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 #ifdef CONFIG_GDBSTUB
 	/* handle GDB stub causing a fault */
@@ -171,6 +171,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
 	if (in_atomic() || !mm)
 		goto no_context;
 
+	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+		flags |= FAULT_FLAG_USER;
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, address);
@@ -221,7 +224,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
  */
 good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
 	default:	/* 3: write, present */
 	case MMUFCR_xFC_TYPE_WRITE:
@@ -233,7 +235,7 @@ good_area:
 	case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
-		write++;
+		flags |= FAULT_FLAG_WRITE;
 		break;
 
 		/* read from protected page */
@@ -252,7 +254,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -260,10 +266,22 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 	up_read(&mm->mmap_sem);
 	return;
 
@@ -329,9 +347,10 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
-	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-		do_exit(SIGKILL);
+	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+		pagefault_out_of_memory();
+		return;
+	}
 	goto no_context;
 
 do_sigbus:
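The fault.c hunks above convert do_page_fault() to the killable, retryable fault protocol. Stripped of the mn10300-specific VMA validation, the control flow they introduce reduces to the following sketch (identifiers taken from the diff itself; the bad_area and error paths are elided):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the VMA, set FAULT_FLAG_WRITE for writes ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* on a fatal signal the core VM has already dropped mmap_sem for us */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		/* __lock_page_or_retry() released mmap_sem; retry exactly once */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	up_read(&mm->mmap_sem);

Clearing FAULT_FLAG_ALLOW_RETRY before looping back means the second pass cannot report VM_FAULT_RETRY again, so the handler is guaranteed to terminate.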
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 48907cc3bdb..97a1ec0beee 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -29,7 +29,6 @@
 #include <linux/gfp.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -37,8 +36,6 @@
 #include <asm/tlb.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 unsigned long highstart_pfn, highend_pfn;
 
 #ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
@@ -102,60 +99,21 @@ void __init paging_init(void)
  */
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
-	int tmp;
-
 	BUG_ON(!mem_map);
 
 #define START_PFN	(contig_page_data.bdata->node_min_pfn)
 #define MAX_LOW_PFN	(contig_page_data.bdata->node_low_pfn)
 
-	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
+	max_mapnr = MAX_LOW_PFN - START_PFN;
 	high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);
 
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 
 	/* this will put all low memory onto the freelists */
-	totalram_pages += free_all_bootmem();
-
-	reservedpages = 0;
-	for (tmp = 0; tmp < num_physpages; tmp++)
-		if (PageReserved(&mem_map[tmp]))
-			reservedpages++;
-
-	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
-	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-	printk(KERN_INFO
-	       "Memory: %luk/%luk available"
-	       " (%dk kernel code, %dk reserved, %dk data, %dk init,"
-	       " %ldk highmem)\n",
-	       nr_free_pages() << (PAGE_SHIFT - 10),
-	       max_mapnr << (PAGE_SHIFT - 10),
-	       codesize >> 10,
-	       reservedpages << (PAGE_SHIFT - 10),
-	       datasize >> 10,
-	       initsize >> 10,
-	       totalhigh_pages << (PAGE_SHIFT - 10));
-}
+	free_all_bootmem();
 
-/*
- *
- */
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-	unsigned long addr;
-
-	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *) addr, 0xcc, PAGE_SIZE);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	mem_init_print_info(NULL);
 }
 
 /*
@@ -163,9 +121,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
  */
 void free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
-			(unsigned long) &__init_begin,
-			(unsigned long) &__init_end);
+	free_initmem_default(POISON_FREE_INITMEM);
 }
 
 /*
@@ -174,6 +130,7 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+			   "initrd");
 }
 #endif
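mem_init() and the free_init* paths above now lean on generic mm helpers instead of hand-rolled accounting and the free_init_pages() loop. For orientation, free_initmem_default() in kernels of this vintage is approximately the following inline from include/linux/mm.h (a sketch from memory; the exact form varies by version):

static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	/* free and count the pages between __init_begin and __init_end */
	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

So the conversion preserves the old behaviour, including the poison fill: POISON_FREE_INITMEM is the same 0xcc pattern that the removed memset() wrote.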
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index eef989c1d0c..b9920b1edd5 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -23,10 +23,9 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/smp.h>
 #include <asm/pgalloc.h>
 #include <asm/cpu-regs.h>
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 450f7ba3f8f..e77a7c72808 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -21,7 +21,6 @@
 #include <linux/spinlock.h>
 #include <linux/quicklist.h>
 
-#include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -79,8 +78,13 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 #else
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
-	if (pte)
-		clear_highpage(pte);
+	if (!pte)
+		return NULL;
+	clear_highpage(pte);
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
@@ -96,7 +100,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
  * checks at dup_mmap(), exec(), and other mmlist addition points
  * could be used. The locking scheme was chosen on the basis of
  * manfred's recommendations and having no core impact whatsoever.
- * -- wli
+ * -- nyc
  */
 DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
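The pte_alloc_one() change above makes the constructor step explicit and failable: pgtable_page_ctor() can fail once split page-table locks may themselves require an allocation, so the freshly allocated page must be released on failure. The counterpart obligation, not visible in this hunk, is that every PTE-page free path pairs the ctor with pgtable_page_dtor(). A sketch of what the matching free side looks like, assuming (as the error path above does) that PTE pages are freed with __free_page():

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);		/* undo pgtable_page_ctor() */
	__free_page(pte);
}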
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index 0b6a5ad1960..e5d0ef722bf 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -24,7 +24,6 @@
 #include <linux/profile.h>
 #include <linux/smp.h>
 #include <asm/tlbflush.h>
-#include <asm/system.h>
 #include <asm/bitops.h>
 #include <asm/processor.h>
 #include <asm/bug.h>
@@ -64,7 +63,7 @@ void smp_flush_tlb(void *unused)
 
 	cpu_id = get_cpu();
 
-	if (!cpu_isset(cpu_id, flush_cpumask))
+	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
 		/* This was a BUG() but until someone can quote me the line
 		 * from the intel manual that guarantees an IPI to multiple
 		 * CPUs is retried _only_ on the erroring CPUs its staying as a
@@ -79,9 +78,9 @@ void smp_flush_tlb(void *unused)
 	else
 		local_flush_tlb_page(flush_mm, flush_va);
 
-	smp_mb__before_clear_bit();
-	cpu_clear(cpu_id, flush_cpumask);
-	smp_mb__after_clear_bit();
+	smp_mb__before_atomic();
+	cpumask_clear_cpu(cpu_id, &flush_cpumask);
+	smp_mb__after_atomic();
 out:
 	put_cpu();
 }
@@ -103,11 +102,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
 	BUG_ON(!mm);
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(&cpumask));
+	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, &cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(&cpumask, &tmp));
 
 	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
@@ -128,7 +127,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
 	smp_call_function(smp_flush_tlb, NULL, 1);
 
-	while (!cpus_empty(flush_cpumask))
+	while (!cpumask_empty(&flush_cpumask))
 		/* Lockup detection does not belong here */
 		smp_mb();
 
@@ -146,11 +145,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -165,11 +164,11 @@ void flush_tlb_current_task(void)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
@@ -186,11 +185,11 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
 
 	local_flush_tlb_page(mm, va);
-	if (!cpus_empty(cpu_mask))
+	if (!cpumask_empty(&cpu_mask))
 		flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
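Taken together, the cache-smp.c and tlb-smp.c hunks are a mechanical migration from the old value-based cpumask operators to the pointer-based accessors (plus the rename of the bit-operation barriers, and cpu_online_map giving way to cpu_online_mask). A compressed before/after map, collected into one hypothetical function purely for illustration:

/* Illustration only: each line pairs an operation removed in this diff
 * with its pointer-based replacement. Not part of the patch. */
static void cpumask_api_demo(struct mm_struct *mm)
{
	cpumask_t mask;
	int cpu = smp_processor_id();

	cpumask_copy(&mask, mm_cpumask(mm));	/* was: mask = mm->cpu_vm_mask;	*/
	cpumask_clear_cpu(cpu, &mask);		/* was: cpu_clear(cpu, mask);	*/

	if (cpumask_test_cpu(cpu, &mask))	/* was: cpu_isset(cpu, mask)	*/
		return;
	if (cpumask_empty(&mask))		/* was: cpus_empty(mask)	*/
		return;

	smp_mb__before_atomic();		/* was: smp_mb__before_clear_bit() */
	cpumask_clear_cpu(cpu, &mask);
	smp_mb__after_atomic();			/* was: smp_mb__after_clear_bit()  */
}

The pointer-based forms exist so that callers need not copy an entire cpumask_t by value as NR_CPUS grows, and they pair naturally with mm_cpumask(), which replaces direct access to mm->cpu_vm_mask.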
