Diffstat (limited to 'arch/m32r/mm/fault.c')
| Mode | Path | Lines changed |
| -rw-r--r-- | arch/m32r/mm/fault.c | 128 |
1 file changed, 46 insertions, 82 deletions
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index bf7fb58ef02..e9c6a8014bd 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -8,7 +8,6 @@
  *  Copyright (C) 1995  Linus Torvalds
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -19,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/tty.h>
@@ -28,7 +26,6 @@
 #include <linux/module.h>
 
 #include <asm/m32r.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
@@ -50,32 +47,6 @@ unsigned int tlb_entry_d_dat[NR_CPUS];
 
 extern void init_tlb(void);
 
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out
- */
-void bust_spinlocks(int yes)
-{
-	int loglevel_save = console_loglevel;
-
-	if (yes) {
-		oops_in_progress = 1;
-		return;
-	}
-#ifdef CONFIG_VT
-	unblank_screen();
-#endif
-	oops_in_progress = 0;
-	/*
-	 * OK, the message is on the console.  Now we call printk()
-	 * without oops_in_progress set so that printk will give klogd
-	 * a poke.  Hold onto your hats...
-	 */
-	console_loglevel = 15;	/* NMI oopser may have shut the console up */
-	printk(" ");
-	console_loglevel = loglevel_save;
-}
-
 /*======================================================================*
  * do_page_fault()
  *======================================================================*
@@ -107,7 +78,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page, addr;
-	int write;
+	unsigned long flags = 0;
+	int fault;
 	siginfo_t info;
 
 	/*
@@ -145,17 +117,20 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
+	if (error_code & ACE_USERMODE)
+		flags |= FAULT_FLAG_USER;
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
-	 * kernel and should generate an OOPS.  Unfortunatly, in the case of an
-	 * erroneous fault occuring in a code path which already holds mmap_sem
+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
+	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
 	 * address space.  Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
 	 * exceptions table.
 	 *
 	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibilty of a deadlock.
+	 * the source reference check when there is a possibility of a deadlock.
 	 * Attempt to lock the address space, if we cannot we then validate the
 	 * source.  If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
@@ -174,7 +149,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-#if 0
+
 	if (error_code & ACE_USERMODE) {
 		/*
 		 * accessing the stack below "spu" is always a bug.
@@ -185,7 +160,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		if (address + 4 < regs->spu)
 			goto bad_area;
 	}
-#endif
+
 	if (expand_stack(vma, address))
 		goto bad_area;
 /*
@@ -194,14 +169,13 @@
  */
 good_area:
 	info.si_code = SEGV_ACCERR;
-	write = 0;
 	switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
 		default:	/* 3: write, present */
 			/* fall through */
 		case ACE_WRITE:	/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto bad_area;
-			write++;
+			flags |= FAULT_FLAG_WRITE;
 			break;
 		case ACE_PROTECTION:	/* read, present */
 		case 0:		/* read, not present */
@@ -215,7 +189,6 @@ good_area:
 	if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
 		goto bad_area;
 
-survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -223,20 +196,18 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	switch (handle_mm_fault(mm, vma, addr, write)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, addr, flags);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 	set_thread_fault_code(0);
 	up_read(&mm->mmap_sem);
 	return;
@@ -300,15 +271,10 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (tsk->pid == 1) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (error_code & ACE_USERMODE)
-		do_exit(SIGKILL);
-	goto no_context;
+	if (!(error_code & ACE_USERMODE))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
@@ -363,8 +329,10 @@ vmalloc_fault:
 		if (!pte_present(*pte_k))
 			goto no_context;
 
-		addr = (address & PAGE_MASK) | (error_code & ACE_INSTRUCTION);
-		update_mmu_cache(NULL, addr, *pte_k);
+		addr = (address & PAGE_MASK);
+		set_thread_fault_code(error_code);
+		update_mmu_cache(NULL, addr, pte_k);
+		set_thread_fault_code(0);
 		return;
 	}
 }
@@ -376,9 +344,9 @@ vmalloc_fault:
 #define ITLB_END	(unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
 #define DTLB_END	(unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
-	pte_t pte)
+	pte_t *ptep)
 {
-	unsigned long *entry1, *entry2;
+	volatile unsigned long *entry1, *entry2;
 	unsigned long pte_data, flags;
 	unsigned int *entry_dat;
 	int inst = get_thread_fault_code() & ACE_INSTRUCTION;
@@ -392,30 +360,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
 
 	vaddr = (vaddr & PAGE_MASK) | get_asid();
 
+	pte_data = pte_val(*ptep);
+
 #ifdef CONFIG_CHIP_OPSP
 	entry1 = (unsigned long *)ITLB_BASE;
-	for(i = 0 ; i < NR_TLB_ENTRIES; i++) {
-		if(*entry1++ == vaddr) {
-			pte_data = pte_val(pte);
-			set_tlb_data(entry1, pte_data);
-			break;
-		}
-		entry1++;
+	for (i = 0; i < NR_TLB_ENTRIES; i++) {
+		if (*entry1++ == vaddr) {
+			set_tlb_data(entry1, pte_data);
+			break;
+		}
+		entry1++;
 	}
 	entry2 = (unsigned long *)DTLB_BASE;
-	for(i = 0 ; i < NR_TLB_ENTRIES ; i++) {
-		if(*entry2++ == vaddr) {
-			pte_data = pte_val(pte);
-			set_tlb_data(entry2, pte_data);
-			break;
-		}
-		entry2++;
+	for (i = 0; i < NR_TLB_ENTRIES; i++) {
+		if (*entry2++ == vaddr) {
+			set_tlb_data(entry2, pte_data);
+			break;
+		}
+		entry2++;
 	}
-	local_irq_restore(flags);
-	return;
 #else
-	pte_data = pte_val(pte);
-
 	/*
 	 * Update TLB entries
 	 *  entry1: ITLB entry address
@@ -440,6 +404,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
 		"i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
 		: "r4", "memory"
 	);
+#endif
 
 	if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
 		goto notfound;
@@ -483,7 +448,6 @@ notfound:
 	set_tlb_data(entry1, pte_data);
 
 	goto found;
-#endif
 }
 
 /*======================================================================*
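The core of the change above is the move from the old VM_FAULT_MINOR/VM_FAULT_MAJOR switch statement to the flags-based handle_mm_fault() interface. Below is a minimal sketch of that pattern in isolation, assuming the four-argument handle_mm_fault() of this kernel generation; the helper name and its error-code returns are hypothetical, while the FAULT_FLAG_* and VM_FAULT_* handling mirrors what the diff introduces.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>

/*
 * Hypothetical helper (not part of the patch): build the fault flags up
 * front, make a single handle_mm_fault() call, and test the VM_FAULT_*
 * result bits instead of switching on a return value.
 */
static int fault_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int user, int write)
{
	unsigned long flags = 0;
	int fault;

	if (user)
		flags |= FAULT_FLAG_USER;	/* fault raised from user mode */
	if (write)
		flags |= FAULT_FLAG_WRITE;	/* fault was a write access */

	fault = handle_mm_fault(mm, vma, address & PAGE_MASK, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			return -ENOMEM;		/* caller takes the OOM path */
		if (fault & VM_FAULT_SIGBUS)
			return -EFAULT;		/* caller raises SIGBUS */
		BUG();
	}

	/* Success: account the fault as major or minor. */
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	return 0;
}

One design point the conversion buys, visible in the out_of_memory hunk, is that OOM policy moves out of the arch code: instead of the old pid == 1 retry loop and do_exit(SIGKILL), a user-mode OOM fault now calls pagefault_out_of_memory() and lets the core OOM killer decide.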
