Diffstat (limited to 'arch/s390/mm/fault.c')
-rw-r--r-- | arch/s390/mm/fault.c | 77
1 files changed, 46 insertions, 31 deletions
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2505b2ea0ef..fe5701e9efb 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,6 +52,14 @@
 #define VM_FAULT_BADMAP		0x020000
 #define VM_FAULT_BADACCESS	0x040000
 
+static unsigned long store_indication;
+
+void fault_init(void)
+{
+	if (test_facility(2) && test_facility(75))
+		store_indication = 0xc00;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 	int ret = 0;
@@ -199,14 +207,21 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code,
 			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
+	unsigned long address;
+	struct siginfo si;
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	tsk->thread.prot_addr = address;
 	tsk->thread.trap_no = int_code;
-	force_sig(SIGBUS, tsk);
+	si.si_signo = SIGBUS;
+	si.si_errno = 0;
+	si.si_code = BUS_ADRERR;
+	si.si_addr = (void __user *) address;
+	force_sig_info(SIGBUS, &si, tsk);
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
@@ -266,10 +281,11 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
 		if (fault & VM_FAULT_OOM)
 			pagefault_out_of_memory();
 		else if (fault & VM_FAULT_SIGBUS) {
-			do_sigbus(regs, int_code, trans_exc_code);
 			/* Kernel mode? Handle exceptions or die */
 			if (!(regs->psw.mask & PSW_MASK_PSTATE))
 				do_no_context(regs, int_code, trans_exc_code);
+			else
+				do_sigbus(regs, int_code, trans_exc_code);
 		} else
 			BUG();
 		break;
@@ -294,7 +310,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int fault;
+	int fault, write;
 
 	if (notify_page_fault(regs))
 		return 0;
@@ -312,12 +328,6 @@ static inline int do_exception(struct pt_regs *regs, int access,
 		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
-	/*
-	 * When we get here, the fault happened in the current
-	 * task's user address space, so we can switch on the
-	 * interrupts again and then search the VMAs
-	 */
-	local_irq_enable();
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
@@ -348,8 +358,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
	 */
-	fault = handle_mm_fault(mm, vma, address,
-			(access == VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	write = (access == VM_WRITE ||
+		 (trans_exc_code & store_indication) == 0x400) ?
+		FAULT_FLAG_WRITE : 0;
+	fault = handle_mm_fault(mm, vma, address, write);
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
@@ -374,20 +386,20 @@ out:
 	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
+				       unsigned long trans_exc_code)
 {
-	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	int fault;
 
 	/* Protection exception is supressing, decrement psw address. */
-	regs->psw.addr -= (int_code >> 16);
+	regs->psw.addr -= (pgm_int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
 	if (unlikely(!(trans_exc_code & 4))) {
-		do_low_address(regs, int_code, trans_exc_code);
+		do_low_address(regs, pgm_int_code, trans_exc_code);
 		return;
 	}
 	fault = do_exception(regs, VM_WRITE, trans_exc_code);
@@ -395,9 +407,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 		do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
+				unsigned long trans_exc_code)
 {
-	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	int access, fault;
 
 	access = VM_READ | VM_EXEC | VM_WRITE;
@@ -408,21 +420,19 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 #endif
 	fault = do_exception(regs, access, trans_exc_code);
 	if (unlikely(fault))
-		do_fault_error(regs, int_code & 255, trans_exc_code, fault);
+		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
+				 unsigned long trans_exc_code)
 {
-	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
 	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
 
-	local_irq_enable();
-
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 	up_read(&mm->mmap_sem);
@@ -434,16 +444,16 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
+		do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, int_code, trans_exc_code);
+	do_no_context(regs, pgm_int_code, trans_exc_code);
 }
 #endif
 
-int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
+int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
 {
 	struct pt_regs regs;
 	int access, fault;
@@ -454,14 +464,14 @@ int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user)
 	regs.psw.addr = (unsigned long) __builtin_return_address(0);
 	regs.psw.addr |= PSW_ADDR_AMODE;
 	uaddr &= PAGE_MASK;
-	access = write_user ? VM_WRITE : VM_READ;
+	access = write ? VM_WRITE : VM_READ;
 	fault = do_exception(&regs, access, uaddr | 2);
 	if (unlikely(fault)) {
 		if (fault & VM_FAULT_OOM) {
 			pagefault_out_of_memory();
 			fault = 0;
 		} else if (fault & VM_FAULT_SIGBUS)
-			do_sigbus(&regs, int_code, uaddr);
+			do_sigbus(&regs, pgm_int_code, uaddr);
 	}
 	return fault ? -EFAULT : 0;
 }
@@ -527,7 +537,8 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
-static void pfault_interrupt(__u16 int_code)
+static void pfault_interrupt(unsigned int ext_int_code,
+			     unsigned int param32, unsigned long param64)
 {
 	struct task_struct *tsk;
 	__u16 subcode;
@@ -538,14 +549,18 @@ static void pfault_interrupt(__u16 int_code)
 	 * in the 'cpu address' field associated with the
 	 * external interrupt.
 	 */
-	subcode = S390_lowcore.cpu_addr;
+	subcode = ext_int_code >> 16;
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 
 	/*
 	 * Get the token (= address of the task structure of the affected task).
 	 */
-	tsk = *(struct task_struct **) __LC_PFAULT_INTPARM;
+#ifdef CONFIG_64BIT
+	tsk = *(struct task_struct **) param64;
+#else
+	tsk = *(struct task_struct **) param32;
+#endif
 
 	if (subcode & 0x0080) {
 		/* signal bit is set -> a page has been swapped in by VM */
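
The most substantive functional change in this patch is the new store-indication handling: fault_init() arms store_indication only when facility bits 2 and 75 are available, and do_exception() then treats a fault whose translation-exception code carries the store indication (value 0x400 within the 0xc00 mask) as a write fault. The standalone sketch below distills that decision for readers of the patch; it is illustrative only, the helper name fault_write_flag() is made up, and the flag values are stand-ins rather than the kernel's definitions.

/* Illustrative sketch of the store-indication logic added by this patch. */
#define FAULT_FLAG_WRITE	0x01	/* stand-in value, not the kernel define */
#define VM_WRITE		0x02	/* stand-in value, not the kernel define */

/* Set to 0xc00 by fault_init() when facility bits 2 and 75 are present,
 * otherwise left at 0 so the store-indication test below never matches. */
static unsigned long store_indication;

/*
 * Hypothetical helper mirroring the check in do_exception(): the fault is
 * handled as a write either when the caller already asked for VM_WRITE
 * (as the protection-exception path does) or when the store-indication
 * bits of the translation-exception code are usable and read 0x400.
 */
static int fault_write_flag(int access, unsigned long trans_exc_code)
{
	if (access == VM_WRITE ||
	    (trans_exc_code & store_indication) == 0x400)
		return FAULT_FLAG_WRITE;
	return 0;
}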