Diffstat (limited to 'arch/x86/kernel/vm86_32.c')
-rw-r--r--  arch/x86/kernel/vm86_32.c  293
1 file changed, 147 insertions, 146 deletions
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 738c2104df3..e8edcf52e06 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -28,9 +28,12 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -64,7 +67,7 @@
 
 
 #define KVM86   ((struct kernel_vm86_struct *)regs)
-#define VMPI KVM86->vm86plus
+#define VMPI    KVM86->vm86plus
 
 
 /*
@@ -81,7 +84,7 @@
 #define VFLAGS  (*(unsigned short *)&(current->thread.v86flags))
 #define VEFLAGS (current->thread.v86flags)
 
-#define set_flags(X,new,mask) \
+#define set_flags(X, new, mask) \
 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
 
 #define SAFE_MASK       (0xDD5)
@@ -93,8 +96,10 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
 {
        int ret = 0;
 
-       /* kernel_vm86_regs is missing gs, so copy everything up to
-          (but not including) orig_eax, and then rest including orig_eax. */
+       /*
+        * kernel_vm86_regs is missing gs, so copy everything up to
+        * (but not including) orig_eax, and then rest including orig_eax.
+        */
        ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs,
                                       pt.orig_ax));
        ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
                            sizeof(struct kernel_vm86_regs) -
@@ -120,7 +125,7 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
        return ret;
 }
 
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
 {
        struct tss_struct *tss;
        struct pt_regs *ret;
@@ -134,14 +139,14 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
        local_irq_enable();
 
        if (!current->thread.vm86_info) {
-               printk("no vm86_info: BAD\n");
+               pr_alert("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
-       set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-       tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
-       tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
+       set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
+       tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
+       tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
        if (tmp) {
-               printk("vm86: could not access userspace vm86_info\n");
+               pr_alert("could not access userspace vm86_info\n");
                do_exit(SIGSEGV);
        }
 
@@ -155,7 +160,7 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
 
        ret = KVM86->regs32;
        ret->fs = current->thread.saved_fs;
-       loadsegment(gs, current->thread.saved_gs);
+       set_user_gs(ret, current->thread.saved_gs);
 
        return ret;
 }
@@ -169,6 +174,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        spinlock_t *ptl;
        int i;
 
+       down_write(&mm->mmap_sem);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
@@ -176,6 +182,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
+       split_huge_page_pmd_mm(mm, 0xA0000, pmd);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
@@ -186,6 +193,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
        }
        pte_unmap_unlock(pte, ptl);
 out:
+       up_write(&mm->mmap_sem);
        flush_tlb();
 }
 
@@ -194,37 +202,32 @@ out:
 
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-asmlinkage int sys_vm86old(struct pt_regs regs)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
 {
-       struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
-       struct task_struct *tsk;
-       int tmp, ret = -EPERM;
+       struct task_struct *tsk = current;
+       int tmp;
 
-       tsk = current;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
-       info.regs32 = &regs;
+       info.regs32 = current_pt_regs();
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
-asmlinkage int sys_vm86(struct pt_regs regs)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
@@ -232,45 +235,40 @@ asmlinkage int sys_vm86(struct pt_regs regs)
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
-       int tmp, ret;
+       int tmp;
        struct vm86plus_struct __user *v86;
 
        tsk = current;
-       switch (regs.bx) {
-               case VM86_REQUEST_IRQ:
-               case VM86_FREE_IRQ:
-               case VM86_GET_IRQ_BITS:
-               case VM86_GET_AND_RESET_IRQ:
-                       ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
-                       goto out;
-               case VM86_PLUS_INSTALL_CHECK:
-                       /* NOTE: on old vm86 stuff this will return the error
-                          from access_ok(), because the subfunction is
-                          interpreted as (invalid) address to vm86_struct.
-                          So the installation check works.
-                        */
-                       ret = 0;
-                       goto out;
+       switch (cmd) {
+       case VM86_REQUEST_IRQ:
+       case VM86_FREE_IRQ:
+       case VM86_GET_IRQ_BITS:
+       case VM86_GET_AND_RESET_IRQ:
+               return do_vm86_irq_handling(cmd, (int)arg);
+       case VM86_PLUS_INSTALL_CHECK:
+               /*
+                * NOTE: on old vm86 stuff this will return the error
+                * from access_ok(), because the subfunction is
+                * interpreted as (invalid) address to vm86_struct.
+                * So the installation check works.
+                */
+               return 0;
        }
 
        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
-       ret = -EPERM;
        if (tsk->thread.saved_sp0)
-               goto out;
-       v86 = (struct vm86plus_struct __user *)regs.cx;
+               return -EPERM;
+       v86 = (struct vm86plus_struct __user *)arg;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
-       info.regs32 = &regs;
+               return -EFAULT;
+       info.regs32 = current_pt_regs();
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
@@ -283,10 +281,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        info->regs.pt.ds = 0;
        info->regs.pt.es = 0;
        info->regs.pt.fs = 0;
-
-/* we are clearing gs later just before "jmp resume_userspace",
- * because it is not saved/restored.
- */
+#ifndef CONFIG_X86_32_LAZY_GS
+       info->regs.pt.gs = 0;
+#endif
 
 /*
  * The flags register is also special: we cannot trust that the user
@@ -296,30 +293,30 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        VEFLAGS = info->regs.pt.flags;
        info->regs.pt.flags &= SAFE_MASK;
        info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
-       info->regs.pt.flags |= VM_MASK;
+       info->regs.pt.flags |= X86_VM_MASK;
 
        switch (info->cpu_type) {
-               case CPU_286:
-                       tsk->thread.v86mask = 0;
-                       break;
-               case CPU_386:
-                       tsk->thread.v86mask = NT_MASK | IOPL_MASK;
-                       break;
-               case CPU_486:
-                       tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
-                       break;
-               default:
-                       tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
-                       break;
+       case CPU_286:
+               tsk->thread.v86mask = 0;
+               break;
+       case CPU_386:
+               tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+               break;
+       case CPU_486:
+               tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+               break;
+       default:
+               tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+               break;
        }
 
 /*
- * Save old state, set default return value (%ax) to 0
+ * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
 */
-       info->regs32->ax = 0;
+       info->regs32->ax = VM86_SIGNAL;
        tsk->thread.saved_sp0 = tsk->thread.sp0;
        tsk->thread.saved_fs = info->regs32->fs;
-       savesegment(gs, tsk->thread.saved_gs);
+       tsk->thread.saved_gs = get_user_gs(info->regs32);
 
        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
@@ -332,23 +329,27 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);
 
-       /*call audit_syscall_exit since we do not exit via the normal paths */
+       /*call __audit_syscall_exit since we do not exit via the normal paths */
+#ifdef CONFIG_AUDITSYSCALL
        if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(0), 0);
+               __audit_syscall_exit(1, 0);
+#endif
 
        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
+#ifdef CONFIG_X86_32_LAZY_GS
                "mov  %2, %%gs\n\t"
+#endif
                "jmp resume_userspace"
                : /* no outputs */
                :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
        /* we never return here */
 }
 
-static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
+static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
 {
-       struct pt_regs * regs32;
+       struct pt_regs *regs32;
 
        regs32 = save_v86_state(regs16);
        regs32->ax = retval;
@@ -358,29 +359,30 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
                : : "r" (regs32), "r" (current_thread_info()));
 }
 
-static inline void set_IF(struct kernel_vm86_regs * regs)
+static inline void set_IF(struct kernel_vm86_regs *regs)
 {
-       VEFLAGS |= VIF_MASK;
-       if (VEFLAGS & VIP_MASK)
+       VEFLAGS |= X86_EFLAGS_VIF;
+       if (VEFLAGS & X86_EFLAGS_VIP)
                return_to_32bit(regs, VM86_STI);
 }
 
-static inline void clear_IF(struct kernel_vm86_regs * regs)
+static inline void clear_IF(struct kernel_vm86_regs *regs)
 {
-       VEFLAGS &= ~VIF_MASK;
+       VEFLAGS &= ~X86_EFLAGS_VIF;
 }
 
-static inline void clear_TF(struct kernel_vm86_regs * regs)
+static inline void clear_TF(struct kernel_vm86_regs *regs)
 {
-       regs->pt.flags &= ~TF_MASK;
+       regs->pt.flags &= ~X86_EFLAGS_TF;
 }
 
-static inline void clear_AC(struct kernel_vm86_regs * regs)
+static inline void clear_AC(struct kernel_vm86_regs *regs)
 {
-       regs->pt.flags &= ~AC_MASK;
+       regs->pt.flags &= ~X86_EFLAGS_AC;
 }
 
-/* It is correct to call set_IF(regs) from the set_vflags_*
+/*
+ * It is correct to call set_IF(regs) from the set_vflags_*
  * functions. However someone forgot to call clear_IF(regs)
  * in the opposite case.
  * After the command sequence CLI PUSHF STI POPF you should
@@ -391,41 +393,41 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
  * [KD]
  */
 
-static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
 {
        set_flags(VEFLAGS, flags, current->thread.v86mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
-       if (flags & IF_MASK)
+       if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
 }
 
-static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
+static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
 {
        set_flags(VFLAGS, flags, current->thread.v86mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
-       if (flags & IF_MASK)
+       if (flags & X86_EFLAGS_IF)
                set_IF(regs);
        else
                clear_IF(regs);
 }
 
-static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
+static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
 {
        unsigned long flags = regs->pt.flags & RETURN_MASK;
 
-       if (VEFLAGS & VIF_MASK)
-               flags |= IF_MASK;
-       flags |= IOPL_MASK;
+       if (VEFLAGS & X86_EFLAGS_VIF)
+               flags |= X86_EFLAGS_IF;
+       flags |= X86_EFLAGS_IOPL;
        return flags | (VEFLAGS & current->thread.v86mask);
 }
 
-static inline int is_revectored(int nr, struct revectored_struct * bitmap)
+static inline int is_revectored(int nr, struct revectored_struct *bitmap)
 {
        __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
                :"=r" (nr)
-               :"m" (*bitmap),"r" (nr));
+               :"m" (*bitmap), "r" (nr));
        return nr;
 }
 
@@ -437,7 +439,7 @@ static inline int is_revectored(int nr, struct revectored_struct *bitmap)
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
-       } while(0)
+       } while (0)
 
 #define pushw(base, ptr, val, err_label) \
        do { \
@@ -448,7 +450,7 @@ static inline int is_revectored(int nr, struct revectored_struct *bitmap)
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
-       } while(0)
+       } while (0)
 
 #define pushl(base, ptr, val, err_label) \
        do { \
@@ -465,7 +467,7 @@ static inline int is_revectored(int nr, struct revectored_struct *bitmap)
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
-       } while(0)
+       } while (0)
 
 #define popb(base, ptr, err_label) \
        ({ \
@@ -512,7 +514,7 @@ static inline int is_revectored(int nr, struct revectored_struct *bitmap)
  * in userspace is always better than an Oops anyway.) [KD]
  */
 static void do_int(struct kernel_vm86_regs *regs, int i,
-    unsigned char __user * ssp, unsigned short sp)
+                  unsigned char __user *ssp, unsigned short sp)
 {
        unsigned long __user *intr_ptr;
        unsigned long segoffs;
@@ -521,7 +523,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
                goto cannot_handle;
        if (is_revectored(i, &KVM86->int_revectored))
                goto cannot_handle;
-       if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
+       if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
@@ -543,30 +545,29 @@ cannot_handle:
        return_to_32bit(regs, VM86_INTx + (i << 8));
 }
 
-int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
+int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
        if (VMPI.is_vm86pus) {
-               if ( (trapno==3) || (trapno==1) )
-                       return_to_32bit(regs, VM86_TRAP + (trapno << 8));
+               if ((trapno == 3) || (trapno == 1)) {
+                       KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
+                       /* setting this flag forces the code in entry_32.S to
+                          the path where we call save_v86_state() and change
+                          the stack pointer to KVM86->regs32 */
+                       set_thread_flag(TIF_NOTIFY_RESUME);
+                       return 0;
+               }
                do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                return 0;
        }
-       if (trapno !=1)
+       if (trapno != 1)
                return 1; /* we let this handle by the calling routine */
-       if (current->ptrace & PT_PTRACED) {
-               unsigned long flags;
-               spin_lock_irqsave(&current->sighand->siglock, flags);
-               sigdelset(&current->blocked, SIGTRAP);
-               recalc_sigpending();
-               spin_unlock_irqrestore(&current->sighand->siglock, flags);
-       }
-       send_sig(SIGTRAP, current, 1);
-       current->thread.trap_no = trapno;
+       current->thread.trap_nr = trapno;
        current->thread.error_code = error_code;
+       force_sig(SIGTRAP, current);
        return 0;
 }
 
-void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
+void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 {
        unsigned char opcode;
        unsigned char __user *csp;
@@ -576,11 +577,11 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
 
 #define CHECK_IF_IN_TRAP \
        if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
-               newflags |= TF_MASK
+               newflags |= X86_EFLAGS_TF
 #define VM86_FAULT_RETURN do { \
-       if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
+       if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
                return_to_32bit(regs, VM86_PICRETURN); \
-       if (orig_flags & TF_MASK) \
+       if (orig_flags & X86_EFLAGS_TF) \
                handle_vm86_trap(regs, 0, 1); \
        return; } while (0)
 
@@ -595,17 +596,17 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
-                       case 0x66:      /* 32-bit data */     data32=1; break;
-                       case 0x67:      /* 32-bit address */  break;
-                       case 0x2e:      /* CS */              break;
-                       case 0x3e:      /* DS */              break;
-                       case 0x26:      /* ES */              break;
-                       case 0x36:      /* SS */              break;
-                       case 0x65:      /* GS */              break;
-                       case 0x64:      /* FS */              break;
-                       case 0xf2:      /* repnz */           break;
-                       case 0xf3:      /* rep */             break;
-                       default: pref_done = 1;
+               case 0x66:      /* 32-bit data */     data32 = 1; break;
+               case 0x67:      /* 32-bit address */  break;
+               case 0x2e:      /* CS */              break;
+               case 0x3e:      /* DS */              break;
+               case 0x26:      /* ES */              break;
+               case 0x36:      /* SS */              break;
+               case 0x65:      /* GS */              break;
+               case 0x64:      /* FS */              break;
+               case 0xf2:      /* repnz */           break;
+               case 0xf3:      /* rep */             break;
+               default: pref_done = 1;
                }
        } while (!pref_done);
 
@@ -628,7 +629,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
        {
                unsigned long newflags;
                if (data32) {
-                       newflags=popl(ssp, sp, simulate_sigsegv);
+                       newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
@@ -636,20 +637,20 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
-               if (data32) {
+               if (data32)
                        set_vflags_long(newflags, regs);
-               } else {
+               else
                        set_vflags_short(newflags, regs);
-               }
+
                VM86_FAULT_RETURN;
        }
 
        /* int xx */
        case 0xcd: {
-               int intno=popb(csp, ip, simulate_sigsegv);
+               int intno = popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (VMPI.vm86dbg_active) {
-                       if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
+                       if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
                                return_to_32bit(regs, VM86_INTx + (intno << 8));
                }
                do_int(regs, intno, ssp, sp);
@@ -663,9 +664,9 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
-                       newip=popl(ssp, sp, simulate_sigsegv);
-                       newcs=popl(ssp, sp, simulate_sigsegv);
-                       newflags=popl(ssp, sp, simulate_sigsegv);
+                       newip = popl(ssp, sp, simulate_sigsegv);
+                       newcs = popl(ssp, sp, simulate_sigsegv);
+                       newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
@@ -734,18 +735,18 @@ static struct vm86_irqs {
 static DEFINE_SPINLOCK(irqbits_lock);
 static int irqbits;
 
-#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
+#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
-       | (1 << SIGUNUSED) )
-       
+       | (1 << SIGUNUSED))
+
 static irqreturn_t irq_handler(int intno, void *dev_id)
 {
        int irq_bit;
        unsigned long flags;
 
-       spin_lock_irqsave(&irqbits_lock, flags);        
+       spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
-       if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
+       if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
@@ -759,7 +760,7 @@ static irqreturn_t irq_handler(int intno, void *dev_id)
        return IRQ_HANDLED;
 
 out:
-       spin_unlock_irqrestore(&irqbits_lock, flags);   
+       spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
 }
 
@@ -770,9 +771,9 @@ static inline void free_vm86_irq(int irqnumber)
        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;
 
-       spin_lock_irqsave(&irqbits_lock, flags);        
+       spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
-       spin_unlock_irqrestore(&irqbits_lock, flags);   
+       spin_unlock_irqrestore(&irqbits_lock, flags);
 }
 
 void release_vm86_irqs(struct task_struct *task)
@@ -788,10 +789,10 @@ static inline int get_and_reset_irq(int irqnumber)
        int bit;
        unsigned long flags;
        int ret = 0;
-       
+
        if (invalid_vm86_irq(irqnumber)) return 0;
        if (vm86_irqs[irqnumber].tsk != current) return 0;
-       spin_lock_irqsave(&irqbits_lock, flags);        
+       spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
@@ -799,7 +800,7 @@ static inline int get_and_reset_irq(int irqnumber)
                ret = 1;
        }
-       spin_unlock_irqrestore(&irqbits_lock, flags);   
+       spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
 }
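A note on the syscall-glue change above: sys_vm86old()/sys_vm86() now take their arguments through SYSCALL_DEFINE1/SYSCALL_DEFINE2 instead of a by-value struct pt_regs, fetching the saved register frame with current_pt_regs(). The NOTE retained in sys_vm86() also explains why VM86_PLUS_INSTALL_CHECK doubles as an availability probe on both vm86plus and old vm86-only kernels. A minimal user-space sketch of that probe, not taken from the patch itself (assumes a 32-bit x86 build where SYS_vm86 and <asm/vm86.h> are available):

/* Hypothetical probe: build with -m32; vm86 exists only on 32-bit x86. */
#include <asm/vm86.h>       /* VM86_* commands, struct vm86plus_struct */
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /*
         * VM86_PLUS_INSTALL_CHECK is cmd 0.  Where vm86plus exists this
         * returns 0; an old vm86-only kernel instead treats the argument
         * as a (bad) vm86_struct pointer and fails access_ok(), so a
         * nonzero result means "no vm86plus" either way.
         */
        if (syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) != 0)
                return 1;                       /* not available */

        struct vm86plus_struct v86;
        memset(&v86, 0, sizeof(v86));
        /* ... fill v86.regs with real-mode cs:ip, ss:sp values ... */
        return (int)syscall(SYS_vm86, VM86_ENTER, &v86);
}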
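The set_flags() macro that the cleanup reformats is the core of the eflags virtualization: it copies only the bits selected by mask from the new value into X, so SAFE_MASK decides which flags a vm86 guest may change directly. A standalone demo of that behavior (macro and SAFE_MASK copied from the diff; the sample values are invented):

/* Demo of the set_flags()/SAFE_MASK logic from the diff:
 * keep X's bits outside `mask`, take `new`'s bits inside it. */
#include <stdio.h>

#define set_flags(X, new, mask) \
        ((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK (0xDD5)  /* CF,PF,AF,ZF,SF,TF,DF,OF: bits a guest may set */

int main(void)
{
        unsigned long flags = 0x3002;           /* invented: IOPL=3, IF clear */
        unsigned long guest = 0xDD5 | 0x200;    /* guest also asks for IF */

        set_flags(flags, guest, SAFE_MASK);
        /* IF (0x200) is outside SAFE_MASK, so the guest cannot grant it */
        printf("flags = %#lx\n", flags);        /* prints flags = 0x3dd7 */
        return 0;
}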
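On the handle_vm86_trap() change: for vm86plus callers, a debug trap (trapno 1 or 3) no longer returns via return_to_32bit(); the value VM86_TRAP + (trapno << 8) is stored in the saved 32-bit %eax and TIF_NOTIFY_RESUME routes the task out through entry_32.S. Either way, user space sees the same type + (arg << 8) encoding, decoded with the VM86_TYPE()/VM86_ARG() macros from <asm/vm86.h>. A sketch of the monitor-side dispatch (the handle_* helpers are placeholders, not part of any API):

/* Sketch: decoding vm86()'s return value, per the type + (arg << 8)
 * encoding used by do_int() and (after this patch) handle_vm86_trap(). */
#include <asm/vm86.h>   /* VM86_TYPE(), VM86_ARG(), VM86_* constants */
#include <stdio.h>

static void handle_int(int n)  { printf("INT 0x%02x\n", n); }  /* placeholder */
static void handle_trap(int t) { printf("debug trap %d\n", t); }

static void dispatch(int retval)
{
        switch (VM86_TYPE(retval)) {    /* retval & 0xff */
        case VM86_SIGNAL:               /* 0: the default %ax from do_sys_vm86() */
                break;                  /* a signal interrupted v86 mode */
        case VM86_INTx:                 /* revectored INT n */
                handle_int(VM86_ARG(retval));   /* retval >> 8 */
                break;
        case VM86_TRAP:                 /* trapno 1 or 3, see handle_vm86_trap() */
                handle_trap(VM86_ARG(retval));
                break;
        case VM86_STI:                  /* virtual interrupts re-enabled */
        case VM86_PICRETURN:            /* pending PIC request */
                break;
        }
}

int main(void)
{
        dispatch(VM86_TRAP + (1 << 8)); /* what the patched trap path returns */
        return 0;
}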
