Commit 83e714e8 authored by Paolo Ciarrocchi's avatar Paolo Ciarrocchi Committed by Ingo Molnar

x86: coding style fixes to arch/x86/kernel/vm86_32.c

Before:
   total: 64 errors, 18 warnings, 840 lines checked
After:
   total: 12 errors, 15 warnings, 844 lines checked

No code changed:

arch/x86/kernel/vm86_32.o:

   text	   data	    bss	    dec	    hex	filename
   4449	     28	    132	   4609	   1201	vm86_32.o.before
   4449	     28	    132	   4609	   1201	vm86_32.o.after

md5:
   e4e51ed7689d17f04148554a3c6d5bb6  vm86_32.o.before.asm
   e4e51ed7689d17f04148554a3c6d5bb6  vm86_32.o.after.asm
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent fb87a298
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
#define KVM86 ((struct kernel_vm86_struct *)regs) #define KVM86 ((struct kernel_vm86_struct *)regs)
#define VMPI KVM86->vm86plus #define VMPI KVM86->vm86plus
/* /*
...@@ -81,7 +81,7 @@ ...@@ -81,7 +81,7 @@
#define VFLAGS (*(unsigned short *)&(current->thread.v86flags)) #define VFLAGS (*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS (current->thread.v86flags) #define VEFLAGS (current->thread.v86flags)
#define set_flags(X,new,mask) \ #define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask))) ((X) = ((X) & ~(mask)) | ((new) & (mask)))
#define SAFE_MASK (0xDD5) #define SAFE_MASK (0xDD5)
...@@ -93,8 +93,10 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user, ...@@ -93,8 +93,10 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
{ {
int ret = 0; int ret = 0;
/* kernel_vm86_regs is missing gs, so copy everything up to /*
(but not including) orig_eax, and then rest including orig_eax. */ * kernel_vm86_regs is missing gs, so copy everything up to
* (but not including) orig_eax, and then rest including orig_eax.
*/
ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax)); ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax, ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
sizeof(struct kernel_vm86_regs) - sizeof(struct kernel_vm86_regs) -
...@@ -120,7 +122,7 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs, ...@@ -120,7 +122,7 @@ static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
return ret; return ret;
} }
struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{ {
struct tss_struct *tss; struct tss_struct *tss;
struct pt_regs *ret; struct pt_regs *ret;
...@@ -138,8 +140,8 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs) ...@@ -138,8 +140,8 @@ struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
do_exit(SIGSEGV); do_exit(SIGSEGV);
} }
set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask); set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs); tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap); tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
if (tmp) { if (tmp) {
printk("vm86: could not access userspace vm86_info\n"); printk("vm86: could not access userspace vm86_info\n");
do_exit(SIGSEGV); do_exit(SIGSEGV);
...@@ -237,20 +239,21 @@ asmlinkage int sys_vm86(struct pt_regs regs) ...@@ -237,20 +239,21 @@ asmlinkage int sys_vm86(struct pt_regs regs)
tsk = current; tsk = current;
switch (regs.bx) { switch (regs.bx) {
case VM86_REQUEST_IRQ: case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ: case VM86_FREE_IRQ:
case VM86_GET_IRQ_BITS: case VM86_GET_IRQ_BITS:
case VM86_GET_AND_RESET_IRQ: case VM86_GET_AND_RESET_IRQ:
ret = do_vm86_irq_handling(regs.bx, (int)regs.cx); ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
goto out; goto out;
case VM86_PLUS_INSTALL_CHECK: case VM86_PLUS_INSTALL_CHECK:
/* NOTE: on old vm86 stuff this will return the error /*
from access_ok(), because the subfunction is * NOTE: on old vm86 stuff this will return the error
interpreted as (invalid) address to vm86_struct. * from access_ok(), because the subfunction is
So the installation check works. * interpreted as (invalid) address to vm86_struct.
*/ * So the installation check works.
ret = 0; */
goto out; ret = 0;
goto out;
} }
/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
...@@ -299,18 +302,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk ...@@ -299,18 +302,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
info->regs.pt.flags |= VM_MASK; info->regs.pt.flags |= VM_MASK;
switch (info->cpu_type) { switch (info->cpu_type) {
case CPU_286: case CPU_286:
tsk->thread.v86mask = 0; tsk->thread.v86mask = 0;
break; break;
case CPU_386: case CPU_386:
tsk->thread.v86mask = NT_MASK | IOPL_MASK; tsk->thread.v86mask = NT_MASK | IOPL_MASK;
break; break;
case CPU_486: case CPU_486:
tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK; tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
break; break;
default: default:
tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK; tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
break; break;
} }
/* /*
...@@ -346,9 +349,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk ...@@ -346,9 +349,9 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
/* we never return here */ /* we never return here */
} }
static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{ {
struct pt_regs * regs32; struct pt_regs *regs32;
regs32 = save_v86_state(regs16); regs32 = save_v86_state(regs16);
regs32->ax = retval; regs32->ax = retval;
...@@ -358,29 +361,30 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval) ...@@ -358,29 +361,30 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
: : "r" (regs32), "r" (current_thread_info())); : : "r" (regs32), "r" (current_thread_info()));
} }
static inline void set_IF(struct kernel_vm86_regs * regs) static inline void set_IF(struct kernel_vm86_regs *regs)
{ {
VEFLAGS |= VIF_MASK; VEFLAGS |= VIF_MASK;
if (VEFLAGS & VIP_MASK) if (VEFLAGS & VIP_MASK)
return_to_32bit(regs, VM86_STI); return_to_32bit(regs, VM86_STI);
} }
static inline void clear_IF(struct kernel_vm86_regs * regs) static inline void clear_IF(struct kernel_vm86_regs *regs)
{ {
VEFLAGS &= ~VIF_MASK; VEFLAGS &= ~VIF_MASK;
} }
static inline void clear_TF(struct kernel_vm86_regs * regs) static inline void clear_TF(struct kernel_vm86_regs *regs)
{ {
regs->pt.flags &= ~TF_MASK; regs->pt.flags &= ~TF_MASK;
} }
static inline void clear_AC(struct kernel_vm86_regs * regs) static inline void clear_AC(struct kernel_vm86_regs *regs)
{ {
regs->pt.flags &= ~AC_MASK; regs->pt.flags &= ~AC_MASK;
} }
/* It is correct to call set_IF(regs) from the set_vflags_* /*
* It is correct to call set_IF(regs) from the set_vflags_*
* functions. However someone forgot to call clear_IF(regs) * functions. However someone forgot to call clear_IF(regs)
* in the opposite case. * in the opposite case.
* After the command sequence CLI PUSHF STI POPF you should * After the command sequence CLI PUSHF STI POPF you should
...@@ -391,7 +395,7 @@ static inline void clear_AC(struct kernel_vm86_regs * regs) ...@@ -391,7 +395,7 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
* [KD] * [KD]
*/ */
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs) static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{ {
set_flags(VEFLAGS, flags, current->thread.v86mask); set_flags(VEFLAGS, flags, current->thread.v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK); set_flags(regs->pt.flags, flags, SAFE_MASK);
...@@ -401,7 +405,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs ...@@ -401,7 +405,7 @@ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs
clear_IF(regs); clear_IF(regs);
} }
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs) static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{ {
set_flags(VFLAGS, flags, current->thread.v86mask); set_flags(VFLAGS, flags, current->thread.v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK); set_flags(regs->pt.flags, flags, SAFE_MASK);
...@@ -411,7 +415,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg ...@@ -411,7 +415,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
clear_IF(regs); clear_IF(regs);
} }
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{ {
unsigned long flags = regs->pt.flags & RETURN_MASK; unsigned long flags = regs->pt.flags & RETURN_MASK;
...@@ -421,11 +425,11 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs * regs) ...@@ -421,11 +425,11 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
return flags | (VEFLAGS & current->thread.v86mask); return flags | (VEFLAGS & current->thread.v86mask);
} }
static inline int is_revectored(int nr, struct revectored_struct * bitmap) static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{ {
__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
:"=r" (nr) :"=r" (nr)
:"m" (*bitmap),"r" (nr)); :"m" (*bitmap), "r" (nr));
return nr; return nr;
} }
...@@ -437,7 +441,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ...@@ -437,7 +441,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
ptr--; \ ptr--; \
if (put_user(__val, base + ptr) < 0) \ if (put_user(__val, base + ptr) < 0) \
goto err_label; \ goto err_label; \
} while(0) } while (0)
#define pushw(base, ptr, val, err_label) \ #define pushw(base, ptr, val, err_label) \
do { \ do { \
...@@ -448,7 +452,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ...@@ -448,7 +452,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
ptr--; \ ptr--; \
if (put_user(val_byte(__val, 0), base + ptr) < 0) \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \
goto err_label; \ goto err_label; \
} while(0) } while (0)
#define pushl(base, ptr, val, err_label) \ #define pushl(base, ptr, val, err_label) \
do { \ do { \
...@@ -465,7 +469,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ...@@ -465,7 +469,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
ptr--; \ ptr--; \
if (put_user(val_byte(__val, 0), base + ptr) < 0) \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \
goto err_label; \ goto err_label; \
} while(0) } while (0)
#define popb(base, ptr, err_label) \ #define popb(base, ptr, err_label) \
({ \ ({ \
...@@ -512,7 +516,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap) ...@@ -512,7 +516,7 @@ static inline int is_revectored(int nr, struct revectored_struct * bitmap)
* in userspace is always better than an Oops anyway.) [KD] * in userspace is always better than an Oops anyway.) [KD]
*/ */
static void do_int(struct kernel_vm86_regs *regs, int i, static void do_int(struct kernel_vm86_regs *regs, int i,
unsigned char __user * ssp, unsigned short sp) unsigned char __user *ssp, unsigned short sp)
{ {
unsigned long __user *intr_ptr; unsigned long __user *intr_ptr;
unsigned long segoffs; unsigned long segoffs;
...@@ -521,7 +525,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i, ...@@ -521,7 +525,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
goto cannot_handle; goto cannot_handle;
if (is_revectored(i, &KVM86->int_revectored)) if (is_revectored(i, &KVM86->int_revectored))
goto cannot_handle; goto cannot_handle;
if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored)) if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
goto cannot_handle; goto cannot_handle;
intr_ptr = (unsigned long __user *) (i << 2); intr_ptr = (unsigned long __user *) (i << 2);
if (get_user(segoffs, intr_ptr)) if (get_user(segoffs, intr_ptr))
...@@ -543,15 +547,15 @@ static void do_int(struct kernel_vm86_regs *regs, int i, ...@@ -543,15 +547,15 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
return_to_32bit(regs, VM86_INTx + (i << 8)); return_to_32bit(regs, VM86_INTx + (i << 8));
} }
int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno) int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{ {
if (VMPI.is_vm86pus) { if (VMPI.is_vm86pus) {
if ( (trapno==3) || (trapno==1) ) if ((trapno == 3) || (trapno == 1))
return_to_32bit(regs, VM86_TRAP + (trapno << 8)); return_to_32bit(regs, VM86_TRAP + (trapno << 8));
do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
return 0; return 0;
} }
if (trapno !=1) if (trapno != 1)
return 1; /* we let this handle by the calling routine */ return 1; /* we let this handle by the calling routine */
if (current->ptrace & PT_PTRACED) { if (current->ptrace & PT_PTRACED) {
unsigned long flags; unsigned long flags;
...@@ -566,7 +570,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno ...@@ -566,7 +570,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
return 0; return 0;
} }
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{ {
unsigned char opcode; unsigned char opcode;
unsigned char __user *csp; unsigned char __user *csp;
...@@ -595,17 +599,17 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) ...@@ -595,17 +599,17 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
pref_done = 0; pref_done = 0;
do { do {
switch (opcode = popb(csp, ip, simulate_sigsegv)) { switch (opcode = popb(csp, ip, simulate_sigsegv)) {
case 0x66: /* 32-bit data */ data32=1; break; case 0x66: /* 32-bit data */ data32 = 1; break;
case 0x67: /* 32-bit address */ break; case 0x67: /* 32-bit address */ break;
case 0x2e: /* CS */ break; case 0x2e: /* CS */ break;
case 0x3e: /* DS */ break; case 0x3e: /* DS */ break;
case 0x26: /* ES */ break; case 0x26: /* ES */ break;
case 0x36: /* SS */ break; case 0x36: /* SS */ break;
case 0x65: /* GS */ break; case 0x65: /* GS */ break;
case 0x64: /* FS */ break; case 0x64: /* FS */ break;
case 0xf2: /* repnz */ break; case 0xf2: /* repnz */ break;
case 0xf3: /* rep */ break; case 0xf3: /* rep */ break;
default: pref_done = 1; default: pref_done = 1;
} }
} while (!pref_done); } while (!pref_done);
...@@ -628,7 +632,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) ...@@ -628,7 +632,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{ {
unsigned long newflags; unsigned long newflags;
if (data32) { if (data32) {
newflags=popl(ssp, sp, simulate_sigsegv); newflags = popl(ssp, sp, simulate_sigsegv);
SP(regs) += 4; SP(regs) += 4;
} else { } else {
newflags = popw(ssp, sp, simulate_sigsegv); newflags = popw(ssp, sp, simulate_sigsegv);
...@@ -636,20 +640,20 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) ...@@ -636,20 +640,20 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
} }
IP(regs) = ip; IP(regs) = ip;
CHECK_IF_IN_TRAP; CHECK_IF_IN_TRAP;
if (data32) { if (data32)
set_vflags_long(newflags, regs); set_vflags_long(newflags, regs);
} else { else
set_vflags_short(newflags, regs); set_vflags_short(newflags, regs);
}
VM86_FAULT_RETURN; VM86_FAULT_RETURN;
} }
/* int xx */ /* int xx */
case 0xcd: { case 0xcd: {
int intno=popb(csp, ip, simulate_sigsegv); int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip; IP(regs) = ip;
if (VMPI.vm86dbg_active) { if (VMPI.vm86dbg_active) {
if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] ) if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
return_to_32bit(regs, VM86_INTx + (intno << 8)); return_to_32bit(regs, VM86_INTx + (intno << 8));
} }
do_int(regs, intno, ssp, sp); do_int(regs, intno, ssp, sp);
...@@ -663,9 +667,9 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code) ...@@ -663,9 +667,9 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
unsigned long newcs; unsigned long newcs;
unsigned long newflags; unsigned long newflags;
if (data32) { if (data32) {
newip=popl(ssp, sp, simulate_sigsegv); newip = popl(ssp, sp, simulate_sigsegv);
newcs=popl(ssp, sp, simulate_sigsegv); newcs = popl(ssp, sp, simulate_sigsegv);
newflags=popl(ssp, sp, simulate_sigsegv); newflags = popl(ssp, sp, simulate_sigsegv);
SP(regs) += 12; SP(regs) += 12;
} else { } else {
newip = popw(ssp, sp, simulate_sigsegv); newip = popw(ssp, sp, simulate_sigsegv);
...@@ -734,18 +738,18 @@ static struct vm86_irqs { ...@@ -734,18 +738,18 @@ static struct vm86_irqs {
static DEFINE_SPINLOCK(irqbits_lock); static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits; static int irqbits;
#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \ #define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
| (1 << SIGUNUSED) ) | (1 << SIGUNUSED))
static irqreturn_t irq_handler(int intno, void *dev_id) static irqreturn_t irq_handler(int intno, void *dev_id)
{ {
int irq_bit; int irq_bit;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&irqbits_lock, flags); spin_lock_irqsave(&irqbits_lock, flags);
irq_bit = 1 << intno; irq_bit = 1 << intno;
if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk) if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
goto out; goto out;
irqbits |= irq_bit; irqbits |= irq_bit;
if (vm86_irqs[intno].sig) if (vm86_irqs[intno].sig)
...@@ -759,7 +763,7 @@ static irqreturn_t irq_handler(int intno, void *dev_id) ...@@ -759,7 +763,7 @@ static irqreturn_t irq_handler(int intno, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
out: out:
spin_unlock_irqrestore(&irqbits_lock, flags); spin_unlock_irqrestore(&irqbits_lock, flags);
return IRQ_NONE; return IRQ_NONE;
} }
...@@ -770,9 +774,9 @@ static inline void free_vm86_irq(int irqnumber) ...@@ -770,9 +774,9 @@ static inline void free_vm86_irq(int irqnumber)
free_irq(irqnumber, NULL); free_irq(irqnumber, NULL);
vm86_irqs[irqnumber].tsk = NULL; vm86_irqs[irqnumber].tsk = NULL;
spin_lock_irqsave(&irqbits_lock, flags); spin_lock_irqsave(&irqbits_lock, flags);
irqbits &= ~(1 << irqnumber); irqbits &= ~(1 << irqnumber);
spin_unlock_irqrestore(&irqbits_lock, flags); spin_unlock_irqrestore(&irqbits_lock, flags);
} }
void release_vm86_irqs(struct task_struct *task) void release_vm86_irqs(struct task_struct *task)
...@@ -788,10 +792,10 @@ static inline int get_and_reset_irq(int irqnumber) ...@@ -788,10 +792,10 @@ static inline int get_and_reset_irq(int irqnumber)
int bit; int bit;
unsigned long flags; unsigned long flags;
int ret = 0; int ret = 0;
if (invalid_vm86_irq(irqnumber)) return 0; if (invalid_vm86_irq(irqnumber)) return 0;
if (vm86_irqs[irqnumber].tsk != current) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0;
spin_lock_irqsave(&irqbits_lock, flags); spin_lock_irqsave(&irqbits_lock, flags);
bit = irqbits & (1 << irqnumber); bit = irqbits & (1 << irqnumber);
irqbits &= ~bit; irqbits &= ~bit;
if (bit) { if (bit) {
...@@ -799,7 +803,7 @@ static inline int get_and_reset_irq(int irqnumber) ...@@ -799,7 +803,7 @@ static inline int get_and_reset_irq(int irqnumber)
ret = 1; ret = 1;
} }
spin_unlock_irqrestore(&irqbits_lock, flags); spin_unlock_irqrestore(&irqbits_lock, flags);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment