Commit 6f84f62a authored by Linus Torvalds

Merge bk://jfs.bkbits.net/linux-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 2b0c7536 4a6fdb2d
@@ -485,8 +485,7 @@ static void apic_pm_suspend(void *data)
 	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
 	apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	disable_local_APIC();
 	rdmsr(MSR_IA32_APICBASE, l, h);
 	l &= ~MSR_IA32_APICBASE_ENABLE;
@@ -499,8 +498,7 @@ static void apic_pm_resume(void *data)
 	unsigned int l, h;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	rdmsr(MSR_IA32_APICBASE, l, h);
 	l &= ~MSR_IA32_APICBASE_BASE;
 	l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
@@ -1087,9 +1085,6 @@ void smp_apic_timer_interrupt(struct pt_regs regs)
 	irq_enter();
 	smp_local_timer_interrupt(&regs);
 	irq_exit();
-	if (softirq_pending(cpu))
-		do_softirq();
 }
 /*
......
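Most hunks below apply the same mechanical conversion: the two-step local_save_flags(flags); local_irq_disable(); sequence becomes a single local_irq_save(flags), which records the interrupt flag and disables interrupts in one macro. A rough userspace illustration of that contract only (the real macros are arch-specific assembly; the toy_* names below are stand-ins, not kernel code):

#include <stdio.h>
#include <stdbool.h>

static bool irqs_on = true;		/* stand-in for the CPU interrupt flag */

#define toy_save_flags(f)	((f) = irqs_on)
#define toy_irq_disable()	(irqs_on = false)
#define toy_irq_save(f)		do { toy_save_flags(f); toy_irq_disable(); } while (0)
#define toy_irq_restore(f)	(irqs_on = (f))

int main(void)
{
	bool flags;

	toy_irq_save(flags);		/* one step: remember the flag, then disable */
	printf("critical section, irqs_on=%d\n", irqs_on);
	toy_irq_restore(flags);		/* restores whatever state we entered with */
	printf("after restore, irqs_on=%d\n", irqs_on);
	return 0;
}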
@@ -10,6 +10,7 @@
 #include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/tqueue.h>
+#include <linux/interrupt.h>
 #include <asm/processor.h>
 #include <asm/system.h>
......
@@ -185,8 +185,10 @@ ENTRY(ret_from_fork)
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
-ret_from_intr:
 ret_from_exception:
+	preempt_stop
+ret_from_intr:
+	GET_THREAD_INFO(%ebx)
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
 	testl $(VM_MASK | 3), %eax
@@ -262,7 +264,7 @@ work_resched:
 work_notifysig:				# deal with pending signals and
 					# notify-resume requests
-	testl $(VM_MASK),EFLAGS(%esp)
+	testl $VM_MASK, EFLAGS(%esp)
 	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
@@ -333,14 +335,12 @@ vector=vector+1
 common_interrupt:
 	SAVE_ALL
 	call do_IRQ
-	GET_THREAD_INFO(%ebx)
 	jmp ret_from_intr
 #define BUILD_INTERRUPT(name, nr)	\
 ENTRY(name)				\
 	pushl $nr-256;			\
 	SAVE_ALL			\
-	GET_THREAD_INFO(%ebx);		\
 	call smp_/**/name;		\
 	jmp ret_from_intr;
@@ -400,10 +400,8 @@ error_code:
 	movl $(__KERNEL_DS), %edx
 	movl %edx, %ds
 	movl %edx, %es
-	GET_THREAD_INFO(%ebx)
 	call *%edi
 	addl $8, %esp
-	preempt_stop
 	jmp ret_from_exception
 ENTRY(coprocessor_error)
@@ -430,7 +428,6 @@ device_not_available_emulate:
 	pushl $0		# temporary storage for ORIG_EIP
 	call math_emulate
 	addl $4, %esp
-	preempt_stop
 	jmp ret_from_exception
 ENTRY(debug)
......
@@ -187,6 +187,10 @@ int show_interrupts(struct seq_file *p, void *v)
 #if CONFIG_SMP
 inline void synchronize_irq(unsigned int irq)
 {
+	/* is there anything to synchronize with? */
+	if (!irq_desc[irq].action)
+		return;
 	while (irq_desc[irq].status & IRQ_INPROGRESS)
 		cpu_relax();
 }
@@ -392,8 +396,6 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
 	irq_exit();
-	if (softirq_pending(cpu))
-		do_softirq();
 	return 1;
 }
......
@@ -381,7 +381,7 @@ static int arr3_protected;
 static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
 {
 	/* Disable interrupts locally */
-	local_save_flags (ctxt->flags); local_irq_disable ();
+	local_irq_save(ctxt->flags);
 	if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
 		return;
@@ -546,7 +546,7 @@ static void cyrix_get_arr (unsigned int reg, unsigned long *base,
 	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
 	/* Save flags and disable interrupts */
-	local_save_flags (flags); local_irq_disable ();
+	local_irq_save(flags);
 	ccr3 = getCx86 (CX86_CCR3);
 	setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
......
@@ -161,9 +161,7 @@ static inline void send_IPI_mask_bitmask(int mask, int vector)
 	unsigned long cfg;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	/*
 	 * Wait for idle.
@@ -200,8 +198,7 @@ static inline void send_IPI_mask_sequence(int mask, int vector)
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
 		query_mask = 1 << query_cpu;
......
@@ -306,7 +306,7 @@ static struct pci_ops * __devinit pci_check_direct(void)
 	unsigned int tmp;
 	unsigned long flags;
-	local_save_flags(flags); local_irq_disable();
+	local_irq_save(flags);
 	/*
 	 * Check if configuration type 1 works.
......
@@ -82,7 +82,7 @@ static unsigned long bios32_service(unsigned long service)
 	unsigned long entry;		/* %edx */
 	unsigned long flags;
-	local_save_flags(flags); local_irq_disable();
+	local_irq_save(flags);
 	__asm__("lcall *(%%edi); cld"
 		: "=a" (return_code),
 		  "=b" (address),
@@ -122,7 +122,7 @@ static int __devinit check_pcibios(void)
 	if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
 		pci_indirect.address = pcibios_entry + PAGE_OFFSET;
-		local_save_flags(flags); local_irq_disable();
+		local_irq_save(flags);
 		__asm__(
 			"lcall *(%%edi); cld\n\t"
 			"jc 1f\n\t"
......
@@ -460,7 +460,7 @@ void do_tty_hangup(void *data)
 {
 	unsigned long flags;
-	local_save_flags(flags); local_irq_disable();	// FIXME: is this safe?
+	local_irq_save(flags);	// FIXME: is this safe?
 	if (tty->ldisc.flush_buffer)
 		tty->ldisc.flush_buffer(tty);
 	if (tty->driver.flush_buffer)
@@ -1900,7 +1900,7 @@ static void flush_to_ldisc(void *private_)
 		fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
 		tty->flip.buf_num = 0;
-		local_save_flags(flags); local_irq_disable();	// FIXME: is this safe?
+		local_irq_save(flags);	// FIXME: is this safe?
 		tty->flip.char_buf_ptr = tty->flip.char_buf;
 		tty->flip.flag_buf_ptr = tty->flip.flag_buf;
 	} else {
@@ -1908,7 +1908,7 @@ static void flush_to_ldisc(void *private_)
 		fp = tty->flip.flag_buf;
 		tty->flip.buf_num = 1;
-		local_save_flags(flags); local_irq_disable();	// FIXME: is this safe?
+		local_irq_save(flags);	// FIXME: is this safe?
 		tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
 		tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
 	}
......
@@ -113,8 +113,7 @@ _kd_mksound(unsigned int hz, unsigned int ticks)
 	if (hz > 20 && hz < 32767)
 		count = 1193180 / hz;
-	local_save_flags(flags);	// FIXME: is this safe?
-	local_irq_disable();
+	local_irq_save(flags);	// FIXME: is this safe?
 	del_timer(&sound_timer);
 	if (count) {
 		/* enable counter 2 */
......
@@ -194,22 +194,16 @@ int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar, char *buf
 	request_queue_t *q = &drive->queue;
 	struct list_head *queue_head = &q->queue_head;
 	DECLARE_COMPLETION(wait);
+	struct request req;
 #ifdef CONFIG_BLK_DEV_PDC4030
 	if (ch->chipset == ide_pdc4030 && buf)
 		return -ENOSYS;  /* special drive cmds not supported */
 #endif
-	rq = __blk_get_request(&drive->queue, READ);
-	if (!rq)
-		rq = __blk_get_request(&drive->queue, WRITE);
-	/*
-	 * FIXME: Make sure there is a free slot on the list!
-	 */
-	BUG_ON(!rq);
+	memset(&req, 0, sizeof(req));
+	rq = &req;
 	rq->flags = REQ_SPECIAL;
 	rq->buffer = buf;
 	rq->special = ar;
......
@@ -1080,30 +1080,19 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driver
  */
 int ide_unregister_subdriver(struct ata_device *drive)
 {
-	unsigned long flags;
-	local_save_flags(flags);	// FIXME: is this safe?
-	local_irq_disable();
 #if 0
-	if (__MOD_IN_USE(ata_ops(drive)->owner)) {
-		local_irq_restore(flags);	// FIXME: is this safe?
+	if (__MOD_IN_USE(ata_ops(drive)->owner))
 		return 1;
-	}
 #endif
-	if (drive->usage || drive->busy || !ata_ops(drive)) {
-		local_irq_restore(flags);	// FIXME: is this safe?
+	if (drive->usage || drive->busy || !ata_ops(drive))
 		return 1;
-	}
 #if defined(CONFIG_BLK_DEV_ISAPNP) && defined(CONFIG_ISAPNP) && defined(MODULE)
 	pnpide_init(0);
 #endif
 	drive->driver = NULL;
-	local_irq_restore(flags);	// FIXME: is this safe?
 	return 0;
 }
......
@@ -84,8 +84,7 @@ static int a3d_read_packet(struct gameport *gameport, int length, char *data)
 	t = gameport_time(gameport, A3D_MAX_START);
 	s = gameport_time(gameport, A3D_MAX_STROBE);
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);
......
@@ -152,8 +152,7 @@ static void adi_read_packet(struct adi_port *port)
 		s[i] = 0;
 	}
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = z = gameport_read(gameport);
......
@@ -232,8 +232,7 @@ static int analog_cooked_read(struct analog_port *port)
 	loopout = (ANALOG_LOOP_TIME * port->loop) / 1000;
 	timeout = ANALOG_MAX_TIME * port->speed;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	GET_TIME(now);
 	local_irq_restore(flags);
......
@@ -73,8 +73,7 @@ static unsigned char cobra_read_packet(struct gameport *gameport, unsigned int *
 		t[i] = COBRA_MAX_STROBE;
 	}
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	u = gameport_read(gameport);
......
@@ -113,8 +113,7 @@ static void gc_n64_read_packet(struct gc *gc, unsigned char *data)
 	 * Request the pad to transmit data
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	for (i = 0; i < GC_N64_REQUEST_LENGTH; i++) {
 		parport_write_data(gc->pd->port, GC_N64_POWER_W | ((GC_N64_REQUEST >> i) & 1 ? GC_N64_OUT : 0));
 		udelay(GC_N64_DWS);
@@ -270,8 +269,7 @@ static int gc_psx_read_packet(struct gc *gc, unsigned char *data)
 	parport_write_data(gc->pd->port, GC_PSX_CLOCK | GC_PSX_POWER);	/* Deselect, begin command */
 	udelay(GC_PSX_DELAY * 2);
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gc_psx_command(gc, 0x01);	/* Access pad */
 	id = gc_psx_command(gc, 0x42);	/* Get device id */
......
@@ -106,8 +106,7 @@ static int gf2k_read_packet(struct gameport *gameport, int length, char *data)
 	i = 0;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);;
@@ -137,8 +136,7 @@ static void gf2k_trigger_seq(struct gameport *gameport, short *seq)
 	unsigned long flags;
 	int i, t;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	i = 0;
 	do {
......
@@ -98,8 +98,7 @@ static int grip_gpp_read_packet(struct gameport *gameport, int shift, unsigned i
 	t = strobe;
 	i = 0;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	v = gameport_read(gameport) >> shift;
@@ -140,8 +139,7 @@ static int grip_xt_read_packet(struct gameport *gameport, int shift, unsigned in
 	status = buf = i = j = 0;
 	t = strobe;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	v = w = (gameport_read(gameport) >> shift) & 3;
......
@@ -98,8 +98,7 @@ static int guillemot_read_packet(struct gameport *gameport, u8 *data)
 	t = gameport_time(gameport, GUILLEMOT_MAX_START);
 	s = gameport_time(gameport, GUILLEMOT_MAX_STROBE);
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);
......
@@ -103,8 +103,7 @@ static int interact_read_packet(struct gameport *gameport, int length, u32 *data
 	t = gameport_time(gameport, INTERACT_MAX_START);
 	s = gameport_time(gameport, INTERACT_MAX_STROBE);
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);
......
@@ -149,8 +149,7 @@ static int sw_read_packet(struct gameport *gameport, unsigned char *buf, int len
 	pending = 0;
 	sched = 0;
-	local_save_flags(flags);		/* Quiet, please */
-	local_irq_disable();
+	local_irq_save(flags);			/* Quiet, please */
 	gameport_trigger(gameport);		/* Trigger */
 	v = gameport_read(gameport);
@@ -245,8 +244,7 @@ static void sw_init_digital(struct gameport *gameport)
 	unsigned long flags;
 	int i, t;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	i = 0;
 	do {
......
@@ -125,8 +125,7 @@ static int tmdc_read_packet(struct gameport *gameport, unsigned char data[2][TMD
 		i[k] = j[k] = 0;
 	}
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	w = gameport_read(gameport) >> 4;
......
@@ -3389,8 +3389,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
 	 * Don't take interrupts on this CPU will bit banging
 	 * the %#%#@$ I2C device
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	eeprom_start(regs);
......
@@ -338,8 +338,7 @@ extern void fs3270_devfs_unregister(tub_t *);
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
 	int success; \
-	local_save_flags(flags); \
-	local_irq_disable(); \
+	local_irq_save(flags); \
 	success = spin_trylock(lock); \
 	if (success == 0) \
 		local_irq_restore(flags); \
......
@@ -163,11 +163,10 @@ s390_init_IRQ (void)
 	 * function we resestablish the old environment.
 	 *
 	 * Note : as we don't need a system wide lock, therefore
-	 * we shouldn't use cli(), but local_irq_disable() as this
+	 * we shouldn't use cli(), but local_irq_save() as this
 	 * affects the current CPU only.
 	 */
-	local_save_flags (flags);
-	local_irq_disable ();
+	local_irq_save(flags);
 	/*
 	 * disable all interrupts
@@ -578,10 +577,9 @@ read_dev_chars (int irq, void **buffer, int length)
 	 * also require to run disabled.
 	 *
 	 * Note : as no global lock is required, we must not use
-	 * cli(), but local_irq_disable() instead.
+	 * cli(), but local_irq_save() instead.
 	 */
-	local_save_flags (flags);
-	local_irq_disable ();
+	local_irq_save(flags);
 	rdc_ccw = &ioinfo[irq]->senseccw;
@@ -720,8 +718,7 @@ read_conf_data (int irq, void **buffer, int *length, __u8 lpm)
 	int emulated = 0;	/* no i/O handler installed */
 	int retry = 5;	/* retry count */
-	local_save_flags (flags);
-	local_irq_disable ();
+	local_irq_save(flags);
 	if (!ioinfo[irq]->ui.flags.ready) {
 		pdevstat = &devstat;
......
@@ -254,8 +254,7 @@ s390_machine_check_handler(void *parm)
 	found = 0;	/* init ... */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	do {
......
@@ -62,10 +62,7 @@ void __buffer_error(char *file, int line)
 	printk("buffer layer error at %s:%d\n", file, line);
 #ifdef CONFIG_X86
 	printk("Pass this trace through ksymoops for reporting\n");
-	{
-		extern void show_stack(long *esp);
-		show_stack(0);
-	}
+	show_stack(0);
 #endif
 }
 EXPORT_SYMBOL(__buffer_error);
......
@@ -16,22 +16,78 @@ typedef struct {
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-#define IRQ_OFFSET 64
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * HARDIRQ_MASK: 0x0000ff00
+ * SOFTIRQ_MASK: 0x00ff0000
+ */
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	8
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define __MASK(x)	((1UL << (x))-1)
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
 /*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
  */
-#define in_interrupt() \
-	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
-#define in_irq in_interrupt
+#define in_irq()	(hardirq_count())
+#define in_softirq()	(softirq_count())
+#define in_interrupt()	(irq_count())
 #define hardirq_trylock()	(!in_interrupt())
 #define hardirq_endlock()	do { } while (0)
-#define irq_enter()	(preempt_count() += IRQ_OFFSET)
-#define irq_exit()	(preempt_count() -= IRQ_OFFSET)
+#define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+#define irq_exit()							\
+do {									\
+	preempt_count() -= IRQ_EXIT_OFFSET;				\
+	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
+		do_softirq();						\
+	preempt_enable_no_resched();					\
+} while (0)
 #ifndef CONFIG_SMP
 # define synchronize_irq(irq)	barrier()
@@ -39,4 +95,6 @@ typedef struct {
 extern void synchronize_irq(unsigned int irq);
 #endif /* CONFIG_SMP */
+extern void show_stack(unsigned long * esp);
 #endif /* __ASM_HARDIRQ_H */
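The comment block above describes how preempt_count is split into preemption, softirq and hardirq fields. A minimal userspace sketch of that arithmetic, applying the same shifts and masks to a sample value (plain C for illustration only; preempt_count() itself is per-thread kernel state):

#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	8
#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __MASK(x)	((1UL << (x)) - 1)
#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

int main(void)
{
	/* e.g. one hardirq nested over one softirq, preemption disabled twice */
	unsigned long count = (1UL << HARDIRQ_SHIFT) | (1UL << SOFTIRQ_SHIFT) | 2;

	printf("preempt depth: %lu\n", (count & PREEMPT_MASK) >> PREEMPT_SHIFT);
	printf("softirq count: %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
	printf("hardirq count: %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
	printf("in_interrupt:  %s\n",
	       (count & (HARDIRQ_MASK | SOFTIRQ_MASK)) ? "yes" : "no");
	return 0;
}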
@@ -5,23 +5,16 @@
 #include <asm/hardirq.h>
 #define local_bh_disable() \
-	do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
+	do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
 #define __local_bh_enable() \
-	do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
+	do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
 #define local_bh_enable() \
 do { \
-	if (unlikely((preempt_count() == IRQ_OFFSET) && \
-		softirq_pending(smp_processor_id()))) { \
-		__local_bh_enable(); \
+	__local_bh_enable(); \
+	if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
 		do_softirq(); \
 	preempt_check_resched(); \
-	} else { \
-		__local_bh_enable(); \
-		preempt_check_resched(); \
-	} \
 } while (0)
-#define in_softirq() in_interrupt()
 #endif /* __ASM_SOFTIRQ_H */
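The reworked local_bh_enable() above drops the softirq count first and only runs pending softirqs once the counter says we are fully outside interrupt context, so nested disable/enable pairs defer the work to the outermost enable. A rough userspace model of just that behaviour (the counter, the mask and do_softirq() here are stand-ins, not the kernel implementations):

#include <stdio.h>

static unsigned int preempt_cnt;	/* stand-in for preempt_count() */
static int softirqs_pending = 1;	/* pretend one softirq is queued */

#define SOFTIRQ_OFFSET	(1U << 8)
#define IRQ_MASK	0x00ffff00U	/* softirq + hardirq fields */

static void do_softirq(void)		/* stand-in for the real one */
{
	softirqs_pending = 0;
	printf("softirqs run, count=%#x\n", preempt_cnt);
}

static void local_bh_disable(void) { preempt_cnt += SOFTIRQ_OFFSET; }

static void local_bh_enable(void)
{
	preempt_cnt -= SOFTIRQ_OFFSET;	/* __local_bh_enable() first */
	if (!(preempt_cnt & IRQ_MASK) && softirqs_pending)
		do_softirq();		/* only once fully out of bh/irq context */
}

int main(void)
{
	local_bh_disable();
	local_bh_disable();		/* nested */
	local_bh_enable();		/* still nested: nothing runs */
	local_bh_enable();		/* outermost: softirqs run now */
	return 0;
}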
@@ -318,19 +318,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
+#define irqs_disabled()			\
+({					\
+	unsigned long flags;		\
+	local_save_flags(flags);	\
+	!(flags & (1<<9));		\
+})
 /* For spinlocks etc */
 #define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-/*
- * Compatibility macros - they will be removed after some time.
- */
-#if !CONFIG_SMP
-# define sti() local_irq_enable()
-# define cli() local_irq_disable()
-# define save_flags(flags) local_save_flags(flags)
-# define restore_flags(flags) local_irq_restore(flags)
-#endif
 /*
  * disable hlt during certain critical i/o operations
  */
......
@@ -51,6 +51,8 @@ struct thread_info {
 /*
  * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #ifndef __ASSEMBLY__
 #define INIT_THREAD_INFO(tsk)			\
@@ -59,6 +61,7 @@ struct thread_info {
 	exec_domain:	&default_exec_domain,	\
 	flags:		0,			\
 	cpu:		0,			\
+	preempt_count:	1,			\
 	addr_limit:	KERNEL_DS,		\
 }
......
@@ -26,17 +26,17 @@
 #define write_lock_irq(lock)	do { local_irq_disable(); write_lock(lock); } while (0)
 #define write_lock_bh(lock)	do { local_bh_disable(); write_lock(lock); } while (0)
-#define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define spin_unlock_irqrestore(lock, flags)	do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
 #define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock)	do { spin_unlock(lock); local_irq_enable(); } while (0)
+#define spin_unlock_irq(lock)	do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
 #define spin_unlock_bh(lock)	do { spin_unlock(lock); local_bh_enable(); } while (0)
-#define read_unlock_irqrestore(lock, flags)	do { read_unlock(lock); local_irq_restore(flags); } while (0)
+#define read_unlock_irqrestore(lock, flags)	do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define read_unlock_irq(lock)	do { read_unlock(lock); local_irq_enable(); } while (0)
+#define read_unlock_irq(lock)	do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
 #define read_unlock_bh(lock)	do { read_unlock(lock); local_bh_enable(); } while (0)
-#define write_unlock_irqrestore(lock, flags)	do { write_unlock(lock); local_irq_restore(flags); } while (0)
+#define write_unlock_irqrestore(lock, flags)	do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
-#define write_unlock_irq(lock)	do { write_unlock(lock); local_irq_enable(); } while (0)
+#define write_unlock_irq(lock)	do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
 #define write_unlock_bh(lock)	do { write_unlock(lock); local_bh_enable(); } while (0)
 #define spin_trylock_bh(lock)	({ int __r; local_bh_disable();\
 	__r = spin_trylock(lock); \
@@ -135,12 +135,6 @@ do { \
 	preempt_enable(); \
 } while (0)
-#define spin_unlock_no_resched(lock) \
-do { \
-	_raw_spin_unlock(lock); \
-	preempt_enable_no_resched(); \
-} while (0)
 #define read_lock(lock)		({preempt_disable(); _raw_read_lock(lock);})
 #define read_unlock(lock)	({_raw_read_unlock(lock); preempt_enable();})
 #define write_lock(lock)	({preempt_disable(); _raw_write_lock(lock);})
@@ -153,7 +147,6 @@ do { \
 #define spin_lock(lock)		_raw_spin_lock(lock)
 #define spin_trylock(lock)	_raw_spin_trylock(lock)
 #define spin_unlock(lock)	_raw_spin_unlock(lock)
-#define spin_unlock_no_resched(lock)	_raw_spin_unlock(lock)
 #define read_lock(lock)		_raw_read_lock(lock)
 #define read_unlock(lock)	_raw_read_unlock(lock)
......
@@ -105,12 +105,14 @@ static void shm_open (struct vm_area_struct *shmd)
  *
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.sem locked
+ * It has to be called with shp and shm_ids.sem locked,
+ * but returns with shp unlocked and freed.
  */
 static void shm_destroy (struct shmid_kernel *shp)
 {
 	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	shm_rmid (shp->id);
+	shm_unlock(shp->id);
 	shmem_lock(shp->shm_file, 0);
 	fput (shp->shm_file);
 	kfree (shp);
@@ -138,8 +140,8 @@ static void shm_close (struct vm_area_struct *shmd)
 	if(shp->shm_nattch == 0 &&
 	   shp->shm_flags & SHM_DEST)
 		shm_destroy (shp);
+	else
 		shm_unlock(id);
 	up (&shm_ids.sem);
 }
@@ -502,11 +504,9 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
 		shp->shm_flags |= SHM_DEST;
 		/* Do not find it any more */
 		shp->shm_perm.key = IPC_PRIVATE;
+		shm_unlock(shmid);
 	} else
 		shm_destroy (shp);
-	/* Unlock */
-	shm_unlock(shmid);
 	up(&shm_ids.sem);
 	return err;
 }
@@ -644,7 +644,8 @@ asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
 	if(shp->shm_nattch == 0 &&
 	   shp->shm_flags & SHM_DEST)
 		shm_destroy (shp);
-	shm_unlock(shmid);
+	else
+		shm_unlock(shmid);
 	up (&shm_ids.sem);
 	*raddr = (unsigned long) user_addr;
......
@@ -740,10 +740,10 @@ struct task_struct *do_fork(unsigned long clone_flags,
 	 * total amount of pending timeslices in the system doesnt change,
 	 * resulting in more scheduling fairness.
 	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	p->time_slice = (current->time_slice + 1) >> 1;
 	current->time_slice >>= 1;
+	p->sleep_timestamp = jiffies;
 	if (!current->time_slice) {
 		/*
 		 * This case is rare, it happens when the parent has only
@@ -751,10 +751,12 @@ struct task_struct *do_fork(unsigned long clone_flags,
 		 * runqueue lock is not a problem.
 		 */
 		current->time_slice = 1;
+		preempt_disable();
 		scheduler_tick(0, 0);
-	}
-	p->sleep_timestamp = jiffies;
-	local_irq_restore(flags);
+		local_irq_restore(flags);
+		preempt_enable();
+	} else
+		local_irq_restore(flags);
 	/*
 	 * Ok, add it to the run-queues and make it
......
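The do_fork() hunk above splits the parent's remaining timeslice with the child instead of granting a fresh one, which is why the comment can say the total amount of pending timeslices does not change. A small worked example of that arithmetic (plain C, mirroring only the two assignment lines in the hunk):

#include <stdio.h>

int main(void)
{
	int parent = 7;				/* ticks the parent had left before fork */
	int child  = (parent + 1) >> 1;		/* p->time_slice = (current->time_slice + 1) >> 1 */
	parent >>= 1;				/* current->time_slice >>= 1 */

	/* 7 -> child 4 + parent 3: the pending total never grows */
	printf("child=%d parent=%d total=%d\n", child, parent, child + parent);
	return 0;
}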
@@ -728,7 +728,7 @@ void scheduler_tick(int user_tick, int system)
 	if (p == rq->idle) {
 		/* note: this timer irq context must be accounted for as well */
-		if (preempt_count() >= 2*IRQ_OFFSET)
+		if (irq_count() >= 2*HARDIRQ_OFFSET)
 			kstat.per_cpu_system[cpu] += system;
 #if CONFIG_SMP
 		idle_tick();
@@ -902,6 +902,12 @@ asmlinkage void preempt_schedule(void)
 	if (unlikely(ti->preempt_count))
 		return;
+	if (unlikely(irqs_disabled())) {
+		preempt_disable();
+		printk("bad: schedule() with irqs disabled!\n");
+		show_stack(NULL);
+		preempt_enable_no_resched();
+	}
 need_resched:
 	ti->preempt_count = PREEMPT_ACTIVE;
@@ -1020,7 +1026,7 @@ void wait_for_completion(struct completion *x)
 	wait_queue_t wait; \
 	init_waitqueue_entry(&wait, current);
 #define SLEEP_ON_HEAD \
 	spin_lock_irqsave(&q->lock,flags); \
 	__add_wait_queue(q, &wait); \
 	spin_unlock(&q->lock);
@@ -1467,7 +1473,12 @@ asmlinkage long sys_sched_yield(void)
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	spin_unlock_no_resched(&rq->lock);
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt:
+	 */
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();
 	schedule();
@@ -1680,8 +1691,7 @@ void __init init_idle(task_t *idle, int cpu)
 	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	double_rq_lock(idle_rq, rq);
 	idle_rq->curr = idle_rq->idle = idle;
@@ -1697,6 +1707,8 @@ void __init init_idle(task_t *idle, int cpu)
 	/* Set the preempt count _outside_ the spinlocks! */
 #if CONFIG_PREEMPT
 	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+#else
+	idle->thread_info->preempt_count = 0;
 #endif
 }
......
@@ -605,6 +605,7 @@ static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct
 	}
 	error = move_from_swap_cache(page, idx, mapping);
 	if (error < 0) {
+		spin_unlock(&info->lock);
 		unlock_page(page);
 		page_cache_release(page);
 		return ERR_PTR(error);
@@ -942,7 +943,6 @@ shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 fail_write:
 	status = -EFAULT;
 	ClearPageUptodate(page);
-	kunmap(page);
 	goto unlock;
 }
@@ -1080,9 +1080,6 @@ static int shmem_link(struct dentry *old_dentry, struct inode * dir, struct dent
 {
 	struct inode *inode = old_dentry->d_inode;
-	if (S_ISDIR(inode->i_mode))
-		return -EPERM;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 	inode->i_nlink++;
 	atomic_inc(&inode->i_count);	/* New dentry reference */
......
@@ -1355,7 +1355,11 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
 		cc_entry(cc)[cc->avail++] =
 			kmem_cache_alloc_one_tail(cachep, slabp);
 	}
-	spin_unlock(&cachep->spinlock);
+	/*
+	 * CAREFUL: do not enable preemption yet, the per-CPU
+	 * entries rely on us being atomic.
+	 */
+	_raw_spin_unlock(&cachep->spinlock);
 	if (cc->avail)
 		return cc_entry(cc)[--cc->avail];
@@ -1382,8 +1386,12 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = kmem_cache_alloc_batch(cachep,flags);
+		local_irq_restore(save_flags);
+		/* end of non-preemptible region */
+		preempt_enable();
 		if (!objp)
 			goto alloc_new_slab_nolock;
+		return objp;
 	}
 } else {
 	spin_lock(&cachep->spinlock);
@@ -1399,9 +1407,11 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 alloc_new_slab:
 #ifdef CONFIG_SMP
 	spin_unlock(&cachep->spinlock);
-alloc_new_slab_nolock:
 #endif
 	local_irq_restore(save_flags);
+#ifdef CONFIG_SMP
+alloc_new_slab_nolock:
+#endif
 	if (kmem_cache_grow(cachep, flags))
 		/* Someone may have stolen our objs. Doesn't matter, we'll
 		 * just come back here again.
......
@@ -318,7 +318,7 @@ void __kfree_skb(struct sk_buff *skb)
 	dst_release(skb->dst);
 	if(skb->destructor) {
-		if (0 && in_irq())
+		if (in_irq())
 			printk(KERN_WARNING "Warning: kfree_skb on "
 			       "hard IRQ %p\n", NET_CALLER(skb));
 		skb->destructor(skb);
......
@@ -235,10 +235,9 @@ static inline void write_seq(struct solo1_state *s, unsigned char data)
 	int i;
 	unsigned long flags;
-	/* the local_irq_disable stunt is to send the data within the command window */
+	/* the local_irq_save stunt is to send the data within the command window */
 	for (i = 0; i < 0xffff; i++) {
-		local_save_flags(flags);
-		local_irq_disable();
+		local_irq_save(flags);
 		if (!(inb(s->sbbase+0xc) & 0x80)) {
 			outb(data, s->sbbase+0xc);
 			local_irq_restore(flags);
......