Commit a6efb709 authored by Ingo Molnar

[PATCH] irqlock patch 2.5.27-H6

 - init thread needs to have preempt_count of 1 until sched_init().
   (William Lee Irwin III)
 - clean up the irq-mask macros. (Linus)
 - add barrier() to irq_enter() and irq_exit(). (based on Oleg Nesterov's
   comment.)
 - move the irqs-off check into preempt_schedule() and remove
   CONFIG_DEBUG_IRQ_SCHEDULE.
 - remove spin_unlock_no_resched() and comment the affected places more
   aggressively.
 - slab.c needs spin_unlock_no_resched() semantics instead of a full
   spin_unlock(). (It also has to check for preemption in the right
   spot.) This should fix the memory corruption.
 - irq_exit() needs to run softirqs if no interrupt context is active
   any more - in the previous patch it ran them when preempt_count() was
   0, which is incorrect.
 - spinlock macros are updated to enable preemption after enabling
   interrupts. Besides avoiding false positive warnings, this also
 - fork.c has to call scheduler_tick() with preemption disabled -
   otherwise scheduler_tick()'s spin_unlock can preempt!
 - irqs_disabled() macro introduced.
 - [ all other local_irq_enable() or sti instances conditional on
     CONFIG_DEBUG_IRQ_SCHEDULE are to fix false positive warnings. ]
 - fix buggy in_softirq(). Fortunately the bug made the test broader,
   which didn't result in algorithmic breakage, just suboptimal
   performance. (See the counter sketch after the changelog.)
 - move do_softirq() processing into irq_exit() => this also fixes the
   softirq processing bugs present in apic.c IRQ handlers that did not
   test for softirqs after irq_exit().
 - simplify local_bh_enable().
parent 1da3174f
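
Several of the items above (the in_softirq() fix, the reworked irq_exit(),
the new counting macros) hinge on one idea: preempt_count now packs three
independent fields. The layout can be illustrated outside the kernel; the
following is a minimal userspace sketch using the constants from the
include/asm-i386/hardirq.h hunk below (the counter here is a plain variable
standing in for the real per-thread field):

	#include <stdio.h>

	/* field layout from the hardirq.h hunk below: 8 bits each */
	#define PREEMPT_BITS	8
	#define SOFTIRQ_BITS	8
	#define HARDIRQ_BITS	8

	#define PREEMPT_SHIFT	0
	#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
	#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

	#define __MASK(x)	((1UL << (x)) - 1)
	#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
	#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
	#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

	#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
	#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

	static unsigned long count;	/* stand-in for the per-thread preempt_count */

	int main(void)
	{
		count += HARDIRQ_OFFSET;	/* irq_enter() */
		count += SOFTIRQ_OFFSET;	/* local_bh_disable() */

		printf("hardirq_count: %#lx\n", count & HARDIRQ_MASK);
		printf("softirq_count: %#lx\n", count & SOFTIRQ_MASK);
		printf("irq_count:     %#lx\n", count & (HARDIRQ_MASK | SOFTIRQ_MASK));
		printf("preempt depth: %#lx\n", count & PREEMPT_MASK);
		return 0;
	}

With the old single IRQ_OFFSET scheme, in_softirq() was an alias for
in_interrupt() and therefore also fired in pure hardirq context - the
"broader test" the changelog refers to. The packed fields make in_irq(),
in_softirq() and in_interrupt() independent predicates.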
@@ -485,8 +485,7 @@ static void apic_pm_suspend(void *data)
 	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
 	apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	disable_local_APIC();
 	rdmsr(MSR_IA32_APICBASE, l, h);
 	l &= ~MSR_IA32_APICBASE_ENABLE;
@@ -499,8 +498,7 @@ static void apic_pm_resume(void *data)
 	unsigned int l, h;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	rdmsr(MSR_IA32_APICBASE, l, h);
 	l &= ~MSR_IA32_APICBASE_BASE;
 	l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
@@ -1087,9 +1085,6 @@ void smp_apic_timer_interrupt(struct pt_regs regs)
 	irq_enter();
 	smp_local_timer_interrupt(&regs);
 	irq_exit();
-	if (softirq_pending(cpu))
-		do_softirq();
 }
 /*
......
@@ -185,8 +185,10 @@ ENTRY(ret_from_fork)
 	# userspace resumption stub bypassing syscall exit tracing
 	ALIGN
-ret_from_intr:
 ret_from_exception:
+	preempt_stop
+ret_from_intr:
+	GET_THREAD_INFO(%ebx)
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
 	testl $(VM_MASK | 3), %eax
@@ -262,7 +264,7 @@ work_resched:
 work_notifysig:				# deal with pending signals and
 					# notify-resume requests
-	testl $(VM_MASK),EFLAGS(%esp)
+	testl $VM_MASK, EFLAGS(%esp)
 	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
@@ -333,14 +335,12 @@ vector=vector+1
 common_interrupt:
 	SAVE_ALL
 	call do_IRQ
-	GET_THREAD_INFO(%ebx)
 	jmp ret_from_intr

 #define BUILD_INTERRUPT(name, nr)	\
 ENTRY(name)				\
 	pushl $nr-256;			\
 	SAVE_ALL			\
-	GET_THREAD_INFO(%ebx);		\
 	call smp_/**/name;		\
 	jmp ret_from_intr;
@@ -400,10 +400,8 @@ error_code:
 	movl $(__KERNEL_DS), %edx
 	movl %edx, %ds
 	movl %edx, %es
-	GET_THREAD_INFO(%ebx)
 	call *%edi
 	addl $8, %esp
-	preempt_stop
 	jmp ret_from_exception

 ENTRY(coprocessor_error)
@@ -430,7 +428,6 @@ device_not_available_emulate:
 	pushl $0		# temporary storage for ORIG_EIP
 	call math_emulate
 	addl $4, %esp
-	preempt_stop
 	jmp ret_from_exception

 ENTRY(debug)
......
@@ -187,6 +187,10 @@ int show_interrupts(struct seq_file *p, void *v)
 #if CONFIG_SMP
 inline void synchronize_irq(unsigned int irq)
 {
+	/* is there anything to synchronize with? */
+	if (!irq_desc[irq].action)
+		return;
+
 	while (irq_desc[irq].status & IRQ_INPROGRESS)
 		cpu_relax();
 }
@@ -392,8 +396,6 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
 	irq_exit();
-	if (softirq_pending(cpu))
-		do_softirq();
 	return 1;
 }
......
@@ -381,7 +381,7 @@ static int arr3_protected;
 static void set_mtrr_prepare_save (struct set_mtrr_context *ctxt)
 {
 	/* Disable interrupts locally */
-	local_save_flags (ctxt->flags); local_irq_disable ();
+	local_irq_save(ctxt->flags);

 	if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
 		return;
@@ -546,7 +546,7 @@ static void cyrix_get_arr (unsigned int reg, unsigned long *base,
 	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

 	/* Save flags and disable interrupts */
-	local_save_flags (flags); local_irq_disable ();
+	local_irq_save(flags);

 	ccr3 = getCx86 (CX86_CCR3);
 	setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
......
@@ -161,9 +161,7 @@ static inline void send_IPI_mask_bitmask(int mask, int vector)
 	unsigned long cfg;
 	unsigned long flags;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	/*
 	 * Wait for idle.
@@ -200,8 +198,7 @@ static inline void send_IPI_mask_sequence(int mask, int vector)
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
 		query_mask = 1 << query_cpu;
......
@@ -306,7 +306,7 @@ static struct pci_ops * __devinit pci_check_direct(void)
 	unsigned int tmp;
 	unsigned long flags;

-	local_save_flags(flags); local_irq_disable();
+	local_irq_save(flags);

 	/*
	 * Check if configuration type 1 works.
......
@@ -82,7 +82,7 @@ static unsigned long bios32_service(unsigned long service)
 	unsigned long entry;		/* %edx */
 	unsigned long flags;

-	local_save_flags(flags); local_irq_disable();
+	local_irq_save(flags);
 	__asm__("lcall *(%%edi); cld"
 		: "=a" (return_code),
 		  "=b" (address),
@@ -122,7 +122,7 @@ static int __devinit check_pcibios(void)
 	if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
 		pci_indirect.address = pcibios_entry + PAGE_OFFSET;

-		local_save_flags(flags); local_irq_disable();
+		local_irq_save(flags);
 		__asm__(
 			"lcall *(%%edi); cld\n\t"
 			"jc 1f\n\t"
......
@@ -460,7 +460,7 @@ void do_tty_hangup(void *data)
 {
 	unsigned long flags;

-	local_save_flags(flags); local_irq_disable(); // FIXME: is this safe?
+	local_irq_save(flags); // FIXME: is this safe?
 	if (tty->ldisc.flush_buffer)
 		tty->ldisc.flush_buffer(tty);
 	if (tty->driver.flush_buffer)
@@ -1900,7 +1900,7 @@ static void flush_to_ldisc(void *private_)
 		fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
 		tty->flip.buf_num = 0;

-		local_save_flags(flags); local_irq_disable(); // FIXME: is this safe?
+		local_irq_save(flags); // FIXME: is this safe?
 		tty->flip.char_buf_ptr = tty->flip.char_buf;
 		tty->flip.flag_buf_ptr = tty->flip.flag_buf;
 	} else {
@@ -1908,7 +1908,7 @@ static void flush_to_ldisc(void *private_)
 		fp = tty->flip.flag_buf;
 		tty->flip.buf_num = 1;

-		local_save_flags(flags); local_irq_disable(); // FIXME: is this safe?
+		local_irq_save(flags); // FIXME: is this safe?
 		tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
 		tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
 	}
......
@@ -113,8 +113,7 @@ _kd_mksound(unsigned int hz, unsigned int ticks)
 	if (hz > 20 && hz < 32767)
 		count = 1193180 / hz;

-	local_save_flags(flags);	// FIXME: is this safe?
-	local_irq_disable();
+	local_irq_save(flags);	// FIXME: is this safe?
 	del_timer(&sound_timer);
 	if (count) {
 		/* enable counter 2 */
......
@@ -1082,28 +1082,19 @@ int ide_unregister_subdriver(struct ata_device *drive)
 {
 	unsigned long flags;

-	local_save_flags(flags);	// FIXME: is this safe?
-	local_irq_disable();
 #if 0
-	if (__MOD_IN_USE(ata_ops(drive)->owner)) {
-		local_irq_restore(flags);	// FIXME: is this safe?
+	if (__MOD_IN_USE(ata_ops(drive)->owner))
 		return 1;
-	}
 #endif
-	if (drive->usage || drive->busy || !ata_ops(drive)) {
-		local_irq_restore(flags);	// FIXME: is this safe?
+	if (drive->usage || drive->busy || !ata_ops(drive))
 		return 1;
-	}
 #if defined(CONFIG_BLK_DEV_ISAPNP) && defined(CONFIG_ISAPNP) && defined(MODULE)
 	pnpide_init(0);
 #endif
 	drive->driver = NULL;
-	local_irq_restore(flags);	// FIXME: is this safe?
 	return 0;
 }
......
@@ -84,8 +84,7 @@ static int a3d_read_packet(struct gameport *gameport, int length, char *data)
 	t = gameport_time(gameport, A3D_MAX_START);
 	s = gameport_time(gameport, A3D_MAX_STROBE);

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);
......
@@ -152,8 +152,7 @@ static void adi_read_packet(struct adi_port *port)
 		s[i] = 0;
 	}

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = z = gameport_read(gameport);
......
@@ -232,8 +232,7 @@ static int analog_cooked_read(struct analog_port *port)
 	loopout = (ANALOG_LOOP_TIME * port->loop) / 1000;
 	timeout = ANALOG_MAX_TIME * port->speed;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	GET_TIME(now);
 	local_irq_restore(flags);
......
@@ -73,8 +73,7 @@ static unsigned char cobra_read_packet(struct gameport *gameport, unsigned int *
 		t[i] = COBRA_MAX_STROBE;
 	}

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	u = gameport_read(gameport);
......
@@ -113,8 +113,7 @@ static void gc_n64_read_packet(struct gc *gc, unsigned char *data)
 	 * Request the pad to transmit data
 	 */

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	for (i = 0; i < GC_N64_REQUEST_LENGTH; i++) {
 		parport_write_data(gc->pd->port, GC_N64_POWER_W | ((GC_N64_REQUEST >> i) & 1 ? GC_N64_OUT : 0));
 		udelay(GC_N64_DWS);
@@ -270,8 +269,7 @@ static int gc_psx_read_packet(struct gc *gc, unsigned char *data)
 	parport_write_data(gc->pd->port, GC_PSX_CLOCK | GC_PSX_POWER);	/* Deselect, begin command */
 	udelay(GC_PSX_DELAY * 2);

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	gc_psx_command(gc, 0x01);	/* Access pad */
 	id = gc_psx_command(gc, 0x42);	/* Get device id */
......
@@ -106,8 +106,7 @@ static int gf2k_read_packet(struct gameport *gameport, int length, char *data)
 	i = 0;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	gameport_trigger(gameport);
 	v = gameport_read(gameport);;
@@ -137,8 +136,7 @@ static void gf2k_trigger_seq(struct gameport *gameport, short *seq)
 	unsigned long flags;
 	int i, t;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	i = 0;
 	do {
......
@@ -98,8 +98,7 @@ static int grip_gpp_read_packet(struct gameport *gameport, int shift, unsigned i
 	t = strobe;
 	i = 0;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	v = gameport_read(gameport) >> shift;
@@ -140,8 +139,7 @@ static int grip_xt_read_packet(struct gameport *gameport, int shift, unsigned in
 	status = buf = i = j = 0;
 	t = strobe;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	v = w = (gameport_read(gameport) >> shift) & 3;
......
@@ -98,8 +98,7 @@ static int guillemot_read_packet(struct gameport *gameport, u8 *data)
 	t = gameport_time(gameport, GUILLEMOT_MAX_START);
 	s = gameport_time(gameport, GUILLEMOT_MAX_STROBE);

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);
......
@@ -103,8 +103,7 @@ static int interact_read_packet(struct gameport *gameport, int length, u32 *data
 	t = gameport_time(gameport, INTERACT_MAX_START);
 	s = gameport_time(gameport, INTERACT_MAX_STROBE);

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	v = gameport_read(gameport);
......
@@ -149,8 +149,7 @@ static int sw_read_packet(struct gameport *gameport, unsigned char *buf, int len
 	pending = 0;
 	sched = 0;

-	local_save_flags(flags);			/* Quiet, please */
-	local_irq_disable();
+	local_irq_save(flags);				/* Quiet, please */
 	gameport_trigger(gameport);			/* Trigger */
 	v = gameport_read(gameport);
@@ -245,8 +244,7 @@ static void sw_init_digital(struct gameport *gameport)
 	unsigned long flags;
 	int i, t;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	i = 0;
 	do {
......
@@ -125,8 +125,7 @@ static int tmdc_read_packet(struct gameport *gameport, unsigned char data[2][TMD
 		i[k] = j[k] = 0;
 	}

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	gameport_trigger(gameport);
 	w = gameport_read(gameport) >> 4;
......
@@ -3389,8 +3389,7 @@ static int __init read_eeprom_byte(struct net_device *dev,
 	/*
	 * Don't take interrupts on this CPU will bit banging
	 * the %#%#@$ I2C device
	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	eeprom_start(regs);
......
@@ -338,8 +338,7 @@ extern void fs3270_devfs_unregister(tub_t *);
 #define spin_trylock_irqsave(lock, flags) \
 ({ \
 	int success; \
-	local_save_flags(flags); \
-	local_irq_disable(); \
+	local_irq_save(flags); \
 	success = spin_trylock(lock); \
 	if (success == 0) \
 		local_irq_restore(flags); \
......
@@ -163,11 +163,10 @@ s390_init_IRQ (void)
	 * function we resestablish the old environment.
	 *
	 * Note : as we don't need a system wide lock, therefore
-	 *        we shouldn't use cli(), but local_irq_disable() as this
+	 *        we shouldn't use cli(), but local_irq_save() as this
	 *        affects the current CPU only.
	 */
-	local_save_flags (flags);
-	local_irq_disable ();
+	local_irq_save(flags);

 	/*
	 * disable all interrupts
@@ -578,10 +577,9 @@ read_dev_chars (int irq, void **buffer, int length)
	 * also require to run disabled.
	 *
	 * Note : as no global lock is required, we must not use
-	 *        cli(), but local_irq_disable() instead.
+	 *        cli(), but local_irq_save() instead.
	 */
-	local_save_flags (flags);
-	local_irq_disable ();
+	local_irq_save(flags);

 	rdc_ccw = &ioinfo[irq]->senseccw;
@@ -720,8 +718,7 @@ read_conf_data (int irq, void **buffer, int *length, __u8 lpm)
 	int emulated = 0;	/* no i/O handler installed */
 	int retry = 5;		/* retry count */

-	local_save_flags (flags);
-	local_irq_disable ();
+	local_irq_save(flags);

 	if (!ioinfo[irq]->ui.flags.ready) {
 		pdevstat = &devstat;
......
@@ -254,8 +254,7 @@ s390_machine_check_handler(void *parm)
 	found = 0;	/* init ... */

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);

 	do {
......
@@ -62,10 +62,7 @@ void __buffer_error(char *file, int line)
 	printk("buffer layer error at %s:%d\n", file, line);
 #ifdef CONFIG_X86
 	printk("Pass this trace through ksymoops for reporting\n");
-	{
-		extern void show_stack(long *esp);
 		show_stack(0);
-	}
 #endif
 }
 EXPORT_SYMBOL(__buffer_error);
......
@@ -16,22 +16,81 @@ typedef struct {
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

-#define IRQ_OFFSET 64
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * HARDIRQ_MASK: 0x00ff0000
+ * SOFTIRQ_MASK: 0x0000ff00
+ * IRQ_MASK:     0x00ffff00
+ */
+
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	8
+
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x)	((1UL << (x))-1)
+
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+
+#define IRQ_MASK	(HARDIRQ_MASK | SOFTIRQ_MASK)
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
 /*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
  */
-#define in_interrupt() \
-	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
+#define in_irq()	(hardirq_count())
+#define in_softirq()	(softirq_count())
+#define in_interrupt()	(irq_count())
-#define in_irq in_interrupt

 #define hardirq_trylock()	(!in_interrupt())
 #define hardirq_endlock()	do { } while (0)

-#define irq_enter()	(preempt_count() += IRQ_OFFSET)
-#define irq_exit()	(preempt_count() -= IRQ_OFFSET)
+#define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#define irq_exit() \
+do { \
+	preempt_count() -= IRQ_EXIT_OFFSET; \
+	if (!in_interrupt() && softirq_pending(smp_processor_id())) \
+		do_softirq(); \
+	preempt_enable_no_resched(); \
+} while (0)

 #ifndef CONFIG_SMP
 # define synchronize_irq(irq)	barrier()
@@ -39,4 +98,6 @@ typedef struct {
 extern void synchronize_irq(unsigned int irq);
 #endif /* CONFIG_SMP */

+extern void show_stack(unsigned long * esp);
+
 #endif /* __ASM_HARDIRQ_H */
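
A note on the CONFIG_PREEMPT flavour of irq_exit() above: subtracting
HARDIRQ_OFFSET-1 deliberately leaves one unit in the preemption field, so
softirqs run with preemption implicitly disabled, and the final
preempt_enable_no_resched() drops that unit without creating a reschedule
point. A counter-only sketch of the three steps (plain variables, not
kernel code):

	#include <stdio.h>

	#define SOFTIRQ_SHIFT	8
	#define HARDIRQ_SHIFT	16
	#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
	#define IRQ_EXIT_OFFSET	(HARDIRQ_OFFSET - 1)	/* CONFIG_PREEMPT case */

	static unsigned long count;	/* stands in for preempt_count() */

	static int in_interrupt(void)
	{
		/* hardirq and softirq fields only, as in the new irq_count() */
		return (count & ((0xffUL << HARDIRQ_SHIFT) |
				 (0xffUL << SOFTIRQ_SHIFT))) != 0;
	}

	int main(void)
	{
		count += HARDIRQ_OFFSET;	/* irq_enter() */
		printf("handler:      %#07lx in_interrupt=%d\n", count, in_interrupt());

		count -= IRQ_EXIT_OFFSET;	/* irq_exit(), step 1 */
		printf("run softirqs: %#07lx in_interrupt=%d (preempt field=1)\n",
		       count, in_interrupt());
		/* do_softirq() would run here: interruptible, but not preemptible */

		count -= 1;			/* preempt_enable_no_resched() */
		printf("done:         %#07lx\n", count);
		return 0;
	}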
@@ -5,23 +5,16 @@
 #include <asm/hardirq.h>

 #define local_bh_disable() \
-		do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
+		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
 #define __local_bh_enable() \
-		do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
+		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)

 #define local_bh_enable() \
 do { \
-	if (unlikely((preempt_count() == IRQ_OFFSET) && \
-		     softirq_pending(smp_processor_id()))) { \
 	__local_bh_enable(); \
+	if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
 		do_softirq(); \
 	preempt_check_resched(); \
-	} else { \
-		__local_bh_enable(); \
-		preempt_check_resched(); \
-	} \
 } while (0)

-#define in_softirq() in_interrupt()
-
 #endif /* __ASM_SOFTIRQ_H */
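
The simplified local_bh_enable() decrements the softirq field first and
only runs pending softirqs when the outermost bh-disabled section ends. A
sketch of that nesting behaviour, with softirq_pending() reduced to a flag
(the function names mirror the macros, not their real implementations):

	#include <stdio.h>

	#define SOFTIRQ_OFFSET	(1UL << 8)
	#define IRQ_FIELDS	0x00ffff00UL	/* hardirq + softirq fields */

	static unsigned long count;
	static int pending = 1;

	static void local_bh_disable(void) { count += SOFTIRQ_OFFSET; }

	static void local_bh_enable(void)
	{
		count -= SOFTIRQ_OFFSET;		/* __local_bh_enable() */
		if (!(count & IRQ_FIELDS) && pending) {	/* !in_interrupt() */
			pending = 0;
			printf("do_softirq() runs\n");
		} else
			printf("still nested, softirqs deferred\n");
	}

	int main(void)
	{
		local_bh_disable();
		local_bh_disable();	/* nested */
		local_bh_enable();	/* inner enable: defers */
		local_bh_enable();	/* outer enable: runs softirqs */
		return 0;
	}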
@@ -318,19 +318,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")

+#define irqs_disabled() \
+({ \
+	unsigned long flags; \
+	local_save_flags(flags); \
+	!(flags & (1<<9)); \
+})
+
 /* For spinlocks etc */
 #define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")

 /*
  * Compatibility macros - they will be removed after some time.
  */
 #if !CONFIG_SMP
 # define sti()			local_irq_enable()
 # define cli()			local_irq_disable()
 # define save_flags(flags)	local_save_flags(flags)
 # define restore_flags(flags)	local_irq_restore(flags)
 #endif

 /*
  * disable hlt during certain critical i/o operations
  */
......
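
For reference, the irqs_disabled() macro introduced above tests bit 9 of a
saved EFLAGS word - the x86 IF flag, which is set when interrupts are
enabled. The predicate is easy to exercise on plain snapshot values; the
two sample EFLAGS words below are purely illustrative:

	#include <stdio.h>

	/* bit 9 of the x86 EFLAGS register is IF: 1 = interrupts enabled */
	static int irqs_disabled_in(unsigned long flags)
	{
		return !(flags & (1UL << 9));
	}

	int main(void)
	{
		unsigned long with_if = 0x246;		/* sample EFLAGS, IF set */
		unsigned long without_if = 0x046;	/* same word after cli */

		printf("%#05lx -> irqs_disabled: %d\n",
		       with_if, irqs_disabled_in(with_if));
		printf("%#05lx -> irqs_disabled: %d\n",
		       without_if, irqs_disabled_in(without_if));
		return 0;
	}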
@@ -51,6 +51,8 @@ struct thread_info {
 /*
  * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #ifndef __ASSEMBLY__

 #define INIT_THREAD_INFO(tsk) \
@@ -59,6 +61,7 @@ struct thread_info {
 	exec_domain:	&default_exec_domain,	\
 	flags:		0,			\
 	cpu:		0,			\
+	preempt_count:	1,			\
 	addr_limit:	KERNEL_DS,		\
 }
......
@@ -26,17 +26,17 @@
 #define write_lock_irq(lock)	do { local_irq_disable(); write_lock(lock); } while (0)
 #define write_lock_bh(lock)	do { local_bh_disable(); write_lock(lock); } while (0)

-#define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define spin_unlock_irqrestore(lock, flags)	do { _raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
+#define _raw_spin_unlock_irqrestore(lock, flags) do { _raw_spin_unlock(lock); local_irq_restore(flags); } while (0)
-#define spin_unlock_irq(lock)	do { spin_unlock(lock); local_irq_enable(); } while (0)
+#define spin_unlock_irq(lock)	do { _raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
 #define spin_unlock_bh(lock)	do { spin_unlock(lock); local_bh_enable(); } while (0)

-#define read_unlock_irqrestore(lock, flags)	do { read_unlock(lock); local_irq_restore(flags); } while (0)
-#define read_unlock_irq(lock)	do { read_unlock(lock); local_irq_enable(); } while (0)
+#define read_unlock_irqrestore(lock, flags)	do { _raw_read_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
+#define read_unlock_irq(lock)	do { _raw_read_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
 #define read_unlock_bh(lock)	do { read_unlock(lock); local_bh_enable(); } while (0)

-#define write_unlock_irqrestore(lock, flags)	do { write_unlock(lock); local_irq_restore(flags); } while (0)
-#define write_unlock_irq(lock)	do { write_unlock(lock); local_irq_enable(); } while (0)
+#define write_unlock_irqrestore(lock, flags)	do { _raw_write_unlock(lock); local_irq_restore(flags); preempt_enable(); } while (0)
+#define write_unlock_irq(lock)	do { _raw_write_unlock(lock); local_irq_enable(); preempt_enable(); } while (0)
 #define write_unlock_bh(lock)	do { write_unlock(lock); local_bh_enable(); } while (0)

 #define spin_trylock_bh(lock)	({ int __r; local_bh_disable();\
 				__r = spin_trylock(lock); \
@@ -135,12 +135,6 @@ do { \
 	preempt_enable(); \
 } while (0)

-#define spin_unlock_no_resched(lock) \
-do { \
-	_raw_spin_unlock(lock); \
-	preempt_enable_no_resched(); \
-} while (0)
-
 #define read_lock(lock)	({preempt_disable(); _raw_read_lock(lock);})
 #define read_unlock(lock)	({_raw_read_unlock(lock); preempt_enable();})
 #define write_lock(lock)	({preempt_disable(); _raw_write_lock(lock);})
@@ -153,7 +147,6 @@ do { \
 #define spin_lock(lock)	_raw_spin_lock(lock)
 #define spin_trylock(lock)	_raw_spin_trylock(lock)
 #define spin_unlock(lock)	_raw_spin_unlock(lock)
-#define spin_unlock_no_resched(lock)	_raw_spin_unlock(lock)

 #define read_lock(lock)	_raw_read_lock(lock)
 #define read_unlock(lock)	_raw_read_unlock(lock)
......
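
The ordering inside the reworked unlock macros is the point of the change:
raw unlock first, interrupts on second, preempt_enable() last, so that any
preemption triggered by the enable already runs with interrupts on
(otherwise the new check in preempt_schedule() would trip as a false
positive). A schematic mock of a spin_unlock_irq(), with the state as
plain variables rather than real lock and CPU state:

	#include <stdio.h>

	static int lock_held, irqs_off, preempt_depth;

	static void preemption_point(void)
	{
		/* mirrors the sanity check this patch adds to preempt_schedule() */
		if (preempt_depth == 0 && irqs_off)
			printf("bad: schedule() with irqs disabled!\n");
	}

	int main(void)
	{
		/* spin_lock_irq(): irqs off, preemption off, lock taken */
		irqs_off = 1;
		preempt_depth = 1;
		lock_held = 1;

		/* spin_unlock_irq(), new ordering: */
		lock_held = 0;		/* _raw_spin_unlock() */
		irqs_off = 0;		/* local_irq_enable() */
		preempt_depth--;	/* preempt_enable() - may preempt here */
		preemption_point();	/* irqs already on: no warning */

		/* reversing the last two steps would reach the preemption
		 * point with irqs_off still set and trigger the warning */
		printf("unlocked: lock=%d irqs_off=%d depth=%d\n",
		       lock_held, irqs_off, preempt_depth);
		return 0;
	}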
@@ -740,10 +740,10 @@ struct task_struct *do_fork(unsigned long clone_flags,
	 * total amount of pending timeslices in the system doesnt change,
	 * resulting in more scheduling fairness.
	 */
-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	p->time_slice = (current->time_slice + 1) >> 1;
 	current->time_slice >>= 1;
+	p->sleep_timestamp = jiffies;
 	if (!current->time_slice) {
 		/*
		 * This case is rare, it happens when the parent has only
@@ -751,9 +751,11 @@ struct task_struct *do_fork(unsigned long clone_flags,
		 * runqueue lock is not a problem.
		 */
 		current->time_slice = 1;
+		preempt_disable();
 		scheduler_tick(0, 0);
-	}
-	p->sleep_timestamp = jiffies;
-	local_irq_restore(flags);
+		local_irq_restore(flags);
+		preempt_enable();
+	} else
+		local_irq_restore(flags);

 	/*
......
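
The fork-time timeslice split above is what keeps the total number of
pending timeslices constant: the child takes the rounded-up half, the
parent keeps the rounded-down half, and the two halves always sum to the
original slice. A quick standalone check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int slice;

		for (slice = 1; slice <= 6; slice++) {
			unsigned int child = (slice + 1) >> 1;	/* p->time_slice */
			unsigned int parent = slice >> 1;	/* current->time_slice */

			printf("slice=%u -> child=%u parent=%u sum=%u\n",
			       slice, child, parent, child + parent);
		}
		return 0;
	}

slice=1 leaves the parent at 0 - the rare case in the hunk above where
do_fork() calls scheduler_tick() itself, now under preempt_disable().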
@@ -728,7 +728,7 @@ void scheduler_tick(int user_tick, int system)
 	if (p == rq->idle) {
 		/* note: this timer irq context must be accounted for as well */
-		if (preempt_count() >= 2*IRQ_OFFSET)
+		if (irq_count() >= 2*HARDIRQ_OFFSET)
 			kstat.per_cpu_system[cpu] += system;
 #if CONFIG_SMP
 		idle_tick();
@@ -902,6 +902,12 @@ asmlinkage void preempt_schedule(void)
 	if (unlikely(ti->preempt_count))
 		return;

+	if (unlikely(irqs_disabled())) {
+		preempt_disable();
+		printk("bad: schedule() with irqs disabled!\n");
+		show_stack(NULL);
+		preempt_enable_no_resched();
+	}
+
 need_resched:
 	ti->preempt_count = PREEMPT_ACTIVE;
@@ -1467,7 +1473,12 @@ asmlinkage long sys_sched_yield(void)
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	spin_unlock_no_resched(&rq->lock);
+	/*
+	 * Since we are going to call schedule() anyway, there's
+	 * no need to preempt:
+	 */
+	_raw_spin_unlock(&rq->lock);
+	preempt_enable_no_resched();

 	schedule();
@@ -1680,8 +1691,7 @@ void __init init_idle(task_t *idle, int cpu)
 	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
 	unsigned long flags;

-	local_save_flags(flags);
-	local_irq_disable();
+	local_irq_save(flags);
 	double_rq_lock(idle_rq, rq);

 	idle_rq->curr = idle_rq->idle = idle;
@@ -1697,6 +1707,8 @@ void __init init_idle(task_t *idle, int cpu)
 	/* Set the preempt count _outside_ the spinlocks! */
 #if CONFIG_PREEMPT
 	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+#else
+	idle->thread_info->preempt_count = 0;
 #endif
 }
......
@@ -1355,7 +1355,11 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
 		cc_entry(cc)[cc->avail++] =
 				kmem_cache_alloc_one_tail(cachep, slabp);
 	}
-	spin_unlock(&cachep->spinlock);
+	/*
+	 * CAREFUL: do not enable preemption yet, the per-CPU
+	 * entries rely on us being atomic.
+	 */
+	_raw_spin_unlock(&cachep->spinlock);

 	if (cc->avail)
 		return cc_entry(cc)[--cc->avail];
@@ -1382,8 +1386,12 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 		} else {
 			STATS_INC_ALLOCMISS(cachep);
 			objp = kmem_cache_alloc_batch(cachep,flags);
+			local_irq_restore(save_flags);
+			/* end of non-preemptible region */
+			preempt_enable();
 			if (!objp)
 				goto alloc_new_slab_nolock;
+			return objp;
 		}
 	} else {
 		spin_lock(&cachep->spinlock);
@@ -1399,9 +1407,11 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 alloc_new_slab:
 #ifdef CONFIG_SMP
 	spin_unlock(&cachep->spinlock);
-alloc_new_slab_nolock:
 #endif
 	local_irq_restore(save_flags);
+#ifdef CONFIG_SMP
+alloc_new_slab_nolock:
+#endif
 	if (kmem_cache_grow(cachep, flags))
 		/* Someone may have stolen our objs. Doesn't matter, we'll
		 * just come back here again.
......
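
The slab hunks are the subtle part of the patch: cc points into the
current CPU's cache, so the task must not migrate between reading cc and
popping an entry. That is why kmem_cache_alloc_batch() drops the lock with
_raw_spin_unlock() (leaving preemption off) and the caller runs
preempt_enable() only after it is done. A toy model of the race a plain
spin_unlock() would reopen (plain variables; migration simulated by hand):

	#include <stdio.h>

	#define NR_CPUS 2

	static int avail[NR_CPUS];	/* stand-in for each CPU's cpucache state */
	static int curr_cpu;		/* CPU the task is currently running on */

	int main(void)
	{
		int cpu = curr_cpu;	/* like cc = cc_data(cachep) on entry */

		avail[cpu]++;		/* batch refill of "our" per-CPU cache */

		/*
		 * With a plain spin_unlock() the task could be preempted here
		 * and resume on another CPU:
		 */
		curr_cpu = 1;		/* simulated migration */

		/*
		 * cpu still indexes CPU 0's entries although we now run on
		 * CPU 1, so two CPUs could manipulate avail[0] concurrently.
		 * Keeping preemption disabled (_raw_spin_unlock) forbids the
		 * switch until the entry has actually been popped.
		 */
		avail[cpu]--;
		printf("popped from cpu %d's cache while on cpu %d\n",
		       cpu, curr_cpu);
		return 0;
	}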
@@ -318,7 +318,7 @@ void __kfree_skb(struct sk_buff *skb)
 	dst_release(skb->dst);
 	if(skb->destructor) {
-		if (0 && in_irq())
+		if (in_irq())
 			printk(KERN_WARNING "Warning: kfree_skb on "
					"hard IRQ %p\n", NET_CALLER(skb));
 		skb->destructor(skb);
......
@@ -235,10 +235,9 @@ static inline void write_seq(struct solo1_state *s, unsigned char data)
 	int i;
 	unsigned long flags;

-	/* the local_irq_disable stunt is to send the data within the command window */
+	/* the local_irq_save stunt is to send the data within the command window */
 	for (i = 0; i < 0xffff; i++) {
-		local_save_flags(flags);
-		local_irq_disable();
+		local_irq_save(flags);
 		if (!(inb(s->sbbase+0xc) & 0x80)) {
 			outb(data, s->sbbase+0xc);
 			local_irq_restore(flags);
......