Commit 7ee2f6c9 authored by Linus Torvalds, committed by Linus Torvalds

Ingo Molnar's update to remove irqlock (documentation and fixing a number of drivers)
parent 8d27a647
@@ -2,10 +2,10 @@
#### cli()/sti() removal guide, started by Ingo Molnar <mingo@redhat.com>
-as of 2.5.28, four popular macros have been removed on SMP, and
+as of 2.5.28, five popular macros have been removed on SMP, and
are being phased out on UP:
-cli(), sti(), save_flags(flags), restore_flags(flags)
+cli(), sti(), save_flags(flags), save_flags_cli(flags), restore_flags(flags)
until now it was possible to protect driver code against interrupt
handlers via a cli(), but from now on other, more lightweight methods
@@ -86,25 +86,44 @@ the above code has a number of advantages:
cli() on the other hand was used by many drivers, and extended
the critical section to the whole IRQ handler function - creating
serious lock contention.
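(For illustration only - not part of this patch - a minimal sketch of the
lightweight replacement pattern the guide advocates: a per-driver spinlock
shared with the interrupt handler. driver_lock and my_driver_touch_state
are hypothetical names.)

	static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;

	static void my_driver_touch_state(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&driver_lock, flags);		/* instead of save_flags(); cli() */
		/* ... touch state shared with this driver's IRQ handler ... */
		spin_unlock_irqrestore(&driver_lock, flags);	/* instead of restore_flags() */
	}

The interrupt handler takes the same driver_lock, so contention is limited
to this one driver instead of stalling all interrupt handling.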
to make the transition easier, we've still kept the cli(), sti(),
-save_flags() and restore_flags() macros defined on UP systems - but
-their usage will be phased out until the 2.6 kernel is released.
+save_flags(), save_flags_cli() and restore_flags() macros defined
+on UP systems - but their usage will be phased out until 2.6 is
+released.
drivers that want to disable local interrupts (interrupts on the
-current CPU), can use the following four macros:
+current CPU), can use the following five macros:
-__cli(), __sti(), __save_flags(flags), __restore_flags(flags)
+local_irq_disable(), local_irq_enable(), local_irq_save(flags),
+local_irq_save_off(flags), local_irq_restore(flags)
but beware, their meaning and semantics are much simpler, far from
-that of cli(), sti(), save_flags(flags) and restore_flags(flags).
+that of the old cli(), sti(), save_flags(flags) and restore_flags(flags)
+SMP meaning:
+  local_irq_disable()       => turn local IRQs off
+  local_irq_enable()        => turn local IRQs on
+  local_irq_save(flags)     => save the current IRQ state into flags. The
+                               state can be on or off. (on some
+                               architectures there's even more bits in it.)
+  local_irq_save_off(flags) => save the current IRQ state into flags and
+                               disable interrupts.
+  local_irq_restore(flags)  => restore the IRQ state from flags.
+(local_irq_save can save both irqs on and irqs off state, and
+local_irq_restore can restore into both irqs on and irqs off state.)
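(For illustration only - not part of this patch - a minimal sketch of a
driver guarding a short section against local interrupts with the macros
documented above. my_event_count and my_event_note are hypothetical names.)

	static unsigned long my_event_count;

	static void my_event_note(void)
	{
		unsigned long flags;

		local_irq_save_off(flags);	/* save the IRQ state and disable local IRQs */
		my_event_count++;		/* keep the protected section short and atomic */
		local_irq_restore(flags);	/* restore the saved on/off state */
	}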
another related change is that synchronize_irq() now takes a parameter:
synchronize_irq(irq). This change too has the purpose of making SMP
synchronization more lightweight - this way you can wait for your own
interrupt handler to finish, no need to wait for other IRQ sources.
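(For illustration only - not part of this patch - a minimal sketch of the
teardown pattern this enables, mirroring the driver updates further down;
my_dev_shutdown is a hypothetical helper and dev an already-registered
net_device.)

	static void my_dev_shutdown(struct net_device *dev)
	{
		/* mask the device's interrupt sources in hardware first, then: */
		synchronize_irq(dev->irq);	/* wait only for our own handler to finish */
		free_irq(dev->irq, dev);	/* now it is safe to release the IRQ */
	}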
to make the transition easier, we've still kept the cli(), sti(),
save_flags() and restore_flags() macros defined on UP systems - but
their usage will be phased out until the 2.6 kernel is released.
why were these changes done? The main reason was the architectural burden
......
@@ -82,6 +82,13 @@ Note that you do not need to explicitly prevent preemption if you are holding
any locks or interrupts are disabled, since preemption is implicitly disabled
in those cases.
+But keep in mind that 'irqs disabled' is a fundamentally unsafe way of
+disabling preemption - any spin_unlock() decreasing the preemption count
+to 0 might trigger a reschedule. A simple printk() might trigger a reschedule.
+So use this implicit preemption-disabling property only if you know that the
+affected codepath does not do any of this. Best policy is to use this only for
+small, atomic code that you wrote and which calls no complex functions.
Example:
cpucache_t *cc; /* this is per-CPU */
......
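(For illustration only - not part of this patch - a minimal sketch of the
explicit alternative suggested by the preempt-locking text above: disable
preemption around per-CPU data with get_cpu()/put_cpu() instead of relying
on 'irqs disabled'. my_stats and my_stats_inc are hypothetical names.)

	struct my_stats { unsigned long events; };
	static struct my_stats my_stats[NR_CPUS];

	static void my_stats_inc(void)
	{
		int cpu = get_cpu();		/* disables preemption, returns this CPU's id */
		my_stats[cpu].events++;		/* short, atomic per-CPU work only */
		put_cpu();			/* re-enables preemption (may reschedule) */
	}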
@@ -1099,6 +1099,7 @@ asmlinkage void smp_spurious_interrupt(void)
{
unsigned long v;
+irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
@@ -1111,6 +1112,7 @@ asmlinkage void smp_spurious_interrupt(void)
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
smp_processor_id());
+irq_exit();
}
/*
@@ -1121,6 +1123,7 @@ asmlinkage void smp_error_interrupt(void)
{
unsigned long v, v1;
+irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
@@ -1140,6 +1143,7 @@ asmlinkage void smp_error_interrupt(void)
*/
printk (KERN_ERR "APIC error on CPU%d: %02lx(%02lx)\n",
smp_processor_id(), v , v1);
+irq_exit();
}
/*
......
@@ -74,7 +74,9 @@ static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_therm
asmlinkage void smp_thermal_interrupt(struct pt_regs regs)
{
+irq_enter();
vendor_thermal_interrupt(&regs);
+irq_exit();
}
/* P4/Xeon Thermal regulation detect and init */
......
@@ -186,7 +186,6 @@ ENTRY(ret_from_fork)
# userspace resumption stub bypassing syscall exit tracing
ALIGN
ret_from_intr:
-preempt_stop
ret_from_exception:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
......
@@ -387,7 +387,7 @@ asmlinkage void smp_invalidate_interrupt (void)
clear_bit(cpu, &flush_cpumask);
out:
-put_cpu();
+put_cpu_no_resched();
}
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
......
@@ -926,13 +926,15 @@ static struct net_device_stats *cp_get_stats(struct net_device *dev)
static void cp_stop_hw (struct cp_private *cp)
{
+struct net_device *dev = cp->dev;
cpw16(IntrMask, 0);
cpr16(IntrMask);
cpw8(Cmd, 0);
cpw16(CpCmd, 0);
cpr16(CpCmd);
cpw16(IntrStatus, ~(cpr16(IntrStatus)));
-synchronize_irq();
+synchronize_irq(dev->irq);
udelay(10);
cp->rx_tail = 0;
......
@@ -2130,7 +2130,9 @@ static int rtl8139_close (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
-synchronize_irq ();
+/* TODO: isn't this code racy? we synchronize the IRQ and then free it, */
+/* but another IRQ could've happened in between the sync and free */
+synchronize_irq (dev->irq);
free_irq (dev->irq, dev);
rtl8139_tx_clear (tp);
......
@@ -518,7 +518,7 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
spin_unlock_irq(&nl->lock);
if (error == HS_TIMEOUT) {
DISABLE(dev->irq);
-synchronize_irq();
+synchronize_irq(dev->irq);
}
disable_parport_interrupts (dev);
netif_stop_queue (dev);
@@ -840,7 +840,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
if (c0 & 0x08) {
spin_unlock_irq(&nl->lock);
DISABLE(dev->irq);
-synchronize_irq();
+synchronize_irq(dev->irq);
if (nl->connection == PLIP_CN_RECEIVE) {
/* Interrupted.
We don't need to enable irq,
@@ -1178,7 +1178,7 @@ plip_close(struct net_device *dev)
netif_stop_queue (dev);
DISABLE(dev->irq);
-synchronize_irq();
+synchronize_irq(dev->irq);
if (dev->irq == -1)
{
......
@@ -1455,7 +1455,7 @@ static void de_tx_timeout (struct net_device *dev)
/* Update the error counts. */
__de_get_stats(de);
-synchronize_irq();
+synchronize_irq(dev->irq);
de_clean_rings(de);
de_init_hw(de);
......
@@ -84,14 +84,11 @@ struct {
{ \
unsigned long flags; \
\
-save_flags(flags); \
-cli(); \
trace.buf[trace.next].name = (w); \
trace.buf[trace.next].time = jiffies; \
trace.buf[trace.next].index = (i); \
trace.buf[trace.next].addr = (long) (a); \
trace.next = (trace.next + 1) & (TRACE_BUF_LEN - 1); \
-restore_flags(flags); \
}
#else
@@ -1704,9 +1701,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
ENTER("isp1020_load_parameters");
-save_flags(flags);
-cli();
hwrev = isp_inw(host, ISP_CFG0) & ISP_CFG0_HWMSK;
isp_cfg1 = ISP_CFG1_F64 | ISP_CFG1_BENAB;
if (hwrev == ISP_CFG0_1040A) {
@@ -1724,7 +1718,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set initiator id failure\n");
return 1;
}
@@ -1736,7 +1729,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set retry count failure\n");
return 1;
}
@@ -1747,7 +1739,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : async data setup time failure\n");
return 1;
}
@@ -1759,7 +1750,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set active negation state failure\n");
return 1;
}
@@ -1771,7 +1761,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set pci control parameter failure\n");
return 1;
}
@@ -1782,7 +1771,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set tag age limit failure\n");
return 1;
}
@@ -1793,7 +1781,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set selection timeout failure\n");
return 1;
}
@@ -1812,7 +1799,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set target parameter failure\n");
return 1;
}
@@ -1827,7 +1813,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set device queue "
"parameter failure\n");
return 1;
@@ -1854,7 +1839,6 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set response queue failure\n");
return 1;
}
@@ -1879,13 +1863,10 @@ static int isp1020_load_parameters(struct Scsi_Host *host)
isp1020_mbox_command(host, param);
if (param[0] != MBOX_COMMAND_COMPLETE) {
-restore_flags(flags);
printk("qlogicisp : set request queue failure\n");
return 1;
}
-restore_flags(flags);
LEAVE("isp1020_load_parameters");
return 0;
......
@@ -44,6 +44,16 @@ enum {
#include <asm/hardirq.h>
#include <asm/softirq.h>
+/*
+ * Temporary defines for UP kernels, until all code gets fixed.
+ */
+#if !CONFIG_SMP
+# define cli()			local_irq_disable()
+# define sti()			local_irq_enable()
+# define save_flags(x)		local_irq_save(x)
+# define restore_flags(x)	local_irq_restore(x)
+# define save_and_cli(x)	local_irq_save_off(x)
+#endif
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
......
@@ -95,7 +95,8 @@ static inline void smp_send_reschedule_all(void) { }
#endif /* !SMP */
-#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
-#define put_cpu() preempt_enable()
+#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+#define put_cpu() preempt_enable()
+#define put_cpu_no_resched() preempt_enable_no_resched()
#endif /* __LINUX_SMP_H */