Commit b7133a9a authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq core changes from Ingo Molnar:
 "The biggest changes are the IRQ-work and printk changes from Frederic
  Weisbecker, which prepare the code for 'full dynticks' (the ability to
  stop or slow down the periodic tick arbitrarily, not just in idle time
  as today):

   - Don't stop tick with irq works pending.  This fix is generally
     useful and concerns archs that can't raise self IPIs.

   - Flush irq works before CPU offlining.

   - Introduce "lazy" irq works that can wait for the next tick to be
     executed, unless it's stopped.

   - Implement klogd wake up using irq work.  This removes the ad-hoc
     printk_tick()/printk_needs_cpu() hooks and makes it work even in
     dynticks mode.

   - Cleanups and fixes."

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq: Export enable/disable_percpu_irq()
  arch Kconfig: Remove references to IRQ_PER_CPU
  irq_work: Remove return value from the irq_work_queue() function
  genirq: Avoid deadlock in spurious handling
  printk: Wake up klogd using irq_work
  irq_work: Make self-IPIs optable
  irq_work: Warn if there's still work on cpu_down
  irq_work: Flush work on CPU_DYING
  irq_work: Don't stop the tick with pending works
  nohz: Add API to check tick state
  irq_work: Remove CONFIG_HAVE_IRQ_WORK
  irq_work: Fix racy check on work pending flag
  irq_work: Fix racy IRQ_WORK_BUSY flag setting
parents e84cf5d0 36a5df85
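As context for the diff below, here is a minimal sketch of how a client might use the reworked API after this merge. The module name, callback, and message are illustrative inventions; the static initializer style, irq_work_queue(), irq_work_sync(), and the IRQ_WORK_LAZY flag come from the patches themselves.

```c
/* Hypothetical module: queue a "lazy" irq_work that runs from the next
 * tick, or from a self-IPI if the tick is stopped. Sketch only. */
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

static void demo_func(struct irq_work *work)
{
	/* Runs in hard-irq context; keep it short and non-sleeping. */
	pr_info("irq_work ran\n");
}

/* Static initialization mirrors what printk now does for klogd:
 * IRQ_WORK_LAZY means "wait for the next tick unless it's stopped". */
static struct irq_work demo_work = {
	.func  = demo_func,
	.flags = IRQ_WORK_LAZY,
};

static int __init demo_init(void)
{
	irq_work_queue(&demo_work);	/* returns void now; queueing while pending is a no-op */
	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&demo_work);	/* wait until the callback is idle */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```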
@@ -5,7 +5,6 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
-	select HAVE_IRQ_WORK
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
...
@@ -36,7 +36,6 @@ config ARM
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_IDE if PCI || ISA || PCMCIA
-	select HAVE_IRQ_WORK
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
...
@@ -21,7 +21,6 @@ config ARM64
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
-	select HAVE_IRQ_WORK
 	select HAVE_MEMBLOCK
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
...
@@ -24,7 +24,6 @@ config BLACKFIN
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_IDE
-	select HAVE_IRQ_WORK
 	select HAVE_KERNEL_GZIP if RAMKERNEL
 	select HAVE_KERNEL_BZIP2 if RAMKERNEL
 	select HAVE_KERNEL_LZMA if RAMKERNEL
@@ -38,7 +37,6 @@ config BLACKFIN
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
-	select IRQ_PER_CPU if SMP
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
...
@@ -3,7 +3,6 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_UID16
 	select HAVE_GENERIC_HARDIRQS
...
@@ -12,9 +12,7 @@ config HEXAGON
 	# select ARCH_WANT_OPTIONAL_GPIOLIB
 	# select ARCH_REQUIRE_GPIOLIB
 	# select HAVE_CLK
-	# select IRQ_PER_CPU
 	# select GENERIC_PENDING_IRQ if SMP
-	select HAVE_IRQ_WORK
 	select GENERIC_ATOMIC64
 	select HAVE_PERF_EVENTS
 	select HAVE_GENERIC_HARDIRQS
...
@@ -29,7 +29,6 @@ config IA64
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
-	select IRQ_PER_CPU
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
...
@@ -4,7 +4,6 @@ config MIPS
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -2161,7 +2160,6 @@ source "mm/Kconfig"
 config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
-	select IRQ_PER_CPU
 	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
...
@@ -9,14 +9,12 @@ config PARISC
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	select HAVE_GENERIC_HARDIRQS
 	select BROKEN_RODATA
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PCI_IOMAP
-	select IRQ_PER_CPU
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
...
@@ -118,14 +118,12 @@ config PPC
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
 	select HAVE_GENERIC_HARDIRQS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select SPARSE_IRQ
-	select IRQ_PER_CPU
 	select IRQ_DOMAIN
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
...
@@ -78,7 +78,6 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select HAVE_DEBUG_KMEMLEAK
...
@@ -11,7 +11,6 @@ config SUPERH
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
-	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DEBUG_BUGVERBOSE
 	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -91,9 +90,6 @@ config GENERIC_CSUM
 config GENERIC_HWEIGHT
 	def_bool y
 
-config IRQ_PER_CPU
-	def_bool y
-
 config GENERIC_GPIO
 	def_bool n
...
@@ -23,7 +23,6 @@ config SPARC
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select RTC_CLASS
 	select RTC_DRV_M48T59
-	select HAVE_IRQ_WORK
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select HAVE_ARCH_JUMP_LABEL
...
@@ -28,7 +28,6 @@ config X86
 	select HAVE_OPROFILE
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
-	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_MEMBLOCK
...
@@ -21,7 +21,6 @@ config IIO_GPIO_TRIGGER
 config IIO_SYSFS_TRIGGER
 	tristate "SYSFS trigger"
 	depends on SYSFS
-	depends on HAVE_IRQ_WORK
 	select IRQ_WORK
 	help
 	  Provides support for using SYSFS entry as IIO triggers.
...
@@ -3,6 +3,20 @@
 #include <linux/llist.h>
 
+/*
+ * An entry can be in one of four states:
+ *
+ * free	     NULL, 0 -> {claimed}       : free to be used
+ * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
+ * pending   next, 3 -> {busy}          : queued, pending callback
+ * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ */
+
+#define IRQ_WORK_PENDING	1UL
+#define IRQ_WORK_BUSY		2UL
+#define IRQ_WORK_FLAGS		3UL
+#define IRQ_WORK_LAZY		4UL /* Doesn't want IPI, wait for tick */
+
 struct irq_work {
 	unsigned long flags;
 	struct llist_node llnode;
@@ -16,8 +30,14 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 	work->func = func;
 }
 
-bool irq_work_queue(struct irq_work *work);
+void irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
+#ifdef CONFIG_IRQ_WORK
+bool irq_work_needs_cpu(void);
+#else
+static bool irq_work_needs_cpu(void) { return false; }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */
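As a reading aid (not part of the commit): the four states above boil down to the two low flag bits, and a claim sets both at once. A user-space sketch of the encodings, reusing the kernel's constant names purely for illustration:

```c
#include <assert.h>

#define IRQ_WORK_PENDING 1UL	/* work is queued or about to be */
#define IRQ_WORK_BUSY    2UL	/* callback may still be running */
#define IRQ_WORK_FLAGS   3UL	/* PENDING | BUSY, set by a claim */

int main(void)
{
	unsigned long flags = 0;		/* free */

	flags |= IRQ_WORK_FLAGS;		/* claimed, then queued: "pending" */
	assert(flags == (IRQ_WORK_PENDING | IRQ_WORK_BUSY));

	flags &= ~IRQ_WORK_PENDING;		/* runner starts the callback: "busy" */
	assert(flags == IRQ_WORK_BUSY);		/* re-claimable from here on */

	flags &= ~IRQ_WORK_BUSY;		/* nobody re-claimed it: "free" again */
	assert(flags == 0);
	return 0;
}
```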
@@ -98,9 +98,6 @@ int no_printk(const char *fmt, ...)
 extern asmlinkage __printf(1, 2)
 void early_printk(const char *fmt, ...);
 
-extern int printk_needs_cpu(int cpu);
-extern void printk_tick(void);
-
 #ifdef CONFIG_PRINTK
 asmlinkage __printf(5, 0)
 int vprintk_emit(int facility, int level,
...
@@ -8,6 +8,8 @@
 #include <linux/clockchips.h>
 #include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -122,13 +124,26 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
+DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
+
+static inline int tick_nohz_tick_stopped(void)
+{
+	return __this_cpu_read(tick_cpu_sched.tick_stopped);
+}
+
 extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-# else
+# else /* !CONFIG_NO_HZ */
+static inline int tick_nohz_tick_stopped(void)
+{
+	return 0;
+}
+
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
...
@@ -20,12 +20,8 @@ config CONSTRUCTORS
 	bool
 	depends on !UML
 
-config HAVE_IRQ_WORK
-	bool
-
 config IRQ_WORK
 	bool
-	depends on HAVE_IRQ_WORK
 
 config BUILDTIME_EXTABLE_SORT
 	bool
@@ -1273,6 +1269,7 @@ config HOTPLUG
 config PRINTK
 	default y
 	bool "Enable support for printk" if EXPERT
+	select IRQ_WORK
 	help
 	  This option enables normal printk support. Removing it
 	  eliminates most of the message strings from the kernel image
...
@@ -1524,6 +1524,7 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
 out:
 	irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(enable_percpu_irq);
 
 void disable_percpu_irq(unsigned int irq)
 {
@@ -1537,6 +1538,7 @@ void disable_percpu_irq(unsigned int irq)
 	irq_percpu_disable(desc, cpu);
 	irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(disable_percpu_irq);
 
 /*
  * Internal function to unregister a percpu irqaction.
...
@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
 	/*
 	 * All handlers must agree on IRQF_SHARED, so we test just the
-	 * first. Check for action->next as well.
+	 * first.
 	 */
 	action = desc->action;
 	if (!action || !(action->flags & IRQF_SHARED) ||
-	    (action->flags & __IRQF_TIMER) ||
-	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
-	    !action->next)
+	    (action->flags & __IRQF_TIMER))
 		goto out;
 
 	/* Already running on another processor */
@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	do {
 		if (handle_irq_event(desc) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
+		/* Make sure that there is still a valid action */
 		action = desc->action;
 	} while ((desc->istate & IRQS_PENDING) && action);
 	desc->istate &= ~IRQS_POLL_INPROGRESS;
...
@@ -12,37 +12,36 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free	     NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING	1UL
-#define IRQ_WORK_BUSY		2UL
-#define IRQ_WORK_FLAGS		3UL
-
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	unsigned long flags, nflags;
+	unsigned long flags, oflags, nflags;
 
+	/*
+	 * Start with our best wish as a premise but only trust any
+	 * flag value after cmpxchg() result.
+	 */
+	flags = work->flags & ~IRQ_WORK_PENDING;
 	for (;;) {
-		flags = work->flags;
-		if (flags & IRQ_WORK_PENDING)
-			return false;
 		nflags = flags | IRQ_WORK_FLAGS;
-		if (cmpxchg(&work->flags, flags, nflags) == flags)
+		oflags = cmpxchg(&work->flags, flags, nflags);
+		if (oflags == flags)
 			break;
+		if (oflags & IRQ_WORK_PENDING)
+			return false;
+		flags = oflags;
 		cpu_relax();
 	}
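To see why the rewritten claim loop fixes the race, here is a user-space rendition; this is a sketch, with GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg(). The point of the fix: a claim now only fails after an atomic operation has actually observed IRQ_WORK_PENDING, instead of trusting a plain, racy read of work->flags.

```c
#include <stdbool.h>
#include <stdio.h>

#define IRQ_WORK_PENDING 1UL
#define IRQ_WORK_BUSY    2UL
#define IRQ_WORK_FLAGS   3UL

static unsigned long work_flags;	/* stands in for work->flags */

static bool irq_work_claim_sim(void)
{
	unsigned long flags, oflags, nflags;

	/* Optimistic starting value; only trusted once cmpxchg confirms it. */
	flags = work_flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = __sync_val_compare_and_swap(&work_flags, flags, nflags);
		if (oflags == flags)
			return true;	/* we set PENDING|BUSY atomically */
		if (oflags & IRQ_WORK_PENDING)
			return false;	/* someone else got there first */
		flags = oflags;		/* retry from the observed value */
	}
}

int main(void)
{
	printf("first claim:  %d\n", irq_work_claim_sim());	/* 1: free -> claimed */
	printf("second claim: %d\n", irq_work_claim_sim());	/* 0: already pending */
	return 0;
}
```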
@@ -57,57 +56,69 @@ void __weak arch_irq_work_raise(void)
 }
 
 /*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
  */
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
 {
-	bool empty;
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return;
 
+	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
+bool irq_work_needs_cpu(void)
 {
-	if (!irq_work_claim(work)) {
-		/*
-		 * Already enqueued, can't do!
-		 */
-		return false;
-	}
+	struct llist_head *this_list;
 
-	__irq_work_queue(work);
-	return true;
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
+		return false;
+
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
+	return true;
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
- */
-void irq_work_run(void)
+static void __irq_work_run(void)
 {
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+	/*
+	 * Reset the "raised" state right before we check the list because
+	 * an NMI may enqueue after we find the list empty from the runner.
+	 */
+	__this_cpu_write(irq_work_raised, 0);
+	barrier();
+
 	this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
 
-	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
 	llnode = llist_del_all(this_list);
@@ -119,16 +130,31 @@ void irq_work_run(void)
 		/*
 		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
+		 * Make it immediately visible so that other CPUs trying
+		 * to claim that work don't rely on us to handle their data
+		 * while we are in the middle of the func.
 		 */
-		work->flags = IRQ_WORK_BUSY;
+		flags = work->flags & ~IRQ_WORK_PENDING;
+		xchg(&work->flags, flags);
+
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
+
+/*
+ * Run the irq_work entries on this cpu. Requires to be ran from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+	BUG_ON(!in_irq());
+	__irq_work_run();
+}
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 /*
@@ -143,3 +169,35 @@ void irq_work_sync(struct irq_work *work)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int irq_work_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_DYING:
+		/* Called from stop_machine */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			break;
+		__irq_work_run();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notify;
+
+static __init int irq_work_init_cpu_notifier(void)
+{
+	cpu_notify.notifier_call = irq_work_cpu_notify;
+	cpu_notify.priority = 0;
+	register_cpu_notifier(&cpu_notify);
+	return 0;
+}
+device_initcall(irq_work_init_cpu_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
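The runner's side of the same protocol, continuing the user-space rendition from above (same caveats; __sync_lock_test_and_set() stands in for xchg()): PENDING is dropped with a real atomic before the callback runs, so a concurrent claimer either still sees PENDING or is free to re-queue the work.

```c
#include <stdio.h>

#define IRQ_WORK_PENDING 1UL
#define IRQ_WORK_BUSY    2UL

static unsigned long work_flags = IRQ_WORK_PENDING | IRQ_WORK_BUSY; /* queued */

static void the_callback(void)
{
	printf("callback ran, flags now %lu\n", work_flags);	/* 2: busy */
}

static void irq_work_run_one_sim(void)
{
	unsigned long flags;

	/* Drop PENDING with a real atomic *before* running the callback,
	 * so other CPUs can re-claim the work while it executes. */
	flags = work_flags & ~IRQ_WORK_PENDING;
	(void)__sync_lock_test_and_set(&work_flags, flags);

	the_callback();

	/* Return to "free" only if nobody re-claimed it while we ran. */
	(void)__sync_val_compare_and_swap(&work_flags, flags,
					  flags & ~IRQ_WORK_BUSY);
}

int main(void)
{
	irq_work_run_one_sim();
	printf("final flags: %lu\n", work_flags);	/* 0: free */
	return 0;
}
```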
@@ -42,6 +42,7 @@
 #include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
@@ -1959,30 +1960,32 @@ int is_console_locked(void)
 static DEFINE_PER_CPU(int, printk_pending);
 static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
 
-void printk_tick(void)
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
 {
-	if (__this_cpu_read(printk_pending)) {
-		int pending = __this_cpu_xchg(printk_pending, 0);
-		if (pending & PRINTK_PENDING_SCHED) {
-			char *buf = __get_cpu_var(printk_sched_buf);
-			printk(KERN_WARNING "[sched_delayed] %s", buf);
-		}
-		if (pending & PRINTK_PENDING_WAKEUP)
-			wake_up_interruptible(&log_wait);
+	int pending = __this_cpu_xchg(printk_pending, 0);
+
+	if (pending & PRINTK_PENDING_SCHED) {
+		char *buf = __get_cpu_var(printk_sched_buf);
+		printk(KERN_WARNING "[sched_delayed] %s", buf);
 	}
-}
 
-int printk_needs_cpu(int cpu)
-{
-	if (cpu_is_offline(cpu))
-		printk_tick();
-	return __this_cpu_read(printk_pending);
+	if (pending & PRINTK_PENDING_WAKEUP)
+		wake_up_interruptible(&log_wait);
 }
 
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+	.func = wake_up_klogd_work_func,
+	.flags = IRQ_WORK_LAZY,
+};
+
 void wake_up_klogd(void)
 {
-	if (waitqueue_active(&log_wait))
+	preempt_disable();
+	if (waitqueue_active(&log_wait)) {
 		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+	}
+	preempt_enable();
 }
 
 static void console_cont_flush(char *text, size_t size)
@@ -2462,6 +2465,7 @@ int printk_sched(const char *fmt, ...)
 	va_end(args);
 
 	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+	irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
 	local_irq_restore(flags);
 
 	return r;
...
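Stepping back (an aside, not part of the commit): with the code above, printk callers only set a per-CPU pending bit and queue an idempotent lazy work, and the actual klogd wakeup happens at most once per tick. A compact user-space model of that coalescing, with the irq_work claim collapsed to a plain bool:

```c
/* User-space model of the new klogd wakeup path; names mirror the
 * kernel's for readability only. One tick() call stands in for the
 * periodic tick running irq_work_run(). */
#include <stdbool.h>
#include <stdio.h>

#define PRINTK_PENDING_WAKEUP 1

static int printk_pending;
static bool work_queued;		/* irq_work claim, collapsed to a bool */

static void wake_up_klogd(void)
{
	printk_pending |= PRINTK_PENDING_WAKEUP;
	if (!work_queued)		/* re-queue while pending is a no-op */
		work_queued = true;
}

static void tick(void)
{
	if (!work_queued)
		return;
	work_queued = false;
	if (printk_pending & PRINTK_PENDING_WAKEUP) {
		printk_pending = 0;
		printf("klogd woken\n");	/* wake_up_interruptible(&log_wait) */
	}
}

int main(void)
{
	wake_up_klogd();
	wake_up_klogd();	/* coalesced with the first call */
	tick();			/* one wakeup, on the next tick */
	return 0;
}
```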
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/irq_work.h>
 
 #include <asm/irq_regs.h>
@@ -28,7 +29,7 @@
 /*
  * Per cpu nohz control structure
  */
-static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
  * The time, when the last jiffy update happened. Protected by jiffies_lock.
@@ -331,8 +332,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
-	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
...
@@ -1351,7 +1351,6 @@ void update_process_times(int user_tick)
 	account_process_tick(p, user_tick);
 	run_local_timers();
 	rcu_check_callbacks(cpu, user_tick);
-	printk_tick();
 #ifdef CONFIG_IRQ_WORK
 	if (in_irq())
 		irq_work_run();
...