Commit 1eaec821 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-3.9-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue [delayed_]work_pending() cleanups from Tejun Heo:
 "This is part of on-going cleanups to remove / minimize usages of
  workqueue interfaces which are deprecated and/or misleading.

  This round drops a number of usages of [delayed_]work_pending(), which
  are dangerous as they lack any form of synchronization and thus often
  lead to buggy / unnecessary code.  There are a couple of legitimate use
  cases in the kernel.  Hopefully, they can be converted and
  [delayed_]work_pending() can be removed completely.  Even if not,
  removing most of the misuses should make it more difficult to find
  examples of misuse and thus slow their growth.

  These changes are independent from other workqueue changes."

* 'for-3.9-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  wimax/i2400m: fix i2400m->wake_tx_skb handling
  kprobes: fix wait_for_kprobe_optimizer()
  ipw2x00: simplify scan_event handling
  video/exynos: don't use [delayed_]work_pending()
  tty/max3100: don't use [delayed_]work_pending()
  x86/mce: don't use [delayed_]work_pending()
  rfkill: don't use [delayed_]work_pending()
  wl1251: don't use [delayed_]work_pending()
  thinkpad_acpi: don't use [delayed_]work_pending()
  mwifiex: don't use [delayed_]work_pending()
  sja1000: don't use [delayed_]work_pending()
parents 1a13c0b1 23663c87
...@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c) ...@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c)
static void mce_schedule_work(void) static void mce_schedule_work(void)
{ {
if (!mce_ring_empty()) { if (!mce_ring_empty())
struct work_struct *work = &__get_cpu_var(mce_work); schedule_work(&__get_cpu_var(mce_work));
if (!work_pending(work))
schedule_work(work);
}
} }
DEFINE_PER_CPU(struct irq_work, mce_irq_work); DEFINE_PER_CPU(struct irq_work, mce_irq_work);
...@@ -1351,12 +1348,7 @@ int mce_notify_irq(void) ...@@ -1351,12 +1348,7 @@ int mce_notify_irq(void)
/* wake processes polling /dev/mcelog */ /* wake processes polling /dev/mcelog */
wake_up_interruptible(&mce_chrdev_wait); wake_up_interruptible(&mce_chrdev_wait);
/* if (mce_helper[0])
* There is no risk of missing notifications because
* work_pending is always cleared before the function is
* executed.
*/
if (mce_helper[0] && !work_pending(&mce_trigger_work))
schedule_work(&mce_trigger_work); schedule_work(&mce_trigger_work);
if (__ratelimit(&ratelimit)) if (__ratelimit(&ratelimit))
......
...@@ -339,8 +339,7 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) ...@@ -339,8 +339,7 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
*/ */
static void peak_pciec_start_led_work(struct peak_pciec_card *card) static void peak_pciec_start_led_work(struct peak_pciec_card *card)
{ {
if (!delayed_work_pending(&card->led_work)) schedule_delayed_work(&card->led_work, HZ);
schedule_delayed_work(&card->led_work, HZ);
} }
/* /*
......
...@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws) ...@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
struct net_device *net_dev = i2400m->wimax_dev.net_dev; struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct device *dev = i2400m_dev(i2400m); struct device *dev = i2400m_dev(i2400m);
struct sk_buff *skb = i2400m->wake_tx_skb; struct sk_buff *skb;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&i2400m->tx_lock, flags); spin_lock_irqsave(&i2400m->tx_lock, flags);
...@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb) ...@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
void i2400m_net_wake_stop(struct i2400m *i2400m) void i2400m_net_wake_stop(struct i2400m *i2400m)
{ {
struct device *dev = i2400m_dev(i2400m); struct device *dev = i2400m_dev(i2400m);
struct sk_buff *wake_tx_skb;
unsigned long flags;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m); d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
/* See i2400m_hard_start_xmit(), references are taken there /*
* and here we release them if the work was still * See i2400m_hard_start_xmit(), references are taken there and
* pending. Note we can't differentiate work not pending vs * here we release them if the packet was still pending.
* never scheduled, so the NULL check does that. */ */
if (cancel_work_sync(&i2400m->wake_tx_ws) == 0 cancel_work_sync(&i2400m->wake_tx_ws);
&& i2400m->wake_tx_skb != NULL) {
unsigned long flags; spin_lock_irqsave(&i2400m->tx_lock, flags);
struct sk_buff *wake_tx_skb; wake_tx_skb = i2400m->wake_tx_skb;
spin_lock_irqsave(&i2400m->tx_lock, flags); i2400m->wake_tx_skb = NULL;
wake_tx_skb = i2400m->wake_tx_skb; /* compat help */ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
i2400m->wake_tx_skb = NULL; /* compat help */
spin_unlock_irqrestore(&i2400m->tx_lock, flags); if (wake_tx_skb) {
i2400m_put(i2400m); i2400m_put(i2400m);
kfree_skb(wake_tx_skb); kfree_skb(wake_tx_skb);
} }
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
} }
...@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev, ...@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
* and if pending, release those resources. */ * and if pending, release those resources. */
result = 0; result = 0;
spin_lock_irqsave(&i2400m->tx_lock, flags); spin_lock_irqsave(&i2400m->tx_lock, flags);
if (!work_pending(&i2400m->wake_tx_ws)) { if (!i2400m->wake_tx_skb) {
netif_stop_queue(net_dev); netif_stop_queue(net_dev);
i2400m_get(i2400m); i2400m_get(i2400m);
i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */ i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
......
...@@ -2181,9 +2181,10 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) ...@@ -2181,9 +2181,10 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ)); mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
} }
static void send_scan_event(void *data) static void ipw2100_scan_event(struct work_struct *work)
{ {
struct ipw2100_priv *priv = data; struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
scan_event.work);
union iwreq_data wrqu; union iwreq_data wrqu;
wrqu.data.length = 0; wrqu.data.length = 0;
...@@ -2191,18 +2192,6 @@ static void send_scan_event(void *data) ...@@ -2191,18 +2192,6 @@ static void send_scan_event(void *data)
wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
} }
static void ipw2100_scan_event_later(struct work_struct *work)
{
send_scan_event(container_of(work, struct ipw2100_priv,
scan_event_later.work));
}
static void ipw2100_scan_event_now(struct work_struct *work)
{
send_scan_event(container_of(work, struct ipw2100_priv,
scan_event_now));
}
static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
{ {
IPW_DEBUG_SCAN("scan complete\n"); IPW_DEBUG_SCAN("scan complete\n");
...@@ -2212,13 +2201,11 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) ...@@ -2212,13 +2201,11 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
/* Only userspace-requested scan completion events go out immediately */ /* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) { if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event_later)) schedule_delayed_work(&priv->scan_event,
schedule_delayed_work(&priv->scan_event_later, round_jiffies_relative(msecs_to_jiffies(4000)));
round_jiffies_relative(msecs_to_jiffies(4000)));
} else { } else {
priv->user_requested_scan = 0; priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event_later); mod_delayed_work(system_wq, &priv->scan_event, 0);
schedule_work(&priv->scan_event_now);
} }
} }
...@@ -4459,8 +4446,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv) ...@@ -4459,8 +4446,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
cancel_delayed_work_sync(&priv->wx_event_work); cancel_delayed_work_sync(&priv->wx_event_work);
cancel_delayed_work_sync(&priv->hang_check); cancel_delayed_work_sync(&priv->hang_check);
cancel_delayed_work_sync(&priv->rf_kill); cancel_delayed_work_sync(&priv->rf_kill);
cancel_work_sync(&priv->scan_event_now); cancel_delayed_work_sync(&priv->scan_event);
cancel_delayed_work_sync(&priv->scan_event_later);
} }
static int ipw2100_tx_allocate(struct ipw2100_priv *priv) static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
...@@ -6195,8 +6181,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, ...@@ -6195,8 +6181,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_WORK(&priv->scan_event_now, ipw2100_scan_event_now); INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
INIT_DELAYED_WORK(&priv->scan_event_later, ipw2100_scan_event_later);
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw2100_irq_tasklet, (unsigned long)priv); ipw2100_irq_tasklet, (unsigned long)priv);
......
...@@ -577,8 +577,7 @@ struct ipw2100_priv { ...@@ -577,8 +577,7 @@ struct ipw2100_priv {
struct delayed_work wx_event_work; struct delayed_work wx_event_work;
struct delayed_work hang_check; struct delayed_work hang_check;
struct delayed_work rf_kill; struct delayed_work rf_kill;
struct work_struct scan_event_now; struct delayed_work scan_event;
struct delayed_work scan_event_later;
int user_requested_scan; int user_requested_scan;
......
...@@ -4480,18 +4480,11 @@ static void handle_scan_event(struct ipw_priv *priv) ...@@ -4480,18 +4480,11 @@ static void handle_scan_event(struct ipw_priv *priv)
{ {
/* Only userspace-requested scan completion events go out immediately */ /* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) { if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event)) schedule_delayed_work(&priv->scan_event,
schedule_delayed_work(&priv->scan_event, round_jiffies_relative(msecs_to_jiffies(4000)));
round_jiffies_relative(msecs_to_jiffies(4000)));
} else { } else {
union iwreq_data wrqu;
priv->user_requested_scan = 0; priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event); mod_delayed_work(system_wq, &priv->scan_event, 0);
wrqu.data.length = 0;
wrqu.data.flags = 0;
wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
} }
} }
......
...@@ -1752,6 +1752,8 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) ...@@ -1752,6 +1752,8 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
static struct mmc_host *reset_host; static struct mmc_host *reset_host;
static void sdio_card_reset_worker(struct work_struct *work) static void sdio_card_reset_worker(struct work_struct *work)
{ {
struct mmc_host *target = reset_host;
/* The actual reset operation must be run outside of driver thread. /* The actual reset operation must be run outside of driver thread.
* This is because mmc_remove_host() will cause the device to be * This is because mmc_remove_host() will cause the device to be
* instantly destroyed, and the driver then needs to end its thread, * instantly destroyed, and the driver then needs to end its thread,
...@@ -1761,10 +1763,10 @@ static void sdio_card_reset_worker(struct work_struct *work) ...@@ -1761,10 +1763,10 @@ static void sdio_card_reset_worker(struct work_struct *work)
*/ */
pr_err("Resetting card...\n"); pr_err("Resetting card...\n");
mmc_remove_host(reset_host); mmc_remove_host(target);
/* 20ms delay is based on experiment with sdhci controller */ /* 20ms delay is based on experiment with sdhci controller */
mdelay(20); mdelay(20);
mmc_add_host(reset_host); mmc_add_host(target);
} }
static DECLARE_WORK(card_reset_work, sdio_card_reset_worker); static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
...@@ -1773,9 +1775,6 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) ...@@ -1773,9 +1775,6 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
{ {
struct sdio_mmc_card *card = adapter->card; struct sdio_mmc_card *card = adapter->card;
if (work_pending(&card_reset_work))
return;
reset_host = card->func->card->host; reset_host = card->func->card->host;
schedule_work(&card_reset_work); schedule_work(&card_reset_work);
} }
......
...@@ -68,8 +68,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl) ...@@ -68,8 +68,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
unsigned long timeout, start; unsigned long timeout, start;
u32 elp_reg; u32 elp_reg;
if (delayed_work_pending(&wl->elp_work)) cancel_delayed_work(&wl->elp_work);
cancel_delayed_work(&wl->elp_work);
if (!wl->elp) if (!wl->elp)
return 0; return 0;
......
...@@ -4877,8 +4877,7 @@ static int __init light_init(struct ibm_init_struct *iibm) ...@@ -4877,8 +4877,7 @@ static int __init light_init(struct ibm_init_struct *iibm)
static void light_exit(void) static void light_exit(void)
{ {
led_classdev_unregister(&tpacpi_led_thinklight.led_classdev); led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
if (work_pending(&tpacpi_led_thinklight.work)) flush_workqueue(tpacpi_wq);
flush_workqueue(tpacpi_wq);
} }
static int light_read(struct seq_file *m) static int light_read(struct seq_file *m)
......
...@@ -179,8 +179,7 @@ static void max3100_work(struct work_struct *w); ...@@ -179,8 +179,7 @@ static void max3100_work(struct work_struct *w);
static void max3100_dowork(struct max3100_port *s) static void max3100_dowork(struct max3100_port *s)
{ {
if (!s->force_end_work && !work_pending(&s->work) && if (!s->force_end_work && !freezing(current) && !s->suspending)
!freezing(current) && !s->suspending)
queue_work(s->workqueue, &s->work); queue_work(s->workqueue, &s->work);
} }
......
...@@ -1121,8 +1121,7 @@ static int exynos_dp_remove(struct platform_device *pdev) ...@@ -1121,8 +1121,7 @@ static int exynos_dp_remove(struct platform_device *pdev)
disable_irq(dp->irq); disable_irq(dp->irq);
if (work_pending(&dp->hotplug_work)) flush_work(&dp->hotplug_work);
flush_work(&dp->hotplug_work);
if (pdev->dev.of_node) { if (pdev->dev.of_node) {
if (dp->phy_addr) if (dp->phy_addr)
...@@ -1144,8 +1143,7 @@ static int exynos_dp_suspend(struct device *dev) ...@@ -1144,8 +1143,7 @@ static int exynos_dp_suspend(struct device *dev)
struct exynos_dp_platdata *pdata = dev->platform_data; struct exynos_dp_platdata *pdata = dev->platform_data;
struct exynos_dp_device *dp = dev_get_drvdata(dev); struct exynos_dp_device *dp = dev_get_drvdata(dev);
if (work_pending(&dp->hotplug_work)) flush_work(&dp->hotplug_work);
flush_work(&dp->hotplug_work);
if (dev->of_node) { if (dev->of_node) {
if (dp->phy_addr) if (dp->phy_addr)
......
...@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list); ...@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list);
static void kprobe_optimizer(struct work_struct *work); static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
static DECLARE_COMPLETION(optimizer_comp);
#define OPTIMIZE_DELAY 5 #define OPTIMIZE_DELAY 5
/* /*
...@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) ...@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
/* Start optimizer after OPTIMIZE_DELAY passed */ /* Start optimizer after OPTIMIZE_DELAY passed */
static __kprobes void kick_kprobe_optimizer(void) static __kprobes void kick_kprobe_optimizer(void)
{ {
if (!delayed_work_pending(&optimizing_work)) schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
} }
/* Kprobe jump optimizer */ /* Kprobe jump optimizer */
...@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) ...@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
/* Step 5: Kick optimizer again if needed */ /* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
kick_kprobe_optimizer(); kick_kprobe_optimizer();
else
/* Wake up all waiters */
complete_all(&optimizer_comp);
} }
/* Wait for completing optimization and unoptimization */ /* Wait for completing optimization and unoptimization */
static __kprobes void wait_for_kprobe_optimizer(void) static __kprobes void wait_for_kprobe_optimizer(void)
{ {
if (delayed_work_pending(&optimizing_work)) mutex_lock(&kprobe_mutex);
wait_for_completion(&optimizer_comp);
while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
mutex_unlock(&kprobe_mutex);
/* this will also make optimizing_work execute immmediately */
flush_delayed_work(&optimizing_work);
/* @optimizing_work might not have been queued yet, relax */
cpu_relax();
mutex_lock(&kprobe_mutex);
}
mutex_unlock(&kprobe_mutex);
} }
/* Optimize kprobe if p is ready to be optimized */ /* Optimize kprobe if p is ready to be optimized */
......
...@@ -148,11 +148,9 @@ static unsigned long rfkill_ratelimit(const unsigned long last) ...@@ -148,11 +148,9 @@ static unsigned long rfkill_ratelimit(const unsigned long last)
static void rfkill_schedule_ratelimited(void) static void rfkill_schedule_ratelimited(void)
{ {
if (delayed_work_pending(&rfkill_op_work)) if (schedule_delayed_work(&rfkill_op_work,
return; rfkill_ratelimit(rfkill_last_scheduled)))
schedule_delayed_work(&rfkill_op_work, rfkill_last_scheduled = jiffies;
rfkill_ratelimit(rfkill_last_scheduled));
rfkill_last_scheduled = jiffies;
} }
static void rfkill_schedule_global_op(enum rfkill_sched_op op) static void rfkill_schedule_global_op(enum rfkill_sched_op op)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment