Commit f0626710 authored by Tejun Heo, committed by Roland Dreier

RDMA: Update workqueue usage

* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue.  All system workqueue usages
  including flush_scheduled_work() callers are converted to use and
  flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() converted to
  cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().
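The conversion is mechanical; the following is a minimal sketch of the pattern for a hypothetical driver, not code from this commit (the "example" workqueue, work items, and functions are illustrative only): allocate a private workqueue, queue work to it instead of the system workqueue, and replace cancel_delayed_work() + flush_scheduled_work() with cancel_delayed_work_sync().

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Driver-private workqueue, playing the role ib_wq plays in this commit. */
static struct workqueue_struct *example_wq;
static struct work_struct example_task;
static struct delayed_work example_poll;

static void example_task_fn(struct work_struct *work)
{
        /* event handling that used to be queued with schedule_work() */
}

static void example_poll_fn(struct work_struct *work)
{
        /* periodic work that used to be queued with schedule_delayed_work() */
}

static int __init example_init(void)
{
        example_wq = alloc_workqueue("example", 0, 0);
        if (!example_wq)
                return -ENOMEM;

        INIT_WORK(&example_task, example_task_fn);
        INIT_DELAYED_WORK(&example_poll, example_poll_fn);

        /* schedule_work(&example_task) becomes: */
        queue_work(example_wq, &example_task);
        /* schedule_delayed_work(&example_poll, delay) becomes: */
        queue_delayed_work(example_wq, &example_poll, msecs_to_jiffies(2));
        return 0;
}

static void __exit example_exit(void)
{
        /* cancel_delayed_work() + flush_scheduled_work() becomes: */
        cancel_delayed_work_sync(&example_poll);

        /*
         * flush_scheduled_work() in a device-removal path becomes
         * flush_workqueue(example_wq); destroying the workqueue at
         * module exit also drains any work still queued on it.
         */
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");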
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 948579cd
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
 INIT_WORK(&work->work, ib_cache_task);
 work->device = event->device;
 work->port_num = event->element.port_num;
-schedule_work(&work->work);
+queue_work(ib_wq, &work->work);
 }
 }
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
 int p;
 ib_unregister_event_handler(&device->cache.event_handler);
-flush_scheduled_work();
+flush_workqueue(ib_wq);
 for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 kfree(device->cache.pkey_cache[p]);
...
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include "core_priv.h"
@@ -52,6 +51,9 @@ struct ib_client_data {
 void * data;
 };
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
 int ret;
+ib_wq = alloc_workqueue("infiniband", 0, 0);
+if (!ib_wq)
+return -ENOMEM;
 ret = ib_sysfs_setup();
 if (ret)
 printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
 if (ret) {
 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 ib_sysfs_cleanup();
+destroy_workqueue(ib_wq);
 }
 return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
 ib_cache_cleanup();
 ib_sysfs_cleanup();
 /* Make sure that any pending umem accounting work is done. */
-flush_scheduled_work();
+destroy_workqueue(ib_wq);
 }
 module_init(ib_core_init);
...
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 port->sm_ah = NULL;
 spin_unlock_irqrestore(&port->ah_lock, flags);
-schedule_work(&sa_dev->port[event->element.port_num -
-sa_dev->start_port].update_task);
+queue_work(ib_wq, &sa_dev->port[event->element.port_num -
+sa_dev->start_port].update_task);
 }
 }
...
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
 umem->mm = mm;
 umem->diff = diff;
-schedule_work(&umem->work);
+queue_work(ib_wq, &umem->work);
 return;
 }
 } else
...
@@ -755,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
 */
 ipath_shutdown_device(dd);
-flush_scheduled_work();
+flush_workqueue(ib_wq);
 if (dd->verbs_dev)
 ipath_unregister_ib_device(dd->verbs_dev);
...
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 work->mm = mm;
 work->num_pages = num_pages;
-schedule_work(&work->work);
+queue_work(ib_wq, &work->work);
 return;
 bail_mm:
...
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 wake_up(&ppd->cpspec->autoneg_wait);
-cancel_delayed_work(&ppd->cpspec->autoneg_work);
-flush_scheduled_work();
+cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 shutdown_7220_relock_poll(ppd->dd);
 val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 toggle_7220_rclkrls(ppd->dd);
 /* 2 msec is minimum length of a poll cycle */
-schedule_delayed_work(&ppd->cpspec->autoneg_work,
-msecs_to_jiffies(2));
+queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+msecs_to_jiffies(2));
 }
 /*
...
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 wake_up(&ppd->cpspec->autoneg_wait);
-cancel_delayed_work(&ppd->cpspec->autoneg_work);
+cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 if (ppd->dd->cspec->r1)
-cancel_delayed_work(&ppd->cpspec->ipg_work);
-flush_scheduled_work();
+cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 ppd->cpspec->chase_end = 0;
 if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
 if (!(pins & mask)) {
 ++handled;
 qd->t_insert = get_jiffies_64();
-schedule_work(&qd->work);
+queue_work(ib_wq, &qd->work);
 }
 }
 }
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 qib_7322_mini_pcs_reset(ppd);
 /* 2 msec is minimum length of a poll cycle */
-schedule_delayed_work(&ppd->cpspec->autoneg_work,
-msecs_to_jiffies(2));
+queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+msecs_to_jiffies(2));
 }
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 ib_free_send_mad(send_buf);
 retry:
 delay = 2 << ppd->cpspec->ipg_tries;
-schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+msecs_to_jiffies(delay));
 }
 /*
...
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 static void verify_interrupt(unsigned long);
@@ -1044,24 +1043,10 @@ static int __init qlogic_ib_init(void)
 if (ret)
 goto bail;
-/*
- * We create our own workqueue mainly because we want to be
- * able to flush it when devices are being removed. We can't
- * use schedule_work()/flush_scheduled_work() because both
- * unregister_netdev() and linkwatch_event take the rtnl lock,
- * so flush_scheduled_work() can deadlock during device
- * removal.
- */
-qib_wq = create_workqueue("qib");
-if (!qib_wq) {
-ret = -ENOMEM;
-goto bail_dev;
-}
 qib_cq_wq = create_singlethread_workqueue("qib_cq");
 if (!qib_cq_wq) {
 ret = -ENOMEM;
-goto bail_wq;
+goto bail_dev;
 }
 /*
@@ -1091,8 +1076,6 @@ static int __init qlogic_ib_init(void)
 idr_destroy(&qib_unit_table);
 bail_cq_wq:
 destroy_workqueue(qib_cq_wq);
-bail_wq:
-destroy_workqueue(qib_wq);
 bail_dev:
 qib_dev_cleanup();
 bail:
@@ -1116,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 pci_unregister_driver(&qib_driver);
-destroy_workqueue(qib_wq);
 destroy_workqueue(qib_cq_wq);
 qib_cpulist_count = 0;
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 if (qib_mini_init || initfail || ret) {
 qib_stop_timers(dd);
-flush_scheduled_work();
+flush_workqueue(ib_wq);
 for (pidx = 0; pidx < dd->num_pports; ++pidx)
 dd->f_quiet_serdes(dd->pport + pidx);
 if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 qib_stop_timers(dd);
-/* wait until all of our (qsfp) schedule_work() calls complete */
-flush_scheduled_work();
+/* wait until all of our (qsfp) queue_work() calls complete */
+flush_workqueue(ib_wq);
 ret = qibfs_remove(dd);
 if (ret)
...
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 goto bail;
 /* We see a module, but it may be unwise to look yet. Just schedule */
 qd->t_insert = get_jiffies_64();
-schedule_work(&qd->work);
+queue_work(ib_wq, &qd->work);
 bail:
 return;
 }
@@ -493,10 +493,9 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 /*
- * There is nothing to do here for now. our
- * work is scheduled with schedule_work(), and
- * flush_scheduled_work() from remove_one will
- * block until all work ssetup with schedule_work()
+ * There is nothing to do here for now. our work is scheduled
+ * with queue_work(), and flush_workqueue() from remove_one
+ * will block until all work setup with queue_work()
 * completes.
 */
 }
...
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
 if (qib_send_ok(qp))
-queue_work(qib_wq, &qp->s_work);
+queue_work(ib_wq, &qp->s_work);
 }
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
...
@@ -638,7 +638,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
 if (target->state == SRP_TARGET_CONNECTING) {
 target->state = SRP_TARGET_DEAD;
 INIT_WORK(&target->work, srp_remove_work);
-schedule_work(&target->work);
+queue_work(ib_wq, &target->work);
 }
 spin_unlock_irq(&target->lock);
@@ -2199,7 +2199,7 @@ static void srp_remove_one(struct ib_device *device)
 * started before we marked our target ports as
 * removed, and any target port removal tasks.
 */
-flush_scheduled_work();
+flush_workqueue(ib_wq);
 list_for_each_entry_safe(target, tmp_target,
 &host->target_list, list) {
...
@@ -47,10 +47,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
+extern struct workqueue_struct *ib_wq;
 union ib_gid {
 u8 raw[16];
 struct {
...