Commit 0306eda2 authored by Roland Dreier

Revert "IPoIB: Use dedicated workqueues per interface"

This reverts commit 5141861c.

The series of IPoIB bug fixes that went into 3.19-rc1 introduced
regressions, and after trying to sort things out, we decided to revert
to 3.18's IPoIB driver and get things right for 3.20.
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 4e0ab200
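
For context, the patch being reverted (5141861c) had replaced the driver-global ipoib_workqueue with a per-interface priv->wq allocated in ipoib_transport_dev_init(); every hunk below simply switches those call sites back and removes the per-device allocation and teardown. The following sketch is illustrative only, not driver code — the demo_* names are invented for the example — but the workqueue calls it uses (create_singlethread_workqueue(), queue_delayed_work(), flush_workqueue(), destroy_workqueue()) and the stop-flag shutdown pattern are the same ones the restored code relies on.

/*
 * Illustrative sketch only (not IPoIB driver code): one driver-global,
 * single-threaded workqueue shared by every device, which is the
 * arrangement this revert returns to.  All demo_* identifiers are
 * invented for the example.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#define DEMO_STOP_REAPER 0

static struct workqueue_struct *demo_wq;        /* stands in for ipoib_workqueue */

static struct demo_priv {
        unsigned long flags;
        struct delayed_work reap_task;          /* per-device work item */
} demo;

static void demo_reap(struct work_struct *work)
{
        struct demo_priv *priv =
                container_of(work, struct demo_priv, reap_task.work);

        /* ... per-device housekeeping would go here ... */

        /* re-arm on the shared queue, as ipoib_reap_ah() does */
        if (!test_bit(DEMO_STOP_REAPER, &priv->flags))
                queue_delayed_work(demo_wq, &priv->reap_task,
                                   round_jiffies_relative(HZ));
}

static int __init demo_init(void)
{
        demo_wq = create_singlethread_workqueue("demo");
        if (!demo_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&demo.reap_task, demo_reap);
        queue_delayed_work(demo_wq, &demo.reap_task, HZ);
        return 0;
}

static void __exit demo_exit(void)
{
        /* mirrors the stop-flag + cancel + flush order the diff restores
         * in ipoib_ib_dev_stop(); the _sync variant is used here because
         * this sketch also destroys the queue immediately afterwards
         */
        set_bit(DEMO_STOP_REAPER, &demo.flags);
        cancel_delayed_work_sync(&demo.reap_task);
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The comment removed from ipoib_init_module() further down records why the per-device queues had been introduced in the first place: bringing one device up while taking another down can deadlock a single shared workqueue.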
@@ -323,7 +323,6 @@ struct ipoib_dev_priv {
         struct list_head multicast_list;
         struct rb_root multicast_tree;
 
-        struct workqueue_struct *wq;
         struct delayed_work mcast_task;
         struct work_struct carrier_on_task;
         struct work_struct flush_light;
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
         }
 
         spin_lock_irq(&priv->lock);
-        queue_delayed_work(priv->wq,
+        queue_delayed_work(ipoib_workqueue,
                            &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
         /* Add this entry to passive ids list head, but do not re-add it
          * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                         spin_lock_irqsave(&priv->lock, flags);
                         list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                         ipoib_cm_start_rx_drain(priv);
-                        queue_work(priv->wq, &priv->cm.rx_reap_task);
+                        queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                         spin_unlock_irqrestore(&priv->lock, flags);
                 } else
                         ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                                 spin_lock_irqsave(&priv->lock, flags);
                                 list_move(&p->list, &priv->cm.rx_reap_list);
                                 spin_unlock_irqrestore(&priv->lock, flags);
-                                queue_work(priv->wq, &priv->cm.rx_reap_task);
+                                queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                         }
                         return;
                 }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
                 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                         list_move(&tx->list, &priv->cm.reap_list);
-                        queue_work(priv->wq, &priv->cm.reap_task);
+                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                 }
 
                 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
                 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                         list_move(&tx->list, &priv->cm.reap_list);
-                        queue_work(priv->wq, &priv->cm.reap_task);
+                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                 }
 
                 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
         tx->dev = dev;
         list_add(&tx->list, &priv->cm.start_list);
         set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-        queue_work(priv->wq, &priv->cm.start_task);
+        queue_work(ipoib_workqueue, &priv->cm.start_task);
         return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
         if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                 spin_lock_irqsave(&priv->lock, flags);
                 list_move(&tx->list, &priv->cm.reap_list);
-                queue_work(priv->wq, &priv->cm.reap_task);
+                queue_work(ipoib_workqueue, &priv->cm.reap_task);
                 ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                           tx->neigh->daddr + 4);
                 tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
         skb_queue_tail(&priv->cm.skb_queue, skb);
         if (e)
-                queue_work(priv->wq, &priv->cm.skb_task);
+                queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
         }
 
         if (!list_empty(&priv->cm.passive_ids))
-                queue_delayed_work(priv->wq,
+                queue_delayed_work(ipoib_workqueue,
                                    &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
         spin_unlock_irq(&priv->lock);
 }
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
         __ipoib_reap_ah(dev);
 
         if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-                queue_delayed_work(priv->wq, &priv->ah_reap_task,
+                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                    round_jiffies_relative(HZ));
 }
 
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
         }
 
         clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-        queue_delayed_work(priv->wq, &priv->ah_reap_task,
+        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                            round_jiffies_relative(HZ));
 
         if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -881,7 +881,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
         set_bit(IPOIB_STOP_REAPER, &priv->flags);
         cancel_delayed_work(&priv->ah_reap_task);
         if (flush)
-                flush_workqueue(priv->wq);
+                flush_workqueue(ipoib_workqueue);
 
         begin = jiffies;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
                 return;
         }
 
-        queue_work(priv->wq, &priv->restart_task);
+        queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
         __ipoib_reap_neigh(priv);
 
         if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-                queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+                queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                                    arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
         /* start garbage collection */
         clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-        queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+        queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                            arp_tbl.gc_interval);
 
         return 0;
@@ -1293,7 +1293,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
         return 0;
 
 out_dev_uninit:
-        ipoib_ib_dev_cleanup(dev);
+        ipoib_ib_dev_cleanup();
 
 out_tx_ring_cleanup:
         vfree(priv->tx_ring);
@@ -1646,7 +1646,7 @@ static struct net_device *ipoib_add_port(const char *format,
         /* Stop GC if started before flush */
         set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
         cancel_delayed_work(&priv->neigh_reap_task);
-        flush_workqueue(priv->wq);
+        flush_workqueue(ipoib_workqueue);
 
 event_failed:
         ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1717,7 @@ static void ipoib_remove_one(struct ib_device *device)
                 /* Stop GC */
                 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
                 cancel_delayed_work(&priv->neigh_reap_task);
-                flush_workqueue(priv->wq);
+                flush_workqueue(ipoib_workqueue);
 
                 unregister_netdev(priv->dev);
                 free_netdev(priv->dev);
@@ -1758,13 +1758,8 @@ static int __init ipoib_init_module(void)
          * unregister_netdev() and linkwatch_event take the rtnl lock,
          * so flush_scheduled_work() can deadlock during device
          * removal.
-         *
-         * In addition, bringing one device up and another down at the
-         * same time can deadlock a single workqueue, so we have this
-         * global fallback workqueue, but we also attempt to open a
-         * per device workqueue each time we bring an interface up
          */
-        ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+        ipoib_workqueue = create_singlethread_workqueue("ipoib");
         if (!ipoib_workqueue) {
                 ret = -ENOMEM;
                 goto err_fs;
@@ -388,7 +388,7 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
          * the workqueue while holding the rtnl lock, so loop
          * on trylock until either we get the lock or we see
          * FLAG_ADMIN_UP go away as that signals that we are bailing
-         * and can safely ignore the carrier on work.
+         * and can safely ignore the carrier on work
          */
         while (!rtnl_trylock()) {
                 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
@@ -432,14 +432,15 @@ static int ipoib_mcast_join_complete(int status,
         if (!status) {
                 mcast->backoff = 1;
                 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                        queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+                        queue_delayed_work(ipoib_workqueue,
+                                           &priv->mcast_task, 0);
 
                 /*
-                 * Defer carrier on work to priv->wq to avoid a
+                 * Defer carrier on work to ipoib_workqueue to avoid a
                  * deadlock on rtnl_lock here.
                  */
                 if (mcast == priv->broadcast)
-                        queue_work(priv->wq, &priv->carrier_on_task);
+                        queue_work(ipoib_workqueue, &priv->carrier_on_task);
         } else {
                 if (mcast->logcount++ < 20) {
                         if (status == -ETIMEDOUT || status == -EAGAIN) {
@@ -464,7 +465,7 @@ static int ipoib_mcast_join_complete(int status,
         if (status == -ENETRESET)
                 status = 0;
         if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                queue_delayed_work(priv->wq, &priv->mcast_task,
+                queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                    mcast->backoff * HZ);
         spin_unlock_irq(&priv->lock);
         mutex_unlock(&mcast_mutex);
@@ -534,7 +535,8 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                         mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
                 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                        queue_delayed_work(priv->wq, &priv->mcast_task,
+                        queue_delayed_work(ipoib_workqueue,
+                                           &priv->mcast_task,
                                            mcast->backoff * HZ);
         }
         mutex_unlock(&mcast_mutex);
@@ -574,8 +576,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
                         ipoib_warn(priv, "failed to allocate broadcast group\n");
                         mutex_lock(&mcast_mutex);
                         if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                                queue_delayed_work(priv->wq, &priv->mcast_task,
-                                                   HZ);
+                                queue_delayed_work(ipoib_workqueue,
+                                                   &priv->mcast_task, HZ);
                         mutex_unlock(&mcast_mutex);
                         return;
                 }
@@ -642,7 +644,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
         mutex_lock(&mcast_mutex);
         if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-                queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+                queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
         mutex_unlock(&mcast_mutex);
 
         return 0;
@@ -660,7 +662,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
         mutex_unlock(&mcast_mutex);
 
         if (flush)
-                flush_workqueue(priv->wq);
+                flush_workqueue(ipoib_workqueue);
 
         return 0;
 }
@@ -727,7 +729,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                         __ipoib_mcast_add(dev, mcast);
                         list_add_tail(&mcast->list, &priv->multicast_list);
                         if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-                                queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+                                queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
                 }
 
                 if (!mcast->ah) {
@@ -942,7 +944,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
          * completes. So do like the carrier on task and attempt to
          * take the rtnl lock, but if we can't before the ADMIN_UP flag
          * goes away, then just return and know that the remove list will
-         * get flushed later by mcast_stop_thread.
+         * get flushed later by mcast_dev_flush.
          */
         while (!rtnl_trylock()) {
                 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
         int ret, size;
         int i;
 
-        /*
-         * the various IPoIB tasks assume they will never race against
-         * themselves, so always use a single thread workqueue
-         */
-        priv->wq = create_singlethread_workqueue("ipoib_wq");
-        if (!priv->wq) {
-                printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-                return -ENODEV;
-        }
-
         priv->pd = ib_alloc_pd(priv->ca);
         if (IS_ERR(priv->pd)) {
                 printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-                goto out_free_wq;
+                return -ENODEV;
         }
 
         priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 
 out_free_pd:
         ib_dealloc_pd(priv->pd);
 
-out_free_wq:
-        destroy_workqueue(priv->wq);
-        priv->wq = NULL;
-
         return -ENODEV;
 }
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
         if (ib_dealloc_pd(priv->pd))
                 ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-        if (priv->wq) {
-                flush_workqueue(priv->wq);
-                destroy_workqueue(priv->wq);
-                priv->wq = NULL;
-        }
 }
 
 void ipoib_event(struct ib_event_handler *handler,