Commit 4a41c421 authored by Sukadev Bhattiprolu, committed by David S. Miller

ibmvnic: serialize access to work queue on remove

The work queue is used to queue reset requests like CHANGE-PARAM or
FAILOVER resets for the worker thread. When the adapter is being removed,
the adapter state is set to VNIC_REMOVING and the work queue is flushed
so no new work is added. However, the check for the adapter being removed
is racy: the adapter can go into the REMOVING state just after we check,
and we might end up adding work just as it is being flushed (or after).

The ->rwi_lock is already used to serialize queuing and dequeuing of work.
Extend its usage to ensure there is no race when scheduling/flushing work.
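To make the fix concrete, here is a condensed sketch of the scheduling side after this patch. The field and function names (rwi_lock, rwi_list, the ibmvnic_reset work struct, VNIC_REMOVING) come from the driver, but the failover and duplicate-reset checks are elided, so treat this as an illustration rather than the exact function body:

```c
static int ibmvnic_reset_sketch(struct ibmvnic_adapter *adapter,
				enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;
	int ret = 0;

	/* Hold rwi_lock from the state check all the way through
	 * schedule_work(), so ibmvnic_remove() cannot set REMOVING and
	 * flush the queue between the check and the enqueue.
	 */
	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		ret = EBUSY;
		goto err;
	}

	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);  /* GFP_ATOMIC: lock held */
	if (!rwi) {
		ret = ENOMEM;
		goto err;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	schedule_work(&adapter->ibmvnic_reset);
err:
	/* ibmvnic_close() can block, so it may only run after the unlock */
	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
	if (ret == ENOMEM)
		ibmvnic_close(adapter->netdev);
	return -ret;
}
```

The key point is that the state check, the enqueue, and schedule_work() now all sit inside one rwi_lock critical section, which is exactly the window the old code left open.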

Fixes: 6954a9e4 ("ibmvnic: Flush existing work items before device removal")
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
Cc: Uwe Kleine-König <uwe@kleine-koenig.org>
Cc: Saeed Mahameed <saeed@kernel.org>
Reviewed-by: Dany Madden <drt@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d3a7b9e
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2395,6 +2395,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
+
 	/*
 	 * If failover is pending don't schedule any other reset.
 	 * Instead let the failover complete. If there is already a
@@ -2415,14 +2417,11 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 		goto err;
 	}
 
-	spin_lock_irqsave(&adapter->rwi_lock, flags);
-
 	list_for_each(entry, &adapter->rwi_list) {
 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
 		if (tmp->reset_reason == reason) {
 			netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
 				   reason);
-			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 			ret = EBUSY;
 			goto err;
 		}
@@ -2430,8 +2429,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 
 	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
 	if (!rwi) {
-		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
-		ibmvnic_close(netdev);
 		ret = ENOMEM;
 		goto err;
 	}
@@ -2444,12 +2441,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 	}
 	rwi->reset_reason = reason;
 	list_add_tail(&rwi->list, &adapter->rwi_list);
-	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
 	schedule_work(&adapter->ibmvnic_reset);
 
-	return 0;
+	ret = 0;
 err:
+	/* ibmvnic_close() below can block, so drop the lock first */
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
+	if (ret == ENOMEM)
+		ibmvnic_close(netdev);
+
 	return -ret;
 }
@@ -5467,7 +5469,18 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&adapter->state_lock, flags);
+
+	/* If ibmvnic_reset() is scheduling a reset, wait for it to
+	 * finish. Then, set the state to REMOVING to prevent it from
+	 * scheduling any more work and to have reset functions ignore
+	 * any resets that have already been scheduled. Drop the lock
+	 * after setting state, so __ibmvnic_reset() which is called
+	 * from the flush_work() below, can make progress.
+	 */
+	spin_lock_irqsave(&adapter->rwi_lock, flags);
 	adapter->state = VNIC_REMOVING;
+	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
 	spin_unlock_irqrestore(&adapter->state_lock, flags);
 
 	flush_work(&adapter->ibmvnic_reset);
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1081,6 +1081,7 @@ struct ibmvnic_adapter {
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
 	enum ibmvnic_reset_reason reset_reason;
+	/* when taking both state and rwi locks, take state lock first */
 	spinlock_t rwi_lock;
 	struct list_head rwi_list;
 	struct work_struct ibmvnic_reset;
@@ -1097,6 +1098,8 @@ struct ibmvnic_adapter {
 	struct ibmvnic_tunables desired;
 	struct ibmvnic_tunables fallback;
 
-	/* Used for serializatin of state field */
+	/* Used for serialization of state field. When taking both state
+	 * and rwi locks, take state lock first.
+	 */
 	spinlock_t state_lock;
 };
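For context, the remove side pairs with the scheduling sketch earlier, following the lock-ordering rule the comments above document (state_lock first, then rwi_lock). Again a condensed, hypothetical sketch with unrelated teardown omitted; the real ibmvnic_remove() does considerably more:

```c
static void ibmvnic_remove_sketch(struct ibmvnic_adapter *adapter)
{
	unsigned long flags;

	/* Documented order: state_lock first, then rwi_lock */
	spin_lock_irqsave(&adapter->state_lock, flags);
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;	/* no new work can be queued now */
	spin_unlock(&adapter->rwi_lock);
	spin_unlock_irqrestore(&adapter->state_lock, flags);

	/* Safe: anything already queued runs to completion here, and
	 * ibmvnic_reset() will refuse new work once it sees REMOVING.
	 */
	flush_work(&adapter->ibmvnic_reset);
}
```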