Commit ea663abb authored by Mike Marciniszyn's avatar Mike Marciniszyn Committed by Greg Kroah-Hartman

IB/hfi1: Close PSM sdma_progress sleep window

commit da9de5f8 upstream.

The call to sdma_progress() is called outside the wait lock.

In this case, there is a race condition where sdma_progress() can return
false and the sdma_engine can idle.  If that happens, there will be no
more sdma interrupts to cause the wakeup and the user_sdma xmit will hang.

Fix by moving the lock to enclose the sdma_progress() call.

Also, delete busycount. The need for this was removed by:
commit bcad2913 ("IB/hfi1: Serve the most starved iowait entry first")

Cc: <stable@vger.kernel.org>
Fixes: 77241056 ("IB/hfi1: add driver files")
Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 997ef649
@@ -260,7 +260,6 @@ struct user_sdma_txreq {
 	struct list_head list;
 	struct user_sdma_request *req;
 	u16 flags;
-	unsigned busycount;
 	u64 seqnum;
 };
@@ -323,25 +322,22 @@ static int defer_packet_queue(
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
 	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
-	struct user_sdma_txreq *tx =
-		container_of(txreq, struct user_sdma_txreq, txreq);
 
-	if (sdma_progress(sde, seq, txreq)) {
-		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-			goto eagain;
-	}
+	write_seqlock(&dev->iowait_lock);
+	if (sdma_progress(sde, seq, txreq))
+		goto eagain;
 	/*
 	 * We are assuming that if the list is enqueued somewhere, it
 	 * is to the dmawait list since that is the only place where
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	write_seqlock(&dev->iowait_lock);
 	if (list_empty(&pq->busy.list))
 		list_add_tail(&pq->busy.list, &sde->dmawait);
 	write_sequnlock(&dev->iowait_lock);
 	return -EBUSY;
 eagain:
+	write_sequnlock(&dev->iowait_lock);
 	return -EAGAIN;
 }
@@ -925,7 +921,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		tx->flags = 0;
 		tx->req = req;
-		tx->busycount = 0;
 		INIT_LIST_HEAD(&tx->list);
 
 		if (req->seqnum == req->info.npkts - 1)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment