Commit fd453d0e authored by Simon Derr, committed by Eric Van Hensbergen

9P/RDMA: Use a semaphore to protect the RQ

The current code keeps track of the number of buffers posted in the RQ
and prevents it from overflowing, but it does so by simply dropping
post requests (and leaking memory in the process).
When this happens, too few buffers are actually posted, and the 9P
server soon complains about 'RNR retry counter exceeded' errors.

Instead, use a semaphore and block until the RQ is ready for another
buffer to be posted.
Signed-off-by: Simon Derr <simon.derr@bull.net>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
parent 47229ff8
net/9p/trans_rdma.c
@@ -73,7 +73,7 @@
  * @sq_depth: The depth of the Send Queue
  * @sq_sem: Semaphore for the SQ
  * @rq_depth: The depth of the Receive Queue.
- * @rq_count: Count of requests in the Receive Queue.
+ * @rq_sem: Semaphore for the RQ
  * @addr: The remote peer's address
  * @req_lock: Protects the active request list
  * @cm_done: Completion event for connection management tracking
@@ -98,7 +98,7 @@ struct p9_trans_rdma {
 	int sq_depth;
 	struct semaphore sq_sem;
 	int rq_depth;
-	atomic_t rq_count;
+	struct semaphore rq_sem;
 	struct sockaddr_in addr;
 	spinlock_t req_lock;
 
@@ -341,8 +341,8 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
 
 	switch (c->wc_op) {
 	case IB_WC_RECV:
-		atomic_dec(&rdma->rq_count);
 		handle_recv(client, rdma, c, wc.status, wc.byte_len);
+		up(&rdma->rq_sem);
 		break;
 
 	case IB_WC_SEND:
@@ -441,12 +441,14 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	 * outstanding request, so we must keep a count to avoid
 	 * overflowing the RQ.
 	 */
-	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
-		err = post_recv(client, rpl_context);
-		if (err)
-			goto err_free1;
-	} else
-		atomic_dec(&rdma->rq_count);
+	if (down_interruptible(&rdma->rq_sem))
+		goto error;	/* FIXME : -EINTR instead */
+
+	err = post_recv(client, rpl_context);
+	if (err) {
+		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
+		goto err_free1;
+	}
 
 	/* remove posted receive buffer from request structure */
 	req->rc = NULL;
@@ -537,7 +539,7 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
 	spin_lock_init(&rdma->req_lock);
 	init_completion(&rdma->cm_done);
 	sema_init(&rdma->sq_sem, rdma->sq_depth);
-	atomic_set(&rdma->rq_count, 0);
+	sema_init(&rdma->rq_sem, rdma->rq_depth);
 
 	return rdma;
 }
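
The fix is the classic counting-semaphore bound on a producer/consumer queue: initialize the semaphore to the queue depth, take it before posting a buffer, and release it when a completion frees a slot. Below is a minimal userspace sketch of the same idiom, assuming POSIX semaphores in place of the kernel's struct semaphore; RQ_DEPTH and the helper names are illustrative, not taken from the driver.

/* Sketch of the RQ flow-control idiom adopted by this patch,
 * using POSIX semaphores. Build with: cc -pthread sketch.c */
#include <semaphore.h>

#define RQ_DEPTH 32	/* illustrative; the real depth comes from mount options */

static sem_t rq_sem;

/* Producer side: the analogue of rdma_request() posting a receive
 * buffer. Blocks instead of dropping the request when the RQ is full. */
static int post_recv_buffer(void)
{
	if (sem_wait(&rq_sem))
		return -1;	/* interrupted; the patch leaves this as a FIXME */
	/* ... the real code calls post_recv() here ... */
	return 0;
}

/* Completion side: the analogue of the IB_WC_RECV case in
 * cq_comp_handler(). Releasing the semaphore frees one RQ slot. */
static void recv_completed(void)
{
	/* ... the real code calls handle_recv() here ... */
	sem_post(&rq_sem);
}

int main(void)
{
	/* Mirrors sema_init(&rdma->rq_sem, rdma->rq_depth) in alloc_rdma(). */
	sem_init(&rq_sem, 0, RQ_DEPTH);

	for (int i = 0; i < RQ_DEPTH; i++)
		post_recv_buffer();	/* fills the RQ; one more post would block */

	recv_completed();		/* one completion frees one slot... */
	post_recv_buffer();		/* ...so this post proceeds immediately */

	sem_destroy(&rq_sem);
	return 0;
}

Note that the patch uses down_interruptible() rather than a plain down(), so a blocked writer can still be interrupted by a signal; the FIXME in the diff records that this error path should eventually return -EINTR instead of a generic error.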