Commit 72691a26 authored by Trond Myklebust

SUNRPC: Don't reuse bvec on retransmission of the request

If a request is re-encoded and then retransmitted, we need to make sure
that we also re-encode the bvec, in case the page lists have changed.

Fixes: ff053dbb ("SUNRPC: Move the call to xprt_send_pagedata() out of xprt_sock_sendmsg()")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 6622e3a7
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -144,7 +144,8 @@ struct rpc_xprt_ops {
 	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
 	int		(*buf_alloc)(struct rpc_task *task);
 	void		(*buf_free)(struct rpc_task *task);
-	int		(*prepare_request)(struct rpc_rqst *req);
+	int		(*prepare_request)(struct rpc_rqst *req,
+					   struct xdr_buf *buf);
 	int		(*send_request)(struct rpc_rqst *req);
 	void		(*wait_for_reply_request)(struct rpc_task *task);
 	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1870,7 +1870,6 @@ rpc_xdr_encode(struct rpc_task *task)
 	req->rq_snd_buf.head[0].iov_len = 0;
 	xdr_init_encode(&xdr, &req->rq_snd_buf,
 			req->rq_snd_buf.head[0].iov_base, req);
-	xdr_free_bvec(&req->rq_snd_buf);
 	if (rpc_encode_header(task, &xdr))
 		return;
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -73,7 +73,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
 static void	xprt_destroy(struct rpc_xprt *xprt);
 static void	xprt_request_init(struct rpc_task *task);
-static int	xprt_request_prepare(struct rpc_rqst *req);
+static int	xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -1149,7 +1149,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
 	if (!xprt_request_need_enqueue_receive(task, req))
 		return 0;
 
-	ret = xprt_request_prepare(task->tk_rqstp);
+	ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
 	if (ret)
 		return ret;
 	spin_lock(&xprt->queue_lock);
@@ -1179,8 +1179,11 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 
-	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
+	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
 		xprt_request_rb_remove(req->rq_xprt, req);
+		xdr_free_bvec(&req->rq_rcv_buf);
+		req->rq_private_buf.bvec = NULL;
+	}
 }
 
 /**
@@ -1336,8 +1339,14 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 {
 	struct rpc_rqst *pos, *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
+	int ret;
 
 	if (xprt_request_need_enqueue_transmit(task, req)) {
+		ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
+		if (ret) {
+			task->tk_status = ret;
+			return;
+		}
 		req->rq_bytes_sent = 0;
 		spin_lock(&xprt->queue_lock);
 		/*
@@ -1397,6 +1406,7 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task)
 	} else
 		list_del(&req->rq_xmit2);
 	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
+	xdr_free_bvec(&req->rq_snd_buf);
 }
 
 /**
@@ -1433,8 +1443,6 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
 	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
 	    xprt_is_pinned_rqst(req)) {
 		spin_lock(&xprt->queue_lock);
-		xprt_request_dequeue_transmit_locked(task);
-		xprt_request_dequeue_receive_locked(task);
 		while (xprt_is_pinned_rqst(req)) {
 			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
 			spin_unlock(&xprt->queue_lock);
@@ -1442,6 +1450,8 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
 			spin_lock(&xprt->queue_lock);
 			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
 		}
+		xprt_request_dequeue_transmit_locked(task);
+		xprt_request_dequeue_receive_locked(task);
 		spin_unlock(&xprt->queue_lock);
 	}
 }
@@ -1449,18 +1459,19 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
 /**
  * xprt_request_prepare - prepare an encoded request for transport
  * @req: pointer to rpc_rqst
+ * @buf: pointer to send/rcv xdr_buf
  *
  * Calls into the transport layer to do whatever is needed to prepare
  * the request for transmission or receive.
  * Returns error, or zero.
  */
 static int
-xprt_request_prepare(struct rpc_rqst *req)
+xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf)
 {
 	struct rpc_xprt *xprt = req->rq_xprt;
 
 	if (xprt->ops->prepare_request)
-		return xprt->ops->prepare_request(req);
+		return xprt->ops->prepare_request(req, buf);
 	return 0;
 }
@@ -1961,8 +1972,6 @@ void xprt_release(struct rpc_task *task)
 	spin_unlock(&xprt->transport_lock);
 	if (req->rq_buffer)
 		xprt->ops->buf_free(task);
-	xdr_free_bvec(&req->rq_rcv_buf);
-	xdr_free_bvec(&req->rq_snd_buf);
 	if (req->rq_cred != NULL)
 		put_rpccred(req->rq_cred);
 	if (req->rq_release_snd_buf)
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -822,17 +822,9 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
 	return ret;
 }
 
-static int
-xs_stream_prepare_request(struct rpc_rqst *req)
+static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf)
 {
-	gfp_t gfp = rpc_task_gfp_mask();
-	int ret;
-
-	ret = xdr_alloc_bvec(&req->rq_snd_buf, gfp);
-	if (ret < 0)
-		return ret;
-	xdr_free_bvec(&req->rq_rcv_buf);
-	return xdr_alloc_bvec(&req->rq_rcv_buf, gfp);
+	return xdr_alloc_bvec(buf, rpc_task_gfp_mask());
 }
 
 /*
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment