Commit 25e5cb78 authored by Sagi Grimberg, committed by Christoph Hellwig

nvme-tcp: fix possible crash in write_zeroes processing

We cannot look at blk_rq_payload_bytes() without first checking that
the request has mappable physical segments (i.e.
blk_rq_nr_phys_segments(rq) != 0); only then may we read the payload
byte count. Getting this wrong caused us to send a wrong SGL to the
target, or even to dereference a non-existing buffer if we actually
reached the data send sequence (when the data was in-capsule).
Reported-by: Tony Asleson <tasleson@redhat.com>
Suggested-by: Chaitanya Kulkarni <Chaitanya.Kulkarni@wdc.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent d038dd81
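For illustration only, a minimal sketch (not code from the driver) of the guarded pattern the diff below applies: the physical segment count is checked before the payload byte count is trusted. The helper name example_req_data_len is hypothetical; blk_rq_nr_phys_segments() and blk_rq_payload_bytes() are the block layer helpers named above.

#include <linux/blkdev.h>

/*
 * Hypothetical helper illustrating the fix: a command such as Write Zeroes
 * can report a non-zero blk_rq_payload_bytes() even though it carries no
 * mappable data, so the physical segment count must be checked first.
 */
static unsigned int example_req_data_len(struct request *rq)
{
	if (!blk_rq_nr_phys_segments(rq))
		return 0;	/* nothing to map or send inline */

	return blk_rq_payload_bytes(rq);
}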
drivers/nvme/host/tcp.c
@@ -174,16 +174,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 {
 	struct request *rq;
-	unsigned int bytes;
 
 	if (unlikely(nvme_tcp_async_req(req)))
 		return false; /* async events don't have a request */
 
 	rq = blk_mq_rq_from_pdu(req);
-	bytes = blk_rq_payload_bytes(rq);
 
-	return rq_data_dir(rq) == WRITE && bytes &&
-		bytes <= nvme_tcp_inline_data_size(req->queue);
+	return rq_data_dir(rq) == WRITE && req->data_len &&
+		req->data_len <= nvme_tcp_inline_data_size(req->queue);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -2164,7 +2162,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
-	if (rq_data_dir(rq) == WRITE && req->data_len &&
+	if (!blk_rq_nr_phys_segments(rq))
+		nvme_tcp_set_sg_null(c);
+	else if (rq_data_dir(rq) == WRITE &&
 	    req->data_len <= nvme_tcp_inline_data_size(queue))
 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
 	else
@@ -2191,7 +2191,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->data_sent = 0;
 	req->pdu_len = 0;
 	req->pdu_sent = 0;
-	req->data_len = blk_rq_payload_bytes(rq);
+	req->data_len = blk_rq_nr_phys_segments(rq) ?
+				blk_rq_payload_bytes(rq) : 0;
 	req->curr_bio = rq->bio;
 
 	if (rq_data_dir(rq) == WRITE &&