Commit e7006de6 authored by Sagi Grimberg's avatar Sagi Grimberg Committed by Christoph Hellwig

nvme: code command_id with a genctr for use-after-free validation

We cannot detect a (perhaps buggy) controller that is sending us
a completion for a request that was already completed (for example
sending a completion twice), this phenomenon was seen in the wild
a few times.

So to protect against this, we use the upper 4 msbits of the nvme sqe
command_id as a 4-bit generation counter and verify it matches
the existing request generation, which is incremented on every execution.

The 16-bit command_id structure now is constructed by:
| xxxx | xxxxxxxxxxxx |
  gen    request tag

This means that we are giving up some possible queue depth as 12 bits
allow for a maximum queue depth of 4095 instead of 65536, however we
never create such long queues anyways so no real harm done.
Suggested-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Tested-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 3b01a9d0
...@@ -1026,7 +1026,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) ...@@ -1026,7 +1026,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
return BLK_STS_IOERR; return BLK_STS_IOERR;
} }
cmd->common.command_id = req->tag; nvme_req(req)->genctr++;
cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd); trace_nvme_setup_cmd(req, cmd);
return ret; return ret;
} }
......
...@@ -152,6 +152,7 @@ enum nvme_quirks { ...@@ -152,6 +152,7 @@ enum nvme_quirks {
struct nvme_request { struct nvme_request {
struct nvme_command *cmd; struct nvme_command *cmd;
union nvme_result result; union nvme_result result;
u8 genctr;
u8 retries; u8 retries;
u8 flags; u8 flags;
u16 status; u16 status;
...@@ -491,6 +492,49 @@ struct nvme_ctrl_ops { ...@@ -491,6 +492,49 @@ struct nvme_ctrl_ops {
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
}; };
/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 *
 * The low 12 bits carry the blk-mq tag, the top 4 bits carry a per-request
 * generation counter used to reject stale/duplicate completions.
 */
/* Fully parenthesize macro arguments so expression arguments (e.g. a | b)
 * expand with the intended precedence. */
#define nvme_genctr_mask(gen)		((gen) & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	(((cid) & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		((cid) & 0xfff)
/*
 * Build the wire command_id for @rq: blk-mq tag in the low 12 bits,
 * the request's current generation counter in the top 4 bits.
 */
static inline u16 nvme_cid(struct request *rq)
{
	u16 genctr_bits = nvme_cid_install_genctr(nvme_req(rq)->genctr);

	return genctr_bits | rq->tag;
}
/*
 * Look up the request for @command_id and validate that the generation
 * counter embedded in the id matches the request's current generation.
 * Returns NULL (after logging) when the tag resolves to no request or
 * when the generation does not match, i.e. the completion is stale or
 * duplicated.
 */
static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u16 tag = nvme_tag_from_cid(command_id);
	u8 genctr = nvme_genctr_from_cid(command_id);
	struct request *rq = blk_mq_tag_to_rq(tags, tag);
	u8 expected;

	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}

	/* Compare only the low 4 bits: that is all the cid can carry. */
	expected = nvme_genctr_mask(nvme_req(rq)->genctr);
	if (unlikely(expected != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, expected);
		return NULL;
	}
	return rq;
}
/*
 * Translate @command_id to its request WITHOUT checking the generation
 * counter — for paths that only need the tag-to-request mapping.
 */
static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u16 tag = nvme_tag_from_cid(command_id);

	return blk_mq_tag_to_rq(tags, tag);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj, void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
const char *dev_name); const char *dev_name);
...@@ -588,7 +632,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl) ...@@ -588,7 +632,8 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
static inline bool nvme_is_aen_req(u16 qid, __u16 command_id) static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{ {
return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH; return !qid &&
nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
} }
void nvme_complete_rq(struct request *req); void nvme_complete_rq(struct request *req);
......
...@@ -1010,7 +1010,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) ...@@ -1010,7 +1010,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
return; return;
} }
req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id); req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) { if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device, dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n", "invalid id %d completed on queue %d\n",
......
...@@ -1730,10 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, ...@@ -1730,10 +1730,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
struct request *rq; struct request *rq;
struct nvme_rdma_request *req; struct nvme_rdma_request *req;
rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id); rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
if (!rq) { if (!rq) {
dev_err(queue->ctrl->ctrl.device, dev_err(queue->ctrl->ctrl.device,
"tag 0x%x on QP %#x not found\n", "got bad command_id %#x on QP %#x\n",
cqe->command_id, queue->qp->qp_num); cqe->command_id, queue->qp->qp_num);
nvme_rdma_error_recovery(queue->ctrl); nvme_rdma_error_recovery(queue->ctrl);
return; return;
......
...@@ -487,11 +487,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, ...@@ -487,11 +487,11 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
{ {
struct request *rq; struct request *rq;
rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id); rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
if (!rq) { if (!rq) {
dev_err(queue->ctrl->ctrl.device, dev_err(queue->ctrl->ctrl.device,
"queue %d tag 0x%x not found\n", "got bad cqe.command_id %#x on queue %d\n",
nvme_tcp_queue_id(queue), cqe->command_id); cqe->command_id, nvme_tcp_queue_id(queue));
nvme_tcp_error_recovery(&queue->ctrl->ctrl); nvme_tcp_error_recovery(&queue->ctrl->ctrl);
return -EINVAL; return -EINVAL;
} }
...@@ -508,11 +508,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, ...@@ -508,11 +508,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
{ {
struct request *rq; struct request *rq;
rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) { if (!rq) {
dev_err(queue->ctrl->ctrl.device, dev_err(queue->ctrl->ctrl.device,
"queue %d tag %#x not found\n", "got bad c2hdata.command_id %#x on queue %d\n",
nvme_tcp_queue_id(queue), pdu->command_id); pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT; return -ENOENT;
} }
...@@ -606,7 +606,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, ...@@ -606,7 +606,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->hdr.plen = data->hdr.plen =
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
data->ttag = pdu->ttag; data->ttag = pdu->ttag;
data->command_id = rq->tag; data->command_id = nvme_cid(rq);
data->data_offset = cpu_to_le32(req->data_sent); data->data_offset = cpu_to_le32(req->data_sent);
data->data_length = cpu_to_le32(req->pdu_len); data->data_length = cpu_to_le32(req->pdu_len);
return 0; return 0;
...@@ -619,11 +619,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, ...@@ -619,11 +619,11 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
struct request *rq; struct request *rq;
int ret; int ret;
rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) { if (!rq) {
dev_err(queue->ctrl->ctrl.device, dev_err(queue->ctrl->ctrl.device,
"queue %d tag %#x not found\n", "got bad r2t.command_id %#x on queue %d\n",
nvme_tcp_queue_id(queue), pdu->command_id); pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT; return -ENOENT;
} }
req = blk_mq_rq_to_pdu(rq); req = blk_mq_rq_to_pdu(rq);
...@@ -703,7 +703,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, ...@@ -703,7 +703,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
{ {
struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
struct request *rq = struct request *rq =
blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
while (true) { while (true) {
...@@ -796,7 +796,7 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, ...@@ -796,7 +796,7 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
} }
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
pdu->command_id); pdu->command_id);
nvme_tcp_end_request(rq, NVME_SC_SUCCESS); nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
......
...@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req) ...@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
} else { } else {
struct request *rq; struct request *rq;
rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id); rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
if (!rq) { if (!rq) {
dev_err(queue->ctrl->ctrl.device, dev_err(queue->ctrl->ctrl.device,
"tag 0x%x on queue %d not found\n", "got bad command_id %#x on queue %d\n",
cqe->command_id, nvme_loop_queue_idx(queue)); cqe->command_id, nvme_loop_queue_idx(queue));
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment