Commit 0d4ee015 authored by Jens Axboe

Merge branch 'nvme-4.10-fixes' of git://git.infradead.org/nvme into for-linus

Pull nvme target fixes from Sagi:

Given that it's -rc6, I removed anything that is not a bug fix.

- nvmet-fc discard fix from Christoph
- queue disconnect fix from James
- nvmet-rdma dma sync fix from Parav
- Some more nvmet fixes
parents 690e5325 19e420bb
...
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 		return 0;
 
 	freq->sg_table.sgl = freq->first_sgl;
-	ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-			freq->sg_table.sgl);
+	ret = sg_alloc_table_chained(&freq->sg_table,
+			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
 	if (ret)
 		return -ENOMEM;
 
 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-	WARN_ON(op->nents > rq->nr_phys_segments);
+	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
 	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, dir);
...
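For context on the nvme-fc discard fix above: the SG-table sizing and the WARN_ON now use blk_rq_nr_phys_segments() instead of reading rq->nr_phys_segments directly, because a discard request carrying a driver-appended payload reports zero physical segments yet still maps one. A sketch of the helper as it appears in include/linux/blkdev.h around this release (quoted from memory for reference, not part of this diff):

static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	/* special-payload requests (e.g. discards with an appended
	 * payload page) have nr_phys_segments == 0 but still map
	 * exactly one segment */
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}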
...
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
 
+	nvmet_subsys_del_ctrls(subsys);
 	nvmet_subsys_put(subsys);
 }
...
...
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
 		ctrl->cntlid, ctrl->kato);
 
-	ctrl->ops->delete_ctrl(ctrl);
+	nvmet_ctrl_fatal_error(ctrl);
 }
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
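The keep-alive timeout now goes through nvmet_ctrl_fatal_error() rather than calling ctrl->ops->delete_ctrl() directly, so the controller reports a fatal status (CSTS.CFS) and the actual teardown runs from workqueue context. Roughly, that path in nvmet core looks like the following (a sketch reconstructed from the surrounding code, not part of this diff):

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}

This is also why the next hunk adds flush_work()/cancel_work_sync() to nvmet_ctrl_free(): once fatal_err_work can be scheduled, teardown must ensure neither it nor async_event_work is still running when the ctrl structure is freed.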
...
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
 	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);
...
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
 	kfree(subsys);
 }
 
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		ctrl->ops->delete_ctrl(ctrl);
+	mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
 	kref_put(&subsys->ref, nvmet_subsys_free);
...
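The new nvmet_subsys_del_ctrls() walks the subsystem's controller list under subsys->lock and asks each transport to delete its controller; nvmet_subsys_release() (the configfs hunk above) calls it so that deleting a subsystem through configfs actively tears down live controllers rather than only dropping the final reference. delete_ctrl is the per-transport callback in struct nvmet_fabrics_ops, roughly (abridged from nvmet.h, shown for reference only):

struct nvmet_fabrics_ops {
	/* ... other ops elided ... */
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
};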
...
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
 	struct fcnvme_ls_disconnect_acc *acc =
 		(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_tgt_queue *queue = NULL;
 	struct nvmet_fc_tgt_assoc *assoc;
 	int ret = 0;
 	bool del_assoc = false;
...
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		assoc = nvmet_fc_find_target_assoc(tgtport,
 				be64_to_cpu(rqst->associd.association_id));
 		iod->assoc = assoc;
-		if (!assoc)
+		if (assoc) {
+			if (rqst->discon_cmd.scope ==
+					FCNVME_DISCONN_CONNECTION) {
+				queue = nvmet_fc_find_target_queue(tgtport,
+						be64_to_cpu(
+							rqst->discon_cmd.id));
+				if (!queue) {
+					nvmet_fc_tgt_a_put(assoc);
+					ret = VERR_NO_CONN;
+				}
+			}
+		} else
 			ret = VERR_NO_ASSOC;
 	}
...
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			FCNVME_LS_DISCONNECT);
 
-	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-		queue = nvmet_fc_find_target_queue(tgtport,
-				be64_to_cpu(rqst->discon_cmd.id));
-		if (queue) {
-			int qid = queue->qid;
+	/* are we to delete a Connection ID (queue) */
+	if (queue) {
+		int qid = queue->qid;
 
-			nvmet_fc_delete_target_queue(queue);
+		nvmet_fc_delete_target_queue(queue);
 
-			/* release the get taken by find_target_queue */
-			nvmet_fc_tgt_q_put(queue);
+		/* release the get taken by find_target_queue */
+		nvmet_fc_tgt_q_put(queue);
 
-			/* tear association down if io queue terminated */
-			if (!qid)
-				del_assoc = true;
-		}
+		/* tear association down if io queue terminated */
+		if (!qid)
+			del_assoc = true;
 	}
 
 	/* release get taken in nvmet_fc_find_target_assoc */
...
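On the disconnect fix: an FC-NVME Disconnect LS may target either the whole association or a single connection (queue). The change moves the queue lookup up to where the association is validated, so a bad connection ID is rejected with VERR_NO_CONN (and the association reference dropped) before the accept response is built, instead of being discovered after the response was already formatted. For reference, the scope values are defined in include/linux/nvme-fc.h along these lines (quoted from memory):

enum {
	FCNVME_DISCONN_ASSOCIATION	= 0,
	FCNVME_DISCONN_CONNECTION	= 1,
};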
...
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
 
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
 void nvmet_put_namespace(struct nvmet_ns *ns);
...
...
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;
 
+	ib_dma_sync_single_for_device(ndev->device,
+		cmd->sge[0].addr, cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
...
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+		rsp->send_sge.addr, rsp->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
...
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->send_sge.addr, cmd->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
...
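The nvmet-rdma hunks pair DMA sync calls around every CPU access to the command and response buffers, which is required for correctness on platforms without cache-coherent DMA: sync for the CPU before reading a received SQE, sync for the device before posting a buffer back to the HCA. The general ownership-handoff pattern looks like this (a minimal sketch with placeholder names, not the driver's code):

#include <rdma/ib_verbs.h>

static void example_handle_recv(struct ib_device *dev, u64 dma_addr,
		size_t len)
{
	/* device -> CPU: make the DMA'd bytes visible before parsing */
	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);

	/* ... CPU reads and parses the buffer here ... */

	/* CPU -> device: hand the buffer back before re-posting it */
	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
}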