Commit 6caa0503 authored by Jens Axboe

Merge branch 'nvme-4.13' of git://git.infradead.org/nvme into for-linus

Pull NVMe changes from Christoph:

"The fixes are getting really small now - two for FC, one for PCI, one
 for the fabrics layer and one for the target."
parents 3e09fc80 e9d8a0fd
@@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
 		int i;
 
 		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
-			if (opt_tokens[i].token & ~allowed_opts) {
+			if ((opt_tokens[i].token & opts->mask) &&
+			    (opt_tokens[i].token & ~allowed_opts)) {
 				pr_warn("invalid parameter '%s'\n",
 					opt_tokens[i].pattern);
 			}
...
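The fabrics hunk narrows the warning to options that the user actually set (opts->mask) and that are outside allowed_opts, instead of warning about every token the transport does not accept. A minimal userspace sketch of that check, with made-up option bits and patterns rather than the kernel's opt_tokens table:

    #include <stdio.h>

    struct opt_token {
        unsigned int token;
        const char *pattern;
    };

    static const struct opt_token opt_tokens[] = {
        { 1u << 0, "transport=%s" },
        { 1u << 1, "nqn=%s" },
        { 1u << 2, "queue_size=%d" },
    };

    /* Warn only about options that were set AND fall outside the allowed mask. */
    static void warn_disallowed(unsigned int set_mask, unsigned int allowed_opts)
    {
        size_t i;

        for (i = 0; i < sizeof(opt_tokens) / sizeof(opt_tokens[0]); i++) {
            if ((opt_tokens[i].token & set_mask) &&
                (opt_tokens[i].token & ~allowed_opts))
                printf("invalid parameter '%s'\n", opt_tokens[i].pattern);
        }
    }

    int main(void)
    {
        /* queue_size was set, but only transport and nqn are allowed here. */
        warn_disallowed((1u << 0) | (1u << 2), (1u << 0) | (1u << 1));
        return 0;
    }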
@@ -801,6 +801,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 		return;
 	}
 
+	nvmeq->cqe_seen = 1;
 	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -830,10 +831,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq)
 		consumed++;
 	}
 
-	if (consumed) {
+	if (consumed)
 		nvme_ring_cq_doorbell(nvmeq);
-		nvmeq->cqe_seen = 1;
-	}
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
...
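The PCI change moves the cqe_seen flag out of the polling loop and into the per-completion handler, so it is set only when a completion entry is actually handled rather than once per pass over the queue. A rough userspace sketch of that shape; the struct and function names below are illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    struct cq {
        int entries[8];
        unsigned int head;
        unsigned int count;   /* number of valid entries to consume */
        bool cqe_seen;        /* reported back to the interrupt handler */
    };

    /* Per-entry handler: the "seen" flag is set here, only when an entry is
     * actually handled, mirroring where the fix sets nvmeq->cqe_seen = 1. */
    static void handle_entry(struct cq *q, int entry)
    {
        if (entry < 0)        /* an entry we deliberately skip */
            return;
        q->cqe_seen = true;
        printf("handled %d\n", entry);
    }

    /* Polling loop: still rings the doorbell once per pass, but no longer
     * decides on its own whether anything was "seen". */
    static void process_cq(struct cq *q)
    {
        unsigned int consumed = 0;

        while (consumed < q->count) {
            handle_entry(q, q->entries[q->head]);
            q->head = (q->head + 1) % 8;
            consumed++;
        }
        if (consumed)
            printf("ring doorbell after %u entries\n", consumed);
    }

    int main(void)
    {
        struct cq q = { .entries = { 3, -1, 7 }, .count = 3 };

        process_cq(&q);
        printf("cqe_seen=%d\n", q.cqe_seen);
        return 0;
    }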
@@ -199,12 +199,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
 	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
-	memset(id->fr, ' ', sizeof(id->fr));
-	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
-
 	id->rab = 6;
 
 	/*
...
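The target hunk drops the old memset()+strncpy() sequence in favour of the copy_and_pad() calls visible in the context lines; the Identify string fields are fixed-width, space-padded and not NUL-terminated. A small userspace sketch of that kind of helper, as an illustration rather than the kernel's implementation:

    #include <stdio.h>
    #include <string.h>

    /* Fixed-width, space-padded, non-NUL-terminated copy, in the spirit of
     * the copy_and_pad() calls above. */
    static void copy_and_pad(char *dst, size_t dst_len,
                             const char *src, size_t src_len)
    {
        size_t n = src_len < dst_len ? src_len : dst_len;

        memcpy(dst, src, n);
        memset(dst + n, ' ', dst_len - n);  /* pad the remainder with spaces */
    }

    int main(void)
    {
        char mn[40];    /* e.g. the 40-byte model number field in Identify data */

        copy_and_pad(mn, sizeof(mn), "Linux", strlen("Linux"));
        printf("[%.*s]\n", (int)sizeof(mn), mn);
        return 0;
    }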
@@ -394,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
 static struct nvmet_fc_ls_iod *
 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
 {
-	static struct nvmet_fc_ls_iod *iod;
+	struct nvmet_fc_ls_iod *iod;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
@@ -471,7 +471,7 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
 static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
-	static struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_fcp_iod *fod;
 
 	lockdep_assert_held(&queue->qlock);
 
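Both allocator hunks above drop a stray 'static' from a local pointer: with 'static', every caller, including concurrent callers on other CPUs, shares one pointer object instead of getting a fresh per-call variable. A tiny userspace illustration of that sharing; nothing here is the driver's code:

    #include <stdio.h>

    /* A 'static' local is a single object in static storage, shared by every
     * invocation; it is not a fresh per-call variable. Returning its address
     * shows that the same object is reused on every call. */
    static void *static_slot_address(void)
    {
        static int *slot;

        return (void *)&slot;
    }

    int main(void)
    {
        printf("call 1: %p\ncall 2: %p\n",
               static_slot_address(), static_slot_address());
        return 0;
    }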
@@ -704,7 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
 	struct nvmet_fc_fcp_iod *fod = queue->fod;
-	struct nvmet_fc_defer_fcp_req *deferfcp;
+	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
 	unsigned long flags;
 	int i, writedataactive;
 	bool disconnect;
@@ -735,7 +735,8 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 	}
 
 	/* Cleanup defer'ed IOs in queue */
-	list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+			req_list) {
 		list_del(&deferfcp->req_list);
 		kfree(deferfcp);
 	}
...
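The last hunk switches the cleanup loop to the _safe iterator because the loop body frees the current entry, so the successor has to be grabbed before the kfree(); a plain list_for_each_entry() would read the freed entry's next pointer. A minimal userspace version of the same save-the-successor-first pattern, using a hand-rolled list rather than <linux/list.h>:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int val;
        struct node *next;
    };

    /* Free every node: the successor is saved in 'tmp' before free(), which
     * is the idea behind list_for_each_entry_safe() (tempptr in the hunk). */
    static void free_all_safe(struct node **head)
    {
        struct node *cur, *tmp;

        for (cur = *head; cur != NULL; cur = tmp) {
            tmp = cur->next;    /* stash the next pointer first */
            free(cur);          /* now the current node can go away */
        }
        *head = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                break;
            n->val = i;
            n->next = head;
            head = n;
        }
        free_all_safe(&head);
        return 0;
    }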