Commit 9b81d512 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull more block layer fixes from Jens Axboe:
 "I wasn't going to send off a new pull before next week, but the blk
  flush fix from Jan from the other day introduced a regression.  It's
  rare enough not to have hit during testing, since it requires both a
  device that rejects the first flush, and bad timing while it does
  that.  But since someone did hit it, let's get the revert into 4.4-rc3
  so we don't have a released rc with that known issue.

  Apart from that revert, three other fixes:

   - From Christoph, a fix for a missing unmap in NVMe request
     preparation.

   - An NVMe fix from Nishanth for data corruption on powerpc.

   - Also from Christoph, fix a list_del() attempt on blk-mq that didn't
     have a matching list_add() at timer start"

* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "blk-flush: Queue through IO scheduler when flush not required"
  block: fix blk_abort_request for blk-mq drivers
  nvme: add missing unmaps in nvme_queue_rq
  NVMe: default to 4k device page size
parents 4cf193b4 dcd8376c
@@ -422,7 +422,7 @@ void blk_insert_flush(struct request *rq)
 		if (q->mq_ops) {
 			blk_mq_insert_request(rq, false, false, true);
 		} else
-			q->elevator->type->ops.elevator_add_req_fn(q, rq);
+			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}
 
......
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
 {
 	if (blk_mark_rq_complete(req))
 		return;
-	blk_delete_timer(req);
-	if (req->q->mq_ops)
+
+	if (req->q->mq_ops) {
 		blk_mq_rq_timed_out(req, false);
-	else
+	} else {
+		blk_delete_timer(req);
 		blk_rq_timed_out(req);
+	}
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
......
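
Editor's note on the blk_abort_request hunk above: blk_delete_timer() ends in a list_del() on the request's timeout_list entry, but a blk-mq request never saw the matching list_add() from blk_add_timer(), which is exactly the problem the pull message calls out. The following is a minimal user-space sketch, not driver code; the struct and helpers are simplified stand-ins for the kernel's list_head API, and the variable names are invented. It only illustrates why a list_del() on a never-linked entry rewires whatever its stale pointers happen to reference:

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct list_head helpers. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	/* Unlinks by patching whatever nodes entry *claims* as neighbours. */
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

int main(void)
{
	struct list_head timeout_list, linked_req, never_added;
	struct list_head stale_a, stale_b;	/* unrelated memory */

	INIT_LIST_HEAD(&timeout_list);
	INIT_LIST_HEAD(&stale_a);
	INIT_LIST_HEAD(&stale_b);

	/* Legacy request: the timer start really did a list_add(). */
	list_add_tail(&linked_req, &timeout_list);
	list_del(&linked_req);			/* fine: neighbours are real */

	/* blk-mq request: never added, pointers hold leftover garbage. */
	never_added.next = &stale_a;
	never_added.prev = &stale_b;
	list_del(&never_added);			/* scribbles over unrelated nodes */

	printf("stale_a intact? %s\n",
	       stale_a.prev == &stale_a ? "yes" : "no (corrupted)");
	printf("stale_b intact? %s\n",
	       stale_b.next == &stale_b ? "yes" : "no (corrupted)");
	return 0;
}

The fix avoids this by only calling blk_delete_timer() on the legacy path, and letting blk_mq_rq_timed_out() handle the blk-mq case.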
@@ -896,21 +896,30 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 
 	nvme_set_info(cmd, iod, req_completion);
 	spin_lock_irq(&nvmeq->q_lock);
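
Editor's note: the hunk above applies one rule three times — once dma_map_sg() has mapped the request's data scatterlist, every later bail-out to error_cmd has to unmap it again, or the mapping leaks. A tiny user-space sketch of that cleanup-on-every-exit pattern follows; map_buffer/unmap_buffer, setup_integrity and queue_request are invented stand-ins, not NVMe driver APIs:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins: map_buffer()/unmap_buffer() play the role of
 * dma_map_sg()/dma_unmap_sg() — a resource acquired early that every
 * later error exit has to give back. */
static void *map_buffer(size_t len)  { return malloc(len); }
static void  unmap_buffer(void *buf) { free(buf); }

/* Stand-in for the integrity setup steps that can fail mid-way. */
static int setup_integrity(int should_succeed) { return should_succeed ? 0 : -1; }

static int queue_request(size_t len, int integrity_ok)
{
	void *mapped = map_buffer(len);
	if (!mapped)
		return -1;		/* nothing mapped yet, nothing to undo */

	if (setup_integrity(integrity_ok) != 0) {
		/* This is the kind of exit the fix patches up: without the
		 * unmap, the early mapping would leak on every failure. */
		unmap_buffer(mapped);
		return -1;
	}

	/* ...submit, and eventually complete, the request... */
	unmap_buffer(mapped);
	return 0;
}

int main(void)
{
	printf("integrity ok:     %d\n", queue_request(4096, 1));
	printf("integrity failed: %d\n", queue_request(4096, 0));
	return 0;
}
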
@@ -1728,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u32 aqa;
 	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
-	unsigned page_shift = PAGE_SHIFT;
+	/*
+	 * default to a 4K page size, with the intention to update this
+	 * path in the future to accomodate architectures with differing
+	 * kernel and IO page sizes.
+	 */
+	unsigned page_shift = 12;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
 		dev_err(dev->dev,
@@ -1739,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 				1 << page_shift);
 		return -ENODEV;
 	}
-	if (page_shift > dev_page_max) {
-		dev_info(dev->dev,
-			"Device maximum page size (%u) smaller than "
-			"host (%u); enabling work-around\n",
-			1 << dev_page_max, 1 << page_shift);
-		page_shift = dev_page_max;
-	}
 
 	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
 					NVME_CAP_NSSRC(cap) : 0;
......
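
Editor's note on the page-size hunks: the `+ 12` terms come from the NVMe spec, where CAP.MPSMIN and CAP.MPSMAX encode the device's supported memory page sizes as 2^(12 + MPS), i.e. relative to 4K. With the driver's view now pinned to 4K (page_shift = 12), the MPSMAX work-around removed in the second hunk has nothing left to clamp. A small stand-alone sketch of that arithmetic follows; the macros mirror the driver's NVME_CAP_MPSMIN/NVME_CAP_MPSMAX helpers, and the CAP value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* CAP.MPSMIN lives in bits 51:48 and CAP.MPSMAX in bits 55:52; these
 * mirror the driver's NVME_CAP_MPSMIN/NVME_CAP_MPSMAX helpers. */
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

int main(void)
{
	/* Invented CAP value: only the MPS fields matter for this sketch.
	 * MPSMIN = 0 and MPSMAX = 0 describe a device limited to 4K pages. */
	uint64_t cap = ((uint64_t)0 << 52) | ((uint64_t)0 << 48);

	unsigned page_shift       = 12;	/* the driver's new fixed 4K view   */
	unsigned ppc64_page_shift = 16;	/* a 64K-page kernel, as on powerpc */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	printf("device page size range: %u..%u bytes\n",
	       1u << dev_page_min, 1u << dev_page_max);
	printf("4K driver view acceptable?  %s\n",
	       page_shift >= dev_page_min ? "yes" : "no");
	printf("64K kernel view too large?  %s\n",
	       ppc64_page_shift > dev_page_max ? "yes" : "no");
	return 0;
}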