Commit 7c88cb00 authored by Jens Axboe

NVMe: switch to using blk_queue_write_cache()

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent eb310e23
@@ -999,6 +999,8 @@ EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		struct request_queue *q)
 {
+	bool vwc = false;
+
 	if (ctrl->max_hw_sectors) {
 		u32 max_segments =
 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
@@ -1008,9 +1010,10 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 	}
 	if (ctrl->stripe_size)
 		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
-	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+		vwc = true;
+	blk_queue_write_cache(q, vwc, vwc);
 }
 /*
...
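For reference, blk_queue_write_cache(q, wc, fua) replaces the older blk_queue_flush(q, REQ_FLUSH | REQ_FUA) pattern: the first flag tells the block layer the device has a volatile write cache (so flush requests are needed), the second that it honours FUA writes. Below is a minimal sketch of the new call pattern; the function name, queue pointer, and capability flag are placeholders for illustration, not code from this commit.

#include <linux/blkdev.h>

/* Sketch only: 'q' and 'volatile_cache' stand in for whatever a driver
 * derives from its hardware (for NVMe, the VWC bit reported by
 * Identify Controller).
 */
static void example_set_write_cache(struct request_queue *q, bool volatile_cache)
{
	/*
	 * Old style (what this commit removes for NVMe):
	 *	if (volatile_cache)
	 *		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	 *
	 * New style: state the capability explicitly. NVMe keys both the
	 * write-cache and FUA flags off the same VWC bit, hence the same
	 * value is passed twice in the diff above.
	 */
	blk_queue_write_cache(q, volatile_cache, volatile_cache);
}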