Commit 7ef10f3c authored by Jens Axboe

Merge branch 'nvme-4.13' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"Three more fixes for 4.13 below:

 - fix the incorrect bit for the doorbell buffer features (Changpeng Liu)
 - always use a 4k MR page size for RDMA, to not get in trouble with
   offset in non-4k page size systems (no-op for x86) (Max Gurtovoy)
 - and a fix for the new nvme host memory buffer support to keep the
   descriptor list DMA mapped when the buffer is enabled (me)"
parents 015a2f82 223694b9
@@ -109,6 +109,7 @@ struct nvme_dev {
 	/* host memory buffer support: */
 	u64 host_mem_size;
 	u32 nr_host_mem_descs;
+	dma_addr_t host_mem_descs_dma;
 	struct nvme_host_mem_buf_desc *host_mem_descs;
 	void **host_mem_desc_bufs;
 };
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
-	size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+	u64 dma_addr = dev->host_mem_descs_dma;
 	struct nvme_command c;
-	u64 dma_addr;
 	int ret;
-	dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
-			DMA_TO_DEVICE);
-	if (dma_mapping_error(dev->dev, dma_addr))
-		return -ENOMEM;
 	memset(&c, 0, sizeof(c));
 	c.features.opcode	= nvme_admin_set_features;
 	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 			"failed to set host mem (err %d, flags %#x).\n",
 			ret, bits);
 	}
-	dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
 	return ret;
 }
@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 	kfree(dev->host_mem_desc_bufs);
 	dev->host_mem_desc_bufs = NULL;
-	kfree(dev->host_mem_descs);
+	dma_free_coherent(dev->dev,
+			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+			dev->host_mem_descs, dev->host_mem_descs_dma);
 	dev->host_mem_descs = NULL;
 }
@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
 	struct nvme_host_mem_buf_desc *descs;
 	u32 chunk_size, max_entries, len;
+	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
 	max_entries = tmp;
-	descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+			&descs_dma, GFP_KERNEL);
 	if (!descs)
 		goto out;
@@ -1661,6 +1659,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	dev->nr_host_mem_descs = i;
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
+	dev->host_mem_descs_dma = descs_dma;
 	dev->host_mem_desc_bufs = bufs;
 	return 0;
@@ -1674,7 +1673,8 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	kfree(bufs);
 out_free_descs:
-	kfree(descs);
+	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+			descs_dma);
 out:
 	/* try a smaller chunk size if we failed early */
 	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
......
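The host memory buffer hunks above all implement one idea: allocate the descriptor list from coherent DMA memory once, record its bus address in host_mem_descs_dma, and release it with dma_free_coherent(), instead of wrapping every Set Features command in dma_map_single()/dma_unmap_single(). That keeps the list mapped for as long as the buffer may be enabled, which matters because the controller may read the descriptors at any time while the feature is on, not just while the command is in flight. A minimal sketch of the same pattern, using made-up names (hmb_state, hmb_alloc_desc_table, hmb_free_desc_table) rather than the driver's own functions:

	#include <linux/dma-mapping.h>
	#include <linux/nvme.h>

	struct hmb_state {
		struct nvme_host_mem_buf_desc *descs;	/* CPU view of the descriptor table */
		dma_addr_t descs_dma;			/* bus address handed to the controller */
		size_t table_size;			/* bytes allocated for the table */
	};

	static int hmb_alloc_desc_table(struct device *dev, struct hmb_state *hmb,
					u32 max_entries)
	{
		hmb->table_size = max_entries * sizeof(*hmb->descs);
		/* Coherent allocation: the DMA mapping stays valid until freed,
		 * mirroring the diff's use of dma_zalloc_coherent(). */
		hmb->descs = dma_zalloc_coherent(dev, hmb->table_size,
						 &hmb->descs_dma, GFP_KERNEL);
		return hmb->descs ? 0 : -ENOMEM;
	}

	static void hmb_free_desc_table(struct device *dev, struct hmb_state *hmb)
	{
		dma_free_coherent(dev, hmb->table_size, hmb->descs, hmb->descs_dma);
		hmb->descs = NULL;
	}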
@@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
 	int nr;
-	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+	/*
+	 * Align the MR to a 4K page size to match the ctrl page size and
+	 * the block virtual boundary.
+	 */
+	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
 	if (nr < count) {
 		if (nr < 0)
 			return nr;
@@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 		goto out_cleanup_queue;
 	ctrl->ctrl.max_hw_sectors =
-		(ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+		(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
 	error = nvme_init_identify(&ctrl->ctrl);
 	if (error)
......
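Together, the two RDMA hunks pin the memory region page size to 4K on every architecture: ib_map_mr_sg() now builds the MR from SZ_4K pages, matching the controller page size and the block layer virtual boundary, and max_hw_sectors is derived from that same unit. Since ilog2(SZ_4K) - 9 = 12 - 9 = 3, each MR page covers 2^3 = 8 sectors of 512 bytes, so max_hw_sectors = (max_fr_pages - 1) * 8 regardless of the system PAGE_SIZE. As a purely hypothetical example, a device allowing max_fr_pages = 256 ends up with max_hw_sectors = 255 * 8 = 2040, i.e. just under 1 MiB per request, on 4K and 64K page kernels alike.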
@@ -254,7 +254,7 @@ enum {
 	NVME_CTRL_VWC_PRESENT		= 1 << 0,
 	NVME_CTRL_OACS_SEC_SUPP		= 1 << 0,
 	NVME_CTRL_OACS_DIRECTIVES	= 1 << 5,
-	NVME_CTRL_OACS_DBBUF_SUPP	= 1 << 7,
+	NVME_CTRL_OACS_DBBUF_SUPP	= 1 << 8,
 };
 struct nvme_lbaf {
......
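This one-bit change is the doorbell buffer fix from the commit message: in the Identify Controller OACS field, bit 7 advertises Virtualization Management while bit 8 advertises Doorbell Buffer Config, so testing 1 << 7 would detect the feature on the wrong controllers. A hedged sketch of how the corrected constant would be consumed, with check_dbbuf_support and its argument as illustrative names only, not the driver's code:

	#include <linux/nvme.h>

	/* Illustrative only: decide whether to set up the doorbell buffer
	 * config feature from the Identify Controller OACS field. */
	static bool check_dbbuf_support(struct nvme_id_ctrl *id)
	{
		u16 oacs = le16_to_cpu(id->oacs);

		/* Bit 8 (NVME_CTRL_OACS_DBBUF_SUPP) is Doorbell Buffer Config;
		 * bit 7 is Virtualization Management, hence the fix above. */
		return oacs & NVME_CTRL_OACS_DBBUF_SUPP;
	}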