Commit fb857b0b authored by Jens Axboe

Merge tag 'nvme-6.2-2022-12-22' of git://git.infradead.org/nvme into block-6.2

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.2

 - fix doorbell buffer value endianness (Klaus Jensen)
 - fix Linux vs NVMe page size mismatch (Keith Busch)
 - fix a potential memory access beyond the allocation limit
   (Keith Busch)
 - fix a multipath vs blktrace NULL pointer dereference
   (Yanjun Zhang)"

* tag 'nvme-6.2-2022-12-22' of git://git.infradead.org/nvme:
  nvme: fix multipath crash caused by flush request when blktrace is enabled
  nvme-pci: fix page size checks
  nvme-pci: fix mempool alloc size
  nvme-pci: fix doorbell buffer value endianness
parents 53eab8e7 3659fb5a
@@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
-	if (req->cmd_flags & REQ_NVME_MPATH)
+	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
 		trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
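For context (an illustrative note, not part of the commit): a flush generated by the block layer's flush machinery completes as a request with no bio attached, so req->bio is NULL here. With blktrace enabled on a multipath namespace, the old unconditional call handed that NULL bio to the tracepoint probe, which reads fields of the bio, hence the crash. A rough sketch of the fixed call site with the hazard annotated:

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		/* the blktrace probe dereferences the bio, so only trace
		 * when a bio is actually attached to the request */
		trace_block_bio_complete(ns->head->disk->queue, req->bio);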
@@ -36,7 +36,7 @@
 #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
-#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 /*
  * These can be higher, but we need to ensure that any command doesn't
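A hedged worked example of why this matters (illustrative values, assuming the usual 4 KiB NVME_CTRL_PAGE_SIZE and a 16-byte struct nvme_sgl_desc): on a kernel built with 4 KiB pages the two macros agree, but on a 64 KiB-page kernel the old definition over-counts how many SGL descriptors fit in one controller-page-sized segment:

	old: SGES_PER_PAGE = PAGE_SIZE / 16           = 65536 / 16 = 4096
	new: SGES_PER_PAGE = NVME_CTRL_PAGE_SIZE / 16 =  4096 / 16 =  256

Since the driver's descriptor pool hands out NVME_CTRL_PAGE_SIZE-sized blocks, the larger count could let an SGL segment be filled past the end of its allocation.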
@@ -144,9 +144,9 @@ struct nvme_dev {
 	mempool_t *iod_mempool;
 	/* shadow doorbell buffer support: */
-	u32 *dbbuf_dbs;
+	__le32 *dbbuf_dbs;
 	dma_addr_t dbbuf_dbs_dma_addr;
-	u32 *dbbuf_eis;
+	__le32 *dbbuf_eis;
 	dma_addr_t dbbuf_eis_dma_addr;
 	/* host memory buffer support: */
@@ -208,10 +208,10 @@ struct nvme_queue {
 #define NVMEQ_SQ_CMB		1
 #define NVMEQ_DELETE_ERROR	2
 #define NVMEQ_POLLED		3
-	u32 *dbbuf_sq_db;
-	u32 *dbbuf_cq_db;
-	u32 *dbbuf_sq_ei;
-	u32 *dbbuf_cq_ei;
+	__le32 *dbbuf_sq_db;
+	__le32 *dbbuf_cq_db;
+	__le32 *dbbuf_sq_ei;
+	__le32 *dbbuf_cq_ei;
 	struct completion delete_done;
 };
@@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
 }
 /* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
-					      volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+					      volatile __le32 *dbbuf_ei)
 {
 	if (dbbuf_db) {
-		u16 old_value;
+		u16 old_value, event_idx;
 		/*
 		 * Ensure that the queue is written before updating
@@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 		 */
 		wmb();
-		old_value = *dbbuf_db;
-		*dbbuf_db = value;
+		old_value = le32_to_cpu(*dbbuf_db);
+		*dbbuf_db = cpu_to_le32(value);
 		/*
 		 * Ensure that the doorbell is updated before reading the event
@@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 		 */
 		mb();
-		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+		event_idx = le32_to_cpu(*dbbuf_ei);
+		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
 			return false;
 	}
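A hedged illustration of the endianness issue (example values only): the shadow doorbell and event-index buffers are shared with the controller, and the NVMe specification defines their contents as little-endian. On a big-endian host, storing the native u32 value 1 would leave the bytes 00 00 00 01 in memory, which the controller decodes as 0x01000000; the host would mis-read the controller-written event index the same way. The added conversions make both sides agree and compile to nothing on little-endian machines:

	__le32 shadow;			/* stands in for the shared doorbell slot */
	u16 value = 1;			/* example doorbell value */

	shadow = cpu_to_le32(value);	/* memory bytes: 01 00 00 00 on any host */
	value = le32_to_cpu(shadow);	/* reads back 1 on any host */

Splitting out event_idx also keeps the le32_to_cpu() conversion in one obvious place before the 16-bit comparison in nvme_dbbuf_need_event().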
@@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
  */
 static int nvme_pci_npages_prp(void)
 {
-	unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
-				      NVME_CTRL_PAGE_SIZE);
-	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
 }
 /*
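A worked example of the corrected math (illustrative values: a 4 KiB NVME_CTRL_PAGE_SIZE and, say, an NVME_MAX_KB_SZ of 4096, i.e. a 4 MiB worst-case transfer): each 4 KiB PRP list holds 511 8-byte entries plus one pointer chaining to the next list, which is what the "- 8" accounts for.

	max_bytes = 4096 * 1024 + 4096           = 4198400
	nprps     = DIV_ROUND_UP(4198400, 4096)  = 1025
	pages     = DIV_ROUND_UP(8 * 1025, 4088) = 3

The old expression never converted NVME_MAX_KB_SZ to bytes and divided by PAGE_SIZE - 8, so with the same inputs it returned 1; iod allocations sized from that result were too small for the largest transfers, which is the "memory access beyond the allocation limit" the pull message refers to.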
@@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
 static int nvme_pci_npages_sgl(void)
 {
 	return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
-			PAGE_SIZE);
+			NVME_CTRL_PAGE_SIZE);
 }
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 		sge->length = cpu_to_le32(entries * sizeof(*sge));
 		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
 	} else {
-		sge->length = cpu_to_le32(PAGE_SIZE);
+		sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
 		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
 	}
 }