Commit 75199aa5 authored by Jens Axboe

Merge branch 'nvme-5.2' of git://git.infradead.org/nvme into for-5.2/block

Pull NVMe changes from Christoph:

"Below is the first batch of nvme updates for 5.2. This includes the
performance improvements for single segment I/O on PCIe, which introduce
new block helpers, so it might be a good idea to get them in early.

 - various performance optimizations in the PCIe code (Keith and me)
 - new block helpers to support the above (me); a usage sketch follows right after this message
 - nvmet error conversion cleanup (me)
 - nvmet-fc variable sized array cleanup (Gustavo)
 - passthrough ioctl error printk cleanup (Kenneth)
 - small nvmet fixes (Max)
 - endianness conversion cleanup (Max)
 - nvmet-tcp fastpath completion optimization (Sagi)"
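As a quick illustration of the new helpers mentioned above, here is a minimal, hypothetical sketch of a single-segment mapping fast path. It is not the actual nvme-pci code; req_bvec(), rq_dma_dir() and dma_map_bvec() come from the blkdev.h hunk at the end of this merge, while map_single_segment() and its dev/dma_addr/dma_len parameters are invented for illustration.

/*
 * Minimal sketch (not the actual nvme-pci code): map a request that is
 * known to have a single physical segment without building a scatterlist,
 * using the new req_bvec(), rq_dma_dir() and dma_map_bvec() helpers.
 */
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

static int map_single_segment(struct device *dev, struct request *rq,
			      dma_addr_t *dma_addr, unsigned int *dma_len)
{
	struct bio_vec bv;

	/* Requests with more than one segment fall back to a scatterlist path. */
	if (blk_rq_nr_phys_segments(rq) != 1)
		return -EINVAL;

	bv = req_bvec(rq);	/* first (and only) bvec of the request */
	*dma_addr = dma_map_bvec(dev, &bv, rq_dma_dir(rq), 0);
	if (dma_mapping_error(dev, *dma_addr))
		return -ENOMEM;

	*dma_len = bv.bv_len;
	return 0;
}

The point of this path, per the commit subjects below, is to skip scatterlist allocation and iteration entirely when a request cannot have more than one data segment.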

* 'nvme-5.2' of git://git.infradead.org/nvme: (24 commits)
  nvme: log the error status on Identify Namespace failure
  nvmet: add safety check for subsystem lock during nvmet_ns_changed
  nvmet: never fail double namespace enablement
  nvme-pci: tidy up nvme_map_data
  nvme-pci: optimize mapping single segment requests using SGLs
  nvme-pci: optimize mapping of small single segment requests
  nvme-pci: remove the inline scatterlist optimization
  nvme-pci: split metadata handling from nvme_map_data / nvme_unmap_data
  nvme-pci: do not build a scatterlist to map metadata
  nvme-pci: only call nvme_unmap_data for requests transferring data
  nvme-pci: merge nvme_free_iod into nvme_unmap_data
  nvme-pci: move the call to nvme_cleanup_cmd out of nvme_unmap_data
  nvme-pci: remove nvme_init_iod
  block: add dma_map_bvec helper
  block: add a rq_dma_dir helper
  block: add a rq_integrity_vec helper
  block: add a req_bvec helper
  nvme-pci: remove unused nvme_iod member
  nvme-pci: remove q_dmadev from nvme_queue
  nvme-pci: use a flag for polled queues
  ...
parents 2b24e6f6 d0de579c
@@ -1105,7 +1105,7 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
 
 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 	if (error) {
-		dev_warn(ctrl->device, "Identify namespace failed\n");
+		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
 		kfree(id);
 		return NULL;
 	}
@@ -1588,7 +1588,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
 static void nvme_update_disk_info(struct gendisk *disk,
 		struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+	sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
 	unsigned short bs = 1 << ns->lba_shift;
 
 	blk_mq_freeze_queue(disk->queue);
@@ -2549,7 +2549,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	ctrl->crdt[2] = le16_to_cpu(id->crdt3);
 
 	ctrl->oacs = le16_to_cpu(id->oacs);
-	ctrl->oncs = le16_to_cpup(&id->oncs);
+	ctrl->oncs = le16_to_cpu(id->oncs);
 	ctrl->oaes = le32_to_cpu(id->oaes);
 	atomic_set(&ctrl->abort_limit, id->acl + 1);
 	ctrl->vwc = id->vwc;
...
(diff for one file in this merge is collapsed and not shown)
@@ -214,6 +214,8 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
 {
 	struct nvmet_ctrl *ctrl;
 
+	lockdep_assert_held(&subsys->lock);
+
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
@@ -494,13 +496,14 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	int ret;
 
 	mutex_lock(&subsys->lock);
-	ret = -EMFILE;
-	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
-		goto out_unlock;
 	ret = 0;
 	if (ns->enabled)
 		goto out_unlock;
+
+	ret = -EMFILE;
+	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
 	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
...
@@ -128,12 +128,12 @@ struct nvmet_fc_tgt_queue {
 	struct nvmet_cq			nvme_cq;
 	struct nvmet_sq			nvme_sq;
 	struct nvmet_fc_tgt_assoc	*assoc;
-	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
 	struct list_head		fod_list;
 	struct list_head		pending_cmd_list;
 	struct list_head		avail_defer_list;
 	struct workqueue_struct		*work_q;
 	struct kref			ref;
+	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
 } __aligned(sizeof(unsigned long long));
 
 struct nvmet_fc_tgt_assoc {
@@ -588,9 +588,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	if (qid > NVMET_NR_QUEUES)
 		return NULL;
 
-	queue = kzalloc((sizeof(*queue) +
-			(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
-			GFP_KERNEL);
+	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
 	if (!queue)
 		return NULL;
@@ -603,7 +601,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	if (!queue->work_q)
 		goto out_a_put;
 
-	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
 	queue->qid = qid;
 	queue->sqsize = sqsize;
 	queue->assoc = assoc;
...
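The change above replaces a hand-rolled "struct plus trailing array" allocation with a flexible array member sized by struct_size(), which folds the multiply-and-add into one overflow-safe expression. The snippet below is a generic illustration of that pattern; the item and item_table types and item_table_alloc() are made up for illustration, not nvmet-fc code.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	u64	key;
	u64	val;
};

struct item_table {
	unsigned int	count;
	struct item	items[];	/* flexible array member, no separate pointer */
};

static struct item_table *item_table_alloc(unsigned int count)
{
	struct item_table *t;

	/* struct_size(): overflow-safe sizeof(*t) + count * sizeof(t->items[0]) */
	t = kzalloc(struct_size(t, items, count), GFP_KERNEL);
	if (!t)
		return NULL;

	t->count = count;
	return t;
}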
@@ -196,7 +196,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
 			GFP_KERNEL, 0, bio);
 	if (ret && ret != -EOPNOTSUPP) {
 		req->error_slba = le64_to_cpu(range->slba);
-		return blk_to_nvme_status(req, errno_to_blk_status(ret));
+		return errno_to_nvme_status(req, ret);
 	}
 	return NVME_SC_SUCCESS;
 }
@@ -252,7 +252,6 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 {
 	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
 	struct bio *bio = NULL;
-	u16 status = NVME_SC_SUCCESS;
 	sector_t sector;
 	sector_t nr_sector;
 	int ret;
@@ -264,13 +263,12 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
 			GFP_KERNEL, &bio, 0);
-	status = blk_to_nvme_status(req, errno_to_blk_status(ret));
 	if (bio) {
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
 		submit_bio(bio);
 	} else {
-		nvmet_req_complete(req, status);
+		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
 	}
 }
...
@@ -371,7 +371,8 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
 
 	cmd->state = NVMET_TCP_SEND_DATA_PDU;
 	pdu->hdr.type = nvme_tcp_c2h_data;
-	pdu->hdr.flags = NVME_TCP_F_DATA_LAST;
+	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
+						NVME_TCP_F_DATA_SUCCESS : 0);
 	pdu->hdr.hlen = sizeof(*pdu);
 	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
 	pdu->hdr.plen =
@@ -541,9 +542,20 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 	if (queue->data_digest) {
 		cmd->state = NVMET_TCP_SEND_DDGST;
 		cmd->offset = 0;
-	} else {
-		nvmet_setup_response_pdu(cmd);
-	}
+	} else {
+		if (queue->nvme_sq.sqhd_disabled) {
+			cmd->queue->snd_cmd = NULL;
+			nvmet_tcp_put_cmd(cmd);
+		} else {
+			nvmet_setup_response_pdu(cmd);
+		}
+	}
+
+	if (queue->nvme_sq.sqhd_disabled) {
+		kfree(cmd->iov);
+		sgl_free(cmd->req.sg);
+	}
+
 	return 1;
 }
@@ -619,7 +631,13 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
 		return ret;
 
 	cmd->offset += ret;
-	nvmet_setup_response_pdu(cmd);
+
+	if (queue->nvme_sq.sqhd_disabled) {
+		cmd->queue->snd_cmd = NULL;
+		nvmet_tcp_put_cmd(cmd);
+	} else {
+		nvmet_setup_response_pdu(cmd);
+	}
 	return 1;
 }
...
@@ -641,6 +641,13 @@ static inline bool blk_account_rq(struct request *rq)
 
 #define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
 
+#define rq_dma_dir(rq) \
+	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define dma_map_bvec(dev, bv, dir, attrs) \
+	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
+	(dir), (attrs))
+
 static inline bool queue_is_mq(struct request_queue *q)
 {
 	return q->mq_ops;
@@ -932,6 +939,17 @@ static inline unsigned int blk_rq_payload_bytes(struct request *rq)
 	return blk_rq_bytes(rq);
 }
 
+/*
+ * Return the first full biovec in the request.  The caller needs to check that
+ * there are any bvecs before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+		return rq->special_vec;
+	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 						     int op)
 {
@@ -1548,6 +1566,17 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
 	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
 }
 
+/*
+ * Return the first bvec that contains integrity data.  Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+	if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+		return NULL;
+	return rq->bio->bi_integrity->bip_vec;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1622,6 +1651,11 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
 	return 0;
 }
 
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct block_device_operations {
...
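As the comment on rq_integrity_vec() above notes, only drivers limited to a single integrity segment should use it. Below is a minimal, hypothetical sketch of mapping a request's metadata buffer with rq_integrity_vec(), rq_dma_dir() and dma_map_bvec(); it is not the actual nvme-pci metadata path, and map_metadata() with its dev/meta_dma parameters is invented for illustration.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

static int map_metadata(struct device *dev, struct request *rq,
			dma_addr_t *meta_dma)
{
	/*
	 * NULL when integrity support is compiled out or the queue allows
	 * more than one integrity segment; the caller must already know the
	 * request actually carries metadata.
	 */
	struct bio_vec *iv = rq_integrity_vec(rq);

	if (!iv)
		return -EINVAL;

	*meta_dma = dma_map_bvec(dev, iv, rq_dma_dir(rq), 0);
	if (dma_mapping_error(dev, *meta_dma))
		return -ENOMEM;

	return 0;
}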