Commit ae53aea6 authored by Jens Axboe

Merge tag 'nvme-5.18-2022-03-17' of git://git.infradead.org/nvme into for-5.18/drivers

Pull NVMe updates from Christoph:

"Second round of nvme updates for Linux 5.18

 - add lockdep annotations for in-kernel sockets (Chris Leech)
 - use vmalloc for ANA log buffer (Hannes Reinecke)
 - kerneldoc fixes (Chaitanya Kulkarni)
 - cleanups (Guoqing Jiang, Chaitanya Kulkarni, me)
 - warn about shared namespaces without multipathing (me)"

* tag 'nvme-5.18-2022-03-17' of git://git.infradead.org/nvme:
  nvme: warn about shared namespaces without CONFIG_NVME_MULTIPATH
  nvme: remove nvme_alloc_request and nvme_alloc_request_qid
  nvme: cleanup how disk->disk_name is assigned
  nvmet: move the call to nvmet_ns_changed out of nvmet_ns_revalidate
  nvmet: use snprintf() with PAGE_SIZE in configfs
  nvmet: don't fold lines
  nvmet-rdma: fix kernel-doc warning for nvmet_rdma_device_removal
  nvmet-fc: fix kernel-doc warning for nvmet_fc_unregister_targetport
  nvmet-fc: fix kernel-doc warning for nvmet_fc_register_targetport
  nvme-tcp: lockdep: annotate in-kernel sockets
  nvme-tcp: don't fold the line
  nvme-tcp: don't initialize ret variable
  nvme-multipath: call bio_io_error in nvme_ns_head_submit_bio
  nvme-multipath: use vmalloc for ANA log buffer
parents bcfe9b6c ce8d7861
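
Most of the churn below comes from "nvme: remove nvme_alloc_request and nvme_alloc_request_qid": callers now open-code blk_mq_alloc_request() (or blk_mq_alloc_request_hctx() for a specific queue) followed by nvme_init_request(), which is moved out of line and exported. A minimal sketch of the resulting caller pattern — the helper name below is hypothetical and only for illustration, not a function added by this series:

#include <linux/blk-mq.h>
#include "nvme.h"	/* nvme_req_op(), nvme_init_request() */

/* Illustrative sketch of the open-coded allocation pattern used below. */
static int nvme_submit_example(struct request_queue *q, struct nvme_command *cmd)
{
        struct request *req;

        /* pick the passthrough op from the command's data direction */
        req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* copy the command into the request and apply the default timeout */
        nvme_init_request(req, cmd);

        /* ... blk_execute_rq() / blk_execute_rq_nowait() and completion ... */
        blk_mq_free_request(req);
        return 0;
}
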
@@ -2092,6 +2092,7 @@ static void loop_remove(struct loop_device *lo)
 	del_gendisk(lo->lo_disk);
 	blk_cleanup_disk(lo->lo_disk);
 	blk_mq_free_tag_set(&lo->tag_set);
 	mutex_lock(&loop_ctl_mutex);
 	idr_remove(&loop_index_idr, lo->lo_number);
 	mutex_unlock(&loop_ctl_mutex);

@@ -639,13 +639,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
 	req->rq_flags |= RQF_DONTPREP;
 }
 
-static inline unsigned int nvme_req_op(struct nvme_command *cmd)
-{
-	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
-}
-
-static inline void nvme_init_request(struct request *req,
-		struct nvme_command *cmd)
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
 {
 	if (req->q->queuedata)
 		req->timeout = NVME_IO_TIMEOUT;

@@ -661,30 +656,7 @@ static inline void nvme_init_request(struct request *req,
 	nvme_clear_nvme_request(req);
 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
+EXPORT_SYMBOL_GPL(nvme_init_request);
 
-struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags)
-{
-	struct request *req;
-
-	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
-	if (!IS_ERR(req))
-		nvme_init_request(req, cmd);
-	return req;
-}
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
-
-static struct request *nvme_alloc_request_qid(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
-{
-	struct request *req;
-
-	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
-			qid ? qid - 1 : 0);
-	if (!IS_ERR(req))
-		nvme_init_request(req, cmd);
-	return req;
-}
-
 /*
  * For something we're not in a state to send to the device the default action

@@ -1110,11 +1082,14 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	int ret;
 
 	if (qid == NVME_QID_ANY)
-		req = nvme_alloc_request(q, cmd, flags);
+		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
 	else
-		req = nvme_alloc_request_qid(q, cmd, flags, qid);
+		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+						qid ? qid - 1 : 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	nvme_init_request(req, cmd);
 
 	if (timeout)
 		req->timeout = timeout;

@@ -1304,14 +1279,15 @@ static void nvme_keep_alive_work(struct work_struct *work)
 		return;
 	}
 
-	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
+	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
 				BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(rq)) {
 		/* allocation failure, reset the controller */
 		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
 		nvme_reset_ctrl(ctrl);
 		return;
 	}
+	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
 	rq->end_io_data = ctrl;

@@ -3879,6 +3855,14 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 				nsid);
 			goto out_put_ns_head;
 		}
+
+		if (!multipath && !list_empty(&head->list)) {
+			dev_warn(ctrl->device,
+				"Found shared namespace %d, but multipathing not supported.\n",
+				nsid);
+			dev_warn_once(ctrl->device,
+	"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+		}
 	}
 
 	list_add_tail_rcu(&ns->siblings, &head->list);

@@ -3967,13 +3951,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 		goto out_cleanup_disk;
 
 	/*
-	 * Without the multipath code enabled, multiple controller per
-	 * subsystems are visible as devices and thus we cannot use the
-	 * subsystem instance.
+	 * If multipathing is enabled, the device name for all disks and not
+	 * just those that represent shared namespaces needs to be based on the
+	 * subsystem instance. Using the controller instance for private
+	 * namespaces could lead to naming collisions between shared and private
+	 * namespaces if they don't use a common numbering scheme.
+	 *
+	 * If multipathing is not enabled, disk names must use the controller
+	 * instance as shared namespaces will show up as multiple block
+	 * devices.
 	 */
-	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
+	if (ns->head->disk) {
+		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+			ctrl->instance, ns->head->instance);
+		disk->flags |= GENHD_FL_HIDDEN;
+	} else if (multipath) {
+		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
+			ns->head->instance);
+	} else {
 		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
 			ns->head->instance);
+	}
 
 	if (nvme_update_ns_info(ns, id))
 		goto out_unlink_ns;

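For reference, the naming logic above produces device nodes like the following (illustrative instance numbers only, not output from this series):

/* subsystem 0, controllers 0 and 1, shared namespace 1, multipath enabled */
    nvme0n1      /* ns_head disk, named after the subsystem instance        */
    nvme0c0n1    /* hidden per-controller node (GENHD_FL_HIDDEN)            */
    nvme0c1n1    /* hidden per-controller node                              */
/* multipath disabled (deprecated for shared namespaces, see warning above) */
    nvme0n1, nvme1n1   /* one block device per controller instance         */
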
@@ -66,9 +66,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	void *meta = NULL;
 	int ret;
 
-	req = nvme_alloc_request(q, cmd, 0);
+	req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	nvme_init_request(req, cmd);
 
 	if (timeout)
 		req->timeout = timeout;

@@ -5,10 +5,11 @@
 #include <linux/backing-dev.h>
 #include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
 #include <trace/events/block.h>
 #include "nvme.h"
 
-static bool multipath = true;
+bool multipath = true;
 module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");

@@ -79,28 +80,6 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 		blk_freeze_queue_start(h->disk->queue);
 }
 
-/*
- * If multipathing is enabled we need to always use the subsystem instance
- * number for numbering our devices to avoid conflicts between subsystems that
- * have multiple controllers and thus use the multipath-aware subsystem node
- * and those that have a single controller and use the controller node
- * directly.
- */
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
-{
-	if (!multipath)
-		return false;
-	if (!ns->head->disk) {
-		sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
-			ns->head->instance);
-		return true;
-	}
-	sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
-		ns->ctrl->instance, ns->head->instance);
-	*flags = GENHD_FL_HIDDEN;
-	return true;
-}
-
 void nvme_failover_req(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;

@@ -386,8 +365,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
 	} else {
 		dev_warn_ratelimited(dev, "no available path - failing I/O\n");
-		bio->bi_status = BLK_STS_IOERR;
-		bio_endio(bio);
+		bio_io_error(bio);
 	}
 
 	srcu_read_unlock(&head->srcu, srcu_idx);

@@ -898,7 +876,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	if (ana_log_size > ctrl->ana_log_size) {
 		nvme_mpath_stop(ctrl);
 		nvme_mpath_uninit(ctrl);
-		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
 		if (!ctrl->ana_log_buf)
 			return -ENOMEM;
 	}

@@ -915,7 +893,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
-	kfree(ctrl->ana_log_buf);
+	kvfree(ctrl->ana_log_buf);
 	ctrl->ana_log_buf = NULL;
 	ctrl->ana_log_size = 0;
 }

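The ANA log buffer allocated above grows with the number of ANA groups and the controller's maximum namespace count, so on large controllers it can exceed what a physically contiguous kmalloc() reliably provides on a fragmented system; kvmalloc()/kvfree() fall back to vmalloc for such buffers. Roughly how the driver sizes it, paraphrased from nvme_mpath_init_identify() and shown here only for context (not part of this diff):

	/* approximate sizing, for context only */
	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
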
@@ -698,9 +698,13 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
 void nvme_start_freeze(struct nvme_ctrl *ctrl);
 
+static inline unsigned int nvme_req_op(struct nvme_command *cmd)
+{
+	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
 #define NVME_QID_ANY -1
-struct request *nvme_alloc_request(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags);
+void nvme_init_request(struct request *req, struct nvme_command *cmd);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,

@@ -770,7 +774,6 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
 void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
-bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
 void nvme_failover_req(struct request *req);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);

@@ -793,20 +796,17 @@ static inline void nvme_trace_bio_complete(struct request *req)
 		trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
 
+extern bool multipath;
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
 
 #else
+#define multipath false
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
 	return false;
 }
-static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
-		int *flags)
-{
-	return false;
-}
 static inline void nvme_failover_req(struct request *req)
 {
 }

@@ -424,8 +424,9 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	return 0;
 }
 
-static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
-		unsigned int hctx_idx, unsigned int numa_node)
+static int nvme_pci_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

@@ -1428,12 +1429,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		"I/O %d QID %d timeout, aborting\n",
 		 req->tag, nvmeq->qid);
 
-	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
+	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
 					 BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(abort_req)) {
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
 	}
+	nvme_init_request(abort_req, &cmd);
 
 	abort_req->end_io_data = NULL;
 	blk_execute_rq_nowait(abort_req, false, abort_endio);

@@ -1722,7 +1724,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.complete	= nvme_pci_complete_rq,
 	.init_hctx	= nvme_admin_init_hctx,
-	.init_request	= nvme_init_request,
+	.init_request	= nvme_pci_init_request,
 	.timeout	= nvme_timeout,
 };

@@ -1732,7 +1734,7 @@ static const struct blk_mq_ops nvme_mq_ops = {
 	.complete	= nvme_pci_complete_rq,
 	.commit_rqs	= nvme_commit_rqs,
 	.init_hctx	= nvme_init_hctx,
-	.init_request	= nvme_init_request,
+	.init_request	= nvme_pci_init_request,
 	.map_queues	= nvme_pci_map_queues,
 	.timeout	= nvme_timeout,
 	.poll		= nvme_poll,

@@ -2475,9 +2477,10 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
-	req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+	req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	nvme_init_request(req, &cmd);
 
 	req->end_io_data = nvmeq;

@@ -30,6 +30,44 @@ static int so_priority;
 module_param(so_priority, int, 0644);
 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+		return;
+
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+					      &nvme_tcp_slock_key[0],
+					      "sk_lock-AF_INET-NVME",
+					      &nvme_tcp_sk_key[0]);
+		break;
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+					      &nvme_tcp_slock_key[1],
+					      "sk_lock-AF_INET6-NVME",
+					      &nvme_tcp_sk_key[1]);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
 enum nvme_tcp_send_state {
 	NVME_TCP_SEND_CMD_PDU = 0,
 	NVME_TCP_SEND_H2C_PDU,

@@ -1427,6 +1465,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 		goto err_destroy_mutex;
 	}
 
+	nvme_tcp_reclassify_socket(queue->sock);
+
 	/* Single syn retry */
 	tcp_sock_set_syncnt(queue->sock->sk, 1);

@@ -1674,7 +1714,7 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
 
 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
 {
-	int i, ret = 0;
+	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvme_tcp_start_queue(ctrl, i);

@@ -1714,8 +1754,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_tcp_alloc_queue(ctrl, i,
-				ctrl->sqsize + 1);
+		ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
 		if (ret)
 			goto out_free_queues;
 	}

@@ -511,7 +511,11 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 		goto done;
 	}
 
-	nvmet_ns_revalidate(req->ns);
+	if (nvmet_ns_revalidate(req->ns)) {
+		mutex_lock(&req->ns->subsys->lock);
+		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+		mutex_unlock(&req->ns->subsys->lock);
+	}
 
 	/*
 	 * nuse = ncap = nsze isn't always true, but we have no way to find

@@ -60,10 +60,11 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
 	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
 		if (nvmet_addr_family[i].type == adrfam)
-			return sprintf(page, "%s\n", nvmet_addr_family[i].name);
+			return snprintf(page, PAGE_SIZE, "%s\n",
+					nvmet_addr_family[i].name);
 	}
 
-	return sprintf(page, "\n");
+	return snprintf(page, PAGE_SIZE, "\n");
 }
 
 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,

@@ -93,10 +94,9 @@ CONFIGFS_ATTR(nvmet_, addr_adrfam);
 static ssize_t nvmet_addr_portid_show(struct config_item *item,
 		char *page)
 {
-	struct nvmet_port *port = to_nvmet_port(item);
+	__le16 portid = to_nvmet_port(item)->disc_addr.portid;
 
-	return snprintf(page, PAGE_SIZE, "%d\n",
-			le16_to_cpu(port->disc_addr.portid));
+	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
 }
 
 static ssize_t nvmet_addr_portid_store(struct config_item *item,

@@ -124,8 +124,7 @@ static ssize_t nvmet_addr_traddr_show(struct config_item *item,
 {
 	struct nvmet_port *port = to_nvmet_port(item);
 
-	return snprintf(page, PAGE_SIZE, "%s\n",
-			port->disc_addr.traddr);
+	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
 }
 
 static ssize_t nvmet_addr_traddr_store(struct config_item *item,

@@ -162,10 +161,11 @@ static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
 	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
 		if (treq == nvmet_addr_treq[i].type)
-			return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
+			return snprintf(page, PAGE_SIZE, "%s\n",
+					nvmet_addr_treq[i].name);
 	}
 
-	return sprintf(page, "\n");
+	return snprintf(page, PAGE_SIZE, "\n");
 }
 
 static ssize_t nvmet_addr_treq_store(struct config_item *item,

@@ -199,8 +199,7 @@ static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
 {
 	struct nvmet_port *port = to_nvmet_port(item);
 
-	return snprintf(page, PAGE_SIZE, "%s\n",
-			port->disc_addr.trsvcid);
+	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
 }
 
 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,

@@ -284,7 +283,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
 	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
 		if (port->disc_addr.trtype == nvmet_transport[i].type)
-			return sprintf(page, "%s\n", nvmet_transport[i].name);
+			return snprintf(page, PAGE_SIZE,
+					"%s\n", nvmet_transport[i].name);
 	}
 
 	return sprintf(page, "\n");

@@ -586,7 +586,8 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
 		mutex_unlock(&ns->subsys->lock);
 		return -EINVAL;
 	}
 
-	nvmet_ns_revalidate(ns);
+	if (nvmet_ns_revalidate(ns))
+		nvmet_ns_changed(ns->subsys, ns->nsid);
 	mutex_unlock(&ns->subsys->lock);
 
 	return count;
 }

@@ -1236,8 +1237,7 @@ CONFIGFS_ATTR(nvmet_subsys_, attr_model);
 static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
 			char *page)
 {
-	return snprintf(page, PAGE_SIZE, "%s\n",
-			nvmet_disc_subsys->subsysnqn);
+	return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
 }
 
 static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,

@@ -531,7 +531,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
 			ns->nsid);
 }
 
-void nvmet_ns_revalidate(struct nvmet_ns *ns)
+bool nvmet_ns_revalidate(struct nvmet_ns *ns)
 {
 	loff_t oldsize = ns->size;

@@ -540,8 +540,7 @@ void nvmet_ns_revalidate(struct nvmet_ns *ns)
 	else
 		nvmet_file_ns_revalidate(ns);
 
-	if (oldsize != ns->size)
-		nvmet_ns_changed(ns->subsys, ns->nsid);
+	return oldsize != ns->size;
 }
 
 int nvmet_ns_enable(struct nvmet_ns *ns)

@@ -1341,7 +1341,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
 }
 
 /**
- * nvme_fc_register_targetport - transport entry point called by an
+ * nvmet_fc_register_targetport - transport entry point called by an
  *                              LLDD to register the existence of a local
  *                              NVME subystem FC port.
  * @pinfo:     pointer to information about the port to be registered

@@ -1604,7 +1604,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 }
 
 /**
- * nvme_fc_unregister_targetport - transport entry point called by an
+ * nvmet_fc_unregister_targetport - transport entry point called by an
  *                              LLDD to deregister/remove a previously
  *                              registered a local NVME subsystem FC port.
  * @target_port: pointer to the (registered) target port that is to be

@@ -542,7 +542,7 @@ u16 nvmet_file_flush(struct nvmet_req *req);
 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
 void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
 void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
-void nvmet_ns_revalidate(struct nvmet_ns *ns);
+bool nvmet_ns_revalidate(struct nvmet_ns *ns);
 u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
 
 bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);

@@ -254,11 +254,12 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 			timeout = nvmet_req_subsys(req)->admin_timeout;
 	}
 
-	rq = nvme_alloc_request(q, req->cmd, 0);
+	rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
 	if (IS_ERR(rq)) {
 		status = NVME_SC_INTERNAL;
 		goto out_put_ns;
 	}
+	nvme_init_request(rq, req->cmd);
 
 	if (timeout)
 		rq->timeout = timeout;

@@ -1703,7 +1703,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 }
 
 /**
- * nvme_rdma_device_removal() - Handle RDMA device removal
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
  * @cm_id:	rdma_cm id, used for nvmet port
  * @queue:	nvmet rdma queue (cm id qp_context)
  *

@@ -123,7 +123,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
 		goto done;
 	}
 
-	nvmet_ns_revalidate(req->ns);
+	if (nvmet_ns_revalidate(req->ns)) {
+		mutex_lock(&req->ns->subsys->lock);
+		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+		mutex_unlock(&req->ns->subsys->lock);
+	}
 	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
 					req->ns->blksize_shift;
 	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);