Commit 8613dec0 authored by Jens Axboe

Merge tag 'nvme-6.2-2022-11-29' of git://git.infradead.org/nvme into for-6.2/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.2

 - support some passthrough commands without CAP_SYS_ADMIN
   (Kanchan Joshi)
 - refactor PCIe probing and reset (Christoph Hellwig)
 - various fabrics authentication fixes and improvements (Sagi Grimberg)
 - avoid fallback to sequential scan due to transient issues
   (Uday Shankar)
 - implement support for the DEAC bit in Write Zeroes (Christoph Hellwig)
 - allow overriding the IEEE OUI and firmware revision in configfs for
   nvmet (Aleksandr Miloserdov)
 - force reconnect when the number of queues changes in nvmet (Daniel Wagner)
 - minor fixes and improvements (Uros Bizjak, Joel Granados,
   Sagi Grimberg, Christoph Hellwig, Christophe JAILLET)"

* tag 'nvme-6.2-2022-11-29' of git://git.infradead.org/nvme: (45 commits)
  nvmet: expose firmware revision to configfs
  nvmet: expose IEEE OUI to configfs
  nvme: rename the queue quiescing helpers
  nvmet: fix a memory leak in nvmet_auth_set_key
  nvme: return err on nvme_init_non_mdts_limits fail
  nvme: avoid fallback to sequential scan due to transient issues
  nvme-rdma: stop auth work after tearing down queues in error recovery
  nvme-tcp: stop auth work after tearing down queues in error recovery
  nvme-auth: have dhchap_auth_work wait for queues auth to complete
  nvme-auth: remove redundant auth_work flush
  nvme-auth: convert dhchap_auth_list to an array
  nvme-auth: check chap ctrl_key once constructed
  nvme-auth: no need to reset chap contexts on re-authentication
  nvme-auth: remove redundant deallocations
  nvme-auth: clear sensitive info right after authentication completes
  nvme-auth: guarantee dhchap buffers under memory pressure
  nvme-auth: don't keep long lived 4k dhchap buffer
  nvme-auth: remove redundant if statement
  nvme-auth: don't override ctrl keys before validation
  nvme-auth: don't ignore key generation failures when initializing ctrl keys
  ...
parents 3692fec8 68c5444c
@@ -821,7 +821,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
if (!dead && shutdown && freeze)
nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
-nvme_stop_queues(&anv->ctrl);
+nvme_quiesce_io_queues(&anv->ctrl);
if (!dead) {
if (READ_ONCE(anv->ioq.enabled)) {
@@ -837,7 +837,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
WRITE_ONCE(anv->ioq.enabled, false);
WRITE_ONCE(anv->adminq.enabled, false);
mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
-nvme_stop_admin_queue(&anv->ctrl);
+nvme_quiesce_admin_queue(&anv->ctrl);
/* last chance to complete any requests before nvme_cancel_request */
spin_lock_irqsave(&anv->lock, flags);
@@ -854,8 +854,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
* deadlocking blk-mq hot-cpu notifier.
*/
if (shutdown) {
-nvme_start_queues(&anv->ctrl);
-nvme_start_admin_queue(&anv->ctrl);
+nvme_unquiesce_io_queues(&anv->ctrl);
+nvme_unquiesce_admin_queue(&anv->ctrl);
}
}
@@ -1093,7 +1093,7 @@ static void apple_nvme_reset_work(struct work_struct *work)
dev_dbg(anv->dev, "Starting admin queue");
apple_nvme_init_queue(&anv->adminq);
-nvme_start_admin_queue(&anv->ctrl);
+nvme_unquiesce_admin_queue(&anv->ctrl);
if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
dev_warn(anv->ctrl.device,
@@ -1102,7 +1102,7 @@ static void apple_nvme_reset_work(struct work_struct *work)
goto out;
}
-ret = nvme_init_ctrl_finish(&anv->ctrl);
+ret = nvme_init_ctrl_finish(&anv->ctrl, false);
if (ret)
goto out;
@@ -1127,7 +1127,7 @@ static void apple_nvme_reset_work(struct work_struct *work)
anv->ctrl.queue_count = nr_io_queues + 1;
-nvme_start_queues(&anv->ctrl);
+nvme_unquiesce_io_queues(&anv->ctrl);
nvme_wait_freeze(&anv->ctrl);
blk_mq_update_nr_hw_queues(&anv->tagset, 1);
nvme_unfreeze(&anv->ctrl);
This diff is collapsed.
This diff is collapsed.
@@ -1475,6 +1475,8 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
fc_dma_unmap_single(lport->dev, lsop->rspdma,
sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+kfree(lsop->rspbuf);
+kfree(lsop->rqstbuf);
kfree(lsop);
nvme_fc_rport_put(rport);
@@ -1751,20 +1753,17 @@ nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
goto out_put;
}
-lsop = kzalloc(sizeof(*lsop) +
-sizeof(union nvmefc_ls_requests) +
-sizeof(union nvmefc_ls_responses),
-GFP_KERNEL);
-if (!lsop) {
+lsop = kzalloc(sizeof(*lsop), GFP_KERNEL);
+lsop->rqstbuf = kzalloc(sizeof(*lsop->rqstbuf), GFP_KERNEL);
+lsop->rspbuf = kzalloc(sizeof(*lsop->rspbuf), GFP_KERNEL);
+if (!lsop || !lsop->rqstbuf || !lsop->rspbuf) {
dev_info(lport->dev,
"RCV %s LS failed: No memory\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -ENOMEM;
-goto out_put;
+goto out_free;
}
-lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
-lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
sizeof(*lsop->rspbuf),
@@ -1801,6 +1800,8 @@ nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
fc_dma_unmap_single(lport->dev, lsop->rspdma,
sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
+kfree(lsop->rspbuf);
+kfree(lsop->rqstbuf);
kfree(lsop);
out_put:
nvme_fc_rport_put(rport);
@@ -2391,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
nvme_remove_admin_tag_set(&ctrl->ctrl);
kfree(ctrl->queues);
@@ -2492,13 +2493,13 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* (but with error status).
*/
if (ctrl->ctrl.queue_count > 1) {
-nvme_stop_queues(&ctrl->ctrl);
+nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
if (start_queues)
-nvme_start_queues(&ctrl->ctrl);
+nvme_unquiesce_io_queues(&ctrl->ctrl);
}
/*
@@ -2516,13 +2517,13 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
/*
* clean up the admin queue. Same thing as above.
*/
-nvme_stop_admin_queue(&ctrl->ctrl);
+nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
if (start_queues)
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
}
static void
@@ -3104,9 +3105,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
(ilog2(SZ_4K) - 9);
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
-ret = nvme_init_ctrl_finish(&ctrl->ctrl);
+ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
@@ -3250,10 +3251,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_free_queue(&ctrl->queues[0]);
/* re-enable the admin_q so anything new can fast fail */
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
/* resume the io queues so that things will fast fail */
-nvme_start_queues(&ctrl->ctrl);
+nvme_unquiesce_io_queues(&ctrl->ctrl);
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -8,6 +8,48 @@
#include <linux/io_uring.h>
#include "nvme.h"
+static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
+fmode_t mode)
+{
+if (capable(CAP_SYS_ADMIN))
+return true;
+/*
+* Do not allow unprivileged processes to send vendor specific or fabrics
+* commands as we can't be sure about their effects.
+*/
+if (c->common.opcode >= nvme_cmd_vendor_start ||
+c->common.opcode == nvme_fabrics_command)
+return false;
+/*
+* Do not allow unprivileged passthrough of admin commands except
+* for a subset of identify commands that contain information required
+* to form proper I/O commands in userspace and do not expose any
+* potentially sensitive information.
+*/
+if (!ns) {
+if (c->common.opcode == nvme_admin_identify) {
+switch (c->identify.cns) {
+case NVME_ID_CNS_NS:
+case NVME_ID_CNS_CS_NS:
+case NVME_ID_CNS_NS_CS_INDEP:
+return true;
+}
+}
+return false;
+}
+/*
+* Only allow I/O commands that transfer data to the controller if the
+* special file is open for writing, but always allow I/O commands that
+* transfer data from the controller.
+*/
+if (nvme_is_write(c))
+return mode & FMODE_WRITE;
+return true;
+}
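As a rough user-space illustration of what this policy change enables (a sketch, not part of the commit): assuming a /dev/nvme0 controller character device whose permissions allow an unprivileged process to open it, one of the whitelisted Identify admin commands can now be issued through the existing NVME_IOCTL_ADMIN_CMD passthrough interface. The device path and namespace ID below are assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd cmd;
	unsigned char id[4096];
	int fd = open("/dev/nvme0", O_RDONLY);	/* assumed controller node */

	if (fd < 0)
		return 1;
	memset(&cmd, 0, sizeof(cmd));	/* flags must stay zero */
	cmd.opcode = 0x06;		/* nvme_admin_identify */
	cmd.nsid = 1;			/* assumed namespace ID */
	cmd.addr = (uintptr_t)id;	/* data transferred from the controller */
	cmd.data_len = sizeof(id);
	cmd.cdw10 = 0x00;		/* CNS 0x00 = NVME_ID_CNS_NS, allowed above */
	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
		perror("NVME_IOCTL_ADMIN_CMD");
	close(fd);
	return 0;
}

Identify is not a write command, so per the new nvme_cmd_allowed() it passes even on a read-only open; a vendor-specific or fabrics opcode would still get -EACCES.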
/*
* Convert integer values from ioctl structures to user pointers, silently
* ignoring the upper bits in the compat case to match behaviour of 32-bit
@@ -261,7 +303,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
}
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-struct nvme_passthru_cmd __user *ucmd)
+struct nvme_passthru_cmd __user *ucmd, fmode_t mode)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
@@ -269,8 +311,6 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u64 result;
int status;
-if (!capable(CAP_SYS_ADMIN))
-return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
@@ -291,6 +331,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+if (!nvme_cmd_allowed(ns, &c, mode))
+return -EACCES;
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
@@ -308,15 +351,14 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-struct nvme_passthru_cmd64 __user *ucmd, bool vec)
+struct nvme_passthru_cmd64 __user *ucmd, bool vec,
+fmode_t mode)
{
struct nvme_passthru_cmd64 cmd;
struct nvme_command c;
unsigned timeout = 0;
int status;
-if (!capable(CAP_SYS_ADMIN))
-return -EACCES;
if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
return -EFAULT;
if (cmd.flags)
@@ -337,6 +379,9 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+if (!nvme_cmd_allowed(ns, &c, mode))
+return -EACCES;
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
@@ -483,9 +528,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
void *meta = NULL;
int ret;
-if (!capable(CAP_SYS_ADMIN))
-return -EACCES;
c.common.opcode = READ_ONCE(cmd->opcode);
c.common.flags = READ_ONCE(cmd->flags);
if (c.common.flags)
@@ -507,6 +549,9 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
+if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode))
+return -EACCES;
d.metadata = READ_ONCE(cmd->metadata);
d.addr = READ_ONCE(cmd->addr);
d.data_len = READ_ONCE(cmd->data_len);
@@ -570,13 +615,13 @@ static bool is_ctrl_ioctl(unsigned int cmd)
}
static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
-void __user *argp)
+void __user *argp, fmode_t mode)
{
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
-return nvme_user_cmd(ctrl, NULL, argp);
+return nvme_user_cmd(ctrl, NULL, argp, mode);
case NVME_IOCTL_ADMIN64_CMD:
-return nvme_user_cmd64(ctrl, NULL, argp, false);
+return nvme_user_cmd64(ctrl, NULL, argp, false, mode);
default:
return sed_ioctl(ctrl->opal_dev, cmd, argp);
}
@@ -601,14 +646,14 @@ struct nvme_user_io32 {
#endif /* COMPAT_FOR_U64_ALIGNMENT */
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
-void __user *argp)
+void __user *argp, fmode_t mode)
{
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
return ns->head->ns_id;
case NVME_IOCTL_IO_CMD:
-return nvme_user_cmd(ns->ctrl, ns, argp);
+return nvme_user_cmd(ns->ctrl, ns, argp, mode);
/*
* struct nvme_user_io can have different padding on some 32-bit ABIs.
* Just accept the compat version as all fields that are used are the
@@ -620,19 +665,20 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, argp);
case NVME_IOCTL_IO64_CMD:
-return nvme_user_cmd64(ns->ctrl, ns, argp, false);
+return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode);
case NVME_IOCTL_IO64_CMD_VEC:
-return nvme_user_cmd64(ns->ctrl, ns, argp, true);
+return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode);
default:
return -ENOTTY;
}
}
-static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
+static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg,
+fmode_t mode)
{
if (is_ctrl_ioctl(cmd))
-return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
-return nvme_ns_ioctl(ns, cmd, arg);
+return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode);
+return nvme_ns_ioctl(ns, cmd, arg, mode);
}
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
@@ -640,7 +686,7 @@ int nvme_ioctl(struct block_device *bdev, fmode_t mode,
{
struct nvme_ns *ns = bdev->bd_disk->private_data;
-return __nvme_ioctl(ns, cmd, (void __user *)arg);
+return __nvme_ioctl(ns, cmd, (void __user *)arg, mode);
}
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -648,7 +694,7 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct nvme_ns *ns =
container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
-return __nvme_ioctl(ns, cmd, (void __user *)arg);
+return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode);
}
static int nvme_uring_cmd_checks(unsigned int issue_flags)
......@@ -716,7 +762,8 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
void __user *argp, struct nvme_ns_head *head, int srcu_idx,
fmode_t mode)
__releases(&head->srcu)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -724,7 +771,7 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
nvme_get_ctrl(ns->ctrl);
srcu_read_unlock(&head->srcu, srcu_idx);
-ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
+ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
nvme_put_ctrl(ctrl);
return ret;
@@ -749,9 +796,10 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
-return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
+mode);
-ret = nvme_ns_ioctl(ns, cmd, argp);
+ret = nvme_ns_ioctl(ns, cmd, argp, mode);
out_unlock:
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
@@ -773,9 +821,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
goto out_unlock;
if (is_ctrl_ioctl(cmd))
-return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
+file->f_mode);
-ret = nvme_ns_ioctl(ns, cmd, argp);
+ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode);
out_unlock:
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
@@ -849,7 +898,8 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
return ret;
}
-static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
+static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
+fmode_t mode)
{
struct nvme_ns *ns;
int ret;
@@ -873,7 +923,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
kref_get(&ns->kref);
up_read(&ctrl->namespaces_rwsem);
-ret = nvme_user_cmd(ctrl, ns, argp);
+ret = nvme_user_cmd(ctrl, ns, argp, mode);
nvme_put_ns(ns);
return ret;
@@ -890,11 +940,11 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
-return nvme_user_cmd(ctrl, NULL, argp);
+return nvme_user_cmd(ctrl, NULL, argp, file->f_mode);
case NVME_IOCTL_ADMIN64_CMD:
-return nvme_user_cmd64(ctrl, NULL, argp, false);
+return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode);
case NVME_IOCTL_IO_CMD:
-return nvme_dev_user_cmd(ctrl, argp);
+return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
case NVME_IOCTL_RESET:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -337,8 +337,8 @@ struct nvme_ctrl {
#ifdef CONFIG_NVME_AUTH
struct work_struct dhchap_auth_work;
-struct list_head dhchap_auth_list;
struct mutex dhchap_auth_mutex;
+struct nvme_dhchap_queue_context *dhchap_ctxs;
struct nvme_dhchap_key *host_key;
struct nvme_dhchap_key *ctrl_key;
u16 transaction;
@@ -455,6 +455,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+NVME_NS_DEAC, /* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
@@ -507,6 +508,7 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
+const struct attribute_group **dev_attr_groups;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -735,7 +737,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
-int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int flags,
unsigned int cmd_size);
@@ -747,16 +749,13 @@ void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send);
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res);
-void nvme_stop_queues(struct nvme_ctrl *ctrl);
-void nvme_start_queues(struct nvme_ctrl *ctrl);
-void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
-void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
+void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
+void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
@@ -856,6 +855,7 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
+extern const struct attribute_group nvme_dev_attrs_group;
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
@@ -1018,14 +1018,25 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
}
#ifdef CONFIG_NVME_AUTH
-void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+int __init nvme_init_auth(void);
+void __exit nvme_exit_auth(void);
+int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
-void nvme_auth_reset(struct nvme_ctrl *ctrl);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
-static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
+static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+return 0;
+}
+static inline int __init nvme_init_auth(void)
+{
+return 0;
+}
+static inline void __exit nvme_exit_auth(void)
+{
+}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
This diff is collapsed.
@@ -869,16 +869,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
else
ctrl->ctrl.max_integrity_segments = 0;
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
-error = nvme_init_ctrl_finish(&ctrl->ctrl);
+error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (error)
goto out_quiesce_queue;
return 0;
out_quiesce_queue:
-nvme_stop_admin_queue(&ctrl->ctrl);
+nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
@@ -922,7 +922,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_cleanup_tagset;
if (!new) {
-nvme_start_queues(&ctrl->ctrl);
+nvme_unquiesce_io_queues(&ctrl->ctrl);
if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
/*
* If we timed out waiting for freeze we are likely to
@@ -949,7 +949,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return 0;
out_wait_freeze_timed_out:
-nvme_stop_queues(&ctrl->ctrl);
+nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
out_cleanup_tagset:
@@ -964,12 +964,12 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
-nvme_stop_admin_queue(&ctrl->ctrl);
+nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
if (remove) {
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
nvme_remove_admin_tag_set(&ctrl->ctrl);
}
nvme_rdma_destroy_admin_queue(ctrl);
@@ -980,12 +980,12 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
{
if (ctrl->ctrl.queue_count > 1) {
nvme_start_freeze(&ctrl->ctrl);
-nvme_stop_queues(&ctrl->ctrl);
+nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
if (remove) {
-nvme_start_queues(&ctrl->ctrl);
+nvme_unquiesce_io_queues(&ctrl->ctrl);
nvme_remove_io_tag_set(&ctrl->ctrl);
}
nvme_rdma_free_io_queues(ctrl);
@@ -1106,7 +1106,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
destroy_io:
if (ctrl->ctrl.queue_count > 1) {
-nvme_stop_queues(&ctrl->ctrl);
+nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
@@ -1115,7 +1115,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
nvme_rdma_free_io_queues(ctrl);
}
destroy_admin:
-nvme_stop_admin_queue(&ctrl->ctrl);
+nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -1153,13 +1153,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
-nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
-nvme_start_queues(&ctrl->ctrl);
+nvme_unquiesce_io_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
+nvme_auth_stop(&ctrl->ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
@@ -2207,7 +2207,7 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
nvme_rdma_teardown_io_queues(ctrl, shutdown);
-nvme_stop_admin_queue(&ctrl->ctrl);
+nvme_quiesce_admin_queue(&ctrl->ctrl);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
@@ -1875,7 +1875,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
goto out_cleanup_connect_q;
if (!new) {
-nvme_start_queues(ctrl);
+nvme_unquiesce_io_queues(ctrl);
if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
/*
* If we timed out waiting for freeze we are likely to
@@ -1902,7 +1902,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return 0;
out_wait_freeze_timed_out:
-nvme_stop_queues(ctrl);
+nvme_quiesce_io_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
@@ -1947,16 +1947,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_stop_queue;
-nvme_start_admin_queue(ctrl);
+nvme_unquiesce_admin_queue(ctrl);
-error = nvme_init_ctrl_finish(ctrl);
+error = nvme_init_ctrl_finish(ctrl, false);
if (error)
goto out_quiesce_queue;
return 0;
out_quiesce_queue:
-nvme_stop_admin_queue(ctrl);
+nvme_quiesce_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
@@ -1972,12 +1972,12 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
-nvme_stop_admin_queue(ctrl);
+nvme_quiesce_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
if (remove)
-nvme_start_admin_queue(ctrl);
+nvme_unquiesce_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1986,14 +1986,14 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
{
if (ctrl->queue_count <= 1)
return;
-nvme_stop_admin_queue(ctrl);
+nvme_quiesce_admin_queue(ctrl);
nvme_start_freeze(ctrl);
-nvme_stop_queues(ctrl);
+nvme_quiesce_io_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
if (remove)
-nvme_start_queues(ctrl);
+nvme_unquiesce_io_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
}
@@ -2074,14 +2074,14 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
destroy_io:
if (ctrl->queue_count > 1) {
-nvme_stop_queues(ctrl);
+nvme_quiesce_io_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
-nvme_stop_admin_queue(ctrl);
+nvme_quiesce_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
@@ -2119,14 +2119,14 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
-nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
-nvme_start_queues(ctrl);
+nvme_unquiesce_io_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
-nvme_start_admin_queue(ctrl);
+nvme_unquiesce_admin_queue(ctrl);
+nvme_auth_stop(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
@@ -2141,7 +2141,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
nvme_tcp_teardown_io_queues(ctrl, shutdown);
-nvme_stop_admin_queue(ctrl);
+nvme_quiesce_admin_queue(ctrl);
if (shutdown)
nvme_shutdown_ctrl(ctrl);
else
@@ -370,7 +370,9 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
strlen(subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
-UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');
+put_unaligned_le24(subsys->ieee_oui, id->ieee);
id->rab = 6;
@@ -379,11 +381,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
else
id->cntrltype = NVME_CTRL_IO;
-/*
-* XXX: figure out how we can assign a IEEE OUI, but until then
-* the safest is to leave it as zeroes.
-*/
/* we support multiple ports, multiple hosts and ANA: */
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
@@ -45,9 +45,11 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
if (!dhchap_secret)
return -ENOMEM;
if (set_ctrl) {
+kfree(host->dhchap_ctrl_secret);
host->dhchap_ctrl_secret = strim(dhchap_secret);
host->dhchap_ctrl_key_hash = key_hash;
} else {
+kfree(host->dhchap_secret);
host->dhchap_secret = strim(dhchap_secret);
host->dhchap_key_hash = key_hash;
}
@@ -1259,6 +1259,116 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
+char *page)
+{
+struct nvmet_subsys *subsys = to_subsys(item);
+return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
+}
+static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
+const char *page, size_t count)
+{
+uint32_t val = 0;
+int ret;
+if (subsys->subsys_discovered) {
+pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
+subsys->ieee_oui);
+return -EINVAL;
+}
+ret = kstrtou32(page, 0, &val);
+if (ret < 0)
+return ret;
+if (val >= 0x1000000)
+return -EINVAL;
+subsys->ieee_oui = val;
+return count;
+}
+static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
+const char *page, size_t count)
+{
+struct nvmet_subsys *subsys = to_subsys(item);
+ssize_t ret;
+down_write(&nvmet_config_sem);
+mutex_lock(&subsys->lock);
+ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
+mutex_unlock(&subsys->lock);
+up_write(&nvmet_config_sem);
+return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
+static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
+char *page)
+{
+struct nvmet_subsys *subsys = to_subsys(item);
+return sysfs_emit(page, "%s\n", subsys->firmware_rev);
+}
+static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
+const char *page, size_t count)
+{
+int pos = 0, len;
+char *val;
+if (subsys->subsys_discovered) {
+pr_err("Can't set firmware revision. %s is already assigned\n",
+subsys->firmware_rev);
+return -EINVAL;
+}
+len = strcspn(page, "\n");
+if (!len)
+return -EINVAL;
+if (len > NVMET_FR_MAX_SIZE) {
+pr_err("Firmware revision size can not exceed %d Bytes\n",
+NVMET_FR_MAX_SIZE);
+return -EINVAL;
+}
+for (pos = 0; pos < len; pos++) {
+if (!nvmet_is_ascii(page[pos]))
+return -EINVAL;
+}
+val = kmemdup_nul(page, len, GFP_KERNEL);
+if (!val)
+return -ENOMEM;
+kfree(subsys->firmware_rev);
+subsys->firmware_rev = val;
+return count;
+}
+static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
+const char *page, size_t count)
+{
+struct nvmet_subsys *subsys = to_subsys(item);
+ssize_t ret;
+down_write(&nvmet_config_sem);
+mutex_lock(&subsys->lock);
+ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
+mutex_unlock(&subsys->lock);
+up_write(&nvmet_config_sem);
+return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1290,6 +1400,8 @@ static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
const char *page, size_t cnt)
{
+struct nvmet_subsys *subsys = to_subsys(item);
+struct nvmet_ctrl *ctrl;
u16 qid_max;
if (sscanf(page, "%hu\n", &qid_max) != 1)
@@ -1299,8 +1411,13 @@ static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
return -EINVAL;
down_write(&nvmet_config_sem);
-to_subsys(item)->max_qid = qid_max;
+subsys->max_qid = qid_max;
+/* Force reconnect */
+list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ctrl->ops->delete_ctrl(ctrl);
up_write(&nvmet_config_sem);
return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
@@ -1313,6 +1430,8 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
&nvmet_subsys_attr_attr_qid_max,
+&nvmet_subsys_attr_attr_ieee_oui,
+&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
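For context, the two new attributes surface as ordinary configfs files next to the existing attr_model, and both must be written before the subsystem is discovered by a host. A minimal host-side sketch (the subsystem NQN "testnqn" and the values are assumptions, not from the commit):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));	/* configfs store callback runs here */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* OUI must be below 0x1000000; firmware string at most 8 ASCII bytes */
	write_attr("/sys/kernel/config/nvmet/subsystems/testnqn/attr_ieee_oui",
		   "0x123456");
	write_attr("/sys/kernel/config/nvmet/subsystems/testnqn/attr_firmware",
		   "fw-1.0");
	return 0;
}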
@@ -10,11 +10,14 @@
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
+#include <generated/utsrelease.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "nvmet.h"
+struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -695,11 +698,10 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
if (req->sq->size) {
u32 old_sqhd, new_sqhd;
+old_sqhd = READ_ONCE(req->sq->sqhd);
do {
-old_sqhd = req->sq->sqhd;
new_sqhd = (old_sqhd + 1) % req->sq->size;
-} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
-old_sqhd);
+} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
}
req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
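The conversion above follows the usual try_cmpxchg idiom: the initial read moves out of the loop, and on failure try_cmpxchg refreshes old_sqhd with the current value itself, so there is no per-iteration reread. A stand-alone C11 analogue of the same pattern, for illustration only (not kernel code):

#include <stdatomic.h>

static void sq_head_advance(_Atomic unsigned int *sqhd, unsigned int size)
{
	unsigned int old = atomic_load(sqhd);	/* single read before the loop */
	unsigned int new;

	do {
		new = (old + 1) % size;
		/* on failure, "old" is updated to the observed value,
		 * mirroring the kernel's try_cmpxchg(ptr, &old, new) */
	} while (!atomic_compare_exchange_weak(sqhd, &old, new));
}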
@@ -1561,6 +1563,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
goto free_subsys;
}
+subsys->ieee_oui = 0;
+subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
+if (!subsys->firmware_rev) {
+ret = -ENOMEM;
+goto free_mn;
+}
switch (type) {
case NVME_NQN_NVME:
subsys->max_qid = NVMET_NR_QUEUES;
@@ -1572,14 +1582,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
ret = -EINVAL;
-goto free_mn;
+goto free_fr;
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
ret = -ENOMEM;
-goto free_mn;
+goto free_fr;
}
subsys->cntlid_min = NVME_CNTLID_MIN;
subsys->cntlid_max = NVME_CNTLID_MAX;
@@ -1592,6 +1602,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
return subsys;
+free_fr:
+kfree(subsys->firmware_rev);
free_mn:
kfree(subsys->model_number);
free_subsys:
@@ -1611,6 +1623,7 @@ static void nvmet_subsys_free(struct kref *ref)
kfree(subsys->subsysnqn);
kfree(subsys->model_number);
+kfree(subsys->firmware_rev);
kfree(subsys);
}
@@ -1631,26 +1644,28 @@ void nvmet_subsys_put(struct nvmet_subsys *subsys)
static int __init nvmet_init(void)
{
-int error;
+int error = -ENOMEM;
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
+NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
+SLAB_HWCACHE_ALIGN, NULL);
+if (!nvmet_bvec_cache)
+return -ENOMEM;
zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
if (!zbd_wq)
-return -ENOMEM;
+goto out_destroy_bvec_cache;
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
-if (!buffered_io_wq) {
-error = -ENOMEM;
+if (!buffered_io_wq)
goto out_free_zbd_work_queue;
-}
nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
-if (!nvmet_wq) {
-error = -ENOMEM;
+if (!nvmet_wq)
goto out_free_buffered_work_queue;
-}
error = nvmet_init_discovery();
if (error)
@@ -1669,6 +1684,8 @@ static int __init nvmet_init(void)
destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
destroy_workqueue(zbd_wq);
+out_destroy_bvec_cache:
+kmem_cache_destroy(nvmet_bvec_cache);
return error;
}
@@ -1680,6 +1697,7 @@ static void __exit nvmet_exit(void)
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
+kmem_cache_destroy(nvmet_bvec_cache);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
@@ -11,7 +11,6 @@
#include <linux/fs.h>
#include "nvmet.h"
-#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
@@ -26,8 +25,6 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
flush_workqueue(buffered_io_wq);
mempool_destroy(ns->bvec_pool);
ns->bvec_pool = NULL;
-kmem_cache_destroy(ns->bvec_cache);
-ns->bvec_cache = NULL;
fput(ns->file);
ns->file = NULL;
}
@@ -59,16 +56,8 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
ns->blksize_shift = min_t(u8,
file_inode(ns->file)->i_blkbits, 12);
-ns->bvec_cache = kmem_cache_create("nvmet-bvec",
-NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
-0, SLAB_HWCACHE_ALIGN, NULL);
-if (!ns->bvec_cache) {
-ret = -ENOMEM;
-goto err;
-}
ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
-mempool_free_slab, ns->bvec_cache);
+mempool_free_slab, nvmet_bvec_cache);
if (!ns->bvec_pool) {
ret = -ENOMEM;
@@ -77,9 +66,10 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
return ret;
err:
+fput(ns->file);
+ns->file = NULL;
ns->size = 0;
ns->blksize_shift = 0;
-nvmet_file_ns_disable(ns);
return ret;
}
@@ -375,9 +375,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
-nvme_start_admin_queue(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
-error = nvme_init_ctrl_finish(&ctrl->ctrl);
+error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (error)
goto out_cleanup_tagset;
@@ -394,12 +394,12 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
-nvme_stop_queues(&ctrl->ctrl);
+nvme_quiesce_io_queues(&ctrl->ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
-nvme_stop_admin_queue(&ctrl->ctrl);
+nvme_quiesce_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
@@ -29,6 +29,7 @@
#define NVMET_DEFAULT_CTRL_MODEL "Linux"
#define NVMET_MN_MAX_SIZE 40
#define NVMET_SN_MAX_SIZE 20
+#define NVMET_FR_MAX_SIZE 8
/*
* Supported optional AENs:
@@ -77,7 +78,6 @@ struct nvmet_ns {
struct completion disable_done;
mempool_t *bvec_pool;
-struct kmem_cache *bvec_cache;
int use_p2pmem;
struct pci_dev *p2p_dev;
@@ -264,6 +264,8 @@ struct nvmet_subsys {
struct config_group allowed_hosts_group;
char *model_number;
+u32 ieee_oui;
+char *firmware_rev;
#ifdef CONFIG_NVME_TARGET_PASSTHRU
struct nvme_ctrl *passthru_ctrl;
@@ -393,6 +395,8 @@ struct nvmet_req {
u64 error_slba;
};
+#define NVMET_MAX_MPOOL_BVEC 16
+extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
@@ -797,6 +797,7 @@ enum nvme_opcode {
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
nvme_cmd_zone_append = 0x7d,
+nvme_cmd_vendor_start = 0x80,
};
#define nvme_opcode_name(opcode) { opcode, #opcode }
@@ -963,6 +964,7 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+NVME_WZ_DEAC = 1 << 9,
};
struct nvme_dsm_cmd {
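The new NVME_NS_DEAC feature flag and NVME_WZ_DEAC control bit pair up in the host core (the core.c change is in one of the collapsed diffs above): when the namespace advertises support, the core sets the DEAC bit in the Write Zeroes control field so the zeroed blocks are also deallocated. A minimal sketch of that idea, assuming the usual driver headers; the function name here is invented for illustration and is not the commit's helper:

#include "nvme.h"	/* struct nvme_ns, struct nvme_command, NVME_NS_DEAC */

/* Hypothetical helper: build a Write Zeroes command with DEAC when supported. */
static void sketch_setup_write_zeroes(struct nvme_ns *ns,
		struct nvme_command *c, u64 slba, u16 nlb)
{
	c->write_zeroes.opcode = nvme_cmd_write_zeroes;
	c->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	c->write_zeroes.slba = cpu_to_le64(slba);
	c->write_zeroes.length = cpu_to_le16(nlb - 1);	/* 0's based count */
	if (ns->features & NVME_NS_DEAC)	/* set during ns scan */
		c->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
}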