Commit 210b1f65 authored by Keith Busch

nvme-pci: do not directly handle subsys reset fallout

Scheduling reset_work after an nvme subsystem reset is expected to fail
on pcie, but this also prevents any handling the platform's pcie
services may provide that could successfully recover the link without
re-enumeration. Examples of such services include AER, DPC, and Power's EEH.

Provide a pci-specific operation that safely initiates a subsystem
reset and, instead of scheduling reset work, reads back the status
register to trigger a pcie read error.

Since this only affects pci, the other fabrics drivers subscribe to a
generic nvmf subsystem reset that is exactly the same as before. The
loop fabric doesn't use it because nvmet doesn't support setting that
property anyway.

And since we're using the magic NSSR value in two places now, provide a
symbolic define for it.
Reported-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent bf86e7d9
drivers/nvme/host/fabrics.c @@ -280,6 +280,21 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
int nvmf_subsystem_reset(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}
EXPORT_SYMBOL_GPL(nvmf_subsystem_reset);
/**
* nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
* connect() errors.
drivers/nvme/host/fabrics.h @@ -217,6 +217,7 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_subsystem_reset(struct nvme_ctrl *ctrl);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
drivers/nvme/host/fc.c @@ -3382,6 +3382,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
.subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_fc_free_ctrl,
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
drivers/nvme/host/nvme.h @@ -551,6 +551,7 @@ struct nvme_ctrl_ops {
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
int (*subsystem_reset)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
drivers/nvme/host/nvme.h @@ -649,18 +650,9 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 {
-	int ret;
-	if (!ctrl->subsystem)
+	if (!ctrl->subsystem || !ctrl->ops->subsystem_reset)
 		return -ENOTTY;
-	if (!nvme_wait_reset(ctrl))
-		return -EBUSY;
-	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
-	if (ret)
-		return ret;
-	return nvme_try_sched_reset(ctrl);
+	return ctrl->ops->subsystem_reset(ctrl);
 }
/*
drivers/nvme/host/pci.c @@ -1143,6 +1143,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
spin_unlock(&nvmeq->sq_lock);
}
static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	int ret = 0;

	/*
	 * Taking the shutdown_lock ensures the BAR mapping is not being
	 * altered by reset_work. Holding this lock before the RESETTING state
	 * change, if successful, also ensures nvme_remove won't be able to
	 * proceed to iounmap until we're done.
	 */
	mutex_lock(&dev->shutdown_lock);
	if (!dev->bar_mapped_size) {
		ret = -ENODEV;
		goto unlock;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
		ret = -EBUSY;
		goto unlock;
	}

	writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR);
	nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);

	/*
	 * Read controller status to flush the previous write and trigger a
	 * pcie read error.
	 */
	readl(dev->bar + NVME_REG_CSTS);

unlock:
	mutex_unlock(&dev->shutdown_lock);
	return ret;
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
struct nvme_command c = { };
drivers/nvme/host/pci.c @@ -2859,6 +2894,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read64 = nvme_pci_reg_read64,
.free_ctrl = nvme_pci_free_ctrl,
.submit_async_event = nvme_pci_submit_async_event,
.subsystem_reset = nvme_pci_subsystem_reset,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
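For context (not part of this change): the new ->subsystem_reset() callback is reached from nvme_reset_subsystem() above, which services a user-requested subsystem reset via the existing NVME_IOCTL_SUBSYS_RESET ioctl on the controller character device. A minimal userspace sketch that exercises this path is shown below; the /dev/nvme0 path is an assumption and root privileges are required.

/*
 * Illustration only, not part of this commit: request an NVMe subsystem
 * reset through the controller character device. The ioctl lands in
 * nvme_reset_subsystem(), which now dispatches to the transport's
 * ->subsystem_reset() callback (nvme_pci_subsystem_reset() on pcie).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/nvme0");
		return 1;
	}

	/*
	 * Fails with ENOTTY if the controller doesn't advertise subsystem
	 * reset support or the transport has no ->subsystem_reset() callback
	 * (e.g. the loop fabric), and with EBUSY if a reset is in flight.
	 */
	if (ioctl(fd, NVME_IOCTL_SUBSYS_RESET) < 0)
		perror("NVME_IOCTL_SUBSYS_RESET");

	close(fd);
	return 0;
}

nvme-cli's "nvme subsystem-reset /dev/nvme0" command issues the same ioctl.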
drivers/nvme/host/rdma.c @@ -2201,6 +2201,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
.subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_rdma_free_ctrl,
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
drivers/nvme/host/tcp.c @@ -2662,6 +2662,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
.subsystem_reset = nvmf_subsystem_reset,
.free_ctrl = nvme_tcp_free_ctrl,
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
include/linux/nvme.h @@ -25,6 +25,9 @@
#define NVME_NSID_ALL		0xffffffff

/* Special NSSR value, 'NVMe' */
#define NVME_SUBSYS_RESET	0x4E564D65

enum nvme_subsys_type {
/* Referral to another discovery type target subsystem */
NVME_NQN_DISC = 1,
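For reference, the magic value added here is simply the ASCII string "NVMe" packed big-endian into 32 bits, which is the value the spec requires to be written to the NSSR register to trigger a subsystem reset. A standalone sketch (illustration only, not kernel code) that reproduces the constant:

/* Illustration only: NVME_SUBSYS_RESET is the ASCII bytes 'N','V','M','e'. */
#include <stdio.h>

int main(void)
{
	unsigned int nssr = ('N' << 24) | ('V' << 16) | ('M' << 8) | 'e';

	printf("0x%08X\n", nssr);	/* prints 0x4E564D65 */
	return 0;
}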