Commit d11de63f authored by Yufen Yu, committed by Jens Axboe

nvme-loop: init nvmet_ctrl fatal_err_work at allocation

After commit 4d43d395 ("workqueue: Try to catch flush_work() without
INIT_WORK()"), deleting an nvme-loop device triggers a warning:
nvmet_ctrl_free() unconditionally calls cancel_work_sync() on
fatal_err_work, but that work item was only initialized in
nvmet_ctrl_fatal_error(), i.e. if a fatal error had actually occurred.
The trace looks like:

[   76.601272] Call Trace:
[   76.601646]  ? del_timer+0x72/0xa0
[   76.602156]  __cancel_work_timer+0x1ae/0x270
[   76.602791]  cancel_work_sync+0x14/0x20
[   76.603407]  nvmet_ctrl_free+0x1b7/0x2f0 [nvmet]
[   76.604091]  ? free_percpu+0x168/0x300
[   76.604652]  nvmet_sq_destroy+0x106/0x240 [nvmet]
[   76.605346]  nvme_loop_destroy_admin_queue+0x30/0x60 [nvme_loop]
[   76.606220]  nvme_loop_shutdown_ctrl+0xc3/0xf0 [nvme_loop]
[   76.607026]  nvme_loop_delete_ctrl_host+0x19/0x30 [nvme_loop]
[   76.607871]  nvme_do_delete_ctrl+0x75/0xb0
[   76.608477]  nvme_sysfs_delete+0x7d/0xc0
[   76.609057]  dev_attr_store+0x24/0x40
[   76.609603]  sysfs_kf_write+0x4c/0x60
[   76.610144]  kernfs_fop_write+0x19a/0x260
[   76.610742]  __vfs_write+0x1c/0x60
[   76.611246]  vfs_write+0xfa/0x280
[   76.611739]  ksys_write+0x6e/0x120
[   76.612238]  __x64_sys_write+0x1e/0x30
[   76.612787]  do_syscall_64+0xbf/0x3a0
[   76.613329]  entry_SYSCALL_64_after_hwframe+0x44/0xa9

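The pattern the new workqueue check catches can be reduced to a few
lines. A minimal, hypothetical out-of-tree module (all names below are
invented for illustration) that hits the same warning on unload:

  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/workqueue.h>

  /* Stand-in for nvmet_ctrl: the work item is zeroed by kzalloc() but
   * never passed to INIT_WORK(), because the error path that would
   * have initialized it lazily never runs. */
  struct demo_ctrl {
          struct work_struct fatal_err_work;
  };

  static struct demo_ctrl *demo;

  static int __init demo_init(void)
  {
          demo = kzalloc(sizeof(*demo), GFP_KERNEL); /* work->func stays NULL */
          return demo ? 0 : -ENOMEM;
  }

  static void __exit demo_exit(void)
  {
          /* With 4d43d395 applied, __flush_work() WARNs here because
           * the work item was never initialized with INIT_WORK(). */
          cancel_work_sync(&demo->fatal_err_work);
          kfree(demo);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");
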
Fix this by moving the fatal_err_work initialization to
nvmet_alloc_ctrl(), so the work item is valid for the controller's
entire lifetime and teardown can always cancel it safely.
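
Condensed from the diff below, the work item's lifecycle after this
patch becomes:

  /* nvmet_alloc_ctrl(): initialized once, valid from allocation on */
  INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

  /* nvmet_ctrl_fatal_error(): only schedules, no lazy INIT_WORK() */
  schedule_work(&ctrl->fatal_err_work);

  /* nvmet_ctrl_free(): safe even if no fatal error ever occurred */
  cancel_work_sync(&ctrl->fatal_err_work);

One-time initialization at allocation is the idiomatic workqueue
pattern; the lazy INIT_WORK() in the error path ran at most once
(guarded by NVME_CSTS_CFS), but it left teardown unsafe whenever no
fatal error had occurred.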
Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 01fc08ff
@@ -1163,6 +1163,15 @@ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
 	put_device(ctrl->p2p_client);
 }
 
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+			container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+	ctrl->ops->delete_ctrl(ctrl);
+}
+
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
 {
@@ -1205,6 +1214,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
+	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1308,21 +1318,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
 	kref_put(&ctrl->ref, nvmet_ctrl_free);
 }
 
-static void nvmet_fatal_error_handler(struct work_struct *work)
-{
-	struct nvmet_ctrl *ctrl =
-			container_of(work, struct nvmet_ctrl, fatal_err_work);
-
-	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
-	ctrl->ops->delete_ctrl(ctrl);
-}
-
 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
 {
 	mutex_lock(&ctrl->lock);
 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
 		ctrl->csts |= NVME_CSTS_CFS;
-		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 		schedule_work(&ctrl->fatal_err_work);
 	}
 	mutex_unlock(&ctrl->lock);