Commit 5396fdac authored by Hannes Reinecke, committed by Christoph Hellwig

nvme: fix refcounting imbalance when all paths are down

When the last path to a ns_head drops, the current code removes the
ns_head from the subsystem list, but only deletes the disk itself once
the last reference to the ns_head is dropped. This causes a refcounting
imbalance, e.g. when applications hold a reference to the disk: they
will then never be notified that the disk is in fact dead.
This patch moves the call to del_gendisk() into
nvme_mpath_check_last_path(), ensuring that the disk is properly
removed and applications get the appropriate notifications.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 7764656b
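
For illustration, a minimal userspace sketch of the behaviour the commit message describes: a process that holds the shared namespace node open pins a reference on the gendisk, and it is the del_gendisk() performed at last-path removal (see the diff below) that makes its I/O start failing. The device path, open flags and exact errno here are illustrative assumptions, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	/* Application holding the shared (multipath) namespace node open. */
	int fd = open("/dev/nvme0n1", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * ... the last path to the namespace goes away here.  Before this
	 * patch the gendisk stayed registered until the ns_head refcount
	 * dropped to zero, which this open file descriptor prevents; with
	 * the patch, del_gendisk() runs at last-path removal, so the device
	 * disappears underneath the open file.
	 */
	if (read(fd, buf, sizeof(buf)) < 0)
		perror("read");	/* now fails, typically with EIO or ENODEV */
	close(fd);
	return 0;
}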
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3807,6 +3807,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
+	bool last_path = false;
+
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
@@ -3815,8 +3817,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	mutex_lock(&ns->ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
-	if (list_empty(&ns->head->list))
-		list_del_init(&ns->head->entry);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 	synchronize_rcu(); /* guarantee not available in head->list */
@@ -3836,7 +3836,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	list_del_init(&ns->list);
 	up_write(&ns->ctrl->namespaces_rwsem);
-	nvme_mpath_check_last_path(ns);
+	/* Synchronize with nvme_init_ns_head() */
+	mutex_lock(&ns->head->subsys->lock);
+	if (list_empty(&ns->head->list)) {
+		list_del_init(&ns->head->entry);
+		last_path = true;
+	}
+	mutex_unlock(&ns->head->subsys->lock);
+	if (last_path)
+		nvme_mpath_shutdown_disk(ns->head);
 	nvme_put_ns(ns);
 }
...
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -760,14 +760,21 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 #endif
 }
 
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
+	kblockd_schedule_work(&head->requeue_work);
 	if (head->disk->flags & GENHD_FL_UP) {
 		nvme_cdev_del(&head->cdev, &head->cdev_device);
 		del_gendisk(head->disk);
 	}
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+	if (!head->disk)
+		return;
 	blk_set_queue_dying(head->disk->queue);
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
...
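
In effect the hunk above splits teardown in two: nvme_mpath_shutdown_disk() runs as soon as the last path is gone (flush the requeue work, drop the cdev, del_gendisk()), while nvme_mpath_remove_disk() keeps only the final cleanup for when the last ns_head reference is dropped. A rough sketch of that ordering follows; the final-release call site is outside this diff, so the wrapper functions below are illustrative assumptions, not part of the patch:

/*
 * Illustrative sketch of the two-stage teardown the hunk above sets up.
 * Only nvme_mpath_shutdown_disk() and nvme_mpath_remove_disk() come from
 * the patch; the wrappers and their call sites are hypothetical.
 */
static void example_last_path_gone(struct nvme_ns_head *head)
{
	/* Stage 1, from nvme_ns_remove(): make the device disappear for
	 * userspace right away, even while openers still pin the ns_head. */
	nvme_mpath_shutdown_disk(head);
}

static void example_final_reference_dropped(struct nvme_ns_head *head)
{
	/* Stage 2, at final ns_head release: mark the queue dying, flush
	 * pending bios and release the disk. */
	nvme_mpath_remove_disk(head);
}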
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -716,14 +716,7 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
-	struct nvme_ns_head *head = ns->head;
-
-	if (head->disk && list_empty(&head->list))
-		kblockd_schedule_work(&head->requeue_work);
-}
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
 
 static inline void nvme_trace_bio_complete(struct request *req)
 {
@@ -772,7 +765,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 }
 static inline void nvme_trace_bio_complete(struct request *req)
...