Commit 32f0c4af authored by Keith Busch, committed by Jens Axboe

nvme: Remove RCU namespace protection

We can't sleep with RCU read lock held, but we need to do potentially
blocking stuff to namespace queues when iterating the list. This patch
removes the RCU locking and holds a mutex instead.

To prevent deadlocks, this patch removes holding the mutex during
namespace scanning and removal. The unlocked namespace scanning is made
safe by holding a reference to the namespace being scanned.
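
For example, nvme_validate_ns() (condensed from the hunks below) now looks the
namespace up with a reference held, so the potentially blocking revalidate runs
without the mutex:

	ns = nvme_find_get_ns(ctrl, nsid);	/* locks, finds, takes a kref, unlocks */
	if (ns) {
		if (revalidate_disk(ns->disk))	/* may block; runs unlocked */
			nvme_ns_remove(ns);
		nvme_put_ns(ns);		/* drop the lookup reference */
	} else
		nvme_alloc_ns(ctrl, nsid);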

List iteration that does IO has to be unlocked to allow error recovery.
The caller must ensure the list cannot be manipulated during such an
event, so this patch adds a comment explaining this requirement to the
only function that iterates an unlocked list. All callers currently
meet this requirement, so no further changes are required.
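
That function is nvme_remove_namespaces(); condensed from the hunks below, the
new comment and the now-unlocked removal loop read:

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	...
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);	/* may block tearing down the queue */
}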

List iterations that do not do IO can safely hold the lock, since they cannot
block error recovery by waiting on IO completions that would have to be forced.
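
For example, nvme_stop_queues() only marks the queues stopped and quiesces the
hardware queues, so after this patch it simply walks the list under
namespaces_mutex (condensed from the hunks below):

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/* no IO is issued here, only queue state changes */
		spin_lock_irq(ns->queue->queue_lock);
		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
		spin_unlock_irq(ns->queue->queue_lock);

		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);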

Reported-by: Ming Lin <mlin@kernel.org>
[fixes 0bf77e9d nvme: switch to RCU freeing the namespace]
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 92d21ac7
drivers/nvme/host/core.c

@@ -1394,19 +1394,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return nsa->ns_id - nsb->ns_id;
 }
 
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
-	struct nvme_ns *ns;
+	struct nvme_ns *ns, *ret = NULL;
 
-	lockdep_assert_held(&ctrl->namespaces_mutex);
-
+	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->ns_id == nsid)
-			return ns;
+		if (ns->ns_id == nsid) {
+			kref_get(&ns->kref);
+			ret = ns;
+			break;
+		}
 		if (ns->ns_id > nsid)
 			break;
 	}
-	return NULL;
+	mutex_unlock(&ctrl->namespaces_mutex);
+	return ret;
 }
 
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)

@@ -1415,8 +1418,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	struct gendisk *disk;
 	int node = dev_to_node(ctrl->dev);
 
-	lockdep_assert_held(&ctrl->namespaces_mutex);
-
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
 		return;

@@ -1457,7 +1458,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (nvme_revalidate_disk(ns->disk))
 		goto out_free_disk;
 
-	list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+	mutex_lock(&ctrl->namespaces_mutex);
+	list_add_tail(&ns->list, &ctrl->namespaces);
+	mutex_unlock(&ctrl->namespaces_mutex);
+
 	kref_get(&ctrl->kref);
 	if (ns->type == NVME_NS_LIGHTNVM)
 		return;

@@ -1480,8 +1484,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-	lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
 

@@ -1494,8 +1496,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
 	}
+
+	mutex_lock(&ns->ctrl->namespaces_mutex);
 	list_del_init(&ns->list);
-	synchronize_rcu();
+	mutex_unlock(&ns->ctrl->namespaces_mutex);
+
 	nvme_put_ns(ns);
 }
 

@@ -1503,10 +1508,11 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns *ns;
 
-	ns = nvme_find_ns(ctrl, nsid);
+	ns = nvme_find_get_ns(ctrl, nsid);
 	if (ns) {
 		if (revalidate_disk(ns->disk))
 			nvme_ns_remove(ns);
+		nvme_put_ns(ns);
 	} else
 		nvme_alloc_ns(ctrl, nsid);
 }

@@ -1535,9 +1541,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
 			nvme_validate_ns(ctrl, nsid);
 
 			while (++prev < nsid) {
-				ns = nvme_find_ns(ctrl, prev);
-				if (ns)
+				ns = nvme_find_get_ns(ctrl, prev);
+				if (ns) {
 					nvme_ns_remove(ns);
+					nvme_put_ns(ns);
+				}
 			}
 		}
 		nn -= j;

@@ -1552,8 +1560,6 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
 	struct nvme_ns *ns, *next;
 	unsigned i;
 
-	lockdep_assert_held(&ctrl->namespaces_mutex);
-
 	for (i = 1; i <= nn; i++)
 		nvme_validate_ns(ctrl, i);
 

@@ -1576,7 +1582,6 @@ static void nvme_scan_work(struct work_struct *work)
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
-	mutex_lock(&ctrl->namespaces_mutex);
 	nn = le32_to_cpu(id->nn);
 	if (ctrl->vs >= NVME_VS(1, 1) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {

@@ -1585,6 +1590,7 @@ static void nvme_scan_work(struct work_struct *work)
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
  done:
+	mutex_lock(&ctrl->namespaces_mutex);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
 	mutex_unlock(&ctrl->namespaces_mutex);
 	kfree(id);

@@ -1604,6 +1610,11 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns, *next;

@@ -1617,10 +1628,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	if (ctrl->state == NVME_CTRL_DEAD)
 		nvme_kill_queues(ctrl);
 
-	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
 		nvme_ns_remove(ns);
-	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 

@@ -1791,11 +1800,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
-		if (!kref_get_unless_zero(&ns->kref))
-			continue;
-
+	mutex_lock(&ctrl->namespaces_mutex);
+	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
 		 * Revalidating a dead namespace sets capacity to 0. This will
 		 * end buffered writers dirtying pages that can't be synced.

@@ -1806,10 +1812,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 		blk_set_queue_dying(ns->queue);
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_mq_start_stopped_hw_queues(ns->queue, true);
-
-		nvme_put_ns(ns);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
 

@@ -1817,8 +1821,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	mutex_lock(&ctrl->namespaces_mutex);
+	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		spin_lock_irq(ns->queue->queue_lock);
 		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
 		spin_unlock_irq(ns->queue->queue_lock);

@@ -1826,7 +1830,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 		blk_mq_cancel_requeue_work(ns->queue);
 		blk_mq_stop_hw_queues(ns->queue);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
 

@@ -1834,13 +1838,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	mutex_lock(&ctrl->namespaces_mutex);
+	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
 		blk_mq_start_stopped_hw_queues(ns->queue, true);
 		blk_mq_kick_requeue_list(ns->queue);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);