Commit 16648bac authored by Christoph Hellwig, committed by Jens Axboe

md: stop using for_each_mddev in md_exit

Just do a simple list_for_each_entry_safe on all_mddevs, grab a reference
only while the lock is dropped, and delete the now unused for_each_mddev
macro.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2651434
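
The change follows a common kernel pattern: walk a lock-protected list with the _safe iterator variant, pin each entry with a reference before dropping the lock to do work that may sleep, and drop that reference again before re-taking the lock. Below is a minimal, self-contained userspace sketch of the same pattern. It is not kernel code: struct node, node_get()/node_put(), do_teardown() and list_lock are hypothetical stand-ins for struct mddev, mddev_get()/mddev_put(), export_array() and all_mddevs_lock, and a pthread mutex stands in for the spinlock. Like md_exit(), the sketch is only safe because nothing else adds or removes entries while the loop runs.

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdlib.h>

struct node {
	int ref;			/* protected by list_lock */
	struct node *next;
};

static struct node *node_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold list_lock */
static void node_get(struct node *n)
{
	n->ref++;
}

/* takes list_lock itself, mirroring how mddev_put() locks internally */
static void node_put(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	if (--n->ref == 0)
		free(n);		/* last reference: entry goes away */
	pthread_mutex_unlock(&list_lock);
}

/* stand-in for export_array(): may sleep, so the lock must be dropped */
static void do_teardown(struct node *n)
{
	(void)n;
}

static void exit_all(void)
{
	struct node *n, *next;

	pthread_mutex_lock(&list_lock);
	for (n = node_list; n; n = next) {
		next = n->next;		/* "safe" walk: successor saved first */
		node_get(n);		/* pin n so it survives the unlock */
		pthread_mutex_unlock(&list_lock);

		do_teardown(n);		/* unlocked: sleeping work is fine */

		node_put(n);		/* may free n; next was saved above */
		pthread_mutex_lock(&list_lock);
	}
	node_list = NULL;		/* every entry has been handed off */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	/* build a two-element list; each node starts with one reference */
	for (int i = 0; i < 2; i++) {
		struct node *n = calloc(1, sizeof(*n));
		if (!n)
			abort();
		n->ref = 1;
		n->next = node_list;
		node_list = n;
	}
	exit_all();
	return 0;
}

The saved successor pointer is what makes dropping the reference mid-walk safe: node_put() may free the current entry, but never the one the next iteration will visit, which is why the kernel loop can use list_for_each_entry_safe() rather than restarting the scan after each unlock.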
@@ -368,28 +368,6 @@ EXPORT_SYMBOL_GPL(md_new_event);
 static LIST_HEAD(all_mddevs);
 static DEFINE_SPINLOCK(all_mddevs_lock);
 
-/*
- * iterates through all used mddevs in the system.
- * We take care to grab the all_mddevs_lock whenever navigating
- * the list, and to always hold a refcount when unlocked.
- * Any code which breaks out of this loop while own
- * a reference to the current mddev and must mddev_put it.
- */
-#define for_each_mddev(_mddev,_tmp)					\
-									\
-	for (({ spin_lock(&all_mddevs_lock);				\
-		_tmp = all_mddevs.next;					\
-		_mddev = NULL;});					\
-	     ({ if (_tmp != &all_mddevs)				\
-			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
-		spin_unlock(&all_mddevs_lock);				\
-		if (_mddev) mddev_put(_mddev);				\
-		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
-		_tmp != &all_mddevs;});					\
-	     ({ spin_lock(&all_mddevs_lock);				\
-		_tmp = _tmp->next;})					\
-	     )
-
 /* Rather than calling directly into the personality make_request function,
  * IO requests come here first so that we can check if the device is
  * being suspended pending a reconfiguration.
@@ -9923,8 +9901,7 @@ void md_autostart_arrays(int part)
 
 static __exit void md_exit(void)
 {
-	struct mddev *mddev;
-	struct list_head *tmp;
+	struct mddev *mddev, *n;
 	int delay = 1;
 
 	unregister_blkdev(MD_MAJOR,"md");
@@ -9944,17 +9921,23 @@ static __exit void md_exit(void)
 	}
 	remove_proc_entry("mdstat", NULL);
 
-	for_each_mddev(mddev, tmp) {
+	spin_lock(&all_mddevs_lock);
+	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+		mddev_get(mddev);
+		spin_unlock(&all_mddevs_lock);
 		export_array(mddev);
 		mddev->ctime = 0;
 		mddev->hold_active = 0;
 		/*
-		 * for_each_mddev() will call mddev_put() at the end of each
-		 * iteration.  As the mddev is now fully clear, this will
-		 * schedule the mddev for destruction by a workqueue, and the
+		 * As the mddev is now fully clear, mddev_put will schedule
+		 * the mddev for destruction by a workqueue, and the
		 * destroy_workqueue() below will wait for that to complete.
		 */
+		mddev_put(mddev);
+		spin_lock(&all_mddevs_lock);
 	}
+	spin_unlock(&all_mddevs_lock);
+
 	destroy_workqueue(md_rdev_misc_wq);
 	destroy_workqueue(md_misc_wq);
 	destroy_workqueue(md_wq);