Commit a731763f authored by Yu Kuai, committed by Jens Axboe

blk-cgroup: prevent rcu_sched detected stalls warnings while iterating blkgs

We ran a test that creates millions of cgroups and blkgs and then triggers
blkg_destroy_all(). In such a situation blkg_destroy_all() holds the spin
lock for a long time, which triggers rcu_sched stall warnings. Thus, release
the lock each time a batch of blkgs has been destroyed.

blkcg_activate_policy() and blkcg_deactivate_policy() might have the
same problem, however, as they are basically only called from module
init/exit paths, let's leave them alone for now.
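
As a rough illustration of the pattern (not the kernel code itself), here is a
minimal userspace sketch: the item list, destroy_all() and DESTROY_BATCH_SIZE
names are hypothetical, and a pthread mutex plus sched_yield() stand in for the
queue spinlock and cond_resched().

/* Sketch of batched lock release while tearing down a long list. */
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

#define DESTROY_BATCH_SIZE 64

struct item {
	struct item *next;
};

static struct item *item_list;          /* stands in for q->blkg_list */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void destroy_all(void)
{
	int count = DESTROY_BATCH_SIZE;

restart:
	pthread_mutex_lock(&list_lock);
	while (item_list) {
		struct item *it = item_list;

		item_list = it->next;
		free(it);

		/*
		 * Drop the lock after each batch so other lockers (and the
		 * scheduler) get a chance to run, then restart the walk.
		 */
		if (!(--count)) {
			count = DESTROY_BATCH_SIZE;
			pthread_mutex_unlock(&list_lock);
			sched_yield();
			goto restart;
		}
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	/* Build a long list, then tear it down in batches. */
	for (int i = 0; i < 1000000; i++) {
		struct item *it = malloc(sizeof(*it));

		it->next = item_list;
		item_list = it;
	}
	destroy_all();
	return 0;
}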
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20210707015649.1929797-1-yukuai3@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d80c228d
@@ -56,6 +56,8 @@ static LIST_HEAD(all_blkcgs);	/* protected by blkcg_pol_mutex */
 
 bool blkcg_debug_stats = false;
 static struct workqueue_struct *blkcg_punt_bio_wq;
 
+#define BLKG_DESTROY_BATCH_SIZE	64
+
 static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
@@ -422,7 +424,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
+	int count = BLKG_DESTROY_BATCH_SIZE;
 
+restart:
 	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
@@ -430,6 +434,17 @@ static void blkg_destroy_all(struct request_queue *q)
 		spin_lock(&blkcg->lock);
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
+
+		/*
+		 * in order to avoid holding the spin lock for too long, release
+		 * it when a batch of blkgs are destroyed.
+		 */
+		if (!(--count)) {
+			count = BLKG_DESTROY_BATCH_SIZE;
+			spin_unlock_irq(&q->queue_lock);
+			cond_resched();
+			goto restart;
+		}
 	}
 
 	q->root_blkg = NULL;