Commit dae590a6 authored by Waiman Long, committed by Jens Axboe

blk-cgroup: Flush stats at blkgs destruction path

As noted by Michal, the blkg_iostat_set's in the lockless list
hold references to blkg's to protect against their removal. Those
blkg's, in turn, hold references to the blkcg. When a cgroup is being
destroyed, cgroup_rstat_flush() is only called from css_release_work_fn(),
which runs when the blkcg reference count reaches 0. This circular
dependency prevents the blkcg from being freed until some other event
causes cgroup_rstat_flush() to be called to flush out the pending blkcg stats.
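
For context, the lockless lists referred to here are the per-cpu lists added by the
parent commit (3b8cc629). Below is a trimmed sketch of the structures involved, with
the field layout abridged from block/blk-cgroup.h and shown only for orientation; it
is not part of this patch.

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq			*blkg;		/* a queued entry pins this blkg */
	struct llist_node		lnode;		/* entry on the per-cpu blkcg->lhead list */
	int				lqueued;	/* already queued on the lockless list? */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

struct blkcg {
	struct cgroup_subsys_state	css;		/* freed only once its refcount drops to 0 */
	spinlock_t			lock;
	struct hlist_head		blkg_list;
	struct llist_head __percpu	*lhead;		/* per-cpu lists of pending blkg_iostat_set's */
	/* ... */
};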

To prevent this delayed blkcg removal, add a new cgroup_rstat_css_cpu_flush()
function to flush the stats for a given css and cpu, and call it from the blkgs
destruction path, blkcg_destroy_blkgs(), whenever there are still pending stats
to be flushed. This ensures that the blkcg reference count can reach 0 as soon
as possible.
Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20221105005902.407297-4-longman@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3b8cc629
@@ -1084,10 +1084,12 @@ struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	int cpu;

	might_sleep();

	css_get(&blkcg->css);
	spin_lock_irq(&blkcg->lock);
	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
@@ -1110,6 +1112,17 @@ static void blkcg_destroy_blkgs(struct blkcg *blkcg)
	}

	spin_unlock_irq(&blkcg->lock);

	/*
	 * Flush all the non-empty percpu lockless lists.
	 */
	for_each_possible_cpu(cpu) {
		struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);

		if (!llist_empty(lhead))
			cgroup_rstat_css_cpu_flush(&blkcg->css, cpu);
	}

	css_put(&blkcg->css);
}

/**
...
@@ -766,6 +766,7 @@ void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
void cgroup_rstat_css_cpu_flush(struct cgroup_subsys_state *css, int cpu);

/*
 * Basic resource stats.
...
@@ -281,6 +281,26 @@ void cgroup_rstat_flush_release(void)
	spin_unlock_irq(&cgroup_rstat_lock);
}
/**
 * cgroup_rstat_css_cpu_flush - flush stats for the given css and cpu
 * @css: target css to be flushed
 * @cpu: the cpu that holds the stats to be flushed
 *
 * A lightweight rstat flush operation for a given css and cpu.
 * Only the cpu_lock is being held for mutual exclusion; the cgroup_rstat_lock
 * isn't used.
 */
void cgroup_rstat_css_cpu_flush(struct cgroup_subsys_state *css, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);

	raw_spin_lock_irq(cpu_lock);
	rcu_read_lock();
	css->ss->css_rstat_flush(css, cpu);
	rcu_read_unlock();
	raw_spin_unlock_irq(cpu_lock);
}

int cgroup_rstat_init(struct cgroup *cgrp)
{
	int cpu;
...
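
For reference, the css->ss->css_rstat_flush() call above resolves to blkcg_rstat_flush()
for the io controller. Below is a trimmed sketch of that existing wiring in
block/blk-cgroup.c, shown only for orientation; it is not part of this patch.

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc	 = blkcg_css_alloc,
	.css_free	 = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	/* ... */
};

blkcg_rstat_flush() drains the per-cpu lockless list for the given cpu and drops the
blkg references queued on it, which is what finally allows the blkcg reference count
to reach 0.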