Commit c58632b3 authored by Tejun Heo

cgroup: Rename stat to rstat

stat is too generic a name and ends up causing subtle confusion.
It'll be made generic so that controllers can plug into it, which will
make the problem worse.  Let's rename it to something more specific -
cgroup_rstat for cgroup recursive stat.

This patch does the following renames.  No other changes.

* cpu_stat	-> rstat_cpu
* stat		-> rstat
* ?cstat	-> ?rstatc

Note that the renames are selective.  The identifiers left unrenamed are the
ones which implement basic resource statistics on top of rstat.  This will be
further cleaned up in the following patches.
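
For illustration, the ?cstat -> ?rstatc mapping covers local variable names.
A hypothetical call site (the variable names and the per_cpu_ptr() access are
examples of the pattern, not lines taken from this patch) reads as follows
before and after:

	/* before: a per-cpu stat pointer was conventionally named "cstat"
	 * ("pcstat" for the parent's), after struct cgroup_cpu_stat */
	struct cgroup_cpu_stat *cstat = per_cpu_ptr(cgrp->cpu_stat, cpu);

	/* after: the same local becomes "rstatc" ("prstatc" for the
	 * parent's), after struct cgroup_rstat_cpu */
	struct cgroup_rstat_cpu *rstatc = per_cpu_ptr(cgrp->rstat_cpu, cpu);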
Signed-off-by: Tejun Heo <tj@kernel.org>
parent a5c2b93f
@@ -259,11 +259,11 @@ struct css_set {
 };
 
 /*
- * cgroup basic resource usage statistics. Accounting is done per-cpu in
- * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
- * reads.
+ * rstat - cgroup scalable recursive statistics. Accounting is done
+ * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * hierarchy on reads.
  *
- * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
+ * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
  * linked into the updated tree. On the following read, propagation only
  * considers and consumes the updated tree. This makes reading O(the
  * number of descendants which have been active since last read) instead of
@@ -274,7 +274,7 @@ struct css_set {
  * become very expensive. By propagating selectively, increasing reading
  * frequency decreases the cost of each read.
  */
-struct cgroup_cpu_stat {
+struct cgroup_rstat_cpu {
 	/*
 	 * ->sync protects all the current counters. These are the only
 	 * fields which get updated in the hot path.
@@ -297,7 +297,7 @@ struct cgroup_cpu_stat {
 	 * to the cgroup makes it unnecessary for each per-cpu struct to
 	 * point back to the associated cgroup.
 	 *
-	 * Protected by per-cpu cgroup_cpu_stat_lock.
+	 * Protected by per-cpu cgroup_rstat_cpu_lock.
 	 */
 	struct cgroup *updated_children;	/* terminated by self cgroup */
 	struct cgroup *updated_next;		/* NULL iff not on the list */
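
To make the comment above concrete, here is a rough sketch of how a stat
update could link a cgroup into its parent's updated tree using the two
fields above. It is an illustration only: the function name is hypothetical,
the per_cpu_ptr() access is an assumption about how ->rstat_cpu is reached,
and the real update path (in the stat.c/rstat.c changes not shown in this
excerpt) additionally takes the per-cpu cgroup_rstat_cpu_lock.

	/* Illustrative only: walk from the updated cgroup toward the root,
	 * linking each cgroup onto its parent's ->updated_children list via
	 * ->updated_next.  Stop as soon as a cgroup is already linked, since
	 * its ancestors must then be linked as well.  Locking and the
	 * "terminated by self cgroup" convention are glossed over. */
	static void example_rstat_updated(struct cgroup *cgrp, int cpu)
	{
		for (; cgroup_parent(cgrp); cgrp = cgroup_parent(cgrp)) {
			struct cgroup *parent = cgroup_parent(cgrp);
			struct cgroup_rstat_cpu *rstatc = per_cpu_ptr(cgrp->rstat_cpu, cpu);
			struct cgroup_rstat_cpu *prstatc = per_cpu_ptr(parent->rstat_cpu, cpu);

			/* already on the updated tree: ancestors are linked too */
			if (rstatc->updated_next)
				break;

			rstatc->updated_next = prstatc->updated_children;
			prstatc->updated_children = cgrp;
		}
	}

A later read then only has to walk this updated tree instead of every
descendant, which is what makes reads O(number of recently active
descendants).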
@@ -408,8 +408,10 @@ struct cgroup {
 	 */
 	struct cgroup *dom_cgrp;
 
+	/* per-cpu recursive resource statistics */
+	struct cgroup_rstat_cpu __percpu *rstat_cpu;
+
 	/* cgroup basic resource statistics */
-	struct cgroup_cpu_stat __percpu *cpu_stat;
 	struct cgroup_stat pending_stat;	/* pending from children */
 	struct cgroup_stat stat;
......
@@ -201,13 +201,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
 int cgroup_task_count(const struct cgroup *cgrp);
 
 /*
- * stat.c
+ * rstat.c
  */
-void cgroup_stat_flush(struct cgroup *cgrp);
-int cgroup_stat_init(struct cgroup *cgrp);
-void cgroup_stat_exit(struct cgroup *cgrp);
+void cgroup_rstat_flush(struct cgroup *cgrp);
+int cgroup_rstat_init(struct cgroup *cgrp);
+void cgroup_rstat_exit(struct cgroup *cgrp);
 void cgroup_stat_show_cputime(struct seq_file *seq);
-void cgroup_stat_boot(void);
+void cgroup_rstat_boot(void);
 
 /*
  * namespace.c
......
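
As context for the declarations above, cgroup_rstat_flush() is what a read
side calls to make the lazily propagated counters current before reporting
them. A hypothetical show callback (not part of this patch; seq_css() is the
existing cgroup seq_file helper) might use it roughly like this:

	static int example_stat_show(struct seq_file *seq, void *v)
	{
		struct cgroup *cgrp = seq_css(seq)->cgroup;

		/* make the lazily propagated counters current before reporting */
		cgroup_rstat_flush(cgrp);

		/* ... seq_printf() the now up-to-date aggregated numbers ... */
		return 0;
	}

Note that cgroup_stat_show_cputime() keeps its old name: it is part of the
basic resource statistics built on top of rstat, which the commit message
says will be cleaned up separately.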
@@ -144,14 +144,14 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
 };
 #undef SUBSYS
 
-static DEFINE_PER_CPU(struct cgroup_cpu_stat, cgrp_dfl_root_cpu_stat);
+static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);
 
 /*
  * The default hierarchy, reserved for the subsystems that are otherwise
  * unattached - it never has more than a single cgroup, and all tasks are
  * part of that cgroup.
  */
-struct cgroup_root cgrp_dfl_root = { .cgrp.cpu_stat = &cgrp_dfl_root_cpu_stat };
+struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
 
 /*
@@ -4592,7 +4592,7 @@ static void css_free_rwork_fn(struct work_struct *work)
 			cgroup_put(cgroup_parent(cgrp));
 			kernfs_put(cgrp->kn);
 			if (cgroup_on_dfl(cgrp))
-				cgroup_stat_exit(cgrp);
+				cgroup_rstat_exit(cgrp);
 			kfree(cgrp);
 		} else {
 			/*
@@ -4629,7 +4629,7 @@ static void css_release_work_fn(struct work_struct *work)
 		trace_cgroup_release(cgrp);
 
 		if (cgroup_on_dfl(cgrp))
-			cgroup_stat_flush(cgrp);
+			cgroup_rstat_flush(cgrp);
 
 		for (tcgrp = cgroup_parent(cgrp); tcgrp;
 		     tcgrp = cgroup_parent(tcgrp))
@@ -4817,7 +4817,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 		goto out_free_cgrp;
 
 	if (cgroup_on_dfl(parent)) {
-		ret = cgroup_stat_init(cgrp);
+		ret = cgroup_rstat_init(cgrp);
 		if (ret)
 			goto out_cancel_ref;
 	}
@@ -4882,7 +4882,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_stat_exit:
 	if (cgroup_on_dfl(parent))
-		cgroup_stat_exit(cgrp);
+		cgroup_rstat_exit(cgrp);
 out_cancel_ref:
 	percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
@@ -5275,7 +5275,7 @@ int __init cgroup_init(void)
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
 
-	cgroup_stat_boot();
+	cgroup_rstat_boot();
 
 	/*
 	 * The latency of the synchronize_sched() is too high for cgroups,
......