Commit 0d9cabdc authored by Linus Torvalds

Merge branch 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup changes from Tejun Heo:
 "Out of the 8 commits, one fixes a long-standing locking issue around
  tasklist walking and others are cleanups."

* 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: Walk task list under tasklist_lock in cgroup_enable_task_cg_list
  cgroup: Remove wrong comment on cgroup_enable_task_cg_list()
  cgroup: remove cgroup_subsys argument from callbacks
  cgroup: remove extra calls to find_existing_css_set
  cgroup: replace tasklist_lock with rcu_read_lock
  cgroup: simplify double-check locking in cgroup_attach_proc
  cgroup: move struct cgroup_pidlist out from the header file
  cgroup: remove cgroup_attach_task_current_cg()
parents 701085b2 3ce3230a
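
Most of the churn in the diffs below comes from the "cgroup: remove cgroup_subsys argument from callbacks" commit: each callback is registered by exactly one subsystem, so the explicit cgroup_subsys pointer carried no extra information. A minimal before/after sketch of that change, using a hypothetical controller named "example" rather than any controller touched by this merge:

/* before this series: every callback took its own cgroup_subsys pointer */
static struct cgroup_subsys_state *example_create(struct cgroup_subsys *ss,
                                                  struct cgroup *cgrp);
static void example_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);

/* after this series: the redundant subsystem argument is gone */
static struct cgroup_subsys_state *example_create(struct cgroup *cgrp);
static void example_destroy(struct cgroup *cgrp);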
......@@ -558,8 +558,7 @@ Each subsystem may export the following methods. The only mandatory
methods are create/destroy. Any others that are null are presumed to
be successful no-ops.
struct cgroup_subsys_state *create(struct cgroup_subsys *ss,
struct cgroup *cgrp)
struct cgroup_subsys_state *create(struct cgroup *cgrp)
(cgroup_mutex held by caller)
Called to create a subsystem state object for a cgroup. The
......@@ -574,7 +573,7 @@ identified by the passed cgroup object having a NULL parent (since
it's the root of the hierarchy) and may be an appropriate place for
initialization code.
void destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
void destroy(struct cgroup *cgrp)
(cgroup_mutex held by caller)
The cgroup system is about to destroy the passed cgroup; the subsystem
......@@ -585,7 +584,7 @@ cgroup->parent is still valid. (Note - can also be called for a
newly-created cgroup if an error occurs after this subsystem's
create() method has been called for the new cgroup).
int pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
int pre_destroy(struct cgroup *cgrp);
Called before checking the reference count on each subsystem. This may
be useful for subsystems which have some extra references even if
......@@ -593,8 +592,7 @@ there are not tasks in the cgroup. If pre_destroy() returns error code,
rmdir() will fail with that error code. Because of this behavior, pre_destroy() may be
called multiple times against a cgroup.
int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
(cgroup_mutex held by caller)
Called prior to moving one or more tasks into a cgroup; if the
......@@ -615,8 +613,7 @@ fork. If this method returns 0 (success) then this should remain valid
while the caller holds cgroup_mutex, and it is guaranteed that either
attach() or cancel_attach() will be called in the future.
void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
void cancel_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
(cgroup_mutex held by caller)
Called when a task attach operation has failed after can_attach() has succeeded.
......@@ -625,23 +622,22 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
This will be called only for subsystems whose can_attach() operation has
succeeded. The parameters are identical to can_attach().
void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
void attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
(cgroup_mutex held by caller)
Called after the task has been attached to the cgroup, to allow any
post-attachment activity that requires memory allocations or blocking.
The parameters are identical to can_attach().
void fork(struct cgroup_subsys *ss, struct task_struct *task)
void fork(struct task_struct *task)
Called when a task is forked into a cgroup.
void exit(struct cgroup_subsys *ss, struct task_struct *task)
void exit(struct task_struct *task)
Called during task exit.
int populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
int populate(struct cgroup *cgrp)
(cgroup_mutex held by caller)
Called after creation of a cgroup to allow a subsystem to populate
......@@ -651,7 +647,7 @@ include/linux/cgroup.h for details). Note that although this
method can return an error code, the error code is currently not
always handled well.
void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
void post_clone(struct cgroup *cgrp)
(cgroup_mutex held by caller)
Called during cgroup_create() to do any parameter
......@@ -659,7 +655,7 @@ initialization which might be required before a task could attach. For
example in cpusets, no task may attach before 'cpus' and 'mems' are set
up.
void bind(struct cgroup_subsys *ss, struct cgroup *root)
void bind(struct cgroup *root)
(cgroup_mutex and ss->hierarchy_mutex held by caller)
Called when a cgroup subsystem is rebound to a different hierarchy
......
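Pulling the updated Documentation/cgroups/cgroups.txt prototypes together, a minimal controller skeleton written against the new signatures might look roughly like the sketch below. The "example" name, its state structure, and example_subsys_id are hypothetical; a real controller also needs a SUBSYS() entry in include/linux/cgroup_subsys.h, which is what generates the real *_subsys_id values.

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical per-cgroup state; only the embedded css is required. */
struct example_state {
        struct cgroup_subsys_state css;
};

static inline struct example_state *cgrp_example(struct cgroup *cgrp)
{
        return container_of(cgroup_subsys_state(cgrp, example_subsys_id),
                            struct example_state, css);
}

static struct cgroup_subsys_state *example_create(struct cgroup *cgrp)
{
        struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
                return ERR_PTR(-ENOMEM);
        return &st->css;        /* cgroup core keeps track of the css */
}

static void example_destroy(struct cgroup *cgrp)
{
        kfree(cgrp_example(cgrp));
}

static int example_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        /*
         * A real controller would inspect the tasks being moved here,
         * starting from cgroup_taskset_first(tset), and return an error
         * to veto the migration; returning 0 lets it proceed.
         */
        return 0;
}

struct cgroup_subsys example_subsys = {
        .name           = "example",
        .create         = example_create,
        .destroy        = example_destroy,
        .can_attach     = example_can_attach,
        .subsys_id      = example_subsys_id,    /* hypothetical id */
};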
......@@ -28,13 +28,10 @@ static LIST_HEAD(blkio_list);
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *);
static int blkiocg_can_attach(struct cgroup *, struct cgroup_taskset *);
static void blkiocg_attach(struct cgroup *, struct cgroup_taskset *);
static void blkiocg_destroy(struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
......@@ -1548,7 +1545,7 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
static void blkiocg_destroy(struct cgroup *cgroup)
{
struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
unsigned long flags;
......@@ -1598,8 +1595,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
struct blkio_cgroup *blkcg;
struct cgroup *parent = cgroup->parent;
......@@ -1628,8 +1624,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
struct io_context *ioc;
......@@ -1648,8 +1643,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
return ret;
}
static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
struct io_context *ioc;
......
......@@ -160,38 +160,6 @@ enum {
CGRP_CLONE_CHILDREN,
};
/* which pidlist file are we talking about? */
enum cgroup_filetype {
CGROUP_FILE_PROCS,
CGROUP_FILE_TASKS,
};
/*
* A pidlist is a list of pids that virtually represents the contents of one
* of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
* a pair (one each for procs, tasks) for each pid namespace that's relevant
* to the cgroup.
*/
struct cgroup_pidlist {
/*
* used to find which pidlist is wanted. doesn't change as long as
* this particular list stays in the list.
*/
struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
/* array of xids */
pid_t *list;
/* how many elements the above list has */
int length;
/* how many files are using the current array */
int use_count;
/* each of these stored in a list by its cgroup */
struct list_head links;
/* pointer to the cgroup we belong to, for list removal purposes */
struct cgroup *owner;
/* protects the other fields */
struct rw_semaphore mutex;
};
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
......@@ -484,23 +452,18 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset);
void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cgrp, struct task_struct *task);
int (*populate)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
struct cgroup_subsys_state *(*create)(struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup *cgrp);
void (*destroy)(struct cgroup *cgrp);
int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
void (*fork)(struct task_struct *task);
void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
struct task_struct *task);
int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*post_clone)(struct cgroup *cgrp);
void (*bind)(struct cgroup *root);
int subsys_id;
int active;
......@@ -602,11 +565,6 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
{
return cgroup_attach_task_all(current, tsk);
}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
* if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
......@@ -669,10 +627,6 @@ static inline int cgroup_attach_task_all(struct task_struct *from,
{
return 0;
}
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
}
#endif /* !CONFIG_CGROUPS */
......
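The only functional removal in the include/linux/cgroup.h hunk above is cgroup_attach_task_current_cg(). As the deleted inline shows, it was a one-line wrapper, so any caller that still wants that behavior can call the remaining helper directly; a sketch, with a hypothetical local wrapper name:

/* Equivalent of the removed helper: attach @tsk to all of current's
 * cgroups, one per mounted hierarchy. */
static inline int attach_tsk_to_current_cg(struct task_struct *tsk)
{
        return cgroup_attach_task_all(current, tsk);
}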
......@@ -69,7 +69,7 @@ struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct cgroup *cgrp);
#else
static inline
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
......@@ -77,7 +77,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
return 0;
}
static inline
void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
{
}
#endif
......@@ -871,8 +871,7 @@ struct proto {
*/
int (*init_cgroup)(struct cgroup *cgrp,
struct cgroup_subsys *ss);
void (*destroy_cgroup)(struct cgroup *cgrp,
struct cgroup_subsys *ss);
void (*destroy_cgroup)(struct cgroup *cgrp);
struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};
......
......@@ -13,7 +13,7 @@ struct tcp_memcontrol {
struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
void tcp_destroy_cgroup(struct cgroup *cgrp);
unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
#endif /* _TCP_MEMCG_H */
......@@ -128,8 +128,7 @@ struct cgroup_subsys freezer_subsys;
* task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
* sighand->siglock
*/
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
struct cgroup *cgroup)
static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
{
struct freezer *freezer;
......@@ -142,8 +141,7 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
return &freezer->css;
}
static void freezer_destroy(struct cgroup_subsys *ss,
struct cgroup *cgroup)
static void freezer_destroy(struct cgroup *cgroup)
{
struct freezer *freezer = cgroup_freezer(cgroup);
......@@ -164,8 +162,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
* a write to that file racing against an attach, and hence the
* can_attach() result will remain valid until the attach completes.
*/
static int freezer_can_attach(struct cgroup_subsys *ss,
struct cgroup *new_cgroup,
static int freezer_can_attach(struct cgroup *new_cgroup,
struct cgroup_taskset *tset)
{
struct freezer *freezer;
......@@ -185,7 +182,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
return 0;
}
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
static void freezer_fork(struct task_struct *task)
{
struct freezer *freezer;
......
......@@ -1399,8 +1399,7 @@ static nodemask_t cpuset_attach_nodemask_from;
static nodemask_t cpuset_attach_nodemask_to;
/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct cpuset *cs = cgroup_cs(cgrp);
struct task_struct *task;
......@@ -1436,8 +1435,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
return 0;
}
static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct mm_struct *mm;
struct task_struct *task;
......@@ -1833,8 +1831,7 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
* (and likewise for mems) to the new cgroup. Called with cgroup_mutex
* held.
*/
static void cpuset_post_clone(struct cgroup_subsys *ss,
struct cgroup *cgroup)
static void cpuset_post_clone(struct cgroup *cgroup)
{
struct cgroup *parent, *child;
struct cpuset *cs, *parent_cs;
......@@ -1857,13 +1854,10 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,
/*
* cpuset_create - create a cpuset
* ss: cpuset cgroup subsystem
* cont: control group that the new cpuset will be part of
*/
static struct cgroup_subsys_state *cpuset_create(
struct cgroup_subsys *ss,
struct cgroup *cont)
static struct cgroup_subsys_state *cpuset_create(struct cgroup *cont)
{
struct cpuset *cs;
struct cpuset *parent;
......@@ -1902,7 +1896,7 @@ static struct cgroup_subsys_state *cpuset_create(
* will call async_rebuild_sched_domains().
*/
static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
static void cpuset_destroy(struct cgroup *cont)
{
struct cpuset *cs = cgroup_cs(cont);
......
......@@ -7147,8 +7147,7 @@ static int __init perf_event_sysfs_init(void)
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(
struct cgroup_subsys *ss, struct cgroup *cont)
static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
{
struct perf_cgroup *jc;
......@@ -7165,8 +7164,7 @@ static struct cgroup_subsys_state *perf_cgroup_create(
return &jc->css;
}
static void perf_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
static void perf_cgroup_destroy(struct cgroup *cont)
{
struct perf_cgroup *jc;
jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
......@@ -7182,8 +7180,7 @@ static int __perf_cgroup_move(void *info)
return 0;
}
static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup_taskset *tset)
static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *task;
......@@ -7191,8 +7188,8 @@ static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
task_function_call(task, __perf_cgroup_move, task);
}
static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cgrp, struct task_struct *task)
static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
......
......@@ -7571,8 +7571,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
struct task_group, css);
}
static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
{
struct task_group *tg, *parent;
......@@ -7589,15 +7588,14 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
return &tg->css;
}
static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
static void cpu_cgroup_destroy(struct cgroup *cgrp)
{
struct task_group *tg = cgroup_tg(cgrp);
sched_destroy_group(tg);
}
static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
static int cpu_cgroup_can_attach(struct cgroup *cgrp,
struct cgroup_taskset *tset)
{
struct task_struct *task;
......@@ -7615,7 +7613,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
return 0;
}
static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
static void cpu_cgroup_attach(struct cgroup *cgrp,
struct cgroup_taskset *tset)
{
struct task_struct *task;
......@@ -7625,8 +7623,8 @@ static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
}
static void
cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cgrp, struct task_struct *task)
cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
......@@ -7976,8 +7974,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
*/
/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
struct cgroup_subsys *ss, struct cgroup *cgrp)
static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
{
struct cpuacct *ca;
......@@ -8007,8 +8004,7 @@ static struct cgroup_subsys_state *cpuacct_create(
}
/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
static void cpuacct_destroy(struct cgroup *cgrp)
{
struct cpuacct *ca = cgroup_ca(cgrp);
......
......@@ -4602,10 +4602,9 @@ static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
return mem_cgroup_sockets_init(cont, ss);
};
static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
static void kmem_cgroup_destroy(struct cgroup *cont)
{
mem_cgroup_sockets_destroy(cont, ss);
mem_cgroup_sockets_destroy(cont);
}
#else
static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
......@@ -4613,8 +4612,7 @@ static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
return 0;
}
static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
static void kmem_cgroup_destroy(struct cgroup *cont)
{
}
#endif
......@@ -4927,7 +4925,7 @@ static int mem_cgroup_soft_limit_tree_init(void)
}
static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
mem_cgroup_create(struct cgroup *cont)
{
struct mem_cgroup *memcg, *parent;
long error = -ENOMEM;
......@@ -4989,20 +4987,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
return ERR_PTR(error);
}
static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
static int mem_cgroup_pre_destroy(struct cgroup *cont)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
return mem_cgroup_force_empty(memcg, false);
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
static void mem_cgroup_destroy(struct cgroup *cont)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
kmem_cgroup_destroy(ss, cont);
kmem_cgroup_destroy(cont);
mem_cgroup_put(memcg);
}
......@@ -5339,9 +5335,8 @@ static void mem_cgroup_clear_mc(void)
mem_cgroup_end_move(from);
}
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
struct cgroup_taskset *tset)
static int mem_cgroup_can_attach(struct cgroup *cgroup,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
......@@ -5379,9 +5374,8 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
return ret;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
}
......@@ -5496,9 +5490,8 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
up_read(&mm->mmap_sem);
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
struct cgroup_taskset *tset)
static void mem_cgroup_move_task(struct cgroup *cont,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
struct mm_struct *mm = get_task_mm(p);
......@@ -5513,20 +5506,17 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
struct cgroup_taskset *tset)
static int mem_cgroup_can_attach(struct cgroup *cgroup,
struct cgroup_taskset *tset)
{
return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
struct cgroup *cgroup,
struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
struct cgroup_taskset *tset)
static void mem_cgroup_move_task(struct cgroup *cont,
struct cgroup_taskset *tset)
{
}
#endif
......
......@@ -23,9 +23,8 @@
#include <net/sock.h>
#include <net/netprio_cgroup.h>
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
struct cgroup_subsys net_prio_subsys = {
......@@ -121,8 +120,7 @@ static void update_netdev_tables(void)
rtnl_unlock();
}
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
struct cgroup *cgrp)
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
{
struct cgroup_netprio_state *cs;
int ret;
......@@ -146,7 +144,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
return &cs->css;
}
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
static void cgrp_destroy(struct cgroup *cgrp)
{
struct cgroup_netprio_state *cs;
struct net_device *dev;
......
......@@ -160,19 +160,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
out:
list_for_each_entry_continue_reverse(proto, &proto_list, node)
if (proto->destroy_cgroup)
proto->destroy_cgroup(cgrp, ss);
proto->destroy_cgroup(cgrp);
mutex_unlock(&proto_list_mutex);
return ret;
}
void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
{
struct proto *proto;
mutex_lock(&proto_list_mutex);
list_for_each_entry_reverse(proto, &proto_list, node)
if (proto->destroy_cgroup)
proto->destroy_cgroup(cgrp, ss);
proto->destroy_cgroup(cgrp);
mutex_unlock(&proto_list_mutex);
}
#endif
......
......@@ -94,7 +94,7 @@ int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
}
EXPORT_SYMBOL(tcp_init_cgroup);
void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
void tcp_destroy_cgroup(struct cgroup *cgrp)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct cg_proto *cg_proto;
......
......@@ -22,9 +22,8 @@
#include <net/sock.h>
#include <net/cls_cgroup.h>
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
struct cgroup_subsys net_cls_subsys = {
......@@ -51,8 +50,7 @@ static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
struct cgroup_cls_state, css);
}
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
struct cgroup *cgrp)
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
{
struct cgroup_cls_state *cs;
......@@ -66,7 +64,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
return &cs->css;
}
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
static void cgrp_destroy(struct cgroup *cgrp)
{
kfree(cgrp_cls_state(cgrp));
}
......
......@@ -61,8 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
struct cgroup_subsys devices_subsys;
static int devcgroup_can_attach(struct cgroup_subsys *ss,
struct cgroup *new_cgrp, struct cgroup_taskset *set)
static int devcgroup_can_attach(struct cgroup *new_cgrp,
struct cgroup_taskset *set)
{
struct task_struct *task = cgroup_taskset_first(set);
......@@ -156,8 +156,7 @@ static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
/*
* called from kernel/cgroup.c with cgroup_lock() held.
*/
static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
struct cgroup *cgroup)
static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
{
struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
struct cgroup *parent_cgroup;
......@@ -195,8 +194,7 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
return &dev_cgroup->css;
}
static void devcgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cgroup)
static void devcgroup_destroy(struct cgroup *cgroup)
{
struct dev_cgroup *dev_cgroup;
struct dev_whitelist_item *wh, *tmp;
......