Commit b8dadcb5 authored by Li Zefan, committed by Tejun Heo

cpuset: use rcu_read_lock() to protect task_cs()

We no longer use task_lock() to protect tsk->cgroups.
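
As a minimal sketch of the conversion this patch applies (illustrative
only; cpuset_reader() is a hypothetical caller, not code from this
patch): task_cs() resolves a task's cpuset through an RCU-protected
css pointer, so an RCU read-side critical section is sufficient and
the per-task spinlock can be dropped:

	/* Hypothetical caller, shown only to illustrate the pattern. */
	static void cpuset_reader(struct task_struct *tsk)
	{
		struct cpuset *cs;

		rcu_read_lock();		/* was: task_lock(tsk) */
		cs = task_cs(tsk);		/* safe under rcu_read_lock() */
		/* ... read cs fields, e.g. cs->mems_allowed ... */
		rcu_read_unlock();		/* was: task_unlock(tsk) */
	}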
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent a60bed29
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2239,10 +2239,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	struct cpuset *cpus_cs;
 
 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
 	guarantee_online_cpus(cpus_cs, pmask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 }
@@ -2295,10 +2295,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	nodemask_t mask;
 
 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 	guarantee_online_mems(mems_cs, &mask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 
 	return mask;
@@ -2414,9 +2414,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
 	mutex_lock(&callback_mutex);
-	task_lock(current);
+	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
-	task_unlock(current);
+	rcu_read_unlock();
 	allowed = node_isset(node, cs->mems_allowed);
 	mutex_unlock(&callback_mutex);
@@ -2543,24 +2543,26 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
  * @task: pointer to task_struct of some task.
  *
  * Description: Prints @task's name, cpuset name, and cached copy of its
- * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
- * dereferencing task_cs(task).
+ * mems_allowed to the kernel log.
  */
 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 {
 	/* Statically allocated to prevent using excess stack. */
 	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 	static DEFINE_SPINLOCK(cpuset_buffer_lock);
-	struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
+	struct cgroup *cgrp;
 
 	spin_lock(&cpuset_buffer_lock);
+	rcu_read_lock();
+	cgrp = task_cs(tsk)->css.cgroup;
 
 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
 			   tsk->mems_allowed);
 	printk(KERN_INFO "%s cpuset=", tsk->comm);
 	pr_cont_cgroup_name(cgrp);
 	pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
+	rcu_read_unlock();
 	spin_unlock(&cpuset_buffer_lock);
 }
@@ -2592,9 +2594,9 @@ int cpuset_memory_pressure_enabled __read_mostly;
 
 void __cpuset_memory_pressure_bump(void)
 {
-	task_lock(current);
+	rcu_read_lock();
 	fmeter_markevent(&task_cs(current)->fmeter);
-	task_unlock(current);
+	rcu_read_unlock();
 }
 
 #ifdef CONFIG_PROC_PID_CPUSET