Commit 5407a562 authored by Phil Carmody, committed by Linus Torvalds

mm: remove unnecessary use of atomic

The bottom 4 hunks are atomically changing memory to which there are no
aliases as it's freshly allocated, so there's no need to use atomic
operations.
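
To make this concrete, here is a minimal userspace sketch of the same pattern, not the kernel code itself: the struct and function names are hypothetical, and a C11 release store stands in for rcu_assign_pointer(). The point is that the freshly allocated object is private to the allocating thread until the pointer is published, so plain stores are all the initialization needs.

/* publish_sketch.c - illustrative only; names are hypothetical.
 * A freshly allocated object has no aliases until it is published,
 * so its fields can be initialized with plain (non-atomic) stores.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct thresholds {
	int current_threshold;
	unsigned int size;
};

/* Readers load this pointer; the release store below plays the role
 * of rcu_assign_pointer() in the kernel code.
 */
static struct thresholds *_Atomic published;

int publish_new(unsigned int size)
{
	struct thresholds *new = malloc(sizeof(*new));

	if (!new)
		return -1;

	/* No other thread can see 'new' yet, so plain assignment is
	 * enough; atomic_set()/atomic_inc() would add nothing here.
	 */
	new->current_threshold = -1;
	new->size = size;

	/* Publication is where ordering matters: the release store makes
	 * the initialized fields visible before the pointer itself.
	 */
	atomic_store_explicit(&published, new, memory_order_release);
	return 0;
}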

The other hunks are just atomic_read and atomic_set, and do not involve
any read-modify-write.  The use of atomic_{read,set} doesn't prevent a
read/write or write/write race, so if a race were possible (I'm not saying
one is), then it would still be there even with atomic_set.
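
The second point can be demonstrated with a small userspace program, again only an analogy using C11 atomics rather than the kernel's atomic_t, with arbitrary thread and iteration counts chosen for the demo. Each load and store is individually atomic, yet the combined read-modify-write still races and increments are lost, which is why replacing atomic_read()/atomic_set() with plain accesses changes nothing about the race behaviour.

/* race_sketch.c - illustrative only.
 * Two threads increment a counter via a separate atomic load and store,
 * mirroring atomic_read() followed by atomic_set(). The individual
 * accesses are atomic, but the combined update is not, so updates
 * are lost. Build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERATIONS 1000000

static atomic_int counter;

static void *bump(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERATIONS; i++) {
		int v = atomic_load(&counter);   /* like atomic_read() */
		atomic_store(&counter, v + 1);   /* like atomic_set()  */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, bump, NULL);
	pthread_create(&b, NULL, bump, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Typically prints far less than 2 * ITERATIONS. */
	printf("expected %d, got %d\n", 2 * ITERATIONS, atomic_load(&counter));
	return 0;
}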

See:
http://digitalvampire.org/blog/index.php/2007/05/13/atomic-cargo-cults/

Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df64f81b
@@ -152,7 +152,7 @@ struct mem_cgroup_threshold {
 /* For threshold */
 struct mem_cgroup_threshold_ary {
 	/* An array index points to threshold just below usage. */
-	atomic_t current_threshold;
+	int current_threshold;
 	/* Size of entries[] */
 	unsigned int size;
 	/* Array of thresholds */
@@ -3412,7 +3412,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 	 * If it's not true, a threshold was crossed after last
 	 * call of __mem_cgroup_threshold().
 	 */
-	i = atomic_read(&t->current_threshold);
+	i = t->current_threshold;
 
 	/*
 	 * Iterate backward over array of thresholds starting from
@@ -3436,7 +3436,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 		eventfd_signal(t->entries[i].eventfd, 1);
 
 	/* Update current_threshold */
-	atomic_set(&t->current_threshold, i - 1);
+	t->current_threshold = i - 1;
 unlock:
 	rcu_read_unlock();
 }
@@ -3528,7 +3528,7 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
 			compare_thresholds, NULL);
 
 	/* Find current threshold */
-	atomic_set(&thresholds_new->current_threshold, -1);
+	thresholds_new->current_threshold = -1;
 	for (i = 0; i < size; i++) {
 		if (thresholds_new->entries[i].threshold < usage) {
 			/*
@@ -3536,7 +3536,7 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
 			 * until rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
-			atomic_inc(&thresholds_new->current_threshold);
+			++thresholds_new->current_threshold;
 		}
 	}
 
@@ -3607,7 +3607,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	thresholds_new->size = size;
 
 	/* Copy thresholds and find current threshold */
-	atomic_set(&thresholds_new->current_threshold, -1);
+	thresholds_new->current_threshold = -1;
 	for (i = 0, j = 0; i < thresholds->size; i++) {
 		if (thresholds->entries[i].eventfd == eventfd)
 			continue;
@@ -3619,7 +3619,7 @@ static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 			 * until rcu_assign_pointer(), so it's safe to increment
 			 * it here.
 			 */
-			atomic_inc(&thresholds_new->current_threshold);
+			++thresholds_new->current_threshold;
 		}
 		j++;
 	}