Commit 1af8efe9 authored by Michal Hocko's avatar Michal Hocko Committed by Linus Torvalds

memcg: change memcg_oom_mutex to spinlock

memcg_oom_mutex is used to protect memcg OOM path and eventfd interface
for oom_control.  None of the critical sections which it protects sleep
(eventfd_signal works from atomic context and the rest are simple linked
list resp.  oom_lock atomic operations).

Mutex is also too heavyweight for those code paths because it triggers a
lot of scheduling.  It also makes convoying effects more visible
when we have a big number of oom killing because we take the lock
multiple times during mem_cgroup_handle_oom so we have multiple places
where many processes can sleep.
Signed-off-by: default avatarMichal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 79dfdacc
...@@ -1725,7 +1725,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, ...@@ -1725,7 +1725,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
/* /*
* Check OOM-Killer is already running under our hierarchy. * Check OOM-Killer is already running under our hierarchy.
* If someone is running, return false. * If someone is running, return false.
* Has to be called with memcg_oom_mutex * Has to be called with memcg_oom_lock
*/ */
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{ {
...@@ -1770,7 +1770,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) ...@@ -1770,7 +1770,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
} }
/* /*
* Has to be called with memcg_oom_mutex * Has to be called with memcg_oom_lock
*/ */
static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
{ {
...@@ -1802,7 +1802,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem) ...@@ -1802,7 +1802,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
atomic_add_unless(&iter->under_oom, -1, 0); atomic_add_unless(&iter->under_oom, -1, 0);
} }
static DEFINE_MUTEX(memcg_oom_mutex); static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
struct oom_wait_info { struct oom_wait_info {
...@@ -1864,7 +1864,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) ...@@ -1864,7 +1864,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
mem_cgroup_mark_under_oom(mem); mem_cgroup_mark_under_oom(mem);
/* At first, try to OOM lock hierarchy under mem.*/ /* At first, try to OOM lock hierarchy under mem.*/
mutex_lock(&memcg_oom_mutex); spin_lock(&memcg_oom_lock);
locked = mem_cgroup_oom_lock(mem); locked = mem_cgroup_oom_lock(mem);
/* /*
* Even if signal_pending(), we can't quit charge() loop without * Even if signal_pending(), we can't quit charge() loop without
...@@ -1876,7 +1876,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) ...@@ -1876,7 +1876,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
need_to_kill = false; need_to_kill = false;
if (locked) if (locked)
mem_cgroup_oom_notify(mem); mem_cgroup_oom_notify(mem);
mutex_unlock(&memcg_oom_mutex); spin_unlock(&memcg_oom_lock);
if (need_to_kill) { if (need_to_kill) {
finish_wait(&memcg_oom_waitq, &owait.wait); finish_wait(&memcg_oom_waitq, &owait.wait);
...@@ -1885,11 +1885,11 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) ...@@ -1885,11 +1885,11 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
schedule(); schedule();
finish_wait(&memcg_oom_waitq, &owait.wait); finish_wait(&memcg_oom_waitq, &owait.wait);
} }
mutex_lock(&memcg_oom_mutex); spin_lock(&memcg_oom_lock);
if (locked) if (locked)
mem_cgroup_oom_unlock(mem); mem_cgroup_oom_unlock(mem);
memcg_wakeup_oom(mem); memcg_wakeup_oom(mem);
mutex_unlock(&memcg_oom_mutex); spin_unlock(&memcg_oom_lock);
mem_cgroup_unmark_under_oom(mem); mem_cgroup_unmark_under_oom(mem);
...@@ -4553,7 +4553,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp, ...@@ -4553,7 +4553,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
if (!event) if (!event)
return -ENOMEM; return -ENOMEM;
mutex_lock(&memcg_oom_mutex); spin_lock(&memcg_oom_lock);
event->eventfd = eventfd; event->eventfd = eventfd;
list_add(&event->list, &memcg->oom_notify); list_add(&event->list, &memcg->oom_notify);
...@@ -4561,7 +4561,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp, ...@@ -4561,7 +4561,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
/* already in OOM ? */ /* already in OOM ? */
if (atomic_read(&memcg->under_oom)) if (atomic_read(&memcg->under_oom))
eventfd_signal(eventfd, 1); eventfd_signal(eventfd, 1);
mutex_unlock(&memcg_oom_mutex); spin_unlock(&memcg_oom_lock);
return 0; return 0;
} }
...@@ -4575,7 +4575,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, ...@@ -4575,7 +4575,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
BUG_ON(type != _OOM_TYPE); BUG_ON(type != _OOM_TYPE);
mutex_lock(&memcg_oom_mutex); spin_lock(&memcg_oom_lock);
list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
if (ev->eventfd == eventfd) { if (ev->eventfd == eventfd) {
...@@ -4584,7 +4584,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, ...@@ -4584,7 +4584,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
} }
} }
mutex_unlock(&memcg_oom_mutex); spin_unlock(&memcg_oom_lock);
} }
static int mem_cgroup_oom_control_read(struct cgroup *cgrp, static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment