Commit af5679fb authored by Michal Hocko, committed by Linus Torvalds

mm, oom: remove oom_lock from oom_reaper

The oom_reaper has relied on the oom_lock since e2fe1456 ("oom_reaper:
close race with exiting task"), but the lock is no longer really needed:
21292580 ("mm: oom: let oom_reap_task and exit_mmap run concurrently")
removed the serialization with the exit path that was based on the mm
reference count, so the oom_reaper no longer depends on the oom_lock.

Tetsuo argued that at least MMF_OOM_SKIP should be set under the lock to
prevent races where the page allocator fails to grab the freed (reaped)
memory in __alloc_pages_may_oom but then sees the flag and moves on to
another victim.  Although this is possible in principle, let's wait for it
to actually happen in real life before we make the locking more complex
again.

Therefore remove the oom_lock from the oom_reaper paths (both exit_mmap
and oom_reap_task_mm).  The reaper serializes with exit_mmap via mmap_sem
plus the MMF_OOM_SKIP flag; there is no synchronization with the
out_of_memory path now.

[mhocko@kernel.org: oom_reap_task_mm should return false when __oom_reap_task_mm did]
  Link: http://lkml.kernel.org/r/20180724141747.GP28386@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20180719075922.13784-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Suggested-by: David Rientjes <rientjes@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 93065ac7
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3063,9 +3063,7 @@ void exit_mmap(struct mm_struct *mm)
 		 * which clears VM_LOCKED, otherwise the oom reaper cannot
 		 * reliably test it.
 		 */
-		mutex_lock(&oom_lock);
 		(void)__oom_reap_task_mm(mm);
-		mutex_unlock(&oom_lock);
 
 		set_bit(MMF_OOM_SKIP, &mm->flags);
 		down_write(&mm->mmap_sem);
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -535,28 +535,9 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 
 static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 {
-	bool ret = true;
-
-	/*
-	 * We have to make sure to not race with the victim exit path
-	 * and cause premature new oom victim selection:
-	 * oom_reap_task_mm		exit_mm
-	 *   mmget_not_zero
-	 *				  mmput
-	 *				    atomic_dec_and_test
-	 *				  exit_oom_victim
-	 *				[...]
-	 *				out_of_memory
-	 *				  select_bad_process
-	 *				    # no TIF_MEMDIE task selects new victim
-	 *  unmap_page_range # frees some memory
-	 */
-	mutex_lock(&oom_lock);
-
 	if (!down_read_trylock(&mm->mmap_sem)) {
-		ret = false;
 		trace_skip_task_reaping(tsk->pid);
-		goto unlock_oom;
+		return false;
 	}
 
 	/*
@@ -568,7 +549,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
 		up_read(&mm->mmap_sem);
 		trace_skip_task_reaping(tsk->pid);
-		goto unlock_oom;
+		return true;
 	}
 
 	trace_start_task_reaping(tsk->pid);
@@ -576,8 +557,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	/* failed to reap part of the address space. Try again later */
 	if (!__oom_reap_task_mm(mm)) {
 		up_read(&mm->mmap_sem);
-		ret = false;
-		goto unlock_oom;
+		return false;
 	}
 
 	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
@@ -588,9 +568,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	up_read(&mm->mmap_sem);
 
 	trace_finish_task_reaping(tsk->pid);
-unlock_oom:
-	mutex_unlock(&oom_lock);
-	return ret;
+	return true;
 }
 
 #define MAX_OOM_REAP_RETRIES 10