Commit d516904b authored by Xiao Guangrong's avatar Xiao Guangrong Committed by Linus Torvalds

thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan

There are two pre-alloc operations in these two functions; the difference is:
- it is allowed to sleep if the page allocation fails in khugepaged_loop
- it exits immediately if the page allocation fails in khugepaged_do_scan

Actually, in khugepaged_do_scan, we can allow the pre-alloc to sleep on
the first failure; the pre-alloc operation in khugepaged_loop can then be removed.
Signed-off-by: default avatarXiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 9817626e
...@@ -2222,10 +2222,40 @@ static int khugepaged_wait_event(void) ...@@ -2222,10 +2222,40 @@ static int khugepaged_wait_event(void)
kthread_should_stop(); kthread_should_stop();
} }
/*
 * NOTE(review): side-by-side diff rendering collapsed into one line below --
 * the left column is the old khugepaged_do_scan() signature being changed,
 * the right column is the new khugepaged_alloc_sleep() helper being added.
 */
static void khugepaged_do_scan(struct page **hpage) static void khugepaged_alloc_sleep(void)
{
/*
 * Freezable sleep for khugepaged_alloc_sleep_millisecs; the "false"
 * condition means only the timeout (or freezing) ends the wait.
 */
wait_event_freezable_timeout(khugepaged_wait, false,
msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifndef CONFIG_NUMA
/*
 * Allocate a huge page for collapse, retrying while khugepaged is enabled.
 *
 * *wait acts as a one-shot throttle shared with the caller: on the first
 * allocation failure (*wait still true) we clear it, sleep via
 * khugepaged_alloc_sleep(), and retry; on a subsequent failure (*wait
 * already false) we give up and return NULL immediately.
 */
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
struct page *hpage;
do {
hpage = alloc_hugepage(khugepaged_defrag());
if (!hpage) {
/* Account the failed allocation attempt. */
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
if (!*wait)
return NULL;
/* Sleep only once; later failures bail out above. */
*wait = false;
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
} while (unlikely(!hpage) && likely(khugepaged_enabled()));
return hpage;
}
#endif
static void khugepaged_do_scan(void)
{ {
struct page *hpage = NULL;
unsigned int progress = 0, pass_through_head = 0; unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = khugepaged_pages_to_scan; unsigned int pages = khugepaged_pages_to_scan;
bool wait = true;
barrier(); /* write khugepaged_pages_to_scan to local stack */ barrier(); /* write khugepaged_pages_to_scan to local stack */
...@@ -2233,17 +2263,18 @@ static void khugepaged_do_scan(struct page **hpage) ...@@ -2233,17 +2263,18 @@ static void khugepaged_do_scan(struct page **hpage)
cond_resched(); cond_resched();
#ifndef CONFIG_NUMA #ifndef CONFIG_NUMA
if (!*hpage) { if (!hpage)
*hpage = alloc_hugepage(khugepaged_defrag()); hpage = khugepaged_alloc_hugepage(&wait);
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED); if (unlikely(!hpage))
break;
#else
if (IS_ERR(hpage)) {
if (!wait)
break; break;
} wait = false;
count_vm_event(THP_COLLAPSE_ALLOC); khugepaged_alloc_sleep();
} }
#else
if (IS_ERR(*hpage))
break;
#endif #endif
if (unlikely(kthread_should_stop() || freezing(current))) if (unlikely(kthread_should_stop() || freezing(current)))
...@@ -2255,37 +2286,16 @@ static void khugepaged_do_scan(struct page **hpage) ...@@ -2255,37 +2286,16 @@ static void khugepaged_do_scan(struct page **hpage)
if (khugepaged_has_work() && if (khugepaged_has_work() &&
pass_through_head < 2) pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress, progress += khugepaged_scan_mm_slot(pages - progress,
hpage); &hpage);
else else
progress = pages; progress = pages;
spin_unlock(&khugepaged_mm_lock); spin_unlock(&khugepaged_mm_lock);
} }
}
static void khugepaged_alloc_sleep(void) if (!IS_ERR_OR_NULL(hpage))
{ put_page(hpage);
wait_event_freezable_timeout(khugepaged_wait, false,
msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
} }
#ifndef CONFIG_NUMA
/*
 * NOTE(review): this is the pre-patch version being REMOVED by this commit.
 * Unlike the new khugepaged_alloc_hugepage(bool *wait) added above, it
 * unconditionally sleeps and retries on every allocation failure (no
 * one-shot "wait" throttle), which is why it lived in khugepaged_loop.
 */
static struct page *khugepaged_alloc_hugepage(void)
{
struct page *hpage;
do {
hpage = alloc_hugepage(khugepaged_defrag());
if (!hpage) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
} while (unlikely(!hpage) &&
likely(khugepaged_enabled()));
return hpage;
}
#endif
static void khugepaged_wait_work(void) static void khugepaged_wait_work(void)
{ {
try_to_freeze(); try_to_freeze();
...@@ -2306,25 +2316,8 @@ static void khugepaged_wait_work(void) ...@@ -2306,25 +2316,8 @@ static void khugepaged_wait_work(void)
/*
 * NOTE(review): side-by-side diff collapsed into single lines below.
 * Left column: the old khugepaged_loop(), which pre-allocated the huge page
 * itself (khugepaged_alloc_hugepage() on !NUMA, IS_ERR retry handling on
 * NUMA) before each khugepaged_do_scan(&hpage) call, then dropped the
 * unused page with put_page().
 * Right column: the new loop, which merely alternates khugepaged_do_scan()
 * and khugepaged_wait_work() -- the pre-alloc moved into
 * khugepaged_do_scan() per this commit's intent.
 */
static void khugepaged_loop(void) static void khugepaged_loop(void)
{ {
struct page *hpage = NULL;
while (likely(khugepaged_enabled())) { while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA khugepaged_do_scan();
hpage = khugepaged_alloc_hugepage();
if (unlikely(!hpage))
break;
#else
if (IS_ERR(hpage)) {
khugepaged_alloc_sleep();
hpage = NULL;
}
#endif
khugepaged_do_scan(&hpage);
if (!IS_ERR_OR_NULL(hpage))
put_page(hpage);
khugepaged_wait_work(); khugepaged_wait_work();
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment