Commit 420256ef authored by Xiao Guangrong, committed by Linus Torvalds

thp: release page in page pre-alloc path

If NUMA is enabled, we can release the page in the page pre-alloc path instead;
the CONFIG_NUMA-dependent cleanup code can then be reduced.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d516904b
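Before the diff, here is a minimal user-space sketch of the ownership rule the patch adopts. It is illustrative only: prealloc_or_release(), try_collapse(), and the malloc/free-backed struct page are hypothetical stand-ins, not kernel code. The division of labor is the one read off the diff: the collapse path publishes its page through *hpage and fails with a bare return, and only the pre-alloc step ever releases an unused page, so the consumer needs no #ifdef CONFIG_NUMA cleanup of its own.

#include <stdlib.h>

struct page { char data[64]; };

static struct page *alloc_page(void)    { return malloc(sizeof(struct page)); }
static void put_page(struct page *page) { free(page); }

/* Pre-alloc step: release any page left over from a failed attempt. */
static void prealloc_or_release(struct page **hpage)
{
	if (*hpage) {
		put_page(*hpage);	/* the single release point */
		*hpage = NULL;
	}
}

/* Consumer: no conditional cleanup of its own. */
static void try_collapse(struct page **hpage)
{
	struct page *new_page = alloc_page();

	if (!new_page)
		return;
	*hpage = new_page;	/* publish, so the caller can release it */
	if (rand() % 2)
		return;		/* failure: bare return, page stays in *hpage */

	/* success: in the kernel the page is now in use; consume it here */
	put_page(new_page);
	*hpage = NULL;
}

int main(void)
{
	struct page *hpage = NULL;

	for (int i = 0; i < 8; i++) {
		prealloc_or_release(&hpage);
		try_collapse(&hpage);
	}
	prealloc_or_release(&hpage);	/* drop a leftover from the last pass */
	return 0;
}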
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1873,15 +1873,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	*hpage = new_page;
 	count_vm_event(THP_COLLAPSE_ALLOC);
 #endif
 
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-#ifdef CONFIG_NUMA
-		put_page(new_page);
-#endif
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;
-	}
 
 	/*
 	 * Prevent all access to pagetables with the exception of
@@ -1982,9 +1979,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 	prepare_pmd_huge_pte(pgtable, mm);
 	spin_unlock(&mm->page_table_lock);
 
-#ifndef CONFIG_NUMA
 	*hpage = NULL;
-#endif
+
 	khugepaged_pages_collapsed++;
 out_up_write:
 	up_write(&mm->mmap_sem);
@@ -1992,9 +1988,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 out:
 	mem_cgroup_uncharge_page(new_page);
-#ifdef CONFIG_NUMA
-	put_page(new_page);
-#endif
 	goto out_up_write;
 }
 
@@ -2260,8 +2253,6 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-		cond_resched();
-
 #ifndef CONFIG_NUMA
 		if (!hpage)
 			hpage = khugepaged_alloc_hugepage(&wait);
@@ -2274,8 +2265,12 @@ static void khugepaged_do_scan(void)
 				break;
 			wait = false;
 			khugepaged_alloc_sleep();
+		} else if (hpage) {
+			put_page(hpage);
+			hpage = NULL;
 		}
 #endif
 
+		cond_resched();
 		if (unlikely(kthread_should_stop() || freezing(current)))
 			break;
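Reading the hunks together: with CONFIG_NUMA, collapse_huge_page() now publishes new_page through *hpage and may fail with a bare return; *hpage = NULL on the success path runs unconditionally; and the new else-if branch in khugepaged_do_scan()'s loop becomes the single place a leftover, unused page is released. That one release point is what lets the three CONFIG_NUMA conditionals go away. The cond_resched() call also moves below the allocation block, presumably so the loop yields after the allocation bookkeeping rather than before it.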