Commit ba76149f authored by Andrea Arcangeli, committed by Linus Torvalds

thp: khugepaged

Add khugepaged to relocate fragmented pages into hugepages if new
hugepages become available.  (This is independent of the defrag logic
that will have to make new hugepages available.)

The fundamental reason why khugepaged is unavoidable is that some memory
can be fragmented and not everything can be relocated.  So when a virtual
machine quits and releases gigabytes of hugepages, we want to use those
freely available hugepages to create huge-pmds in the other virtual
machines that may be running on fragmented memory, to maximize CPU
efficiency at all times.  The scan is slow and takes nearly zero CPU time,
except when it copies data (in which case we definitely want to pay that
CPU cost), so it seems a good tradeoff.

In addition to reusing the hugepages released by other processes freeing
memory, we strongly suspect that potentially defragmenting hugepages
during or before each page fault could lead to more performance
inconsistency than allocating small pages at first and collapsing them
into large pages later, if they prove themselves to be long-lived
mappings (the khugepaged scan is slow, so short-lived mappings have a low
probability of running into khugepaged compared to long-lived mappings).

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79134171
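Because the scan favors long-lived mappings, an application running with THP in "madvise" mode has to opt each mapping in before khugepaged will register its mm (the khugepaged_req_madv() path below). A minimal userspace sketch, not part of this commit; MADV_HUGEPAGE itself arrives in a separate patch of the THP series:

/*
 * Userspace sketch: mark a long-lived anonymous mapping as a collapse
 * candidate.  In "madvise" mode khugepaged only scans VMAs with
 * VM_HUGEPAGE set, which madvise(MADV_HUGEPAGE) requests.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define LEN (16UL << 20)	/* 16 MB: several 2 MB huge-page-sized units */

int main(void)
{
	void *buf = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (madvise(buf, LEN, MADV_HUGEPAGE))	/* hint: worth collapsing */
		perror("madvise(MADV_HUGEPAGE)");
	memset(buf, 1, LEN);	/* fault in small pages first */
	pause();		/* stay long-lived so the slow scan finds us */
	return 0;
}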
include/linux/huge_mm.h
@@ -25,6 +25,7 @@ enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
 #ifdef CONFIG_DEBUG_VM
 	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
 #endif
include/linux/khugepaged.h (new file)

#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched.h> /* MMF_VM_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);

#define khugepaged_enabled()					\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}

static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}

static inline int khugepaged_enter(struct vm_area_struct *vma)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if (khugepaged_always() ||
		    (khugepaged_req_madv() &&
		     vma->vm_flags & VM_HUGEPAGE))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */
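The predicates above are consumed by the daemon itself, which lives in the collapsed part of this diff. A hedged sketch of roughly how such a kthread would use khugepaged_enabled(); example_scan_mm_list() and example_khugepaged() are purely hypothetical stand-ins, and the real loop differs in detail:

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/huge_mm.h>	/* transparent_hugepage_flags */
#include <linux/khugepaged.h>

/* Hypothetical stand-in for the real scanning work. */
static void example_scan_mm_list(void)
{
	/* Walk the mms registered via __khugepaged_enter() and try to
	 * collapse aligned runs of small pages into huge pages. */
}

static int example_khugepaged(void *unused)
{
	while (!kthread_should_stop()) {
		/* Only burn cycles while THP is in "always" or "madvise" mode. */
		if (khugepaged_enabled())
			example_scan_mm_list();
		/* Sleep between passes so the slow scan costs nearly zero CPU. */
		schedule_timeout_interruptible(msecs_to_jiffies(10 * 1000));
	}
	return 0;
}

The real daemon is started with kthread_run() and tuned via sysfs knobs, both of which are in the collapsed portion of this commit.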
include/linux/sched.h
@@ -434,6 +434,7 @@ extern int get_dumpable(struct mm_struct *mm);
 #endif
 					/* leave room for more dump flags */
 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */

 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
+#include <linux/khugepaged.h>

 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -328,6 +329,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	rb_parent = NULL;
 	pprev = &mm->mmap;
 	retval = ksm_fork(mm, oldmm);
+	if (retval)
+		goto out;
+	retval = khugepaged_fork(mm, oldmm);
 	if (retval)
 		goto out;
@@ -546,6 +550,7 @@ void mmput(struct mm_struct *mm)
 	if (atomic_dec_and_test(&mm->mm_users)) {
 		exit_aio(mm);
 		ksm_exit(mm);
+		khugepaged_exit(mm); /* must run before exit_mmap */
 		exit_mmap(mm);
 		set_mm_exe_file(mm, NULL);
 		if (!list_empty(&mm->mmlist)) {
(The remaining diff, containing the khugepaged implementation itself, is collapsed.)
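Once the implementation is running, its effect is visible from userspace. An observation sketch, not part of this commit (the AnonHugePages field in /proc/meminfo is added by a separate patch in the THP series): the counter should grow once khugepaged collapses the mapping from the earlier example.

/*
 * Print the AnonHugePages line from /proc/meminfo, which counts
 * anonymous memory currently backed by transparent huge pages.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "AnonHugePages:", 14))
			fputs(line, stdout);	/* e.g. "AnonHugePages:   16384 kB" */
	fclose(f);
	return 0;
}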