Commit a7d6e4ec authored by Andrea Arcangeli's avatar Andrea Arcangeli Committed by Linus Torvalds

thp: prevent hugepages during args/env copying into the user stack

Transparent hugepages can only be created if rmap is fully
functional. So we must prevent hugepages from being created while
is_vma_temporary_stack() is true.

This also optimizes away some harmless but unnecessary setting of
khugepaged_scan.address, and it switches some BUG_ON to VM_BUG_ON.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 09f586b3
...@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page, ...@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
(transparent_hugepage_flags & \ (transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
((__vma)->vm_flags & VM_HUGEPAGE))) && \ ((__vma)->vm_flags & VM_HUGEPAGE))) && \
!((__vma)->vm_flags & VM_NOHUGEPAGE)) !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
!is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma) \ #define transparent_hugepage_defrag(__vma) \
((transparent_hugepage_flags & \ ((transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
......
...@@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm, ...@@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm,
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */ /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file) if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
goto out; goto out;
if (is_vma_temporary_stack(vma))
goto out;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
pgd = pgd_offset(mm, address); pgd = pgd_offset(mm, address);
...@@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, ...@@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
if ((!(vma->vm_flags & VM_HUGEPAGE) && if ((!(vma->vm_flags & VM_HUGEPAGE) &&
!khugepaged_always()) || !khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE)) { (vma->vm_flags & VM_NOHUGEPAGE)) {
skip:
progress++; progress++;
continue; continue;
} }
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */ /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
khugepaged_scan.address = vma->vm_end; goto skip;
progress++; if (is_vma_temporary_stack(vma))
continue; goto skip;
}
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart >= hend) { if (hstart >= hend)
progress++; goto skip;
continue; if (khugepaged_scan.address > hend)
} goto skip;
if (khugepaged_scan.address < hstart) if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart; khugepaged_scan.address = hstart;
if (khugepaged_scan.address > hend) { VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
progress++;
continue;
}
BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
while (khugepaged_scan.address < hend) { while (khugepaged_scan.address < hend) {
int ret; int ret;
...@@ -2086,7 +2083,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, ...@@ -2086,7 +2083,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
breakouterloop_mmap_sem: breakouterloop_mmap_sem:
spin_lock(&khugepaged_mm_lock); spin_lock(&khugepaged_mm_lock);
BUG_ON(khugepaged_scan.mm_slot != mm_slot); VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
/* /*
* Release the current mm_slot if this mm is about to die, or * Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm. * if we scanned all vmas of this mm.
...@@ -2241,9 +2238,9 @@ static int khugepaged(void *none) ...@@ -2241,9 +2238,9 @@ static int khugepaged(void *none)
for (;;) { for (;;) {
mutex_unlock(&khugepaged_mutex); mutex_unlock(&khugepaged_mutex);
BUG_ON(khugepaged_thread != current); VM_BUG_ON(khugepaged_thread != current);
khugepaged_loop(); khugepaged_loop();
BUG_ON(khugepaged_thread != current); VM_BUG_ON(khugepaged_thread != current);
mutex_lock(&khugepaged_mutex); mutex_lock(&khugepaged_mutex);
if (!khugepaged_enabled()) if (!khugepaged_enabled())
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment