Commit 36b3a772 authored by Andy Lutomirski, committed by Thomas Gleixner

x86/mm/64: Tighten up vmalloc_fault() sanity checks on 5-level kernels

On a 5-level kernel, if a non-init mm has a top-level entry, it needs to
match init_mm's, but the vmalloc_fault() code skipped over the BUG_ON()
that would have checked it.

While we're at it, get rid of the rather confusing 4-level folded "pgd"
logic.

Cleans-up: b50858ce ("x86/mm/vmalloc: Add 5-level paging support")
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Neil Berrington <neil.berrington@datacore.com>
Link: https://lkml.kernel.org/r/2ae598f8c279b0a29baf75df207e6f2fdddc0a1b.1516914529.git.luto@kernel.org
parent 5beda7d5
@@ -439,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (pgd_none(*pgd)) {
-		set_pgd(pgd, *pgd_ref);
-		arch_flush_lazy_mmu_mode();
-	} else if (CONFIG_PGTABLE_LEVELS > 4) {
-		/*
-		 * With folded p4d, pgd_none() is always false, so the pgd may
-		 * point to an empty page table entry and pgd_page_vaddr()
-		 * will return garbage.
-		 *
-		 * We will do the correct sanity check on the p4d level.
-		 */
-		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	if (CONFIG_PGTABLE_LEVELS > 4) {
+		if (pgd_none(*pgd)) {
+			set_pgd(pgd, *pgd_ref);
+			arch_flush_lazy_mmu_mode();
+		} else {
+			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
 	}
 
 	/* With 4-level paging, copying happens on the p4d level. */
@@ -459,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (p4d_none(*p4d_ref))
 		return -1;
 
-	if (p4d_none(*p4d)) {
+	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
 		set_p4d(p4d, *p4d_ref);
 		arch_flush_lazy_mmu_mode();
 	} else {
@@ -470,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * Below here mismatches are bugs because these lower tables
 	 * are shared:
 	 */
+	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
 	pud = pud_offset(p4d, address);
 	pud_ref = pud_offset(p4d_ref, address);
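For readers who want the resulting control flow in one place, here is a minimal, stand-alone C sketch of the post-patch sync/check logic, assuming a 5-level build. The entry_t type, entry_none(), sync_or_check() and the PGTABLE_LEVELS macro are illustrative stand-ins for the kernel's pgd_t/p4d_t, pXd_none(), set_pXd(), BUG_ON() and CONFIG_PGTABLE_LEVELS, not real kernel APIs.

/*
 * Minimal user-space sketch of the sync/check flow the patch leaves behind.
 * Everything here is a simplified stand-in: entry_t models pgd_t/p4d_t,
 * assert() stands in for BUG_ON(), and PGTABLE_LEVELS is a plain macro
 * rather than the kernel's CONFIG_PGTABLE_LEVELS.
 */
#include <assert.h>
#include <stdio.h>

#define PGTABLE_LEVELS 5	/* pretend this is a 5-level build */

typedef struct { unsigned long val; } entry_t;	/* stand-in for pgd_t/p4d_t */

static int entry_none(entry_t e)
{
	return e.val == 0;
}

/*
 * Copy a missing top-level entry from the reference (init_mm) table when
 * this level is the one that owns the copy; otherwise insist the two
 * entries already match -- the analogue of the patched BUG_ON() paths.
 */
static void sync_or_check(entry_t *e, const entry_t *ref, int owns_copy)
{
	if (entry_none(*e) && owns_copy) {
		*e = *ref;			/* set_pgd()/set_p4d() */
	} else {
		assert(e->val == ref->val);	/* BUG_ON(mismatch) */
	}
}

int main(void)
{
	entry_t pgd_ref = { 0x1000 }, pgd = { 0 };
	entry_t p4d_ref = { 0x2000 }, p4d = { 0x2000 };

	/* The pgd level only participates on 5-level builds ... */
	if (PGTABLE_LEVELS > 4)
		sync_or_check(&pgd, &pgd_ref, 1);

	/* ... on 4-level builds the copy happens at the folded p4d level. */
	sync_or_check(&p4d, &p4d_ref, PGTABLE_LEVELS == 4);

	printf("pgd entry %#lx, p4d entry %#lx\n", pgd.val, p4d.val);
	return 0;
}

Setting PGTABLE_LEVELS to 4 instead exercises the folded-p4d path, where the copy happens at the p4d level and the pgd level is skipped entirely, mirroring the patched vmalloc_fault().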