Commit 509cd3f2 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32: Simplify KASAN init

Since kasan_init_region() is no longer used for modules, KASAN init is
always done while slab_is_available() is false, so the slab-based
allocation paths can be dropped in favour of memblock.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/84b27bf08b41c8343efd88e10f2eccd8e9f85593.1579024426.git.christophe.leroy@c-s.fr
parent 47febbee
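
For context, the patch collapses a dual allocation path into the early-boot one: before this commit, the shadow page table helpers had to work both at early boot (memblock) and later, at module load time (slab). With module support gone from kasan_init_region(), only the early path can ever run. A minimal sketch of the pattern, using the identifiers from the diff below (the surrounding kernel code is assumed):

	/* Before: pick an allocator depending on how far boot has progressed. */
	if (slab_is_available())
		new = pte_alloc_one_kernel(&init_mm);	/* late: slab is up */
	else
		new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);	/* early boot */

	/* After: KASAN init always runs before slab is up, so the early
	 * allocator suffices and the first branch is dead code. */
	new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

The same invariant lets the patch hard-code PAGE_KERNEL in kasan_populate_pte() (the slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL ternary always took the second arm) and remove both the kasan_get_one_page() helper and the pte_free_kernel() cleanup.
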
@@ -34,7 +34,6 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
 {
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
-	pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
 
 	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
@@ -45,14 +44,11 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
 		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
 			continue;
 
-		if (slab_is_available())
-			new = pte_alloc_one_kernel(&init_mm);
-		else
-			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+		new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
 
 		if (!new)
 			return -ENOMEM;
-		kasan_populate_pte(new, prot);
+		kasan_populate_pte(new, PAGE_KERNEL);
 
 		smp_wmb(); /* See comment in __pte_alloc */
 
@@ -63,39 +59,27 @@ static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned
 			new = NULL;
 		}
 		spin_unlock(&init_mm.page_table_lock);
-
-		if (new && slab_is_available())
-			pte_free_kernel(&init_mm, new);
 	}
 	return 0;
 }
 
-static void __init *kasan_get_one_page(void)
-{
-	if (slab_is_available())
-		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
 static int __init kasan_init_region(void *start, size_t size)
 {
 	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
 	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
 	unsigned long k_cur;
 	int ret;
-	void *block = NULL;
+	void *block;
 
 	ret = kasan_init_shadow_page_tables(k_start, k_end);
 	if (ret)
 		return ret;
 
-	if (!slab_is_available())
-		block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
 		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
-		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
 		if (!va)
...
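
Why the removed branches are guaranteed dead: per the commit message, this code now only runs at a point in boot where slab_is_available() is still false. If one wanted to document that invariant in the code, a hypothetical guard (not part of this commit, for illustration only) could look like:

	/* Hypothetical: assert the invariant that KASAN shadow init
	 * runs strictly before the slab allocator comes up. */
	WARN_ON(slab_is_available());
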