Commit 6cf78d4b authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm changes from Ingo Molnar:
 "The main changes in this cycle were:

   - reduce the x86/32 PAE per task PGD allocation overhead from 4K to
     0.032k (Fenghua Yu)

   - early_ioremap/memunmap() usage cleanups (Juergen Gross)

   - gbpages support cleanups (Luis R Rodriguez)

   - improve AMD Bulldozer (family 0x15) ASLR I$ aliasing workaround to
     increase randomization by 3 bits (per bootup) (Hector
     Marco-Gisbert)

   - misc fixlets"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Improve AMD Bulldozer ASLR workaround
  x86/mm/pat: Initialize __cachemode2pte_tbl[] and __pte2cachemode_tbl[] in a bit more readable fashion
  init.h: Clean up the __setup()/early_param() macros
  x86/mm: Simplify probe_page_size_mask()
  x86/mm: Further simplify 1 GB kernel linear mappings handling
  x86/mm: Use early_param_on_off() for direct_gbpages
  init.h: Add early_param_on_off()
  x86/mm: Simplify enabling direct_gbpages
  x86/mm: Use IS_ENABLED() for direct_gbpages
  x86/mm: Unexport set_memory_ro() and set_memory_rw()
  x86/mm, efi: Use early_ioremap() in arch/x86/platform/efi/efi-bgrt.c
  x86/mm: Use early_memunmap() instead of early_iounmap()
  x86/mm/pat: Ensure different messages in STRICT_DEVMEM and PAT cases
  x86/mm: Reduce PAE-mode per task pgd allocation overhead from 4K to 32 bytes
parents 0ad5c6b3 4e26d11f
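
(For context on the PAE figure above: a minimal sketch of the arithmetic, assuming the standard x86/32 PAE layout of 4 top-level page-directory entries at 8 bytes each, which also matches the PGD_SIZE/PGD_ALIGN definitions in the pgd allocation hunk below. This is illustrative user-space code, not part of the merge.)

#include <stdio.h>

int main(void)
{
        unsigned long ptrs_per_pgd   = 4;       /* PAE: 4 PGD entries */
        unsigned long pgd_entry_size = 8;       /* 64-bit entries under PAE */

        /* 4 * 8 = 32 bytes = 0.032k, versus one full 4096-byte page per task */
        printf("PAE pgd size: %lu bytes\n", ptrs_per_pgd * pgd_entry_size);
        return 0;
}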
@@ -1295,14 +1295,14 @@ config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 	depends on X86_64 || HIGHMEM64G
 
-config DIRECT_GBPAGES
-	bool "Enable 1GB pages for kernel pagetables" if EXPERT
-	default y
-	depends on X86_64
-	---help---
-	  Allow the kernel linear mapping to use 1GB pages on CPUs that
-	  support it. This can improve the kernel's performance a tiny bit by
-	  reducing TLB pressure. If in doubt, say "Y".
+config X86_DIRECT_GBPAGES
+	def_bool y
+	depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
+	---help---
+	  Certain kernel features effectively disable kernel
+	  linear 1 GB mappings (even if the CPU otherwise
+	  supports them), so don't confuse the user by printing
+	  that we have them enabled.
 
 # Common NUMA Features
 config NUMA
...
@@ -366,6 +366,7 @@ enum align_flags {
 struct va_alignment {
 	int flags;
 	unsigned long mask;
+	unsigned long bits;
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
...
@@ -5,6 +5,7 @@
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/random.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
+
+		/* A random value per boot for bit slice [12:upper_bit) */
+		va_align.bits = get_random_int() & va_align.mask;
 	}
 }
...
@@ -286,13 +286,13 @@ static void __init x86_flattree_get_config(void)
 	initial_boot_params = dt = early_memremap(initial_dtb, map_len);
 	size = of_get_flat_dt_size();
 	if (map_len < size) {
-		early_iounmap(dt, map_len);
+		early_memunmap(dt, map_len);
 		initial_boot_params = dt = early_memremap(initial_dtb, size);
 		map_len = size;
 	}
 
 	unflatten_and_copy_device_tree();
-	early_iounmap(dt, map_len);
+	early_memunmap(dt, map_len);
 }
 #else
 static inline void x86_flattree_get_config(void) { }
...
@@ -661,7 +661,7 @@ void __init parse_e820_ext(u64 phys_addr, u32 data_len)
 	extmap = (struct e820entry *)(sdata->data);
 	__append_e820_map(extmap, entries);
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-	early_iounmap(sdata, data_len);
+	early_memunmap(sdata, data_len);
 	printk(KERN_INFO "e820: extended physical RAM map:\n");
 	e820_print_map("extended");
 }
...
@@ -354,7 +354,7 @@ static void __init relocate_initrd(void)
 		mapaddr = ramdisk_image & PAGE_MASK;
 		p = early_memremap(mapaddr, clen+slop);
 		memcpy(q, p+slop, clen);
-		early_iounmap(p, clen+slop);
+		early_memunmap(p, clen+slop);
 		q += clen;
 		ramdisk_image += clen;
 		ramdisk_size  -= clen;
@@ -438,7 +438,7 @@ static void __init parse_setup_data(void)
 		data_len = data->len + sizeof(struct setup_data);
 		data_type = data->type;
 		pa_next = data->next;
-		early_iounmap(data, sizeof(*data));
+		early_memunmap(data, sizeof(*data));
 
 		switch (data_type) {
 		case SETUP_E820_EXT:
@@ -470,7 +470,7 @@ static void __init e820_reserve_setup_data(void)
 					 E820_RAM, E820_RESERVED_KERN);
 		found = 1;
 		pa_data = data->next;
-		early_iounmap(data, sizeof(*data));
+		early_memunmap(data, sizeof(*data));
 	}
 	if (!found)
 		return;
@@ -491,7 +491,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 		data = early_memremap(pa_data, sizeof(*data));
 		memblock_reserve(pa_data, sizeof(*data) + data->len);
 		pa_data = data->next;
-		early_iounmap(data, sizeof(*data));
+		early_memunmap(data, sizeof(*data));
 	}
 }
...
@@ -34,10 +34,26 @@ static unsigned long get_align_mask(void)
 	return va_align.mask;
 }
 
+/*
+ * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
+ * va_align.bits, [12:upper_bit), are set to a random value instead of
+ * zeroing them. This random value is computed once per boot. This form
+ * of ASLR is known as "per-boot ASLR".
+ *
+ * To achieve this, the random value is added to the info.align_offset
+ * value before calling vm_unmapped_area() or ORed directly to the
+ * address.
+ */
+static unsigned long get_align_bits(void)
+{
+	return va_align.bits & get_align_mask();
+}
+
 unsigned long align_vdso_addr(unsigned long addr)
 {
 	unsigned long align_mask = get_align_mask();
-	return (addr + align_mask) & ~align_mask;
+
+	addr = (addr + align_mask) & ~align_mask;
+	return addr | get_align_bits();
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -135,8 +151,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = begin;
 	info.high_limit = end;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	return vm_unmapped_area(&info);
 }
@@ -174,8 +194,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	if (filp) {
+		info.align_mask = get_align_mask();
+		info.align_offset += get_align_bits();
+	}
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;
...
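
(As a rough user-space model of the alignment logic above: the address is rounded up to the alignment boundary, then the per-boot random slice is ORed in. The function name and the example mask below are hypothetical; the 3-bit slice comes from the merge description, not from this file.)

#include <stdio.h>

/*
 * Sketch of align_vdso_addr()/get_align_bits(): round up to the alignment
 * boundary, then OR in the per-boot random bits that fall inside the mask.
 */
static unsigned long align_addr_model(unsigned long addr,
                                      unsigned long align_mask,
                                      unsigned long boot_random)
{
        addr = (addr + align_mask) & ~align_mask;
        return addr | (boot_random & align_mask);
}

int main(void)
{
        unsigned long mask        = 0x7000;     /* example: bits [12:15) */
        unsigned long boot_random = 0x5000;     /* fixed once per boot in the kernel */

        printf("0x%lx\n", align_addr_model(0x00123456UL, mask, boot_random));
        return 0;
}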
@@ -29,29 +29,33 @@
 /*
  * Tables translating between page_cache_type_t and pte encoding.
- * Minimal supported modes are defined statically, modified if more supported
- * cache modes are available.
- * Index into __cachemode2pte_tbl is the cachemode.
- * Index into __pte2cachemode_tbl are the caching attribute bits of the pte
+ *
+ * Minimal supported modes are defined statically, they are modified
+ * during bootup if more supported cache modes are available.
+ *
+ * Index into __cachemode2pte_tbl[] is the cachemode.
+ *
+ * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
  * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
  */
 uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
-	[_PAGE_CACHE_MODE_WB]		= 0,
-	[_PAGE_CACHE_MODE_WC]		= _PAGE_PWT,
-	[_PAGE_CACHE_MODE_UC_MINUS]	= _PAGE_PCD,
-	[_PAGE_CACHE_MODE_UC]		= _PAGE_PCD | _PAGE_PWT,
-	[_PAGE_CACHE_MODE_WT]		= _PAGE_PCD,
-	[_PAGE_CACHE_MODE_WP]		= _PAGE_PCD,
+	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
+	[_PAGE_CACHE_MODE_WC      ]	= _PAGE_PWT | 0        ,
+	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
+	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
+	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
+	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
 };
 EXPORT_SYMBOL(__cachemode2pte_tbl);
+
 uint8_t __pte2cachemode_tbl[8] = {
-	[__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
-	[__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
-	[__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
-	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
-	[__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
-	[__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
-	[__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
-	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
+	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
+	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_WC,
+	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
+	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
+	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
+	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
+	[__pte2cm_idx( 0        | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
+	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
 EXPORT_SYMBOL(__pte2cachemode_tbl);
@@ -131,21 +135,7 @@ void __init early_alloc_pgt_buf(void)
 
 int after_bootmem;
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-	= 1
-#endif
-;
-
-static void __init init_gbpages(void)
-{
-#ifdef CONFIG_X86_64
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-#endif
-}
+early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
 
 struct map_range {
 	unsigned long start;
@@ -157,16 +147,12 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-	init_gbpages();
-
 #if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (direct_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
 	if (cpu_has_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
@@ -181,6 +167,14 @@ static void __init probe_page_size_mask(void)
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	} else
 		__supported_pte_mask &= ~_PAGE_GLOBAL;
+
+	/* Enable 1 GB linear kernel mappings if available: */
+	if (direct_gbpages && cpu_has_gbpages) {
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	} else {
+		direct_gbpages = 0;
+	}
 }
 
 #ifdef CONFIG_X86_32
...
@@ -130,20 +130,6 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 	return 0;
 }
 
-static int __init parse_direct_gbpages_off(char *arg)
-{
-	direct_gbpages = 0;
-	return 0;
-}
-early_param("nogbpages", parse_direct_gbpages_off);
-
-static int __init parse_direct_gbpages_on(char *arg)
-{
-	direct_gbpages = 1;
-	return 0;
-}
-early_param("gbpages", parse_direct_gbpages_on);
-
 /*
  * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
  * physical space so we can cache the place of the first one and move
...
@@ -81,11 +81,9 @@ void arch_report_meminfo(struct seq_file *m)
 	seq_printf(m, "DirectMap4M:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
-#ifdef CONFIG_X86_64
 	if (direct_gbpages)
 		seq_printf(m, "DirectMap1G:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_1G] << 20);
-#endif
 }
 #else
 static inline void split_page_count(int level) { }
@@ -1654,13 +1652,11 @@ int set_memory_ro(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
 }
-EXPORT_SYMBOL_GPL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
 	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
 }
-EXPORT_SYMBOL_GPL(set_memory_rw);
 
 int set_memory_np(unsigned long addr, int numpages)
 {
...
@@ -610,7 +610,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 }
 
 #ifdef CONFIG_STRICT_DEVMEM
-/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/
+/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -628,7 +628,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
-			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
 				current->comm, from, to - 1);
 			return 0;
 		}
...
@@ -275,12 +275,87 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 	}
 }
 
+/*
+ * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
+ * assumes that pgd should be in one page.
+ *
+ * But kernel with PAE paging that is not running as a Xen domain
+ * only needs to allocate 32 bytes for pgd instead of one page.
+ */
+#ifdef CONFIG_X86_PAE
+
+#include <linux/slab.h>
+
+#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_ALIGN	32
+
+static struct kmem_cache *pgd_cache;
+
+static int __init pgd_cache_init(void)
+{
+	/*
+	 * When PAE kernel is running as a Xen domain, it does not use
+	 * shared kernel pmd. And this requires a whole page for pgd.
+	 */
+	if (!SHARED_KERNEL_PMD)
+		return 0;
+
+	/*
+	 * when PAE kernel is not running as a Xen domain, it uses
+	 * shared kernel pmd. Shared kernel pmd does not require a whole
+	 * page for pgd. We are able to just allocate a 32-byte for pgd.
+	 * During boot time, we create a 32-byte slab for pgd table allocation.
+	 */
+	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
+				      SLAB_PANIC, NULL);
+	if (!pgd_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+core_initcall(pgd_cache_init);
+
+static inline pgd_t *_pgd_alloc(void)
+{
+	/*
+	 * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
+	 * We allocate one page for pgd.
+	 */
+	if (!SHARED_KERNEL_PMD)
+		return (pgd_t *)__get_free_page(PGALLOC_GFP);
+
+	/*
+	 * Now PAE kernel is not running as a Xen domain. We can allocate
+	 * a 32-byte slab for pgd to save memory space.
+	 */
+	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+}
+
+static inline void _pgd_free(pgd_t *pgd)
+{
+	if (!SHARED_KERNEL_PMD)
+		free_page((unsigned long)pgd);
+	else
+		kmem_cache_free(pgd_cache, pgd);
+}
+#else
+
+static inline pgd_t *_pgd_alloc(void)
+{
+	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+}
+
+static inline void _pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+#endif /* CONFIG_X86_PAE */
+
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
 
-	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
+	pgd = _pgd_alloc();
 
 	if (pgd == NULL)
 		goto out;
@@ -310,7 +385,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 out_free_pmds:
 	free_pmds(mm, pmds);
 out_free_pgd:
-	free_page((unsigned long)pgd);
+	_pgd_free(pgd);
 out:
 	return NULL;
 }
@@ -320,7 +395,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	pgd_mop_up_pmds(mm, pgd);
 	pgd_dtor(pgd);
 	paravirt_pgd_free(mm, pgd);
-	free_page((unsigned long)pgd);
+	_pgd_free(pgd);
 }
 
 /*
...
@@ -67,7 +67,7 @@ void __init efi_bgrt_init(void)
 	image = efi_lookup_mapped_addr(bgrt_tab->image_address);
 	if (!image) {
-		image = early_memremap(bgrt_tab->image_address,
+		image = early_ioremap(bgrt_tab->image_address,
 				       sizeof(bmp_header));
 		ioremapped = true;
 		if (!image) {
@@ -89,7 +89,7 @@ void __init efi_bgrt_init(void)
 	}
 
 	if (ioremapped) {
-		image = early_memremap(bgrt_tab->image_address,
+		image = early_ioremap(bgrt_tab->image_address,
 				       bmp_header.size);
 		if (!image) {
 			pr_err("Ignoring BGRT: failed to map image memory\n");
...
@@ -263,11 +263,31 @@ struct obs_kernel_param {
 #define __setup(str, fn)					\
 	__setup_param(str, fn, fn, 0)
 
-/* NOTE: fn is as per module_param, not __setup!  Emits warning if fn
- * returns non-zero. */
+/*
+ * NOTE: fn is as per module_param, not __setup!
+ * Emits warning if fn returns non-zero.
+ */
 #define early_param(str, fn)					\
 	__setup_param(str, fn, fn, 1)
 
+#define early_param_on_off(str_on, str_off, var, config)	\
+								\
+	int var = IS_ENABLED(config);				\
+								\
+	static int __init parse_##var##_on(char *arg)		\
+	{							\
+		var = 1;					\
+		return 0;					\
+	}							\
+	__setup_param(str_on, parse_##var##_on, parse_##var##_on, 1);	\
+								\
+	static int __init parse_##var##_off(char *arg)		\
+	{							\
+		var = 0;					\
+		return 0;					\
+	}							\
+	__setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+
 /* Relies on boot_command_line being set */
 void __init parse_early_param(void);
 void __init parse_early_options(char *cmdline);
...
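
(For illustration, the new macro as instantiated in the direct_gbpages hunk above, early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES), expands roughly to the following; a hand expansion for readability, not literal preprocessor output.)

int direct_gbpages = IS_ENABLED(CONFIG_X86_DIRECT_GBPAGES);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
__setup_param("gbpages", parse_direct_gbpages_on, parse_direct_gbpages_on, 1);

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
__setup_param("nogbpages", parse_direct_gbpages_off, parse_direct_gbpages_off, 1);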