Commit fa62aafe authored by Yinghai Lu, committed by H. Peter Anvin

x86, mm: Add global page_size_mask and probe one time only

Currently we pass use_gbpages and use_pse around to calculate the page table
size. Later we will need to call init_memory_mapping() for every RAM range one
by one, which means those calculations would be repeated several times.

That information is the same for all RAM ranges, so it can be stored in
page_size_mask and probed only once.

Move the probing code out of init_memory_mapping() into a separate function,
probe_page_size_mask(), and call it before any call to init_memory_mapping().
Suggested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-2-git-send-email-yinghai@kernel.org
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent f4a75d2e
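
For orientation, the sketch below (plain userspace C, not kernel code) illustrates the pattern this patch introduces: probe the supported page sizes once into a global mask, then let every init_memory_mapping() call consume that mask instead of recomputing use_pse/use_gbpages. The cpu_supports_*() helpers and the mapping stub are hypothetical stand-ins for the kernel's cpu_has_pse/cpu_has_pge/direct_gbpages checks and the real mapping code.

/*
 * Minimal sketch of the "probe once, use many times" pattern.  The
 * cpu_supports_*() helpers are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>

#define PG_LEVEL_2M_BIT (1 << 1)
#define PG_LEVEL_1G_BIT (1 << 2)

static int page_size_mask;      /* probed once, shared by all ranges */

static int cpu_supports_pse(void) { return 1; }  /* assumed: 2M pages available */
static int cpu_supports_1g(void)  { return 0; }  /* assumed: no 1G pages */

static void probe_page_size_mask(void)
{
        if (cpu_supports_1g())
                page_size_mask |= PG_LEVEL_1G_BIT;
        if (cpu_supports_pse())
                page_size_mask |= PG_LEVEL_2M_BIT;
}

static void init_memory_mapping(unsigned long start, unsigned long end)
{
        /* No per-call probing: the global mask is simply consumed. */
        printf("map [mem %#010lx-%#010lx] with page_size_mask %#x\n",
               start, end - 1, page_size_mask);
}

int main(void)
{
        probe_page_size_mask();                  /* once, early (cf. setup_arch) */
        init_memory_mapping(0, 0x20000000UL);    /* per-range calls reuse the mask */
        init_memory_mapping(0x20000000UL, 0x40000000UL);
        return 0;
}

With the probing hoisted out this way, calling init_memory_mapping() once per RAM range, as later patches do, no longer repeats the feature checks and CR4 setup.
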
arch/x86/include/asm/pgtable.h
@@ -602,6 +602,7 @@ static inline int pgd_none(pgd_t pgd)
 #ifndef __ASSEMBLY__

 extern int direct_gbpages;
+void probe_page_size_mask(void);

 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
arch/x86/kernel/setup.c
@@ -913,6 +913,7 @@ void __init setup_arch(char **cmdline_p)
         setup_real_mode();

         init_gbpages();
+        probe_page_size_mask();

         /* max_pfn_mapped is updated here */
         max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
arch/x86/mm/init.c
@@ -35,6 +35,7 @@ struct map_range {
         unsigned page_size_mask;
 };

+static int page_size_mask;
 /*
  * First calculate space needed for kernel direct mapping page tables to cover
  * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
@@ -94,6 +95,30 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
                 (pgt_buf_top << PAGE_SHIFT) - 1);
 }

+void probe_page_size_mask(void)
+{
+#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
+        /*
+         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+         * This will simplify cpa(), which otherwise needs to support splitting
+         * large pages into small in interrupt context, etc.
+         */
+        if (direct_gbpages)
+                page_size_mask |= 1 << PG_LEVEL_1G;
+        if (cpu_has_pse)
+                page_size_mask |= 1 << PG_LEVEL_2M;
+#endif
+
+        /* Enable PSE if available */
+        if (cpu_has_pse)
+                set_in_cr4(X86_CR4_PSE);
+
+        /* Enable PGE if available */
+        if (cpu_has_pge) {
+                set_in_cr4(X86_CR4_PGE);
+                __supported_pte_mask |= _PAGE_GLOBAL;
+        }
+}
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
         memblock_reserve(start, end - start);
@@ -129,45 +154,15 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 unsigned long __init_refok init_memory_mapping(unsigned long start,
                                                unsigned long end)
 {
-        unsigned long page_size_mask = 0;
         unsigned long start_pfn, end_pfn;
         unsigned long ret = 0;
         unsigned long pos;
         struct map_range mr[NR_RANGE_MR];
         int nr_range, i;
-        int use_pse, use_gbpages;

         printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
                start, end - 1);

-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
-        /*
-         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-         * This will simplify cpa(), which otherwise needs to support splitting
-         * large pages into small in interrupt context, etc.
-         */
-        use_pse = use_gbpages = 0;
-#else
-        use_pse = cpu_has_pse;
-        use_gbpages = direct_gbpages;
-#endif
-
-        /* Enable PSE if available */
-        if (cpu_has_pse)
-                set_in_cr4(X86_CR4_PSE);
-
-        /* Enable PGE if available */
-        if (cpu_has_pge) {
-                set_in_cr4(X86_CR4_PGE);
-                __supported_pte_mask |= _PAGE_GLOBAL;
-        }
-
-        if (use_gbpages)
-                page_size_mask |= 1 << PG_LEVEL_1G;
-        if (use_pse)
-                page_size_mask |= 1 << PG_LEVEL_2M;
-
         memset(mr, 0, sizeof(mr));
         nr_range = 0;