Commit 42651f15 authored by Yinghai Lu, committed by Thomas Gleixner

x86: fix trimming e820 with MTRR holes.

When converting the MTRR layout from continuous to discrete, we can sometimes
run out of MTRRs. Add gran_sizek to prevent that, by dropping RAM pieces
smaller than gran_sizek.

The previous trimming could only handle the range from the highest_pfn of the
MTRRs up to end_pfn from the e820 map. When more than 4G of RAM is installed
there can be holes below 4G, so we also need to check that RAM below 4G is
covered properly.
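
For illustration only, the check roughly boils down to this: build the sorted
list of MTRR-covered pfn ranges, then reserve every piece of RAM that falls
outside them (below the first range, in the holes between ranges, and above
the last range up to end_pfn). A minimal user-space sketch of that idea, with
made-up names and a made-up memory layout; reserve_pfn_range() here only
stands in for real_trim_memory()/update_memory_range() in the patch:

	#include <stdio.h>

	#define PAGE_SHIFT 12

	struct pfn_range { unsigned long start, end; };	/* inclusive pfns */

	/* stand-in for real_trim_memory()/update_memory_range(): just report
	 * the byte range that would be reserved and return its size */
	static unsigned long long reserve_pfn_range(unsigned long start_pfn,
						    unsigned long limit_pfn)
	{
		unsigned long long start = (unsigned long long)start_pfn << PAGE_SHIFT;
		unsigned long long size = ((unsigned long long)limit_pfn << PAGE_SHIFT) - start;

		printf("would reserve [%#llx, %#llx)\n", start, start + size);
		return size;
	}

	/* reserve everything outside the covered ranges: head, holes, tail */
	static unsigned long long trim_uncovered(const struct pfn_range *r, int nr,
						 unsigned long end_pfn)
	{
		unsigned long long trimmed = 0;
		int i;

		if (nr && r[0].start)
			trimmed += reserve_pfn_range(0, r[0].start);
		for (i = 0; i < nr - 1; i++)
			if (r[i].end + 1 < r[i + 1].start)
				trimmed += reserve_pfn_range(r[i].end + 1, r[i + 1].start);
		if (nr && r[nr - 1].end + 1 < end_pfn)
			trimmed += reserve_pfn_range(r[nr - 1].end + 1, end_pfn);
		return trimmed;
	}

	int main(void)
	{
		/* made-up layout: MTRRs cover 0-3G and 4G-6G, e820 RAM ends at 6.25G */
		struct pfn_range covered[] = {
			{ 0x00000, 0xbffff },	/* 0  - 3G */
			{ 0x100000, 0x17ffff },	/* 4G - 6G */
		};
		unsigned long end_pfn = 0x190000;	/* 6.25G */

		printf("lost %lluMB\n", trim_uncovered(covered, 2, end_pfn) >> 20);
		return 0;
	}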

Needs to be applied after
	[PATCH] x86: mtrr cleanup for converting continuous to discrete layout v7
Signed-off-by: Yinghai Lu <yinghai.lu@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 95ffa243
@@ -1095,6 +1095,17 @@ int __init amd_special_default_mtrr(void)
 	return 0;
 }
 
+static u64 __init real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
+{
+	u64 trim_start, trim_size;
+	trim_start = start_pfn;
+	trim_start <<= PAGE_SHIFT;
+	trim_size = limit_pfn;
+	trim_size <<= PAGE_SHIFT;
+	trim_size -= trim_start;
+	return update_memory_range(trim_start, trim_size, E820_RAM,
+				E820_RESERVED);
+}
+
 /**
  * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
  * @end_pfn: ending page frame number
@@ -1110,8 +1121,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 {
 	unsigned long i, base, size, highest_pfn = 0, def, dummy;
 	mtrr_type type;
-	u64 trim_start, trim_size;
+	struct res_range range[RANGE_NUM];
+	int nr_range;
+	u64 total_real_trim_size;
+	int changed;
+	/* extra one for all 0 */
+	int num[MTRR_NUM_TYPES + 1];
 
 	/*
 	 * Make sure we only trim uncachable memory on machines that
 	 * support the Intel MTRR architecture:
@@ -1123,9 +1139,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 	if (def != MTRR_TYPE_UNCACHABLE)
 		return 0;
 
-	if (amd_special_default_mtrr())
-		return 0;
-
 	/* Find highest cached pfn */
 	for (i = 0; i < num_var_ranges; i++) {
 		mtrr_if->get(i, &base, &size, &type);
@@ -1145,26 +1158,80 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 		return 0;
 	}
 
-	if (highest_pfn < end_pfn) {
+	/* check entries number */
+	memset(num, 0, sizeof(num));
+	for (i = 0; i < num_var_ranges; i++) {
+		mtrr_if->get(i, &base, &size, &type);
+		if (type >= MTRR_NUM_TYPES)
+			continue;
+		if (!size)
+			type = MTRR_NUM_TYPES;
+		num[type]++;
+	}
+
+	/* no entry for WB? */
+	if (!num[MTRR_TYPE_WRBACK])
+		return 0;
+
+	/* check if we only had WB and UC */
+	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
+		num_var_ranges - num[MTRR_NUM_TYPES])
+		return 0;
+
+	memset(range, 0, sizeof(range));
+	nr_range = 0;
+	if (mtrr_tom2) {
+		range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
+		range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
+		if (highest_pfn < range[nr_range].end + 1)
+			highest_pfn = range[nr_range].end + 1;
+		nr_range++;
+	}
+	nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
+
+	changed = 0;
+	total_real_trim_size = 0;
+	/* check the top at first */
+	i = nr_range - 1;
+	if (range[i].end + 1 < end_pfn) {
+		total_real_trim_size += real_trim_memory(range[i].end + 1, end_pfn);
+	}
+
+	if (total_real_trim_size) {
 		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
-			" all of memory, losing %luMB of RAM.\n",
-			(end_pfn - highest_pfn) >> (20 - PAGE_SHIFT));
+			" all of memory, losing %lluMB of RAM.\n",
+			total_real_trim_size >> 20);
 
 		WARN_ON(1);
 
-		printk(KERN_INFO "update e820 for mtrr\n");
-		trim_start = highest_pfn;
-		trim_start <<= PAGE_SHIFT;
-		trim_size = end_pfn;
-		trim_size <<= PAGE_SHIFT;
-		trim_size -= trim_start;
-		update_memory_range(trim_start, trim_size, E820_RAM,
-					E820_RESERVED);
+		printk(KERN_INFO "update e820 for mtrr -- end_pfn\n");
 		update_e820();
-		return 1;
+		changed = 1;
 	}
 
-	return 0;
+	total_real_trim_size = 0;
+	if (range[0].start)
+		total_real_trim_size += real_trim_memory(0, range[0].start);
+	for (i = 0; i < nr_range - 1; i++) {
+		if (range[i].end + 1 < range[i+1].start)
+			total_real_trim_size += real_trim_memory(range[i].end + 1, range[i+1].start);
+	}
+
+	if (total_real_trim_size) {
+		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
+			" all of memory, losing %lluMB of RAM.\n",
+			total_real_trim_size >> 20);
+
+		WARN_ON(1);
+
+		printk(KERN_INFO "update e820 for mtrr -- holes\n");
+		update_e820();
+		changed = 1;
+	}
+
+	return changed;
 }
 
 /**
...
@@ -783,10 +783,11 @@ static int __init parse_memmap(char *arg)
 	return 0;
 }
 early_param("memmap", parse_memmap);
-void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+u64 __init update_memory_range(u64 start, u64 size, unsigned old_type,
 				unsigned new_type)
 {
 	int i;
+	u64 real_updated_size = 0;
 
 	BUG_ON(old_type == new_type);
 
@@ -798,6 +799,7 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
 		/* totally covered? */
 		if (ei->addr >= start && ei->size <= size) {
 			ei->type = new_type;
+			real_updated_size += ei->size;
 			continue;
 		}
 		/* partially covered */
@@ -807,7 +809,10 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
 			continue;
 		add_memory_region(final_start, final_end - final_start,
 					new_type);
+		real_updated_size += final_end - final_start;
 	}
+
+	return real_updated_size;
 }
 
 void __init finish_e820_parsing(void)
...
@@ -829,10 +829,11 @@ void __init finish_e820_parsing(void)
 	}
 }
 
-void __init update_memory_range(u64 start, u64 size, unsigned old_type,
+u64 __init update_memory_range(u64 start, u64 size, unsigned old_type,
 				unsigned new_type)
 {
 	int i;
+	u64 real_updated_size = 0;
 
 	BUG_ON(old_type == new_type);
 
@@ -844,6 +845,7 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
 		/* totally covered? */
 		if (ei->addr >= start && ei->size <= size) {
 			ei->type = new_type;
+			real_updated_size += ei->size;
 			continue;
 		}
 		/* partially covered */
@@ -853,7 +855,9 @@ void __init update_memory_range(u64 start, u64 size, unsigned old_type,
 			continue;
 		add_memory_region(final_start, final_end - final_start,
 					new_type);
+		real_updated_size += final_end - final_start;
 	}
+	return real_updated_size;
 }
 
 void __init update_e820(void)
...
@@ -31,7 +31,7 @@ extern void propagate_e820_map(void);
 extern void register_bootmem_low_pages(unsigned long max_low_pfn);
 extern void add_memory_region(unsigned long long start,
 			      unsigned long long size, int type);
-extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+extern u64 update_memory_range(u64 start, u64 size, unsigned old_type,
 			       unsigned new_type);
 extern void e820_register_memory(void);
 extern void limit_regions(unsigned long long size);
...
@@ -21,7 +21,7 @@ extern unsigned long find_e820_area_size(unsigned long start,
 					 unsigned long align);
 extern void add_memory_region(unsigned long start, unsigned long size,
 			      int type);
-extern void update_memory_range(u64 start, u64 size, unsigned old_type,
+extern u64 update_memory_range(u64 start, u64 size, unsigned old_type,
 			       unsigned new_type);
 extern void setup_memory_region(void);
 extern void contig_e820_setup(void);
...