Commit 30604bb4 authored by Yinghai Lu, committed by Ingo Molnar

x86: break up mtrr_cleanup() into several small functions.

Ingo said mtrr_cleanup() is big and ugly, so break it up into several smaller functions and make it more readable.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 49fdf678
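
For orientation before reading the diff: the patch splits mtrr_cleanup() into the helper functions listed below. The signatures are copied from the new code in the diff; the bodies are elided and the one-line comments are only a reading aid, not part of the patch.

/* Helpers extracted from mtrr_cleanup() (see the diff below for the bodies): */
static void __init print_out_mtrr_range_state(void);   /* dump the variable MTRRs (base/size/type) */
static int __init mtrr_need_cleanup(void);              /* does the current MTRR layout need sanitizing? */
static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
                                         unsigned long extra_remove_base,
                                         unsigned long extra_remove_size,
                                         int i);        /* evaluate one chunk/gran combination into result[i] */
static void __init mtrr_print_out_one_result(int i);    /* print one result[] entry */
static int __init mtrr_search_optimal_index(void);      /* pick the best result[] entry, or -1 */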
@@ -803,6 +803,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
 }
 
 static struct res_range __initdata range[RANGE_NUM];
+static int __initdata nr_range;
 
 #ifdef CONFIG_MTRR_SANITIZER
@@ -1206,39 +1207,43 @@ struct mtrr_cleanup_result {
 #define PSHIFT		(PAGE_SHIFT - 10)
 
 static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
-static struct res_range __initdata range_new[RANGE_NUM];
 static unsigned long __initdata min_loss_pfn[RANGE_NUM];
 
-static int __init mtrr_cleanup(unsigned address_bits)
+static void __init print_out_mtrr_range_state(void)
 {
-        unsigned long extra_remove_base, extra_remove_size;
-        unsigned long base, size, def, dummy;
-        mtrr_type type;
-        int nr_range, nr_range_new;
-        u64 chunk_size, gran_size;
-        unsigned long range_sums, range_sums_new;
-        int index_good;
-        int num_reg_good;
         int i;
+        char start_factor = 'K', size_factor = 'K';
+        unsigned long start_base, size_base;
+        mtrr_type type;
 
-        /* extra one for all 0 */
-        int num[MTRR_NUM_TYPES + 1];
+        for (i = 0; i < num_var_ranges; i++) {
 
-        if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
-                return 0;
-        rdmsr(MTRRdefType_MSR, def, dummy);
-        def &= 0xff;
-        if (def != MTRR_TYPE_UNCACHABLE)
-                return 0;
+                size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
+                if (!size_base)
+                        continue;
 
-        /* get it and store it aside */
-        memset(range_state, 0, sizeof(range_state));
-        for (i = 0; i < num_var_ranges; i++) {
-                mtrr_if->get(i, &base, &size, &type);
-                range_state[i].base_pfn = base;
-                range_state[i].size_pfn = size;
-                range_state[i].type = type;
+                size_base = to_size_factor(size_base, &size_factor),
+                start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
+                start_base = to_size_factor(start_base, &start_factor),
+                type = range_state[i].type;
+
+                printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+                        i, start_base, start_factor,
+                        size_base, size_factor,
+                        (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
+                            ((type == MTRR_TYPE_WRPROT) ? "WP" :
+                             ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
+                        );
         }
+}
+
+static int __init mtrr_need_cleanup(void)
+{
+        int i;
+        mtrr_type type;
+        unsigned long size;
+        /* extra one for all 0 */
+        int num[MTRR_NUM_TYPES + 1];
 
         /* check entries number */
         memset(num, 0, sizeof(num));
@@ -1263,29 +1268,133 @@ static int __init mtrr_cleanup(unsigned address_bits)
             num_var_ranges - num[MTRR_NUM_TYPES])
                 return 0;
 
-        /* print original var MTRRs at first, for debugging: */
-        printk(KERN_DEBUG "original variable MTRRs\n");
-        for (i = 0; i < num_var_ranges; i++) {
-                char start_factor = 'K', size_factor = 'K';
-                unsigned long start_base, size_base;
+        return 1;
+}
 
-                size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
-                if (!size_base)
-                        continue;
+static unsigned long __initdata range_sums;
+static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
+                                         unsigned long extra_remove_base,
+                                         unsigned long extra_remove_size,
+                                         int i)
+{
+        int num_reg;
+        static struct res_range range_new[RANGE_NUM];
+        static int nr_range_new;
+        unsigned long range_sums_new;
+
+        /* convert ranges to var ranges state */
+        num_reg = x86_setup_var_mtrrs(range, nr_range,
+                                      chunk_size, gran_size);
+
+        /* we got new setting in range_state, check it */
+        memset(range_new, 0, sizeof(range_new));
+        nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
+                                extra_remove_base, extra_remove_size);
+        range_sums_new = sum_ranges(range_new, nr_range_new);
+
+        result[i].chunk_sizek = chunk_size >> 10;
+        result[i].gran_sizek = gran_size >> 10;
+        result[i].num_reg = num_reg;
+        if (range_sums < range_sums_new) {
+                result[i].lose_cover_sizek =
+                        (range_sums_new - range_sums) << PSHIFT;
+                result[i].bad = 1;
+        } else
+                result[i].lose_cover_sizek =
+                        (range_sums - range_sums_new) << PSHIFT;
 
-                size_base = to_size_factor(size_base, &size_factor),
-                start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
-                start_base = to_size_factor(start_base, &start_factor),
-                type = range_state[i].type;
+        /* double check it */
+        if (!result[i].bad && !result[i].lose_cover_sizek) {
+                if (nr_range_new != nr_range ||
+                        memcmp(range, range_new, sizeof(range)))
+                                result[i].bad = 1;
+        }
 
-                printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
-                        i, start_base, start_factor,
-                        size_base, size_factor,
-                        (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
-                            ((type == MTRR_TYPE_WRPROT) ? "WP" :
-                             ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
-                        );
+        if (!result[i].bad && (range_sums - range_sums_new <
+                                min_loss_pfn[num_reg])) {
+                min_loss_pfn[num_reg] =
+                        range_sums - range_sums_new;
         }
+}
+
+static void __init mtrr_print_out_one_result(int i)
+{
+        char gran_factor, chunk_factor, lose_factor;
+        unsigned long gran_base, chunk_base, lose_base;
+
+        gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
+        chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
+        lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
+        printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+                result[i].bad ? "*BAD*" : " ",
+                gran_base, gran_factor, chunk_base, chunk_factor);
+        printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
+                result[i].num_reg, result[i].bad ? "-" : "",
+                lose_base, lose_factor);
+}
+
+static int __init mtrr_search_optimal_index(void)
+{
+        int i;
+        int num_reg_good;
+        int index_good;
+
+        if (nr_mtrr_spare_reg >= num_var_ranges)
+                nr_mtrr_spare_reg = num_var_ranges - 1;
+        num_reg_good = -1;
+        for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
+                if (!min_loss_pfn[i])
+                        num_reg_good = i;
+        }
+
+        index_good = -1;
+        if (num_reg_good != -1) {
+                for (i = 0; i < NUM_RESULT; i++) {
+                        if (!result[i].bad &&
+                            result[i].num_reg == num_reg_good &&
+                            !result[i].lose_cover_sizek) {
+                                index_good = i;
+                                break;
+                        }
+                }
+        }
+
+        return index_good;
+}
+
+static int __init mtrr_cleanup(unsigned address_bits)
+{
+        unsigned long extra_remove_base, extra_remove_size;
+        unsigned long base, size, def, dummy;
+        mtrr_type type;
+        u64 chunk_size, gran_size;
+        int index_good;
+        int i;
+
+        if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
+                return 0;
+        rdmsr(MTRRdefType_MSR, def, dummy);
+        def &= 0xff;
+        if (def != MTRR_TYPE_UNCACHABLE)
+                return 0;
+
+        /* get it and store it aside */
+        memset(range_state, 0, sizeof(range_state));
+        for (i = 0; i < num_var_ranges; i++) {
+                mtrr_if->get(i, &base, &size, &type);
+                range_state[i].base_pfn = base;
+                range_state[i].size_pfn = size;
+                range_state[i].type = type;
+        }
+
+        /* check if we need handle it and can handle it */
+        if (!mtrr_need_cleanup())
+                return 0;
+
+        /* print original var MTRRs at first, for debugging: */
+        printk(KERN_DEBUG "original variable MTRRs\n");
+        print_out_mtrr_range_state();
 
         memset(range, 0, sizeof(range));
         extra_remove_size = 0;
@@ -1309,176 +1418,64 @@ static int __init mtrr_cleanup(unsigned address_bits)
                range_sums >> (20 - PAGE_SHIFT));
 
         if (mtrr_chunk_size && mtrr_gran_size) {
-                int num_reg;
-                char gran_factor, chunk_factor, lose_factor;
-                unsigned long gran_base, chunk_base, lose_base;
-
-                debug_print++;
-                /* convert ranges to var ranges state */
-                num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
-                                              mtrr_gran_size);
-
-                /* we got new setting in range_state, check it */
-                memset(range_new, 0, sizeof(range_new));
-                nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
-                                                      extra_remove_base,
-                                                      extra_remove_size);
-                range_sums_new = sum_ranges(range_new, nr_range_new);
-
-                i = 0;
-                result[i].chunk_sizek = mtrr_chunk_size >> 10;
-                result[i].gran_sizek = mtrr_gran_size >> 10;
-                result[i].num_reg = num_reg;
-                if (range_sums < range_sums_new) {
-                        result[i].lose_cover_sizek =
-                                (range_sums_new - range_sums) << PSHIFT;
-                        result[i].bad = 1;
-                } else
-                        result[i].lose_cover_sizek =
-                                (range_sums - range_sums_new) << PSHIFT;
-
-                gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-                chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-                lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-                printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
-                        result[i].bad?"*BAD*":" ",
-                        gran_base, gran_factor, chunk_base, chunk_factor);
-                printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
-                        result[i].num_reg, result[i].bad?"-":"",
-                        lose_base, lose_factor);
+                i = 0;
+                mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
+                                      extra_remove_base, extra_remove_size, i);
+
+                mtrr_print_out_one_result(i);
+
                 if (!result[i].bad) {
                         set_var_mtrr_all(address_bits);
                         return 1;
                 }
                 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
                        "will find optimal one\n");
-                debug_print--;
-                memset(result, 0, sizeof(result[0]));
         }
 
         i = 0;
         memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
         memset(result, 0, sizeof(result));
         for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
-                char gran_factor;
-                unsigned long gran_base;
-
-                if (debug_print)
-                        gran_base = to_size_factor(gran_size >> 10, &gran_factor);
-
                 for (chunk_size = gran_size; chunk_size < (1ULL<<32);
                      chunk_size <<= 1) {
-                        int num_reg;
 
-                        if (debug_print) {
-                                char chunk_factor;
-                                unsigned long chunk_base;
-
-                                chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
-                                printk(KERN_INFO "\n");
-                                printk(KERN_INFO "gran_size: %ld%c   chunk_size: %ld%c \n",
-                                       gran_base, gran_factor, chunk_base, chunk_factor);
-                        }
                         if (i >= NUM_RESULT)
                                 continue;
 
-                        /* convert ranges to var ranges state */
-                        num_reg = x86_setup_var_mtrrs(range, nr_range,
-                                                      chunk_size, gran_size);
-
-                        /* we got new setting in range_state, check it */
-                        memset(range_new, 0, sizeof(range_new));
-                        nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
-                                         extra_remove_base, extra_remove_size);
-                        range_sums_new = sum_ranges(range_new, nr_range_new);
-
-                        result[i].chunk_sizek = chunk_size >> 10;
-                        result[i].gran_sizek = gran_size >> 10;
-                        result[i].num_reg = num_reg;
-                        if (range_sums < range_sums_new) {
-                                result[i].lose_cover_sizek =
-                                        (range_sums_new - range_sums) << PSHIFT;
-                                result[i].bad = 1;
-                        } else
-                                result[i].lose_cover_sizek =
-                                        (range_sums - range_sums_new) << PSHIFT;
-
-                        /* double check it */
-                        if (!result[i].bad && !result[i].lose_cover_sizek) {
-                                if (nr_range_new != nr_range ||
-                                        memcmp(range, range_new, sizeof(range)))
-                                                result[i].bad = 1;
-                        }
-
-                        if (!result[i].bad && (range_sums - range_sums_new <
-                                               min_loss_pfn[num_reg])) {
-                                min_loss_pfn[num_reg] =
-                                        range_sums - range_sums_new;
+                        mtrr_calc_range_state(chunk_size, gran_size,
+                                      extra_remove_base, extra_remove_size, i);
+                        if (debug_print) {
+                                mtrr_print_out_one_result(i);
+                                printk(KERN_INFO "\n");
                         }
+
                         i++;
                 }
         }
 
-        /* print out all */
-        for (i = 0; i < NUM_RESULT; i++) {
-                char gran_factor, chunk_factor, lose_factor;
-                unsigned long gran_base, chunk_base, lose_base;
-
-                gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-                chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-                lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-                printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
-                        result[i].bad?"*BAD*":" ",
-                        gran_base, gran_factor, chunk_base, chunk_factor);
-                printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
-                        result[i].num_reg, result[i].bad?"-":"",
-                        lose_base, lose_factor);
-        }
-
         /* try to find the optimal index */
-        if (nr_mtrr_spare_reg >= num_var_ranges)
-                nr_mtrr_spare_reg = num_var_ranges - 1;
-        num_reg_good = -1;
-        for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
-                if (!min_loss_pfn[i])
-                        num_reg_good = i;
-        }
-
-        index_good = -1;
-        if (num_reg_good != -1) {
-                for (i = 0; i < NUM_RESULT; i++) {
-                        if (!result[i].bad &&
-                                result[i].num_reg == num_reg_good &&
-                                !result[i].lose_cover_sizek) {
-                                index_good = i;
-                                break;
-                        }
-                }
-        }
+        index_good = mtrr_search_optimal_index();
 
         if (index_good != -1) {
-                char gran_factor, chunk_factor, lose_factor;
-                unsigned long gran_base, chunk_base, lose_base;
-
                 printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
                 i = index_good;
-                gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-                chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-                lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-                printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
-                        gran_base, gran_factor, chunk_base, chunk_factor);
-                printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
-                        result[i].num_reg, lose_base, lose_factor);
+                mtrr_print_out_one_result(i);
 
                 /* convert ranges to var ranges state */
                 chunk_size = result[i].chunk_sizek;
                 chunk_size <<= 10;
                 gran_size = result[i].gran_sizek;
                 gran_size <<= 10;
-                debug_print++;
                 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
-                debug_print--;
                 set_var_mtrr_all(address_bits);
+                printk(KERN_DEBUG "New variable MTRRs\n");
+                print_out_mtrr_range_state();
                 return 1;
+        } else {
+                /* print out all */
+                for (i = 0; i < NUM_RESULT; i++)
+                        mtrr_print_out_one_result(i);
         }
 
         printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
@@ -1562,7 +1559,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 {
         unsigned long i, base, size, highest_pfn = 0, def, dummy;
         mtrr_type type;
-        int nr_range;
         u64 total_trim_size;
 
         /* extra one for all 0 */