Commit 3da99c97 authored by Yinghai Lu, committed by Ingo Molnar

x86: make (early)_identify_cpu more the same between 32bit and 64 bit

1. add extended_cpuid_level for 32-bit
2. add generic_identify for 64-bit
3. add early_identify_cpu for 32-bit
4. early_identify_cpu is no longer called by identify_cpu
5. remove the "early" parameter from get_cpu_vendor for 32-bit
6. add get_cpu_cap
7. add cpu_detect for 64-bit
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 5031088d
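After this commit both arches converge on the same shape: cpu_detect() reads the vendor string, cpuid_level and family/model/stepping, get_cpu_vendor() matches the vendor table, and get_cpu_cap() fills x86_capability[] plus extended_cpuid_level; early_identify_cpu() (boot CPU only) and generic_identify() (every CPU) each run that sequence. As a rough illustration of what cpu_detect()'s decode does, here is a minimal user-space sketch using GCC's <cpuid.h>; it is not the kernel code, and all names below are mine:

/* Standalone sketch of cpu_detect()'s family/model/stepping decode.
 * Build with GCC or Clang on x86; uses the real <cpuid.h> helpers. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model, stepping;
	char vendor[13] = "";

	/* Leaf 0: max standard leaf in EAX, vendor string in EBX:EDX:ECX,
	 * the same registers cpu_detect() stores into x86_vendor_id. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;			/* no CPUID at all */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);

	/* Leaf 1: the "tfms" word in EAX. */
	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	family   = (eax >> 8) & 0xf;
	model    = (eax >> 4) & 0xf;
	stepping = eax & 0xf;
	if (family == 0xf)		/* extended family, as in cpu_detect() */
		family += (eax >> 20) & 0xff;
	if (family >= 0x6)		/* extended model applies from family 6 */
		model += ((eax >> 16) & 0xf) << 4;

	printf("%s: family 0x%x model 0x%x stepping 0x%x\n",
	       vendor, family, model, stepping);
	return 0;
}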
@@ -96,7 +96,7 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	unsigned int *v;
 	char *p, *q;
 
-	if (cpuid_eax(0x80000000) < 0x80000004)
+	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
 
 	v = (unsigned int *) c->x86_model_id;
@@ -125,7 +125,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ecx, edx, l2size;
 
-	n = cpuid_eax(0x80000000);
+	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
@@ -186,7 +186,7 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
 
@@ -198,7 +198,6 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 			c->x86_vendor = i;
-			if (!early)
-				this_cpu = cpu_devs[i];
+			this_cpu = cpu_devs[i];
 			return;
 		}
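With the "early" flag gone, this_cpu is set whenever a vendor matches, on both the early and the full path. The match itself is a plain string compare of the CPUID vendor id against each cpu_dev's c_ident entries; a sketch of that pattern, with an illustrative table rather than the kernel's cpu_devs[]:

/* Table-match pattern as in get_cpu_vendor(); table contents are
 * illustrative only. */
#include <stdio.h>
#include <string.h>

struct vendor_ent {
	const char *ident;	/* 12-byte CPUID vendor string */
	const char *name;
};

static const struct vendor_ent table[] = {
	{ "GenuineIntel", "Intel" },
	{ "AuthenticAMD", "AMD" },
	{ "CentaurHauls", "Centaur" },
};

static const char *match_vendor(const char *v)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(v, table[i].ident))
			return table[i].name; /* kernel also sets this_cpu here */
	return "unknown";
}

int main(void)
{
	printf("%s\n", match_vendor("GenuineIntel"));
	return 0;
}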
@@ -284,13 +283,12 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 		}
 	}
 }
-static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	unsigned int ebx;
+	u32 ebx;
 
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-	if (have_cpuid_p()) {
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
@@ -301,17 +299,14 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
 			c->x86_capability[6] = cpuid_ecx(0x80000001);
 		}
 	}
-	}
 }
 
 /*
  * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
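get_cpu_cap() fills x86_capability[0]/[4] from leaf 1's EDX/ECX and, after recording extended_cpuid_level, [1]/[6] from extended leaf 0x80000001's EDX/ECX; the (xlvl & 0xffff0000) == 0x80000000 test guards against pre-extended-CPUID parts that return garbage for leaf 0x80000000. A user-space sketch of the same fill order (the word indices mirror the kernel's x86_capability[] layout):

/* Sketch of get_cpu_cap()'s capability-word fill, via <cpuid.h>. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int cap[7] = { 0 };
	unsigned int eax, ebx, ecx, edx, xlvl = 0;

	/* Intel-defined flags: leaf 1, EDX -> word 0, ECX -> word 4. */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		cap[0] = edx;
		cap[4] = ecx;
	}

	/* AMD-defined flags: leaf 0x80000001, EDX -> word 1, ECX -> word 6.
	 * Trust the extended range only if leaf 0x80000000 echoes the
	 * 0x8000xxxx signature, exactly as get_cpu_cap() checks. */
	if (__get_cpuid(0x80000000, &xlvl, &ebx, &ecx, &edx) &&
	    (xlvl & 0xffff0000) == 0x80000000 && xlvl >= 0x80000001) {
		__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
		cap[1] = edx;
		cap[6] = ecx;
	}

	printf("max extended leaf 0x%x, cap0 %08x, cap6 %08x\n",
	       xlvl, cap[0], cap[6]);
	return 0;
}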
@@ -321,25 +316,29 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 * WARNING: this function is only called on the BP. Don't add code here
 * that is supposed to run on all CPUs.
 */
-static void __init early_cpu_detect(void)
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
 	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
 
 	if (!have_cpuid_p())
 		return;
 
+	c->extended_cpuid_level = 0;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
 	cpu_detect(c);
-	get_cpu_vendor(c, 1);
-	early_get_cap(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
 		cpu_devs[c->x86_vendor]->c_early_init(c);
+
+	validate_pat_support(c);
 }
 
 /*
@@ -373,60 +372,32 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	unsigned int ebx;
-
-	if (have_cpuid_p()) {
-		/* Get vendor name */
-		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-		      (unsigned int *)&c->x86_vendor_id[0],
-		      (unsigned int *)&c->x86_vendor_id[8],
-		      (unsigned int *)&c->x86_vendor_id[4]);
-
-		get_cpu_vendor(c, 0);
-		/* Initialize the standard set of capabilities */
-		/* Note that the vendor-specific code below might override */
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-			c->x86 = (tfms >> 8) & 15;
-			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf)
-				c->x86 += (tfms >> 20) & 0xff;
-			if (c->x86 >= 0x6)
-				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			c->x86_mask = tfms & 15;
-			c->initial_apicid = (ebx >> 24) & 0xFF;
+	if (!have_cpuid_p())
+		return;
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (c->cpuid_level >= 0x00000001) {
+		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id(c->initial_apicid, 0);
-			c->phys_proc_id = c->initial_apicid;
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
 #else
-			c->apicid = c->initial_apicid;
+		c->apicid = c->initial_apicid;
 #endif
-			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-		} else {
-			/* Have CPUID level 0 only - unheard of */
-			c->x86 = 4;
-		}
-
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-			if (xlvl >= 0x80000004)
-				get_model_name(c); /* Default name */
-		}
+	}
 
-		init_scattered_cpuid_features(c);
-		detect_nopl(c);
-	}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
+
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
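generic_identify() now pulls initial_apicid straight from cpuid_ebx(1) instead of a locally cached EBX, since the family/model decode has moved into cpu_detect(). The field itself is just bits 31:24 of leaf 1's EBX; a tiny user-space sketch:

/* Where initial_apicid comes from: bits 31:24 of leaf 1's EBX, matching
 * the (cpuid_ebx(1) >> 24) & 0xFF expression above. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("initial APIC id: %u\n", (ebx >> 24) & 0xff);
	return 0;
}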
@@ -651,13 +622,10 @@ void __init early_cpu_init(void)
 {
 	struct cpu_vendor_dev *cvdev;
 
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
 		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
 
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /* Make sure %fs is initialized properly in idle threads */
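The hunks above are the 32-bit side; the hunks below apply the same split to the 64-bit CPU setup file (in this era, most likely arch/x86/kernel/cpu/common.c and common_64.c, though the page does not show the file names). One detail worth calling out on the 64-bit side: cpu_detect() tests the open-coded cap0 & (1<<19) instead of test_cpu_cap(c, X86_FEATURE_CLFLSH), because x86_capability[] is no longer filled at that point. Bit 19 of leaf 1's EDX is the CLFSH flag, which validates the CLFLUSH line-size byte in EBX; a user-space sketch of that check:

/* The CLFSH check behind "cap0 & (1<<19)": EDX bit 19 of leaf 1 says the
 * CLFLUSH line-size field (EBX bits 15:8, in 8-byte units) is valid. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	if (edx & (1u << 19))
		printf("clflush line size: %u bytes\n", ((ebx >> 8) & 0xff) * 8);
	else
		printf("CLFLUSH not reported by CPUID\n");
	return 0;
}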
@@ -195,6 +195,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
 }
 
 static void __init early_cpu_support_print(void)
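The unknown-vendor path now also points this_cpu at default_cpu, so later indirect calls through this_cpu never go through a stale or NULL pointer -- a classic null-object fallback. A minimal sketch of the pattern (types and names here are illustrative, not the kernel's):

/* Null-object fallback, as used for this_cpu above. */
#include <stdio.h>

struct cpu_ops {
	void (*init)(void);
};

static void default_init(void) { puts("generic init"); }

static const struct cpu_ops default_ops = { default_init };
static const struct cpu_ops *this_ops = &default_ops;	/* never NULL */

int main(void)
{
	/* Even if vendor lookup failed, this_ops points somewhere valid. */
	this_ops->init();
	return 0;
}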
@@ -249,56 +250,18 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-	early_cpu_support_print();
-	early_identify_cpu(&boot_cpu_data);
-}
-
-/* Do some early cpuid on the boot CPU to get some parameter that are
-   needed before check_bugs. Everything advanced is in identify_cpu
-   below. */
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-
-	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-	c->x86_vendor_id[0] = '\0'; /* Unset */
-	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
-	c->x86_max_cores = 1;
-	c->x86_coreid_bits = 0;
-	c->extended_cpuid_level = 0;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 	      (unsigned int *)&c->x86_vendor_id[0],
 	      (unsigned int *)&c->x86_vendor_id[8],
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
-	get_cpu_vendor(c);
-
-	/* Initialize the standard set of capabilities */
-	/* Note that the vendor-specific code below might override */
-
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
-		__u32 misc;
-		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-		      &c->x86_capability[0]);
+		u32 junk, tfms, cap0, misc;
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
@@ -306,17 +269,32 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
+		if (cap0 & (1<<19))
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
 		c->x86 = 4;
 	}
+}
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+{
+	u32 tfms, xlvl;
+	u32 ebx;
+
+	/* Initialize the standard set of capabilities */
+	/* Note that the vendor-specific code below might override */
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
-	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
-#ifdef CONFIG_SMP
-	c->phys_proc_id = c->initial_apicid;
-#endif
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
@@ -325,8 +303,6 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 			c->x86_capability[1] = cpuid_edx(0x80000001);
 			c->x86_capability[6] = cpuid_ecx(0x80000001);
 		}
-		if (xlvl >= 0x80000004)
-			get_model_name(c); /* Default name */
 	}
 
 	/* Transmeta-defined flags: level 0x80860001 */
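This hunk drops get_model_name() from the 64-bit early path; both arches now call it from generic_identify(), gated on extended_cpuid_level >= 0x80000004, because the brand string lives in extended leaves 0x80000002 through 0x80000004 (16 bytes each across EAX..EDX). A user-space sketch of that read:

/* Sketch of the brand-string read behind get_model_name(). */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int leaf, regs[12];
	char name[49] = "";

	unsigned int xlvl = __get_cpuid_max(0x80000000, 0);
	if (xlvl < 0x80000004)
		return 1;	/* same gate as the kernel's check */

	for (leaf = 0; leaf < 3; leaf++)
		__get_cpuid(0x80000002 + leaf,
			    &regs[leaf * 4 + 0], &regs[leaf * 4 + 1],
			    &regs[leaf * 4 + 2], &regs[leaf * 4 + 3]);
	memcpy(name, regs, 48);	/* 48 bytes plus the trailing NUL */
	printf("%s\n", name);
	return 0;
}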
@@ -346,8 +322,26 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+}
 
-	detect_nopl(c);
+/* Do some early cpuid on the boot CPU to get some parameter that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
 	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
 	    cpu_devs[c->x86_vendor]->c_early_init)
@@ -356,6 +350,39 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 	validate_pat_support(c);
 }
 
+void __init early_cpu_init(void)
+{
+	struct cpu_vendor_dev *cvdev;
+
+	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
+	early_cpu_support_print();
+	early_identify_cpu(&boot_cpu_data);
+}
+
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+{
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
+#ifdef CONFIG_SMP
+	c->phys_proc_id = c->initial_apicid;
+#endif
+
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
+
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
+}
+
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
@@ -363,9 +390,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	early_identify_cpu(c);
+	c->loops_per_jiffy = loops_per_jiffy;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	init_scattered_cpuid_features(c);
+	generic_identify(c);
 
 	c->apicid = phys_pkg_id(0);
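The final hunk is in the struct cpuinfo_x86 definition (the shared processor.h header of this era): extended_cpuid_level moves out of the 64-bit-only #ifdef block so the 32-bit code added above can use it. An abbreviated sketch of the resulting layout, with the field set trimmed and kernel types swapped for stdint ones:

#include <stdint.h>

/* Trimmed illustration -- the real struct cpuinfo_x86 has many more fields. */
struct cpuinfo_sketch {
#ifdef CONFIG_X86_64
	uint8_t  x86_coreid_bits;	/* 64-bit-only block now ends here */
#endif
	uint32_t extended_cpuid_level;	/* moved out: visible to 32-bit too */
	int      cpuid_level;		/* max standard leaf, -1 = no CPUID */
};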
@@ -77,9 +77,9 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+#endif
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
-#endif
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
 	__u32			x86_capability[NCAPINTS];