Commit b528cea7 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] New version of early CPU detect

From: Andi Kleen <ak@suse.de>

We still need some kind of early CPU detection, e.g.  for the AMD768
workaround and for the slab allocator to size its slabs correctly for the
cache line.  Also some other code already had private early CPU routines.

This patch takes a new approach compared to the previous patch, which caused
Andrew so much grief.  It only fills in a few selected fields in
boot_cpu_data (only the data needed to identify the CPU type and the cache
alignment).  In particular the feature masks are not filled in, and the
other fields are also not touched to prevent unwanted side effects.

Also convert the ppro workaround to use standard cpu data now. 

I'm not sure if slab still has the necessary support to use the cache line
size early; previously Manfred showed some serious memory savings with this
for kernels that are compiled for a bigger cache line size than the CPU has
(as is often the case on distribution kernels).  That code could be
re-enabled now with this patch.
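
For a sense of the saving mentioned above, here is a rough standalone sketch
(the helper name, object size, and line sizes are illustrative, not from the
patch): a slab-style allocator pads each object to the compile-time worst-case
line size, versus the line size the running CPU actually reports.

#include <stdio.h>

/* Round an object size up to a multiple of the cache line size, the way
 * a slab-style allocator pads its objects.  "line" stands in for the
 * value early_cpu_detect stores in x86_cache_alignment; it must be a
 * power of two for the mask trick below. */
static unsigned int pad_to_line(unsigned int size, unsigned int line)
{
        return (size + line - 1) & ~(line - 1);
}

int main(void)
{
        unsigned int obj = 40;  /* arbitrary object size in bytes */

        /* Compile-time worst case vs. what the running CPU reports. */
        printf("built for 128-byte lines: %u bytes/object\n",
               pad_to_line(obj, 128));
        printf("detected 32-byte lines:   %u bytes/object\n",
               pad_to_line(obj, 32));
        return 0;
}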
parent ed67bbe7
@@ -138,8 +138,7 @@ static char __init *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
-void __init get_cpu_vendor(struct cpuinfo_x86 *c)
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -150,7 +149,8 @@ void __init get_cpu_vendor(struct cpuinfo_x86 *c)
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
 			c->x86_vendor = i;
-			this_cpu = cpu_devs[i];
+			if (!early)
+				this_cpu = cpu_devs[i];
 			break;
 		}
 	}
@@ -194,6 +194,44 @@ int __init have_cpuid_p(void)
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects. */
+void __init early_cpu_detect(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (!have_cpuid_p())
+		return;
+
+	/* Get vendor name */
+	cpuid(0x00000000, &c->cpuid_level,
+	      (int *)&c->x86_vendor_id[0],
+	      (int *)&c->x86_vendor_id[8],
+	      (int *)&c->x86_vendor_id[4]);
+
+	get_cpu_vendor(c, 1);
+
+	c->x86 = 4;
+	c->x86_cache_alignment = 32;
+
+	if (c->cpuid_level >= 0x00000001) {
+		u32 junk, tfms, cap0, misc;
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+		c->x86 = (tfms >> 8) & 15;
+		c->x86_model = (tfms >> 4) & 15;
+		if (c->x86 == 0xf) {
+			c->x86 += (tfms >> 20) & 0xff;
+			c->x86_model += ((tfms >> 16) & 0xF) << 4;
+		}
+		c->x86_mask = tfms & 15;
+		if (cap0 & (1<<19))
+			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+	}
+
+	early_intel_workaround(c);
+}
+
 void __init generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
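
As a cross-check of the leaf-1 decode in early_cpu_detect above, a standalone
sketch (the sample signature is illustrative; 0x00000f29 is what a
Northwood-era Pentium 4 reports in CPUID leaf 1 EAX):

#include <stdio.h>
#include <stdint.h>

/* Decode an x86 CPUID leaf 1 signature (EAX) the way early_cpu_detect
 * does.  0x00000f29: family 0xf, model 2, stepping 9. */
int main(void)
{
        uint32_t tfms = 0x00000f29;     /* illustrative EAX from CPUID(1) */
        unsigned family = (tfms >> 8) & 15;
        unsigned model  = (tfms >> 4) & 15;
        unsigned mask   = tfms & 15;    /* stepping */

        /* The extended family/model bits only count for family 15. */
        if (family == 0xf) {
                family += (tfms >> 20) & 0xff;
                model  += ((tfms >> 16) & 0xf) << 4;
        }
        printf("family %u, model %u, stepping %u\n", family, model, mask);
        return 0;
}

This prints "family 15, model 2, stepping 9", matching what the patch would
store in x86, x86_model, and x86_mask.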
@@ -206,7 +244,7 @@ void __init generic_identify(struct cpuinfo_x86 * c)
 		      (int *)&c->x86_vendor_id[8],
 		      (int *)&c->x86_vendor_id[4]);
 
-		get_cpu_vendor(c);
+		get_cpu_vendor(c, 0);
 
 		/* Initialize the standard set of capabilities */
 		/* Note that the vendor-specific code below might override */
@@ -384,7 +422,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 
 void __init dodgy_tsc(void)
 {
-	get_cpu_vendor(&boot_cpu_data);
 	if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
 	    ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
 		cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
@@ -432,9 +469,11 @@ extern int transmeta_init_cpu(void);
 extern int rise_init_cpu(void);
 extern int nexgen_init_cpu(void);
 extern int umc_init_cpu(void);
+void early_cpu_detect(void);
 
 void __init early_cpu_init(void)
 {
+	early_cpu_detect();
 	intel_cpu_init();
 	cyrix_init_cpu();
 	nsc_init_cpu();
@@ -26,3 +26,6 @@ extern void display_cacheinfo(struct cpuinfo_x86 *c);
 extern void generic_identify(struct cpuinfo_x86 * c);
+
+extern int have_cpuid_p(void);
+extern void early_intel_workaround(struct cpuinfo_x86 *c);
@@ -28,6 +28,15 @@ extern int trap_init_f00f_bug(void);
 struct movsl_mask movsl_mask;
 #endif
 
+void __init early_intel_workaround(struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor != X86_VENDOR_INTEL)
+		return;
+	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+	if (c->x86 == 15 && c->x86_cache_alignment == 64)
+		c->x86_cache_alignment = 128;
+}
+
 /*
  * Early probe support logic for ppro memory erratum #50
  *
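
To make the Netburst quirk concrete, a worked example under assumed values
(the EBX value is illustrative): CPUID leaf 1 EBX[15:8] gives the CLFLUSH
line size in 8-byte units, so a reported 8 decodes to 64 bytes, which the
workaround doubles on family 15.

#include <stdio.h>

/* Worked example of the quirk with assumed values: EBX byte 1 from
 * CPUID leaf 1 holds the CLFLUSH line size in 8-byte chunks. */
int main(void)
{
        unsigned int ebx = 0x00000800;  /* illustrative: byte 1 = 8 */
        unsigned int align = ((ebx >> 8) & 0xff) * 8;   /* 8 * 8 = 64 */
        unsigned int family = 15;       /* Netburst */

        /* Mirror early_intel_workaround: the bus moves 128-byte sectors. */
        if (family == 15 && align == 64)
                align = 128;

        printf("effective cache alignment: %u bytes\n", align);
        return 0;
}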
@@ -36,42 +45,14 @@ struct movsl_mask movsl_mask;
 int __init ppro_with_ram_bug(void)
 {
-	char vendor_id[16];
-	int ident;
-
-	/* Must have CPUID */
-	if(!have_cpuid_p())
-		return 0;
-	if(cpuid_eax(0)<1)
-		return 0;
-
-	/* Must be Intel */
-	cpuid(0, &ident,
-		(int *)&vendor_id[0],
-		(int *)&vendor_id[8],
-		(int *)&vendor_id[4]);
-
-	if(memcmp(vendor_id, "IntelInside", 12))
-		return 0;
-
-	ident = cpuid_eax(1);
-
-	/* Model 6 */
-	if(((ident>>8)&15)!=6)
-		return 0;
-
-	/* Pentium Pro */
-	if(((ident>>4)&15)!=1)
-		return 0;
-	if((ident&15) < 8)
-	{
+	/* Uses data from early_cpu_detect now */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6 &&
+	    boot_cpu_data.x86_model == 1 &&
+	    boot_cpu_data.x86_mask < 8) {
 		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
 		return 1;
 	}
 	printk(KERN_INFO "Your Pentium Pro seems ok.\n");
 	return 0;
 }
@@ -59,6 +59,7 @@ struct cpuinfo_x86 {
 	char	x86_model_id[64];
 	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
 				    call */
+	int	x86_cache_alignment;	/* In bytes */
 	int	fdiv_bug;
 	int	f00f_bug;
 	int	coma_bug;