Commit 4d46a89e authored by Ingo Molnar

x86: clean up include/asm-x86/processor.h

basic style cleanup to flush out years of neglect:

 - consistent indentation
 - whitespace fixes
 - consistent comments
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent eb19067d
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -24,6 +24,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+
 #include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
@@ -37,16 +38,18 @@ struct mm_struct;
 static inline void *current_text_addr(void)
 {
 	void *pc;
-	asm volatile("mov $1f,%0\n1:":"=r" (pc));
+
+	asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
 	return pc;
 }
 
 #ifdef CONFIG_X86_VSMP
-#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
-#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
 #else
-#define ARCH_MIN_TASKALIGN	16
-#define ARCH_MIN_MMSTRUCT_ALIGN	0
+# define ARCH_MIN_TASKALIGN		16
+# define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
 /*
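
Note: the inline asm above stores the address of the local label `1:` (the instruction right after the `mov`) into `pc`, so `current_text_addr()` answers "where in the kernel text am I executing". A minimal user-space sketch of the same label trick, assuming gcc on x86 and a non-PIE build (an illustration, not the kernel code itself):

```c
#include <stdio.h>

/* Load the address of the local asm label "1:" into pc; the label
 * sits immediately after the mov, so pc points at the next insn. */
static void *text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));
	return pc;
}

int main(void)
{
	printf("executing near %p\n", text_addr());
	return 0;
}
```
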
@@ -56,69 +59,81 @@ static inline void *current_text_addr(void)
  */
 struct cpuinfo_x86 {
 	__u8			x86;		/* CPU family */
 	__u8			x86_vendor;	/* CPU vendor */
 	__u8			x86_model;
 	__u8			x86_mask;
 #ifdef CONFIG_X86_32
 	char			wp_works_ok;	/* It doesn't on 386's */
-	char			hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
-	char			hard_math;
-	char			rfu;
-	char			fdiv_bug;
-	char			f00f_bug;
-	char			coma_bug;
-	char			pad0;
+
+	/* Problems on some 486Dx4's and old 386's: */
+	char			hlt_works_ok;
+	char			hard_math;
+	char			rfu;
+	char			fdiv_bug;
+	char			f00f_bug;
+	char			coma_bug;
+	char			pad0;
 #else
-	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
-	int			x86_tlbsize;
-	__u8			x86_virt_bits, x86_phys_bits;
-	/* cpuid returned core id bits */
-	__u8			x86_coreid_bits;
-	/* Max extended CPUID function supported */
-	__u32			extended_cpuid_level;
+	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
+	int			x86_tlbsize;
+	__u8			x86_virt_bits;
+	__u8			x86_phys_bits;
+	/* CPUID returned core id bits: */
+	__u8			x86_coreid_bits;
+	/* Max extended CPUID function supported: */
+	__u32			extended_cpuid_level;
 #endif
-	int			cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
+	/* Maximum supported CPUID level, -1=no CPUID: */
+	int			cpuid_level;
 	__u32			x86_capability[NCAPINTS];
 	char			x86_vendor_id[16];
 	char			x86_model_id[64];
-	int			x86_cache_size;	/* in KB - valid for CPUS which support this
-						   call */
+	/* in KB - valid for CPUS which support this call: */
+	int			x86_cache_size;
 	int			x86_cache_alignment;	/* In bytes */
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
-	cpumask_t		llc_shared_map;	/* cpus sharing the last level cache */
+	/* cpus sharing the last level cache: */
+	cpumask_t		llc_shared_map;
 #endif
-	u16			x86_max_cores;	/* cpuid returned max cores value */
+	/* cpuid returned max cores value: */
+	u16			x86_max_cores;
 	u16			apicid;
 	u16			x86_clflush_size;
 #ifdef CONFIG_SMP
-	u16			booted_cores;	/* number of cores as seen by OS */
-	u16			phys_proc_id;	/* Physical processor id. */
-	u16			cpu_core_id;	/* Core id */
-	u16			cpu_index;	/* index into per_cpu list */
+	/* number of cores as seen by the OS: */
+	u16			booted_cores;
+	/* Physical processor id: */
+	u16			phys_proc_id;
+	/* Core id: */
+	u16			cpu_core_id;
+	/* Index into per_cpu list: */
+	u16			cpu_index;
 #endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
 #define X86_VENDOR_AMD		2
 #define X86_VENDOR_UMC		3
 #define X86_VENDOR_NEXGEN	4
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
 #define X86_VENDOR_NUM		9
-#define X86_VENDOR_UNKNOWN	0xff
+
+#define X86_VENDOR_UNKNOWN	0xff
 
 /*
  * capabilities of CPUs
  */
 extern struct cpuinfo_x86	boot_cpu_data;
 extern struct cpuinfo_x86	new_cpu_data;
-extern struct tss_struct doublefault_tss;
-extern __u32 cleared_cpu_caps[NCAPINTS];
+
+extern struct tss_struct	doublefault_tss;
+extern __u32			cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
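
Note: consumers normally read these fields through `boot_cpu_data` (or per-CPU `cpu_info` via `current_cpu_data`). A hedged sketch of the usual vendor/family test, modelled on driver checks of this era (the init function name is made up):

```c
/* Kernel-context sketch: report a K8-class (family 0xf) AMD CPU. */
static int __init report_cpu(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 == 0xf)
		printk(KERN_INFO "K8-class CPU, clflush size %u bytes\n",
		       boot_cpu_data.x86_clflush_size);
	return 0;
}
```
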
@@ -129,7 +144,9 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 #define current_cpu_data	boot_cpu_data
 #endif
 
-void cpu_detect(struct cpuinfo_x86 *c);
+#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void identify_boot_cpu(void);
@@ -146,7 +163,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif
 
 static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
 	__asm__("cpuid"
@@ -165,54 +182,67 @@ static inline void load_cr3(pgd_t *pgdir)
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
 	unsigned short		back_link, __blh;
 	unsigned long		sp0;
 	unsigned short		ss0, __ss0h;
 	unsigned long		sp1;
-	unsigned short		ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
+	/* ss1 caches MSR_IA32_SYSENTER_CS: */
+	unsigned short		ss1, __ss1h;
 	unsigned long		sp2;
 	unsigned short		ss2, __ss2h;
 	unsigned long		__cr3;
 	unsigned long		ip;
 	unsigned long		flags;
-	unsigned long		ax, cx, dx, bx;
-	unsigned long		sp, bp, si, di;
+	unsigned long		ax;
+	unsigned long		cx;
+	unsigned long		dx;
+	unsigned long		bx;
+	unsigned long		sp;
+	unsigned long		bp;
+	unsigned long		si;
+	unsigned long		di;
 	unsigned short		es, __esh;
 	unsigned short		cs, __csh;
 	unsigned short		ss, __ssh;
 	unsigned short		ds, __dsh;
 	unsigned short		fs, __fsh;
 	unsigned short		gs, __gsh;
 	unsigned short		ldt, __ldth;
-	unsigned short		trace, io_bitmap_base;
+	unsigned short		trace;
+	unsigned short		io_bitmap_base;
 } __attribute__((packed));
 #else
 struct x86_hw_tss {
 	u32			reserved1;
 	u64			sp0;
 	u64			sp1;
 	u64			sp2;
 	u64			reserved2;
 	u64			ist[7];
 	u32			reserved3;
 	u32			reserved4;
 	u16			reserved5;
 	u16			io_bitmap_base;
 } __attribute__((packed)) ____cacheline_aligned;
 #endif
 
 /*
- * Size of io_bitmap.
+ * IO-bitmap sizes:
  */
 #define IO_BITMAP_BITS			65536
 #define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
 #define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET	0x8000
 #define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
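
Note: the arithmetic behind these constants, worked out (one bit per I/O port):

```c
/* IO_BITMAP_BITS  = 65536        one bit per port 0..0xffff
 * IO_BITMAP_BYTES = 65536 / 8  = 8192 bytes
 * IO_BITMAP_LONGS = 8192 / 4   = 2048 longs on 32-bit kernels,
 *                   8192 / 8   = 1024 longs on 64-bit kernels
 */
```

`INVALID_IO_BITMAP_OFFSET` (0x8000) deliberately points past the TSS segment limit, so while no bitmap is installed any user-space `in`/`out` simply faults instead of being looked up.
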
 struct tss_struct {
-	struct x86_hw_tss	x86_tss;
+	/*
+	 * The hardware state:
+	 */
+	struct x86_hw_tss	x86_tss;
 
 	/*
 	 * The extra 1 is there because the CPU will access an
@@ -220,48 +250,54 @@ struct tss_struct {
 	 * bitmap. The extra byte must be all 1 bits, and must
 	 * be within the limit.
 	 */
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 	/*
 	 * Cache the current maximum and the last task that used the bitmap:
 	 */
 	unsigned long		io_bitmap_max;
 	struct thread_struct	*io_bitmap_owner;
 	/*
-	 * pads the TSS to be cacheline-aligned (size is 0x100)
+	 * Pad the TSS to be cacheline-aligned (size is 0x100):
 	 */
 	unsigned long		__cacheline_filler[35];
 	/*
-	 * .. and then another 0x100 bytes for emergency kernel stack
+	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
 	unsigned long		stack[64];
 } __attribute__((packed));
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
-/* Save the original ist values for checking stack pointers during debugging */
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
 struct orig_ist {
 	unsigned long		ist[7];
 };
 
 #define MXCSR_DEFAULT		0x1f80
 
 struct i387_fsave_struct {
 	u32			cwd;
 	u32			swd;
 	u32			twd;
 	u32			fip;
 	u32			fcs;
 	u32			foo;
 	u32			fos;
-	u32			st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u32			status;		/* software status information */
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32			st_space[20];
+	/* Software status information: */
+	u32			status;
 };
 
 struct i387_fxsave_struct {
 	u16			cwd;
 	u16			swd;
 	u16			twd;
 	u16			fop;
 	union {
 		struct {
 			u64	rip;
@@ -274,31 +310,40 @@ struct i387_fxsave_struct {
 			u32	fos;
 		};
 	};
 	u32			mxcsr;
 	u32			mxcsr_mask;
-	u32			st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-	u32			xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
-	u32			padding[24];
+	/* 8*16 bytes for each FP-reg = 128 bytes: */
+	u32			st_space[32];
+	/* 16*16 bytes for each XMM-reg = 256 bytes: */
+	u32			xmm_space[64];
+	u32			padding[24];
 } __attribute__((aligned(16)));
 
 struct i387_soft_struct {
 	u32			cwd;
 	u32			swd;
 	u32			twd;
 	u32			fip;
 	u32			fcs;
 	u32			foo;
 	u32			fos;
-	u32			st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-	u8			ftop, changed, lookahead, no_update, rm, alimit;
-	struct info		*info;
-	u32			entry_eip;
+	/* 8*10 bytes for each FP-reg = 80 bytes: */
+	u32			st_space[20];
+	u8			ftop;
+	u8			changed;
+	u8			lookahead;
+	u8			no_update;
+	u8			rm;
+	u8			alimit;
+	struct info		*info;
+	u32			entry_eip;
 };
 
 union i387_union {
 	struct i387_fsave_struct	fsave;
 	struct i387_fxsave_struct	fxsave;
 	struct i387_soft_struct		soft;
 };
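
Note: `struct i387_fxsave_struct` mirrors the fixed 512-byte FXSAVE area, hence the 16-byte alignment. The sizes add up; a check one could drop into an init function (my addition, not in the patch):

```c
/*  4 x u16 header + 16-byte rip/rdp union + mxcsr + mxcsr_mask = 32
 *  st_space[32]  * 4 = 128
 *  xmm_space[64] * 4 = 256
 *  padding[24]   * 4 =  96
 *  total             = 512 bytes, the architected FXSAVE image
 */
BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != 512);
```
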
 #ifdef CONFIG_X86_32
@@ -313,42 +358,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
 
 struct thread_struct {
-	/* cached TLS descriptors. */
+	/* Cached TLS descriptors: */
 	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
 	unsigned long		sp0;
 	unsigned long		sp;
 #ifdef CONFIG_X86_32
 	unsigned long		sysenter_cs;
 #else
 	unsigned long		usersp;	/* Copy from PDA */
-	unsigned short		es, ds, fsindex, gsindex;
+	unsigned short		es;
+	unsigned short		ds;
+	unsigned short		fsindex;
+	unsigned short		gsindex;
 #endif
 	unsigned long		ip;
 	unsigned long		fs;
 	unsigned long		gs;
-	/* Hardware debugging registers */
+	/* Hardware debugging registers: */
 	unsigned long		debugreg0;
 	unsigned long		debugreg1;
 	unsigned long		debugreg2;
 	unsigned long		debugreg3;
 	unsigned long		debugreg6;
 	unsigned long		debugreg7;
-	/* fault info */
-	unsigned long		cr2, trap_no, error_code;
-	/* floating point info */
+	/* Fault info: */
+	unsigned long		cr2;
+	unsigned long		trap_no;
+	unsigned long		error_code;
+	/* Floating point info: */
 	union i387_union	i387 __attribute__((aligned(16)));;
 #ifdef CONFIG_X86_32
-	/* virtual 86 mode info */
+	/* Virtual 86 mode info */
 	struct vm86_struct __user *vm86_info;
 	unsigned long		screen_bitmap;
-	unsigned long		v86flags, v86mask, saved_sp0;
-	unsigned int		saved_fs, saved_gs;
+	unsigned long		v86flags;
+	unsigned long		v86mask;
+	unsigned long		saved_sp0;
+	unsigned int		saved_fs;
+	unsigned int		saved_gs;
 #endif
-	/* IO permissions */
+	/* IO permissions: */
 	unsigned long		*io_bitmap_ptr;
 	unsigned long		iopl;
-	/* max allowed port in the bitmap, in bytes: */
+	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
 	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
 	unsigned long		debugctlmsr;
 	/* Debug Store - if not 0 points to a DS Save Area configuration;
@@ -358,7 +411,7 @@ struct thread_struct {
 static inline unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0;	/* Damn you, gcc! */
 
 	switch (regno) {
 	case 0:
@@ -383,22 +436,22 @@ static inline void native_set_debugreg(int regno, unsigned long value)
 {
 	switch (regno) {
 	case 0:
-		asm("mov %0,%%db0"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db0"	::"r" (value));
 		break;
 	case 1:
-		asm("mov %0,%%db1"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db1"	::"r" (value));
 		break;
 	case 2:
-		asm("mov %0,%%db2"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db2"	::"r" (value));
 		break;
 	case 3:
-		asm("mov %0,%%db3"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db3"	::"r" (value));
 		break;
 	case 6:
-		asm("mov %0,%%db6"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db6"	::"r" (value));
 		break;
 	case 7:
-		asm("mov %0,%%db7"	: /* no output */ :"r" (value));
+		asm("mov %0, %%db7"	::"r" (value));
 		break;
 	default:
 		BUG();
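
Note: with the paravirt wrappers further down, callers use the `get_debugreg()`/`set_debugreg()` macros rather than these native helpers. A hedged sketch of the classic pattern from a debug-trap handler (`handle_bp0()` is hypothetical):

```c
unsigned long dr6;

get_debugreg(dr6, 6);		/* which breakpoint fired? */
set_debugreg(0, 6);		/* clear the sticky status bits */

if (dr6 & (1 << 0))		/* bit 0: breakpoint in db0 hit */
	handle_bp0();
```
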
@@ -412,6 +465,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 {
 #ifdef CONFIG_X86_32
 	unsigned int reg;
+
 	__asm__ __volatile__ ("pushfl;"
 			      "popl %0;"
 			      "andl %1, %0;"
@@ -423,12 +477,12 @@ static inline void native_set_iopl_mask(unsigned mask)
 #endif
 }
 
-static inline void native_load_sp0(struct tss_struct *tss,
-				   struct thread_struct *thread)
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
 #ifdef CONFIG_X86_32
-	/* Only happens when SEP is enabled, no need to test "SEP"arately */
+	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
@@ -446,8 +500,8 @@ static inline void native_swapgs(void)
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
 #define __cpuid			native_cpuid
 #define paravirt_enabled()	0
 
 /*
  * These special macros can be used to get or set a debugging register
@@ -457,8 +511,8 @@ static inline void native_swapgs(void)
 #define set_debugreg(value, register)				\
 	native_set_debugreg(register, value)
 
-static inline void load_sp0(struct tss_struct *tss,
-			    struct thread_struct *thread)
+static inline void
+load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	native_load_sp0(tss, thread);
 }
@@ -473,11 +527,12 @@ static inline void load_sp0(struct tss_struct *tss,
  * enable), so that any CPU's that boot up
  * after us can get the correct flags.
  */
 extern unsigned long		mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
 	cr4 |= mask;
@@ -487,6 +542,7 @@ static inline void set_in_cr4(unsigned long mask)
 static inline void clear_in_cr4(unsigned long mask)
 {
 	unsigned cr4;
+
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
 	cr4 &= ~mask;
@@ -494,42 +550,42 @@ static inline void clear_in_cr4(unsigned long mask)
 }
 
 struct microcode_header {
 	unsigned int		hdrver;
 	unsigned int		rev;
 	unsigned int		date;
 	unsigned int		sig;
 	unsigned int		cksum;
 	unsigned int		ldrver;
 	unsigned int		pf;
 	unsigned int		datasize;
 	unsigned int		totalsize;
 	unsigned int		reserved[3];
 };
 
 struct microcode {
 	struct microcode_header	hdr;
 	unsigned int		bits[0];
 };
 
 typedef struct microcode	microcode_t;
 typedef struct microcode_header	microcode_header_t;
 
 /* microcode format is extended from prescott processors */
 struct extended_signature {
 	unsigned int		sig;
 	unsigned int		pf;
 	unsigned int		cksum;
 };
 
 struct extended_sigtable {
 	unsigned int		count;
 	unsigned int		cksum;
 	unsigned int		reserved[3];
 	struct extended_signature sigs[0];
 };
 
 typedef struct {
 	unsigned long		seg;
 } mm_segment_t;
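
Note on the format: in Intel's microcode update format a zero `datasize` means 2000 data bytes (2048 bytes total), and the whole image must sum to zero when added up as 32-bit words. A sketch of that validation, hand-rolled here for illustration (the real logic lives in the microcode driver):

```c
static int microcode_sane(struct microcode *mc)
{
	unsigned int total = mc->hdr.totalsize ? mc->hdr.totalsize : 2048;
	unsigned int *word = (unsigned int *)mc;
	unsigned int sum = 0;
	unsigned int i;

	for (i = 0; i < total / sizeof(*word); i++)
		sum += word[i];		/* wraps mod 2^32 by design */

	return sum == 0;
}
```
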
@@ -541,7 +597,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
+/* Prepare to copy thread state - unlazy all lazy state */
 extern void prepare_to_copy(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
@@ -578,118 +634,131 @@ static inline unsigned int cpuid_eax(unsigned int op)
 	unsigned int eax, ebx, ecx, edx;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return eax;
 }
 
 static inline unsigned int cpuid_ebx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return ebx;
 }
 
 static inline unsigned int cpuid_ecx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return ecx;
 }
 
 static inline unsigned int cpuid_edx(unsigned int op)
 {
 	unsigned int eax, ebx, ecx, edx;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 	return edx;
 }
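
Note: these one-register helpers make feature probing terse. For instance, extended leaf 0x80000008 reports the address widths that fill `x86_phys_bits`/`x86_virt_bits` above (sketch):

```c
unsigned int max_ext = cpuid_eax(0x80000000);	/* highest extended leaf */

if (max_ext >= 0x80000008) {
	unsigned int v = cpuid_eax(0x80000008);

	printk(KERN_INFO "phys bits: %u, virt bits: %u\n",
	       v & 0xff, (v >> 8) & 0xff);	/* EAX[7:0], EAX[15:8] */
}
```
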
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
 {
-	__asm__ __volatile__("rep;nop": : :"memory");
+	__asm__ __volatile__("rep; nop" ::: "memory");
 }
 
-/* Stop speculative execution */
+static inline void cpu_relax(void)
+{
+	rep_nop();
+}
+
+/* Stop speculative execution: */
 static inline void sync_core(void)
 {
 	int tmp;
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
 		     : "ebx", "ecx", "edx", "memory");
 }
 
-#define cpu_relax()	rep_nop()
-
-static inline void __monitor(const void *eax, unsigned long ecx,
-			     unsigned long edx)
+static inline void
+__monitor(const void *eax, unsigned long ecx, unsigned long edx)
 {
-	/* "monitor %eax,%ecx,%edx;" */
+	/* "monitor %eax, %ecx, %edx;" */
 	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
+		".byte 0x0f, 0x01, 0xc8;"
+		:: "a" (eax), "c" (ecx), "d"(edx));
 }
 
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
+	/* "mwait %eax, %ecx;" */
 	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+		".byte 0x0f, 0x01, 0xc9;"
+		:: "a" (eax), "c" (ecx));
 }
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-	/* "mwait %eax,%ecx;" */
+	/* "mwait %eax, %ecx;" */
 	asm volatile(
-		"sti; .byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
+		"sti; .byte 0x0f, 0x01, 0xc9;"
+		:: "a" (eax), "c" (ecx));
 }
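
Note: the `.byte` sequences encode MONITOR and MWAIT, which assemblers of the day did not know. The idle pattern behind `mwait_idle_with_hints()` looks roughly like this sketch, with `ax`/`cx` carrying the requested C-state hint:

```c
/* Arm the monitor on this thread's flags word, then sleep until
 * it is written (TIF_NEED_RESCHED) or an interrupt arrives: */
__monitor(&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
	__mwait(ax, cx);
```
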
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 extern int force_mwait;
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
 /* Defined in head.S */
 extern struct desc_ptr		early_gdt_descr;
 
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);
 
-/* from system description table in BIOS. Mostly for MCA use, but
- * others may find it useful. */
-extern unsigned int machine_id;
-extern unsigned int machine_submodel_id;
-extern unsigned int BIOS_revision;
+/*
+ * from system description table in BIOS. Mostly for MCA use, but
+ * others may find it useful:
+ */
+extern unsigned int		machine_id;
+extern unsigned int		machine_submodel_id;
+extern unsigned int		BIOS_revision;
 
-/* Boot loader type from the setup header */
-extern int bootloader_type;
+/* Boot loader type from the setup header: */
+extern int			bootloader_type;
 
 extern char			ignore_fpu_irq;
 
-#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 #define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
 #ifdef CONFIG_X86_32
-#define BASE_PREFETCH	ASM_NOP4
-#define ARCH_HAS_PREFETCH
+# define BASE_PREFETCH		ASM_NOP4
+# define ARCH_HAS_PREFETCH
 #else
-#define BASE_PREFETCH	"prefetcht0 (%1)"
+# define BASE_PREFETCH		"prefetcht0 (%1)"
 #endif
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth to care about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
 static inline void prefetch(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
@@ -698,8 +767,11 @@ static inline void prefetch(const void *x)
 			  "r" (x));
 }
 
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
-   spinlocks to avoid one state transition in the cache coherency protocol. */
+/*
+ * 3dnow prefetch to get an exclusive cache line.
+ * Useful for spinlocks to avoid one state transition in the
+ * cache coherency protocol:
+ */
 static inline void prefetchw(const void *x)
 {
 	alternative_input(BASE_PREFETCH,
@@ -708,21 +780,25 @@ static inline void prefetchw(const void *x)
 			  "r" (x));
 }
 
-#define spin_lock_prefetch(x)	prefetchw(x)
+static inline void spin_lock_prefetch(const void *x)
+{
+	prefetchw(x);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * User space process size: 3GB (default).
  */
-#define TASK_SIZE	(PAGE_OFFSET)
+#define TASK_SIZE		PAGE_OFFSET
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		STACK_TOP
 
 #define INIT_THREAD {							\
 	.sp0		= sizeof(init_stack) + (long)&init_stack,	\
 	.vm86_info	= NULL,						\
 	.sysenter_cs	= __KERNEL_CS,					\
 	.io_bitmap_ptr	= NULL,						\
 	.fs		= __KERNEL_PERCPU,				\
 }
@@ -731,26 +807,27 @@ static inline void prefetchw(const void *x)
  * permission bitmap. The extra byte must be all 1 bits, and must
  * be within the limit.
  */
 #define INIT_TSS {							\
 	.x86_tss = {							\
 		.sp0		= sizeof(init_stack) + (long)&init_stack, \
 		.ss0		= __KERNEL_DS,				\
 		.ss1		= __KERNEL_CS,				\
 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
 	},								\
 	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },		\
 }
 
-#define start_thread(regs, new_eip, new_esp) do {		\
+#define start_thread(regs, new_eip, new_esp)			\
+do {								\
 	__asm__("movl %0,%%gs": :"r" (0));			\
 	regs->fs	= 0;					\
 	set_fs(USER_DS);					\
 	regs->ds	= __USER_DS;				\
 	regs->es	= __USER_DS;				\
 	regs->ss	= __USER_DS;				\
 	regs->cs	= __USER_CS;				\
 	regs->ip	= new_eip;				\
 	regs->sp	= new_esp;				\
 } while (0)
@@ -780,24 +857,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	__regs__ - 1;							\
 })
 
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
 /*
  * User space process size. 47bits minus one guard page.
  */
 #define TASK_SIZE64	(0x800000000000UL - 4096)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
 					0xc0000000 : 0xFFFFe000)
 
 #define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE64)
 
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		TASK_SIZE64
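
Note: worked out, since the constants are easy to misread:

```c
/* 2^47            = 0x0000800000000000   (128 TB)
 * TASK_SIZE64     = 0x00007ffffffff000   (128 TB minus one 4 KB guard page)
 * ADDR_LIMIT_3GB -> 0xc0000000           (3 GB, the 32-bit default)
 * legacy ia32    -> 0xFFFFe000           (4 GB minus 8 KB)
 */
```
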
@@ -813,12 +890,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define start_thread(regs, new_rip, new_rsp) do {			     \
 	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
 	load_gs_index(0);						     \
 	(regs)->ip		= (new_rip);				     \
 	(regs)->sp		= (new_rsp);				     \
 	write_pda(oldrsp, (new_rsp));					     \
 	(regs)->cs		= __USER_CS;				     \
 	(regs)->ss		= __USER_DS;				     \
 	(regs)->flags		= 0x200;				     \
 	set_fs(USER_DS);						     \
 } while (0)
@@ -826,17 +903,18 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
 #define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 #define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
 
 #endif /* CONFIG_X86_64 */
 
-/* This decides where the kernel will search for a free chunk of vm
+/*
+ * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
 #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
 #endif
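
Note: with the 32-bit default `TASK_SIZE` of 3 GB this puts the start of bottom-up mmap allocations at the 1 GB mark:

```c
/* TASK_SIZE / 3      = 0xc0000000 / 3 = 0x40000000 (1 GB)
 * PAGE_ALIGN(...)    = 0x40000000 (already page aligned)
 * TASK_UNMAPPED_BASE = 0x40000000
 */
```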