Commit 4d46a89e authored by Ingo Molnar

x86: clean up include/asm-x86/processor.h

basic style cleanup to flush out years of neglect:

 - consistent indentation
 - whitespace fixes
 - consistent comments

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent eb19067d
@@ -24,6 +24,7 @@ struct mm_struct;
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
@@ -37,16 +38,18 @@ struct mm_struct;
static inline void *current_text_addr(void)
{
void *pc;
asm volatile("mov $1f,%0\n1:":"=r" (pc));
asm volatile("mov $1f, %0; 1:":"=r" (pc));
return pc;
}
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN 16
#define ARCH_MIN_MMSTRUCT_ALIGN 0
# define ARCH_MIN_TASKALIGN 16
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
/*
@@ -62,7 +65,9 @@ struct cpuinfo_x86 {
__u8 x86_mask;
#ifdef CONFIG_X86_32
char wp_works_ok; /* It doesn't on 386's */
char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
/* Problems on some 486Dx4's and old 386's: */
char hlt_works_ok;
char hard_math;
char rfu;
char fdiv_bug;
@@ -70,34 +75,42 @@ struct cpuinfo_x86 {
char coma_bug;
char pad0;
#else
/* number of 4K pages in DTLB/ITLB combined(in pages)*/
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
int x86_tlbsize;
__u8 x86_virt_bits, x86_phys_bits;
/* cpuid returned core id bits */
__u8 x86_virt_bits;
__u8 x86_phys_bits;
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
/* Max extended CPUID function supported */
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
#endif
int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
/* Maximum supported CPUID level, -1=no CPUID: */
int cpuid_level;
__u32 x86_capability[NCAPINTS];
char x86_vendor_id[16];
char x86_model_id[64];
int x86_cache_size; /* in KB - valid for CPUS which support this
call */
/* in KB - valid for CPUS which support this call: */
int x86_cache_size;
int x86_cache_alignment; /* In bytes */
int x86_power;
unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
cpumask_t llc_shared_map; /* cpus sharing the last level cache */
/* cpus sharing the last level cache: */
cpumask_t llc_shared_map;
#endif
u16 x86_max_cores; /* cpuid returned max cores value */
/* cpuid returned max cores value: */
u16 x86_max_cores;
u16 apicid;
u16 x86_clflush_size;
#ifdef CONFIG_SMP
u16 booted_cores; /* number of cores as seen by OS */
u16 phys_proc_id; /* Physical processor id. */
u16 cpu_core_id; /* Core id */
u16 cpu_index; /* index into per_cpu list */
/* number of cores as seen by the OS: */
u16 booted_cores;
/* Physical processor id: */
u16 phys_proc_id;
/* Core id: */
u16 cpu_core_id;
/* Index into per_cpu list: */
u16 cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -110,6 +123,7 @@ struct cpuinfo_x86 {
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff
/*
@@ -117,6 +131,7 @@ struct cpuinfo_x86 {
*/
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
extern __u32 cleared_cpu_caps[NCAPINTS];
@@ -129,7 +144,9 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define current_cpu_data boot_cpu_data
#endif
void cpu_detect(struct cpuinfo_x86 *c);
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);
extern void identify_cpu(struct cpuinfo_x86 *);
extern void identify_boot_cpu(void);
@@ -169,14 +186,21 @@ struct x86_hw_tss {
unsigned long sp0;
unsigned short ss0, __ss0h;
unsigned long sp1;
unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
/* ss1 caches MSR_IA32_SYSENTER_CS: */
unsigned short ss1, __ss1h;
unsigned long sp2;
unsigned short ss2, __ss2h;
unsigned long __cr3;
unsigned long ip;
unsigned long flags;
unsigned long ax, cx, dx, bx;
unsigned long sp, bp, si, di;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long bx;
unsigned long sp;
unsigned long bp;
unsigned long si;
unsigned long di;
unsigned short es, __esh;
unsigned short cs, __csh;
unsigned short ss, __ssh;
@@ -184,7 +208,9 @@ struct x86_hw_tss {
unsigned short fs, __fsh;
unsigned short gs, __gsh;
unsigned short ldt, __ldth;
unsigned short trace, io_bitmap_base;
unsigned short trace;
unsigned short io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
@@ -198,11 +224,12 @@ struct x86_hw_tss {
u32 reserved4;
u16 reserved5;
u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif
/*
* Size of io_bitmap.
* IO-bitmap sizes:
*/
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
@@ -212,6 +239,9 @@ struct x86_hw_tss {
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
struct tss_struct {
/*
* The hardware state:
*/
struct x86_hw_tss x86_tss;
/*
@@ -226,19 +256,23 @@ struct tss_struct {
*/
unsigned long io_bitmap_max;
struct thread_struct *io_bitmap_owner;
/*
* pads the TSS to be cacheline-aligned (size is 0x100)
* Pad the TSS to be cacheline-aligned (size is 0x100):
*/
unsigned long __cacheline_filler[35];
/*
* .. and then another 0x100 bytes for emergency kernel stack
* .. and then another 0x100 bytes for the emergency kernel stack:
*/
unsigned long stack[64];
} __attribute__((packed));
DECLARE_PER_CPU(struct tss_struct, init_tss);
/* Save the original ist values for checking stack pointers during debugging */
/*
* Save the original ist values for checking stack pointers during debugging
*/
struct orig_ist {
unsigned long ist[7];
};
@@ -253,8 +287,10 @@ struct i387_fsave_struct {
u32 fcs;
u32 foo;
u32 fos;
u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
u32 status; /* software status information */
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
/* Software status information: */
u32 status;
};
struct i387_fxsave_struct {
@@ -276,9 +312,12 @@ struct i387_fxsave_struct {
};
u32 mxcsr;
u32 mxcsr_mask;
u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
/* 8*16 bytes for each FP-reg = 128 bytes: */
u32 st_space[32];
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
u32 padding[24];
} __attribute__((aligned(16)));
struct i387_soft_struct {
@@ -289,8 +328,14 @@ struct i387_soft_struct {
u32 fcs;
u32 foo;
u32 fos;
u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
u8 ftop, changed, lookahead, no_update, rm, alimit;
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct info *info;
u32 entry_eip;
};
@@ -313,7 +358,7 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
struct thread_struct {
/* cached TLS descriptors. */
/* Cached TLS descriptors: */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
unsigned long sp0;
unsigned long sp;
@@ -321,33 +366,41 @@ struct thread_struct {
unsigned long sysenter_cs;
#else
unsigned long usersp; /* Copy from PDA */
unsigned short es, ds, fsindex, gsindex;
unsigned short es;
unsigned short ds;
unsigned short fsindex;
unsigned short gsindex;
#endif
unsigned long ip;
unsigned long fs;
unsigned long gs;
/* Hardware debugging registers */
/* Hardware debugging registers: */
unsigned long debugreg0;
unsigned long debugreg1;
unsigned long debugreg2;
unsigned long debugreg3;
unsigned long debugreg6;
unsigned long debugreg7;
/* fault info */
unsigned long cr2, trap_no, error_code;
/* floating point info */
/* Fault info: */
unsigned long cr2;
unsigned long trap_no;
unsigned long error_code;
/* Floating point info: */
union i387_union i387 __attribute__((aligned(16)));;
#ifdef CONFIG_X86_32
/* virtual 86 mode info */
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
unsigned long screen_bitmap;
unsigned long v86flags, v86mask, saved_sp0;
unsigned int saved_fs, saved_gs;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
unsigned int saved_fs;
unsigned int saved_gs;
#endif
/* IO permissions */
/* IO permissions: */
unsigned long *io_bitmap_ptr;
unsigned long iopl;
/* max allowed port in the bitmap, in bytes: */
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
unsigned long debugctlmsr;
@@ -383,22 +436,22 @@ static inline void native_set_debugreg(int regno, unsigned long value)
{
switch (regno) {
case 0:
asm("mov %0,%%db0" : /* no output */ :"r" (value));
asm("mov %0, %%db0" ::"r" (value));
break;
case 1:
asm("mov %0,%%db1" : /* no output */ :"r" (value));
asm("mov %0, %%db1" ::"r" (value));
break;
case 2:
asm("mov %0,%%db2" : /* no output */ :"r" (value));
asm("mov %0, %%db2" ::"r" (value));
break;
case 3:
asm("mov %0,%%db3" : /* no output */ :"r" (value));
asm("mov %0, %%db3" ::"r" (value));
break;
case 6:
asm("mov %0,%%db6" : /* no output */ :"r" (value));
asm("mov %0, %%db6" ::"r" (value));
break;
case 7:
asm("mov %0,%%db7" : /* no output */ :"r" (value));
asm("mov %0, %%db7" ::"r" (value));
break;
default:
BUG();
@@ -412,6 +465,7 @@ static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
unsigned int reg;
__asm__ __volatile__ ("pushfl;"
"popl %0;"
"andl %1, %0;"
@@ -423,12 +477,12 @@ static inline void native_set_iopl_mask(unsigned mask)
#endif
}
static inline void native_load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
/* Only happens when SEP is enabled, no need to test "SEP"arately */
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
tss->x86_tss.ss1 = thread->sysenter_cs;
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
@@ -457,8 +511,8 @@ static inline void native_swapgs(void)
#define set_debugreg(value, register) \
native_set_debugreg(register, value)
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
static inline void
load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
native_load_sp0(tss, thread);
}
@@ -478,6 +532,7 @@ extern unsigned long mmu_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
unsigned cr4;
mmu_cr4_features |= mask;
cr4 = read_cr4();
cr4 |= mask;
@@ -487,6 +542,7 @@ static inline void set_in_cr4(unsigned long mask)
static inline void clear_in_cr4(unsigned long mask)
{
unsigned cr4;
mmu_cr4_features &= ~mask;
cr4 = read_cr4();
cr4 &= ~mask;
@@ -541,7 +597,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */
/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
@@ -578,69 +634,80 @@ static inline unsigned int cpuid_eax(unsigned int op)
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, ebx, ecx, edx;
cpuid(op, &eax, &ebx, &ecx, &edx);
return edx;
}
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
__asm__ __volatile__("rep;nop": : :"memory");
__asm__ __volatile__("rep; nop" ::: "memory");
}
/* Stop speculative execution */
static inline void cpu_relax(void)
{
rep_nop();
}
/* Stop speculative execution: */
static inline void sync_core(void)
{
int tmp;
asm volatile("cpuid" : "=a" (tmp) : "0" (1)
: "ebx", "ecx", "edx", "memory");
}
#define cpu_relax() rep_nop()
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
static inline void
__monitor(const void *eax, unsigned long ecx, unsigned long edx)
{
/* "monitor %eax,%ecx,%edx;" */
/* "monitor %eax, %ecx, %edx;" */
asm volatile(
".byte 0x0f,0x01,0xc8;"
: :"a" (eax), "c" (ecx), "d"(edx));
".byte 0x0f, 0x01, 0xc8;"
:: "a" (eax), "c" (ecx), "d"(edx));
}
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
/* "mwait %eax,%ecx;" */
/* "mwait %eax, %ecx;" */
asm volatile(
".byte 0x0f,0x01,0xc9;"
: :"a" (eax), "c" (ecx));
".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
}
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
/* "mwait %eax,%ecx;" */
/* "mwait %eax, %ecx;" */
asm volatile(
"sti; .byte 0x0f,0x01,0xc9;"
: :"a" (eax), "c" (ecx));
"sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
@@ -662,34 +729,36 @@ extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);
/* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful. */
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
*/
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
/* Boot loader type from the setup header */
/* Boot loader type from the setup header: */
extern int bootloader_type;
extern char ignore_fpu_irq;
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32
#define BASE_PREFETCH ASM_NOP4
#define ARCH_HAS_PREFETCH
# define BASE_PREFETCH ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
#define BASE_PREFETCH "prefetcht0 (%1)"
# define BASE_PREFETCH "prefetcht0 (%1)"
#endif
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth to care about 3dnow! prefetches for the K6
because they are microcoded there and very slow.
However we don't do prefetches for pre XP Athlons currently
That should be fixed. */
/*
* Prefetch instructions for Pentium III (+) and AMD Athlon (+)
*
* It's not worth to care about 3dnow prefetches for the K6
* because they are microcoded there and very slow.
*/
static inline void prefetch(const void *x)
{
alternative_input(BASE_PREFETCH,
@@ -698,8 +767,11 @@ static inline void prefetch(const void *x)
"r" (x));
}
/* 3dnow! prefetch to get an exclusive cache line. Useful for
spinlocks to avoid one state transition in the cache coherency protocol. */
/*
* 3dnow prefetch to get an exclusive cache line.
* Useful for spinlocks to avoid one state transition in the
* cache coherency protocol:
*/
static inline void prefetchw(const void *x)
{
alternative_input(BASE_PREFETCH,
@@ -708,12 +780,16 @@ static inline void prefetchw(const void *x)
"r" (x));
}
#define spin_lock_prefetch(x) prefetchw(x)
static inline void spin_lock_prefetch(const void *x)
{
prefetchw(x);
}
#ifdef CONFIG_X86_32
/*
* User space process size: 3GB (default).
*/
#define TASK_SIZE (PAGE_OFFSET)
#define TASK_SIZE PAGE_OFFSET
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
@@ -741,7 +817,8 @@ static inline void prefetchw(const void *x)
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}
#define start_thread(regs, new_eip, new_esp) do { \
#define start_thread(regs, new_eip, new_esp) \
do { \
__asm__("movl %0,%%gs": :"r" (0)); \
regs->fs = 0; \
set_fs(USER_DS); \
@@ -832,7 +909,8 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
/* This decides where the kernel will search for a free chunk of vm
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
...