Commit 60812a4a authored by Linus Torvalds

Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86

* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (33 commits)
  x86: convert cpuinfo_x86 array to a per_cpu array
  x86: introduce frame_pointer() and stack_pointer()
  x86 & generic: change to __builtin_prefetch()
  i386: do not BUG_ON() when MSR is unknown
  x86: acpi use cpu_physical_id
  x86: convert cpu_llc_id to be a per cpu variable
  x86: convert cpu_to_apicid to be a per cpu variable
  i386: introduce "used_vectors" bitmap which can be used to reserve vectors.
  x86: use raw locks during oopses
  x86: honor _PAGE_PSE bit on page walks
  i386: do cpuid_device_create() in CPU_UP_PREPARE instead of CPU_ONLINE.
  x86: implement missing x86_64 function smp_call_function_mask()
  x86: use descriptor's functions instead of inline assembly
  i386: consolidate show_regs and show_registers for i386
  i386: make callgraph use dump_trace() on i386/x86_64
  x86: enable iommu_merge by default
  i386: i386 add AMD64 Barcelona PMU MSR definitions to msr.h
  x86: Unify i386 and x86-64 early quirks
  x86: enable HPET on ICH3 and ICH4
  x86: force enable HPET on VT8235/8237 chipsets
  ...

Manually fix trivial conflict with task pid container helper changes in
arch/x86/kernel/process_32.c
parents b04cde34 92cb7612
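
Most of the churn below comes from the cpuinfo_x86 conversion: the global cpu_data[] array becomes a per-CPU variable reached through a cpu_data(cpu) accessor macro, so every "cpu_data + n" and "&cpu_data[n]" site is rewritten as "&cpu_data(n)". A minimal user-space sketch of the accessor idea (plain C; the struct fields and flat backing array are illustrative stand-ins, not the kernel's per_cpu machinery):

	#include <stdio.h>

	struct cpuinfo_x86 { int x86_vendor; int x86_model; };

	/* Illustrative stand-in for the kernel's DEFINE_PER_CPU storage. */
	static struct cpuinfo_x86 per_cpu_cpuinfo[4];

	/* Call sites use cpu_data(cpu) and never touch the backing store
	 * directly, so the storage can change without editing callers. */
	#define cpu_data(cpu) (per_cpu_cpuinfo[(cpu)])

	int main(void)
	{
		struct cpuinfo_x86 *c = &cpu_data(0);	/* was: cpu_data + 0 */
		c->x86_model = 6;
		printf("model: %d\n", cpu_data(0).x86_model);
		return 0;
	}

Hiding the storage behind a macro is what lets the backing store move from a flat array to per-CPU sections without touching call sites.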
@@ -12,6 +12,7 @@
 *.s
 *.ko
 *.so
+*.so.dbg
 *.mod.c
 *.i
 *.lst
...
@@ -431,8 +431,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			over the 8254 in addition to over the IO-APIC. The
 			kernel tries to set a sensible default.
 
-	hpet=		[X86-32,HPET] option to disable HPET and use PIT.
-			Format: disable
+	hpet=		[X86-32,HPET] option to control HPET usage
+			Format: { enable (default) | disable | force }
+			disable: disable HPET and use PIT instead
+			force: allow force enabled of undocumented chips (ICH4, VIA)
 
 	com20020=	[HW,NET] ARCnet - COM20020 chipset
 			Format:
...
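
With the new format, force-enabling the HPET on one of the listed chipsets is requested from the boot loader; an illustrative kernel command line (kernel path and root device are examples only):

	kernel /boot/vmlinuz-2.6.24 root=/dev/sda1 hpet=force

hpet=disable keeps the old behaviour of falling back to the PIT.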
@@ -1080,7 +1080,7 @@ config APM_REAL_MODE_POWER_OFF
 
 endif # APM
 
-source "arch/x86/kernel/cpu/cpufreq/Kconfig"
+source "arch/x86/kernel/cpu/cpufreq/Kconfig_32"
 
 source "drivers/cpuidle/Kconfig"
...
@@ -102,7 +102,7 @@ core-$(CONFIG_XEN) += arch/x86/xen/
 # default subarch .h files
 mflags-y += -Iinclude/asm-x86/mach-default
 
-head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task_32.o
+head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
 
 libs-y  += arch/x86/lib/
 core-y  += arch/x86/kernel/ \
@@ -131,9 +131,9 @@ all: bzImage
 zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
 
 zImage bzImage: vmlinux
-	$(Q)mkdir -p $(objtree)/arch/i386/boot
-	$(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
 	$(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
+	$(Q)mkdir -p $(objtree)/arch/i386/boot
+	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
 
 compressed: zImage
...
@@ -5,10 +5,6 @@
  * This tricks binfmt_elf.c into loading 32bit binaries using lots
  * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
  */
-#define __ASM_X86_64_ELF_H 1
-
-#undef ELF_CLASS
-#define ELF_CLASS ELFCLASS32
 
 #include <linux/types.h>
 #include <linux/stddef.h>
@@ -19,6 +15,7 @@
 #include <linux/binfmts.h>
 #include <linux/mm.h>
 #include <linux/security.h>
+#include <linux/elfcore-compat.h>
 
 #include <asm/segment.h>
 #include <asm/ptrace.h>
@@ -31,6 +28,20 @@
 #include <asm/ia32.h>
 #include <asm/vsyscall32.h>
 
+#undef ELF_ARCH
+#undef ELF_CLASS
+#define ELF_CLASS	ELFCLASS32
+#define ELF_ARCH	EM_386
+
+#undef elfhdr
+#undef elf_phdr
+#undef elf_note
+#undef elf_addr_t
+#define elfhdr		elf32_hdr
+#define elf_phdr	elf32_phdr
+#define elf_note	elf32_note
+#define elf_addr_t	Elf32_Off
+
 #define ELF_NAME "elf/i386"
 
 #define AT_SYSINFO 32
@@ -48,74 +59,20 @@ int sysctl_vsyscall32 = 1;
 } while(0)
 
 struct file;
-struct elf_phdr;
 
 #define IA32_EMULATOR 1
 
-#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
+#undef ELF_ET_DYN_BASE
 
-#undef ELF_ARCH
-#define ELF_ARCH EM_386
-
-#define ELF_DATA ELFDATA2LSB
-
-#define USE_ELF_CORE_DUMP 1
-
-/* Override elfcore.h */
-#define _LINUX_ELFCORE_H 1
-typedef unsigned int elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-struct elf_siginfo
-{
-	int	si_signo;	/* signal number */
-	int	si_code;	/* extra code */
-	int	si_errno;	/* errno */
-};
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 
 #define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
 
-struct elf_prstatus
-{
-	struct elf_siginfo pr_info;	/* Info associated with signal */
-	short	pr_cursig;		/* Current signal */
-	unsigned int pr_sigpend;	/* Set of pending signals */
-	unsigned int pr_sighold;	/* Set of held signals */
-	pid_t	pr_pid;
-	pid_t	pr_ppid;
-	pid_t	pr_pgrp;
-	pid_t	pr_sid;
-	struct compat_timeval pr_utime;	/* User time */
-	struct compat_timeval pr_stime;	/* System time */
-	struct compat_timeval pr_cutime;	/* Cumulative user time */
-	struct compat_timeval pr_cstime;	/* Cumulative system time */
-	elf_gregset_t pr_reg;	/* GP registers */
-	int pr_fpvalid;		/* True if math co-processor being used. */
-};
-
-#define ELF_PRARGSZ	(80)	/* Number of chars for args */
-
-struct elf_prpsinfo
-{
-	char	pr_state;	/* numeric process state */
-	char	pr_sname;	/* char for pr_state */
-	char	pr_zomb;	/* zombie */
-	char	pr_nice;	/* nice val */
-	unsigned int pr_flag;	/* flags */
-	__u16	pr_uid;
-	__u16	pr_gid;
-	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
-	/* Lots missing */
-	char	pr_fname[16];	/* filename of executable */
-	char	pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
 #define _GET_SEG(x) \
 	({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
 
 /* Assumes current==process to be dumped */
+#undef ELF_CORE_COPY_REGS
 #define ELF_CORE_COPY_REGS(pr_reg, regs)		\
 	pr_reg[0] = regs->rbx;				\
 	pr_reg[1] = regs->rcx;				\
@@ -135,36 +92,41 @@ struct elf_prpsinfo
 	pr_reg[15] = regs->rsp;				\
 	pr_reg[16] = regs->ss;
 
-#define user user32
+#define elf_prstatus	compat_elf_prstatus
+#define elf_prpsinfo	compat_elf_prpsinfo
+#define elf_fpregset_t	struct user_i387_ia32_struct
+#define elf_fpxregset_t	struct user32_fxsr_struct
+#define user		user32
 
 #undef elf_read_implies_exec
 #define elf_read_implies_exec(ex, executable_stack)	(executable_stack != EXSTACK_DISABLE_X)
-//#include <asm/ia32.h>
-#include <linux/elf.h>
-
-typedef struct user_i387_ia32_struct elf_fpregset_t;
-typedef struct user32_fxsr_struct elf_fpxregset_t;
 
-static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+#define elf_core_copy_regs		elf32_core_copy_regs
+static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs,
+					struct pt_regs *regs)
 {
-	ELF_CORE_COPY_REGS((*elfregs), regs)
+	ELF_CORE_COPY_REGS((&elfregs->ebx), regs)
 }
 
-static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
+#define elf_core_copy_task_regs	elf32_core_copy_task_regs
+static inline int elf32_core_copy_task_regs(struct task_struct *t,
+					    compat_elf_gregset_t* elfregs)
 {
 	struct pt_regs *pp = task_pt_regs(t);
-	ELF_CORE_COPY_REGS((*elfregs), pp);
+	ELF_CORE_COPY_REGS((&elfregs->ebx), pp);
 	/* fix wrong segments */
-	(*elfregs)[7] = t->thread.ds;
-	(*elfregs)[9] = t->thread.fsindex;
-	(*elfregs)[10] = t->thread.gsindex;
-	(*elfregs)[8] = t->thread.es;
+	elfregs->ds = t->thread.ds;
+	elfregs->fs = t->thread.fsindex;
+	elfregs->gs = t->thread.gsindex;
+	elfregs->es = t->thread.es;
 	return 1;
 }
 
+#define elf_core_copy_task_fpregs	elf32_core_copy_task_fpregs
 static inline int
-elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
+elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
			    elf_fpregset_t *fpu)
 {
 	struct _fpstate_ia32 *fpstate = (void*)fpu;
 	mm_segment_t oldfs = get_fs();
@@ -186,8 +148,9 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
 #define ELF_CORE_COPY_XFPREGS 1
 #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
 
+#define elf_core_copy_task_xfpregs	elf32_core_copy_task_xfpregs
 static inline int
-elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
+elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
 {
 	struct pt_regs *regs = task_pt_regs(t);
 	if (!tsk_used_math(t))
@@ -206,6 +169,10 @@ elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
 
 extern int force_personality32;
 
+#undef ELF_EXEC_PAGESIZE
+#undef ELF_HWCAP
+#undef ELF_PLATFORM
+#undef SET_PERSONALITY
 #define ELF_EXEC_PAGESIZE PAGE_SIZE
 #define ELF_HWCAP (boot_cpu_data.x86_capability[0])
 #define ELF_PLATFORM  ("i686")
@@ -231,6 +198,7 @@ do { \
 
 #define load_elf_binary load_elf32_binary
 
+#undef ELF_PLAT_INIT
 #define ELF_PLAT_INIT(r, load_addr)	elf32_init(r)
 
 #undef start_thread
...
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y := head_32.o init_task_32.o vmlinux.lds
+extra-y := head_32.o init_task.o vmlinux.lds
 
 obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
 		ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
@@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca_32.o
 obj-$(CONFIG_X86_MSR) += msr.o
 obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_MICROCODE) += microcode.o
+obj-$(CONFIG_PCI) += early-quirks.o
 obj-$(CONFIG_APM) += apm_32.o
 obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o
 obj-$(CONFIG_SMP) += smpcommon_32.o
...
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-extra-y := head_64.o head64.o init_task_64.o vmlinux.lds
+extra-y := head_64.o head64.o init_task.o vmlinux.lds
 EXTRA_AFLAGS	:= -traditional
 
 obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
 		ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
@@ -39,7 +39,7 @@ obj-$(CONFIG_K8_NB) += k8.o
 obj-$(CONFIG_AUDIT) += audit_64.o
 obj-$(CONFIG_MODULES) += module_64.o
-obj-$(CONFIG_PCI) += early-quirks_64.o
+obj-$(CONFIG_PCI) += early-quirks.o
 
 obj-y += topology.o
 obj-y += intel_cacheinfo.o
...
 obj-$(CONFIG_ACPI) += boot.o
-ifneq ($(CONFIG_PCI),)
-obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o
-endif
 obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
...
@@ -555,7 +555,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
 {
-	x86_cpu_to_apicid[cpu] = -1;
+	per_cpu(x86_cpu_to_apicid, cpu) = -1;
 	cpu_clear(cpu, cpu_present_map);
 	num_processors--;
...
@@ -29,7 +29,7 @@
 void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 					unsigned int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	flags->bm_check = 0;
 	if (num_online_cpus() == 1)
@@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
 {
 	struct cstate_entry *percpu_entry;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	cpumask_t saved_mask;
 	int retval;
...
-/*
- * Do early PCI probing for bug detection when the main PCI subsystem is
- * not up yet.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-
-#include <asm/pci-direct.h>
-#include <asm/acpi.h>
-#include <asm/apic.h>
-
-#ifdef CONFIG_ACPI
-
-static int __init nvidia_hpet_check(struct acpi_table_header *header)
-{
-	return 0;
-}
-#endif
-
-static int __init check_bridge(int vendor, int device)
-{
-#ifdef CONFIG_ACPI
-	static int warned;
-	/* According to Nvidia all timer overrides are bogus unless HPET
-	   is enabled. */
-	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
-		if (!warned && acpi_table_parse(ACPI_SIG_HPET,
-						nvidia_hpet_check)) {
-			warned = 1;
-			acpi_skip_timer_override = 1;
-			printk(KERN_INFO "Nvidia board "
-			       "detected. Ignoring ACPI "
-			       "timer override.\n");
-			printk(KERN_INFO "If you got timer trouble "
-			       "try acpi_use_timer_override\n");
-		}
-	}
-#endif
-	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
-		timer_over_8254 = 0;
-		printk(KERN_INFO "ATI board detected. Disabling timer routing "
-		       "over 8254.\n");
-	}
-	return 0;
-}
-
-void __init check_acpi_pci(void)
-{
-	int num, slot, func;
-
-	/* Assume the machine supports type 1. If not it will
-	   always read ffffffff and should not have any side effect.
-	   Actually a few buggy systems can machine check. Allow the user
-	   to disable it by command line option at least -AK */
-	if (!early_pci_allowed())
-		return;
-
-	/* Poor man's PCI discovery */
-	for (num = 0; num < 32; num++) {
-		for (slot = 0; slot < 32; slot++) {
-			for (func = 0; func < 8; func++) {
-				u32 class;
-				u32 vendor;
-				class = read_pci_config(num, slot, func,
-							PCI_CLASS_REVISION);
-				if (class == 0xffffffff)
-					break;
-
-				if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
-					continue;
-
-				vendor = read_pci_config(num, slot, func,
-							 PCI_VENDOR_ID);
-
-				if (check_bridge(vendor & 0xffff, vendor >> 16))
-					return;
-			}
-		}
-	}
-}
@@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
 {
 	unsigned int cpu = pr->id;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	pr->pdc = NULL;
 	if (c->x86_vendor == X86_VENDOR_INTEL)
...
@@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp)
 	if (smp) {
 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
 		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
 	} else {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_unlock(mod->locks, mod->locks_end,
 					    mod->text, mod->text_end);
@@ -432,7 +432,7 @@ void __init alternative_instructions(void)
 	if (1 == num_possible_cpus()) {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		alternatives_smp_unlock(__smp_locks, __smp_locks_end,
 					_text, _etext);
 	}
...
@@ -19,7 +19,7 @@ config X86_POWERNOW_K8
 	  To compile this driver as a module, choose M here: the
 	  module will be called powernow-k8.
 
 	  For details, take a look at <file:Documentation/cpu-freq/>.
 
 	  If in doubt, say N.
...
@@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict;
 
 static int check_est_cpu(unsigned int cpuid)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
 
 	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
 	    !cpu_has(cpu, X86_FEATURE_EST))
@@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int cpu = policy->cpu;
 	struct acpi_cpufreq_data *data;
 	unsigned int result = 0;
-	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
 
 	dprintk("acpi_cpufreq_cpu_init\n");
...
@@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = {
 
 static int __init eps_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	/* This driver will work only on Centaur C7 processors with
 	 * Enhanced SpeedStep/PowerSaver registers */
...
@@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy,
 
 static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int i;
 	int result;
@@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = {
 
 static int __init elanfreq_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	/* Test if we have the right hardware */
 	if ((c->x86_vendor != X86_VENDOR_AMD) ||
...
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)
 
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	char *cpuname=NULL;
 	int ret;
 	u32 lo, hi;
@@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = {
 
 static int __init longhaul_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
 		return -ENODEV;
...
@@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 	u32 save_lo, save_hi;
 	u32 eax, ebx, ecx, edx;
 	u32 try_hi;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!low_freq || !high_freq)
 		return -EINVAL;
@@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = {
  */
 static int __init longrun_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
 	    !cpu_has(c, X86_FEATURE_LONGRUN))
...
@@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 
 static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	int cpuid = 0;
 	unsigned int i;
@@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = {
 
 static int __init cpufreq_p4_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int ret;
 
 	/*
...
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
  */
 static int __init powernow_k6_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
 		((c->x86_model != 12) && (c->x86_model != 13)))
...
@@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed)
 
 static int check_powernow(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int maxei, eax, ebx, ecx, edx;
 
 	if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
...
@@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy,
 
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int result;
 
 	/* capability check */
@@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = {
 
 static int __init sc520_freq_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int err;
 
 	/* Test if we have the right hardware */
...
@@ -230,7 +230,7 @@ static struct cpu_model models[] =
 
 static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
 	struct cpu_model *model;
 
 	for(model = models; model->cpu_id != NULL; model++)
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
 	unsigned freq;
 	unsigned l, h;
 	int ret;
@@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = {
  */
 static int __init centrino_init(void)
 {
-	struct cpuinfo_x86 *cpu = cpu_data;
+	struct cpuinfo_x86 *cpu = &cpu_data(0);
 
 	if (!cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
...
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
 
 unsigned int speedstep_detect_processor (void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	u32 ebx, msr_lo, msr_hi;
 
 	dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
...
@@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
 #ifdef CONFIG_X86_HT
-	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+	unsigned int cpu = c->cpu_index;
 #endif
 
 	if (c->cpuid_level > 3) {
@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	if (new_l2) {
 		l2 = new_l2;
 #ifdef CONFIG_X86_HT
-		cpu_llc_id[cpu] = l2_id;
+		per_cpu(cpu_llc_id, cpu) = l2_id;
 #endif
 	}
 
 	if (new_l3) {
 		l3 = new_l3;
 #ifdef CONFIG_X86_HT
-		cpu_llc_id[cpu] = l3_id;
+		per_cpu(cpu_llc_id, cpu) = l3_id;
 #endif
 	}
@@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
 	int index_msb, i;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	index_msb = get_count_order(num_threads_sharing);
 
 	for_each_online_cpu(i) {
-		if (c[i].apicid >> index_msb ==
-		    c[cpu].apicid >> index_msb) {
+		if (cpu_data(i).apicid >> index_msb ==
+		    c->apicid >> index_msb) {
 			cpu_set(i, this_leaf->shared_cpu_map);
 			if (i != cpu && cpuid4_info[i]) {
 				sibling_leaf = CPUID4_INFO_IDX(i, index);
...
@@ -120,7 +120,9 @@ int reserve_perfctr_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return 1;
 
 	if (!test_and_set_bit(counter, perfctr_nmi_owner))
 		return 1;
@@ -132,7 +134,9 @@ void release_perfctr_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_perfctr_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return;
 
 	clear_bit(counter, perfctr_nmi_owner);
 }
@@ -142,7 +146,9 @@ int reserve_evntsel_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return 1;
 
 	if (!test_and_set_bit(counter, evntsel_nmi_owner))
 		return 1;
@@ -154,7 +160,9 @@ void release_evntsel_nmi(unsigned int msr)
 	unsigned int counter;
 
 	counter = nmi_evntsel_msr_to_bit(msr);
-	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
+	/* register not managed by the allocator? */
+	if (counter > NMI_MAX_COUNTER_BITS)
+		return;
 
 	clear_bit(counter, evntsel_nmi_owner);
 }
...
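
The four hunks above all make the same change: an out-of-range counter bit now takes an early return instead of a BUG_ON(), so an MSR the allocator does not manage degrades gracefully rather than killing the kernel. A self-contained sketch of the pattern (plain C; the MSR-to-bit mapping, bitmap, and constants are made-up stand-ins):

	#include <stdio.h>

	#define NMI_MAX_COUNTER_BITS 66

	/* Map an MSR to its allocator bit; huge value when unknown. */
	static unsigned int msr_to_bit(unsigned int msr)
	{
		return msr >= 0xc0010000 ? msr - 0xc0010000 : ~0u;
	}

	static unsigned long owner_bitmap[2];	/* 128 bits of ownership */

	static int reserve_counter(unsigned int msr)
	{
		unsigned int bit = msr_to_bit(msr);

		/* Register not managed by the allocator? Report it as
		 * free instead of crashing, as in the hunks above. */
		if (bit > NMI_MAX_COUNTER_BITS)
			return 1;
		if (owner_bitmap[bit / 64] & (1ul << (bit % 64)))
			return 0;		/* already reserved */
		owner_bitmap[bit / 64] |= 1ul << (bit % 64);
		return 1;
	}

	int main(void)
	{
		printf("%d %d\n", reserve_counter(0xc0010000),
		       reserve_counter(0xc0010000));	/* 1 then 0 */
		return 0;
	}

Returning success for unmanaged registers matches the kernel's choice: callers probing unknown hardware keep working instead of oopsing.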
@@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* nothing */
 	};
 	struct cpuinfo_x86 *c = v;
-	int i, n = c - cpu_data;
+	int i, n = 0;
 	int fpu_exception;
 
 #ifdef CONFIG_SMP
 	if (!cpu_online(n))
 		return 0;
+	n = c->cpu_index;
 #endif
 	seq_printf(m, "processor\t: %d\n"
 		"vendor_id\t: %s\n"
@@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+	if (*pos == 0)	/* just in case, cpu 0 is not the first */
+		*pos = first_cpu(cpu_possible_map);
+	if ((*pos) < NR_CPUS && cpu_possible(*pos))
+		return &cpu_data(*pos);
+	return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	++*pos;
+	*pos = next_cpu(*pos, cpu_possible_map);
 	return c_start(m, pos);
 }
 
 static void c_stop(struct seq_file *m, void *v)
...
@@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 static int cpuid_open(struct inode *inode, struct file *file)
 {
 	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-	struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if (cpu >= NR_CPUS || !cpu_online(cpu))
 		return -ENXIO;	/* No such CPU */
@@ -134,15 +134,18 @@ static const struct file_operations cpuid_fops = {
 	.open = cpuid_open,
 };
 
-static int __cpuinit cpuid_device_create(int i)
+static __cpuinit int cpuid_device_create(int cpu)
 {
-	int err = 0;
 	struct device *dev;
 
-	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
-	if (IS_ERR(dev))
-		err = PTR_ERR(dev);
-	return err;
+	dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
+			    "cpu%d", cpu);
+	return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+static void cpuid_device_destroy(int cpu)
+{
+	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 }
 
 static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
@@ -150,18 +153,21 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
 					      void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
+	int err = 0;
 
 	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		cpuid_device_create(cpu);
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		err = cpuid_device_create(cpu);
 		break;
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		cpuid_device_destroy(cpu);
 		break;
 	}
-	return NOTIFY_OK;
+	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
@@ -198,7 +204,7 @@ static int __init cpuid_init(void)
 out_class:
 	i = 0;
 	for_each_online_cpu(i) {
-		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
+		cpuid_device_destroy(i);
 	}
 	class_destroy(cpuid_class);
 out_chrdev:
@@ -212,7 +218,7 @@ static void __exit cpuid_exit(void)
 	int cpu = 0;
 
 	for_each_online_cpu(cpu)
-		device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		cpuid_device_destroy(cpu);
 	class_destroy(cpuid_class);
 	unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
 	unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
...
@@ -13,9 +13,13 @@
 #include <linux/acpi.h>
 #include <linux/pci_ids.h>
 #include <asm/pci-direct.h>
-#include <asm/proto.h>
-#include <asm/iommu.h>
 #include <asm/dma.h>
+#include <asm/io_apic.h>
+#include <asm/apic.h>
+
+#ifdef CONFIG_IOMMU
+#include <asm/iommu.h>
+#endif
 
 static void __init via_bugs(void)
 {
@@ -23,7 +27,8 @@ static void __init via_bugs(void)
 	if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
 	    !iommu_aperture_allowed) {
 		printk(KERN_INFO
-		       "Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n");
+		       "Looks like a VIA chipset. Disabling IOMMU."
+		       " Override with iommu=allowed\n");
 		iommu_aperture_disabled = 1;
 	}
 #endif
@@ -40,6 +45,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
 static void __init nvidia_bugs(void)
 {
 #ifdef CONFIG_ACPI
+#ifdef CONFIG_X86_IO_APIC
 	/*
 	 * All timer overrides on Nvidia are
 	 * wrong unless HPET is enabled.
@@ -58,6 +64,7 @@ static void __init nvidia_bugs(void)
 		printk(KERN_INFO "If you got timer trouble "
 			"try acpi_use_timer_override\n");
 	}
+#endif
 #endif
 
 	/* RED-PEN skip them on mptables too? */
@@ -65,11 +72,13 @@ static void __init nvidia_bugs(void)
 
 static void __init ati_bugs(void)
 {
+#ifdef CONFIG_X86_IO_APIC
 	if (timer_over_8254 == 1) {
 		timer_over_8254 = 0;
 		printk(KERN_INFO
 		"ATI board detected. Disabling timer routing over 8254.\n");
 	}
+#endif
 }
 
 struct chipset {
@@ -104,7 +113,7 @@ void __init early_quirks(void)
 			if (class == 0xffffffff)
 				break;
 
 			if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
 				continue;
 
 			vendor = read_pci_config(num, slot, func,
...
@@ -24,10 +24,19 @@
 #include <acpi/acpi_bus.h>
 #endif
 
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
+/*
+ * which logical CPU number maps to which CPU (physical APIC ID)
+ *
+ * The following static array is used during kernel startup
+ * and the x86_cpu_to_apicid_ptr contains the address of the
+ * array during this time.  Is it zeroed when the per_cpu
+ * data area is removed.
+ */
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
 	= { [0 ... NR_CPUS-1] = BAD_APICID };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
+void *x86_cpu_to_apicid_ptr;
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 struct genapic __read_mostly *genapic = &apic_flat;
...
@@ -172,7 +172,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
 	 */
 	cpu = first_cpu(cpumask);
 	if ((unsigned)cpu < NR_CPUS)
-		return x86_cpu_to_apicid[cpu];
+		return per_cpu(x86_cpu_to_apicid, cpu);
 	else
 		return BAD_APICID;
 }
...
@@ -58,7 +58,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	for (i = 0; i < IDT_ENTRIES; i++)
 		set_intr_gate(i, early_idt_handler);
-	asm volatile("lidt %0" :: "m" (idt_descr));
+	load_idt((const struct desc_ptr *)&idt_descr);
 
 	early_printk("Kernel alive\n");
...
@@ -69,12 +69,15 @@ static inline void hpet_clear_mapping(void)
  * HPET command line enable / disable
  */
 static int boot_hpet_disable;
+int hpet_force_user;
 
 static int __init hpet_setup(char* str)
 {
 	if (str) {
 		if (!strncmp("disable", str, 7))
 			boot_hpet_disable = 1;
+		if (!strncmp("force", str, 5))
+			hpet_force_user = 1;
 	}
 	return 1;
 }
...
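
hpet_setup() is a plain strncmp parser over the text that follows hpet= on the command line; "disable" and "force" are the two options handled after this change. A user-space sketch of the same matching logic (the option names come from the hunk above; everything else is illustrative):

	#include <stdio.h>
	#include <string.h>

	static int boot_hpet_disable;
	static int hpet_force_user;

	/* Mirrors the strncmp matching in hpet_setup() above. */
	static void hpet_setup(const char *str)
	{
		if (!str)
			return;
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
	}

	int main(void)
	{
		hpet_setup("force");
		printf("disable=%d force=%d\n",
		       boot_hpet_disable, hpet_force_user);
		return 0;
	}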
@@ -403,7 +403,8 @@ void __init native_init_IRQ(void)
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (i >= NR_IRQS)
 			break;
-		if (vector != SYSCALL_VECTOR)
+		/* SYSCALL_VECTOR was reserved in trap_init. */
+		if (!test_bit(vector, used_vectors))
 			set_intr_gate(vector, interrupt[i]);
 	}
...
@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
-
 EXPORT_SYMBOL(init_mm);
 
 /*
@@ -25,7 +24,7 @@ EXPORT_SYMBOL(init_mm);
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
-union thread_union init_thread_union 
+union thread_union init_thread_union
 	__attribute__((__section__(".data.init_task"))) =
 		{ INIT_THREAD_INFO(init_task) };
@@ -35,12 +34,14 @@ union thread_union init_thread_union
 * All other task structs will be allocated on slabs in fork.c
 */
 struct task_struct init_task = INIT_TASK(init_task);
-
 EXPORT_SYMBOL(init_task);
 
 /*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's.
- */
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
-
-/*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-
-/* Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
@@ -1198,7 +1198,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
 static int __assign_irq_vector(int irq)
 {
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
-	int vector, offset, i;
+	int vector, offset;
 
 	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
@@ -1215,11 +1215,8 @@ static int __assign_irq_vector(int irq)
 	}
 	if (vector == current_vector)
 		return -ENOSPC;
-	if (vector == SYSCALL_VECTOR)
+	if (test_and_set_bit(vector, used_vectors))
 		goto next;
-	for (i = 0; i < NR_IRQ_VECTORS; i++)
-		if (irq_vector[i] == vector)
-			goto next;
 
 	current_vector = vector;
 	current_offset = offset;
@@ -2295,6 +2292,12 @@ static inline void __init check_timer(void)
 void __init setup_IO_APIC(void)
 {
+	int i;
+
+	/* Reserve all the system vectors. */
+	for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+		set_bit(i, used_vectors);
+
 	enable_IO_APIC();
 
 	if (acpi_ioapic)
...
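
Taken together with the i8259 change above, the scheme is: used_vectors is a global bitmap, setup_IO_APIC() pre-reserves every system vector, and __assign_irq_vector() claims a free vector with a single test_and_set_bit() instead of rescanning irq_vector[]. A small non-atomic sketch of that reservation scheme (plain C; the vector constants and bit helper are illustrative, and the real kernel helper is atomic):

	#include <stdio.h>

	#define NR_VECTORS		256
	#define FIRST_SYSTEM_VECTOR	0xef	/* illustrative cut-off */

	static unsigned long used_vectors[NR_VECTORS / (8 * sizeof(unsigned long))];

	/* Non-atomic stand-in for test_and_set_bit(): returns old bit. */
	static int test_and_set(int bit, unsigned long *map)
	{
		unsigned long mask = 1ul << (bit % (8 * sizeof(unsigned long)));
		unsigned long *word = &map[bit / (8 * sizeof(unsigned long))];
		int old = (*word & mask) != 0;
		*word |= mask;
		return old;
	}

	int main(void)
	{
		int i, v;

		/* Reserve all the system vectors up front, as
		 * setup_IO_APIC() does in the hunk above. */
		for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
			test_and_set(i, used_vectors);

		/* Allocating a device vector is now one bitmap probe. */
		v = 0x31;
		printf("vector 0x%x %s\n", v,
		       test_and_set(v, used_vectors) ? "was taken" : "reserved");
		return 0;
	}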
@@ -799,7 +799,8 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 {
 	int err;
 	int i;
-	if (!mce_available(&cpu_data[cpu]))
+
+	if (!mce_available(&cpu_data(cpu)))
 		return -EIO;
 
 	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
...
@@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	sprintf(name, "threshold_bank%i", bank);
 
 #ifdef CONFIG_SMP
-	if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {	/* symlink */
+	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
 		i = first_cpu(per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
-		if (cpu_data[i].cpu_core_id)
+		if (cpu_data(i).cpu_core_id)
 			goto out;
 
 		/* already linked */
...
@@ -132,7 +132,7 @@ static struct ucode_cpu_info {
 
 static void collect_cpu_info(int cpu_num)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu_num;
+	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
 	unsigned int val[2];
@@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev;
 static int cpu_request_microcode(int cpu)
 {
 	char name[30];
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	const struct firmware *firmware;
 	void *buf;
 	unsigned long size;
@@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu)
 static int apply_microcode_check_cpu(int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	cpumask_t old;
 	unsigned int val[2];
...
@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;
 
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_id = -1U;
+EXPORT_SYMBOL(boot_cpu_id);
+
 /* Internal processor count */
 unsigned int num_processors __cpuinitdata = 0;
@@ -86,7 +88,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
 	return sum & 0xFF;
 }
 
-static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 {
 	int cpu;
 	cpumask_t tmp_map;
@@ -123,7 +125,18 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
 		cpu = 0;
 	}
 	bios_cpu_apicid[cpu] = m->mpc_apicid;
-	x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+	/*
+	 * We get called early in the the start_kernel initialization
+	 * process when the per_cpu data area is not yet setup, so we
+	 * use a static array that is removed after the per_cpu data
+	 * area is created.
+	 */
+	if (x86_cpu_to_apicid_ptr) {
+		u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
+		x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+	} else {
+		per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+	}
 
 	cpu_set(cpu, cpu_possible_map);
 	cpu_set(cpu, cpu_present_map);
...
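
The branch on x86_cpu_to_apicid_ptr exists because MP_processor_info() can run before the per-CPU area is set up: early records go into a throwaway __initdata array, and once per-CPU storage is live the pointer is cleared and the per_cpu() path takes over. A hedged user-space sketch of that hand-over (names and setup sequencing are simulated, not the kernel's):

	#include <stdio.h>

	#define NR_CPUS		4
	#define BAD_APICID	0xffu

	/* Early table used before the per-CPU area exists. */
	static unsigned char apicid_init[NR_CPUS] = {
		BAD_APICID, BAD_APICID, BAD_APICID, BAD_APICID };
	static void *apicid_ptr = apicid_init;	/* cleared later */

	/* Stand-in for the real per-CPU storage. */
	static unsigned char apicid_percpu[NR_CPUS];

	static void record_apicid(int cpu, unsigned char apicid)
	{
		if (apicid_ptr) {
			unsigned char *early = apicid_ptr;
			early[cpu] = apicid;		/* pre-per-CPU path */
		} else {
			apicid_percpu[cpu] = apicid;	/* normal path */
		}
	}

	int main(void)
	{
		record_apicid(0, 0x10);		/* early boot */
		apicid_ptr = NULL;		/* per-CPU area now live */
		record_apicid(1, 0x11);
		printf("%x %x\n", apicid_init[0], apicid_percpu[1]);
		return 0;
	}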
@@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
 static int msr_open(struct inode *inode, struct file *file)
 {
 	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-	struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if (cpu >= NR_CPUS || !cpu_online(cpu))
 		return -ENXIO;	/* No such CPU */
...
@@ -11,7 +11,7 @@
 #include <asm/iommu.h>
 #include <asm/calgary.h>
 
-int iommu_merge __read_mostly = 0;
+int iommu_merge __read_mostly = 1;
 EXPORT_SYMBOL(iommu_merge);
 
 dma_addr_t bad_dma_address __read_mostly;
...
...@@ -295,34 +295,52 @@ static int __init idle_setup(char *str) ...@@ -295,34 +295,52 @@ static int __init idle_setup(char *str)
} }
early_param("idle", idle_setup); early_param("idle", idle_setup);
void show_regs(struct pt_regs * regs) void __show_registers(struct pt_regs *regs, int all)
{ {
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7; unsigned long d0, d1, d2, d3, d6, d7;
unsigned long esp;
unsigned short ss, gs;
if (user_mode_vm(regs)) {
esp = regs->esp;
ss = regs->xss & 0xffff;
savesegment(gs, gs);
} else {
esp = (unsigned long) (&regs->esp);
savesegment(ss, ss);
savesegment(gs, gs);
}
printk("\n"); printk("\n");
printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); printk("Pid: %d, comm: %s %s (%s %.*s)\n",
printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); task_pid_nr(current), current->comm,
print_tainted(), init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
0xffff & regs->xcs, regs->eip, regs->eflags,
smp_processor_id());
print_symbol("EIP is at %s\n", regs->eip); print_symbol("EIP is at %s\n", regs->eip);
if (user_mode_vm(regs))
printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
printk(" EFLAGS: %08lx %s (%s %.*s)\n",
regs->eflags, print_tainted(), init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->eax,regs->ebx,regs->ecx,regs->edx); regs->eax, regs->ebx, regs->ecx, regs->edx);
printk("ESI: %08lx EDI: %08lx EBP: %08lx", printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
regs->esi, regs->edi, regs->ebp); regs->esi, regs->edi, regs->ebp, esp);
printk(" DS: %04x ES: %04x FS: %04x\n", printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); regs->xds & 0xffff, regs->xes & 0xffff,
regs->xfs & 0xffff, gs, ss);
if (!all)
return;
cr0 = read_cr0(); cr0 = read_cr0();
cr2 = read_cr2(); cr2 = read_cr2();
cr3 = read_cr3(); cr3 = read_cr3();
cr4 = read_cr4_safe(); cr4 = read_cr4_safe();
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
cr0, cr2, cr3, cr4);
get_debugreg(d0, 0); get_debugreg(d0, 0);
get_debugreg(d1, 1); get_debugreg(d1, 1);
...@@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs) ...@@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs)
get_debugreg(d3, 3); get_debugreg(d3, 3);
printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
d0, d1, d2, d3); d0, d1, d2, d3);
get_debugreg(d6, 6); get_debugreg(d6, 6);
get_debugreg(d7, 7); get_debugreg(d7, 7);
printk("DR6: %08lx DR7: %08lx\n", d6, d7); printk("DR6: %08lx DR7: %08lx\n",
d6, d7);
}
void show_regs(struct pt_regs *regs)
{
__show_registers(regs, 1);
show_trace(NULL, regs, &regs->esp); show_trace(NULL, regs, &regs->esp);
} }
......
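
For orientation: the new 'all' flag only gates the control/debug register dump, so the two callers introduced by this series split as follows (a sketch of intent, not additional code):

	__show_registers(regs, 1);	/* show_regs(): also dump CR0-CR4, DR0-DR7 */
	__show_registers(regs, 0);	/* oops path in show_registers(): skip them */
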
...@@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev) ...@@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
if (!(config & 0x2)) if (!(config & 0x2))
pci_write_config_byte(dev, 0xf4, config); pci_write_config_byte(dev, 0xf4, config);
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
quirk_intel_irqbalance);
#endif #endif
#if defined(CONFIG_HPET_TIMER) #if defined(CONFIG_HPET_TIMER)
...@@ -56,7 +59,8 @@ unsigned long force_hpet_address; ...@@ -56,7 +59,8 @@ unsigned long force_hpet_address;
static enum { static enum {
NONE_FORCE_HPET_RESUME, NONE_FORCE_HPET_RESUME,
OLD_ICH_FORCE_HPET_RESUME, OLD_ICH_FORCE_HPET_RESUME,
ICH_FORCE_HPET_RESUME ICH_FORCE_HPET_RESUME,
VT8237_FORCE_HPET_RESUME
} force_hpet_resume_type; } force_hpet_resume_type;
static void __iomem *rcba_base; static void __iomem *rcba_base;
...@@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev) ...@@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
ich_force_enable_hpet); ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
ich_force_enable_hpet); ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
ich_force_enable_hpet); ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
ich_force_enable_hpet); ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
ich_force_enable_hpet); ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
ich_force_enable_hpet); ich_force_enable_hpet);
static struct pci_dev *cached_dev; static struct pci_dev *cached_dev;
...@@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev) ...@@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
printk(KERN_DEBUG "Failed to force enable HPET\n"); printk(KERN_DEBUG "Failed to force enable HPET\n");
} }
/*
 * Undocumented chipset features. Only force-enable these when the
 * user explicitly asked for it (hpet=force).
*/
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
if (hpet_force_user)
old_ich_force_enable_hpet(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
old_ich_force_enable_hpet); old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
old_ich_force_enable_hpet); old_ich_force_enable_hpet);
static void vt8237_force_hpet_resume(void)
{
u32 val;
if (!force_hpet_address || !cached_dev)
return;
val = 0xfed00000 | 0x80;
pci_write_config_dword(cached_dev, 0x68, val);
pci_read_config_dword(cached_dev, 0x68, &val);
if (val & 0x80)
printk(KERN_DEBUG "Force enabled HPET at resume\n");
else
BUG();
}
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
u32 uninitialized_var(val);
if (!hpet_force_user || hpet_address || force_hpet_address)
return;
pci_read_config_dword(dev, 0x68, &val);
/*
* Bit 7 is HPET enable bit.
* Bit 31:10 is HPET base address (contrary to what datasheet claims)
*/
if (val & 0x80) {
force_hpet_address = (val & ~0x3ff);
printk(KERN_DEBUG "HPET at base address 0x%lx\n",
force_hpet_address);
return;
}
/*
 * HPET is disabled. Try enabling it at FED00000 and check
 * whether it sticks.
*/
val = 0xfed00000 | 0x80;
pci_write_config_dword(dev, 0x68, val);
pci_read_config_dword(dev, 0x68, &val);
if (val & 0x80) {
force_hpet_address = (val & ~0x3ff);
printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
force_hpet_address);
cached_dev = dev;
force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
return;
}
printk(KERN_DEBUG "Failed to force enable HPET\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
vt8237_force_enable_hpet);
void force_hpet_resume(void) void force_hpet_resume(void)
{ {
...@@ -246,6 +331,9 @@ void force_hpet_resume(void) ...@@ -246,6 +331,9 @@ void force_hpet_resume(void)
case OLD_ICH_FORCE_HPET_RESUME: case OLD_ICH_FORCE_HPET_RESUME:
return old_ich_force_hpet_resume(); return old_ich_force_hpet_resume();
case VT8237_FORCE_HPET_RESUME:
return vt8237_force_hpet_resume();
default: default:
break; break;
} }
......
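
The hpet_force_user flag checked above is set by the boot option handler; roughly, and hedged as a sketch of the arch/x86/kernel/hpet.c code in this series:

static int __init hpet_setup(char *str)
{
	if (str) {
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;	/* allow undocumented-chipset quirks */
	}
	return 1;
}
__setup("hpet=", hpet_setup);
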
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/desc.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -136,7 +137,7 @@ void machine_emergency_restart(void) ...@@ -136,7 +137,7 @@ void machine_emergency_restart(void)
} }
case BOOT_TRIPLE: case BOOT_TRIPLE:
__asm__ __volatile__("lidt (%0)": :"r" (&no_idt)); load_idt((const struct desc_ptr *)&no_idt);
__asm__ __volatile__("int3"); __asm__ __volatile__("int3");
reboot_type = BOOT_KBD; reboot_type = BOOT_KBD;
......
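
load_idt()/load_gdt() (and the store_* counterparts used further down) are thin inline wrappers over the same instructions the removed asm issued; sketched here for reference, with the real definitions living in include/asm-x86/desc_*.h:

static inline void load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0" : : "m" (*dtr));
}

static inline void load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0" : : "m" (*dtr));
}
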
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/reboot_fixups.h> #include <asm/reboot_fixups.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/geode.h>
static void cs5530a_warm_reset(struct pci_dev *dev) static void cs5530a_warm_reset(struct pci_dev *dev)
{ {
...@@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev) ...@@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev)
static void cs5536_warm_reset(struct pci_dev *dev) static void cs5536_warm_reset(struct pci_dev *dev)
{ {
/* /* writing 1 to the LSB of this MSR causes a hard reset */
* 6.6.2.12 Soft Reset (DIVIL_SOFT_RESET) wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
* writing 1 to the LSB of this MSR causes a hard reset.
*/
wrmsrl(0x51400017, 1ULL);
udelay(50); /* shouldn't get here but be safe and spin a while */ udelay(50); /* shouldn't get here but be safe and spin a while */
} }
......
...@@ -184,6 +184,12 @@ void __cpuinit check_efer(void) ...@@ -184,6 +184,12 @@ void __cpuinit check_efer(void)
unsigned long kernel_eflags; unsigned long kernel_eflags;
/*
* Copies of the original ist values from the tss are only accessed during
 * debugging; no special alignment is required.
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
/* /*
* cpu_init() initializes state that is per-CPU. Some data is already * cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT * initialized (naturally) in the bootstrap process, such as the GDT
...@@ -224,8 +230,8 @@ void __cpuinit cpu_init (void) ...@@ -224,8 +230,8 @@ void __cpuinit cpu_init (void)
memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE); memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
cpu_gdt_descr[cpu].size = GDT_SIZE; cpu_gdt_descr[cpu].size = GDT_SIZE;
asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu])); load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
asm volatile("lidt %0" :: "m" (idt_descr)); load_idt((const struct desc_ptr *)&idt_descr);
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init(); syscall_init();
......
...@@ -661,9 +661,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -661,9 +661,7 @@ void __init setup_arch(char **cmdline_p)
#endif #endif
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
#ifdef CONFIG_X86_IO_APIC early_quirks();
check_acpi_pci(); /* Checks more than just ACPI actually */
#endif
#endif #endif
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
......
...@@ -302,6 +302,11 @@ void __init setup_arch(char **cmdline_p) ...@@ -302,6 +302,11 @@ void __init setup_arch(char **cmdline_p)
dmi_scan_machine(); dmi_scan_machine();
#ifdef CONFIG_SMP
/* setup to use the static apicid table during kernel startup */
x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
/* /*
* Initialize the ACPI boot-time table parser (gets the RSDP and SDT). * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
...@@ -554,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) ...@@ -554,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
but in the same order as the HT nodeids. but in the same order as the HT nodeids.
If that doesn't result in a usable node fall back to the If that doesn't result in a usable node fall back to the
path for the previous case. */ path for the previous case. */
int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits); int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
if (ht_nodeid >= 0 && if (ht_nodeid >= 0 &&
apicid_to_node[ht_nodeid] != NUMA_NO_NODE) apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
node = apicid_to_node[ht_nodeid]; node = apicid_to_node[ht_nodeid];
...@@ -878,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) ...@@ -878,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
c->cpu_index = 0;
#endif #endif
} }
...@@ -984,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) ...@@ -984,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
static int show_cpuinfo(struct seq_file *m, void *v) static int show_cpuinfo(struct seq_file *m, void *v)
{ {
struct cpuinfo_x86 *c = v; struct cpuinfo_x86 *c = v;
int cpu = 0;
/* /*
* These flag bits must match the definitions in <asm/cpufeature.h>. * These flag bits must match the definitions in <asm/cpufeature.h>.
...@@ -1062,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1062,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (!cpu_online(c-cpu_data)) if (!cpu_online(c->cpu_index))
return 0; return 0;
cpu = c->cpu_index;
#endif #endif
seq_printf(m,"processor\t: %u\n" seq_printf(m,"processor\t: %u\n"
...@@ -1071,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1071,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"cpu family\t: %d\n" "cpu family\t: %d\n"
"model\t\t: %d\n" "model\t\t: %d\n"
"model name\t: %s\n", "model name\t: %s\n",
(unsigned)(c-cpu_data), (unsigned)cpu,
c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
c->x86, c->x86,
(int)c->x86_model, (int)c->x86_model,
...@@ -1083,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1083,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "stepping\t: unknown\n"); seq_printf(m, "stepping\t: unknown\n");
if (cpu_has(c,X86_FEATURE_TSC)) { if (cpu_has(c,X86_FEATURE_TSC)) {
unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data)); unsigned int freq = cpufreq_quick_get((unsigned)cpu);
if (!freq) if (!freq)
freq = cpu_khz; freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n", seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
...@@ -1096,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1096,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (smp_num_siblings * c->x86_max_cores > 1) { if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", seq_printf(m, "siblings\t: %d\n",
cpus_weight(per_cpu(cpu_core_map, cpu))); cpus_weight(per_cpu(cpu_core_map, cpu)));
...@@ -1154,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1154,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos) static void *c_start(struct seq_file *m, loff_t *pos)
{ {
return *pos < NR_CPUS ? cpu_data + *pos : NULL; if (*pos == 0) /* just in case cpu 0 is not the first */
*pos = first_cpu(cpu_possible_map);
if ((*pos) < NR_CPUS && cpu_possible(*pos))
return &cpu_data(*pos);
return NULL;
} }
static void *c_next(struct seq_file *m, void *v, loff_t *pos) static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{ {
++*pos; *pos = next_cpu(*pos, cpu_possible_map);
return c_start(m, pos); return c_start(m, pos);
} }
......
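
c_start()/c_next() now walk cpu_possible_map instead of indexing a dense array; they plug into the usual /proc/cpuinfo seq_file wiring, which this patch leaves unchanged (c_stop and show_cpuinfo are the existing hooks, shown above or elided):

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
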
...@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy) ...@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy)
*/ */
cpu_clear(smp_processor_id(), cpu_online_map); cpu_clear(smp_processor_id(), cpu_online_map);
disable_local_APIC(); disable_local_APIC();
if (cpu_data[smp_processor_id()].hlt_works_ok) if (cpu_data(smp_processor_id()).hlt_works_ok)
for(;;) halt(); for(;;) halt();
for (;;); for (;;);
} }
...@@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id) ...@@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
int i; int i;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
if (x86_cpu_to_apicid[i] == apic_id) if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
return i; return i;
} }
return -1; return -1;
......
...@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void) ...@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void)
} }
/* /*
 * this function sends a 'generic call function' IPI to one other CPU * this function sends a 'generic call function' IPI to all other CPUs
 * in the system. * in the system that are set in the mask.
*
* cpu is a standard Linux logical CPU number.
*/ */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info, static int
int nonatomic, int wait) __smp_call_function_mask(cpumask_t mask,
void (*func)(void *), void *info,
int wait)
{ {
struct call_data_struct data; struct call_data_struct data;
int cpus = 1; cpumask_t allbutself;
int cpus;
allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
cpus_and(mask, mask, allbutself);
cpus = cpus_weight(mask);
if (!cpus)
return 0;
data.func = func; data.func = func;
data.info = info; data.info = info;
...@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info, ...@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
call_data = &data; call_data = &data;
wmb(); wmb();
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); /* Send a message to other CPUs */
if (cpus_equal(mask, allbutself))
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
else
send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
/* Wait for response */ /* Wait for response */
while (atomic_read(&data.started) != cpus) while (atomic_read(&data.started) != cpus)
cpu_relax(); cpu_relax();
if (!wait) if (!wait)
return; return 0;
while (atomic_read(&data.finished) != cpus) while (atomic_read(&data.finished) != cpus)
cpu_relax(); cpu_relax();
return 0;
}
/**
* smp_call_function_mask(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on. Must not include the current cpu.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
* Returns 0 on success, else a negative status code.
*
* If @wait is true, then returns once @func has returned; otherwise
* it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int smp_call_function_mask(cpumask_t mask,
void (*func)(void *), void *info,
int wait)
{
int ret;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
spin_lock(&call_lock);
ret = __smp_call_function_mask(mask, func, info, wait);
spin_unlock(&call_lock);
return ret;
} }
EXPORT_SYMBOL(smp_call_function_mask);
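
A hypothetical caller, to illustrate the contract (both function names here are invented for the example): run a fast, non-sleeping callback on a CPU subset and wait for completion.

static void drain_local_state(void *unused)
{
	/* runs in IPI context on each target CPU; must not block */
}

static int drain_remote_state(cpumask_t mask)
{
	/* wait=1: return only after drain_local_state ran everywhere */
	return smp_call_function_mask(mask, drain_local_state, NULL, 1);
}
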
/* /*
* smp_call_function_single - Run a function on a specific CPU * smp_call_function_single - Run a function on a specific CPU
...@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, ...@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait) int nonatomic, int wait)
{ {
/* prevent preemption and reschedule on another processor */ /* prevent preemption and reschedule on another processor */
int ret;
int me = get_cpu(); int me = get_cpu();
/* Can deadlock when called with interrupts disabled */ /* Can deadlock when called with interrupts disabled */
...@@ -387,50 +434,13 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, ...@@ -387,50 +434,13 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
return 0; return 0;
} }
spin_lock(&call_lock); ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
__smp_call_function_single(cpu, func, info, nonatomic, wait);
spin_unlock(&call_lock);
put_cpu(); put_cpu();
return 0; return ret;
} }
EXPORT_SYMBOL(smp_call_function_single); EXPORT_SYMBOL(smp_call_function_single);
/*
* this function sends a 'generic call function' IPI to all other CPUs
* in the system.
*/
static void __smp_call_function (void (*func) (void *info), void *info,
int nonatomic, int wait)
{
struct call_data_struct data;
int cpus = num_online_cpus()-1;
if (!cpus)
return;
data.func = func;
data.info = info;
atomic_set(&data.started, 0);
data.wait = wait;
if (wait)
atomic_set(&data.finished, 0);
call_data = &data;
wmb();
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
/* Wait for response */
while (atomic_read(&data.started) != cpus)
cpu_relax();
if (!wait)
return;
while (atomic_read(&data.finished) != cpus)
cpu_relax();
}
/* /*
* smp_call_function - run a function on all other CPUs. * smp_call_function - run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking. * @func: The function to run. This must be fast and non-blocking.
...@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info, ...@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info,
int smp_call_function (void (*func) (void *info), void *info, int nonatomic, int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
int wait) int wait)
{ {
spin_lock(&call_lock); return smp_call_function_mask(cpu_online_map, func, info, wait);
__smp_call_function(func,info,nonatomic,wait);
spin_unlock(&call_lock);
return 0;
} }
EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function);
...@@ -479,7 +486,7 @@ void smp_send_stop(void) ...@@ -479,7 +486,7 @@ void smp_send_stop(void)
/* Don't deadlock on the call lock in panic */ /* Don't deadlock on the call lock in panic */
nolock = !spin_trylock(&call_lock); nolock = !spin_trylock(&call_lock);
local_irq_save(flags); local_irq_save(flags);
__smp_call_function(stop_this_cpu, NULL, 0, 0); __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
if (!nolock) if (!nolock)
spin_unlock(&call_lock); spin_unlock(&call_lock);
disable_local_APIC(); disable_local_APIC();
......
...@@ -67,7 +67,7 @@ int smp_num_siblings = 1; ...@@ -67,7 +67,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */ /* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* representing HT siblings of each logical CPU */ /* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
...@@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map); ...@@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask; static cpumask_t smp_commenced_mask;
/* Per CPU bogomips and other parameters */ /* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_SYMBOL(cpu_data); EXPORT_PER_CPU_SYMBOL(cpu_info);
u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = /*
{ [0 ... NR_CPUS-1] = 0xff }; * The following static array is used during kernel startup
EXPORT_SYMBOL(x86_cpu_to_apicid); * and the x86_cpu_to_apicid_ptr contains the address of the
 * array during this time. It is zeroed once the per_cpu
 * data area is set up, and the static array is then discarded.
*/
u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
{ [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_cpu_to_apicid_ptr;
DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
u8 apicid_2_node[MAX_APICID]; u8 apicid_2_node[MAX_APICID];
...@@ -150,9 +158,10 @@ void __init smp_alloc_memory(void) ...@@ -150,9 +158,10 @@ void __init smp_alloc_memory(void)
void __cpuinit smp_store_cpu_info(int id) void __cpuinit smp_store_cpu_info(int id)
{ {
struct cpuinfo_x86 *c = cpu_data + id; struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data; *c = boot_cpu_data;
c->cpu_index = id;
if (id!=0) if (id!=0)
identify_secondary_cpu(c); identify_secondary_cpu(c);
/* /*
...@@ -294,7 +303,7 @@ static int cpucount; ...@@ -294,7 +303,7 @@ static int cpucount;
/* maps the cpu to the sched domain representing multi-core */ /* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu) cpumask_t cpu_coregroup_map(int cpu)
{ {
struct cpuinfo_x86 *c = cpu_data + cpu; struct cpuinfo_x86 *c = &cpu_data(cpu);
/* /*
* For perf, we return last level cache shared map. * For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map * And for power savings, we return cpu_core_map
...@@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map; ...@@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
void __cpuinit set_cpu_sibling_map(int cpu) void __cpuinit set_cpu_sibling_map(int cpu)
{ {
int i; int i;
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu_set(cpu, cpu_sibling_setup_map); cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) { if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (c[cpu].phys_proc_id == c[i].phys_proc_id && if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
c[cpu].cpu_core_id == c[i].cpu_core_id) { c->cpu_core_id == cpu_data(i).cpu_core_id) {
cpu_set(i, per_cpu(cpu_sibling_map, cpu)); cpu_set(i, per_cpu(cpu_sibling_map, cpu));
cpu_set(cpu, per_cpu(cpu_sibling_map, i)); cpu_set(cpu, per_cpu(cpu_sibling_map, i));
cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i)); cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c->llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, cpu_data(i).llc_shared_map);
} }
} }
} else { } else {
cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
} }
cpu_set(cpu, c[cpu].llc_shared_map); cpu_set(cpu, c->llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) { if (current_cpu_data.x86_max_cores == 1) {
per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
c[cpu].booted_cores = 1; c->booted_cores = 1;
return; return;
} }
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_llc_id[cpu] != BAD_APICID && if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
cpu_llc_id[cpu] == cpu_llc_id[i]) { per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c->llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, cpu_data(i).llc_shared_map);
} }
if (c[cpu].phys_proc_id == c[i].phys_proc_id) { if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i)); cpu_set(cpu, per_cpu(cpu_core_map, i));
/* /*
...@@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) ...@@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
* the booted_cores for this new cpu * the booted_cores for this new cpu
*/ */
if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
c[cpu].booted_cores++; c->booted_cores++;
/* /*
* increment the core count for all * increment the core count for all
* the other cpus in this package * the other cpus in this package
*/ */
if (i != cpu) if (i != cpu)
c[i].booted_cores++; cpu_data(i).booted_cores++;
} else if (i != cpu && !c[cpu].booted_cores) } else if (i != cpu && !c->booted_cores)
c[cpu].booted_cores = c[i].booted_cores; c->booted_cores = cpu_data(i).booted_cores;
} }
} }
} }
...@@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) ...@@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
irq_ctx_init(cpu); irq_ctx_init(cpu);
x86_cpu_to_apicid[cpu] = apicid; per_cpu(x86_cpu_to_apicid, cpu) = apicid;
/* /*
* This grunge runs the startup process for * This grunge runs the startup process for
* the targeted processor. * the targeted processor.
...@@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) ...@@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
/* number CPUs logically, starting from 1 (BSP is 0) */ /* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n"); Dprintk("OK.\n");
printk("CPU%d: ", cpu); printk("CPU%d: ", cpu);
print_cpu_info(&cpu_data[cpu]); print_cpu_info(&cpu_data(cpu));
Dprintk("CPU has booted.\n"); Dprintk("CPU has booted.\n");
} else { } else {
boot_error= 1; boot_error= 1;
...@@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) ...@@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
cpucount--; cpucount--;
} else { } else {
x86_cpu_to_apicid[cpu] = apicid; per_cpu(x86_cpu_to_apicid, cpu) = apicid;
cpu_set(cpu, cpu_present_map); cpu_set(cpu, cpu_present_map);
} }
...@@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) ...@@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
struct warm_boot_cpu_info info; struct warm_boot_cpu_info info;
int apicid, ret; int apicid, ret;
apicid = x86_cpu_to_apicid[cpu]; apicid = per_cpu(x86_cpu_to_apicid, cpu);
if (apicid == BAD_APICID) { if (apicid == BAD_APICID) {
ret = -ENODEV; ret = -ENODEV;
goto exit; goto exit;
...@@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
*/ */
smp_store_cpu_info(0); /* Final full version of the data */ smp_store_cpu_info(0); /* Final full version of the data */
printk("CPU%d: ", 0); printk("CPU%d: ", 0);
print_cpu_info(&cpu_data[0]); print_cpu_info(&cpu_data(0));
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
boot_cpu_logical_apicid = logical_smp_processor_id(); boot_cpu_logical_apicid = logical_smp_processor_id();
x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
current_thread_info()->cpu = 0; current_thread_info()->cpu = 0;
...@@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
smpboot_clear_io_apic_irqs(); smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0); phys_cpu_present_map = physid_mask_of_physid(0);
map_cpu_to_logical_apicid();
cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, per_cpu(cpu_core_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0));
return; return;
...@@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
} }
smpboot_clear_io_apic_irqs(); smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0); phys_cpu_present_map = physid_mask_of_physid(0);
map_cpu_to_logical_apicid();
cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, per_cpu(cpu_sibling_map, 0));
cpu_set(0, per_cpu(cpu_core_map, 0)); cpu_set(0, per_cpu(cpu_core_map, 0));
return; return;
...@@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
Dprintk("Before bogomips.\n"); Dprintk("Before bogomips.\n");
for (cpu = 0; cpu < NR_CPUS; cpu++) for (cpu = 0; cpu < NR_CPUS; cpu++)
if (cpu_isset(cpu, cpu_callout_map)) if (cpu_isset(cpu, cpu_callout_map))
bogosum += cpu_data[cpu].loops_per_jiffy; bogosum += cpu_data(cpu).loops_per_jiffy;
printk(KERN_INFO printk(KERN_INFO
"Total of %d processors activated (%lu.%02lu BogoMIPS).\n", "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1, cpucount+1,
...@@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void) ...@@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
void remove_siblinginfo(int cpu) void remove_siblinginfo(int cpu)
{ {
int sibling; int sibling;
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = &cpu_data(cpu);
for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
...@@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu) ...@@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu)
* last thread sibling in this cpu core going down * last thread sibling in this cpu core going down
*/ */
if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
c[sibling].booted_cores--; cpu_data(sibling).booted_cores--;
} }
for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
cpus_clear(per_cpu(cpu_sibling_map, cpu)); cpus_clear(per_cpu(cpu_sibling_map, cpu));
cpus_clear(per_cpu(cpu_core_map, cpu)); cpus_clear(per_cpu(cpu_core_map, cpu));
c[cpu].phys_proc_id = 0; c->phys_proc_id = 0;
c[cpu].cpu_core_id = 0; c->cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map); cpu_clear(cpu, cpu_sibling_setup_map);
} }
......
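
All the cpu_data[i] -> cpu_data(i) conversions in these hunks rely on the accessor having become a macro over the new per-CPU variable; roughly, per include/asm-x86/processor_32.h in this series:

DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
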
...@@ -65,7 +65,7 @@ int smp_num_siblings = 1; ...@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */ /* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* Bitmask of currently online CPUs */ /* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly; cpumask_t cpu_online_map __read_mostly;
...@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map; ...@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map); EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */ /* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_SYMBOL(cpu_data); EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Set when the idlers are all forked */ /* Set when the idlers are all forked */
int smp_threads_ready; int smp_threads_ready;
...@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void) ...@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void)
static void __cpuinit smp_store_cpu_info(int id) static void __cpuinit smp_store_cpu_info(int id)
{ {
struct cpuinfo_x86 *c = cpu_data + id; struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data; *c = boot_cpu_data;
c->cpu_index = id;
identify_cpu(c); identify_cpu(c);
print_cpu_info(c); print_cpu_info(c);
} }
...@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void) ...@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void)
/* maps the cpu to the sched domain representing multi-core */ /* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu) cpumask_t cpu_coregroup_map(int cpu)
{ {
struct cpuinfo_x86 *c = cpu_data + cpu; struct cpuinfo_x86 *c = &cpu_data(cpu);
/* /*
* For perf, we return last level cache shared map. * For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map * And for power savings, we return cpu_core_map
...@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map; ...@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map;
static inline void set_cpu_sibling_map(int cpu) static inline void set_cpu_sibling_map(int cpu)
{ {
int i; int i;
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = &cpu_data(cpu);
cpu_set(cpu, cpu_sibling_setup_map); cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) { if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (c[cpu].phys_proc_id == c[i].phys_proc_id && if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
c[cpu].cpu_core_id == c[i].cpu_core_id) { c->cpu_core_id == cpu_data(i).cpu_core_id) {
cpu_set(i, per_cpu(cpu_sibling_map, cpu)); cpu_set(i, per_cpu(cpu_sibling_map, cpu));
cpu_set(cpu, per_cpu(cpu_sibling_map, i)); cpu_set(cpu, per_cpu(cpu_sibling_map, i));
cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i)); cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c->llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, cpu_data(i).llc_shared_map);
} }
} }
} else { } else {
cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
} }
cpu_set(cpu, c[cpu].llc_shared_map); cpu_set(cpu, c->llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) { if (current_cpu_data.x86_max_cores == 1) {
per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
c[cpu].booted_cores = 1; c->booted_cores = 1;
return; return;
} }
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_llc_id[cpu] != BAD_APICID && if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
cpu_llc_id[cpu] == cpu_llc_id[i]) { per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c->llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, cpu_data(i).llc_shared_map);
} }
if (c[cpu].phys_proc_id == c[i].phys_proc_id) { if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpu_set(i, per_cpu(cpu_core_map, cpu)); cpu_set(i, per_cpu(cpu_core_map, cpu));
cpu_set(cpu, per_cpu(cpu_core_map, i)); cpu_set(cpu, per_cpu(cpu_core_map, i));
/* /*
...@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu)
* the booted_cores for this new cpu * the booted_cores for this new cpu
*/ */
if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
c[cpu].booted_cores++; c->booted_cores++;
/* /*
* increment the core count for all * increment the core count for all
* the other cpus in this package * the other cpus in this package
*/ */
if (i != cpu) if (i != cpu)
c[i].booted_cores++; cpu_data(i).booted_cores++;
} else if (i != cpu && !c[cpu].booted_cores) } else if (i != cpu && !c->booted_cores)
c[cpu].booted_cores = c[i].booted_cores; c->booted_cores = cpu_data(i).booted_cores;
} }
} }
} }
...@@ -694,7 +695,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) ...@@ -694,7 +695,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
clear_node_cpumask(cpu); /* was set by numa_add_cpu */ clear_node_cpumask(cpu); /* was set by numa_add_cpu */
cpu_clear(cpu, cpu_present_map); cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, cpu_possible_map); cpu_clear(cpu, cpu_possible_map);
x86_cpu_to_apicid[cpu] = BAD_APICID; per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
return -EIO; return -EIO;
} }
...@@ -840,6 +841,26 @@ static int __init smp_sanity_check(unsigned max_cpus) ...@@ -840,6 +841,26 @@ static int __init smp_sanity_check(unsigned max_cpus)
return 0; return 0;
} }
/*
 * Copy the apicids found by MP_processor_info from the initial array to the per cpu
* data area. The x86_cpu_to_apicid_init array is then expendable and the
* x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
* longer available.
*/
void __init smp_set_apicids(void)
{
int cpu;
for_each_cpu_mask(cpu, cpu_possible_map) {
if (per_cpu_offset(cpu))
per_cpu(x86_cpu_to_apicid, cpu) =
x86_cpu_to_apicid_init[cpu];
}
/* indicate the static array will be going away soon */
x86_cpu_to_apicid_ptr = NULL;
}
/* /*
* Prepare for SMP bootup. The MP table or ACPI has been read * Prepare for SMP bootup. The MP table or ACPI has been read
* earlier. Just do some sanity checking here and enable APIC mode. * earlier. Just do some sanity checking here and enable APIC mode.
...@@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
nmi_watchdog_default(); nmi_watchdog_default();
current_cpu_data = boot_cpu_data; current_cpu_data = boot_cpu_data;
current_thread_info()->cpu = 0; /* needed? */ current_thread_info()->cpu = 0; /* needed? */
smp_set_apicids();
set_cpu_sibling_map(0); set_cpu_sibling_map(0);
if (smp_sanity_check(max_cpus) < 0) { if (smp_sanity_check(max_cpus) < 0) {
...@@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus) ...@@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
static void remove_siblinginfo(int cpu) static void remove_siblinginfo(int cpu)
{ {
int sibling; int sibling;
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = &cpu_data(cpu);
for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
...@@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu) ...@@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu)
* last thread sibling in this cpu core going down * last thread sibling in this cpu core going down
*/ */
if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
c[sibling].booted_cores--; cpu_data(sibling).booted_cores--;
} }
for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
cpus_clear(per_cpu(cpu_sibling_map, cpu)); cpus_clear(per_cpu(cpu_sibling_map, cpu));
cpus_clear(per_cpu(cpu_core_map, cpu)); cpus_clear(per_cpu(cpu_core_map, cpu));
c[cpu].phys_proc_id = 0; c->phys_proc_id = 0;
c[cpu].cpu_core_id = 0; c->cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map); cpu_clear(cpu, cpu_sibling_setup_map);
} }
......
...@@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt) ...@@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt)
/* /*
* descriptor tables * descriptor tables
*/ */
asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit)); store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
asm volatile ("sidt %0" : "=m" (ctxt->idt_limit)); store_idt((struct desc_ptr *)&ctxt->idt_limit);
asm volatile ("str %0" : "=m" (ctxt->tr)); store_tr(ctxt->tr);
/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
/* /*
...@@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt) ...@@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt)
* now restore the descriptor tables to their proper values * now restore the descriptor tables to their proper values
 * ltr is done in fix_processor_context(). * ltr is done in fix_processor_context().
*/ */
asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit)); load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
asm volatile ("lidt %0" :: "m" (ctxt->idt_limit)); load_idt((const struct desc_ptr *)&ctxt->idt_limit);
/* /*
* segment registers * segment registers
......
...@@ -63,6 +63,9 @@ ...@@ -63,6 +63,9 @@
int panic_on_unrecovered_nmi; int panic_on_unrecovered_nmi;
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
asmlinkage int system_call(void); asmlinkage int system_call(void);
/* Do we ignore FPU interrupts ? */ /* Do we ignore FPU interrupts ? */
...@@ -288,33 +291,9 @@ EXPORT_SYMBOL(dump_stack); ...@@ -288,33 +291,9 @@ EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs) void show_registers(struct pt_regs *regs)
{ {
int i; int i;
int in_kernel = 1;
unsigned long esp;
unsigned short ss, gs;
esp = (unsigned long) (&regs->esp);
savesegment(ss, ss);
savesegment(gs, gs);
if (user_mode_vm(regs)) {
in_kernel = 0;
esp = regs->esp;
ss = regs->xss & 0xffff;
}
print_modules(); print_modules();
printk(KERN_EMERG "CPU: %d\n" __show_registers(regs, 0);
KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
smp_processor_id(), 0xffff & regs->xcs, regs->eip,
print_tainted(), regs->eflags, init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
regs->eax, regs->ebx, regs->ecx, regs->edx);
printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
regs->esi, regs->edi, regs->ebp, esp);
printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
TASK_COMM_LEN, current->comm, task_pid_nr(current), TASK_COMM_LEN, current->comm, task_pid_nr(current),
current_thread_info(), current, task_thread_info(current)); current_thread_info(), current, task_thread_info(current));
...@@ -322,14 +301,14 @@ void show_registers(struct pt_regs *regs) ...@@ -322,14 +301,14 @@ void show_registers(struct pt_regs *regs)
* When in-kernel, we also print out the stack and code at the * When in-kernel, we also print out the stack and code at the
 * time of the fault. * time of the fault.
*/ */
if (in_kernel) { if (!user_mode_vm(regs)) {
u8 *eip; u8 *eip;
unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes; unsigned int code_len = code_bytes;
unsigned char c; unsigned char c;
printk("\n" KERN_EMERG "Stack: "); printk("\n" KERN_EMERG "Stack: ");
show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
printk(KERN_EMERG "Code: "); printk(KERN_EMERG "Code: ");
...@@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip) ...@@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip)
void die(const char * str, struct pt_regs * regs, long err) void die(const char * str, struct pt_regs * regs, long err)
{ {
static struct { static struct {
spinlock_t lock; raw_spinlock_t lock;
u32 lock_owner; u32 lock_owner;
int lock_owner_depth; int lock_owner_depth;
} die = { } die = {
.lock = __SPIN_LOCK_UNLOCKED(die.lock), .lock = __RAW_SPIN_LOCK_UNLOCKED,
.lock_owner = -1, .lock_owner = -1,
.lock_owner_depth = 0 .lock_owner_depth = 0
}; };
...@@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err) ...@@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err)
if (die.lock_owner != raw_smp_processor_id()) { if (die.lock_owner != raw_smp_processor_id()) {
console_verbose(); console_verbose();
spin_lock_irqsave(&die.lock, flags); __raw_spin_lock(&die.lock);
raw_local_save_flags(flags);
die.lock_owner = smp_processor_id(); die.lock_owner = smp_processor_id();
die.lock_owner_depth = 0; die.lock_owner_depth = 0;
bust_spinlocks(1); bust_spinlocks(1);
} }
else else
local_save_flags(flags); raw_local_save_flags(flags);
if (++die.lock_owner_depth < 3) { if (++die.lock_owner_depth < 3) {
unsigned long esp; unsigned long esp;
...@@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err) ...@@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err)
bust_spinlocks(0); bust_spinlocks(0);
die.lock_owner = -1; die.lock_owner = -1;
add_taint(TAINT_DIE); add_taint(TAINT_DIE);
spin_unlock_irqrestore(&die.lock, flags); __raw_spin_unlock(&die.lock);
raw_local_irq_restore(flags);
if (!regs) if (!regs)
return; return;
...@@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry) ...@@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
void __init trap_init(void) void __init trap_init(void)
{ {
int i;
#ifdef CONFIG_EISA #ifdef CONFIG_EISA
void __iomem *p = ioremap(0x0FFFD9, 4); void __iomem *p = ioremap(0x0FFFD9, 4);
if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) { if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
...@@ -1201,6 +1184,11 @@ void __init trap_init(void) ...@@ -1201,6 +1184,11 @@ void __init trap_init(void)
set_system_gate(SYSCALL_VECTOR,&system_call); set_system_gate(SYSCALL_VECTOR,&system_call);
/* Reserve all the builtin and the syscall vector. */
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
set_bit(i, used_vectors);
set_bit(SYSCALL_VECTOR, used_vectors);
/* /*
* Should be a barrier for any external CPU state. * Should be a barrier for any external CPU state.
*/ */
......
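
used_vectors gives later code a way to claim an interrupt vector atomically instead of assuming a fixed split; a hedged sketch of the intended consumer pattern (reserve_vector is illustrative, not part of this patch):

static int reserve_vector(unsigned int vec)
{
	if (test_and_set_bit(vec, used_vectors))
		return -EBUSY;	/* already taken, e.g. a trap or syscall gate */
	return 0;
}
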
...@@ -462,7 +462,7 @@ void out_of_line_bug(void) ...@@ -462,7 +462,7 @@ void out_of_line_bug(void)
EXPORT_SYMBOL(out_of_line_bug); EXPORT_SYMBOL(out_of_line_bug);
#endif #endif
static DEFINE_SPINLOCK(die_lock); static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1; static int die_owner = -1;
static unsigned int die_nest_count; static unsigned int die_nest_count;
...@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void) ...@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void)
oops_enter(); oops_enter();
/* racy, but better than risking deadlock. */ /* racy, but better than risking deadlock. */
local_irq_save(flags); raw_local_irq_save(flags);
cpu = smp_processor_id(); cpu = smp_processor_id();
if (!spin_trylock(&die_lock)) { if (!__raw_spin_trylock(&die_lock)) {
if (cpu == die_owner) if (cpu == die_owner)
/* nested oops. should stop eventually */; /* nested oops. should stop eventually */;
else else
spin_lock(&die_lock); __raw_spin_lock(&die_lock);
} }
die_nest_count++; die_nest_count++;
die_owner = cpu; die_owner = cpu;
...@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags) ...@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags)
die_owner = -1; die_owner = -1;
bust_spinlocks(0); bust_spinlocks(0);
die_nest_count--; die_nest_count--;
if (die_nest_count) if (!die_nest_count)
/* We still own the lock */
local_irq_restore(flags);
else
/* Nest count reaches zero, release the lock. */ /* Nest count reaches zero, release the lock. */
spin_unlock_irqrestore(&die_lock, flags); __raw_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
if (panic_on_oops) if (panic_on_oops)
panic("Fatal exception"); panic("Fatal exception");
oops_exit(); oops_exit();
......
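
Both die() paths switch to the raw primitives so that an oops taken inside lockdep or the spinlock-debugging code cannot recurse through them; reduced to a sketch, the pattern is:

	raw_local_irq_save(flags);		/* no irq-tracing hooks */
	if (!__raw_spin_trylock(&die_lock))
		__raw_spin_lock(&die_lock);	/* spin without lockdep */
	/* ... print the oops ... */
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
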
...@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void) ...@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void)
if (cpu_has_tsc) { if (cpu_has_tsc) {
cpu_khz = calculate_cpu_khz(); cpu_khz = calculate_cpu_khz();
tsc_khz = cpu_khz; tsc_khz = cpu_khz;
cpu_data[0].loops_per_jiffy = cpu_data(0).loops_per_jiffy =
cpufreq_scale(cpu_data[0].loops_per_jiffy, cpufreq_scale(cpu_data(0).loops_per_jiffy,
cpu_khz_old, cpu_khz); cpu_khz_old, cpu_khz);
return 0; return 0;
} else } else
...@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) ...@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
return 0; return 0;
} }
ref_freq = freq->old; ref_freq = freq->old;
loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
cpu_khz_ref = cpu_khz; cpu_khz_ref = cpu_khz;
} }
...@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) ...@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
(val == CPUFREQ_RESUMECHANGE)) { (val == CPUFREQ_RESUMECHANGE)) {
if (!(freq->flags & CPUFREQ_CONST_LOOPS)) if (!(freq->flags & CPUFREQ_CONST_LOOPS))
cpu_data[freq->cpu].loops_per_jiffy = cpu_data(freq->cpu).loops_per_jiffy =
cpufreq_scale(loops_per_jiffy_ref, cpufreq_scale(loops_per_jiffy_ref,
ref_freq, freq->new); ref_freq, freq->new);
......
...@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, ...@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
struct cpufreq_freqs *freq = data; struct cpufreq_freqs *freq = data;
unsigned long *lpj, dummy; unsigned long *lpj, dummy;
if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC)) if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
return 0; return 0;
lpj = &dummy; lpj = &dummy;
if (!(freq->flags & CPUFREQ_CONST_LOOPS)) if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lpj = &cpu_data[freq->cpu].loops_per_jiffy; lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else #else
lpj = &boot_cpu_data.loops_per_jiffy; lpj = &boot_cpu_data.loops_per_jiffy;
#endif #endif
......
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
({unsigned long v; \ ({unsigned long v; \
extern char __vsyscall_0; \ extern char __vsyscall_0; \
asm("" : "=r" (v) : "0" (x)); \ asm("" : "=r" (v) : "0" (x)); \
((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); }) ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
/* /*
* vsyscall_gtod_data contains data that is : * vsyscall_gtod_data contains data that is :
...@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu) ...@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
node = cpu_to_node(cpu); node = cpu_to_node(cpu);
#endif #endif
if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu); write_rdtscp_aux((node << 12) | cpu);
/* Store cpu number in limit so that it can be loaded quickly /* Store cpu number in limit so that it can be loaded quickly
......
...@@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops) ...@@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops)
__asm__("mull %0" __asm__("mull %0"
:"=d" (xloops), "=&a" (d0) :"=d" (xloops), "=&a" (d0)
:"1" (xloops), "0" :"1" (xloops), "0"
(cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))); (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
__delay(++xloops); __delay(++xloops);
} }
......
...@@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay); ...@@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops) inline void __const_udelay(unsigned long xloops)
{ {
__delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1); __delay(((xloops * HZ *
cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
} }
EXPORT_SYMBOL(__const_udelay); EXPORT_SYMBOL(__const_udelay);
......
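
Both __const_udelay() variants receive a pre-scaled argument: callers multiply microseconds by about 2^32/10^6 up front, roughly as in include/asm-x86/delay.h (sketched from memory):

/* 0x10c7 ~= 2^32 / 1000000: 'xloops' is a fraction of a second in
 * 32.32 fixed point; multiplying by loops-per-second and keeping the
 * high 32 bits (the mull / >>32 above) yields delay loops. */
#define udelay(n)	__const_udelay((n) * 0x10c7ul)
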
...@@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR ...@@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR
/* per CPU data structure (for /proc/cpuinfo et al), visible externally /* per CPU data structure (for /proc/cpuinfo et al), visible externally
* indexed physically */ * indexed physically */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info) __cacheline_aligned;
EXPORT_SYMBOL(cpu_data); EXPORT_PER_CPU_SYMBOL(cpu_info);
/* physical ID of the CPU used to boot the system */ /* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id; unsigned char boot_cpu_id;
...@@ -430,7 +430,7 @@ find_smp_config(void) ...@@ -430,7 +430,7 @@ find_smp_config(void)
void __init void __init
smp_store_cpu_info(int id) smp_store_cpu_info(int id)
{ {
struct cpuinfo_x86 *c=&cpu_data[id]; struct cpuinfo_x86 *c = &cpu_data(id);
*c = boot_cpu_data; *c = boot_cpu_data;
...@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu) ...@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu)
cpu, smp_processor_id())); cpu, smp_processor_id()));
printk("CPU%d: ", cpu); printk("CPU%d: ", cpu);
print_cpu_info(&cpu_data[cpu]); print_cpu_info(&cpu_data(cpu));
wmb(); wmb();
cpu_set(cpu, cpu_callout_map); cpu_set(cpu, cpu_callout_map);
cpu_set(cpu, cpu_present_map); cpu_set(cpu, cpu_present_map);
...@@ -683,7 +683,7 @@ smp_boot_cpus(void) ...@@ -683,7 +683,7 @@ smp_boot_cpus(void)
*/ */
smp_store_cpu_info(boot_cpu_id); smp_store_cpu_info(boot_cpu_id);
printk("CPU%d: ", boot_cpu_id); printk("CPU%d: ", boot_cpu_id);
print_cpu_info(&cpu_data[boot_cpu_id]); print_cpu_info(&cpu_data(boot_cpu_id));
if(is_cpu_quad()) { if(is_cpu_quad()) {
/* booting on a Quad CPU */ /* booting on a Quad CPU */
...@@ -714,7 +714,7 @@ smp_boot_cpus(void) ...@@ -714,7 +714,7 @@ smp_boot_cpus(void)
unsigned long bogosum = 0; unsigned long bogosum = 0;
for (i = 0; i < NR_CPUS; i++) for (i = 0; i < NR_CPUS; i++)
if (cpu_isset(i, cpu_online_map)) if (cpu_isset(i, cpu_online_map))
bogosum += cpu_data[i].loops_per_jiffy; bogosum += cpu_data(i).loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpucount+1, cpucount+1,
bogosum/(500000/HZ), bogosum/(500000/HZ),
......
...@@ -564,7 +564,8 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, ...@@ -564,7 +564,8 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
* it's allocated already. * it's allocated already.
*/ */
if ((page >> PAGE_SHIFT) < max_low_pfn if ((page >> PAGE_SHIFT) < max_low_pfn
&& (page & _PAGE_PRESENT)) { && (page & _PAGE_PRESENT)
&& !(page & _PAGE_PSE)) {
page &= PAGE_MASK; page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
& (PTRS_PER_PTE - 1)]; & (PTRS_PER_PTE - 1)];
......
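The added !(page & _PAGE_PSE) test is the "honor _PAGE_PSE bit on page walks" fix from the changelog: when PSE is set, the PDE maps a 4MB page directly, so its payload is a physical frame rather than the address of a second-level page table, and dereferencing it as a table would print garbage in the oops path. A hedged sketch of the guarded walk, following the i386 code around this hunk:

	unsigned long page = read_cr3();

	page = ((unsigned long *) __va(page & PAGE_MASK))[address >> 22];
	if ((page >> PAGE_SHIFT) < max_low_pfn &&
	    (page & _PAGE_PRESENT) && !(page & _PAGE_PSE)) {
		/* only now is it safe to treat the entry as a pte table */
		page &= PAGE_MASK;
		page = ((unsigned long *) __va(page))
			[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
	}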
...@@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address) ...@@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address)
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
if (bad_address(pmd)) goto bad; if (bad_address(pmd)) goto bad;
printk("PMD %lx ", pmd_val(*pmd)); printk("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd)) goto ret; if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
pte = pte_offset_kernel(pmd, address); pte = pte_offset_kernel(pmd, address);
if (bad_address(pte)) goto bad; if (bad_address(pte)) goto bad;
...@@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address) ...@@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address)
return 0; return 0;
} }
static int page_fault_trace;
int show_unhandled_signals = 1; int show_unhandled_signals = 1;
/* /*
...@@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, ...@@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (likely(regs->eflags & X86_EFLAGS_IF)) if (likely(regs->eflags & X86_EFLAGS_IF))
local_irq_enable(); local_irq_enable();
if (unlikely(page_fault_trace))
printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
if (unlikely(error_code & PF_RSVD)) if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code); pgtable_bad(address, regs, error_code);
...@@ -488,7 +483,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, ...@@ -488,7 +483,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) { printk_ratelimit()) {
printk( printk(
"%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
tsk->pid > 1 ? KERN_INFO : KERN_EMERG, tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, tsk->pid, address, regs->rip, tsk->comm, tsk->pid, address, regs->rip,
regs->rsp, error_code); regs->rsp, error_code);
...@@ -621,10 +616,3 @@ void vmalloc_sync_all(void) ...@@ -621,10 +616,3 @@ void vmalloc_sync_all(void)
BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
(__START_KERNEL & PGDIR_MASK))); (__START_KERNEL & PGDIR_MASK)));
} }
static int __init enable_pagefaulttrace(char *str)
{
page_fault_trace = 1;
return 1;
}
__setup("pagefaulttrace", enable_pagefaulttrace);
...@@ -612,7 +612,7 @@ void __init init_cpu_to_node(void) ...@@ -612,7 +612,7 @@ void __init init_cpu_to_node(void)
{ {
int i; int i;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
u8 apicid = x86_cpu_to_apicid[i]; u8 apicid = x86_cpu_to_apicid_init[i];
if (apicid == BAD_APICID) if (apicid == BAD_APICID)
continue; continue;
if (apicid_to_node[apicid] == NUMA_NO_NODE) if (apicid_to_node[apicid] == NUMA_NO_NODE)
......
...@@ -13,25 +13,45 @@ ...@@ -13,25 +13,45 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/stacktrace.h>
struct frame_head { static void backtrace_warning_symbol(void *data, char *msg,
struct frame_head * ebp; unsigned long symbol)
unsigned long ret; {
} __attribute__((packed)); /* Ignore warnings */
}
static struct frame_head * static void backtrace_warning(void *data, char *msg)
dump_kernel_backtrace(struct frame_head * head)
{ {
oprofile_add_trace(head->ret); /* Ignore warnings */
}
/* frame pointers should strictly progress back up the stack static int backtrace_stack(void *data, char *name)
* (towards higher addresses) */ {
if (head >= head->ebp) /* Yes, we want all stacks */
return NULL; return 0;
}
static void backtrace_address(void *data, unsigned long addr)
{
unsigned int *depth = data;
return head->ebp; if ((*depth)--)
oprofile_add_trace(addr);
} }
static struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
};
struct frame_head {
struct frame_head *ebp;
unsigned long ret;
} __attribute__((packed));
static struct frame_head * static struct frame_head *
dump_user_backtrace(struct frame_head * head) dump_user_backtrace(struct frame_head * head)
{ {
...@@ -53,72 +73,16 @@ dump_user_backtrace(struct frame_head * head) ...@@ -53,72 +73,16 @@ dump_user_backtrace(struct frame_head * head)
return bufhead[0].ebp; return bufhead[0].ebp;
} }
/*
* | | /\ Higher addresses
* | |
* --------------- stack base (address of current_thread_info)
* | thread info |
* . .
* | stack |
* --------------- saved regs->ebp value if valid (frame_head address)
* . .
* --------------- saved regs->rsp value if x86_64
* | |
* --------------- struct pt_regs * stored on stack if 32-bit
* | |
* . .
* | |
* --------------- %esp
* | |
* | | \/ Lower addresses
*
* Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the
* valid(ish) ebp values. Note: (1) for x86_64, NMI and several other
* exceptions use special stacks, maintained by the interrupt stack table
* (IST). These stacks are set up in trap_init() in
* arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point
* to the kernel stack; instead, it points to some location on the NMI
* stack. On the other hand, regs->rsp is the stack pointer saved when the
* NMI occurred. (2) For 32-bit, regs->esp is not valid because the
* processor does not save %esp on the kernel stack when interrupts occur
* in the kernel mode.
*/
#ifdef CONFIG_FRAME_POINTER
static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
{
unsigned long headaddr = (unsigned long)head;
#ifdef CONFIG_X86_64
unsigned long stack = (unsigned long)regs->rsp;
#else
unsigned long stack = (unsigned long)regs;
#endif
unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
return headaddr > stack && headaddr < stack_base;
}
#else
/* without fp, it's just junk */
static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
{
return 0;
}
#endif
void void
x86_backtrace(struct pt_regs * const regs, unsigned int depth) x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{ {
struct frame_head *head; struct frame_head *head = (struct frame_head *)frame_pointer(regs);
unsigned long stack = stack_pointer(regs);
#ifdef CONFIG_X86_64
head = (struct frame_head *)regs->rbp;
#else
head = (struct frame_head *)regs->ebp;
#endif
if (!user_mode_vm(regs)) { if (!user_mode_vm(regs)) {
while (depth-- && valid_kernel_stack(head, regs)) if (depth)
head = dump_kernel_backtrace(head); dump_trace(NULL, regs, (unsigned long *)stack,
&backtrace_ops, &depth);
return; return;
} }
......
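Instead of open-coding a frame-pointer walk for kernel stacks, oprofile now hands dump_trace() a struct stacktrace_ops whose .address callback records return addresses until the requested depth runs out; the same callback scheme is available to any other consumer. A minimal sketch under that assumption (the my_* names are illustrative):

#include <linux/kernel.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

static void my_address(void *data, unsigned long addr)
{
	unsigned int *remaining = data;

	if ((*remaining)--)
		printk(KERN_DEBUG "frame: %lx\n", addr);
}

static int my_stack(void *data, char *name)
{
	return 0;	/* accept every stack: process, IRQ, exception */
}

static void my_warning(void *data, char *msg) { }
static void my_warning_symbol(void *data, char *msg, unsigned long sym) { }

static struct stacktrace_ops my_ops = {
	.warning	= my_warning,
	.warning_symbol	= my_warning_symbol,
	.stack		= my_stack,
	.address	= my_address,
};

static void trace_kernel_frames(struct pt_regs *regs)
{
	unsigned int depth = 16;

	dump_trace(NULL, regs, (unsigned long *)stack_pointer(regs),
		   &my_ops, &depth);
}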
...@@ -723,7 +723,7 @@ config ARCH_HIBERNATION_HEADER ...@@ -723,7 +723,7 @@ config ARCH_HIBERNATION_HEADER
source "drivers/acpi/Kconfig" source "drivers/acpi/Kconfig"
source "arch/x86/kernel/cpufreq/Kconfig" source "arch/x86/kernel/cpu/cpufreq/Kconfig_64"
source "drivers/cpuidle/Kconfig" source "drivers/cpuidle/Kconfig"
...@@ -768,9 +768,9 @@ source "fs/Kconfig.binfmt" ...@@ -768,9 +768,9 @@ source "fs/Kconfig.binfmt"
config IA32_EMULATION config IA32_EMULATION
bool "IA32 Emulation" bool "IA32 Emulation"
help help
Include code to run 32-bit programs under a 64-bit kernel. You should likely Include code to run 32-bit programs under a 64-bit kernel. You should
turn this on, unless you're 100% sure that you don't have any 32-bit programs likely turn this on, unless you're 100% sure that you don't have any
left. 32-bit programs left.
config IA32_AOUT config IA32_AOUT
tristate "IA32 a.out support" tristate "IA32 a.out support"
......
...@@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y) ...@@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y)
CFLAGS_KERNEL += $(cflags-kernel-y) CFLAGS_KERNEL += $(cflags-kernel-y)
KBUILD_AFLAGS += -m64 KBUILD_AFLAGS += -m64
head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
libs-y += arch/x86/lib/ libs-y += arch/x86/lib/
core-y += arch/x86/kernel/ \ core-y += arch/x86/kernel/ \
...@@ -97,9 +97,9 @@ BOOTIMAGE := arch/x86/boot/bzImage ...@@ -97,9 +97,9 @@ BOOTIMAGE := arch/x86/boot/bzImage
KBUILD_IMAGE := $(BOOTIMAGE) KBUILD_IMAGE := $(BOOTIMAGE)
bzImage: vmlinux bzImage: vmlinux
$(Q)mkdir -p $(objtree)/arch/x86_64/boot
$(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE) $(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
$(Q)mkdir -p $(objtree)/arch/x86_64/boot
$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
bzlilo: vmlinux bzlilo: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo
......
...@@ -422,12 +422,6 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, ...@@ -422,12 +422,6 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
return 0; return 0;
} }
#ifdef CONFIG_IA64
#define arch_cpu_to_apicid ia64_cpu_to_sapicid
#else
#define arch_cpu_to_apicid x86_cpu_to_apicid
#endif
static int map_madt_entry(u32 acpi_id) static int map_madt_entry(u32 acpi_id)
{ {
unsigned long madt_end, entry; unsigned long madt_end, entry;
...@@ -501,7 +495,7 @@ static int get_cpu_id(acpi_handle handle, u32 acpi_id) ...@@ -501,7 +495,7 @@ static int get_cpu_id(acpi_handle handle, u32 acpi_id)
return apic_id; return apic_id;
for (i = 0; i < NR_CPUS; ++i) { for (i = 0; i < NR_CPUS; ++i) {
if (arch_cpu_to_apicid[i] == apic_id) if (cpu_physical_id(i) == apic_id)
return i; return i;
} }
return -1; return -1;
......
...@@ -150,7 +150,7 @@ static struct coretemp_data *coretemp_update_device(struct device *dev) ...@@ -150,7 +150,7 @@ static struct coretemp_data *coretemp_update_device(struct device *dev)
static int __devinit coretemp_probe(struct platform_device *pdev) static int __devinit coretemp_probe(struct platform_device *pdev)
{ {
struct coretemp_data *data; struct coretemp_data *data;
struct cpuinfo_x86 *c = &(cpu_data)[pdev->id]; struct cpuinfo_x86 *c = &cpu_data(pdev->id);
int err; int err;
u32 eax, edx; u32 eax, edx;
...@@ -359,7 +359,7 @@ static int __init coretemp_init(void) ...@@ -359,7 +359,7 @@ static int __init coretemp_init(void)
struct pdev_entry *p, *n; struct pdev_entry *p, *n;
/* quick check if we run Intel */ /* quick check if we run Intel */
if (cpu_data[0].x86_vendor != X86_VENDOR_INTEL) if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
goto exit; goto exit;
err = platform_driver_register(&coretemp_driver); err = platform_driver_register(&coretemp_driver);
...@@ -367,7 +367,7 @@ static int __init coretemp_init(void) ...@@ -367,7 +367,7 @@ static int __init coretemp_init(void)
goto exit; goto exit;
for_each_online_cpu(i) { for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &(cpu_data)[i]; struct cpuinfo_x86 *c = &cpu_data(i);
/* check if family 6, models e, f, 16 */ /* check if family 6, models e, f, 16 */
if ((c->cpuid_level < 0) || (c->x86 != 0x6) || if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
......
...@@ -200,7 +200,7 @@ static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor) ...@@ -200,7 +200,7 @@ static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor)
u8 vid_which_vrm(void) u8 vid_which_vrm(void)
{ {
struct cpuinfo_x86 *c = cpu_data; struct cpuinfo_x86 *c = &cpu_data(0);
u32 eax; u32 eax;
u8 eff_family, eff_model, eff_stepping, vrm_ret; u8 eff_family, eff_model, eff_stepping, vrm_ret;
......
...@@ -136,7 +136,8 @@ static int gameport_measure_speed(struct gameport *gameport) ...@@ -136,7 +136,8 @@ static int gameport_measure_speed(struct gameport *gameport)
} }
gameport_close(gameport); gameport_close(gameport);
return (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx); return (cpu_data(raw_smp_processor_id()).loops_per_jiffy *
(unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
#else #else
......
...@@ -127,7 +127,7 @@ static void gx_set_dclk_frequency(struct fb_info *info) ...@@ -127,7 +127,7 @@ static void gx_set_dclk_frequency(struct fb_info *info)
int timeout = 1000; int timeout = 1000;
/* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
if (cpu_data->x86_mask == 1) { if (cpu_data(0).x86_mask == 1) {
pll_table = gx_pll_table_14MHz; pll_table = gx_pll_table_14MHz;
pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
} else { } else {
......
...@@ -81,11 +81,7 @@ int __acpi_release_global_lock(unsigned int *lock); ...@@ -81,11 +81,7 @@ int __acpi_release_global_lock(unsigned int *lock);
:"=r"(n_hi), "=r"(n_lo) \ :"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo)) :"0"(n_hi), "1"(n_lo))
#ifdef CONFIG_X86_IO_APIC extern void early_quirks(void);
extern void check_acpi_pci(void);
#else
static inline void check_acpi_pci(void) { }
#endif
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
extern int acpi_lapic; extern int acpi_lapic;
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
*/ */
#include <linux/types.h> #include <linux/types.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/user32.h>
#define COMPAT_USER_HZ 100 #define COMPAT_USER_HZ 100
...@@ -180,6 +181,11 @@ struct compat_shmid64_ds { ...@@ -180,6 +181,11 @@ struct compat_shmid64_ds {
compat_ulong_t __unused5; compat_ulong_t __unused5;
}; };
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
typedef struct user_regs_struct32 compat_elf_gregset_t;
/* /*
* A pointer passed in from user mode. This should not * A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them * be used for syscall parameters, just declare them
......
...@@ -20,6 +20,16 @@ extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; ...@@ -20,6 +20,16 @@ extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
#define clear_LDT() asm volatile("lldt %w0"::"r" (0)) #define clear_LDT() asm volatile("lldt %w0"::"r" (0))
static inline unsigned long __store_tr(void)
{
unsigned long tr;
asm volatile ("str %w0":"=r" (tr));
return tr;
}
#define store_tr(tr) (tr) = __store_tr()
/* /*
* This is the ldt that every process will get unless we need * This is the ldt that every process will get unless we need
* something other than this. * something other than this.
...@@ -31,6 +41,16 @@ extern struct desc_ptr cpu_gdt_descr[]; ...@@ -31,6 +41,16 @@ extern struct desc_ptr cpu_gdt_descr[];
/* the cpu gdt accessor */ /* the cpu gdt accessor */
#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
static inline void load_gdt(const struct desc_ptr *ptr)
{
asm volatile("lgdt %w0"::"m" (*ptr));
}
static inline void store_gdt(struct desc_ptr *ptr)
{
asm("sgdt %w0":"=m" (*ptr));
}
static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
{ {
struct gate_struct s; struct gate_struct s;
...@@ -71,6 +91,16 @@ static inline void set_system_gate_ist(int nr, void *func, unsigned ist) ...@@ -71,6 +91,16 @@ static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
} }
static inline void load_idt(const struct desc_ptr *ptr)
{
asm volatile("lidt %w0"::"m" (*ptr));
}
static inline void store_idt(struct desc_ptr *dtr)
{
asm("sidt %w0":"=m" (*dtr));
}
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
unsigned size) unsigned size)
{ {
......
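These helpers replace ad-hoc lgdt/sgdt/lidt/sidt inline assembly at call sites with named accessors, which is what the "use descriptor's functions instead of inline assembly" commit in this series is about. A hedged usage sketch; the firmware-call scenario is invented for illustration:

#include <asm/desc.h>

static struct desc_ptr saved_idt;

static void fragile_firmware_call(void)
{
	store_idt(&saved_idt);			/* sidt */
	/* ... code that may clobber the IDTR ... */
	load_idt(&saved_idt);			/* lidt */
}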
...@@ -38,6 +38,8 @@ extern int geode_get_dev_base(unsigned int dev); ...@@ -38,6 +38,8 @@ extern int geode_get_dev_base(unsigned int dev);
#define MSR_LBAR_ACPI 0x5140000E #define MSR_LBAR_ACPI 0x5140000E
#define MSR_LBAR_PMS 0x5140000F #define MSR_LBAR_PMS 0x5140000F
#define MSR_DIVIL_SOFT_RESET 0x51400017
#define MSR_PIC_YSEL_LOW 0x51400020 #define MSR_PIC_YSEL_LOW 0x51400020
#define MSR_PIC_YSEL_HIGH 0x51400021 #define MSR_PIC_YSEL_HIGH 0x51400021
#define MSR_PIC_ZSEL_LOW 0x51400022 #define MSR_PIC_ZSEL_LOW 0x51400022
......
...@@ -64,6 +64,7 @@ ...@@ -64,6 +64,7 @@
/* hpet memory map physical address */ /* hpet memory map physical address */
extern unsigned long hpet_address; extern unsigned long hpet_address;
extern unsigned long force_hpet_address; extern unsigned long force_hpet_address;
extern int hpet_force_user;
extern int is_hpet_enabled(void); extern int is_hpet_enabled(void);
extern int hpet_enable(void); extern int hpet_enable(void);
extern unsigned long hpet_readl(unsigned long a); extern unsigned long hpet_readl(unsigned long a);
......
...@@ -133,4 +133,6 @@ void enable_NMI_through_LVT0 (void * dummy); ...@@ -133,4 +133,6 @@ void enable_NMI_through_LVT0 (void * dummy);
extern spinlock_t i8259A_lock; extern spinlock_t i8259A_lock;
extern int timer_over_8254;
#endif #endif
...@@ -119,7 +119,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) ...@@ -119,7 +119,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
*/ */
local_irq_save(flags); local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) { for_each_cpu_mask(query_cpu, mask) {
__send_IPI_dest_field(x86_cpu_to_apicid[query_cpu], __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL); vector, APIC_DEST_PHYSICAL);
} }
local_irq_restore(flags); local_irq_restore(flags);
......
...@@ -45,4 +45,7 @@ unsigned int do_IRQ(struct pt_regs *regs); ...@@ -45,4 +45,7 @@ unsigned int do_IRQ(struct pt_regs *regs);
void init_IRQ(void); void init_IRQ(void);
void __init native_init_IRQ(void); void __init native_init_IRQ(void);
/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
#endif /* _ASM_IRQ_H */ #endif /* _ASM_IRQ_H */
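The new used_vectors bitmap gives subsystems a way to mark interrupt vectors as taken so that dynamic vector allocation can skip them. A minimal sketch of a reservation; the vector number is purely illustrative:

#include <linux/bitops.h>
#include <asm/irq.h>

static void reserve_example_vector(void)
{
	/* a vector set here is marked reserved for the allocator */
	set_bit(0xf9, used_vectors);
}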
...@@ -73,8 +73,32 @@ ...@@ -73,8 +73,32 @@
#define MSR_P6_EVNTSEL0 0x00000186 #define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187 #define MSR_P6_EVNTSEL1 0x00000187
/* K7/K8 MSRs. Not complete. See the architecture manual for a more /* AMD64 MSRs. Not complete. See the architecture manual for a more
complete list. */ complete list. */
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
#define MSR_AMD64_IBSOPCTL 0xc0011033
#define MSR_AMD64_IBSOPRIP 0xc0011034
#define MSR_AMD64_IBSOPDATA 0xc0011035
#define MSR_AMD64_IBSOPDATA2 0xc0011036
#define MSR_AMD64_IBSOPDATA3 0xc0011037
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSCTL 0xc001103a
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_K8_SYSCFG 0xc0010010
#define MSR_K8_HWCR 0xc0010015
#define MSR_K8_ENABLE_C1E 0xc0010055
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
/* K7 MSRs */
#define MSR_K7_EVNTSEL0 0xc0010000 #define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004 #define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_EVNTSEL1 0xc0010001 #define MSR_K7_EVNTSEL1 0xc0010001
...@@ -83,20 +107,10 @@ ...@@ -83,20 +107,10 @@
#define MSR_K7_PERFCTR2 0xc0010006 #define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_EVNTSEL3 0xc0010003 #define MSR_K7_EVNTSEL3 0xc0010003
#define MSR_K7_PERFCTR3 0xc0010007 #define MSR_K7_PERFCTR3 0xc0010007
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K7_CLK_CTL 0xc001001b #define MSR_K7_CLK_CTL 0xc001001b
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_K8_SYSCFG 0xc0010010
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
#define MSR_K7_HWCR 0xc0010015 #define MSR_K7_HWCR 0xc0010015
#define MSR_K8_HWCR 0xc0010015
#define MSR_K7_FID_VID_CTL 0xc0010041 #define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042 #define MSR_K7_FID_VID_STATUS 0xc0010042
#define MSR_K8_ENABLE_C1E 0xc0010055
/* K6 MSRs */ /* K6 MSRs */
#define MSR_K6_EFER 0xc0000080 #define MSR_K6_EFER 0xc0000080
......
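The hunk above adds the Barcelona IBS MSR numbers and regroups the K8 constants under their own heading. Reading one of them follows the standard rdmsrl() pattern; a short sketch (feature detection via CPUID is the caller's responsibility and elided here):

#include <asm/msr.h>

static u64 read_ibs_fetch_ctl(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSFETCHCTL, val);	/* 0xc0011030 */
	return val;
}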
...@@ -79,6 +79,7 @@ struct cpuinfo_x86 { ...@@ -79,6 +79,7 @@ struct cpuinfo_x86 {
unsigned char booted_cores; /* number of cores as seen by OS */ unsigned char booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical processor id. */ __u8 phys_proc_id; /* Physical processor id. */
__u8 cpu_core_id; /* Core id */ __u8 cpu_core_id; /* Core id */
__u8 cpu_index; /* index into per_cpu list */
#endif #endif
} __attribute__((__aligned__(SMP_CACHE_BYTES))); } __attribute__((__aligned__(SMP_CACHE_BYTES)));
...@@ -103,14 +104,19 @@ extern struct tss_struct doublefault_tss; ...@@ -103,14 +104,19 @@ extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss); DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[]; DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define current_cpu_data cpu_data[smp_processor_id()] #define cpu_data(cpu) per_cpu(cpu_info, cpu)
#define current_cpu_data cpu_data(smp_processor_id())
#else #else
#define cpu_data (&boot_cpu_data) #define cpu_data(cpu) boot_cpu_data
#define current_cpu_data boot_cpu_data #define current_cpu_data boot_cpu_data
#endif #endif
extern int cpu_llc_id[NR_CPUS]; /*
* the following now lives in the per cpu area:
* extern int cpu_llc_id[NR_CPUS];
*/
DECLARE_PER_CPU(u8, cpu_llc_id);
extern char ignore_fpu_irq; extern char ignore_fpu_irq;
void __init cpu_detect(struct cpuinfo_x86 *c); void __init cpu_detect(struct cpuinfo_x86 *c);
......
...@@ -74,6 +74,7 @@ struct cpuinfo_x86 { ...@@ -74,6 +74,7 @@ struct cpuinfo_x86 {
__u8 booted_cores; /* number of cores as seen by OS */ __u8 booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical Processor id. */ __u8 phys_proc_id; /* Physical Processor id. */
__u8 cpu_core_id; /* Core id. */ __u8 cpu_core_id; /* Core id. */
__u8 cpu_index; /* index into per_cpu list */
#endif #endif
} ____cacheline_aligned; } ____cacheline_aligned;
...@@ -88,11 +89,12 @@ struct cpuinfo_x86 { ...@@ -88,11 +89,12 @@ struct cpuinfo_x86 {
#define X86_VENDOR_UNKNOWN 0xff #define X86_VENDOR_UNKNOWN 0xff
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[]; DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define current_cpu_data cpu_data[smp_processor_id()] #define cpu_data(cpu) per_cpu(cpu_info, cpu)
#define current_cpu_data cpu_data(smp_processor_id())
#else #else
#define cpu_data (&boot_cpu_data) #define cpu_data(cpu) boot_cpu_data
#define current_cpu_data boot_cpu_data #define current_cpu_data boot_cpu_data
#endif #endif
extern char ignore_irq13; extern char ignore_irq13;
...@@ -390,12 +392,6 @@ static inline void sync_core(void) ...@@ -390,12 +392,6 @@ static inline void sync_core(void)
asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
} }
#define ARCH_HAS_PREFETCH
static inline void prefetch(void *x)
{
asm volatile("prefetcht0 (%0)" :: "r" (x));
}
#define ARCH_HAS_PREFETCHW 1 #define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x) static inline void prefetchw(void *x)
{ {
......
...@@ -83,8 +83,6 @@ extern unsigned tsc_khz; ...@@ -83,8 +83,6 @@ extern unsigned tsc_khz;
extern int reboot_force; extern int reboot_force;
extern int notsc_setup(char *); extern int notsc_setup(char *);
extern int timer_over_8254;
extern int gsi_irq_sharing(int gsi); extern int gsi_irq_sharing(int gsi);
extern int force_mwait; extern int force_mwait;
......
...@@ -55,6 +55,8 @@ static inline int v8086_mode(struct pt_regs *regs) ...@@ -55,6 +55,8 @@ static inline int v8086_mode(struct pt_regs *regs)
} }
#define instruction_pointer(regs) ((regs)->eip) #define instruction_pointer(regs) ((regs)->eip)
#define frame_pointer(regs) ((regs)->ebp)
#define stack_pointer(regs) ((regs)->esp)
#define regs_return_value(regs) ((regs)->eax) #define regs_return_value(regs) ((regs)->eax)
extern unsigned long profile_pc(struct pt_regs *regs); extern unsigned long profile_pc(struct pt_regs *regs);
......
...@@ -40,6 +40,8 @@ struct pt_regs { ...@@ -40,6 +40,8 @@ struct pt_regs {
#define user_mode(regs) (!!((regs)->cs & 3)) #define user_mode(regs) (!!((regs)->cs & 3))
#define user_mode_vm(regs) user_mode(regs) #define user_mode_vm(regs) user_mode(regs)
#define instruction_pointer(regs) ((regs)->rip) #define instruction_pointer(regs) ((regs)->rip)
#define frame_pointer(regs) ((regs)->rbp)
#define stack_pointer(regs) ((regs)->rsp)
#define regs_return_value(regs) ((regs)->rax) #define regs_return_value(regs) ((regs)->rax)
extern unsigned long profile_pc(struct pt_regs *regs); extern unsigned long profile_pc(struct pt_regs *regs);
......
...@@ -39,9 +39,11 @@ extern void lock_ipi_call_lock(void); ...@@ -39,9 +39,11 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void);
#define MAX_APICID 256 #define MAX_APICID 256
extern u8 x86_cpu_to_apicid[]; extern u8 __initdata x86_cpu_to_apicid_init[];
extern void *x86_cpu_to_apicid_ptr;
DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
extern void set_cpu_sibling_map(int cpu); extern void set_cpu_sibling_map(int cpu);
......
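x86_cpu_to_apicid gets the same array-to-per-CPU treatment: cpu_physical_id() now expands to a per_cpu() access, and the __initdata x86_cpu_to_apicid_init[] array only carries the mapping until the per-CPU areas are set up. A trivial sketch of a lookup, matching the ACPI hunk earlier in this diff:

#include <asm/smp.h>

static u8 apicid_of(int cpu)
{
	return cpu_physical_id(cpu);	/* per_cpu(x86_cpu_to_apicid, cpu) */
}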
...@@ -37,6 +37,8 @@ extern void lock_ipi_call_lock(void); ...@@ -37,6 +37,8 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void); extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings; extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu); extern void smp_send_reschedule(int cpu);
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
void *info, int wait);
/* /*
* cpu_sibling_map and cpu_core_map now live * cpu_sibling_map and cpu_core_map now live
...@@ -47,7 +49,7 @@ extern void smp_send_reschedule(int cpu); ...@@ -47,7 +49,7 @@ extern void smp_send_reschedule(int cpu);
*/ */
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map);
extern u8 cpu_llc_id[NR_CPUS]; DECLARE_PER_CPU(u8, cpu_llc_id);
#define SMP_TRAMPOLINE_BASE 0x6000 #define SMP_TRAMPOLINE_BASE 0x6000
...@@ -84,7 +86,9 @@ static inline int hard_smp_processor_id(void) ...@@ -84,7 +86,9 @@ static inline int hard_smp_processor_id(void)
* Some lowlevel functions might want to know about * Some lowlevel functions might want to know about
* the real APIC ID <-> CPU # mapping. * the real APIC ID <-> CPU # mapping.
*/ */
extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ extern u8 __initdata x86_cpu_to_apicid_init[];
extern void *x86_cpu_to_apicid_ptr;
DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
extern u8 bios_cpu_apicid[]; extern u8 bios_cpu_apicid[];
static inline int cpu_present_to_apicid(int mps_cpu) static inline int cpu_present_to_apicid(int mps_cpu)
...@@ -115,8 +119,9 @@ static __inline int logical_smp_processor_id(void) ...@@ -115,8 +119,9 @@ static __inline int logical_smp_processor_id(void)
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
#else #else
extern unsigned int boot_cpu_id;
#define cpu_physical_id(cpu) boot_cpu_id #define cpu_physical_id(cpu) boot_cpu_id
#endif /* !CONFIG_SMP */ #endif /* !CONFIG_SMP */
#endif #endif
......
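This declares the x86_64 side of smp_call_function_mask(), which the series implements to match i386: run a function on every CPU in a mask, optionally waiting for completion. A hedged usage sketch; the callback and counter are invented for illustration:

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/atomic.h>

static void bump_counter(void *info)
{
	atomic_inc(info);			/* runs on each CPU in the mask */
}

static void poke_other_cpus(atomic_t *ctr)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);	/* everyone but us */
	smp_call_function_mask(mask, bump_counter, ctr, 1 /* wait */);
}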
...@@ -315,5 +315,6 @@ extern unsigned long arch_align_stack(unsigned long sp); ...@@ -315,5 +315,6 @@ extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end); extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void); void default_idle(void);
void __show_registers(struct pt_regs *, int all);
#endif #endif
...@@ -28,8 +28,8 @@ ...@@ -28,8 +28,8 @@
#define _ASM_I386_TOPOLOGY_H #define _ASM_I386_TOPOLOGY_H
#ifdef CONFIG_X86_HT #ifdef CONFIG_X86_HT
#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif #endif
......
...@@ -56,8 +56,8 @@ extern int __node_distance(int, int); ...@@ -56,8 +56,8 @@ extern int __node_distance(int, int);
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define mc_capable() (boot_cpu_data.x86_max_cores > 1) #define mc_capable() (boot_cpu_data.x86_max_cores > 1)
......
...@@ -34,17 +34,12 @@ ...@@ -34,17 +34,12 @@
*/ */
/*
* These cannot be do{}while(0) macros. See the mental gymnastics in
* the loop macro.
*/
#ifndef ARCH_HAS_PREFETCH #ifndef ARCH_HAS_PREFETCH
static inline void prefetch(const void *x) {;} #define prefetch(x) __builtin_prefetch(x)
#endif #endif
#ifndef ARCH_HAS_PREFETCHW #ifndef ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *x) {;} #define prefetchw(x) __builtin_prefetch(x,1)
#endif #endif
#ifndef ARCH_HAS_SPINLOCK_PREFETCH #ifndef ARCH_HAS_SPINLOCK_PREFETCH
......
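With the generic fallbacks now defined as __builtin_prefetch(), architectures that lack an explicit prefetch instruction still get whatever hint the compiler can emit (possibly nothing), and the second argument distinguishes read (0) from write (1) intent. A small sketch of the classic list-walk use; prefetch hints do not fault, so passing the soon-to-be-NULL next pointer is safe:

struct node {
	struct node *next;
	long payload;
};

static long sum_list(struct node *head)
{
	long sum = 0;
	struct node *n;

	for (n = head; n; n = n->next) {
		prefetch(n->next);	/* start the fetch for the next node */
		sum += n->payload;
	}
	return sum;
}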