Commit f73e9f0f authored by Andi Kleen's avatar Andi Kleen Committed by Linus Torvalds

[PATCH] X86-64 merge

At least one of them is critical. It fixes a path in the IOMMU that
I broke with the earlier "fullflush" workaround.

 - Check for ~/bin/installkernel like i386 (M. Bligh) 
 - Implement 32bit RTC_IRQP_SET correctly (Lutz Vieweg)
 - Disable some useless printks in 32bit emulation
 - Warning fixes for mixed C99 style declarations/statements.
 - Sync lAPIC power management with i386
 - Use topology sysfs like i386
 - Fix some serious bugs in the MCE handler. ECC should
   be decoded correctly now.
 - Add oops=panic option to panic on Oopses.
 - Fix hackish code in head.S
 - Add missing options in IOMMU
 - Fix _syscall6 (Olaf Hering)
 - Remove broken ACPI locking code. Port IA64 C version.
 - Make safe_smp_processor_id() more reliable
 - Read HPET in vsyscall code
 - Add workaround for BIOSes that corrupt 64bit registers in HLT
 - Fix unaligned access in bitops.h
 - Remove broken ntp drift correction code for now
 - i386 merge in SCI setup
 - Fix wrong offset in callin.h (Jim Houston)
 - Minor comment fixes
parent b68e2749
......@@ -333,6 +333,11 @@ config PCI_DIRECT
depends on PCI
default y
# the drivers/pci/msi.c code needs to be fixed first before enabling
config PCI_USE_VECTOR
bool
default n
source "drivers/pci/Kconfig"
config HOTPLUG
......@@ -526,13 +531,6 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
config MCE_DEBUG
bool "K8 Machine check debugging mode"
default y
help
Turn on all Machine Check debugging for device driver problems.
This can cause panics, but is useful to find device driver problems.
#config X86_REMOTE_DEBUG
# bool "kgdb debugging stub"
......
......@@ -21,6 +21,7 @@
# User may have a custom install script
if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
# Default install - same as make zlilo
......
......@@ -59,7 +59,7 @@ static int rtc32_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
return ret;
case RTC_IRQP_SET32:
cmd = RTC_EPOCH_SET;
cmd = RTC_IRQP_SET;
break;
case RTC_EPOCH_READ32:
......
......@@ -330,10 +330,10 @@ ia32_sys_call_table:
.quad sys32_adjtimex
.quad sys32_mprotect /* 125 */
.quad compat_sys_sigprocmask
.quad sys32_module_warning /* create_module */
.quad quiet_ni_syscall /* create_module */
.quad sys_init_module
.quad sys_delete_module
.quad sys32_module_warning /* 130 get_kernel_syms */
.quad quiet_ni_syscall /* 130 get_kernel_syms */
.quad ni_syscall /* quotactl */
.quad sys_getpgid
.quad sys_fchdir
......
......@@ -1832,13 +1832,6 @@ long asmlinkage sys32_nfsservctl(int cmd, void *notused, void *notused2)
}
#endif
long sys32_module_warning(void)
{
printk(KERN_INFO "%s: 32bit 2.4.x modutils not supported on 64bit kernel\n",
current->comm);
return -ENOSYS ;
}
extern long sys_io_setup(unsigned nr_reqs, aio_context_t *ctx);
long sys32_io_setup(unsigned nr_reqs, u32 *ctx32p)
......@@ -2004,12 +1997,16 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
long sys32_vm86_warning(void)
{
struct task_struct *me = current;
static char lastcomm[8];
if (strcmp(lastcomm, me->comm)) {
printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
current->comm);
return -ENOSYS ;
me->comm);
strcpy(lastcomm, me->comm);
}
return -ENOSYS;
}
struct exec_domain ia32_exec_domain = {
.name = "linux/x86",
.pers_low = PER_LINUX32,
......
......@@ -30,10 +30,12 @@ char *syscall32_page;
int map_syscall32(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
pmd_t *pmd;
int err = 0;
down_read(&mm->mmap_sem);
spin_lock(&mm->page_table_lock);
pmd_t *pmd = pmd_alloc(mm, pgd_offset(mm, address), address);
pmd = pmd_alloc(mm, pgd_offset(mm, address), address);
if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) {
if (pte_none(*pte)) {
set_pte(pte,
......
......@@ -18,13 +18,16 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o
obj-$(CONFIG_PM) += suspend.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
obj-$(CONFIG_MODULES) += module.o
obj-y += topology.o
bootflag-y += ../../i386/kernel/bootflag.o
cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
topology-y += ../../i386/mach-default/topology.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
......@@ -42,6 +42,8 @@ static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;
static void apic_pm_activate(void);
void enable_NMI_through_LVT0 (void * dummy)
{
unsigned int v, ver;
......@@ -435,6 +437,7 @@ void __init setup_local_APIC (void)
if (nmi_watchdog == NMI_LOCAL_APIC)
setup_apic_nmi_watchdog();
apic_pm_activate();
}
#ifdef CONFIG_PM
......@@ -556,7 +559,7 @@ device_initcall(init_lapic_sysfs);
#else /* CONFIG_PM */
static inline void apic_pm_activate(void) { }
static void apic_pm_activate(void) { }
#endif /* CONFIG_PM */
......@@ -579,7 +582,6 @@ static int __init detect_init_APIC (void)
if (nmi_watchdog != NMI_NONE)
nmi_watchdog = NMI_LOCAL_APIC;
apic_pm_activate();
return 0;
}
......
......@@ -26,19 +26,6 @@ static unsigned long mce_cpus;
static int banks;
static unsigned long ignored_banks, disabled_banks;
/* Machine Check on everything dubious. This is a good setting
for device driver testing. */
#define K8_DRIVER_DEBUG ((1<<13)-1)
/* Report RAM errors and Hyper Transport Problems, but ignore Device
aborts and GART errors. */
#define K8_NORMAL_OP 0xff
#ifdef CONFIG_MCE_DEBUG
static u32 k8_nb_flags __initdata = K8_DRIVER_DEBUG;
#else
static u32 k8_nb_flags __initdata = K8_NORMAL_OP;
#endif
static void generic_machine_check(struct pt_regs * regs, long error_code)
{
int recover=1;
......@@ -200,11 +187,14 @@ static char *highbits[32] = {
static void check_k8_nb(int header)
{
struct pci_dev *nb;
u32 statuslow, statushigh;
unsigned short errcode;
int i;
nb = find_k8_nb();
if (nb == NULL)
return;
u32 statuslow, statushigh;
pci_read_config_dword(nb, 0x48, &statuslow);
pci_read_config_dword(nb, 0x4c, &statushigh);
if (!(statushigh & (1<<31)))
......@@ -215,50 +205,42 @@ static void check_k8_nb(int header)
printk(KERN_ERR "Northbridge status %08x%08x\n",
statushigh,statuslow);
unsigned short errcode = statuslow & 0xffff;
switch (errcode >> 8) {
case 0:
printk(KERN_ERR " Error %s\n", extendederr[(statuslow >> 16) & 0xf]);
errcode = statuslow & 0xffff;
switch ((statuslow >> 16) & 0xF) {
case 5:
printk(KERN_ERR " GART TLB error %s %s\n",
transaction[(errcode >> 2) & 3],
cachelevel[errcode & 3]);
break;
case 1:
if (errcode & (1<<11)) {
printk(KERN_ERR " bus error %s %s %s %s %s\n",
partproc[(errcode >> 10) & 0x3],
timeout[(errcode >> 9) & 1],
case 8:
printk(KERN_ERR " ECC error syndrome %x\n",
(((statuslow >> 24) & 0xff) << 8) | ((statushigh >> 15) & 0x7f));
/*FALL THROUGH*/
default:
printk(KERN_ERR " bus error %s, %s\n %s\n %s, %s\n",
partproc[(errcode >> 9) & 0x3],
timeout[(errcode >> 8) & 1],
memtrans[(errcode >> 4) & 0xf],
memoryio[(errcode >> 2) & 0x3],
cachelevel[(errcode & 0x3)]);
} else if (errcode & (1<<8)) {
printk(KERN_ERR " memory error %s %s %s\n",
memtrans[(errcode >> 4) & 0xf],
transaction[(errcode >> 2) & 0x3],
cachelevel[(errcode & 0x3)]);
} else {
printk(KERN_ERR " unknown error code %x\n", errcode);
}
break;
}
if (statushigh & ((1<<14)|(1<<13)))
printk(KERN_ERR " ECC syndrome bits %x\n",
(((statuslow >> 24) & 0xff) << 8) | ((statushigh >> 15) & 0x7f));
errcode = (statuslow >> 16) & 0xf;
printk(KERN_ERR " extended error %s\n", extendederr[(statuslow >> 16) & 0xf]);
/* should only print when it was a HyperTransport related error. */
printk(KERN_ERR " link number %x\n", (statushigh >> 4) & 3);
break;
}
int i;
for (i = 0; i < 32; i++)
for (i = 0; i < 32; i++) {
if (i == 26 || i == 28)
continue;
if (highbits[i] && (statushigh & (1<<i)))
printk(KERN_ERR " %s\n", highbits[i]);
}
if (statushigh & (1<<26)) {
u32 addrhigh, addrlow;
pci_read_config_dword(nb, 0x54, &addrhigh);
pci_read_config_dword(nb, 0x50, &addrlow);
printk(KERN_ERR " error address %08x%08x\n", addrhigh,addrlow);
printk(KERN_ERR " NB error address %08x%08x\n", addrhigh,addrlow);
}
statushigh &= ~(1<<31);
pci_write_config_dword(nb, 0x4c, statushigh);
......@@ -307,9 +289,6 @@ static void k8_machine_check(struct pt_regs * regs, long error_code)
wrmsrl(MSR_IA32_MC0_STATUS+4*4, 0);
wrmsrl(MSR_IA32_MCG_STATUS, 0);
if (regs && (status & (1<<1)))
printk(KERN_EMERG "MCE at RIP %lx RSP %lx\n", regs->rip, regs->rsp);
others:
generic_machine_check(regs, error_code);
......@@ -367,12 +346,13 @@ static void __init k8_mcheck_init(struct cpuinfo_x86 *c)
machine_check_vector = k8_machine_check;
for (i = 0; i < banks; i++) {
u64 val = ((1UL<<i) & disabled_banks) ? 0 : ~0UL;
if (val && i == 4)
val = k8_nb_flags;
wrmsrl(MSR_IA32_MC0_CTL+4*i, val);
wrmsrl(MSR_IA32_MC0_STATUS+4*i,0);
}
if (cap & (1<<8))
wrmsrl(MSR_IA32_MCG_CTL, 0xffffffffffffffffULL);
set_in_cr4(X86_CR4_MCE);
if (mcheck_interval && (smp_processor_id() == 0)) {
......@@ -469,7 +449,6 @@ static int __init mcheck_disable(char *str)
mce=nok8 disable k8 specific features
mce=disable<NUMBER> disable bank NUMBER
mce=enable<NUMBER> enable bank number
mce=device Enable device driver test reporting in NB
mce=NUMBER mcheck timer interval number seconds.
Can be also comma separated in a single mce= */
static int __init mcheck_enable(char *str)
......@@ -486,8 +465,6 @@ static int __init mcheck_enable(char *str)
disabled_banks |= ~(1<<simple_strtol(p+7,NULL,0));
else if (!strcmp(p,"nok8"))
nok8 = 1;
else if (!strcmp(p,"device"))
k8_nb_flags = K8_DRIVER_DEBUG;
}
return 0;
}
......
......@@ -2,10 +2,6 @@
* Handle the memory map.
* The functions here do the job until bootmem takes over.
* $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
* AK: some of these functions are not used in 2.5 yet but they will be when
* NUMA is completely merged.
*/
#include <linux/config.h>
#include <linux/kernel.h>
......
......@@ -38,6 +38,9 @@ startup_32:
movl %ebx,%ebp /* Save trampoline flag */
movl $__KERNEL_DS,%eax
movl %eax,%ds
/* If the CPU doesn't support CPUID this will double fault.
* Unfortunately it is hard to check for CPUID without a stack.
*/
......@@ -114,25 +117,11 @@ reach_compatibility_mode:
movl $(pGDT32 - __START_KERNEL_map), %eax
lgdt (%eax)
second:
movl $(ljumpvector - __START_KERNEL_map), %eax
/* Finally jump in 64bit mode */
ljmp *(%eax)
second:
/* abuse syscall to get into 64bit mode. this way we don't need
a working low identity mapping just for the short 32bit roundtrip.
XXX kludge. this should not be needed. */
movl $MSR_STAR,%ecx
xorl %eax,%eax
movl $(__USER32_CS<<16)|__KERNEL_CS,%edx
wrmsr
movl $MSR_CSTAR,%ecx
movl $0xffffffff,%edx
movl $0x80100100,%eax # reach_long64 absolute
wrmsr
syscall
.code64
.org 0x100
reach_long64:
......
......@@ -446,8 +446,10 @@ void __init init_IRQ(void)
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = 0; i < NR_IRQS; i++) {
for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
int vector = FIRST_EXTERNAL_VECTOR + i;
if (i >= NR_IRQS)
break;
if (vector != IA32_SYSCALL_VECTOR && vector != KDB_VECTOR) {
set_intr_gate(vector, interrupt[i]);
}
......
......@@ -66,6 +66,14 @@ static struct irq_pin_list {
short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
#ifdef CONFIG_PCI_USE_VECTOR
int vector_irq[NR_IRQS] = { [0 ... NR_IRQS -1] = -1};
#define vector_to_irq(vector) \
(platform_legacy_irq(vector) ? vector : vector_irq[vector])
#else
#define vector_to_irq(vector) (vector)
#endif
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
......@@ -147,6 +155,13 @@ void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
struct IO_APIC_route_entry entry;
unsigned long flags;
/* Check delivery_mode to be sure we're not clearing an SMI pin */
spin_lock_irqsave(&ioapic_lock, flags);
*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
spin_unlock_irqrestore(&ioapic_lock, flags);
if (entry.delivery_mode == dest_SMI)
return;
/*
* Disable it in the IO-APIC irq-routing table:
*/
......@@ -625,7 +640,8 @@ static inline int IO_APIC_irq_trigger(int irq)
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
static int __init assign_irq_vector(int irq)
#ifndef CONFIG_PCI_USE_VECTOR
int __init assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
BUG_ON(irq >= NR_IRQ_VECTORS);
......@@ -647,10 +663,34 @@ static int __init assign_irq_vector(int irq)
IO_APIC_VECTOR(irq) = current_vector;
return current_vector;
}
#endif
extern void (*interrupt[NR_IRQS])(void);
static struct hw_interrupt_type ioapic_level_irq_type;
static struct hw_interrupt_type ioapic_edge_irq_type;
static struct hw_interrupt_type ioapic_level_type;
static struct hw_interrupt_type ioapic_edge_type;
#define IOAPIC_AUTO -1
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{
if (use_pci_vector() && !platform_legacy_irq(irq)) {
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
irq_desc[vector].handler = &ioapic_level_type;
else
irq_desc[vector].handler = &ioapic_edge_type;
set_intr_gate(vector, interrupt[vector]);
} else {
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
irq_desc[irq].handler = &ioapic_level_type;
else
irq_desc[irq].handler = &ioapic_edge_type;
set_intr_gate(vector, interrupt[irq]);
}
}
void __init setup_IO_APIC_irqs(void)
{
......@@ -702,13 +742,7 @@ void __init setup_IO_APIC_irqs(void)
vector = assign_irq_vector(irq);
entry.vector = vector;
if (IO_APIC_irq_trigger(irq))
irq_desc[irq].handler = &ioapic_level_irq_type;
else
irq_desc[irq].handler = &ioapic_edge_irq_type;
set_intr_gate(vector, interrupt[irq]);
ioapic_register_intr(irq, vector, IOAPIC_AUTO);
if (!apic && (irq < 16))
disable_8259A_irq(irq);
}
......@@ -755,7 +789,7 @@ void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
* The timer IRQ doesn't have to know that behind the
* scene we have a 8259A-master in AEOI mode ...
*/
irq_desc[0].handler = &ioapic_edge_irq_type;
irq_desc[0].handler = &ioapic_edge_type;
/*
* Add it to the IO-APIC irq-routing table:
......@@ -1210,9 +1244,6 @@ static int __init timer_irq_works(void)
* that was delayed but this is now handled in the device
* independent code.
*/
#define enable_edge_ioapic_irq unmask_IO_APIC_irq
static void disable_edge_ioapic_irq (unsigned int irq) { /* nothing */ }
/*
* Starting up a edge-triggered IO-APIC interrupt is
......@@ -1241,8 +1272,6 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
return was_pending;
}
#define shutdown_edge_ioapic_irq disable_edge_ioapic_irq
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
......@@ -1256,9 +1285,6 @@ static void ack_edge_ioapic_irq(unsigned int irq)
ack_APIC_irq();
}
static void end_edge_ioapic_irq (unsigned int i) { /* nothing */ }
/*
* Level triggered interrupts can just be masked,
* and shutting down and starting up the interrupt
......@@ -1280,10 +1306,6 @@ static unsigned int startup_level_ioapic_irq (unsigned int irq)
return 0; /* don't check for pending */
}
#define shutdown_level_ioapic_irq mask_IO_APIC_irq
#define enable_level_ioapic_irq unmask_IO_APIC_irq
#define disable_level_ioapic_irq mask_IO_APIC_irq
static void end_level_ioapic_irq (unsigned int irq)
{
unsigned long v;
......@@ -1343,9 +1365,7 @@ static void end_level_ioapic_irq (unsigned int irq)
}
}
static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ }
static void set_ioapic_affinity (unsigned int irq, cpumask_t mask)
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
unsigned long flags;
unsigned int dest;
......@@ -1362,6 +1382,58 @@ static void set_ioapic_affinity (unsigned int irq, cpumask_t mask)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_PCI_USE_VECTOR
static unsigned int startup_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
return startup_edge_ioapic_irq(irq);
}
static void ack_edge_ioapic_vector(unsigned int vector)
{
int irq = vector_to_irq(vector);
ack_edge_ioapic_irq(irq);
}
static unsigned int startup_level_ioapic_vector (unsigned int vector)
{
int irq = vector_to_irq(vector);
return startup_level_ioapic_irq (irq);
}
static void end_level_ioapic_vector (unsigned int vector)
{
int irq = vector_to_irq(vector);
end_level_ioapic_irq(irq);
}
static void mask_IO_APIC_vector (unsigned int vector)
{
int irq = vector_to_irq(vector);
mask_IO_APIC_irq(irq);
}
static void unmask_IO_APIC_vector (unsigned int vector)
{
int irq = vector_to_irq(vector);
unmask_IO_APIC_irq(irq);
}
static void set_ioapic_affinity_vector (unsigned int vector,
cpumask_t cpu_mask)
{
int irq = vector_to_irq(vector);
set_ioapic_affinity_irq(irq, cpu_mask);
}
#endif
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
......@@ -1371,25 +1443,25 @@ static void set_ioapic_affinity (unsigned int irq, cpumask_t mask)
* races.
*/
static struct hw_interrupt_type ioapic_edge_irq_type = {
static struct hw_interrupt_type ioapic_edge_type = {
.typename = "IO-APIC-edge",
.startup = startup_edge_ioapic_irq,
.shutdown = shutdown_edge_ioapic_irq,
.enable = enable_edge_ioapic_irq,
.disable = disable_edge_ioapic_irq,
.ack = ack_edge_ioapic_irq,
.end = end_edge_ioapic_irq,
.startup = startup_edge_ioapic,
.shutdown = shutdown_edge_ioapic,
.enable = enable_edge_ioapic,
.disable = disable_edge_ioapic,
.ack = ack_edge_ioapic,
.end = end_edge_ioapic,
.set_affinity = set_ioapic_affinity,
};
static struct hw_interrupt_type ioapic_level_irq_type = {
static struct hw_interrupt_type ioapic_level_type = {
.typename = "IO-APIC-level",
.startup = startup_level_ioapic_irq,
.shutdown = shutdown_level_ioapic_irq,
.enable = enable_level_ioapic_irq,
.disable = disable_level_ioapic_irq,
.ack = mask_and_ack_level_ioapic_irq,
.end = end_level_ioapic_irq,
.startup = startup_level_ioapic,
.shutdown = shutdown_level_ioapic,
.enable = enable_level_ioapic,
.disable = disable_level_ioapic,
.ack = mask_and_ack_level_ioapic,
.end = end_level_ioapic,
.set_affinity = set_ioapic_affinity,
};
......@@ -1409,7 +1481,13 @@ static inline void init_IO_APIC_traps(void)
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
for (irq = 0; irq < NR_IRQS ; irq++) {
if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq)) {
int tmp = irq;
if (use_pci_vector()) {
if (!platform_legacy_irq(tmp))
if ((tmp = vector_to_irq(tmp)) == -1)
continue;
}
if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
/*
* Hmm.. We don't have an entry for this,
* so default to an old-fashioned 8259
......@@ -1837,10 +1915,12 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
edge_level, active_high_low);
if (use_pci_vector() && !platform_legacy_irq(irq))
irq = IO_APIC_VECTOR(irq);
if (edge_level) {
irq_desc[irq].handler = &ioapic_level_irq_type;
irq_desc[irq].handler = &ioapic_level_type;
} else {
irq_desc[irq].handler = &ioapic_edge_irq_type;
irq_desc[irq].handler = &ioapic_edge_type;
}
set_intr_gate(entry.vector, interrupt[irq]);
......
......@@ -408,6 +408,20 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
return 1;
}
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
struct irqaction *action;
if (irq >= NR_IRQS)
return 0;
action = irq_desc[irq].action;
if (action) {
if (irqflags & action->flags & SA_SHIRQ)
action = NULL;
}
return !action;
}
/**
* request_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
......
......@@ -881,7 +881,6 @@ void __init mp_parse_prt (void)
{
struct list_head *node = NULL;
struct acpi_prt_entry *entry = NULL;
int vector = 0;
int ioapic = -1;
int ioapic_pin = 0;
int irq = 0;
......@@ -933,20 +932,22 @@ void __init mp_parse_prt (void)
if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
printk(KERN_DEBUG "Pin %d-%d already programmed\n",
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
if (use_pci_vector() && !platform_legacy_irq(irq))
irq = IO_APIC_VECTOR(irq);
entry->irq = irq;
continue;
}
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
vector = io_apic_set_pci_routing(ioapic, ioapic_pin, irq, edge_level, active_high_low);
if (vector)
if (!io_apic_set_pci_routing(ioapic, ioapic_pin, irq, edge_level, active_high_low)) {
if (use_pci_vector() && !platform_legacy_irq(irq))
irq = IO_APIC_VECTOR(irq);
entry->irq = irq;
printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> vector 0x%02x"
}
printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d"
" -> IRQ %d\n", entry->id.segment, entry->id.bus,
entry->id.device, ('A' + entry->pin),
mp_ioapic_routing[ioapic].apic_id, ioapic_pin, vector,
mp_ioapic_routing[ioapic].apic_id, ioapic_pin,
entry->irq);
}
......
......@@ -52,6 +52,8 @@ int iommu_merge = 0;
int iommu_sac_force = 0;
int iommu_fullflush = 1;
#define MAX_NB 8
/* Allocation bitmap for the remapping area */
static spinlock_t iommu_bitmap_lock = SPIN_LOCK_UNLOCKED;
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
......@@ -71,8 +73,8 @@ static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
if (dev->bus->number == 0 && \
(PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
static struct pci_dev *northbridges[NR_CPUS + 1];
static u32 northbridge_flush_word[NR_CPUS + 1];
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];
#define EMERGENCY_PAGES 32 /* = 128KB */
......@@ -107,6 +109,8 @@ static unsigned long alloc_iommu(int size)
need_flush = 1;
}
}
if (iommu_fullflush)
need_flush = 1;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
return offset;
}
......@@ -135,9 +139,11 @@ static void flush_gart(struct pci_dev *dev)
int i;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
if (need_flush || iommu_fullflush) {
for (i = 0; northbridges[i]; i++) {
if (need_flush) {
for (i = 0; i < MAX_NB; i++) {
u32 w;
if (!northbridges[i])
continue;
if (bus >= 0 && !(cpu_isset_const(i, bus_cpumask)))
continue;
pci_write_config_dword(northbridges[i], 0x9c,
......@@ -767,10 +773,9 @@ static int __init pci_iommu_init(void)
for_all_nb(dev) {
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;
if (cpu >= NR_CPUS)
if (cpu >= MAX_NB)
continue;
northbridges[cpu] = dev;
pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
northbridge_flush_word[cpu] = flag;
}
......@@ -783,7 +788,8 @@ static int __init pci_iommu_init(void)
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]]
/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
[,forcesac][,fullflush][,nomerge]
size set size of iommu (in bytes)
noagp don't initialize the AGP driver and use full aperture.
off don't use the IOMMU
......@@ -791,6 +797,10 @@ fs_initcall(pci_iommu_init);
memaper[=order] allocate an own aperture over RAM with size 32MB^order.
noforce don't force IOMMU usage. Default.
force Force IOMMU.
merge Do SG merging. Implies force (experimental)
nomerge Don't do SG merging.
forcesac For SAC mode for masks <40bits (experimental)
fullflush Flush IOMMU on each allocation (for testing)
*/
__init int iommu_setup(char *opt)
{
......@@ -804,8 +814,10 @@ __init int iommu_setup(char *opt)
no_iommu = 1;
if (!memcmp(p,"force", 5))
force_iommu = 1;
if (!memcmp(p,"noforce", 7))
if (!memcmp(p,"noforce", 7)) {
iommu_merge = 0;
force_iommu = 0;
}
if (!memcmp(p, "memaper", 7)) {
fallback_aper_force = 1;
p += 7;
......@@ -816,6 +828,16 @@ __init int iommu_setup(char *opt)
panic_on_overflow = 1;
if (!memcmp(p, "nopanic", 7))
panic_on_overflow = 0;
if (!memcmp(p, "merge", 5)) {
iommu_merge = 1;
force_iommu = 1;
}
if (!memcmp(p, "nomerge", 7))
iommu_merge = 0;
if (!memcmp(p, "forcesac", 8))
iommu_sac_force = 1;
if (!memcmp(p, "fullflush", 9))
iommu_fullflush = 1;
#ifdef CONFIG_IOMMU_LEAK
if (!memcmp(p,"leak", 4)) {
leak_trace = 1;
......
......@@ -152,6 +152,19 @@ static int __init idle_setup (char *str)
__setup("idle=", idle_setup);
void idle_warning(void)
{
static int warned;
if (warned)
return;
warned = 1;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
BUG();
printk(KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n");
printk(KERN_ERR "******* Working around it, but it will cost you a lot of power\n");
printk(KERN_ERR "******* Please consider a BIOS update.\n");
printk(KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n");
}
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
......
......@@ -237,6 +237,9 @@ static __init void parse_cmdline_early (char ** cmdline_p)
}
#endif
if (!memcmp(from,"oops=panic", 10))
panic_on_oops = 1;
next_char:
c = *(from++);
if (!c)
......@@ -332,6 +335,7 @@ __setup("noreplacement", noreplacement_setup);
void __init setup_arch(char **cmdline_p)
{
unsigned long low_mem_size;
unsigned long kernel_end;
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
drive_info = DRIVE_INFO;
......@@ -380,7 +384,6 @@ void __init setup_arch(char **cmdline_p)
(table_end - table_start) << PAGE_SHIFT);
/* reserve kernel */
unsigned long kernel_end;
kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
......
......@@ -111,13 +111,14 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec = xtime.tv_nsec / 1000;
/*
* If time_adjust is negative then NTP is slowing the clock
* so make sure not to go into next possible interval.
* Better to lose some accuracy than have time go backwards..
*/
if (unlikely(time_adjust < 0) && usec > tickadj)
usec = tickadj;
/* i386 does some correction here to keep the clock
monotonus even when ntpd is fixing drift.
But they didn't work for me, there is a non monotonic
clock anyways with ntp.
I dropped all corrections now until a real solution can
be found. Note when you fix it here you need to do the same
in arch/x86_64/kernel/vsyscall.c and export all needed
variables in vmlinux.lds. -AK */
t = (jiffies - wall_jiffies) * (1000000L / HZ) +
do_gettimeoffset();
......@@ -592,6 +593,7 @@ static int hpet_init(void)
if (!vxtime.hpet_address)
return -1;
set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
/*
* Read the period, compute tick and quotient.
......
......@@ -345,6 +345,8 @@ void oops_end(void)
bust_spinlocks(0);
spin_unlock(&die_lock);
local_irq_enable(); /* make sure back scroll still works */
if (panic_on_oops)
panic("Oops");
}
void __die(const char * str, struct pt_regs * regs, long err)
......@@ -353,8 +355,8 @@ void __die(const char * str, struct pt_regs * regs, long err)
printk(KERN_EMERG "%s: %04lx [%u]\n", str, err & 0xffff,++die_counter);
notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
show_registers(regs);
/* Execute summary in case the oops scrolled away */
printk(KERN_EMERG "RIP ");
/* Executive summary in case the oops scrolled away */
printk("RIP ");
printk_address(regs->rip);
printk(" RSP <%016lx>\n", regs->rsp);
}
......@@ -848,3 +850,11 @@ void __init trap_init(void)
cpu_init();
}
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
panic_on_oops = 1;
return -1;
}
__setup("oops=", oops_dummy);
......@@ -49,6 +49,7 @@
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define force_inline __attribute__((always_inline)) inline
......@@ -88,11 +89,10 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
if (t < __vxtime.last_tsc) t = __vxtime.last_tsc;
usec += ((t - __vxtime.last_tsc) *
__vxtime.tsc_quot) >> 32;
/* See comment in x86_64 do_gettimeopfday. */
} else {
#if 0
usec += ((readl(fix_to_virt(VSYSCALL_HPET) + 0xf0) -
__vxtime.last) * __vxtime.quot) >> 32;
#endif
}
} while (read_seqretry(&__xtime_lock, sequence));
......
......@@ -378,7 +378,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
printk_address(regs->rip);
dump_pagetable(address);
__die("Oops", regs, error_code);
/* Execute summary in case the body of the oops scrolled away */
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
oops_end();
do_exit(SIGKILL);
......
......@@ -52,40 +52,36 @@
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
/*
* A brief explanation as GNU inline assembly is a bit hairy
* %0 is the output parameter in RAX ("=a")
* %1 and %2 are the input parameters in RCX ("c")
* and an immediate value ("i") respectively
* All actual register references are preceded with "%%" as in "%%edx"
* Immediate values in the assembly are preceded by "$" as in "$0x1"
* The final asm parameter are the operation altered non-output registers.
*/
static inline int
__acpi_acquire_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = cmpxchg4_locked(lock, new, old);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
__acpi_release_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = cmpxchg4_locked(lock, new, old);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
do { \
unsigned long dummy; \
asm("1: movl (%2),%%eax;" \
"movl %%eax,%%edx;" \
"andq %2,%%rdx;" \
"btsl $0x1,%%edx;" \
"adcl $0x0,%%edx;" \
"lock; cmpxchgl %%edx,(%1);" \
"jnz 1b;" \
"cmpb $0x3,%%dl;" \
"sbbl %%eax,%%eax" \
:"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~1L):"dx"); \
} while(0)
((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr))
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
do { \
unsigned long dummy; \
asm("1: movl (%2),%%eax;" \
"movl %%eax,%%edx;" \
"andq %2,%%rdx;" \
"lock; cmpxchgl %%edx,(%1);" \
"jnz 1b;" \
"andl $0x1,%%eax" \
:"=a"(Acq),"=c"(dummy):"c"(GLptr),"i"(~3L):"dx"); \
} while(0)
((Acq) = __acpi_release_global_lock((unsigned int *) GLptr))
/*
* Math helper asm macros
......
......@@ -7,14 +7,6 @@
#include <linux/config.h>
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
......@@ -363,26 +355,26 @@ static __inline__ int find_first_bit(const unsigned long * addr, unsigned size)
*/
static __inline__ int find_next_bit(const unsigned long * addr, int size, int offset)
{
unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
int set = 0, bit = offset & 31, res;
const unsigned long * p = addr + (offset >> 6);
unsigned long set = 0, bit = offset & 63, res;
if (bit) {
/*
* Look for nonzero in the first 32 bits:
* Look for nonzero in the first 64 bits:
*/
__asm__("bsfl %1,%0\n\t"
"cmovel %2,%0\n\t"
__asm__("bsfq %1,%0\n\t"
"cmoveq %2,%0\n\t"
: "=r" (set)
: "r" (*p >> bit), "r" (32));
if (set < (32 - bit))
: "r" (*p >> bit), "r" (64L));
if (set < (64 - bit))
return set + offset;
set = 32 - bit;
set = 64 - bit;
p++;
}
/*
* No set bit yet, search remaining full words for a bit
*/
res = find_first_bit ((const unsigned long *)p, size - 32 * (p - (unsigned int *) addr));
res = find_first_bit (p, size - 64 * (p - addr));
return (offset + set + res);
}
......
......@@ -8,7 +8,7 @@
/* Byte offsets of the saved registers in the kernel stack frame
 * (struct pt_regs).  All offsets must be 8-byte aligned on x86-64;
 * the stale conflicting "#define RBP 36" has been dropped. */
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
/* arguments: interrupts/non tracing syscalls only save upto here*/
#define R11 48
......
#include <asm-i386/cpu.h>
......@@ -72,7 +72,7 @@ struct hw_interrupt_type;
* levels. (0x80 is the syscall vector)
*/
/* Interrupt vector space handed to devices; 0xf0..0xff are reserved
 * for system vectors (0x80 is the syscall vector). */
#define FIRST_DEVICE_VECTOR 0x31
#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
#ifndef __ASSEMBLY__
......
......@@ -13,6 +13,46 @@
#ifdef CONFIG_X86_IO_APIC
/*
 * Select the IO-APIC operation set.  With CONFIG_PCI_USE_VECTOR the
 * generic IRQ layer works directly on vector numbers, otherwise on
 * irq numbers; the generic names (startup_level_ioapic etc.) map to
 * the corresponding *_vector or *_irq implementations, which are
 * defined elsewhere.  The static inlines below are intentional
 * no-op stubs for operations that need no action in that mode.
 */
#ifdef CONFIG_PCI_USE_VECTOR
/* Vector-based mode: report it, and stub the no-op operations. */
static inline int use_pci_vector(void) {return 1;}
static inline void disable_edge_ioapic_vector(unsigned int vector) { }
static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
static inline void end_edge_ioapic_vector (unsigned int vector) { }
#define startup_level_ioapic startup_level_ioapic_vector
#define shutdown_level_ioapic mask_IO_APIC_vector
#define enable_level_ioapic unmask_IO_APIC_vector
#define disable_level_ioapic mask_IO_APIC_vector
#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
#define end_level_ioapic end_level_ioapic_vector
#define set_ioapic_affinity set_ioapic_affinity_vector
#define startup_edge_ioapic startup_edge_ioapic_vector
#define shutdown_edge_ioapic disable_edge_ioapic_vector
#define enable_edge_ioapic unmask_IO_APIC_vector
#define disable_edge_ioapic disable_edge_ioapic_vector
#define ack_edge_ioapic ack_edge_ioapic_vector
#define end_edge_ioapic end_edge_ioapic_vector
#else
/* Classic irq-number mode: same mapping, *_irq implementations. */
static inline int use_pci_vector(void) {return 0;}
static inline void disable_edge_ioapic_irq(unsigned int irq) { }
static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
static inline void end_edge_ioapic_irq (unsigned int irq) { }
#define startup_level_ioapic startup_level_ioapic_irq
#define shutdown_level_ioapic mask_IO_APIC_irq
#define enable_level_ioapic unmask_IO_APIC_irq
#define disable_level_ioapic mask_IO_APIC_irq
#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq
#define end_level_ioapic end_level_ioapic_irq
#define set_ioapic_affinity set_ioapic_affinity_irq
#define startup_edge_ioapic startup_edge_ioapic_irq
#define shutdown_edge_ioapic disable_edge_ioapic_irq
#define enable_edge_ioapic unmask_IO_APIC_irq
#define disable_edge_ioapic disable_edge_ioapic_irq
#define ack_edge_ioapic ack_edge_ioapic_irq
#define end_edge_ioapic end_edge_ioapic_irq
#endif
#define APIC_MISMATCH_DEBUG
#define IO_APIC_BASE(idx) \
......@@ -174,24 +214,7 @@ extern int sis_apic_bug; /* dummy */
#define io_apic_assign_pci_irqs 0
#endif
/*
 * NOTE(review): a second copy of the use_pci_vector()/ioapic stub set
 * stood here; it exactly duplicated the CONFIG_PCI_USE_VECTOR #else
 * branch above and would redefine the static inline functions
 * whenever that option is off.  Only the declarations are kept.
 */
extern int assign_irq_vector(int irq);
void enable_NMI_through_LVT0 (void * dummy);
......
......@@ -21,8 +21,23 @@
* Since vectors 0x00-0x1f are used/reserved for the CPU,
* the usable vector space is 0x20-0xff (224 vectors)
*/
/*
 * The maximum number of vectors supported by x86_64 processors
 * is limited to 256. For processors other than x86_64, NR_VECTORS
 * should be changed accordingly.
 */
#define NR_VECTORS 256
/* Vectors 0xf0-0xff are reserved for system use; keep both copies
 * of this constant in sync. */
#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
#ifdef CONFIG_PCI_USE_VECTOR
/* Vector-based IRQ handling: irq numbers are vector numbers, so the
 * usable range runs up to the first system vector. */
#define NR_IRQS FIRST_SYSTEM_VECTOR
#define NR_IRQ_VECTORS NR_IRQS
#else
/* Classic mode: 224 usable vectors (0x20-0xff). */
#define NR_IRQS 224
#define NR_IRQ_VECTORS NR_IRQS
#endif
static __inline__ int irq_canonicalize(int irq)
{
......@@ -32,6 +47,7 @@ static __inline__ int irq_canonicalize(int irq)
extern void disable_irq(unsigned int);
extern void disable_irq_nosync(unsigned int);
extern void enable_irq(unsigned int);
extern int can_request_irq(unsigned int, unsigned long flags);
#ifdef CONFIG_X86_LOCAL_APIC
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
......
#include <asm-i386/memblk.h>
#include <asm-i386/node.h>
......@@ -180,6 +180,8 @@ static inline void set_pml4(pml4_t *dst, pml4_t val)
(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_VSYSCALL \
(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE \
(__PAGE_KERNEL | _PAGE_PSE)
......@@ -191,6 +193,7 @@ static inline void set_pml4(pml4_t *dst, pml4_t val)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
/* xwr */
#define __P000 PAGE_NONE
......
......@@ -74,7 +74,7 @@ extern __inline int hard_smp_processor_id(void)
return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
/*
 * safe_smp_processor_id(): CPU id lookup usable from NMI/crash
 * contexts.  Returns 0 when the local APIC is disabled, otherwise the
 * hardware APIC ID.  The stale duplicate definition via
 * cpuid_ebx(1)>>24 is dropped: it read the *initial* APIC ID, which
 * need not match the kernel's CPU numbering.
 */
#define safe_smp_processor_id() (disable_apic ? 0 : hard_smp_processor_id())
#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
#endif /* !ASSEMBLY */
......
......@@ -276,6 +276,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
(unsigned long)(n),sizeof(*(ptr))))
/*
 * Atomically compare-and-exchange a 32-bit value.
 * If *ptr == old, store new into *ptr.  Returns the value *ptr held
 * before the operation (== old exactly when the exchange succeeded).
 *
 * CMPXCHG implicitly compares against and writes back %eax, so the
 * comparand must be pinned to the "a" register.  The previous
 * constraints left %eax unconstrained (comparing against garbage),
 * passed the comparand where the replacement value belongs, and
 * returned `new` unmodified instead of the old memory value.
 */
static inline __u32 cmpxchg4_locked(__u32 *ptr, __u32 old, __u32 new)
{
	asm volatile("lock ; cmpxchgl %2,%1"
		     : "=a" (old), "+m" (*ptr)
		     : "r" (new), "0" (old)
		     : "memory");
	return old;
}
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
......@@ -314,7 +321,21 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
/*
 * Work around BIOS that don't have K8 Errata #93 fixed: firmware can
 * corrupt 64-bit registers around HLT (see HEAD commit notes), which
 * may fault on resume.  Recover through the exception fixup table and
 * report via idle_warning().  The stale duplicate plain "sti; hlt"
 * definition is dropped (conflicting macro redefinition).
 */
#define safe_halt() \
	asm volatile(" sti\n" \
		     "1: hlt\n" \
		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3: call idle_warning\n" \
		     " jmp 2b\n" \
		     ".previous\n" \
		     ".section __ex_table,\"a\"\n\t" \
		     ".align 8\n\t" \
		     ".quad 1b,3b\n" \
		     ".previous" ::: "memory")
#define irqs_disabled() \
({ \
unsigned long flags; \
......
......@@ -623,11 +623,11 @@ __syscall_return(type,__res); \
/* _syscall6 body: args 1-3 go in rdi/rsi/rdx via constraints; args
 * 4-6 are moved into r10/r8/r9 per the x86-64 syscall ABI.  Diff
 * residue kept the broken old constraint lines (misplaced parens,
 * missing ";" before __syscall); only the fixed lines remain. */ \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
{ \
long __res; \
__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
	: "=a" (__res) \
	: "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
	  "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
	  "g" ((long)(arg6)) : \
	  __syscall_clobber,"r8","r10","r9" ); \
__syscall_return(type,__res); \
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment