Commit f5ad1a78 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
  [PATCH] x86-64: Fix race in exit_idle
  [PATCH] x86-64: Fix vgetcpu when CONFIG_HOTPLUG_CPU is disabled
  [PATCH] x86: Add acpi_user_timer_override option for Asus boards
  [PATCH] x86-64: setup saved_max_pfn correctly (kdump)
  [PATCH] x86-64: Handle reserve_bootmem_generic beyond end_pfn
  [PATCH] x86-64: shorten the x86_64 boot setup GDT to what the comment says
  [PATCH] x86-64: Fix PTRACE_[SG]ET_THREAD_AREA regression with ia32 emulation.
  [PATCH] x86-64: Fix partial page check to ensure unusable memory is not being marked usable.
  Revert "[PATCH] MMCONFIG and new Intel motherboards"
parents 9a3a04ac 9446868b
...@@ -164,6 +164,10 @@ and is between 256 and 4096 characters. It is defined in the file ...@@ -164,6 +164,10 @@ and is between 256 and 4096 characters. It is defined in the file
acpi_skip_timer_override [HW,ACPI] acpi_skip_timer_override [HW,ACPI]
Recognize and ignore IRQ0/pin2 Interrupt Override. Recognize and ignore IRQ0/pin2 Interrupt Override.
For broken nForce2 BIOS resulting in XT-PIC timer. For broken nForce2 BIOS resulting in XT-PIC timer.
acpi_use_timer_override [HW,ACPI]
Use timer override. For some broken Nvidia NF5 boards
that require a timer override, but don't have
HPET
acpi_dbg_layer= [HW,ACPI] acpi_dbg_layer= [HW,ACPI]
Format: <int> Format: <int>
......
...@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict); ...@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
acpi_interrupt_flags acpi_sci_flags __initdata; acpi_interrupt_flags acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata; int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata; int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
...@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg) ...@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
return 0; return 0;
} }
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override); early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
/*
 * Kernel command line: "acpi_use_timer_override".
 * Forces use of the ACPI IRQ0/pin2 timer override even on Nvidia
 * boards where it would otherwise be ignored (some NF5 boards
 * without HPET need it -- see the check_bridge() hunk above).
 * arg is unused; the option takes no value. Always returns 0 (ok).
 */
static int __init parse_acpi_use_timer_override(char *arg)
{
acpi_use_timer_override = 1;
return 0;
}
/* Register as an early_param so it is parsed before ACPI table setup. */
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */ #endif /* CONFIG_X86_IO_APIC */
static int __init setup_acpi_sci(char *s) static int __init setup_acpi_sci(char *s)
......
...@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device) ...@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
/* According to Nvidia all timer overrides are bogus unless HPET /* According to Nvidia all timer overrides are bogus unless HPET
is enabled. */ is enabled. */
if (vendor == PCI_VENDOR_ID_NVIDIA) { if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
nvidia_hpet_detected = 0; nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check); acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) { if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1; acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
"timer override.\n");
printk(KERN_INFO "If you got timer trouble "
"try acpi_use_timer_override\n");
} }
} }
#endif #endif
......
...@@ -836,13 +836,12 @@ gdt: ...@@ -836,13 +836,12 @@ gdt:
.word 0x9200 # data read/write .word 0x9200 # data read/write
.word 0x00CF # granularity = 4096, 386 .word 0x00CF # granularity = 4096, 386
# (+5th nibble of limit) # (+5th nibble of limit)
gdt_end:
idt_48: idt_48:
.word 0 # idt limit = 0 .word 0 # idt limit = 0
.word 0, 0 # idt base = 0L .word 0, 0 # idt base = 0L
gdt_48: gdt_48:
.word 0x8000 # gdt limit=2048, .word gdt_end-gdt-1 # gdt limit
# 256 GDT entries
.word 0, 0 # gdt base (filled in later) .word 0, 0 # gdt base (filled in later)
# Include video setup & detection code # Include video setup & detection code
......
...@@ -244,6 +244,8 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) ...@@ -244,6 +244,8 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_DETACH: case PTRACE_DETACH:
case PTRACE_SYSCALL: case PTRACE_SYSCALL:
case PTRACE_SETOPTIONS: case PTRACE_SETOPTIONS:
case PTRACE_SET_THREAD_AREA:
case PTRACE_GET_THREAD_AREA:
return sys_ptrace(request, pid, addr, data); return sys_ptrace(request, pid, addr, data);
default: default:
......
...@@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn, ...@@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
>> PAGE_SHIFT; >> PAGE_SHIFT;
/* Skip map entries smaller than a page */ /* Skip map entries smaller than a page */
if (ei_startpfn > ei_endpfn) if (ei_startpfn >= ei_endpfn)
continue; continue;
/* Check if end_pfn_map should be updated */ /* Check if end_pfn_map should be updated */
...@@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p) ...@@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p)
* size before original memory map is * size before original memory map is
* reset. * reset.
*/ */
e820_register_active_regions(0, 0, -1UL);
saved_max_pfn = e820_end_of_ram(); saved_max_pfn = e820_end_of_ram();
remove_all_active_ranges();
#endif #endif
end_pfn_map = 0; end_pfn_map = 0;
e820.nr_map = 0; e820.nr_map = 0;
......
...@@ -45,7 +45,13 @@ static void nvidia_bugs(void) ...@@ -45,7 +45,13 @@ static void nvidia_bugs(void)
/* /*
* All timer overrides on Nvidia are * All timer overrides on Nvidia are
* wrong unless HPET is enabled. * wrong unless HPET is enabled.
* Unfortunately that's not true on many Asus boards.
* We don't know yet how to detect this automatically, but
* at least allow a command line override.
*/ */
if (acpi_use_timer_override)
return;
nvidia_hpet_detected = 0; nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check); acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) { if (nvidia_hpet_detected == 0) {
...@@ -53,6 +59,8 @@ static void nvidia_bugs(void) ...@@ -53,6 +59,8 @@ static void nvidia_bugs(void)
printk(KERN_INFO "Nvidia board " printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI " "detected. Ignoring ACPI "
"timer override.\n"); "timer override.\n");
printk(KERN_INFO "If you got timer trouble "
"try acpi_use_timer_override\n");
} }
#endif #endif
/* RED-PEN skip them on mptables too? */ /* RED-PEN skip them on mptables too? */
......
...@@ -88,9 +88,8 @@ void enter_idle(void) ...@@ -88,9 +88,8 @@ void enter_idle(void)
static void __exit_idle(void) static void __exit_idle(void)
{ {
if (read_pda(isidle) == 0) if (test_and_clear_bit_pda(0, isidle) == 0)
return; return;
write_pda(isidle, 0);
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
} }
......
...@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, ...@@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
/* prevent preemption and reschedule on another processor */ /* prevent preemption and reschedule on another processor */
int me = get_cpu(); int me = get_cpu();
if (cpu == me) { if (cpu == me) {
WARN_ON(1);
put_cpu(); put_cpu();
return -EBUSY; return 0;
} }
spin_lock_bh(&call_lock); spin_lock_bh(&call_lock);
__smp_call_function_single(cpu, func, info, nonatomic, wait); __smp_call_function_single(cpu, func, info, nonatomic, wait);
......
...@@ -876,15 +876,6 @@ static struct irqaction irq0 = { ...@@ -876,15 +876,6 @@ static struct irqaction irq0 = {
timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
}; };
/*
 * CPU hotplug callback (removed by this patch): on CPU_ONLINE,
 * program the vgetcpu data for the newly-onlined CPU.
 * NOTE(review): the notifier runs on the CPU doing the hotplug, not
 * necessarily hcpu itself -- the replacement in vsyscall.c uses
 * smp_call_function_single() to run on the target CPU instead.
 */
static int __cpuinit
time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned cpu = (unsigned long) hcpu;
if (action == CPU_ONLINE)
vsyscall_set_cpu(cpu);
return NOTIFY_DONE;
}
void __init time_init(void) void __init time_init(void)
{ {
if (nohpet) if (nohpet)
...@@ -925,8 +916,6 @@ void __init time_init(void) ...@@ -925,8 +916,6 @@ void __init time_init(void)
vxtime.last_tsc = get_cycles_sync(); vxtime.last_tsc = get_cycles_sync();
set_cyc2ns_scale(cpu_khz); set_cyc2ns_scale(cpu_khz);
setup_irq(0, &irq0); setup_irq(0, &irq0);
hotcpu_notifier(time_cpu_notifier, 0);
time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
time_init_gtod(); time_init_gtod();
......
...@@ -27,6 +27,9 @@ ...@@ -27,6 +27,9 @@
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/getcpu.h> #include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <asm/vsyscall.h> #include <asm/vsyscall.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = { ...@@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
#endif #endif
static void __cpuinit write_rdtscp_cb(void *info) /* Assume __initcall executes before all user space. Hopefully kmod
{ doesn't violate that. We'll find out if it does. */
write_rdtscp_aux((unsigned long)info); static void __cpuinit vsyscall_set_cpu(int cpu)
}
void __cpuinit vsyscall_set_cpu(int cpu)
{ {
unsigned long *d; unsigned long *d;
unsigned long node = 0; unsigned long node = 0;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
node = cpu_to_node[cpu]; node = cpu_to_node[cpu];
#endif #endif
if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) { if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
void *info = (void *)((node << 12) | cpu); write_rdtscp_aux((node << 12) | cpu);
/* Can happen on preemptive kernel */
if (get_cpu() == cpu)
write_rdtscp_cb(info);
#ifdef CONFIG_SMP
else {
/* the notifier is unfortunately not executed on the
target CPU */
smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
}
#endif
put_cpu();
}
/* Store cpu number in limit so that it can be loaded quickly /* Store cpu number in limit so that it can be loaded quickly
in user space in vgetcpu. in user space in vgetcpu.
...@@ -280,6 +268,21 @@ void __cpuinit vsyscall_set_cpu(int cpu) ...@@ -280,6 +268,21 @@ void __cpuinit vsyscall_set_cpu(int cpu)
*d |= (node >> 4) << 48; *d |= (node >> 4) << 48;
} }
/*
 * Per-CPU vgetcpu setup helper; runs on the target CPU itself
 * (invoked via on_each_cpu() at boot and smp_call_function_single()
 * from the hotplug notifier below). arg is unused.
 */
static void __cpuinit cpu_vsyscall_init(void *arg)
{
/* preemption should be already off */
vsyscall_set_cpu(raw_smp_processor_id());
}
/*
 * CPU hotplug callback: when a CPU comes online, run
 * cpu_vsyscall_init() on that CPU (the notifier itself executes on
 * the CPU performing the hotplug, hence the cross-call). This fixes
 * vgetcpu when CONFIG_HOTPLUG_CPU is disabled, per the commit log.
 */
static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
long cpu = (long)arg;
if (action == CPU_ONLINE)
smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
return NOTIFY_DONE;
}
static void __init map_vsyscall(void) static void __init map_vsyscall(void)
{ {
extern char __vsyscall_0; extern char __vsyscall_0;
...@@ -299,6 +302,8 @@ static int __init vsyscall_init(void) ...@@ -299,6 +302,8 @@ static int __init vsyscall_init(void)
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
register_sysctl_table(kernel_root_table2, 0); register_sysctl_table(kernel_root_table2, 0);
#endif #endif
on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
hotcpu_notifier(cpu_vsyscall_notifier, 0);
return 0; return 0;
} }
......
...@@ -655,9 +655,22 @@ void free_initrd_mem(unsigned long start, unsigned long end) ...@@ -655,9 +655,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void __init reserve_bootmem_generic(unsigned long phys, unsigned len) void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{ {
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
int nid = phys_to_nid(phys); int nid = phys_to_nid(phys);
#endif
unsigned long pfn = phys >> PAGE_SHIFT;
if (pfn >= end_pfn) {
/* This can happen with kdump kernels when accessing firmware
tables. */
if (pfn < end_pfn_map)
return;
printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
phys, len);
return;
}
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
reserve_bootmem_node(NODE_DATA(nid), phys, len); reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else #else
reserve_bootmem(phys, len); reserve_bootmem(phys, len);
......
...@@ -163,37 +163,6 @@ static __init void unreachable_devices(void) ...@@ -163,37 +163,6 @@ static __init void unreachable_devices(void)
} }
} }
/*
 * (Removed by the revert of "MMCONFIG and new Intel motherboards".)
 * Registered each MMCONFIG aperture from pci_mmcfg_config[] as a busy
 * iomem resource named "PCI MMCONFIG <segment>". One kcalloc() block
 * holds both the resource array and the name strings; each aperture
 * spans (end_bus - start_bus + 1) << 20 bytes (1 MiB per bus).
 */
static __init void pci_mmcfg_insert_resources(void)
{
#define PCI_MMCFG_RESOURCE_NAME_LEN 19
int i;
struct resource *res;
char *names;
unsigned num_buses;
/* One allocation: pci_mmcfg_config_num resources, names appended after. */
res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
pci_mmcfg_config_num, GFP_KERNEL);
if (!res) {
printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
return;
}
/* Name buffer lives directly after the resource array. */
names = (void *)&res[pci_mmcfg_config_num];
for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
num_buses = pci_mmcfg_config[i].end_bus_number -
pci_mmcfg_config[i].start_bus_number + 1;
res->name = names;
snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u",
pci_mmcfg_config[i].pci_segment_group_number);
res->start = pci_mmcfg_config[i].base_address;
res->end = res->start + (num_buses << 20) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
names += PCI_MMCFG_RESOURCE_NAME_LEN;
}
}
void __init pci_mmcfg_init(int type) void __init pci_mmcfg_init(int type)
{ {
int i; int i;
...@@ -237,7 +206,6 @@ void __init pci_mmcfg_init(int type) ...@@ -237,7 +206,6 @@ void __init pci_mmcfg_init(int type)
} }
unreachable_devices(); unreachable_devices();
pci_mmcfg_insert_resources();
raw_pci_ops = &pci_mmcfg; raw_pci_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF; pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
......
...@@ -132,6 +132,7 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); ...@@ -132,6 +132,7 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
extern int acpi_skip_timer_override; extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
#endif #endif
static inline void acpi_noirq_set(void) { acpi_noirq = 1; } static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
......
...@@ -163,6 +163,7 @@ extern u8 x86_acpiid_to_apicid[]; ...@@ -163,6 +163,7 @@ extern u8 x86_acpiid_to_apicid[];
#define ARCH_HAS_POWER_INIT 1 #define ARCH_HAS_POWER_INIT 1
extern int acpi_skip_timer_override; extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
#endif /*__KERNEL__*/ #endif /*__KERNEL__*/
......
...@@ -109,6 +109,15 @@ extern struct x8664_pda _proxy_pda; ...@@ -109,6 +109,15 @@ extern struct x8664_pda _proxy_pda;
#define sub_pda(field,val) pda_to_op("sub",field,val) #define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val) #define or_pda(field,val) pda_to_op("or",field,val)
/*
 * test_and_clear_bit_pda(bit, field): test and clear a bit in a PDA
 * field via a %gs-relative btr. The sbbl materializes CF (the old bit
 * value from btr) into old__, so the result is nonzero (-1) if the bit
 * was set, 0 otherwise. Used by __exit_idle to make the isidle
 * check-and-clear a single read-modify-write, closing the exit_idle
 * race fixed by this merge.
 * This is not atomic against other CPUs -- CPU preemption needs to be off.
 */
#define test_and_clear_bit_pda(bit,field) ({ \
int old__; \
asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
: "=r" (old__), "+m" (_proxy_pda.field) \
: "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
old__; \
})
#endif #endif
#define PDA_STACKOFFSET (5*8) #define PDA_STACKOFFSET (5*8)
......
...@@ -59,8 +59,6 @@ extern seqlock_t xtime_lock; ...@@ -59,8 +59,6 @@ extern seqlock_t xtime_lock;
extern int sysctl_vsyscall; extern int sysctl_vsyscall;
extern void vsyscall_set_cpu(int cpu);
#define ARCH_HAVE_XTIME_LOCK 1 #define ARCH_HAVE_XTIME_LOCK 1
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment