Commit 9301975e authored by Linus Torvalds


Merge branch 'genirq-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

This merges branches irq/genirq, irq/sparseirq-v4, timers/hpet-percpu
and x86/uv.

The sparseirq branch is just preliminary groundwork: no sparse IRQs are
actually implemented by this tree anymore - just the new APIs are added
while keeping the old way intact as well (the new APIs map 1:1 to
irq_desc[]).  The 'real' sparse IRQ support will then be a relatively
small patch on top of this - with a v2.6.29 merge target.
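
The transitional API is easiest to see in miniature. Below is a self-contained userspace sketch (an assumed simplification, not the kernel source; the kernel's table is the global irq_desc[]) of the 1:1 mapping described above - converting call sites to irq_to_desc() now lets a sparse backing store be swapped in later without touching drivers again:

#include <stdio.h>

#define NR_IRQS 16

struct irq_desc { int status; };

/* Stand-in for the kernel's static irq_desc[] array. */
static struct irq_desc irq_desc_table[NR_IRQS];
static unsigned int nr_irqs = NR_IRQS;

/* The new API: for now just a bounds-checked 1:1 array lookup. */
static struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < nr_irqs) ? &irq_desc_table[irq] : NULL;
}

int main(void)
{
	printf("desc for irq 3:  %p\n", (void *)irq_to_desc(3));
	printf("desc for irq 99: %p\n", (void *)irq_to_desc(99));	/* NULL */
	return 0;
}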

* 'genirq-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (178 commits)
  genirq: improve include files
  intr_remapping: fix typo
  io_apic: make irq_mis_count available on 64-bit too
  genirq: fix name space collisions of nr_irqs in arch/*
  genirq: fix name space collision of nr_irqs in autoprobe.c
  genirq: use iterators for irq_desc loops
  proc: fixup irq iterator
  genirq: add reverse iterator for irq_desc
  x86: move ack_bad_irq() to irq.c
  x86: unify show_interrupts() and proc helpers
  x86: cleanup show_interrupts
  genirq: cleanup the sparseirq modifications
  genirq: remove artifacts from sparseirq removal
  genirq: revert dynarray
  genirq: remove irq_to_desc_alloc
  genirq: remove sparse irq code
  genirq: use inline function for irq_to_desc
  genirq: consolidate nr_irqs and for_each_irq_desc()
  x86: remove sparse irq from Kconfig
  genirq: define nr_irqs for architectures with GENERIC_HARDIRQS=n
  ...
parents 7110879c dd3a1db9
@@ -47,7 +47,7 @@ typedef struct irq_swizzle_struct
 
 static irq_swizzle_t *sable_lynx_irq_swizzle;
 
-static void sable_lynx_init_irq(int nr_irqs);
+static void sable_lynx_init_irq(int nr_of_irqs);
 
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
@@ -530,11 +530,11 @@ sable_lynx_srm_device_interrupt(unsigned long vector)
 }
 
 static void __init
-sable_lynx_init_irq(int nr_irqs)
+sable_lynx_init_irq(int nr_of_irqs)
 {
 	long i;
 
-	for (i = 0; i < nr_irqs; ++i) {
+	for (i = 0; i < nr_of_irqs; ++i) {
 		irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
 		irq_desc[i].chip = &sable_lynx_irq_type;
 	}
...
@@ -143,7 +143,7 @@ static struct irq_chip ixdp2x00_cpld_irq_chip = {
 	.unmask	= ixdp2x00_irq_unmask
 };
 
-void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_irqs)
+void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigned long *mask_reg, unsigned long nr_of_irqs)
 {
 	unsigned int irq;
@@ -154,7 +154,7 @@ void __init ixdp2x00_init_irq(volatile unsigned long *stat_reg, volatile unsigne
 	board_irq_stat = stat_reg;
 	board_irq_mask = mask_reg;
-	board_irq_count = nr_irqs;
+	board_irq_count = nr_of_irqs;
 
 	*board_irq_mask = 0xffffffff;
...
@@ -119,7 +119,7 @@ static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)
 
 void __init omap_init_irq(void)
 {
-	unsigned long nr_irqs = 0;
+	unsigned long nr_of_irqs = 0;
 	unsigned int nr_banks = 0;
 	int i;
@@ -133,14 +133,14 @@ void __init omap_init_irq(void)
 		omap_irq_bank_init_one(bank);
 
-		nr_irqs += bank->nr_irqs;
+		nr_of_irqs += bank->nr_irqs;
 		nr_banks++;
 	}
 
 	printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n",
-	       nr_irqs, nr_banks, nr_banks > 1 ? "s" : "");
+	       nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");
 
-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip(i, &omap_irq_chip);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID);
...
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	struct eic *eic;
 	struct resource *regs;
 	unsigned int i;
-	unsigned int nr_irqs;
+	unsigned int nr_of_irqs;
 	unsigned int int_irq;
 	int ret;
 	u32 pattern;
@@ -224,7 +224,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	eic_writel(eic, IDR, ~0UL);
 	eic_writel(eic, MODE, ~0UL);
 	pattern = eic_readl(eic, MODE);
-	nr_irqs = fls(pattern);
+	nr_of_irqs = fls(pattern);
 
 	/* Trigger on low level unless overridden by driver */
 	eic_writel(eic, EDGE, 0UL);
@@ -232,7 +232,7 @@ static int __init eic_probe(struct platform_device *pdev)
 	eic->chip = &eic_chip;
 
-	for (i = 0; i < nr_irqs; i++) {
+	for (i = 0; i < nr_of_irqs; i++) {
 		set_irq_chip_and_handler(eic->first_irq + i, &eic_chip,
 					 handle_level_irq);
 		set_irq_chip_data(eic->first_irq + i, eic);
@@ -256,7 +256,7 @@ static int __init eic_probe(struct platform_device *pdev)
 		 eic->regs, int_irq);
 	dev_info(&pdev->dev,
		 "Handling %u external IRQs, starting with IRQ %u\n",
-		 nr_irqs, eic->first_irq);
+		 nr_of_irqs, eic->first_irq);
 
 	return 0;
...
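
The probe above sizes the controller at runtime: it writes all-ones to the MODE register, reads back which bits stuck, and takes fls() of the result. A minimal userspace sketch of that trick, with a plain-C stand-in for the kernel's fls():

#include <stdio.h>

/* Stand-in for the kernel's fls(): index of the highest set bit, 1-based,
 * 0 when no bit is set. A register that only latches its implemented bits
 * reads back a mask whose fls() is exactly the line count. */
static int fls_demo(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int pattern = 0x1f;	/* readback with 5 implemented lines */

	printf("nr_of_irqs = %d\n", fls_demo(pattern));	/* prints 5 */
	return 0;
}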
@@ -1242,14 +1242,6 @@ config EFI
 	  resultant kernel should continue to boot on existing non-EFI
 	  platforms.
 
-config IRQBALANCE
-	def_bool y
-	prompt "Enable kernel irq balancing"
-	depends on X86_32 && SMP && X86_IO_APIC
-	help
-	  The default yes will allow the kernel to do irq load balancing.
-	  Saying no will keep the kernel from doing irq load balancing.
-
 config SECCOMP
 	def_bool y
 	prompt "Enable seccomp to safely compute untrusted bytecode"
...
@@ -287,7 +287,6 @@ CONFIG_MTRR=y
 # CONFIG_MTRR_SANITIZER is not set
 CONFIG_X86_PAT=y
 CONFIG_EFI=y
-# CONFIG_IRQBALANCE is not set
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_250 is not set
...
@@ -23,7 +23,7 @@ CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o := $(nostackp)
 
 obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
-obj-y += traps.o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o
 obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
@@ -60,8 +60,8 @@ obj-$(CONFIG_X86_32_SMP) += smpcommon.o
 obj-$(CONFIG_X86_64_SMP) += tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC) += apic_$(BITS).o nmi.o
-obj-$(CONFIG_X86_IO_APIC) += io_apic_$(BITS).o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+obj-$(CONFIG_X86_IO_APIC) += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
@@ -108,7 +108,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
         obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-        obj-y += bios_uv.o
+        obj-y += bios_uv.o uv_irq.o uv_sysfs.o
         obj-y += genx2apic_cluster.o
         obj-y += genx2apic_phys.o
         obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
...
@@ -1256,7 +1256,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX
 		       "Error parsing interrupt source overrides entry\n");
@@ -1276,7 +1276,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
-				  NR_IRQ_VECTORS);
+				  nr_irqs);
 	if (count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
...
 /*
  * BIOS run time interface routines.
  *
- *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
- *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  the Free Software Foundation; either version 2 of the License, or
@@ -16,33 +14,128 @@
  *  You should have received a copy of the GNU General Public License
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Russ Anderson
  */
 
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <linux/io.h>
 #include <asm/uv/bios.h>
+#include <asm/uv/uv_hub.h>
 
-const char *
-x86_bios_strerror(long status)
+struct uv_systab uv_systab;
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 {
-	const char *str;
-	switch (status) {
-	case  0: str = "Call completed without error"; break;
-	case -1: str = "Not implemented"; break;
-	case -2: str = "Invalid argument"; break;
-	case -3: str = "Call completed with error"; break;
-	default: str = "Unknown BIOS status code"; break;
-	}
-	return str;
+	struct uv_systab *tab = &uv_systab;
+
+	if (!tab->function)
+		/*
+		 * BIOS does not support UV systab
+		 */
+		return BIOS_STATUS_UNIMPLEMENTED;
+
+	return efi_call6((void *)__va(tab->function),
+			(u64)which, a1, a2, a3, a4, a5);
 }
 
-long
-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
-		   unsigned long *drift_info)
+s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+					u64 a4, u64 a5)
 {
-	struct uv_bios_retval isrv;
+	unsigned long bios_flags;
+	s64 ret;
 
-	BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
-	*ticks_per_second = isrv.v0;
-	*drift_info = isrv.v1;
-	return isrv.status;
+	local_irq_save(bios_flags);
+	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	local_irq_restore(bios_flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(x86_bios_freq_base);
+
+s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+					u64 a4, u64 a5)
+{
+	s64 ret;
+
+	preempt_disable();
+	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+	preempt_enable();
+
+	return ret;
+}
+
+long sn_partition_id;
+EXPORT_SYMBOL_GPL(sn_partition_id);
+long uv_coherency_id;
+EXPORT_SYMBOL_GPL(uv_coherency_id);
+long uv_region_size;
+EXPORT_SYMBOL_GPL(uv_region_size);
+int uv_type;
+
+s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
+		long *region)
+{
+	s64 ret;
+	u64 v0, v1;
+	union partition_info_u part;
+
+	ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
+				(u64)(&v0), (u64)(&v1), 0, 0);
+	if (ret != BIOS_STATUS_SUCCESS)
+		return ret;
+
+	part.val = v0;
+	if (uvtype)
+		*uvtype = part.hub_version;
+	if (partid)
+		*partid = part.partition_id;
+	if (coher)
+		*coher = part.coherence_id;
+	if (region)
+		*region = part.region_size;
+	return ret;
+}
+
+s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+{
+	return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+			   (u64)ticks_per_second, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_freq_base);
+
+#ifdef CONFIG_EFI
+void uv_bios_init(void)
+{
+	struct uv_systab *tab;
+
+	if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+	    (efi.uv_systab == (unsigned long)NULL)) {
+		printk(KERN_CRIT "No EFI UV System Table.\n");
+		uv_systab.function = (unsigned long)NULL;
+		return;
+	}
+
+	tab = (struct uv_systab *)ioremap(efi.uv_systab,
+					sizeof(struct uv_systab));
+	if (strncmp(tab->signature, "UVST", 4) != 0)
+		printk(KERN_ERR "bad signature in UV system table!");
+
+	/*
+	 * Copy table to permanent spot for later use.
+	 */
+	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
+	iounmap(tab);
+
+	printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision);
+}
+#else	/* !CONFIG_EFI */
+
+void uv_bios_init(void) { }
+#endif
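
uv_bios_get_sn_info() above unpacks one 64-bit BIOS return value through union partition_info_u. A self-contained userspace sketch of that decode pattern follows; the field widths here are illustrative assumptions, not the real UV layout (which lives in asm/uv/bios.h):

#include <stdio.h>
#include <stdint.h>

/* Assumed field widths, for illustration only. The union lets a raw
 * 64-bit register value be read back as named bitfields. */
union partition_info_u {
	uint64_t val;
	struct {
		uint64_t hub_version  : 8;
		uint64_t partition_id : 16;
		uint64_t coherence_id : 16;
		uint64_t region_size  : 24;
	} s;
};

int main(void)
{
	union partition_info_u part;

	part.val = 0x0000000000020003ULL;	/* fabricated example value */
	printf("hub_version=%u partition_id=%u\n",
	       (unsigned)part.s.hub_version,
	       (unsigned)part.s.partition_id);
	return 0;
}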
@@ -249,7 +249,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	}
 	numa_set_node(cpu, node);
 
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
...
@@ -174,7 +174,7 @@ static void __cpuinit srat_detect_node(void)
 		node = first_node(node_online_map);
 	numa_set_node(cpu, node);
 
-	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
...
@@ -366,6 +366,10 @@ void __init efi_init(void)
 			   SMBIOS_TABLE_GUID)) {
 			efi.smbios = config_tables[i].table;
 			printk(" SMBIOS=0x%lx ", config_tables[i].table);
+		} else if (!efi_guidcmp(config_tables[i].guid,
+					UV_SYSTEM_TABLE_GUID)) {
+			efi.uv_systab = config_tables[i].table;
+			printk(" UVsystab=0x%lx ", config_tables[i].table);
 		} else if (!efi_guidcmp(config_tables[i].guid,
 					HCDP_TABLE_GUID)) {
 			efi.hcdp = config_tables[i].table;
...
@@ -629,7 +629,7 @@ ENTRY(interrupt)
ENTRY(irq_entries_start)
 	RING0_INT_FRAME
vector=0
-.rept NR_IRQS
+.rept NR_VECTORS
 	ALIGN
  .if vector
 	CFI_ADJUST_CFA_OFFSET -4
...
@@ -179,8 +179,10 @@ static int __init physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	 * is an example).
 	 */
 	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
-	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL))
+	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
+		printk(KERN_DEBUG "system APIC only can use physical flat");
 		return 1;
+	}
 #endif
 
 	return 0;
...
@@ -341,12 +341,12 @@ static __init void map_mmioh_high(int max_pnode)
 
 static __init void uv_rtc_init(void)
 {
-	long status, ticks_per_sec, drift;
+	long status;
+	u64 ticks_per_sec;
 
-	status =
-	    x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
-					&drift);
-	if (status != 0 || ticks_per_sec < 100000) {
+	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
+					&ticks_per_sec);
+	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
 		printk(KERN_WARNING
 			"unable to determine platform RTC clock frequency, "
 			"guessing.\n");
@@ -356,7 +356,22 @@ static __init void uv_rtc_init(void)
 	sn_rtc_cycles_per_second = ticks_per_sec;
 }
 
-static bool uv_system_inited;
+/*
+ * Called on each cpu to initialize the per_cpu UV data area.
+ *	ZZZ hotplug not supported yet
+ */
+void __cpuinit uv_cpu_init(void)
+{
+	/* CPU 0 initilization will be done via uv_system_init. */
+	if (!uv_blade_info)
+		return;
+
+	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
+
+	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
+		set_x2apic_extra_bits(uv_hub_info->pnode);
+}
 
 void __init uv_system_init(void)
 {
@@ -412,6 +427,9 @@ void __init uv_system_init(void)
 	gnode_upper = (((unsigned long)node_id.s.node_id) &
 		       ~((1 << n_val) - 1)) << m_val;
 
+	uv_bios_init();
+	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
+			    &uv_coherency_id, &uv_region_size);
+
 	uv_rtc_init();
 
 	for_each_present_cpu(cpu) {
@@ -433,7 +451,7 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
+		uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
 		uv_node_to_blade[nid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 		max_pnode = max(pnode, max_pnode);
@@ -448,21 +466,6 @@ void __init uv_system_init(void)
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
 	map_mmioh_high(max_pnode);
-	uv_system_inited = true;
-}
 
-/*
- * Called on each cpu to initialize the per_cpu UV data area.
- *	ZZZ hotplug not supported yet
- */
-void __cpuinit uv_cpu_init(void)
-{
-	BUG_ON(!uv_system_inited);
-	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
-
-	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-		set_x2apic_extra_bits(uv_hub_info->pnode);
+	uv_cpu_init();
 }
...
/*
* Common interrupt code for 32 and 64 bit
*/
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
atomic_t irq_err_count;
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
void ack_bad_irq(unsigned int irq)
{
printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Currently unexpected vectors happen only on SMP and APIC.
* We _must_ ack these because every local APIC has only N
* irq slots per priority level, and a 'hanging, unacked' IRQ
* holds up an irq slot - in excessive cases (when multiple
* unexpected vectors occur) that might lock up the APIC
* completely.
* But only ack when the APIC is enabled -AK
*/
if (cpu_has_apic)
ack_APIC_irq();
#endif
}
#ifdef CONFIG_X86_32
# define irq_stats(x) (&per_cpu(irq_stat,x))
#else
# define irq_stats(x) cpu_pda(x)
#endif
/*
* /proc/interrupts printing:
*/
static int show_other_interrupts(struct seq_file *p)
{
int j;
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
seq_printf(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
seq_printf(p, " Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
seq_printf(p, " Rescheduling interrupts\n");
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
seq_printf(p, " Function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
seq_printf(p, "TRM: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
seq_printf(p, " Thermal event interrupts\n");
# ifdef CONFIG_X86_64
seq_printf(p, "THR: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
seq_printf(p, " Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "SPU: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j;
struct irqaction *action;
struct irq_desc *desc;
if (i > nr_irqs)
return 0;
if (i == nr_irqs)
return show_other_interrupts(p);
/* print header */
if (i == 0) {
seq_printf(p, " ");
for_each_online_cpu(j)
seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n');
}
desc = irq_to_desc(i);
spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
#else
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
#endif
action = desc->action;
if (!action && !any_count)
goto out;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %8s", desc->chip->name);
seq_printf(p, "-%-8s", desc->name);
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
out:
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
/*
* /proc/stat helpers
*/
u64 arch_irq_stat_cpu(unsigned int cpu)
{
u64 sum = irq_stats(cpu)->__nmi_count;
#ifdef CONFIG_X86_LOCAL_APIC
sum += irq_stats(cpu)->apic_timer_irqs;
#endif
#ifdef CONFIG_SMP
sum += irq_stats(cpu)->irq_resched_count;
sum += irq_stats(cpu)->irq_call_count;
sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_64
sum += irq_stats(cpu)->irq_threshold_count;
#endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
sum += irq_stats(cpu)->irq_spurious_count;
#endif
return sum;
}
u64 arch_irq_stat(void)
{
u64 sum = atomic_read(&irq_err_count);
#ifdef CONFIG_X86_IO_APIC
sum += atomic_read(&irq_mis_count);
#endif
return sum;
}
@@ -25,29 +25,6 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
 EXPORT_PER_CPU_SYMBOL(irq_regs);
 
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	/*
-	 * Currently unexpected vectors happen only on SMP and APIC.
-	 * We _must_ ack these because every local APIC has only N
-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-	 * holds up an irq slot - in excessive cases (when multiple
-	 * unexpected vectors occur) that might lock up the APIC
-	 * completely.
-	 * But only ack when the APIC is enabled -AK
-	 */
-	if (cpu_has_apic)
-		ack_APIC_irq();
-#endif
-}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 /* Debugging check for stack overflow: is there less than 1KB free? */
 static int check_stack_overflow(void)
@@ -223,20 +200,25 @@ unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
-	int overflow, irq = ~regs->orig_ax;
-	struct irq_desc *desc = irq_desc + irq;
+	int overflow;
+	unsigned vector = ~regs->orig_ax;
+	struct irq_desc *desc;
+	unsigned irq;
 
-	if (unlikely((unsigned)irq >= NR_IRQS)) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-					__func__, irq);
-		BUG();
-	}
-
 	old_regs = set_irq_regs(regs);
 	irq_enter();
+	irq = __get_cpu_var(vector_irq)[vector];
 
 	overflow = check_stack_overflow();
 
+	desc = irq_to_desc(irq);
+	if (unlikely(!desc)) {
+		printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
+					__func__, irq, vector, smp_processor_id());
+		BUG();
+	}
+
 	if (!execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
@@ -248,146 +230,6 @@ unsigned int do_IRQ(struct pt_regs *regs)
 	return 1;
 }
 
-/*
- * Interrupt statistics:
- */
-
-atomic_t irq_err_count;
-
-/*
- * /proc/interrupts printing:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		unsigned any_count = 0;
-
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-#ifndef CONFIG_SMP
-		any_count = kstat_irqs(i);
-#else
-		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
-#endif
-		action = irq_desc[i].action;
-		if (!action && !any_count)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
-
-		if (action) {
-			seq_printf(p, "  %s", action->name);
-			while ((action = action->next) != NULL)
-				seq_printf(p, ", %s", action->name);
-		}
-
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", nmi_count(j));
-		seq_printf(p, "  Non-maskable interrupts\n");
-#ifdef CONFIG_X86_LOCAL_APIC
-		seq_printf(p, "LOC: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).apic_timer_irqs);
-		seq_printf(p, "  Local timer interrupts\n");
-#endif
-#ifdef CONFIG_SMP
-		seq_printf(p, "RES: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_resched_count);
-		seq_printf(p, "  Rescheduling interrupts\n");
-		seq_printf(p, "CAL: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_call_count);
-		seq_printf(p, "  Function call interrupts\n");
-		seq_printf(p, "TLB: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_tlb_count);
-		seq_printf(p, "  TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-		seq_printf(p, "TRM: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_thermal_count);
-		seq_printf(p, "  Thermal event interrupts\n");
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-		seq_printf(p, "SPU: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-				per_cpu(irq_stat,j).irq_spurious_count);
-		seq_printf(p, "  Spurious interrupts\n");
-#endif
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86_IO_APIC)
-		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-#endif
-	}
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = nmi_count(cpu);
-
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
-#endif
-#ifdef CONFIG_SMP
-	sum += per_cpu(irq_stat, cpu).irq_resched_count;
-	sum += per_cpu(irq_stat, cpu).irq_call_count;
-	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
-#endif
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	u64 sum = atomic_read(&irq_err_count);
-
-#ifdef CONFIG_X86_IO_APIC
-	sum += atomic_read(&irq_mis_count);
-#endif
-	return sum;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <mach_apic.h>
 
@@ -395,20 +237,22 @@ void fixup_irqs(cpumask_t map)
 {
 	unsigned int irq;
 	static int warned;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
+	for_each_irq_desc(irq, desc) {
 		cpumask_t mask;
+
 		if (irq == 2)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
 		}
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
-		else if (irq_desc[irq].action && !(warned++))
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
+		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
...
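
The do_IRQ() rework above is the key behavioral change in this file: the value pushed by the entry stub is now treated as a CPU vector, translated through the per-cpu vector_irq[] table, and only then resolved to a descriptor. A self-contained userspace sketch of that lookup chain (names mirror the kernel code, but the values and table sizes here are illustrative assumptions):

#include <stdio.h>

#define NR_VECTORS 256
#define NR_IRQS 16
#define FIRST_EXTERNAL_VECTOR 0x20

struct irq_desc { const char *name; };

static int vector_irq[NR_VECTORS];		/* per-cpu in the kernel */
static struct irq_desc descs[NR_IRQS] = { { "timer" } };

static struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? &descs[irq] : NULL;
}

int main(void)
{
	int v;

	/* Unmapped vectors carry -1, as in the irqinit_32.c initializer. */
	for (v = 0; v < NR_VECTORS; v++)
		vector_irq[v] = -1;
	vector_irq[FIRST_EXTERNAL_VECTOR + 0x10] = 0;	/* IRQ0_VECTOR -> irq 0 */

	unsigned int vector = FIRST_EXTERNAL_VECTOR + 0x10;	/* from ~regs->orig_ax */
	int irq = vector_irq[vector];
	struct irq_desc *desc = (irq >= 0) ? irq_to_desc((unsigned int)irq) : NULL;

	if (desc)
		printf("vector 0x%x -> irq %d -> desc '%s'\n", vector, irq, desc->name);
	else
		printf("spurious vector 0x%x\n", vector);	/* ack_bad_irq() path */
	return 0;
}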
@@ -18,28 +18,6 @@
 #include <asm/idle.h>
 #include <asm/smp.h>
 
-atomic_t irq_err_count;
-
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
-	/*
-	 * Currently unexpected vectors happen only on SMP and APIC.
-	 * We _must_ ack these because every local APIC has only N
-	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-	 * holds up an irq slot - in excessive cases (when multiple
-	 * unexpected vectors occur) that might lock up the APIC
-	 * completely.
-	 * But don't ack when the APIC is disabled. -AK
-	 */
-	if (!disable_apic)
-		ack_APIC_irq();
-}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 /*
  * Probabilistic stack overflow check:
@@ -64,122 +42,6 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 }
 #endif
 
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, "           ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d",j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		unsigned any_count = 0;
-
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
-#ifndef CONFIG_SMP
-		any_count = kstat_irqs(i);
-#else
-		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
-#endif
-		action = irq_desc[i].action;
-		if (!action && !any_count)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
-
-		if (action) {
-			seq_printf(p, "  %s", action->name);
-			while ((action = action->next) != NULL)
-				seq_printf(p, ", %s", action->name);
-		}
-
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "NMI: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
-		seq_printf(p, "  Non-maskable interrupts\n");
-		seq_printf(p, "LOC: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
-		seq_printf(p, "  Local timer interrupts\n");
-#ifdef CONFIG_SMP
-		seq_printf(p, "RES: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
-		seq_printf(p, "  Rescheduling interrupts\n");
-		seq_printf(p, "CAL: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
-		seq_printf(p, "  Function call interrupts\n");
-		seq_printf(p, "TLB: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
-		seq_printf(p, "  TLB shootdowns\n");
-#endif
-#ifdef CONFIG_X86_MCE
-		seq_printf(p, "TRM: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
-		seq_printf(p, "  Thermal event interrupts\n");
-		seq_printf(p, "THR: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
-		seq_printf(p, "  Threshold APIC interrupts\n");
-#endif
-		seq_printf(p, "SPU: ");
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
-		seq_printf(p, "  Spurious interrupts\n");
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-	}
-	return 0;
-}
-
-/*
- * /proc/stat helpers
- */
-u64 arch_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = cpu_pda(cpu)->__nmi_count;
-
-	sum += cpu_pda(cpu)->apic_timer_irqs;
-#ifdef CONFIG_SMP
-	sum += cpu_pda(cpu)->irq_resched_count;
-	sum += cpu_pda(cpu)->irq_call_count;
-	sum += cpu_pda(cpu)->irq_tlb_count;
-#endif
-#ifdef CONFIG_X86_MCE
-	sum += cpu_pda(cpu)->irq_thermal_count;
-	sum += cpu_pda(cpu)->irq_threshold_count;
-#endif
-	sum += cpu_pda(cpu)->irq_spurious_count;
-	return sum;
-}
-
-u64 arch_irq_stat(void)
-{
-	return atomic_read(&irq_err_count);
-}
-
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
@@ -188,6 +50,7 @@ u64 arch_irq_stat(void)
 asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 
 	/* high bit used in ret_from_ code */
 	unsigned vector = ~regs->orig_ax;
@@ -201,8 +64,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 	stack_overflow_check(regs);
 #endif
 
-	if (likely(irq < NR_IRQS))
-		generic_handle_irq(irq);
+	desc = irq_to_desc(irq);
+	if (likely(desc))
+		generic_handle_irq_desc(irq, desc);
 	else {
 		if (!disable_apic)
 			ack_APIC_irq();
@@ -223,8 +87,9 @@ void fixup_irqs(cpumask_t map)
 {
 	unsigned int irq;
 	static int warned;
+	struct irq_desc *desc;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
+	for_each_irq_desc(irq, desc) {
 		cpumask_t mask;
 		int break_affinity = 0;
 		int set_affinity = 1;
@@ -233,32 +98,32 @@ void fixup_irqs(cpumask_t map)
 			continue;
 
 		/* interrupt's are disabled at this point */
-		spin_lock(&irq_desc[irq].lock);
+		spin_lock(&desc->lock);
 
 		if (!irq_has_action(irq) ||
-		    cpus_equal(irq_desc[irq].affinity, map)) {
-			spin_unlock(&irq_desc[irq].lock);
+		    cpus_equal(desc->affinity, map)) {
+			spin_unlock(&desc->lock);
 			continue;
 		}
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (cpus_empty(mask)) {
 			break_affinity = 1;
 			mask = map;
 		}
 
-		if (irq_desc[irq].chip->mask)
-			irq_desc[irq].chip->mask(irq);
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
 
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
-		if (irq_desc[irq].chip->unmask)
-			irq_desc[irq].chip->unmask(irq);
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
 
-		spin_unlock(&irq_desc[irq].lock);
+		spin_unlock(&desc->lock);
 
 		if (break_affinity && set_affinity)
 			printk("Broke affinity for irq %i\n", irq);
...
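
Both fixup_irqs() variants above switch from indexing irq_desc[0..NR_IRQS) to the new for_each_irq_desc() iterator - exactly the conversion that keeps such loops valid once descriptors become sparsely allocated in 2.6.29. A userspace sketch of the iterator idea, with a simplified macro standing in for the kernel's:

#include <stdio.h>

#define NR_IRQS 4

struct irq_desc { int depth; };

static struct irq_desc irq_descs[NR_IRQS];

/* Simplified stand-in for the kernel macro: hand callers (irq, desc)
 * pairs instead of letting them index a global array directly. */
#define for_each_irq_desc(irq, desc) \
	for (irq = 0, desc = irq_descs; irq < NR_IRQS; irq++, desc++)

int main(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc)
		printf("irq %u: depth %d\n", irq, desc->depth);
	return 0;
}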
@@ -69,6 +69,13 @@ void __init init_ISA_irqs (void)
 	 * 16 old-style INTA-cycle interrupts:
 	 */
 	for (i = 0; i < 16; i++) {
+		/* first time call this irq_desc */
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
 		set_irq_chip_and_handler_name(i, &i8259A_chip,
 					      handle_level_irq, "XT");
 	}
@@ -83,6 +90,27 @@ static struct irqaction irq2 = {
 	.name = "cascade",
 };
 
+DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+	[0 ... IRQ0_VECTOR - 1] = -1,
+	[IRQ0_VECTOR] = 0,
+	[IRQ1_VECTOR] = 1,
+	[IRQ2_VECTOR] = 2,
+	[IRQ3_VECTOR] = 3,
+	[IRQ4_VECTOR] = 4,
+	[IRQ5_VECTOR] = 5,
+	[IRQ6_VECTOR] = 6,
+	[IRQ7_VECTOR] = 7,
+	[IRQ8_VECTOR] = 8,
+	[IRQ9_VECTOR] = 9,
+	[IRQ10_VECTOR] = 10,
+	[IRQ11_VECTOR] = 11,
+	[IRQ12_VECTOR] = 12,
+	[IRQ13_VECTOR] = 13,
+	[IRQ14_VECTOR] = 14,
+	[IRQ15_VECTOR] = 15,
+	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+};
+
 /* Overridden in paravirt.c */
 void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
 
@@ -98,22 +126,14 @@ void __init native_init_IRQ(void)
 	 * us. (some of these will be overridden and become
 	 * 'special' SMP interrupts)
 	 */
-	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (i >= NR_IRQS)
-			break;
+	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* SYSCALL_VECTOR was reserved in trap_init. */
-		if (!test_bit(vector, used_vectors))
-			set_intr_gate(vector, interrupt[i]);
+		if (i != SYSCALL_VECTOR)
+			set_intr_gate(i, interrupt[i]);
 	}
 
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
-	/*
-	 * IRQ0 must be given a fixed assignment and initialized,
-	 * because it's used before the IO-APIC is set up.
-	 */
-	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
 	/*
 	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 	 * IPI, driven by wakeup.
@@ -128,6 +148,9 @@ void __init native_init_IRQ(void)
 	/* IPI for single call function */
 	set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
 
+	/* Low priority IPI to cleanup after moving an irq */
+	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
...
@@ -142,23 +142,19 @@ void __init init_ISA_irqs(void)
 	init_bsp_APIC();
 	init_8259A(0);
 
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
+	for (i = 0; i < 16; i++) {
+		/* first time call this irq_desc */
+		struct irq_desc *desc = irq_to_desc(i);
 
-		if (i < 16) {
-			/*
-			 * 16 old-style INTA-cycle interrupts:
-			 */
-			set_irq_chip_and_handler_name(i, &i8259A_chip,
+		desc->status = IRQ_DISABLED;
+		desc->action = NULL;
+		desc->depth = 1;
+
+		/*
+		 * 16 old-style INTA-cycle interrupts:
+		 */
+		set_irq_chip_and_handler_name(i, &i8259A_chip,
 						      handle_level_irq, "XT");
-		} else {
-			/*
-			 * 'high' PCI IRQs filled in on demand
-			 */
-			irq_desc[i].chip = &no_irq_chip;
-		}
 	}
 }
...
@@ -35,9 +35,6 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 	if (!(word & (1 << 13))) {
 		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
 			"disabling irq balancing and affinity\n");
-#ifdef CONFIG_IRQBALANCE
-		irqbalance_disable("");
-#endif
 		noirqdebug_setup("");
 #ifdef CONFIG_PROC_FS
 		no_irq_affinity = 1;
...
@@ -1073,6 +1073,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	prefill_possible_map();
+
 #ifdef CONFIG_X86_64
 	init_cpu_to_node();
 #endif
@@ -1080,6 +1081,9 @@ void __init setup_arch(char **cmdline_p)
 	init_apic_mappings();
 	ioapic_init_mappings();
 
+	/* need to wait for io_apic is mapped */
+	nr_irqs = probe_nr_irqs();
+
 	kvm_guest_init();
 
 	e820_reserve_resources();
...
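
With CONFIG_SPARSE_IRQ deferred, the visible change here is that nr_irqs stops being a compile-time constant: it is fixed once at boot, after the IO-APICs are mapped and their pins can be counted. A userspace sketch of that compile-time-bound to probed-bound shift (probe_nr_irqs() below is a hypothetical stand-in, not the kernel function):

#include <stdio.h>

#define NR_IRQS 224			/* old compile-time ceiling */

static int nr_irqs = NR_IRQS;		/* now a variable, not a constant */

/* Hypothetical stand-in for the kernel's probe_nr_irqs(): derive the
 * real bound from discovered hardware instead of a worst-case constant. */
static int probe_nr_irqs(void)
{
	int ioapic_pins = 24;		/* assumed probe result */

	return 16 + ioapic_pins;	/* legacy PIC lines + IO-APIC pins */
}

int main(void)
{
	nr_irqs = probe_nr_irqs();
	printf("nr_irqs = %d (compile-time ceiling was %d)\n", nr_irqs, NR_IRQS);
	return 0;
}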
@@ -140,25 +140,30 @@ static void __init setup_cpu_pda_map(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size = PERCPU_ENOUGH_ROOM;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
+	unsigned long align = 1;
 
 	/* Setup cpu_pda map */
 	setup_cpu_pda_map();
 
 	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
+	old_size = PERCPU_ENOUGH_ROOM;
+	align = max_t(unsigned long, PAGE_SIZE, align);
+	size = roundup(old_size, align);
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 			  size);
 
 	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
+		ptr = __alloc_bootmem(size, align,
+					__pa(MAX_DMA_ADDRESS));
 #else
 		int node = early_cpu_to_node(cpu);
 		if (!node_online(node) || !NODE_DATA(node)) {
-			ptr = alloc_bootmem_pages(size);
+			ptr = __alloc_bootmem(size, align,
+					__pa(MAX_DMA_ADDRESS));
 			printk(KERN_INFO
 			       "cpu %d has no node %d or node-local memory\n",
 				cpu, node);
@@ -167,7 +172,8 @@ void __init setup_per_cpu_areas(void)
 					 cpu, __pa(ptr));
 		}
 		else {
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
+						__pa(MAX_DMA_ADDRESS));
 			if (ptr)
 				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
 					 cpu, node, __pa(ptr));
@@ -175,7 +181,6 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
...
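
The allocation change above rounds the per-cpu copy up to an alignment of at least PAGE_SIZE before handing it to the bootmem allocator. The arithmetic, as a runnable sketch with example values:

#include <stdio.h>

/* Same integer round-up the kernel's roundup() macro performs. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long old_size = 45000;	/* example stand-in for PERCPU_ENOUGH_ROOM */
	unsigned long align = 4096;	/* PAGE_SIZE on x86 */

	printf("size = %lu\n", roundup(old_size, align));	/* prints 45056 */
	return 0;
}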
@@ -543,10 +543,10 @@ static inline void __inquire_remote_apic(int apicid)
 	int timeout;
 	u32 status;
 
-	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
+	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
 
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
-		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
+		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
 
 		/*
 		 * Wait for idle.
@@ -874,7 +874,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	start_ip = setup_trampoline();
 
 	/* So we see what's up */
-	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
+	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
 			  cpu, apicid, start_ip);
 
 	/*
...
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SGI UV IRQ functions
*
* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
static void uv_noop(unsigned int irq)
{
}
static unsigned int uv_noop_ret(unsigned int irq)
{
return 0;
}
static void uv_ack_apic(unsigned int irq)
{
ack_APIC_irq();
}
struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.startup = uv_noop_ret,
.shutdown = uv_noop,
.enable = uv_noop,
.disable = uv_noop,
.ack = uv_noop,
.mask = uv_noop,
.unmask = uv_noop,
.eoi = uv_ack_apic,
.end = uv_noop,
};
/*
* Set up a mapping of an available irq and vector, and enable the specified
* MMR that defines the MSI that is to be sent to the specified CPU when an
* interrupt is raised.
*/
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
unsigned long mmr_offset)
{
int irq;
int ret;
irq = create_irq();
if (irq <= 0)
return -EBUSY;
ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
if (ret != irq)
destroy_irq(irq);
return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
/*
* Tear down a mapping of an irq and vector, and disable the specified MMR that
* defined the MSI that was to be sent to the specified CPU when an interrupt
* was raised.
*
* Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
*/
void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
{
arch_disable_uv_irq(mmr_blade, mmr_offset);
destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
/*
* This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) Russ Anderson
*/
#include <linux/sysdev.h>
#include <asm/uv/bios.h>
struct kobject *sgi_uv_kobj;
static ssize_t partition_id_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
}
static ssize_t coherence_id_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
}
static struct kobj_attribute partition_id_attr =
__ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
static struct kobj_attribute coherence_id_attr =
__ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
static int __init sgi_uv_sysfs_init(void)
{
unsigned long ret;
if (!sgi_uv_kobj)
sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
if (!sgi_uv_kobj) {
printk(KERN_WARNING "kobject_create_and_add sgi_uv failed \n");
return -EINVAL;
}
ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
if (ret) {
printk(KERN_WARNING "sysfs_create_file partition_id failed \n");
return ret;
}
ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
if (ret) {
printk(KERN_WARNING "sysfs_create_file coherence_id failed \n");
return ret;
}
return 0;
}
device_initcall(sgi_uv_sysfs_init);
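
The two attributes registered above surface as ordinary read-only sysfs files. A hedged usage sketch reading one of them from userspace (the path follows from the code; the read only succeeds on SGI UV hardware):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/firmware/sgi_uv/partition_id", "r");

	if (!f)
		return 1;	/* not running on SGI UV hardware */
	if (fgets(buf, sizeof(buf), f))
		printf("partition_id: %s", buf);
	fclose(f);
	return 0;
}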
@@ -484,10 +484,11 @@ static void disable_cobalt_irq(unsigned int irq)
 static unsigned int startup_cobalt_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	spin_lock_irqsave(&cobalt_lock, flags);
-	if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
-		irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
+	if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
+		desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
 	enable_cobalt_irq(irq);
 	spin_unlock_irqrestore(&cobalt_lock, flags);
 	return 0;
@@ -506,9 +507,10 @@ static void ack_cobalt_irq(unsigned int irq)
 static void end_cobalt_irq(unsigned int irq)
 {
 	unsigned long flags;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	spin_lock_irqsave(&cobalt_lock, flags);
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+	if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
 		enable_cobalt_irq(irq);
 	spin_unlock_irqrestore(&cobalt_lock, flags);
 }
@@ -626,12 +628,12 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
-	desc = irq_desc + realirq;
+	desc = irq_to_desc(realirq);
 
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_cpu(smp_processor_id()).irqs[realirq]++;
+	kstat_incr_irqs_this_cpu(realirq, desc);
 
 	if (likely(desc->action != NULL))
 		handle_IRQ_event(realirq, desc->action);
@@ -662,27 +664,29 @@ void init_VISWS_APIC_irqs(void)
 	int i;
 
 	for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 1;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		desc->status = IRQ_DISABLED;
+		desc->action = 0;
+		desc->depth = 1;
 
 		if (i == 0) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_IDE0) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_IDE1) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 		else if (i == CO_IRQ_8259) {
-			irq_desc[i].chip = &piix4_master_irq_type;
+			desc->chip = &piix4_master_irq_type;
 		}
 		else if (i < CO_IRQ_APIC0) {
-			irq_desc[i].chip = &piix4_virtual_irq_type;
+			desc->chip = &piix4_virtual_irq_type;
 		}
 		else if (IS_CO_APIC(i)) {
-			irq_desc[i].chip = &cobalt_irq_type;
+			desc->chip = &cobalt_irq_type;
 		}
 	}
...
@@ -235,11 +235,14 @@ static void __devinit vmi_time_init_clockevent(void)
 
 void __init vmi_time_init(void)
 {
+	unsigned int cpu;
 	/* Disable PIT: BIOSes start PIT CH0 with 18.2hz peridic. */
 	outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
 
 	vmi_time_init_clockevent();
 	setup_irq(0, &vmi_clock_action);
+	for_each_possible_cpu(cpu)
+		per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
...
@@ -582,7 +582,7 @@ static void __init lguest_init_IRQ(void)
 	for (i = 0; i < LGUEST_IRQS; i++) {
 		int vector = FIRST_EXTERNAL_VECTOR + i;
 		if (vector != SYSCALL_VECTOR) {
-			set_intr_gate(vector, interrupt[i]);
+			set_intr_gate(vector, interrupt[vector]);
 			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
 						      handle_level_irq,
 						      "level");
...
@@ -41,6 +41,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	 { }
 };
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	return cpumask_of_cpu(cpu);
+}
 
 static int probe_bigsmp(void)
 {
...
@@ -75,4 +75,18 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 }
 #endif
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
@@ -38,4 +38,18 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
@@ -23,4 +23,18 @@ static int probe_summit(void)
 	return 0;
 }
 
+static cpumask_t vector_allocation_domain(int cpu)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt desitination.
+	 */
+	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
+	return domain;
+}
+
 struct genapic apic_summit = APIC_INIT("summit", probe_summit);
@@ -1483,7 +1483,7 @@ static void disable_local_vic_irq(unsigned int irq)
  * the interrupt off to another CPU */
 static void before_handle_vic_irq(unsigned int irq)
 {
-	irq_desc_t *desc = irq_desc + irq;
+	irq_desc_t *desc = irq_to_desc(irq);
 	__u8 cpu = smp_processor_id();
 
 	_raw_spin_lock(&vic_irq_lock);
@@ -1518,7 +1518,7 @@ static void before_handle_vic_irq(unsigned int irq)
 /* Finish the VIC interrupt: basically mask */
 static void after_handle_vic_irq(unsigned int irq)
 {
-	irq_desc_t *desc = irq_desc + irq;
+	irq_desc_t *desc = irq_to_desc(irq);
 
 	_raw_spin_lock(&vic_irq_lock);
 	{
...
@@ -21,7 +21,6 @@ void xen_force_evtchn_callback(void)
 
 static void __init __xen_init_IRQ(void)
 {
-#ifdef CONFIG_X86_64
 	int i;
 
 	/* Create identity vector->irq map */
@@ -31,7 +30,6 @@ static void __init __xen_init_IRQ(void)
 		for_each_possible_cpu(cpu)
 			per_cpu(vector_irq, cpu)[i] = i;
 	}
-#endif	/* CONFIG_X86_64 */
 
 	xen_init_IRQ();
 }
...
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 out:
 	raw_local_irq_restore(flags);
...
@@ -219,7 +219,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
 	for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ;
 	     irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
 
-		if (irq >= NR_IRQS) {
+		if (irq >= nr_irqs) {
 			irq = HPET_MAX_IRQ;
 			break;
 		}
...
...