Kirill Smelkov / linux

Commit 54b56170, authored Feb 22, 2010 by H. Peter Anvin
Merge remote branch 'origin/x86/apic' into x86/mrst

Conflicts:
	arch/x86/kernel/apic/io_apic.c
Parents: 1f91233c, d02e30c3
Showing 26 changed files with 333 additions and 230 deletions
Documentation/kernel-parameters.txt    +6   -0
arch/ia64/include/asm/xen/events.h     +0   -4
arch/ia64/kernel/acpi.c                +2   -2
arch/x86/ia32/ia32_aout.c              +0   -1
arch/x86/include/asm/i8259.h           +1   -1
arch/x86/include/asm/io_apic.h         +1   -0
arch/x86/include/asm/irq.h             +1   -0
arch/x86/include/asm/irq_vectors.h     +23  -25
arch/x86/kernel/acpi/boot.c            +8   -1
arch/x86/kernel/apic/io_apic.c         +134 -95
arch/x86/kernel/apic/nmi.c             +3   -3
arch/x86/kernel/i8259.c                +15  -15
arch/x86/kernel/irqinit.c              +17  -18
arch/x86/kernel/reboot.c               +8   -0
arch/x86/kernel/smpboot.c              +9   -4
arch/x86/kernel/time.c                 +2   -2
arch/x86/kernel/visws_quirks.c         +3   -3
arch/x86/kernel/vmiclock_32.c          +1   -5
drivers/acpi/numa.c                    +2   -2
drivers/xen/events.c                   +6   -2
include/linux/irq.h                    +2   -0
init/main.c                            +15  -1
kernel/irq/chip.c                      +43  -9
kernel/irq/handle.c                    +28  -30
kernel/irq/internals.h                 +1   -5
kernel/irq/numa_migrate.c              +2   -2

Documentation/kernel-parameters.txt

@@ -1772,6 +1772,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			purges which is reported from either PAL_VM_SUMMARY or
 			SAL PALO.
 
+	nr_cpus=	[SMP] Maximum number of processors that an SMP kernel
+			could support.  nr_cpus=n : n >= 1 limits the kernel to
+			supporting 'n' processors. Later in runtime you can not
+			use hotplug cpu feature to put more cpu back to online.
+			just like you compile the kernel NR_CPUS=n
+
 	nr_uarts=	[SERIAL] maximum number of UARTs to be registered.
 
 	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.

arch/ia64/include/asm/xen/events.h

@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return !(ia64_psr(regs)->i);
 }
 
-static inline void handle_irq(int irq, struct pt_regs *regs)
-{
-	__do_IRQ(irq);
-}
 #define irq_ctx_init(cpu)	do { } while (0)
 
 #endif /* _ASM_IA64_XEN_EVENTS_H */

arch/ia64/kernel/acpi.c

@@ -881,8 +881,8 @@ __init void prefill_possible_map(void)
 
 	possible = available_cpus + additional_cpus;
 
-	if (possible > NR_CPUS)
-		possible = NR_CPUS;
+	if (possible > nr_cpu_ids)
+		possible = nr_cpu_ids;
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
 		possible, max((possible - available_cpus), 0));

arch/x86/ia32/ia32_aout.c

@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
 	current->mm->cached_hole_size = 0;
 
-	current->mm->mmap = NULL;
 	install_exec_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;

arch/x86/include/asm/i8259.h

@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern spinlock_t i8259A_lock;
+extern raw_spinlock_t i8259A_lock;
 
 /* the PIC may need a careful delay on some platforms, hence specific calls */
 static inline unsigned char inb_pic(unsigned int port)
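
The i8259A_lock change above is the first of many identical conversions in this merge: locks taken in low-level interrupt paths move from spinlock_t to the raw_ variants, which remain busy-waiting spinlocks even on preempt-rt style kernels where ordinary spinlocks may sleep. A minimal sketch of the converted pattern; the demo_* names are hypothetical and not from this commit:

#include <linux/spinlock.h>

/* Hypothetical PIC-style state, guarded the same way as i8259A_lock. */
static DEFINE_RAW_SPINLOCK(demo_pic_lock);
static unsigned int demo_cached_mask;

static void demo_mask_irq(unsigned int irq)
{
	unsigned long flags;

	/* raw_ spinlocks never turn into sleeping locks, which matters in
	 * paths that run with interrupts disabled. */
	raw_spin_lock_irqsave(&demo_pic_lock, flags);
	demo_cached_mask |= 1U << irq;
	raw_spin_unlock_irqrestore(&demo_pic_lock, flags);
}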

arch/x86/include/asm/io_apic.h

@@ -158,6 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic);
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr);
+void setup_IO_APIC_irq_extra(u32 gsi);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 extern void ioapic_insert_resources(void);

arch/x86/include/asm/irq.h

@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
 extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
+extern int nr_legacy_irqs;
 
 #endif /* _ASM_X86_IRQ_H */

arch/x86/include/asm/irq_vectors.h

@@ -28,28 +28,33 @@
 #define MCE_VECTOR			0x12
 
 /*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
  */
 #define FIRST_EXTERNAL_VECTOR		0x20
-
-#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR			0x80
-# define IA32_SYSCALL_VECTOR		0x80
-#else
-# define IA32_SYSCALL_VECTOR		0x80
-#endif
+/*
+ * We start allocating at 0x21 to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#define VECTOR_OFFSET_START		1
 
 /*
- * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
+ * Reserve the lowest usable vector (and hence lowest priority)  0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR		FIRST_EXTERNAL_VECTOR
 
+#define IA32_SYSCALL_VECTOR		0x80
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR			0x80
+#endif
+
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
+ *   round up to the next 16-vector boundary
  */
-#define IRQ0_VECTOR			(FIRST_EXTERNAL_VECTOR + 0x10)
+#define IRQ0_VECTOR			((FIRST_EXTERNAL_VECTOR + 16) & ~15)
 
 #define IRQ1_VECTOR			(IRQ0_VECTOR + 1)
 #define IRQ2_VECTOR			(IRQ0_VECTOR + 2)

@@ -120,13 +125,6 @@
  */
 #define MCE_SELF_VECTOR			0xeb
 
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee) we
- * start at 0x31(0x41) to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR		(IRQ15_VECTOR + 2)
-
 #define NR_VECTORS			 256
 
 #define FPU_IRQ				  13

@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
 #define NR_IRQS_LEGACY			 16
 
-#define CPU_VECTOR_LIMIT		(  8 * NR_CPUS      )
 #define IO_APIC_VECTOR_LIMIT		( 32 * MAX_IO_APICS )
 
 #ifdef CONFIG_X86_IO_APIC
 # ifdef CONFIG_SPARSE_IRQ
+#  define CPU_VECTOR_LIMIT		(64 * NR_CPUS)
 #  define NR_IRQS					\
 	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
 		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
 		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
 # else
-#  if NR_CPUS < MAX_IO_APICS
-#   define NR_IRQS			(NR_VECTORS + 4*CPU_VECTOR_LIMIT)
-#  else
-#   define NR_IRQS			(NR_VECTORS + IO_APIC_VECTOR_LIMIT)
-#  endif
+#  define CPU_VECTOR_LIMIT		(32 * NR_CPUS)
+#  define NR_IRQS					\
+	(CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ?	\
+		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
+		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
 # endif
 #else /* !CONFIG_X86_IO_APIC: */
 # define NR_IRQS			NR_IRQS_LEGACY
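
With FIRST_EXTERNAL_VECTOR fixed at 0x20, the old fixed-offset definition of IRQ0_VECTOR and the new round-up-to-16 form both evaluate to 0x30, while device vector allocation now begins at 0x21 via VECTOR_OFFSET_START instead of at the removed FIRST_DEVICE_VECTOR. A small standalone check of that arithmetic (an illustrative userspace program, not part of the patch):

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR	0x20
#define VECTOR_OFFSET_START	1

int main(void)
{
	int irq0_old = FIRST_EXTERNAL_VECTOR + 0x10;		/* old definition */
	int irq0_new = (FIRST_EXTERNAL_VECTOR + 16) & ~15;	/* new definition */
	int first_device = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;

	printf("IRQ0_VECTOR: old=0x%x new=0x%x\n", irq0_old, irq0_new);	/* both 0x30 */
	printf("device vectors start at 0x%x\n", first_device);	/* 0x21 */
	return 0;
}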

arch/x86/kernel/acpi/boot.c

@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
 	*irq = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+		setup_IO_APIC_irq_extra(gsi);
+#endif
+
 	return 0;
 }

@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 		plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
 	}
 #endif
-	irq = plat_gsi;
+	acpi_gsi_to_irq(plat_gsi, &irq);
 
 	return irq;
 }

arch/x86/kernel/apic/io_apic.c

@@ -73,8 +73,8 @@
  */
 int sis_apic_bug = -1;
 
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_RAW_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers

@@ -167,8 +167,14 @@ int __init arch_early_irq_init(void)
 		desc->chip_data = &cfg[i];
 		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
 		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
-		if (i < legacy_pic->nr_legacy_irqs)
-			cpumask_setall(cfg[i].domain);
+		/*
+		 * For legacy IRQ's, start with assigning irq0 to irq15 to
+		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
+		 */
+		if (i < legacy_pic->nr_legacy_irqs) {
+			cfg[i].vector = IRQ0_VECTOR + i;
+			cpumask_set_cpu(0, cfg[i].domain);
+		}
 	}
 
 	return 0;

@@ -388,7 +394,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 	struct irq_pin_list *entry;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	for_each_irq_pin(entry, cfg->irq_2_pin) {
 		unsigned int reg;
 		int pin;

@@ -397,11 +403,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 		/* Is the remote IRR bit set? */
 		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
-			spin_unlock_irqrestore(&ioapic_lock, flags);
+			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 			return true;
 		}
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return false;
 }

@@ -415,10 +421,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 {
 	union entry_union eu;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
 	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return eu.entry;
 }

@@ -441,9 +447,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__ioapic_write_entry(apic, pin, e);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 /*

@@ -456,10 +462,10 @@ static void ioapic_mask_entry(int apic, int pin)
 	unsigned long flags;
 	union entry_union eu = { .entry.mask = 1 };
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 /*

@@ -586,9 +592,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 	BUG_ON(!cfg);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__mask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)

@@ -596,9 +602,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__unmask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void mask_IO_APIC_irq(unsigned int irq)
@@ -1122,12 +1128,12 @@ void lock_vector_lock(void)
 	/* Used to the online set of cpus does not change
 	 * during assign_irq_vector.
 	 */
-	spin_lock(&vector_lock);
+	raw_spin_lock(&vector_lock);
 }
 
 void unlock_vector_lock(void)
 {
-	spin_unlock(&vector_lock);
+	raw_spin_unlock(&vector_lock);
 }
 
 static int

@@ -1144,7 +1150,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+	static int current_offset = VECTOR_OFFSET_START % 8;
 	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;

@@ -1180,7 +1187,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 		if (vector >= first_system_vector) {
 			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
-			vector = FIRST_DEVICE_VECTOR + offset;
+			vector = FIRST_EXTERNAL_VECTOR + offset;
 		}
 		if (unlikely(current_vector == vector))
 			continue;

@@ -1214,9 +1221,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	int err;
 	unsigned long flags;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	err = __assign_irq_vector(irq, cfg, mask);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }

@@ -1250,11 +1257,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
-	/* This function must be called with vector_lock held */
 	int irq, vector;
 	struct irq_cfg *cfg;
 	struct irq_desc *desc;
 
+	/*
+	 * vector_lock will make sure that we don't run into irq vector
+	 * assignments that might be happening on another cpu in parallel,
+	 * while we setup our initial vector to irq mappings.
+	 */
+	raw_spin_lock(&vector_lock);
 	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
 		cfg = desc->chip_data;

@@ -1273,6 +1285,7 @@ void __setup_vector_irq(int cpu)
 		if (!cpumask_test_cpu(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
+	raw_spin_unlock(&vector_lock);
 }
 
 static struct irq_chip ioapic_chip;

@@ -1422,6 +1435,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
 
 	cfg = desc->chip_data;
 
+	/*
+	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
+	 * controllers like 8259. Now that IO-APIC can handle this irq, update
+	 * the cfg->domain.
+	 */
+	if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
+		apic->vector_allocation_domain(0, cfg->domain);
+
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;

@@ -1520,6 +1541,56 @@ static void __init setup_IO_APIC_irqs(void)
 			" (apicid-pin) not connected\n");
 }
 
+/*
+ * for the gsit that is not in first ioapic
+ * but could not use acpi_register_gsi()
+ * like some special sci in IBM x3330
+ */
+void setup_IO_APIC_irq_extra(u32 gsi)
+{
+	int apic_id = 0, pin, idx, irq;
+	int node = cpu_to_node(boot_cpu_id);
+	struct irq_desc *desc;
+	struct irq_cfg *cfg;
+
+	/*
+	 * Convert 'gsi' to 'ioapic.pin'.
+	 */
+	apic_id = mp_find_ioapic(gsi);
+	if (apic_id < 0)
+		return;
+
+	pin = mp_find_ioapic_pin(apic_id, gsi);
+	idx = find_irq_entry(apic_id, pin, mp_INT);
+	if (idx == -1)
+		return;
+
+	irq = pin_2_irq(idx, apic_id, pin);
+#ifdef CONFIG_SPARSE_IRQ
+	desc = irq_to_desc(irq);
+	if (desc)
+		return;
+#endif
+	desc = irq_to_desc_alloc_node(irq, node);
+	if (!desc) {
+		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+		return;
+	}
+
+	cfg = desc->chip_data;
+	add_pin_to_irq_node(cfg, node, apic_id, pin);
+
+	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
+		pr_debug("Pin %d-%d already programmed\n",
+			 mp_ioapics[apic_id].apicid, pin);
+		return;
+	}
+	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+
+	setup_IO_APIC_irq(apic_id, pin, irq, desc,
+			irq_trigger(idx), irq_polarity(idx));
+}
+
 /*
  * Set up the timer pin, possibly with the 8259A-master behind.
  */

@@ -1583,14 +1654,14 @@ __apicdebuginit(void) print_IO_APIC(void)
 	for (apic = 0; apic < nr_ioapics; apic++) {
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(apic, 0);
 	reg_01.raw = io_apic_read(apic, 1);
 	if (reg_01.bits.version >= 0x10)
 		reg_02.raw = io_apic_read(apic, 2);
 	if (reg_01.bits.version >= 0x20)
 		reg_03.raw = io_apic_read(apic, 3);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
 	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);

@@ -1812,7 +1883,7 @@ __apicdebuginit(void) print_PIC(void)
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	v = inb(0xa1) << 8 | inb(0x21);
 	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

@@ -1826,7 +1897,7 @@ __apicdebuginit(void) print_PIC(void)
 	outb(0x0a, 0xa0);
 	outb(0x0a, 0x20);
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

@@ -1885,9 +1956,9 @@ void __init enable_IO_APIC(void)
 	 * The number of IO-APIC IRQ registers (== #pins):
 	 */
 	for (apic = 0; apic < nr_ioapics; apic++) {
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_01.raw = io_apic_read(apic, 1);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 	}
@@ -2027,9 +2098,9 @@ void __init setup_ioapic_ids_from_mpc(void)
 	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
 
 		/* Read the register 0 value */
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic_id, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		old_id = mp_ioapics[apic_id].apicid;

@@ -2088,16 +2159,16 @@ void __init setup_ioapic_ids_from_mpc(void)
 			mp_ioapics[apic_id].apicid);
 
 		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(apic_id, 0, reg_00.raw);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		/*
 		 * Sanity check
 		 */
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic_id, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
 			printk("could not set ID!\n");
 		else

@@ -2180,7 +2251,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	unsigned long flags;
 	struct irq_cfg *cfg;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	if (irq < legacy_pic->nr_legacy_irqs) {
 		legacy_pic->chip->mask(irq);
 		if (legacy_pic->irq_pending(irq))

@@ -2188,7 +2259,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	}
 	cfg = irq_cfg(irq);
 	__unmask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return was_pending;
 }

@@ -2199,9 +2270,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
 }

@@ -2294,14 +2365,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	irq = desc->irq;
 	cfg = desc->chip_data;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	ret = set_desc_affinity(desc, mask, &dest);
 	if (!ret) {
 		/* Only the high 8 bits are valid. */
 		dest = SET_APIC_LOGICAL_ID(dest);
 		__target_IO_APIC_irq(irq, dest, cfg);
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return ret;
 }

@@ -2536,9 +2607,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
 	irq = desc->irq;
 	cfg = desc->chip_data;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__eoi_ioapic_irq(irq, cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void ack_apic_level(unsigned int irq)

@@ -3120,13 +3191,13 @@ static int ioapic_resume(struct sys_device *dev)
 	data = container_of(dev, struct sysfs_ioapic_data, dev);
 	entry = data->entry;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(dev->id, 0);
 	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
 		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
 		io_apic_write(dev->id, 0, reg_00.raw);
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
 		ioapic_write_entry(dev->id, i, entry[i]);

@@ -3189,7 +3260,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 	if (irq_want < nr_irqs_gsi)
 		irq_want = nr_irqs_gsi;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	for (new = irq_want; new < nr_irqs; new++) {
 		desc_new = irq_to_desc_alloc_node(new, node);
 		if (!desc_new) {

@@ -3208,14 +3279,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 		irq = new;
 		break;
 	}
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
-	if (irq > 0) {
-		dynamic_irq_init(irq);
-		/* restore it, in case dynamic_irq_init clear it */
-		if (desc_new)
-			desc_new->chip_data = cfg_new;
-	}
+	if (irq > 0)
+		dynamic_irq_init_keep_chip_data(irq);
 
 	return irq;
 }

@@ -3237,20 +3305,13 @@ int create_irq(void)
 void destroy_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_cfg *cfg;
-	struct irq_desc *desc;
-	/* store it, in case dynamic_irq_cleanup clear it */
-	desc = irq_to_desc(irq);
-	cfg = desc->chip_data;
-	dynamic_irq_cleanup(irq);
-	/* connect back irq_cfg */
-	desc->chip_data = cfg;
+
+	dynamic_irq_cleanup_keep_chip_data(irq);
 
 	free_irte(irq);
-	spin_lock_irqsave(&vector_lock, flags);
-	__clear_irq_vector(irq, cfg);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq, get_irq_chip_data(irq));
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 /*

@@ -3787,9 +3848,9 @@ int __init io_apic_get_redir_entries (int ioapic)
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return reg_01.bits.entries;
 }

@@ -3816,28 +3877,6 @@ void __init probe_nr_irqs_gsi(void)
 	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
 }
 
-#ifdef CONFIG_SPARSE_IRQ
-int __init arch_probe_nr_irqs(void)
-{
-	int nr;
-
-	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
-		nr_irqs = NR_VECTORS * nr_cpu_ids;
-
-	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
-	/*
-	 * for MSI and HT dyn irq
-	 */
-	nr += nr_irqs_gsi * 16;
-#endif
-	if (nr < nr_irqs)
-		nr_irqs = nr;
-
-	return 0;
-}
-#endif
-
 static int __io_apic_set_pci_routing(struct device *dev, int irq,
 				struct io_apic_irq_attr *irq_attr)
 {

@@ -3951,9 +3990,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	if (physids_empty(apic_id_map))
 		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(ioapic, 0);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	if (apic_id >= get_physical_broadcast()) {
 		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "

@@ -3987,10 +4026,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	if (reg_00.bits.ID != apic_id) {
 		reg_00.bits.ID = apic_id;
 
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(ioapic, 0, reg_00.raw);
 		reg_00.raw = io_apic_read(ioapic, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		/* Sanity check */
 		if (reg_00.bits.ID != apic_id) {

@@ -4011,9 +4050,9 @@ int __init io_apic_get_version(int ioapic)
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return reg_01.bits.version;
 }
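
Besides the lock conversions, this file changes where device vector allocation starts (FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START, i.e. 0x21) and where the search falls back to on wrap-around (FIRST_EXTERNAL_VECTOR + offset). The standalone model below shows the kind of candidate sequence that produces; it assumes the step-by-8 walk __assign_irq_vector uses between priority levels and a fixed first_system_vector value, so treat it as an approximation of the spreading behaviour rather than the real allocator:

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR	0x20
#define VECTOR_OFFSET_START	1
#define FIRST_SYSTEM_VECTOR	0xef	/* assumed value of first_system_vector */

int main(void)
{
	int vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;	/* 0x21 */
	int offset = VECTOR_OFFSET_START % 8;				/* 1 */
	int i;

	for (i = 0; i < 8; i++) {
		vector += 8;				/* next priority level */
		if (vector >= FIRST_SYSTEM_VECTOR) {	/* out of vectors: share levels */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		printf("candidate %d: 0x%02x\n", i, vector);
	}
	return 0;
}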

arch/x86/kernel/apic/nmi.c

@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 	/* We can be called before check_nmi_watchdog, hence NULL check. */
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+		static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
 
-		spin_lock(&lock);
+		raw_spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		show_regs(regs);
 		dump_stack();
-		spin_unlock(&lock);
+		raw_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		rc = 1;

arch/x86/kernel/i8259.c

@@ -32,7 +32,7 @@
  */
 static int i8259A_auto_eoi;
 
-DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_RAW_SPINLOCK(i8259A_lock);
 static void mask_and_ack_8259A(unsigned int);
 static void mask_8259A(void);
 static void unmask_8259A(void);

@@ -74,13 +74,13 @@ static void disable_8259A_irq(unsigned int irq)
 	unsigned int mask = 1 << irq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
 		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static void enable_8259A_irq(unsigned int irq)

@@ -88,13 +88,13 @@ static void enable_8259A_irq(unsigned int irq)
 	unsigned int mask = ~(1 << irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
 		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static int i8259A_irq_pending(unsigned int irq)

@@ -103,12 +103,12 @@ static int i8259A_irq_pending(unsigned int irq)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
 		ret = inb(PIC_MASTER_CMD) & mask;
 	else
 		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
 }

@@ -156,7 +156,7 @@ static void mask_and_ack_8259A(unsigned int irq)
 	unsigned int irqmask = 1 << irq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	/*
 	 * Lightweight spurious IRQ detection. We do not want
 	 * to overdo spurious IRQ handling - it's usually a sign

@@ -189,7 +189,7 @@ static void mask_and_ack_8259A(unsigned int irq)
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
 spurious_8259A_irq:

@@ -291,24 +291,24 @@ static void mask_8259A(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static void unmask_8259A(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
 	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 static void init_8259A(int auto_eoi)

@@ -317,7 +317,7 @@ static void init_8259A(int auto_eoi)
 
 	i8259A_auto_eoi = auto_eoi;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

@@ -362,7 +362,7 @@ static void init_8259A(int auto_eoi)
 	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
 	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 /*

arch/x86/kernel/irqinit.c

@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... IRQ0_VECTOR - 1] = -1,
-	[IRQ0_VECTOR] = 0,
-	[IRQ1_VECTOR] = 1,
-	[IRQ2_VECTOR] = 2,
-	[IRQ3_VECTOR] = 3,
-	[IRQ4_VECTOR] = 4,
-	[IRQ5_VECTOR] = 5,
-	[IRQ6_VECTOR] = 6,
-	[IRQ7_VECTOR] = 7,
-	[IRQ8_VECTOR] = 8,
-	[IRQ9_VECTOR] = 9,
-	[IRQ10_VECTOR] = 10,
-	[IRQ11_VECTOR] = 11,
-	[IRQ12_VECTOR] = 12,
-	[IRQ13_VECTOR] = 13,
-	[IRQ14_VECTOR] = 14,
-	[IRQ15_VECTOR] = 15,
-	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+	[0 ... NR_VECTORS - 1] = -1,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)

@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	return 0;
 }
 
+/* Number of legacy interrupts */
+int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+
 void __init init_ISA_irqs(void)
 {
 	int i;

@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
 void __init init_IRQ(void)
 {
 	int i;
+
+	/*
+	 * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
+	 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
+	 * then this configuration will likely be static after the boot. If
+	 * these IRQ's are handled by more mordern controllers like IO-APIC,
+	 * then this vector space can be freed and re-used dynamically as the
+	 * irq's migrate etc.
+	 */
+	for (i = 0; i < nr_legacy_irqs; i++)
+		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+
 	x86_init.irqs.intr_init();
 }
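
The per-cpu vector_irq table is now initialised to -1 everywhere at build time, and init_IRQ() installs the legacy IRQ 0..15 mappings at boot instead. The mapping itself is simply IRQ0_VECTOR + i; the illustrative userspace program below prints the same table init_IRQ() sets up for cpu 0 (not kernel code):

#include <stdio.h>

#define FIRST_EXTERNAL_VECTOR	0x20
#define IRQ0_VECTOR		((FIRST_EXTERNAL_VECTOR + 16) & ~15)	/* 0x30 */
#define NR_LEGACY_IRQS		16

int main(void)
{
	int i;

	/* the same mapping init_IRQ() now writes into per_cpu(vector_irq, 0) */
	for (i = 0; i < NR_LEGACY_IRQS; i++)
		printf("legacy IRQ %2d -> vector 0x%02x\n", i, IRQ0_VECTOR + i);
	return 0;
}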

arch/x86/kernel/reboot.c

@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the iMac9,1. */
+		.callback = set_pci_reboot,
+		.ident = "Apple iMac9,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+		},
+	},
 	{ }
 };

arch/x86/kernel/smpboot.c

@@ -243,6 +243,11 @@ static void __cpuinit smp_callin(void)
 	map_cpu_to_logical_apicid();
 
 	notify_cpu_starting(cpuid);
+
+	/*
+	 * Need to setup vector mappings before we enable interrupts.
+	 */
+	__setup_vector_irq(smp_processor_id());
 	/*
 	 * Get our bogomips.
 	 *

@@ -317,7 +322,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 */
 	ipi_call_lock();
 	lock_vector_lock();
-	__setup_vector_irq(smp_processor_id());
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();

@@ -1216,11 +1220,12 @@ __init void prefill_possible_map(void)
 
 	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-	if (possible > CONFIG_NR_CPUS) {
+	/* nr_cpu_ids could be reduced via nr_cpus= */
+	if (possible > nr_cpu_ids) {
 		printk(KERN_WARNING
 			"%d Processors exceeds NR_CPUS limit of %d\n",
-			possible, CONFIG_NR_CPUS);
-		possible = CONFIG_NR_CPUS;
+			possible, nr_cpu_ids);
+		possible = nr_cpu_ids;
 	}
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",

arch/x86/kernel/time.c

@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 		 * manually to deassert NMI lines for the watchdog if run
 		 * on an 82489DX-based system.
 		 */
-		spin_lock(&i8259A_lock);
+		raw_spin_lock(&i8259A_lock);
 		outb(0x0c, PIC_MASTER_OCW3);
 		/* Ack the IRQ; AEOI will end it automatically. */
 		inb(PIC_MASTER_POLL);
-		spin_unlock(&i8259A_lock);
+		raw_spin_unlock(&i8259A_lock);
 	}
 
 	global_clock_event->event_handler(global_clock_event);

arch/x86/kernel/visws_quirks.c

@@ -553,7 +553,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	struct irq_desc *desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	/* Find out what's interrupting in the PIIX4 master 8259 */
 	outb(0x0c, 0x20);		/* OCW3 Poll command */

@@ -590,7 +590,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 		outb(0x60 + realirq, 0x20);
 	}
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	desc = irq_to_desc(realirq);

@@ -608,7 +608,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 
 out_unlock:
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return IRQ_NONE;
 }

arch/x86/kernel/vmiclock_32.c

@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
 
 static inline unsigned int vmi_get_timer_vector(void)
 {
-#ifdef CONFIG_X86_IO_APIC
-	return FIRST_DEVICE_VECTOR;
-#else
-	return FIRST_EXTERNAL_VECTOR;
-#endif
+	return IRQ0_VECTOR;
 }
 
 /** vmi clockchip */

drivers/acpi/numa.c

@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				     acpi_parse_x2apic_affinity, NR_CPUS);
+				     acpi_parse_x2apic_affinity, nr_cpu_ids);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				     acpi_parse_processor_affinity, NR_CPUS);
+				     acpi_parse_processor_affinity, nr_cpu_ids);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);

drivers/xen/events.c

@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 				int bit_idx = __ffs(pending_bits);
 				int port = (word_idx * BITS_PER_LONG) + bit_idx;
 				int irq = evtchn_to_irq[port];
+				struct irq_desc *desc;
 
-				if (irq != -1)
-					handle_irq(irq, regs);
+				if (irq != -1) {
+					desc = irq_to_desc(irq);
+					if (desc)
+						generic_handle_irq_desc(irq, desc);
+				}
 			}
 		}

include/linux/irq.h

@@ -400,7 +400,9 @@ static inline int irq_has_action(unsigned int irq)
 
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
+void dynamic_irq_init_keep_chip_data(unsigned int irq);
 extern void dynamic_irq_cleanup(unsigned int irq);
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
 
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);

init/main.c

@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
 
 early_param("nosmp", nosmp);
 
+/* this is hard limit */
+static int __init nrcpus(char *str)
+{
+	int nr_cpus;
+
+	get_option(&str, &nr_cpus);
+	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+		nr_cpu_ids = nr_cpus;
+
+	return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
 static int __init maxcpus(char *str)
 {
 	get_option(&str, &setup_max_cpus);

@@ -584,6 +598,7 @@ asmlinkage void __init start_kernel(void)
 		local_irq_disable();
 	}
 	rcu_init();
+	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();

@@ -657,7 +672,6 @@ asmlinkage void __init start_kernel(void)
 	proc_caches_init();
 	buffer_init();
 	key_init();
-	radix_tree_init();
 	security_init();
 	vfs_caches_init(totalram_pages);
 	signals_init();
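
The new nr_cpus= handler only ever lowers nr_cpu_ids, never raises it above the compiled-in NR_CPUS limit, which is what prefill_possible_map() in the smpboot.c hunk above relies on. A tiny userspace model of that clamping rule (the NR_CPUS=64 build limit is an assumption made for the example):

#include <stdio.h>
#include <stdlib.h>

static int nr_cpu_ids = 64;	/* assumed compile-time NR_CPUS limit */

/* mirrors the nrcpus() early_param handler: only lower, never raise */
static void nrcpus(const char *str)
{
	int nr_cpus = atoi(str);

	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;
}

int main(void)
{
	nrcpus("4");					/* as if booted with nr_cpus=4 */
	printf("nr_cpu_ids = %d\n", nr_cpu_ids);	/* prints 4 */

	nrcpus("128");					/* larger values are ignored */
	printf("nr_cpu_ids = %d\n", nr_cpu_ids);	/* still 4 */
	return 0;
}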

kernel/irq/chip.c

@@ -18,11 +18,7 @@
 
 #include "internals.h"
 
-/**
- *	dynamic_irq_init - initialize a dynamically allocated irq
- *	@irq:	irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
+static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 {
 	struct irq_desc *desc;
 	unsigned long flags;

@@ -41,6 +37,7 @@ void dynamic_irq_init(unsigned int irq)
 	desc->depth = 1;
 	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
+	if (!keep_chip_data)
 		desc->chip_data = NULL;
 	desc->action = NULL;
 	desc->irq_count = 0;

@@ -55,10 +52,26 @@ void dynamic_irq_init(unsigned int irq)
 }
 
 /**
- *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ *	dynamic_irq_init - initialize a dynamically allocated irq
  *	@irq:	irq number to initialize
  */
-void dynamic_irq_cleanup(unsigned int irq)
+void dynamic_irq_init(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, false);
+}
+
+/**
+ *	dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ *	@irq:	irq number to initialize
+ *
+ *	does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_init_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, true);
+}
+
+static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;

@@ -77,6 +90,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	}
 	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
+	if (!keep_chip_data)
 		desc->chip_data = NULL;
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;

@@ -85,6 +99,26 @@ void dynamic_irq_cleanup(unsigned int irq)
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+/**
+ *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ *	@irq:	irq number to initialize
+ */
+void dynamic_irq_cleanup(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, false);
+}
+
+/**
+ *	dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
+ *	@irq:	irq number to initialize
+ *
+ *	does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, true);
+}
+
 /**
  *	set_irq_chip - set the irq chip for an irq
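
The *_keep_chip_data variants exist so that create_irq_nr() and destroy_irq() in the io_apic.c hunks earlier in this commit no longer have to save and restore desc->chip_data (the irq_cfg) around init and cleanup by hand. A sketch of the intended pairing, with hypothetical demo_* wrappers standing in for the real callers:

#include <linux/irq.h>

/* Hypothetical wrappers mirroring the new create_irq_nr()/destroy_irq() flow. */
static unsigned int demo_setup_dynamic_irq(unsigned int irq)
{
	/* leaves desc->chip_data (the irq_cfg installed at allocation) intact */
	dynamic_irq_init_keep_chip_data(irq);
	return irq;
}

static void demo_teardown_dynamic_irq(unsigned int irq)
{
	/* also preserves chip_data, so the vector can still be looked up via
	 * get_irq_chip_data(irq) while the irq is being released */
	dynamic_irq_cleanup_keep_chip_data(irq);
}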

kernel/irq/handle.c

@@ -19,7 +19,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/radix-tree.h>
 #include <trace/events/irq.h>
 
 #include "internals.h"

@@ -87,12 +87,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
 	void *ptr;
 
-	if (slab_is_available())
-		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-				   GFP_ATOMIC, node);
-	else
-		ptr = alloc_bootmem_node(NODE_DATA(node),
-				nr * sizeof(*desc->kstat_irqs));
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+			   GFP_ATOMIC, node);
 
 	/*
 	 * don't overwite if can not get new one

@@ -132,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
  */
 DEFINE_RAW_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc **irq_desc_ptrs __read_mostly;
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	void **ptr;
+
+	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+	if (ptr)
+		radix_tree_replace_slot(ptr, desc);
+}
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {

@@ -164,9 +179,6 @@ int __init early_irq_init(void)
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 	node = first_online_node;
 
-	/* allocate irq_desc_ptrs array based on nr_irqs */
-	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
-
 	/* allocate based on nr_cpu_ids */
 	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
 					  sizeof(int), GFP_NOWAIT, node);

@@ -180,23 +192,12 @@ int __init early_irq_init(void)
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		alloc_desc_masks(&desc[i], node, true);
 		init_desc_masks(&desc[i]);
-		irq_desc_ptrs[i] = desc + i;
+		set_irq_desc(i, &desc[i]);
 	}
 
-	for (i = legacy_count; i < nr_irqs; i++)
-		irq_desc_ptrs[i] = NULL;
-
 	return arch_early_irq_init();
 }
 
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	if (irq_desc_ptrs && irq < nr_irqs)
-		return irq_desc_ptrs[irq];
-
-	return NULL;
-}
-
 struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 {
 	struct irq_desc *desc;

@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 		return NULL;
 	}
 
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
 	if (desc)
 		return desc;
 
 	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
 	if (desc)
 		goto out_unlock;
 
-	if (slab_is_available())
-		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	else
-		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 
 	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
 	if (!desc) {

@@ -231,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 	}
 	init_one_irq_desc(irq, desc, node);
 
-	irq_desc_ptrs[irq] = desc;
+	set_irq_desc(irq, desc);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
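
irq_desc lookup now goes through a radix tree keyed by IRQ number instead of the irq_desc_ptrs pointer array, so the mapping no longer has to be allocated up front for all nr_irqs entries. A minimal sketch of the same insert/lookup pattern with the kernel radix-tree API; struct my_desc is a hypothetical stand-in for struct irq_desc:

#include <linux/gfp.h>
#include <linux/radix-tree.h>

/* Hypothetical stand-in for struct irq_desc. */
struct my_desc {
	int nr;
};

static RADIX_TREE(my_desc_tree, GFP_ATOMIC);	/* tree root, atomic-safe allocations */

static int my_store(unsigned int index, struct my_desc *d)
{
	return radix_tree_insert(&my_desc_tree, index, d);	/* 0 on success */
}

static struct my_desc *my_lookup(unsigned int index)
{
	return radix_tree_lookup(&my_desc_tree, index);	/* NULL if never inserted */
}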

kernel/irq/internals.h

@@ -21,11 +21,7 @@ extern void clear_kstat_irqs(struct irq_desc *desc);
 extern raw_spinlock_t sparse_irq_lock;
 
 #ifdef CONFIG_SPARSE_IRQ
-/* irq_desc_ptrs allocated at boot time */
-extern struct irq_desc **irq_desc_ptrs;
-#else
-/* irq_desc_ptrs is a fixed size array */
-extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_PROC_FS

kernel/irq/numa_migrate.c

@@ -70,7 +70,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
 
 	if (desc && old_desc != desc)
 		goto out_unlock;

@@ -90,7 +90,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 		goto out_unlock;
 	}
 
-	irq_desc_ptrs[irq] = desc;
+	replace_irq_desc(irq, desc);
 	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */