Commit 775efacb
Authored Apr 02, 2002 by Dave Jones

[PATCH] Pentium 4 NMI watchdog support

From Mikael via 2.4.

parent 8897452d

Showing 2 changed files, with 110 additions and 23 deletions:

    arch/i386/kernel/nmi.c     +108  -21
    include/asm-i386/msr.h       +2   -2
arch/i386/kernel/nmi.c  (view file @ 775efacb)

@@ -8,6 +8,7 @@
  * Fixes:
  *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
  *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
+ *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
  */
 
 #include <linux/config.h>
@@ -43,6 +44,32 @@ extern void show_registers(struct pt_regs *regs);
 #define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
 #define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
 
+#define MSR_P4_MISC_ENABLE	0x1A0
+#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
+#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
+#define MSR_P4_PERFCTR0		0x300
+#define MSR_P4_CCCR0		0x360
+#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
+#define P4_ESCR_OS		(1<<3)
+#define P4_ESCR_USR		(1<<2)
+#define P4_CCCR_OVF_PMI		(1<<26)
+#define P4_CCCR_THRESHOLD(N)	((N)<<20)
+#define P4_CCCR_COMPLEMENT	(1<<19)
+#define P4_CCCR_COMPARE		(1<<18)
+#define P4_CCCR_REQUIRED	(3<<16)
+#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
+#define P4_CCCR_ENABLE		(1<<12)
+/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
+   CRU_ESCR0 (with any non-null event selector) through a complemented
+   max threshold. [IA32-Vol3, Section 14.9.9] */
+#define MSR_P4_IQ_COUNTER0	0x30C
+#define MSR_P4_IQ_CCCR0		0x36C
+#define MSR_P4_CRU_ESCR0	0x3B8
+#define P4_NMI_CRU_ESCR0	(P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
+#define P4_NMI_IQ_CCCR0	\
+	(P4_CCCR_OVF_PMI|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
+	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
+
 int __init check_nmi_watchdog (void)
 {
 	irq_cpustat_t tmp[NR_CPUS];
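For reference, a minimal userspace sketch, not part of the patch: it recomputes the constants that the patch later writes into MSR_P4_CRU_ESCR0 and MSR_P4_IQ_CCCR0, making the "complemented max threshold" trick from the comment above visible as concrete bit patterns. The macro definitions are copied from the hunk above; the printf wrapper, the expected hex values, and the reading of the complement semantics are assumptions for illustration only.

/* Standalone sketch (not part of the patch): recompute the ESCR/CCCR
 * values programmed into MSR_P4_CRU_ESCR0 and MSR_P4_IQ_CCCR0. */
#include <stdio.h>

#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI		(1<<26)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)

int main(void)
{
	/* Any non-null event via CRU_ESCR0, counted in both OS and USR mode. */
	unsigned int escr = P4_ESCR_EVENT_SELECT(0x3F) | P4_ESCR_OS | P4_ESCR_USR;

	/* Compare against a complemented threshold of 15: with COMPLEMENT set
	 * the "<= 15" test is always true, so (per the patch comment)
	 * IQ_COUNTER0 increments every cycle and behaves like a clock;
	 * OVF_PMI delivers the interrupt on overflow. */
	unsigned int cccr = P4_CCCR_OVF_PMI | P4_CCCR_THRESHOLD(15)
		| P4_CCCR_COMPLEMENT | P4_CCCR_COMPARE | P4_CCCR_REQUIRED
		| P4_CCCR_ESCR_SELECT(4) | P4_CCCR_ENABLE;

	printf("P4_NMI_CRU_ESCR0 = 0x%08x\n", escr);	/* expected 0x7e00000c */
	printf("P4_NMI_IQ_CCCR0  = 0x%08x\n", cccr);	/* expected 0x04ff9000 */
	return 0;
}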
@@ -84,11 +111,11 @@ static int __init setup_nmi_watchdog(char *str)
 	/*
 	 * If any other x86 CPU has a local APIC, then
 	 * please test the NMI stuff there and send me the
-	 * missing bits. Right now Intel P6 and AMD K7 only.
+	 * missing bits. Right now Intel P6/P4 and AMD K7 only.
 	 */
 	if ((nmi == NMI_LOCAL_APIC) &&
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-	    (boot_cpu_data.x86 == 6))
+	    (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15))
 		nmi_watchdog = nmi;
 	if ((nmi == NMI_LOCAL_APIC) &&
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
@@ -118,7 +145,15 @@ static void disable_apic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
-		wrmsr(MSR_IA32_EVNTSEL0, 0, 0);
+		switch (boot_cpu_data.x86) {
+		case 6:
+			wrmsr(MSR_P6_EVNTSEL0, 0, 0);
+			break;
+		case 15:
+			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
+			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
+			break;
+		}
 		break;
 	}
 }
@@ -157,17 +192,22 @@ static inline void nmi_pm_init(void) { }
  * Original code written by Keith Owens.
  */
 
+static void __pminit clear_msr_range(unsigned int base, unsigned int n)
+{
+	unsigned int i;
+
+	for(i = 0; i < n; ++i)
+		wrmsr(base+i, 0, 0);
+}
+
 static void __pminit setup_k7_watchdog(void)
 {
-	int i;
 	unsigned int evntsel;
 
 	nmi_perfctr_msr = MSR_K7_PERFCTR0;
 
-	for(i = 0; i < 4; ++i) {
-		wrmsr(MSR_K7_EVNTSEL0+i, 0, 0);
-		wrmsr(MSR_K7_PERFCTR0+i, 0, 0);
-	}
+	clear_msr_range(MSR_K7_EVNTSEL0, 4);
+	clear_msr_range(MSR_K7_PERFCTR0, 4);
 
 	evntsel = K7_EVNTSEL_INT
 		| K7_EVNTSEL_OS
@@ -184,27 +224,54 @@ static void __pminit setup_k7_watchdog(void)
 static void __pminit setup_p6_watchdog(void)
 {
-	int i;
 	unsigned int evntsel;
 
-	nmi_perfctr_msr = MSR_IA32_PERFCTR0;
+	nmi_perfctr_msr = MSR_P6_PERFCTR0;
 
-	for(i = 0; i < 2; ++i) {
-		wrmsr(MSR_IA32_EVNTSEL0+i, 0, 0);
-		wrmsr(MSR_IA32_PERFCTR0+i, 0, 0);
-	}
+	clear_msr_range(MSR_P6_EVNTSEL0, 2);
+	clear_msr_range(MSR_P6_PERFCTR0, 2);
 
 	evntsel = P6_EVNTSEL_INT
 		| P6_EVNTSEL_OS
 		| P6_EVNTSEL_USR
 		| P6_NMI_EVENT;
 
-	wrmsr(MSR_IA32_EVNTSEL0, evntsel, 0);
-	Dprintk("setting IA32_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
-	wrmsr(MSR_IA32_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
+	wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
+	Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
+	wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 	evntsel |= P6_EVNTSEL0_ENABLE;
-	wrmsr(MSR_IA32_EVNTSEL0, evntsel, 0);
+	wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
+}
+
+static int __pminit setup_p4_watchdog(void)
+{
+	unsigned int misc_enable, dummy;
+
+	rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
+	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
+		return 0;
+
+	nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
+
+	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
+		clear_msr_range(0x3F1, 2);
+	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
+	   docs doesn't fully define it, so leave it alone for now. */
+	clear_msr_range(0x3A0, 31);
+	clear_msr_range(0x3C0, 6);
+	clear_msr_range(0x3C8, 6);
+	clear_msr_range(0x3E0, 2);
+	clear_msr_range(MSR_P4_CCCR0, 18);
+	clear_msr_range(MSR_P4_PERFCTR0, 18);
+
+	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
+	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
+	Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
+	wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0);
+	return 1;
+}
 
 void __pminit setup_apic_nmi_watchdog (void)
@@ -216,10 +283,18 @@ void __pminit setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (boot_cpu_data.x86 != 6)
-			return;
-		setup_p6_watchdog();
+		switch (boot_cpu_data.x86) {
+		case 6:
+			setup_p6_watchdog();
+			break;
+		case 15:
+			if (!setup_p4_watchdog())
+				return;
+			break;
+		default:
+			return;
+		}
 		break;
 	default:
 		return;
 	}
@@ -296,6 +371,18 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 		last_irq_sums[cpu] = sum;
 		alert_counter[cpu] = 0;
 	}
-	if (nmi_perfctr_msr)
+	if (nmi_perfctr_msr) {
+		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
+			/*
+			 * P4 quirks:
+			 * - An overflown perfctr will assert its interrupt
+			 *   until the OVF flag in its CCCR is cleared.
+			 * - LVTPC is masked on interrupt and must be
+			 *   unmasked by the LVTPC handler.
+			 */
+			wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0);
+			apic_write(APIC_LVTPC, APIC_DM_NMI);
+		}
 		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
+	}
 }
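For reference, a minimal userspace sketch, not part of the patch: it evaluates the reload value -(cpu_khz/nmi_hz*1000) that the P6, K7 and P4 paths above write into their performance counter. The counter is preloaded with the negative of the number of CPU clocks between two watchdog ticks, so it overflows and delivers the NMI roughly nmi_hz times per second. The cpu_khz and nmi_hz values below are assumed examples, not taken from the patch.

/* Standalone sketch (not part of the patch): the perfctr reload value. */
#include <stdio.h>

int main(void)
{
	unsigned long cpu_khz = 1700000;	/* assumed example: a 1.7 GHz P4 */
	unsigned long nmi_hz  = 1;		/* assumed: one watchdog tick per second */

	/* Low 32 bits of the value passed to wrmsr(): the counter starts this
	 * many counts below overflow, so it wraps after cpu_khz/nmi_hz*1000
	 * clocks, i.e. about once per 1/nmi_hz seconds. */
	unsigned int reload = -(cpu_khz / nmi_hz * 1000);

	printf("perfctr reload value: 0x%08x (%u clocks to overflow)\n",
	       reload, (unsigned int)(cpu_khz / nmi_hz * 1000));
	return 0;
}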
include/asm-i386/msr.h  (view file @ 775efacb)

@@ -48,8 +48,8 @@
 #define MSR_IA32_UCODE_WRITE		0x79
 #define MSR_IA32_UCODE_REV		0x8b
 
-#define MSR_IA32_PERFCTR0		0xc1
-#define MSR_IA32_PERFCTR1		0xc2
+#define MSR_P6_PERFCTR0		0xc1
+#define MSR_P6_PERFCTR1		0xc2
 
 #define MSR_IA32_BBL_CR_CTL		0x119