Commit ad8537cd
Authored May 09, 2012 by Ingo Molnar
Merge branch 'perf/x86-ibs' into perf/core
Parents: 149936a0, fab06992
Showing 3 changed files with 438 additions and 7 deletions.
arch/x86/include/asm/msr-index.h (+5, -0)
arch/x86/include/asm/perf_event.h (+2, -0)
arch/x86/kernel/cpu/perf_event_amd_ibs.c (+431, -7)
arch/x86/include/asm/msr-index.h
@@ -134,6 +134,8 @@
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
#define MSR_AMD64_IBSFETCH_REG_COUNT 3
#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
#define MSR_AMD64_IBSOPCTL 0xc0011033
#define MSR_AMD64_IBSOPRIP 0xc0011034
#define MSR_AMD64_IBSOPDATA 0xc0011035
@@ -141,8 +143,11 @@
#define MSR_AMD64_IBSOPDATA3 0xc0011037
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSOP_REG_COUNT 7
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL 0xc0010200
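
The two new REG_COUNT/REG_MASK pairs describe the block of consecutive IBS MSRs behind each control register, as a count plus a bitmap of offsets (the bitmap is what perf_event_amd_ibs.c later walks with find_next_bit()). A standalone check of what the masks expand to:

#include <stdio.h>

#define MSR_AMD64_IBSFETCH_REG_COUNT    3
#define MSR_AMD64_IBSFETCH_REG_MASK     ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
#define MSR_AMD64_IBSOP_REG_COUNT       7
#define MSR_AMD64_IBSOP_REG_MASK        ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)

int main(void)
{
        /* 0x7: offsets 0..2 from IBSFETCHCTL (CTL, LINAD, PHYSAD) */
        printf("fetch mask: %#lx\n", MSR_AMD64_IBSFETCH_REG_MASK);
        /* 0x7f: offsets 0..6 from IBSOPCTL (CTL, RIP, DATA, DATA2,
         * DATA3, DCLINAD, DCPHYSAD) */
        printf("op mask:    %#lx\n", MSR_AMD64_IBSOP_REG_MASK);
        return 0;
}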
arch/x86/include/asm/perf_event.h
@@ -178,6 +178,8 @@ struct x86_pmu_capability {
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
/* IbsOpCtl bits */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -16,36 +16,460 @@ static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

-static struct pmu perf_ibs;

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK   (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK      IBS_OP_MAX_CNT
enum ibs_states {
        IBS_ENABLED     = 0,
        IBS_STARTED     = 1,
        IBS_STOPPING    = 2,

        IBS_MAX_STATES,
};

struct cpu_perf_ibs {
        struct perf_event       *event;
        unsigned long           state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
        struct pmu                      pmu;
        unsigned int                    msr;
        u64                             config_mask;
        u64                             cnt_mask;
        u64                             enable_mask;
        u64                             valid_mask;
        u64                             max_period;
        unsigned long                   offset_mask[1];
        int                             offset_max;
        struct cpu_perf_ibs __percpu    *pcpu;
        u64                             (*get_count)(u64 config);
};

struct perf_ibs_data {
        u32             size;
        union {
                u32     data[0];        /* data buffer starts here */
                u32     caps;
        };
        u64             regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *count)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int overflow = 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        if (unlikely(left < min))
                left = min;

        if (left > max)
                left = max;

        *count = (u64)left;

        return overflow;
}
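
The interplay of the two overflow branches and the min/max clamp is easiest to see with concrete numbers. A standalone rendition, with the only liberty being that local64_t becomes a plain s64:

#include <stdio.h>
#include <stdint.h>

/* Standalone rendition of the clamping in perf_event_set_period(). */
static int set_period(int64_t *period_left, int64_t period,
                      uint64_t min, uint64_t max, uint64_t *count)
{
        int64_t left = *period_left;
        int overflow = 0;

        if (left <= -period) {          /* way behind: skip forward */
                left = period;
                *period_left = left;
                overflow = 1;
        }
        if (left <= 0) {                /* normal overflow: re-arm */
                left += period;
                *period_left = left;
                overflow = 1;
        }
        if (left < (int64_t)min)        /* hardware minimum */
                left = min;
        if (left > (int64_t)max)        /* hardware maximum */
                left = max;

        *count = (uint64_t)left;
        return overflow;
}

int main(void)
{
        uint64_t count;
        int64_t left = -5;      /* counter overshot the period by 5 */

        int ovf = set_period(&left, 0x10000, 1 << 4, 0xFFFF0ULL, &count);
        printf("overflow=%d count=%#llx\n", ovf, (unsigned long long)count);
        return 0;       /* prints overflow=1 count=0xfffb */
}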
static  int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - width;
        u64 prev_raw_count;
        u64 delta;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                return 0;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return 1;
}
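
The shift pair is what makes wraparound of the narrow hardware counter come out right: both raw counts are placed in the top `width` bits, so the 64-bit subtraction wraps exactly where the hardware did. A worked example for the 20-bit IBS count fields (the width perf_ibs_event_update() passes below):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int shift = 64 - 20;            /* IBS count fields are 20 bits wide */
        uint64_t prev = 0xFFFFE;        /* old raw count, just before 2^20 */
        uint64_t cur = 0x00003;         /* new raw count, after wrapping */
        uint64_t delta;

        /* naive subtraction gives a huge bogus value: */
        printf("naive:   %#llx\n", (unsigned long long)(cur - prev));

        /* the kernel's trick: align both to bit 63, subtract, shift back */
        delta = (cur << shift) - (prev << shift);
        delta >>= shift;
        printf("shifted: %llu\n", (unsigned long long)delta);  /* 5 */
        return 0;
}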
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
        if (perf_ibs_fetch.pmu.type == type)
                return &perf_ibs_fetch;
        if (perf_ibs_op.pmu.type == type)
                return &perf_ibs_op;
        return NULL;
}
static int perf_ibs_init(struct perf_event *event)
{
-       if (perf_ibs.type != event->attr.type)
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs;
        u64 max_cnt, config;

        perf_ibs = get_ibs_pmu(event->attr.type);
        if (!perf_ibs)
                return -ENOENT;

        config = event->attr.config;
        if (config & ~perf_ibs->config_mask)
                return -EINVAL;

        if (hwc->sample_period) {
                if (config & perf_ibs->cnt_mask)
                        /* raw max_cnt may not be set */
                        return -EINVAL;
                if (hwc->sample_period & 0x0f)
                        /* lower 4 bits can not be set in ibs max cnt */
                        return -EINVAL;
        } else {
                max_cnt = config & perf_ibs->cnt_mask;
                config &= ~perf_ibs->cnt_mask;
                event->attr.sample_period = max_cnt << 4;
                hwc->sample_period = event->attr.sample_period;
        }

        if (!hwc->sample_period)
                return -EINVAL;

        hwc->config_base = perf_ibs->msr;
        hwc->config = config;

        return 0;
}
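
The << 4 and the low-4-bits check encode the same hardware rule: the MaxCnt fields store the sample period divided by 16, so only multiples of 16 are representable. Both directions of the mapping, using the IBS_OP_MAX_CNT value (0x0000FFFFULL in perf_event.h):

#include <stdio.h>
#include <stdint.h>

#define IBS_OP_MAX_CNT  0x0000FFFFULL   /* 16-bit max-count field */

int main(void)
{
        /* raw config -> sample_period, as in the !hwc->sample_period branch */
        uint64_t config = 0x1000;                       /* IbsOpMaxCnt = 0x1000 */
        uint64_t max_cnt = config & IBS_OP_MAX_CNT;
        printf("sample_period = %#llx\n",
               (unsigned long long)(max_cnt << 4));     /* 0x10000 ops */

        /* a period with any of the low 4 bits set is rejected by perf_ibs_init() */
        uint64_t period = 0x10008;
        printf("period %#llx valid: %s\n", (unsigned long long)period,
               (period & 0x0f) ? "no" : "yes");
        return 0;
}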
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
                               struct hw_perf_event *hwc, u64 *period)
{
        int ret;

        /* ignore lower 4 bits in min count: */
        ret = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
        local64_set(&hwc->prev_count, 0);

        return ret;
}
static u64 get_ibs_fetch_count(u64 config)
{
        return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
        return (config & IBS_OP_CUR_CNT) >> 32;
}
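
Both helpers extract a count field from the control register they are handed, but keep the result in the same x16 scale as sample_period: IbsFetchCnt sits at bits 16..31 (IBS_FETCH_CNT is 0xFFFF0000ULL in perf_event.h) yet is shifted right by only 12, and the IBS_OP_CUR_CNT mask (bits 36..51) already ignores the bottom four count bits, as the comment in the perf_event.h hunk above notes. For example:

#include <stdio.h>
#include <stdint.h>

#define IBS_FETCH_CNT   0xFFFF0000ULL           /* IbsFetchCnt, bits 16..31 */
#define IBS_OP_CUR_CNT  (0xFFFF0ULL<<32)        /* IbsOpCurCnt, low 4 bits ignored */

int main(void)
{
        uint64_t fetch_ctl = 0x1234ULL << 16;   /* IbsFetchCnt = 0x1234 */
        uint64_t op_ctl = 0x56780ULL << 32;     /* IbsOpCurCnt = 0x56780 */

        /* >>12 instead of >>16 keeps the <<4 scaling of sample periods */
        printf("fetch count: %#llx\n",
               (unsigned long long)((fetch_ctl & IBS_FETCH_CNT) >> 12));
        printf("op count:    %#llx\n",
               (unsigned long long)((op_ctl & IBS_OP_CUR_CNT) >> 32));
        return 0;
}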
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
                      u64 config)
{
        u64 count = perf_ibs->get_count(config);

        while (!perf_event_try_update(event, count, 20)) {
                rdmsrl(event->hw.config_base, config);
                count = perf_ibs->get_count(config);
        }
}
/* Note: The enable mask must be encoded in the config argument. */
static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
{
        wrmsrl(hwc->config_base, hwc->config | config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 config;

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;

        perf_ibs_set_period(perf_ibs, hwc, &config);
        config = (config >> 4) | perf_ibs->enable_mask;
        set_bit(IBS_STARTED, pcpu->state);
        perf_ibs_enable_event(hwc, config);

        perf_event_update_userpage(event);
}
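
Note the config >> 4 when arming (it reappears in the IRQ handler's re-arm path): perf_ibs_set_period() hands back the period in event units, so the bottom four bits are dropped again to form the hardware MaxCnt field before the enable bit is OR-ed in. A minimal sketch of what lands in IbsOpCtl for a 0x10000 period, using IBS_OP_ENABLE from the perf_event.h hunk above:

#include <stdio.h>
#include <stdint.h>

#define IBS_OP_ENABLE   (1ULL<<17)

int main(void)
{
        uint64_t period = 0x10000;      /* from perf_ibs_set_period() */
        uint64_t config = (period >> 4) | IBS_OP_ENABLE;

        printf("IbsOpCtl = %#llx\n", (unsigned long long)config);
        /* -> 0x21000: IbsOpMaxCnt = 0x1000, IbsOpEn set */
        return 0;
}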
static void perf_ibs_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 val;
        int stopping;

        stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

        if (!stopping && (hwc->state & PERF_HES_UPTODATE))
                return;

        rdmsrl(hwc->config_base, val);

        if (stopping) {
                set_bit(IBS_STOPPING, pcpu->state);
                val &= ~perf_ibs->enable_mask;
                wrmsrl(hwc->config_base, val);
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        perf_ibs_event_update(perf_ibs, event, val);
        hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_ENABLED, pcpu->state))
                return -ENOSPC;

        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        pcpu->event = event;

        if (flags & PERF_EF_START)
                perf_ibs_start(event, PERF_EF_RELOAD);

        return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
                return;

        perf_ibs_stop(event, PERF_EF_UPDATE);

        pcpu->event = NULL;

        perf_event_update_userpage(event);
}
-static struct pmu perf_ibs = {
-       .event_init     = perf_ibs_init,
-       .add            = perf_ibs_add,
-       .del            = perf_ibs_del,

static void perf_ibs_read(struct perf_event *event) { }
static struct perf_ibs perf_ibs_fetch = {
        .pmu = {
                .task_ctx_nr    = perf_invalid_context,

                .event_init     = perf_ibs_init,
                .add            = perf_ibs_add,
                .del            = perf_ibs_del,
                .start          = perf_ibs_start,
                .stop           = perf_ibs_stop,
                .read           = perf_ibs_read,
        },
        .msr                    = MSR_AMD64_IBSFETCHCTL,
        .config_mask            = IBS_FETCH_CONFIG_MASK,
        .cnt_mask               = IBS_FETCH_MAX_CNT,
        .enable_mask            = IBS_FETCH_ENABLE,
        .valid_mask             = IBS_FETCH_VAL,
        .max_period             = IBS_FETCH_MAX_CNT << 4,
        .offset_mask            = { MSR_AMD64_IBSFETCH_REG_MASK },
        .offset_max             = MSR_AMD64_IBSFETCH_REG_COUNT,
        .get_count              = get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
        .pmu = {
                .task_ctx_nr    = perf_invalid_context,

                .event_init     = perf_ibs_init,
                .add            = perf_ibs_add,
                .del            = perf_ibs_del,
                .start          = perf_ibs_start,
                .stop           = perf_ibs_stop,
                .read           = perf_ibs_read,
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
        .cnt_mask               = IBS_OP_MAX_CNT,
        .enable_mask            = IBS_OP_ENABLE,
        .valid_mask             = IBS_OP_VAL,
        .max_period             = IBS_OP_MAX_CNT << 4,
        .offset_mask            = { MSR_AMD64_IBSOP_REG_MASK },
        .offset_max             = MSR_AMD64_IBSOP_REG_COUNT,
        .get_count              = get_ibs_op_count,
};
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
        struct hw_perf_event *hwc = &event->hw;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        struct perf_ibs_data ibs_data;
        int offset, size, overflow, reenable;
        unsigned int msr;
        u64 *buf, config;

        if (!test_bit(IBS_STARTED, pcpu->state)) {
                /* Catch spurious interrupts after stopping IBS: */
                if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
                        return 0;
                rdmsrl(perf_ibs->msr, *ibs_data.regs);
                return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
        }

        msr = hwc->config_base;
        buf = ibs_data.regs;
        rdmsrl(msr, *buf);
        if (!(*buf++ & perf_ibs->valid_mask))
                return 0;

        perf_sample_data_init(&data, 0);
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                ibs_data.caps = ibs_caps;
                size = 1;
                offset = 1;
                do {
                        rdmsrl(msr + offset, *buf++);
                        size++;
                        offset = find_next_bit(perf_ibs->offset_mask,
                                               perf_ibs->offset_max,
                                               offset + 1);
                } while (offset < perf_ibs->offset_max);
                raw.size = sizeof(u32) + sizeof(u64) * size;
                raw.data = ibs_data.data;
                data.raw = &raw;
        }

        regs = *iregs; /* XXX: update ip from ibs sample */

        /*
         * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), not
         * supported in all cpus. As this triggered an interrupt, we
         * set the current count to the max count.
         */
        config = ibs_data.regs[0];
        if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
                config &= ~IBS_OP_CUR_CNT;
                config |= (config & IBS_OP_MAX_CNT) << 36;
        }

        perf_ibs_event_update(perf_ibs, event, config);
        overflow = perf_ibs_set_period(perf_ibs, hwc, &config);
        reenable = !(overflow && perf_event_overflow(event, &data, &regs));
        config = (config >> 4) | (reenable ? perf_ibs->enable_mask : 0);
        perf_ibs_enable_event(hwc, config);

        perf_event_update_userpage(event);

        return 1;
}
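
When PERF_SAMPLE_RAW is requested, the handler streams the IBS register block into the sample: a u32 caps word followed immediately by the sampled MSR values (control register first), with raw.size = sizeof(u32) + size * sizeof(u64). A consumer-side sketch of that layout (the dump_ibs_sample() helper is hypothetical; the layout follows struct perf_ibs_data and the raw.size computation above):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical decode helper for the raw IBS payload: a u32 caps word
 * immediately followed by the MSR values, with no padding between. */
static void dump_ibs_sample(const unsigned char *raw, uint32_t raw_size)
{
        uint32_t caps;
        uint32_t nregs = (raw_size - sizeof(caps)) / sizeof(uint64_t);

        memcpy(&caps, raw, sizeof(caps));
        raw += sizeof(caps);

        printf("caps=%#x, %u regs\n", caps, nregs);
        for (uint32_t i = 0; i < nregs; i++) {
                uint64_t reg;

                memcpy(&reg, raw + i * sizeof(reg), sizeof(reg));
                printf("  reg[%u] = %#llx\n", i, (unsigned long long)reg);
        }
}

int main(void)
{
        /* fake payload: caps plus two register values */
        unsigned char buf[4 + 2 * 8] = { 0 };
        uint64_t regs[2] = { 0x2ULL << 48, 0x123456789abcULL };
        uint32_t caps = 0x1ff;

        memcpy(buf, &caps, sizeof(caps));
        memcpy(buf + 4, regs, sizeof(regs));
        dump_ibs_sample(buf, sizeof(buf));
        return 0;
}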
static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        int handled = 0;

        handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
        handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
        struct cpu_perf_ibs __percpu *pcpu;
        int ret;

        pcpu = alloc_percpu(struct cpu_perf_ibs);
        if (!pcpu)
                return -ENOMEM;

        perf_ibs->pcpu = pcpu;

        ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
        if (ret) {
                perf_ibs->pcpu = NULL;
                free_percpu(pcpu);
        }

        return ret;
}
static __init int perf_event_ibs_init(void)
{
        if (!ibs_caps)
                return -ENODEV; /* ibs not supported by the cpu */

-       perf_pmu_register(&perf_ibs, "ibs", -1);
        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
        perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
        register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
        printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

        return 0;
}
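
After perf_event_ibs_init() runs, ibs_fetch and ibs_op appear as dynamic PMUs under /sys/bus/event_source/devices/, so their types can be read from sysfs and passed to perf_event_open(2). A minimal userspace sketch, assuming a kernel with this code on an IBS-capable AMD CPU (error handling mostly trimmed; per-task contexts are invalid for these PMUs, hence pid = -1 with a fixed CPU):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        FILE *f;
        int type, fd;

        /* dynamic PMU type allocated by perf_pmu_register(..., "ibs_op", -1) */
        f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = 0;                /* must stay within IBS_OP_CONFIG_MASK */
        attr.sample_period = 0x10000;   /* low 4 bits must be zero */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_RAW;

        fd = syscall(__NR_perf_event_open, &attr, -1 /* any task */,
                     0 /* cpu 0 */, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        printf("ibs_op event opened, fd=%d\n", fd);
        close(fd);
        return 0;
}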