Kirill Smelkov / linux · Commits

Commit 50069218 authored Sep 10, 2016 by Ingo Molnar
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 14520d63 8ef9b845
Showing 6 changed files with 168 additions and 51 deletions.
arch/x86/events/amd/uncore.c    +18  -4
arch/x86/events/intel/bts.c     +94  -29
arch/x86/events/intel/cqm.c     +9   -0
arch/x86/events/intel/ds.c      +11  -8
kernel/events/core.c            +25  -6
kernel/events/ring_buffer.c     +11  -4
arch/x86/events/amd/uncore.c
@@ -29,6 +29,8 @@
 #define COUNTER_SHIFT   16

+static HLIST_HEAD(uncore_unused_list);
+
 struct amd_uncore {
        int id;
        int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
        cpumask_t *active_mask;
        struct pmu *pmu;
        struct perf_event *events[MAX_COUNTERS];
-       struct amd_uncore *free_when_cpu_online;
+       struct hlist_node node;
 };

 static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
                uncore_nb->active_mask = &amd_nb_active_mask;
                uncore_nb->pmu = &amd_nb_pmu;
+               uncore_nb->id = -1;
                *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
                uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
                uncore_l2->active_mask = &amd_l2_active_mask;
                uncore_l2->pmu = &amd_l2_pmu;
+               uncore_l2->id = -1;
                *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
        }
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
                        continue;

                if (this->id == that->id) {
-                       that->free_when_cpu_online = this;
+                       hlist_add_head(&this->node, &uncore_unused_list);
                        this = that;
                        break;
                }
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
        return 0;
 }

+static void uncore_clean_online(void)
+{
+       struct amd_uncore *uncore;
+       struct hlist_node *n;
+
+       hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+               hlist_del(&uncore->node);
+               kfree(uncore);
+       }
+}
+
 static void uncore_online(unsigned int cpu,
                          struct amd_uncore * __percpu *uncores)
 {
        struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

-       kfree(uncore->free_when_cpu_online);
-       uncore->free_when_cpu_online = NULL;
+       uncore_clean_online();

        if (cpu == uncore->cpu)
                cpumask_set_cpu(cpu, uncore->active_mask);
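The uncore.c change above replaces the per-object free_when_cpu_online pointer with a shared uncore_unused_list that uncore_clean_online() drains once a CPU has come fully online. Below is a minimal user-space sketch of the same deferred-free pattern; it uses a plain singly linked list instead of the kernel's hlist API, and all names in it are hypothetical. It also assumes a single thread, roughly as the kernel code can rely on serialized hotplug callbacks.

#include <stdlib.h>

/* Hypothetical stand-in for struct amd_uncore; freeing is deferred by
 * parking the object on a shared "unused" list instead of freeing it
 * in a context where that would be awkward. */
struct box {
        int id;
        struct box *next;               /* plays the role of hlist_node node */
};

static struct box *unused_list;         /* plays the role of uncore_unused_list */

/* Called where freeing is not wanted yet: just park the object. */
static void defer_free(struct box *b)
{
        b->next = unused_list;
        unused_list = b;
}

/* Called later, from a context where freeing is fine: drain the list. */
static void clean_unused(void)
{
        struct box *b, *next;

        for (b = unused_list; b; b = next) {
                next = b->next;
                free(b);
        }
        unused_list = NULL;
}

int main(void)
{
        struct box *b = malloc(sizeof(*b));

        if (!b)
                return 1;
        b->id = 1;
        defer_free(b);          /* e.g. in the "CPU starting" path */
        clean_unused();         /* e.g. in the "CPU online" path */
        return 0;
}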
arch/x86/events/intel/bts.c
@@ -31,7 +31,17 @@
 struct bts_ctx {
        struct perf_output_handle      handle;
        struct debug_store             ds_back;
-       int                            started;
+       int                            state;
+};
+
+/* BTS context states: */
+enum {
+       /* no ongoing AUX transactions */
+       BTS_STATE_STOPPED = 0,
+       /* AUX transaction is on, BTS tracing is disabled */
+       BTS_STATE_INACTIVE,
+       /* AUX transaction is on, BTS tracing is running */
+       BTS_STATE_ACTIVE,
 };

 static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)

+static int
+bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
+
+/*
+ * Ordering PMU callbacks wrt themselves and the PMI is done by means
+ * of bts::state, which:
+ *  - is set when bts::handle::event is valid, that is, between
+ *    perf_aux_output_begin() and perf_aux_output_end();
+ *  - is zero otherwise;
+ *  - is ordered against bts::handle::event with a compiler barrier.
+ */
+
 static void __bts_event_start(struct perf_event *event)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
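The comment added above spells out the ordering contract: bts::state is only set while bts::handle::event is valid, and the two are ordered with a compiler barrier. A minimal user-space sketch of that publish/consume pattern follows; the READ_ONCE()/WRITE_ONCE()/barrier() macros below are simplified stand-ins for the kernel's (enough here because writer and PMI-style reader run on the same CPU), and all other names are hypothetical.

#include <stdio.h>

/* Simplified stand-ins for the kernel macros: volatile accesses plus a
 * compiler barrier to prevent reordering by the compiler. */
#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))
#define barrier()        __asm__ __volatile__("" ::: "memory")

enum { STATE_STOPPED = 0, STATE_INACTIVE, STATE_ACTIVE };

static struct {
        void *event;    /* stands in for bts::handle::event */
        int state;      /* stands in for bts::state */
} ctx;

/* Publisher: make the payload visible before the state says "valid". */
static void start(void *event)
{
        ctx.event = event;
        barrier();                              /* event before state */
        WRITE_ONCE(ctx.state, STATE_ACTIVE);
}

/* Consumer (e.g. a PMI): check the state first, only then use the payload. */
static void interrupt(void)
{
        if (READ_ONCE(ctx.state) == STATE_STOPPED)
                return;                         /* nothing valid to look at */
        barrier();                              /* state before event */
        printf("event %p is live\n", ctx.event);
}

int main(void)
{
        int dummy;

        start(&dummy);
        interrupt();
        return 0;
}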
@@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
        /*
         * local barrier to make sure that ds configuration made it
-        * before we enable BTS
+        * before we enable BTS and bts::state goes ACTIVE
         */
        wmb();

+       /* INACTIVE/STOPPED -> ACTIVE */
+       WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
+
        intel_pmu_enable_bts(config);
 }
@@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
        __bts_event_start(event);

-       /* PMI handler: this counter is running and likely generating PMIs */
-       ACCESS_ONCE(bts->started) = 1;
-
        return;

 fail_end_stop:
@@ -263,30 +282,34 @@ static void bts_event_start(struct perf_event *event, int flags)
        event->hw.state = PERF_HES_STOPPED;
 }

-static void __bts_event_stop(struct perf_event *event)
+static void __bts_event_stop(struct perf_event *event, int state)
 {
+       struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+       /* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
+       WRITE_ONCE(bts->state, state);
+
        /*
         * No extra synchronization is mandated by the documentation to have
         * BTS data stores globally visible.
         */
        intel_pmu_disable_bts();
-
-       if (event->hw.state & PERF_HES_STOPPED)
-               return;
-
-       ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
 }

 static void bts_event_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
-       struct bts_buffer *buf = perf_get_aux(&bts->handle);
+       struct bts_buffer *buf = NULL;
+       int state = READ_ONCE(bts->state);

-       /* PMI handler: don't restart this counter */
-       ACCESS_ONCE(bts->started) = 0;
+       if (state == BTS_STATE_ACTIVE)
+               __bts_event_stop(event, BTS_STATE_STOPPED);

-       __bts_event_stop(event);
+       if (state != BTS_STATE_STOPPED)
+               buf = perf_get_aux(&bts->handle);
+
+       event->hw.state |= PERF_HES_STOPPED;

        if (flags & PERF_EF_UPDATE) {
                bts_update(bts);
@@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
                        bts->handle.head =
                                local_xchg(&buf->data_size,
                                           buf->nr_pages << PAGE_SHIFT);
+
                        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                                            !!local_xchg(&buf->lost, 0));
                }
@@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
 void intel_bts_enable_local(void)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+       int state = READ_ONCE(bts->state);
+
+       /*
+        * Here we transition from INACTIVE to ACTIVE;
+        * if we instead are STOPPED from the interrupt handler,
+        * stay that way. Can't be ACTIVE here though.
+        */
+       if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
+               return;
+
+       if (state == BTS_STATE_STOPPED)
+               return;

-       if (bts->handle.event && bts->started)
+       if (bts->handle.event)
                __bts_event_start(bts->handle.event);
 }
@@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
 {
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

+       /*
+        * Here we transition from ACTIVE to INACTIVE;
+        * do nothing for STOPPED or INACTIVE.
+        */
+       if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
+               return;
+
        if (bts->handle.event)
-               __bts_event_stop(bts->handle.event);
+               __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
 }

 static int
@@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
                return 0;

        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
-       if (WARN_ON_ONCE(head != local_read(&buf->head)))
-               return -EINVAL;

        phys = &buf->buf[buf->cur_buf];
        space = phys->offset + phys->displacement + phys->size - head;
@@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
 int intel_bts_interrupt(void)
 {
+       struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct perf_event *event = bts->handle.event;
        struct bts_buffer *buf;
        s64 old_head;
-       int err;
+       int err = -ENOSPC, handled = 0;

-       if (!event || !bts->started)
-               return 0;
+       /*
+        * The only surefire way of knowing if this NMI is ours is by checking
+        * the write ptr against the PMI threshold.
+        */
+       if (ds->bts_index >= ds->bts_interrupt_threshold)
+               handled = 1;
+
+       /*
+        * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
+        * so we can only be INACTIVE or STOPPED
+        */
+       if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
+               return handled;

        buf = perf_get_aux(&bts->handle);
+       if (!buf)
+               return handled;
+
        /*
         * Skip snapshot counters: they don't use the interrupt, but
         * there's no other way of telling, because the pointer will
         * keep moving
         */
-       if (!buf || buf->snapshot)
+       if (buf->snapshot)
                return 0;

        old_head = local_read(&buf->head);
@@ -426,18 +482,27 @@ int intel_bts_interrupt(void)

        /* no new data */
        if (old_head == local_read(&buf->head))
-               return 0;
+               return handled;

        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
                            !!local_xchg(&buf->lost, 0));

        buf = perf_aux_output_begin(&bts->handle, event);
-       if (!buf)
-               return 1;
+       if (buf)
+               err = bts_buffer_reset(buf, &bts->handle);

-       err = bts_buffer_reset(buf, &bts->handle);
-       if (err)
-               perf_aux_output_end(&bts->handle, 0, false);
+       if (err) {
+               WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
+
+               if (buf) {
+                       /*
+                        * BTS_STATE_STOPPED should be visible before
+                        * cleared handle::event
+                        */
+                       barrier();
+                       perf_aux_output_end(&bts->handle, 0, false);
+               }
+       }

        return 1;
 }
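With the state machine in place, intel_bts_interrupt() claims the NMI purely by comparing the DS write index against the PMI threshold, and only touches the AUX buffer when the context is not STOPPED. The toy function below is a hypothetical user-space model of just that claim decision, with made-up values; it is not the kernel code.

#include <stdio.h>

/* Toy model of the new claim logic: the only reliable sign that the NMI
 * was for BTS is the DS write index having crossed the PMI threshold;
 * the tracing state then only decides whether to go on and drain/re-arm. */
static int toy_bts_interrupt(unsigned long bts_index,
                             unsigned long threshold, int stopped)
{
        int handled = (bts_index >= threshold);

        if (stopped)
                return handled;         /* claim it (or not), touch nothing */

        /* ... drain the AUX buffer and re-arm tracing here ... */
        return 1;
}

int main(void)
{
        printf("%d\n", toy_bts_interrupt(4096, 3072, 1));      /* ours: 1 */
        printf("%d\n", toy_bts_interrupt(100, 3072, 1));       /* not ours: 0 */
        return 0;
}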
arch/x86/events/intel/cqm.c
@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
 static void init_mbm_sample(u32 rmid, u32 evt_type);
 static void __intel_mbm_event_count(void *info);

+static bool is_cqm_event(int e)
+{
+       return (e == QOS_L3_OCCUP_EVENT_ID);
+}
+
 static bool is_mbm_event(int e)
 {
        return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
            (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
                return -EINVAL;

+       if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
+           (is_mbm_event(event->attr.config) && !mbm_enabled))
+               return -EINVAL;
+
        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
arch/x86/events/intel/ds.c
@@ -1312,18 +1312,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                struct pebs_record_nhm *p = at;
                u64 pebs_status;

-               /* PEBS v3 has accurate status bits */
+               pebs_status = p->status & cpuc->pebs_enabled;
+               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+
+               /* PEBS v3 has more accurate status bits */
                if (x86_pmu.intel_cap.pebs_format >= 3) {
-                       for_each_set_bit(bit, (unsigned long *)&p->status,
-                                        MAX_PEBS_EVENTS)
+                       for_each_set_bit(bit, (unsigned long *)&pebs_status,
+                                        x86_pmu.max_pebs_events)
                                counts[bit]++;

                        continue;
                }

-               pebs_status = p->status & cpuc->pebs_enabled;
-               pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
-
                /*
                 * On some CPUs the PEBS status can be zero when PEBS is
                 * racing with clearing of GLOBAL_STATUS.
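The ds.c change hoists the masking of the record's status word in front of the PEBS v3 branch, so that even on v3 only counters that are both reported in the record and enabled in the PMU, within max_pebs_events bits, get counted. A small self-contained illustration of that masking, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t record_status  = 0x0f;         /* status bits in the record */
        uint64_t pebs_enabled   = 0x05;         /* counters actually enabled */
        unsigned int max_events = 4;            /* stands in for max_pebs_events */
        int counts[64] = { 0 };
        uint64_t status;
        unsigned int bit;

        /* Keep only counters that are both reported and enabled, and never
         * more than max_events of them. */
        status  = record_status & pebs_enabled;
        status &= (1ULL << max_events) - 1;

        for (bit = 0; bit < max_events; bit++)
                if (status & (1ULL << bit))
                        counts[bit]++;

        for (bit = 0; bit < max_events; bit++)
                printf("counter %u: %d\n", bit, counts[bit]);
        return 0;
}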
@@ -1371,8 +1371,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                        continue;

                event = cpuc->events[bit];
-               WARN_ON_ONCE(!event);
-               WARN_ON_ONCE(!event->attr.precise_ip);
+               if (WARN_ON_ONCE(!event))
+                       continue;
+
+               if (WARN_ON_ONCE(!event->attr.precise_ip))
+                       continue;

                /* log dropped samples number */
                if (error[bit])
kernel/events/core.c
@@ -2497,11 +2497,11 @@ static int __perf_event_stop(void *info)
        return 0;
 }

-static int perf_event_restart(struct perf_event *event)
+static int perf_event_stop(struct perf_event *event, int restart)
 {
        struct stop_event_data sd = {
                .event          = event,
-               .restart        = 1,
+               .restart        = restart,
        };
        int ret = 0;
@@ -4874,6 +4874,19 @@ static void ring_buffer_attach(struct perf_event *event,
                spin_unlock_irqrestore(&rb->event_lock, flags);
        }

+       /*
+        * Avoid racing with perf_mmap_close(AUX): stop the event
+        * before swizzling the event::rb pointer; if it's getting
+        * unmapped, its aux_mmap_count will be 0 and it won't
+        * restart. See the comment in __perf_pmu_output_stop().
+        *
+        * Data will inevitably be lost when set_output is done in
+        * mid-air, but then again, whoever does it like this is
+        * not in for the data anyway.
+        */
+       if (has_aux(event))
+               perf_event_stop(event, 0);
+
        rcu_assign_pointer(event->rb, rb);

        if (old_rb) {
@@ -6150,7 +6163,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
        raw_spin_unlock_irqrestore(&ifh->lock, flags);

        if (restart)
-               perf_event_restart(event);
+               perf_event_stop(event, 1);
 }

 void perf_event_exec(void)
@@ -6194,7 +6207,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
        /*
         * In case of inheritance, it will be the parent that links to the
-        * ring-buffer, but it will be the child that's actually using it:
+        * ring-buffer, but it will be the child that's actually using it.
+        *
+        * We are using event::rb to determine if the event should be stopped,
+        * however this may race with ring_buffer_attach() (through set_output),
+        * which will make us skip the event that actually needs to be stopped.
+        * So ring_buffer_attach() has to stop an aux event before re-assigning
+        * its rb pointer.
         */
        if (rcu_dereference(parent->rb) == rb)
                ro->err = __perf_event_stop(&sd);
@@ -6708,7 +6727,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
        raw_spin_unlock_irqrestore(&ifh->lock, flags);

        if (restart)
-               perf_event_restart(event);
+               perf_event_stop(event, 1);
 }

 /*
@@ -7897,7 +7916,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        mmput(mm);

 restart:
-       perf_event_restart(event);
+       perf_event_stop(event, 1);
 }

 /*
kernel/events/ring_buffer.c
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
        if (!rb)
                return NULL;

-       if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+       if (!rb_has_aux(rb))
                goto err;

        /*
-        * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
-        * the aux buffer is in perf_mmap_close(), about to get freed.
+        * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
+        * about to get freed, so we leave immediately.
+        *
+        * Checking rb::aux_mmap_count and rb::refcount has to be done in
+        * the same order, see perf_mmap_close. Otherwise we end up freeing
+        * aux pages in this path, which is a bug, because in_atomic().
         */
        if (!atomic_read(&rb->aux_mmap_count))
-               goto err_put;
+               goto err;
+
+       if (!atomic_inc_not_zero(&rb->aux_refcount))
+               goto err;

        /*
         * Nesting is not supported for AUX area, make sure nested
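The ring_buffer.c change splits the old combined test so that rb::aux_mmap_count is inspected before rb::aux_refcount is taken, mirroring the order used by perf_mmap_close() and ensuring this atomic-context path never ends up owning the final reference and freeing the AUX pages. A small user-space sketch of the same check-before-acquire ordering, using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for rb::aux_mmap_count and rb::aux_refcount. */
static atomic_int mmap_count = 1;
static atomic_int refcount   = 1;

/* Take a reference only if the count is not already zero,
 * roughly what atomic_inc_not_zero() does. */
static bool get_ref(atomic_int *r)
{
        int old = atomic_load(r);

        while (old != 0)
                if (atomic_compare_exchange_weak(r, &old, old + 1))
                        return true;
        return false;
}

/* Sketch of the reordered checks in perf_aux_output_begin(). */
static bool begin(void)
{
        /* Look at the mmap count first: if it already dropped to zero, the
         * buffer is being torn down, so bail out before ever taking a
         * reference, and therefore before ever owning the last put. */
        if (atomic_load(&mmap_count) == 0)
                return false;

        /* Only now try to pin the buffer. */
        return get_ref(&refcount);
}

int main(void)
{
        printf("begin: %s\n", begin() ? "ok" : "bailed out");
        return 0;
}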