Commit 8942c2b7 authored Aug 18, 2016 by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up dependencies
Signed-off-by: Ingo Molnar <mingo@kernel.org>

parents bdfaa2ee 71e7bc2b
Showing 1 changed file with 68 additions and 27 deletions

kernel/events/core.c  +68 -27  (view file @ 8942c2b7)
@@ -242,18 +242,6 @@ static int event_function(void *info)
 	return ret;
 }
 
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
-	struct event_function_struct efs = {
-		.event = event,
-		.func = func,
-		.data = data,
-	};
-
-	int ret = event_function(&efs);
-	WARN_ON_ONCE(ret);
-}
-
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ static void event_function_call(struct perf_event *event, event_f func, void *data)
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	struct task_struct *task = READ_ONCE(ctx->task);
+	struct perf_event_context *task_ctx = NULL;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (task) {
+		if (task == TASK_TOMBSTONE)
+			return;
+
+		task_ctx = ctx;
+	}
+
+	perf_ctx_lock(cpuctx, task_ctx);
+
+	task = ctx->task;
+	if (task == TASK_TOMBSTONE)
+		goto unlock;
+
+	if (task) {
+		/*
+		 * We must be either inactive or active and the right task,
+		 * otherwise we're screwed, since we cannot IPI to somewhere
+		 * else.
+		 */
+		if (ctx->is_active) {
+			if (WARN_ON_ONCE(task != current))
+				goto unlock;
+			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+				goto unlock;
+		}
+	} else {
+		WARN_ON_ONCE(&cpuctx->ctx != ctx);
+	}
+
+	func(event, cpuctx, ctx, data);
+unlock:
+	perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 #define PERF_FLAG_ALL	(PERF_FLAG_FD_NO_GROUP |\
 			 PERF_FLAG_FD_OUTPUT  |\
 			 PERF_FLAG_PID_CGROUP |\
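The rewritten event_function_local() above first reads ctx->task without the lock, returns early if the owning task is already marked dead (TASK_TOMBSTONE), then takes perf_ctx_lock() and re-checks, because the task pointer can change right up until the lock is held. Below is a minimal user-space sketch of that check / lock / re-check pattern; struct ctx, TOMBSTONE and with_ctx() are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the lockless-check / lock / re-check pattern used by
 * event_function_local(). All names here are stand-ins, not kernel APIs.
 */
#include <pthread.h>

#define TOMBSTONE ((void *)-1L)	/* sentinel: the owner is going away */

struct ctx {
	pthread_mutex_t lock;
	void *owner;		/* may change until we hold the lock */
};

static void with_ctx(struct ctx *c, void (*fn)(struct ctx *))
{
	/* Optimistic lockless read: cheap early-out without the lock. */
	void *owner = __atomic_load_n(&c->owner, __ATOMIC_RELAXED);

	if (owner == TOMBSTONE)
		return;

	pthread_mutex_lock(&c->lock);
	/* Re-check under the lock: the owner may have changed meanwhile. */
	if (c->owner != TOMBSTONE)
		fn(c);
	pthread_mutex_unlock(&c->lock);
}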
@@ -3524,9 +3560,10 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.group = group,
 			.ret = 0,
 		};
-		smp_call_function_single(event->oncpu,
-					 __perf_event_read, &data, 1);
-		ret = data.ret;
+		ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+		/* The event must have been read from an online CPU: */
+		WARN_ON_ONCE(ret);
+		ret = ret ? : data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
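The new ret = ret ? : data.ret; uses the GNU C conditional with an omitted middle operand: x ? : y evaluates to x if x is non-zero (evaluating x only once), otherwise to y. Here that means an error from the smp_call_function_single() IPI takes precedence over the value the remote handler stored in data.ret. A small stand-alone illustration; the variable names are ours, not the kernel's:

/* GNU C "x ? : y" is equivalent to "x ? x : y", with x evaluated once. */
#include <assert.h>

int main(void)
{
	int ipi_err = 0;	/* smp_call_function_single() succeeded */
	int read_ret = -5;	/* result filled in by the remote handler */

	int ret = ipi_err ? : read_ret;
	assert(ret == -5);	/* no IPI error: use the read result */

	ipi_err = -6;		/* e.g. the target CPU was offline */
	ret = ipi_err ? : read_ret;
	assert(ret == -6);	/* the IPI error wins */

	return 0;
}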
@@ -6594,15 +6631,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	kfree(buf);
 }
 
-/*
- * Whether this @filter depends on a dynamic object which is not loaded
- * yet or its load addresses are not known.
- */
-static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
-{
-	return filter->filter && filter->inode;
-}
-
 /*
  * Check whether inode and address range match filter criteria.
  */
@@ -6664,6 +6692,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
 	struct perf_event_context *ctx;
 	int ctxn;
 
+	/*
+	 * Data tracing isn't supported yet and as such there is no need
+	 * to keep track of anything that isn't related to executable code:
+	 */
+	if (!(vma->vm_flags & VM_EXEC))
+		return;
+
 	rcu_read_lock();
 	for_each_task_context_nr(ctxn) {
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
@@ -7816,7 +7851,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 	list_for_each_entry(filter, &ifh->list, entry) {
 		event->addr_filters_offs[count] = 0;
 
-		if (perf_addr_filter_needs_mmap(filter))
+		/*
+		 * Adjust base offset if the filter is associated to a binary
+		 * that needs to be mapped:
+		 */
+		if (filter->inode)
 			event->addr_filters_offs[count] =
 				perf_addr_filter_apply(filter, mm);
 
@@ -7947,8 +7986,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
 			goto fail;
 		}
 
-		if (token == IF_SRC_FILE) {
-			filename = match_strdup(&args[2]);
+		if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+			int fpos = filter->range ? 2 : 1;
+
+			filename = match_strdup(&args[fpos]);
 			if (!filename) {
 				ret = -ENOMEM;
 				goto fail;
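The widened condition matters because the two file-backed patterns in this file's if_tokens table place the filename in different match_token() slots: the ranged IF_SRC_FILE pattern ("%u/%u@%s") has two numeric fields before the path, so the filename lands in args[2], while IF_SRC_FILEADDR ("%u@%s") has one, so it lands in args[1]; hence fpos = filter->range ? 2 : 1. A user-space analogy, with sscanf() standing in for the kernel's match_token()/match_strdup() machinery (the format strings and values here are illustrative):

/*
 * A ranged filter has two numeric fields before the path, an
 * address-only filter has one, so the path's position shifts.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long start, size;
	char path[64];

	/* like IF_SRC_FILE ("%u/%u@%s"): the filename is the 3rd field */
	assert(sscanf("1000/200@/bin/ls", "%lx/%lx@%63s",
		      &start, &size, path) == 3);
	assert(strcmp(path, "/bin/ls") == 0);

	/* like IF_SRC_FILEADDR ("%u@%s"): the filename is the 2nd field */
	assert(sscanf("1000@/bin/ls", "%lx@%63s", &start, path) == 2);
	assert(strcmp(path, "/bin/ls") == 0);

	return 0;
}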