Commit d652f4bb authored Mar 28, 2017 by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: e3a6a624, a01851fa
Showing 3 changed files with 63 additions and 19 deletions:

  arch/x86/events/core.c     +14  -2
  kernel/events/core.c       +48  -16
  tools/perf/util/symbol.c    +1  -1
arch/x86/events/core.c

@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)

 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }

 static void x86_pmu_event_mapped(struct perf_event *event)

@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)

 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;

+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm. Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write. If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
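The guard above relies on the 0 -> 1 transition of perf_rdpmc_allowed being the only point where the expensive cross-CPU CR4.PCE update runs; later mappings in the same mm only bump the counter. A minimal user-space sketch of that "first reference does the broadcast" pattern, with C11 atomics standing in for the kernel's atomic_t and a stub in place of on_each_cpu_mask(); broadcast_enable() and event_mapped() are invented names for illustration, not kernel API:

/* Illustrative sketch only, not kernel code: a shared counter where the
 * 0 -> 1 transition performs a one-time, expensive "broadcast", mirroring
 * how x86_pmu_event_mapped() only calls on_each_cpu_mask() for the first
 * rdpmc-enabled mapping in an mm. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rdpmc_allowed;	/* stands in for mm->context.perf_rdpmc_allowed */

static void broadcast_enable(void)	/* stands in for on_each_cpu_mask(..., refresh_pce, ...) */
{
	puts("propagating CR4.PCE-like state to all CPUs (expensive, done once)");
}

static void event_mapped(void)
{
	/* atomic_fetch_add() returns the old value, so old == 0 is the
	 * 0 -> 1 transition; the kernel expresses the same thing as
	 * atomic_inc_return(...) == 1. */
	if (atomic_fetch_add(&rdpmc_allowed, 1) == 0)
		broadcast_enable();
}

int main(void)
{
	event_mapped();		/* first mapping: broadcasts */
	event_mapped();		/* later mappings: just count */
	printf("mappings: %d\n", atomic_load(&rdpmc_allowed));
	return 0;
}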
kernel/events/core.c

@@ -4261,7 +4261,7 @@ int perf_event_release_kernel(struct perf_event *event)

 	raw_spin_lock_irq(&ctx->lock);
 	/*
-	 * Mark this even as STATE_DEAD, there is no external reference to it
+	 * Mark this event as STATE_DEAD, there is no external reference to it
 	 * anymore.
 	 *
 	 * Anybody acquiring event->child_mutex after the below loop _must_
@@ -10556,21 +10556,22 @@ void perf_event_free_task(struct task_struct *task)

 			continue;

 		mutex_lock(&ctx->mutex);
-again:
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-			perf_free_event(event, ctx);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);

-		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+		list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
 			perf_free_event(event, ctx);

-		if (!list_empty(&ctx->pinned_groups) ||
-		    !list_empty(&ctx->flexible_groups))
-			goto again;
-
 		mutex_unlock(&ctx->mutex);
 		put_ctx(ctx);
 	}
 }
@@ -10608,7 +10609,12 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)

 }

 /*
- * inherit a event from parent task to child task:
+ * Inherit a event from parent task to child task.
+ *
+ * Returns:
+ *  - valid pointer on success
+ *  - NULL for orphaned events
+ *  - IS_ERR() on error
  */
 static struct perf_event *
 inherit_event(struct perf_event *parent_event,
@@ -10702,6 +10708,16 @@ inherit_event(struct perf_event *parent_event,

 	return child_event;
 }

+/*
+ * Inherits an event group.
+ *
+ * This will quietly suppress orphaned events; !inherit_event() is not an error.
+ * This matches with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int inherit_group(struct perf_event *parent_event,
 	      struct task_struct *parent,
 	      struct perf_event_context *parent_ctx,
@@ -10716,6 +10732,11 @@ static int inherit_group(struct perf_event *parent_event,

 				 child, NULL, child_ctx);
 	if (IS_ERR(leader))
 		return PTR_ERR(leader);
+	/*
+	 * @leader can be NULL here because of is_orphaned_event(). In this
+	 * case inherit_event() will create individual events, similar to what
+	 * perf_group_detach() would do anyway.
+	 */
 	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					  child, leader, child_ctx);
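The return convention documented in these new comments (valid pointer on success, NULL for an orphaned event, an encoded errno otherwise) is the kernel's usual ERR_PTR() idiom, and the caller has to tell all three cases apart, exactly as inherit_group() does with IS_ERR() and the NULL-leader comment above. A self-contained user-space sketch of the same tri-state handling, re-implementing minimal ERR_PTR()/IS_ERR()/PTR_ERR() helpers; make_child() is a hypothetical stand-in for inherit_event():

/* Illustrative reimplementation of the ERR_PTR idiom used by inherit_event():
 * a single pointer return carries three distinct outcomes. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct child { int id; };

/* Hypothetical stand-in for inherit_event():
 *  - valid pointer on success
 *  - NULL when the parent is "orphaned" (quietly skipped, not an error)
 *  - ERR_PTR(-errno) on real failure
 */
static struct child *make_child(int parent_id, int orphaned, int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	if (orphaned)
		return NULL;

	struct child *c = malloc(sizeof(*c));
	if (!c)
		return ERR_PTR(-ENOMEM);
	c->id = parent_id + 1;
	return c;
}

int main(void)
{
	struct child *c = make_child(41, 0, 0);

	if (IS_ERR(c))			/* hard error: propagate the errno */
		return (int)-PTR_ERR(c);
	if (!c) {			/* orphaned: skip quietly */
		puts("orphaned, skipped");
		return 0;
	}
	printf("child id %d\n", c->id);	/* success */
	free(c);
	return 0;
}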
@@ -10725,6 +10746,17 @@ static int inherit_group(struct perf_event *parent_event,

 	return 0;
 }

+/*
+ * Creates the child task context and tries to inherit the event-group.
+ *
+ * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
+ * inherited_all set when we 'fail' to inherit an orphaned event; this is
+ * consistent with perf_event_release_kernel() removing all child events.
+ *
+ * Returns:
+ *  - 0 on success
+ *  - <0 on error
+ */
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
@@ -10747,7 +10779,6 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,

 		 * First allocate and initialize a context for the
 		 * child.
 		 */
-
 		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
 		if (!child_ctx)
 			return -ENOMEM;
@@ -10809,7 +10840,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)

 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}

 	/*
@@ -10825,7 +10856,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)

 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}

 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10853,6 +10884,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)

 	}

 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);

 	perf_unpin_context(parent_ctx);
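Taken together, the two break -> goto out_unlock changes plus the new out_unlock: label make an inheritance failure abort the whole perf_event_init_context() pass instead of only the current loop, while still releasing parent_ctx on the way out. That is the standard kernel goto-based unwind idiom; a small stand-alone sketch of the difference, with invented function and "lock" names purely for illustration:

/* Sketch of the goto-unwind error pattern: on the first failure we jump
 * straight to the cleanup label rather than break-ing out of one loop and
 * falling through to the remaining work. */
#include <stdio.h>

static int inherit_one(int i)
{
	return (i == 2) ? -1 : 0;	/* pretend item 2 fails */
}

static int init_context(void)
{
	int ret = 0;

	printf("lock ctx\n");			/* stands in for mutex_lock(&parent_ctx->mutex) */

	for (int i = 0; i < 4; i++) {		/* "pinned" pass */
		ret = inherit_one(i);
		if (ret)
			goto out_unlock;	/* abort everything, not just this loop */
	}

	for (int i = 0; i < 4; i++) {		/* "flexible" pass, skipped after a failure */
		ret = inherit_one(i);
		if (ret)
			goto out_unlock;
	}

out_unlock:
	printf("unlock ctx (ret=%d)\n", ret);	/* cleanup always runs */
	return ret;
}

int main(void)
{
	return init_context() ? 1 : 0;
}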
tools/perf/util/symbol.c

@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)

 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }

 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
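The symbol.c change matters when the last symbol's start address is already page-aligned: roundup(start, 4096) then equals start, so the old heuristic left the symbol with zero size; the fix extends it by one extra page. A quick stand-alone check of that arithmetic, with roundup() written out the usual way and a made-up address for illustration:

/* Demonstrates the symbols__fixup_end() corner case fixed above: a last
 * symbol starting exactly on a page boundary got a zero size before. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long start = 0xa0002000UL;	/* already 4096-aligned */

	unsigned long old_end = roundup(start, PAGE_SIZE);		/* == start */
	unsigned long new_end = roundup(start, PAGE_SIZE) + PAGE_SIZE;	/* one page later */

	printf("old size: %lu bytes\n", old_end - start);	/* 0    */
	printf("new size: %lu bytes\n", new_end - start);	/* 4096 */
	return 0;
}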